├── .defaults.sh
├── .install_scripts
│   ├── bootstrap.sh
│   ├── clusterversion.sh
│   ├── create_lb.sh
│   ├── create_nodes.sh
│   ├── destroy.sh
│   ├── dns_check.sh
│   ├── download_prepare.sh
│   ├── libvirt_network.sh
│   ├── post.sh
│   ├── process_args.sh
│   ├── sanity_check.sh
│   ├── show_help.sh
│   ├── utils.sh
│   └── version_check.sh
├── .post_scripts
│   ├── add_node.sh
│   └── expose_cluster.sh
├── README.md
└── ocp4_setup_upi_kvm.sh
/.defaults.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # These are the default values used by the script, i.e.,
4 | # If an option/switch is not provided, these are the values that the script will use.
5 | # These values can be overridden by providing corresponding switches/options.
6 | # You can change these default values to the ones appropriate to your environment,
7 | # to avoid passing them every time.
8 |
9 | # -O, --ocp-version VERSION
10 | export OCP_VERSION="stable"
11 |
12 | # -R, --rhcos-version VERSION
13 | export RHCOS_VERSION=""
14 |
15 | # -m, --masters N
16 | export N_MAST="3"
17 |
18 | # -w, --workers N
19 | export N_WORK="2"
20 |
21 | # --master-cpu N(vCPU)
22 | export MAS_CPU="4"
23 |
24 | # --master-mem SIZE(MB)
25 | export MAS_MEM="16000"
26 |
27 | # --worker-cpu N(vCPU)
28 | export WOR_CPU="2"
29 |
30 | # --worker-mem SIZE(MB)
31 | export WOR_MEM="8000"
32 |
33 | # --bootstrap-cpu N(vCPU)
34 | export BTS_CPU="4"
35 |
36 | # --bootstrap-mem SIZE(MB)
37 | export BTS_MEM="16000"
38 |
39 | # --lb-cpu N(vCPU)
40 | export LB_CPU="1"
41 |
42 | # --lb-mem SIZE(MB)
43 | export LB_MEM="1024"
44 |
45 | # -n, --libvirt-network NETWORK
46 | export DEF_LIBVIRT_NET="default"
47 |
48 | # -N, --libvirt-oct OCTET
49 | export VIR_NET_OCT=""
50 |
51 | # -c, --cluster-name NAME
52 | export CLUSTER_NAME="ocp4"
53 |
54 | # -d, --cluster-domain DOMAIN
55 | export BASE_DOM="local"
56 |
57 | # -z, --dns-dir DIR
58 | export DNS_DIR="/etc/NetworkManager/dnsmasq.d"
59 |
60 | # -v, --vm-dir DIR
61 | export VM_DIR="/var/lib/libvirt/images"
62 |
63 | # -s, --setup-dir DIR
64 | # By default set to /root/ocp4_cluster_$CLUSTER_NAME
65 | export SETUP_DIR=""
66 |
67 | # -x, --cache-dir DIR
68 | export CACHE_DIR="/root/ocp4_downloads"
69 |
70 | # -p, --pull-secret FILE
71 | export PULL_SEC_F="/root/pull-secret"
72 |
73 | # --ssh-pub-key-file
74 | # By default a new ssh key pair is generated in $SETUP_DIR
75 | export SSH_PUB_KEY_FILE=""
76 |
77 |
78 | # Below are some "flags" which by default are set to "no"
79 | # and can be overridden by their respective switches.
80 | # If you set them to "yes" here, you won't need to pass those
81 | # switches every time you run the script.
82 |
83 | # --autostart-vms
84 | export AUTOSTART_VMS="no"
85 |
86 | # -k, --keep-bootstrap
87 | export KEEP_BS="no"
88 |
89 | # -X, --fresh-download
90 | export FRESH_DOWN="no"
91 |
92 | # --destroy
93 | # Don't set this to yes
94 | export DESTROY="no"
95 |
96 | # -y, --yes
97 | export YES="no"
98 |
99 |
100 | export OCP_MIRROR="https://mirror.openshift.com/pub/openshift-v4/clients/ocp"
101 | export RHCOS_MIRROR="https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos"
102 | export LB_IMG_URL="https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
103 |
--------------------------------------------------------------------------------
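Each default above maps to a command-line switch handled in process_args.sh, so overriding a few of them looks like the following sketch (the version, counts, and paths are illustrative, not required values):

    ./ocp4_setup_upi_kvm.sh --ocp-version 4.3.12 --masters 3 --workers 2 \
        --pull-secret /root/pull-secret --cluster-name ocp4 --cluster-domain local
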
/.install_scripts/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "###############################"
5 | echo "#### OPENSHIFT BOOTSTRAPING ###"
6 | echo "###############################"
7 | echo
8 |
9 | cp install_dir/auth/kubeconfig install_dir/auth/kubeconfig.orig
10 | export KUBECONFIG="install_dir/auth/kubeconfig"
11 |
12 |
13 | echo "====> Waiting for Boostraping to finish: "
14 | echo "(Monitoring activity on bootstrap.${CLUSTER_NAME}.${BASE_DOM})"
15 | a_dones=()
16 | a_conts=()
17 | a_images=()
18 | a_nodes=()
19 | s_api="Down"
20 | btk_started=0
21 | no_output_counter=0
22 | while true; do
23 | output_flag=0
24 | if [ "${s_api}" == "Down" ]; then
25 | ./oc get --raw / &> /dev/null && \
26 | { echo " ==> Kubernetes API is Up"; s_api="Up"; output_flag=1; } || true
27 | else
28 | nodes=($(./oc get nodes 2> /dev/null | grep -v "^NAME" | awk '{print $1 "_" $2}' )) || true
29 | for n in ${nodes[@]}; do
30 | if [[ ! " ${a_nodes[@]} " =~ " ${n} " ]]; then
31 | echo " --> Node $(echo $n | tr '_' ' ')"
32 | output_flag=1
33 | a_nodes+=( "${n}" )
34 | fi
35 | done
36 | fi
37 | images=($(ssh -i sshkey "core@bootstrap.${CLUSTER_NAME}.${BASE_DOM}" "sudo podman images 2> /dev/null | grep -v '^REPOSITORY' | awk '{print \$1 \"-\" \$3}'" )) || true
38 | for i in ${images[@]}; do
39 | if [[ ! " ${a_images[@]} " =~ " ${i} " ]]; then
40 | echo " --> Image Downloaded: ${i}"
41 | output_flag=1
42 | a_images+=( "${i}" )
43 | fi
44 | done
45 | dones=($(ssh -i sshkey "core@bootstrap.${CLUSTER_NAME}.${BASE_DOM}" "ls /opt/openshift/*.done 2> /dev/null" )) || true
46 | for d in ${dones[@]}; do
47 | if [[ ! " ${a_dones[@]} " =~ " ${d} " ]]; then
48 | echo " --> Phase Completed: $(echo $d | sed 's/.*\/\(.*\)\.done/\1/')"
49 | output_flag=1
50 | a_dones+=( "${d}" )
51 | fi
52 | done
53 | conts=($(ssh -i sshkey "core@bootstrap.${CLUSTER_NAME}.${BASE_DOM}" "sudo crictl ps -a 2> /dev/null | grep -v '^CONTAINER' | rev | awk '{print \$4 \"_\" \$2 \"_\" \$3}' | rev" )) || true
54 | for c in ${conts[@]}; do
55 | if [[ ! " ${a_conts[@]} " =~ " ${c} " ]]; then
56 | echo " --> Container: $(echo $c | tr '_' ' ')"
57 | output_flag=1
58 | a_conts+=( "${c}" )
59 | fi
60 | done
61 |
62 | btk_stat=$(ssh -i sshkey "core@bootstrap.${CLUSTER_NAME}.${BASE_DOM}" "sudo systemctl is-active bootkube.service 2> /dev/null" ) || true
63 | test "$btk_stat" = "active" -a "$btk_started" = "0" && btk_started=1 || true
64 |
65 | test "$output_flag" = "0" && no_output_counter=$(( $no_output_counter + 1 )) || no_output_counter=0
66 |
67 | test "$no_output_counter" -gt "8" && \
68 | { echo " --> (bootkube.service is ${btk_stat}, Kube API is ${s_api})"; no_output_counter=0; }
69 |
70 | test "$btk_started" = "1" -a "$btk_stat" = "inactive" -a "$s_api" = "Down" && \
71 | { echo '[Warning] Something went wrong. Bootkube service was not able to bring up the Kube API'; }
72 |
73 | test "$btk_stat" = "inactive" -a "$s_api" = "Up" && break
74 |
75 | sleep 15
76 |
77 | done
78 |
79 | ./openshift-install --dir=install_dir wait-for bootstrap-complete
80 |
81 | echo -n "====> Removing Boostrap VM: "
82 | if [ "${KEEP_BS}" == "no" ]; then
83 | virsh destroy ${CLUSTER_NAME}-bootstrap > /dev/null || err "virsh destroy ${CLUSTER_NAME}-bootstrap failed"
84 | virsh undefine ${CLUSTER_NAME}-bootstrap --remove-all-storage > /dev/null || err "virsh undefine ${CLUSTER_NAME}-bootstrap --remove-all-storage"; ok
85 | else
86 | ok "skipping"
87 | fi
88 |
89 | echo -n "====> Removing Bootstrap from haproxy: "
90 | ssh -i sshkey "lb.${CLUSTER_NAME}.${BASE_DOM}" \
91 | "sed -i '/bootstrap\.${CLUSTER_NAME}\.${BASE_DOM}/d' /etc/haproxy/haproxy.cfg" || err "failed"
92 | ssh -i sshkey "lb.${CLUSTER_NAME}.${BASE_DOM}" "systemctl restart haproxy" || err "failed"; ok
93 |
94 |
--------------------------------------------------------------------------------
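While this loop polls the bootstrap node, the same progress can be followed by hand from ${SETUP_DIR}; a sketch assuming the default cluster name/domain (ocp4.local) and the sshkey generated by the script:

    # follow bootkube on the bootstrap node
    ssh -i sshkey core@bootstrap.ocp4.local "journalctl -b -f -u bootkube.service"
    # or let the installer report bootstrap progress with extra verbosity
    ./openshift-install --dir=install_dir wait-for bootstrap-complete --log-level=debug
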
/.install_scripts/clusterversion.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "#################################"
5 | echo "#### OPENSHIFT CLUSTERVERSION ###"
6 | echo "#################################"
7 | echo
8 |
9 | echo "====> Waiting for clusterversion: "
10 | ingress_patched=0
11 | imgreg_patched=0
12 | output_delay=0
13 | nodes_total=$(( $N_MAST + $N_WORK ))
14 | nodes_ready=0
15 | while true
16 | do
17 | cv_prog_msg=$(./oc get clusterversion -o jsonpath='{.items[*].status.conditions[?(.type=="Progressing")].message}' 2> /dev/null) || continue
18 | cv_avail=$(./oc get clusterversion -o jsonpath='{.items[*].status.conditions[?(.type=="Available")].status}' 2> /dev/null) || continue
19 | nodes_ready=$(./oc get nodes | grep 'Ready' | wc -l)
20 |
21 | if [ "$imgreg_patched" == "0" ]; then
22 | ./oc get configs.imageregistry.operator.openshift.io cluster &> /dev/null && \
23 | {
24 | sleep 30
25 | echo -n ' --> Patching image registry to use EmptyDir: ';
26 | ./oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}}}}' 2> /dev/null && \
27 | imgreg_patched=1 || true
28 | sleep 30
29 | test "$imgreg_patched" -eq "1" && ./oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"managementState": "Managed"}}' &> /dev/null || true
30 | } || true
31 | fi
32 |
33 | if [ "$ingress_patched" == "0" ]; then
34 | ./oc get -n openshift-ingress-operator ingresscontroller default &> /dev/null && \
35 | {
36 | sleep 30
37 | echo -n ' --> Patching ingress controller to run router pods on master nodes: ';
38 | ./oc patch ingresscontroller default -n openshift-ingress-operator \
39 | --type merge \
40 | --patch '{
41 | "spec":{
42 | "replicas": '"${N_MAST}"',
43 | "nodePlacement":{
44 | "nodeSelector":{
45 | "matchLabels":{
46 | "node-role.kubernetes.io/master":""
47 | }
48 | },
49 | "tolerations":[{
50 | "effect": "NoSchedule",
51 | "operator": "Exists"
52 | }]
53 | }
54 | }
55 | }' 2> /dev/null && ingress_patched=1 || true
56 | } || true
57 | fi
58 |
59 | for csr in $(./oc get csr 2> /dev/null | grep -w 'Pending' | awk '{print $1}'); do
60 | echo -n ' --> Approving CSR: ';
61 | ./oc adm certificate approve "$csr" 2> /dev/null || true
62 | output_delay=0
63 | done
64 |
65 | if [ "$output_delay" -gt 8 ]; then
66 | if [ "$cv_avail" == "True" ]; then
67 | echo " --> Waiting for all nodes to ready. $nodes_ready/$nodes_total are ready."
68 | else
69 | echo -n " --> ${cv_prog_msg:0:70}"; test -n "${cv_prog_msg:71}" && echo " ..." || echo
70 | fi
71 | output_delay=0
72 | fi
73 |
74 | test "$cv_avail" = "True" && test "$nodes_ready" -ge "$nodes_total" && break
75 | output_delay=$(( output_delay + 1 ))
76 | sleep 15
77 | done
78 |
79 | export END_TS=$(date +%s)
80 | export TIME_TAKEN="$(( ($END_TS - $START_TS) / 60 ))"
81 |
82 | echo
83 | echo "######################################################"
84 | echo "#### OPENSHIFT 4 INSTALLATION FINISHED SUCCESSFULLY###"
85 | echo "######################################################"
86 | echo " time taken = $TIME_TAKEN minutes"
87 | echo
88 |
89 | ./openshift-install --dir=install_dir wait-for install-complete
90 |
91 |
--------------------------------------------------------------------------------
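The loop above automates checks that can also be run by hand once the API is up; an illustrative sequence from ${SETUP_DIR} that mirrors what the script does:

    export KUBECONFIG=install_dir/auth/kubeconfig
    ./oc get clusterversion
    ./oc get nodes
    # approve any CSRs still pending
    ./oc get csr | awk '/Pending/ {print $1}' | xargs -r ./oc adm certificate approve
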
/.install_scripts/create_lb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "#################################"
5 | echo "### CREATING LOAD BALANCER VM ###"
6 | echo "#################################"
7 | echo
8 |
9 |
10 | echo -n "====> Downloading Centos 7 cloud image: "; download get "$LB_IMG" "$LB_IMG_URL";
11 |
12 | echo -n "====> Copying Image for Loadbalancer VM: "
13 | cp "${CACHE_DIR}/CentOS-7-x86_64-GenericCloud.qcow2" "${VM_DIR}/${CLUSTER_NAME}-lb.qcow2" || \
14 | err "Copying '${VM_DIR}/CentOS-7-x86_64-GenericCloud.qcow2' to '${VM_DIR}/${CLUSTER_NAME}-lb.qcow2' failed"; ok
15 |
16 | echo "====> Setting up Loadbalancer VM: "
17 | virt-customize -a "${VM_DIR}/${CLUSTER_NAME}-lb.qcow2" \
18 | --uninstall cloud-init --ssh-inject root:file:${SSH_PUB_KEY_FILE} --selinux-relabel --install haproxy --install bind-utils \
19 | --copy-in install_dir/bootstrap.ign:/opt/ --copy-in install_dir/master.ign:/opt/ --copy-in install_dir/worker.ign:/opt/ \
20 | --copy-in "${CACHE_DIR}/${IMAGE}":/opt/ --copy-in tmpws.service:/etc/systemd/system/ \
21 | --copy-in haproxy.cfg:/etc/haproxy/ \
22 | --run-command "systemctl daemon-reload" --run-command "systemctl enable tmpws.service" || \
23 | err "Setting up Loadbalancer VM image ${VM_DIR}/${CLUSTER_NAME}-lb.qcow2 failed"
24 |
25 | echo -n "====> Creating Loadbalancer VM: "
26 | virt-install --import --name ${CLUSTER_NAME}-lb --disk "${VM_DIR}/${CLUSTER_NAME}-lb.qcow2" \
27 | --memory ${LB_MEM} --cpu host --vcpus ${LB_CPU} --os-type linux --os-variant rhel7.0 --network network=${VIR_NET},model=virtio \
28 | --noreboot --noautoconsole > /dev/null || \
29 | err "Creating Loadbalancer VM from ${VM_DIR}/${CLUSTER_NAME}-lb.qcow2 failed"; ok
30 |
31 | echo -n "====> Starting Loadbalancer VM "
32 | virsh start ${CLUSTER_NAME}-lb > /dev/null || err "Starting Loadbalancer VM ${CLUSTER_NAME}-lb failed"; ok
33 |
34 | echo -n "====> Waiting for Loadbalancer VM to obtain IP address: "
35 | while true; do
36 | sleep 5
37 | LBIP=$(virsh domifaddr "${CLUSTER_NAME}-lb" | grep ipv4 | head -n1 | awk '{print $4}' | cut -d'/' -f1 2> /dev/null)
38 | test "$?" -eq "0" -a -n "$LBIP" && { echo "$LBIP"; break; }
39 | done
40 | MAC=$(virsh domifaddr "${CLUSTER_NAME}-lb" | grep ipv4 | head -n1 | awk '{print $2}')
41 |
42 | echo -n "====> Adding DHCP reservation for LB IP/MAC: "
43 | virsh net-update ${VIR_NET} add-last ip-dhcp-host --xml "<host mac='$MAC' ip='$LBIP'/>" --live --config &> /dev/null || \
44 | err "Adding DHCP reservation for $LBIP/$MAC failed"; ok
45 |
46 | echo -n "====> Adding LB hosts entry in /etc/hosts.${CLUSTER_NAME}: "
47 | echo "$LBIP lb.${CLUSTER_NAME}.${BASE_DOM}" \
48 | "api.${CLUSTER_NAME}.${BASE_DOM}" \
49 | "api-int.${CLUSTER_NAME}.${BASE_DOM}" >> /etc/hosts.${CLUSTER_NAME}; ok
50 |
51 | systemctl $DNS_CMD $DNS_SVC || err "systemctl $DNS_CMD $DNS_SVC failed";
52 |
53 | echo -n "====> Waiting for SSH access on LB VM: "
54 | ssh-keygen -R lb.${CLUSTER_NAME}.${BASE_DOM} &> /dev/null || true
55 | ssh-keygen -R ${LBIP} &> /dev/null || true
56 | while true; do
57 | sleep 1
58 | ssh -i sshkey -o StrictHostKeyChecking=no lb.${CLUSTER_NAME}.${BASE_DOM} true &> /dev/null || continue
59 | break
60 | done
61 | ssh -i sshkey "lb.${CLUSTER_NAME}.${BASE_DOM}" true || err "SSH to lb.${CLUSTER_NAME}.${BASE_DOM} failed"; ok
62 |
63 |
--------------------------------------------------------------------------------
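Should the load balancer need inspection later, the same key works for interactive access; an illustrative check assuming the default cluster name/domain:

    ssh -i sshkey lb.ocp4.local "systemctl status haproxy --no-pager"
    ssh -i sshkey lb.ocp4.local "ss -tlnp | grep -E ':(6443|22623|80|443)'"
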
/.install_scripts/create_nodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "############################################"
5 | echo "#### CREATE BOOTSTRAPING RHCOS/OCP NODES ###"
6 | echo "############################################"
7 | echo
8 |
9 | if [ -n "$RHCOS_LIVE" ]; then
10 | RHCOS_I_ARG="coreos.live.rootfs_url"
11 | else
12 | RHCOS_I_ARG="coreos.inst.image_url"
13 | fi
14 |
15 | echo -n "====> Creating Boostrap VM: "
16 | virt-install --name ${CLUSTER_NAME}-bootstrap \
17 | --disk "${VM_DIR}/${CLUSTER_NAME}-bootstrap.qcow2,size=50" --ram ${BTS_MEM} --cpu host --vcpus ${BTS_CPU} \
18 | --os-type linux --os-variant rhel7.0 \
19 | --network network=${VIR_NET},model=virtio --noreboot --noautoconsole \
20 | --location rhcos-install/ \
21 | --extra-args "nomodeset rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda ${RHCOS_I_ARG}=http://${LBIP}:${WS_PORT}/${IMAGE} coreos.inst.ignition_url=http://${LBIP}:${WS_PORT}/bootstrap.ign" > /dev/null || err "Creating boostrap vm failed"; ok
22 |
23 | for i in $(seq 1 ${N_MAST})
24 | do
25 | echo -n "====> Creating Master-${i} VM: "
26 | virt-install --name ${CLUSTER_NAME}-master-${i} \
27 | --disk "${VM_DIR}/${CLUSTER_NAME}-master-${i}.qcow2,size=50" --ram ${MAS_MEM} --cpu host --vcpus ${MAS_CPU} \
28 | --os-type linux --os-variant rhel7.0 \
29 | --network network=${VIR_NET},model=virtio --noreboot --noautoconsole \
30 | --location rhcos-install/ \
31 | --extra-args "nomodeset rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda ${RHCOS_I_ARG}=http://${LBIP}:${WS_PORT}/${IMAGE} coreos.inst.ignition_url=http://${LBIP}:${WS_PORT}/master.ign" > /dev/null || err "Creating master-${i} vm failed "; ok
32 | done
33 |
34 | for i in $(seq 1 ${N_WORK})
35 | do
36 | echo -n "====> Creating Worker-${i} VM: "
37 | virt-install --name ${CLUSTER_NAME}-worker-${i} \
38 | --disk "${VM_DIR}/${CLUSTER_NAME}-worker-${i}.qcow2,size=50" --ram ${WOR_MEM} --cpu host --vcpus ${WOR_CPU} \
39 | --os-type linux --os-variant rhel7.0 \
40 | --network network=${VIR_NET},model=virtio --noreboot --noautoconsole \
41 | --location rhcos-install/ \
42 | --extra-args "nomodeset rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda ${RHCOS_I_ARG}=http://${LBIP}:${WS_PORT}/${IMAGE} coreos.inst.ignition_url=http://${LBIP}:${WS_PORT}/worker.ign" > /dev/null || err "Creating worker-${i} vm failed "; ok
43 | done
44 |
45 | echo "====> Waiting for RHCOS Installation to finish: "
46 | while rvms=$(virsh list --name | grep "${CLUSTER_NAME}-master-\|${CLUSTER_NAME}-worker-\|${CLUSTER_NAME}-bootstrap" 2> /dev/null); do
47 | sleep 15
48 | echo " --> VMs with pending installation: $(echo "$rvms" | tr '\n' ' ')"
49 | done
50 |
51 | echo -n "====> Marking ${CLUSTER_NAME}.${BASE_DOM} as local in dnsmasq: "
52 | echo "local=/${CLUSTER_NAME}.${BASE_DOM}/" >> ${DNS_DIR}/${CLUSTER_NAME}.conf || err "Updating ${DNS_DIR}/${CLUSTER_NAME}.conf failed"; ok
53 |
54 | echo -n "====> Starting Bootstrap VM: "
55 | virsh start ${CLUSTER_NAME}-bootstrap > /dev/null || err "virsh start ${CLUSTER_NAME}-bootstrap failed"; ok
56 |
57 | for i in $(seq 1 ${N_MAST})
58 | do
59 | echo -n "====> Starting Master-${i} VM: "
60 | virsh start ${CLUSTER_NAME}-master-${i} > /dev/null || err "virsh start ${CLUSTER_NAME}-master-${i} failed"; ok
61 | done
62 |
63 | for i in $(seq 1 ${N_WORK})
64 | do
65 | echo -n "====> Starting Worker-${i} VMs: "
66 | virsh start ${CLUSTER_NAME}-worker-${i} > /dev/null || err "virsh start ${CLUSTER_NAME}-worker-${i} failed"; ok
67 | done
68 |
69 | echo -n "====> Waiting for Bootstrap to obtain IP address: "
70 | while true; do
71 | sleep 5
72 | BSIP=$(virsh domifaddr "${CLUSTER_NAME}-bootstrap" | grep ipv4 | head -n1 | awk '{print $4}' | cut -d'/' -f1 2> /dev/null)
73 | test "$?" -eq "0" -a -n "$BSIP" && { echo "$BSIP"; break; }
74 | done
75 | MAC=$(virsh domifaddr "${CLUSTER_NAME}-bootstrap" | grep ipv4 | head -n1 | awk '{print $2}')
76 |
77 | echo -n " ==> Adding DHCP reservation: "
78 | virsh net-update ${VIR_NET} add-last ip-dhcp-host --xml "<host mac='$MAC' ip='$BSIP'/>" --live --config > /dev/null || \
79 | err "Adding DHCP reservation failed"; ok
80 |
81 | echo -n " ==> Adding hosts entry in /etc/hosts.${CLUSTER_NAME}: "
82 | echo "$BSIP bootstrap.${CLUSTER_NAME}.${BASE_DOM}" >> /etc/hosts.${CLUSTER_NAME} || err "failed"; ok
83 |
84 | for i in $(seq 1 ${N_MAST}); do
85 | echo -n "====> Waiting for Master-$i to obtain IP address: "
86 | while true
87 | do
88 | sleep 5
89 | IP=$(virsh domifaddr "${CLUSTER_NAME}-master-${i}" | grep ipv4 | head -n1 | awk '{print $4}' | cut -d'/' -f1 2> /dev/null)
90 | test "$?" -eq "0" -a -n "$IP" && { echo "$IP"; break; }
91 | done
92 | MAC=$(virsh domifaddr "${CLUSTER_NAME}-master-${i}" | grep ipv4 | head -n1 | awk '{print $2}')
93 |
94 | echo -n " ==> Adding DHCP reservation: "
95 | virsh net-update ${VIR_NET} add-last ip-dhcp-host --xml "<host mac='$MAC' ip='$IP'/>" --live --config > /dev/null || \
96 | err "Adding DHCP reservation failed"; ok
97 |
98 | echo -n " ==> Adding hosts entry in /etc/hosts.${CLUSTER_NAME}: "
99 | echo "$IP master-${i}.${CLUSTER_NAME}.${BASE_DOM}" \
100 | "etcd-$((i-1)).${CLUSTER_NAME}.${BASE_DOM}" >> /etc/hosts.${CLUSTER_NAME} || err "failed"; ok
101 |
102 | echo -n " ==> Adding SRV record in dnsmasq: "
103 | echo "srv-host=_etcd-server-ssl._tcp.${CLUSTER_NAME}.${BASE_DOM},etcd-$((i-1)).${CLUSTER_NAME}.${BASE_DOM},2380,0,10" >> ${DNS_DIR}/${CLUSTER_NAME}.conf || \
104 | err "failed"; ok
105 | done
106 |
107 | for i in $(seq 1 ${N_WORK}); do
108 | echo -n "====> Waiting for Worker-$i to obtain IP address: "
109 | while true
110 | do
111 | sleep 5
112 | IP=$(virsh domifaddr "${CLUSTER_NAME}-worker-${i}" | grep ipv4 | head -n1 | awk '{print $4}' | cut -d'/' -f1 2> /dev/null)
113 | test "$?" -eq "0" -a -n "$IP" && { echo "$IP"; break; }
114 | done
115 | MAC=$(virsh domifaddr "${CLUSTER_NAME}-worker-${i}" | grep ipv4 | head -n1 | awk '{print $2}')
116 |
117 | echo -n " ==> Adding DHCP reservation: "
118 | virsh net-update ${VIR_NET} add-last ip-dhcp-host --xml "<host mac='$MAC' ip='$IP'/>" --live --config > /dev/null || \
119 | err "Adding DHCP reservation failed"; ok
120 |
121 | echo -n " ==> Adding hosts entry in /etc/hosts.${CLUSTER_NAME}: "
122 | echo "$IP worker-${i}.${CLUSTER_NAME}.${BASE_DOM}" >> /etc/hosts.${CLUSTER_NAME} || err "failed"; ok
123 | done
124 |
125 | echo -n '====> Adding wild-card (*.apps) dns record in dnsmasq: '
126 | echo "address=/apps.${CLUSTER_NAME}.${BASE_DOM}/${LBIP}" >> ${DNS_DIR}/${CLUSTER_NAME}.conf || err "failed"; ok
127 |
128 | echo -n "====> Resstarting libvirt and dnsmasq: "
129 | systemctl restart libvirtd || err "systemctl restart libvirtd failed"
130 | systemctl $DNS_CMD $DNS_SVC || err "systemctl $DNS_CMD $DNS_SVC"; ok
131 |
132 |
133 | echo -n "====> Configuring haproxy in LB VM: "
134 | ssh -i sshkey "lb.${CLUSTER_NAME}.${BASE_DOM}" "semanage port -a -t http_port_t -p tcp 6443" || \
135 | err "semanage port -a -t http_port_t -p tcp 6443 failed" && echo -n "."
136 | ssh -i sshkey "lb.${CLUSTER_NAME}.${BASE_DOM}" "semanage port -a -t http_port_t -p tcp 22623" || \
137 | err "semanage port -a -t http_port_t -p tcp 22623 failed" && echo -n "."
138 | ssh -i sshkey "lb.${CLUSTER_NAME}.${BASE_DOM}" "systemctl start haproxy" || \
139 | err "systemctl start haproxy failed" && echo -n "."
140 | ssh -i sshkey "lb.${CLUSTER_NAME}.${BASE_DOM}" "systemctl -q enable haproxy" || \
141 | err "systemctl enable haproxy failed" && echo -n "."
142 | ssh -i sshkey "lb.${CLUSTER_NAME}.${BASE_DOM}" "systemctl -q is-active haproxy" || \
143 | err "haproxy not working as expected" && echo -n "."
144 | ok
145 |
146 |
147 | if [ "$AUTOSTART_VMS" == "yes" ]; then
148 | echo -n "====> Setting VMs to autostart: "
149 | for vm in $(virsh list --all --name --no-autostart | grep "^${CLUSTER_NAME}-"); do
150 | virsh autostart "${vm}" &> /dev/null
151 | echo -n "."
152 | done
153 | ok
154 | fi
155 |
156 |
157 | echo -n "====> Waiting for SSH access on Boostrap VM: "
158 | ssh-keygen -R bootstrap.${CLUSTER_NAME}.${BASE_DOM} &> /dev/null || true
159 | ssh-keygen -R $BSIP &> /dev/null || true
160 | while true; do
161 | sleep 1
162 | ssh -i sshkey -o StrictHostKeyChecking=no core@bootstrap.${CLUSTER_NAME}.${BASE_DOM} true &> /dev/null || continue
163 | break
164 | done
165 | ssh -i sshkey "core@bootstrap.${CLUSTER_NAME}.${BASE_DOM}" true || err "SSH to lb.${CLUSTER_NAME}.${BASE_DOM} failed"; ok
166 |
167 |
--------------------------------------------------------------------------------
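The IP/MAC discovery above relies on libvirt's DHCP leases; equivalent manual checks, with names assuming the default cluster name and libvirt network:

    virsh domifaddr ocp4-master-1                  # current lease for one node
    virsh net-dumpxml default | grep "host mac"    # reservations added by the script
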
/.install_scripts/destroy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "##################"
5 | echo "#### DESTROY ###"
6 | echo "##################"
7 | echo
8 |
9 | if [ -n "$VIR_NET_OCT" -a -z "$VIR_NET" ]; then
10 | VIR_NET="ocp-${VIR_NET_OCT}"
11 | fi
12 |
13 | for vm in $(virsh list --all --name | grep "${CLUSTER_NAME}-lb\|${CLUSTER_NAME}-master-\|${CLUSTER_NAME}-worker-\|${CLUSTER_NAME}-bootstrap"); do
14 | check_if_we_can_continue "Deleting VM $vm"
15 | MAC=$(virsh domiflist "$vm" | grep network | awk '{print $5}')
16 | DHCP_LEASE=$(virsh net-dumpxml ${VIR_NET} | grep '<host mac' | grep "$MAC" | sed 's/^ *//' || true)
17 | echo -n "XXXX> Deleting DHCP reservation for VM $vm: "
18 | virsh net-update ${VIR_NET} delete ip-dhcp-host --xml "$DHCP_LEASE" --live --config &> /dev/null || \
19 | echo -n "dhcp reservation delete failed (ignoring) ... "
20 | ok
21 | echo -n "XXXX> Deleting VM $vm: "
22 | virsh destroy "$vm" &> /dev/null || echo -n "stopping vm failed (ignoring) ... "
23 | virsh undefine "$vm" --remove-all-storage &> /dev/null || echo -n "deleting vm failed (ignoring) ... "
24 | ok
25 | done
26 |
27 | if [ -n "$VIR_NET_OCT" ]; then
28 | virnet=$(virsh net-uuid "ocp-${VIR_NET_OCT}" 2> /dev/null || true)
29 | if [ -n "$virnet" ]; then
30 | check_if_we_can_continue "Deleting libvirt network ocp-${VIR_NET_OCT}"
31 | echo -n "XXXX> Deleting libvirt network ocp-${VIR_NET_OCT}: "
32 | virsh net-destroy "ocp-${VIR_NET_OCT}" > /dev/null || echo -n "virsh net-destroy ocp-${VIR_NET_OCT} failed (ignoring) ... "
33 | virsh net-undefine "ocp-${VIR_NET_OCT}" > /dev/null || echo -n "virsh net-undefine ocp-${VIR_NET_OCT} failed (ignoring) ... "
34 | ok
35 | fi
36 | fi
37 |
38 | if [ -d "${SETUP_DIR}" ]; then
39 | check_if_we_can_continue "Removing directory (rm -rf) $SETUP_DIR"
40 | echo -n "XXXX> Deleting (rm -rf) directory $SETUP_DIR: "
41 | rm -rf "$SETUP_DIR" || echo -n "Deleting directory failed (ignoring) ... "
42 | ok
43 | fi
44 |
45 | h_rec=$(cat /etc/hosts | grep -v "^#" | grep -s "${CLUSTER_NAME}\.${BASE_DOM}$" 2> /dev/null || true)
46 | if [ -n "$h_rec" ]; then
47 | check_if_we_can_continue "Commenting entries in /etc/hosts for ${CLUSTER_NAME}.${BASE_DOM}"
48 | echo -n "XXXX> Commenting entries in /etc/hosts for ${CLUSTER_NAME}.${BASE_DOM}: "
49 | sed -i "s/\(.*\.${CLUSTER_NAME}\.${BASE_DOM}$\)/#\1/" "/etc/hosts" || echo -n "sed failed (ignoring) ... "
50 | ok
51 | fi
52 |
53 | if [ -f "${DNS_DIR}/${CLUSTER_NAME}.conf" ]; then
54 | check_if_we_can_continue "Removing file ${DNS_DIR}/${CLUSTER_NAME}.conf"
55 | echo -n "XXXX> Removing file ${DNS_DIR}/${CLUSTER_NAME}.conf: "
56 | rm -f "${DNS_DIR}/${CLUSTER_NAME}.conf" &> /dev/null || echo -n "removing file failed (ignoring) ... "
57 | ok
58 | fi
59 |
60 | if [ -f "/etc/hosts.${CLUSTER_NAME}" ]; then
61 | check_if_we_can_continue "Removing file /etc/hosts.${CLUSTER_NAME}"
62 | echo -n "XXXX> Removing file /etc/hosts.${CLUSTER_NAME}: "
63 | rm -f "/etc/hosts.${CLUSTER_NAME}" &> /dev/null || echo -n "removing file failed (ignoring) ... "
64 | ok
65 | fi
66 |
--------------------------------------------------------------------------------
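This script runs when the main script is invoked with --destroy; an illustrative invocation, mirroring the options used at install time (-y/--yes skips the confirmation prompts):

    ./ocp4_setup_upi_kvm.sh --cluster-name ocp4 --cluster-domain local --destroy --yes
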
/.install_scripts/dns_check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "##################"
5 | echo "#### DNS CHECK ###"
6 | echo "##################"
7 | echo
8 |
9 | reload_dns(){
10 | systemctl $DNS_CMD $DNS_SVC || err "systemctl $DNS_CMD $DNS_SVC failed"; echo -n "."
11 | sleep 5
12 | systemctl restart libvirtd || err "systemctl restart libvirtd failed"; echo -n "."
13 | sleep 5
14 | }
15 |
16 | cleanup() {
17 | rm -f "/etc/hosts.dnstest" "${DNS_DIR}/dnstest.conf" &> /dev/null || \
18 | "Removing files /etc/hosts.dnstest, ${DNS_DIR}/dnstest.conf failed"; echo -n "."
19 | reload_dns
20 | }
21 |
22 | fail() {
23 | echo -n "Failed! Cleaning up: "
24 | cleanup
25 | err "$@" \
26 | "This means that when we created dns records using dnsmasq," \
27 | "they are not being picked up by the system/libvirt" \
28 | "See: https://github.com/kxr/ocp4_setup_upi_kvm/wiki/Setting-Up-DNS"
29 | }
30 |
31 |
32 | echo -n "====> Checking if first entry in /etc/resolv.conf is pointing locally: "
33 | first_ns="$(grep -m1 "^nameserver " /etc/resolv.conf | awk '{print $2}')"
34 | first_ns_oct=$(echo "${first_ns}" | cut -d '.' -f 1)
35 | test "${first_ns_oct}" = "127" || err "First nameserver in /etc/resolv.conf is not pointing locally"
36 | ok
37 |
38 |
39 | echo -n "====> Creating a test host file for dnsmasq /etc/hosts.dnstest: "
40 | echo "1.2.3.4 xxxtestxxx.${BASE_DOM}" > /etc/hosts.dnstest; ok
41 |
42 | echo -n "====> Creating a test dnsmasq config file ${DNS_DIR}/dnstest.conf: "
43 | cat < "${DNS_DIR}/dnstest.conf"
44 | local=/${CLUSTER_NAME}.${BASE_DOM}/
45 | addn-hosts=/etc/hosts.dnstest
46 | address=/test-wild-card.${CLUSTER_NAME}.${BASE_DOM}/5.6.7.8
47 | EOF
48 | ok
49 |
50 |
51 | echo -n "====> Reloading libvirt and dnsmasq: "
52 | reload_dns; ok
53 |
54 | failed=""
55 | for dns_host in ${first_ns} ${LIBVIRT_GWIP} ""; do
56 | echo
57 | dig_dest=""
58 | test -n "${dns_host}" && dig_dest="@${dns_host}"
59 |
60 | echo -n "====> Testing forward dns via $dig_dest: "
61 | fwd_dig=$(dig +short "xxxtestxxx.${BASE_DOM}" ${dig_dest} 2> /dev/null)
62 | test "$?" -eq "0" -a "$fwd_dig" = "1.2.3.4" && ok || { failed="yes"; echo failed; }
63 |
64 | echo -n "====> Testing reverse dns via $dig_dest: "
65 | rev_dig=$(dig +short -x "1.2.3.4" ${dig_dest} 2> /dev/null)
66 | test "$?" -eq "0" -a "$rev_dig" = "xxxtestxxx.${BASE_DOM}." && ok || { failed="yes"; echo failed; }
67 |
68 | echo -n "====> Testing wildcard record via $dig_dest: "
69 | wc_dig=$(dig +short "blah.test-wild-card.${CLUSTER_NAME}.${BASE_DOM}" ${dig_dest} 2> /dev/null)
70 | test "$?" -eq "0" -a "$wc_dig" = "5.6.7.8" && ok || { failed="yes"; echo failed; }
71 | done
72 |
73 | echo
74 |
75 | test -z "${failed}" || fail "One or more DNS tests failed"
76 |
77 |
78 | echo -n "====> All DNS tests passed. Cleaning up: "
79 | cleanup; ok
80 |
--------------------------------------------------------------------------------
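If one of the tests fails, the same queries can be repeated by hand against the local resolver while the temporary dnstest config is still in place; for example, with the default cluster name/domain:

    dig +short xxxtestxxx.local @127.0.0.1                   # expect 1.2.3.4
    dig +short -x 1.2.3.4 @127.0.0.1                         # expect xxxtestxxx.local.
    dig +short blah.test-wild-card.ocp4.local @127.0.0.1     # expect 5.6.7.8
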
/.install_scripts/download_prepare.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "#####################################################"
5 | echo "### DOWNLOAD AND PREPARE OPENSHIFT 4 INSTALLATION ###"
6 | echo "#####################################################"
7 | echo
8 |
9 |
10 |
11 | echo -n "====> Creating and using directory ${SETUP_DIR}: "
12 | mkdir -p ${SETUP_DIR} && cd ${SETUP_DIR} || err "using ${SETUP_DIR} failed"
13 | ok
14 |
15 | echo -n "====> Creating a hosts file for this cluster (/etc/hosts.${CLUSTER_NAME}): "
16 | touch /etc/hosts.${CLUSTER_NAME} || err "Creating /etc/hosts.${CLUSTER_NAME} failed"
17 | ok
18 |
19 | echo -n "====> Creating a dnsmasq conf for this cluster (${DNS_DIR}/${CLUSTER_NAME}.conf): "
20 | echo "addn-hosts=/etc/hosts.${CLUSTER_NAME}" > ${DNS_DIR}/${CLUSTER_NAME}.conf || err "Creating ${DNS_DIR}/${CLUSTER_NAME}.conf failed"
21 | ok
22 |
23 | echo -n "====> SSH key to be injected in all VMs: "
24 | if [ -z "${SSH_PUB_KEY_FILE}" ]; then
25 | ssh-keygen -f sshkey -q -N "" || err "ssh-keygen failed"
26 | export SSH_PUB_KEY_FILE="sshkey.pub"; ok "generated new ssh key"
27 | elif [ -f "${SSH_PUB_KEY_FILE}" ]; then
28 | ok "using existing ${SSH_PUB_KEY_FILE}"
29 | else
30 | err "Unable to select SSH public key!"
31 | fi
32 |
33 |
34 | echo -n "====> Downloading OCP Client: "; download get "$CLIENT" "$CLIENT_URL";
35 | echo -n "====> Downloading OCP Installer: "; download get "$INSTALLER" "$INSTALLER_URL";
36 | tar -xf "${CACHE_DIR}/${CLIENT}" && rm -f README.md
37 | tar -xf "${CACHE_DIR}/${INSTALLER}" && rm -f rm -f README.md
38 |
39 | echo -n "====> Downloading RHCOS Image: "; download get "$IMAGE" "$IMAGE_URL";
40 | echo -n "====> Downloading RHCOS Kernel: "; download get "$KERNEL" "$KERNEL_URL";
41 | echo -n "====> Downloading RHCOS Initramfs: "; download get "$INITRAMFS" "$INITRAMFS_URL";
42 |
43 | mkdir rhcos-install
44 | cp "${CACHE_DIR}/${KERNEL}" "rhcos-install/vmlinuz"
45 | cp "${CACHE_DIR}/${INITRAMFS}" "rhcos-install/initramfs.img"
46 | cat <<EOF > rhcos-install/.treeinfo
47 | [general]
48 | arch = x86_64
49 | family = Red Hat CoreOS
50 | platforms = x86_64
51 | version = ${OCP_VER}
52 | [images-x86_64]
53 | initrd = initramfs.img
54 | kernel = vmlinuz
55 | EOF
56 |
57 |
58 | mkdir install_dir
59 | cat <<EOF > install_dir/install-config.yaml
60 | apiVersion: v1
61 | baseDomain: ${BASE_DOM}
62 | compute:
63 | - hyperthreading: Disabled
64 | name: worker
65 | replicas: 0
66 | controlPlane:
67 | hyperthreading: Disabled
68 | name: master
69 | replicas: ${N_MAST}
70 | metadata:
71 | name: ${CLUSTER_NAME}
72 | networking:
73 | clusterNetworks:
74 | - cidr: 10.128.0.0/14
75 | hostPrefix: 23
76 | networkType: OpenShiftSDN
77 | serviceNetwork:
78 | - 172.30.0.0/16
79 | platform:
80 | none: {}
81 | pullSecret: '${PULL_SEC}'
82 | sshKey: '$(cat ${SSH_PUB_KEY_FILE})'
83 | EOF
84 |
85 |
86 | echo "====> Creating ignition configs: "
87 | ./openshift-install create ignition-configs --dir=./install_dir || \
88 | err "./openshift-install create ignition-configs --dir=./install_dir failed"
89 |
90 | WS_PORT="1234"
91 | cat <<EOF > tmpws.service
92 | [Unit]
93 | After=network.target
94 | [Service]
95 | Type=simple
96 | WorkingDirectory=/opt
97 | ExecStart=/usr/bin/python -m SimpleHTTPServer ${WS_PORT}
98 | [Install]
99 | WantedBy=default.target
100 | EOF
101 |
102 |
103 |
104 | echo "
105 | global
106 | log 127.0.0.1 local2
107 | chroot /var/lib/haproxy
108 | pidfile /var/run/haproxy.pid
109 | maxconn 4000
110 | user haproxy
111 | group haproxy
112 | daemon
113 | stats socket /var/lib/haproxy/stats
114 |
115 | defaults
116 | mode tcp
117 | log global
118 | option tcplog
119 | option dontlognull
120 | option redispatch
121 | retries 3
122 | timeout queue 1m
123 | timeout connect 10s
124 | timeout client 1m
125 | timeout server 1m
126 | timeout check 10s
127 | maxconn 3000
128 | # 6443 points to control plane
129 | frontend ${CLUSTER_NAME}-api *:6443
130 | default_backend master-api
131 | backend master-api
132 | balance source
133 | server bootstrap bootstrap.${CLUSTER_NAME}.${BASE_DOM}:6443 check" > haproxy.cfg
134 | for i in $(seq 1 ${N_MAST})
135 | do
136 | echo " server master-${i} master-${i}.${CLUSTER_NAME}.${BASE_DOM}:6443 check" >> haproxy.cfg
137 | done
138 | echo "
139 |
140 | # 22623 points to control plane
141 | frontend ${CLUSTER_NAME}-mapi *:22623
142 | default_backend master-mapi
143 | backend master-mapi
144 | balance source
145 | server bootstrap bootstrap.${CLUSTER_NAME}.${BASE_DOM}:22623 check" >> haproxy.cfg
146 | for i in $(seq 1 ${N_MAST})
147 | do
148 | echo " server master-${i} master-${i}.${CLUSTER_NAME}.${BASE_DOM}:22623 check" >> haproxy.cfg
149 | done
150 | echo "
151 | # 80 points to master nodes
152 | frontend ${CLUSTER_NAME}-http *:80
153 | default_backend ingress-http
154 | backend ingress-http
155 | balance source" >> haproxy.cfg
156 | for i in $(seq 1 ${N_MAST})
157 | do
158 | echo " server master-${i} master-${i}.${CLUSTER_NAME}.${BASE_DOM}:80 check" >> haproxy.cfg
159 | done
160 | echo "
161 | # 443 points to master nodes
162 | frontend ${CLUSTER_NAME}-https *:443
163 | default_backend infra-https
164 | backend infra-https
165 | balance source" >> haproxy.cfg
166 | for i in $(seq 1 ${N_MAST})
167 | do
168 | echo " server master-${i} master-${i}.${CLUSTER_NAME}.${BASE_DOM}:443 check" >> haproxy.cfg
169 | done
170 |
171 |
--------------------------------------------------------------------------------
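For a three-master cluster the loops above append one server line per master, so the generated haproxy.cfg ends up with backends of this shape (shown with the default cluster name/domain):

    backend master-api
        balance source
        server bootstrap bootstrap.ocp4.local:6443 check
        server master-1 master-1.ocp4.local:6443 check
        server master-2 master-2.ocp4.local:6443 check
        server master-3 master-3.ocp4.local:6443 check
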
/.install_scripts/libvirt_network.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "#######################"
5 | echo "### LIBVIRT NETWORK ###"
6 | echo "#######################"
7 | echo
8 |
9 | echo -n "====> Checking libvirt network: "
10 | if [ -n "$VIR_NET_OCT" ]; then
11 | virsh net-uuid "ocp-${VIR_NET_OCT}" &> /dev/null && \
12 | { export VIR_NET="ocp-${VIR_NET_OCT}"
13 | ok "re-using ocp-${VIR_NET_OCT}"
14 | unset VIR_NET_OCT
15 | } || \
16 | {
17 | ok "will create ocp-${VIR_NET_OCT} (192.168.${VIR_NET_OCT}.0/24)"
18 | }
19 | elif [ -n "$VIR_NET" ]; then
20 | virsh net-uuid "${VIR_NET}" &> /dev/null || \
21 | err "${VIR_NET} doesn't exist"
22 | ok "using $VIR_NET"
23 | else
24 | err "Sorry, unhandled situation. Exiting"
25 | fi
26 |
27 |
28 | if [ -n "$VIR_NET_OCT" ]; then
29 | echo -n "====> Creating a new libvirt network ocp-${VIR_NET_OCT}: "
30 |
31 | cat <<EOF > /tmp/new-net.xml
32 | <network>
33 | <name>ocp-${VIR_NET_OCT}</name>
34 | <bridge name="ocp-${VIR_NET_OCT}"/>
35 | <forward mode='nat'/>
36 | <ip address="192.168.${VIR_NET_OCT}.1" netmask="255.255.255.0">
37 | <dhcp>
38 | <range start="192.168.${VIR_NET_OCT}.2" end="192.168.${VIR_NET_OCT}.254"/>
39 | </dhcp>
40 | </ip>
41 | </network>
42 | EOF
43 |
44 | virsh net-define /tmp/new-net.xml > /dev/null || err "virsh net-define /tmp/new-net.xml failed"
45 | virsh net-autostart ocp-${VIR_NET_OCT} > /dev/null || err "virsh net-autostart ocp-${VIR_NET_OCT} failed"
46 | virsh net-start ocp-${VIR_NET_OCT} > /dev/null || err "virsh net-start ocp-${VIR_NET_OCT} failed"
47 | systemctl restart libvirtd > /dev/null || err "systemctl restart libvirtd failed"
48 | echo "ocp-${VIR_NET_OCT} created"
49 | export VIR_NET="ocp-${VIR_NET_OCT}"
50 | fi
51 |
52 |
53 | export LIBVIRT_BRIDGE=$(virsh net-info ${VIR_NET} | grep "^Bridge:" | awk '{print $2}')
54 | export LIBVIRT_GWIP=$(ip -f inet addr show ${LIBVIRT_BRIDGE} | awk '/inet / {print $2}' | cut -d '/' -f1)
55 |
--------------------------------------------------------------------------------
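Once the network is created (or reused), it can be verified with the same tools the script relies on; an example assuming --libvirt-oct 155:

    virsh net-info ocp-155        # bridge name, autostart and active state
    ip -f inet addr show $(virsh net-info ocp-155 | grep '^Bridge:' | awk '{print $2}')
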
/.install_scripts/post.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cat <<EOF > env
4 | # OCP4 Automated Install using https://github.com/kxr/ocp4_setup_upi_kvm
5 | # Script location: ${SDIR}
6 | # Script invoked with: ${SINV}
7 | # OpenShift version: ${OCP_NORMALIZED_VER}
8 | # Red Hat CoreOS version: ${RHCOS_NORMALIZED_VER}
9 | #
10 | # Script start time: $(date -d @${START_TS})
11 | # Script end time: $(date -d @${END_TS})
12 | # Script finished in: ${TIME_TAKEN} minutes
13 | #
14 | # VARS:
15 |
16 | export SDIR="${SDIR}"
17 | export SETUP_DIR="${SETUP_DIR}"
18 | export DNS_DIR="${DNS_DIR}"
19 | export VM_DIR="${VM_DIR}"
20 | export KUBECONFIG="${SETUP_DIR}/install_dir/auth/kubeconfig"
21 |
22 | export CLUSTER_NAME="${CLUSTER_NAME}"
23 | export BASE_DOM="${BASE_DOM}"
24 |
25 | export LBIP="${LBIP}"
26 | export WS_PORT="${WS_PORT}"
27 | export IMAGE="${IMAGE}"
28 | export RHCOS_LIVE="${RHCOS_LIVE}"
29 |
30 | export VIR_NET="${VIR_NET}"
31 | export DNS_CMD="${DNS_CMD}"
32 | export DNS_SVC="${DNS_SVC}"
33 |
34 | EOF
35 |
36 | cp ${SDIR}/.post_scripts/*.sh ${SETUP_DIR}/
37 |
38 |
--------------------------------------------------------------------------------
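The generated env file makes the cluster reachable from a fresh shell; a sketch assuming the default setup directory for a cluster named ocp4:

    cd /root/ocp4_cluster_ocp4
    source env          # exports KUBECONFIG and the other variables above
    ./oc get nodes
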
/.install_scripts/process_args.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | while [[ $# -gt 0 ]]
4 | do
5 | key="$1"
6 | case $key in
7 | -O|--ocp-version)
8 | export OCP_VERSION="$2"
9 | shift
10 | shift
11 | ;;
12 | -R|--rhcos-version)
13 | export RHCOS_VERSION="$2"
14 | shift
15 | shift
16 | ;;
17 | -m|--masters)
18 | test "$2" -gt "0" &>/dev/null || err "Invalid masters: $N_MAST"
19 | export N_MAST="$2"
20 | shift
21 | shift
22 | ;;
23 | -w|--workers)
24 | test "$2" -ge "0" &> /dev/null || err "Invalid workers: $N_WORK"
25 | export N_WORK="$2"
26 | shift
27 | shift
28 | ;;
29 | -p|--pull-secret)
30 | export PULL_SEC_F="$2"
31 | shift
32 | shift
33 | ;;
34 | -n|--libvirt-network)
35 | export VIR_NET="$2"
36 | shift
37 | shift
38 | ;;
39 | -N|--libvirt-oct)
40 | test "$2" -gt "0" -a "$2" -lt "255" || err "Invalid subnet octet $VIR_NET_OCT"
41 | export VIR_NET_OCT="$2"
42 | shift
43 | shift
44 | ;;
45 | -c|--cluster-name)
46 | export CLUSTER_NAME="$2"
47 | shift
48 | shift
49 | ;;
50 | -d|--cluster-domain)
51 | export BASE_DOM="$2"
52 | shift
53 | shift
54 | ;;
55 | -v|--vm-dir)
56 | export VM_DIR="$2"
57 | shift
58 | shift
59 | ;;
60 | -z|--dns-dir)
61 | export DNS_DIR="$2"
62 | shift
63 | shift
64 | ;;
65 | -s|--setup-dir)
66 | export SETUP_DIR="$2"
67 | shift
68 | shift
69 | ;;
70 | -x|--cache-dir)
71 | export CACHE_DIR="$2"
72 | shift
73 | shift
74 | ;;
75 | --master-cpu)
76 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --master-cpu"
77 | export MAS_CPU="$2"
78 | shift
79 | shift
80 | ;;
81 | --master-mem)
82 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --master-mem"
83 | export MAS_MEM="$2"
84 | shift
85 | shift
86 | ;;
87 | --worker-cpu)
88 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --worker-cpu"
89 | export WOR_CPU="$2"
90 | shift
91 | shift
92 | ;;
93 | --worker-mem)
94 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --worker-mem"
95 | export WOR_MEM="$2"
96 | shift
97 | shift
98 | ;;
99 | --bootstrap-cpu)
100 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --bootstrap-cpu"
101 | export BTS_CPU="$2"
102 | shift
103 | shift
104 | ;;
105 | --bootstrap-mem)
106 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --bootstrap-mem"
107 | export BTS_MEM="$2"
108 | shift
109 | shift
110 | ;;
111 | --lb-cpu)
112 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --lb-cpu"
113 | export LB_CPU="$2"
114 | shift
115 | shift
116 | ;;
117 | --lb-mem)
118 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --lb-mem"
119 | export LB_MEM="$2"
120 | shift
121 | shift
122 | ;;
123 | --ssh-pub-key-file)
124 | test -f "$2" || err "SSH public key file not found: ${2}"
125 | export SSH_PUB_KEY_FILE="$2"
126 | shift
127 | shift
128 | ;;
129 | -X|--fresh-download)
130 | export FRESH_DOWN="yes"
131 | shift
132 | ;;
133 | -k|--keep-bootstrap)
134 | export KEEP_BS="yes"
135 | shift
136 | ;;
137 | --autostart-vms)
138 | export AUTOSTART_VMS="yes"
139 | shift
140 | ;;
141 | --no-autostart-vms)
142 | export AUTOSTART_VMS="no"
143 | shift
144 | ;;
145 | --destroy)
146 | export DESTROY="yes"
147 | shift
148 | ;;
149 | -y|--yes)
150 | export YES="yes"
151 | shift
152 | ;;
153 | -h|--help)
154 | source ${SDIR}/.install_scripts/show_help.sh
155 | exit 0
156 | shift
157 | ;;
158 | *)
159 | echo "ERROR: Invalid argument $key"
160 | exit 1
161 | ;;
162 | esac
163 | done
164 |
165 | test -z "${SETUP_DIR}" && export SETUP_DIR="/root/ocp4_cluster_${CLUSTER_NAME}" || true
166 |
167 | test -n "$VIR_NET" -a -n "$VIR_NET_OCT" && err "Specify either -n or -N" || true
168 | test -z "$VIR_NET" -a -z "$VIR_NET_OCT" && export VIR_NET="${DEF_LIBVIRT_NET}" || true
169 |
--------------------------------------------------------------------------------
/.install_scripts/sanity_check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "####################################"
5 | echo "### DEPENDENCIES & SANITY CHECKS ###"
6 | echo "####################################"
7 | echo
8 |
9 |
10 | echo -n "====> Checking if we have all the dependencies: "
11 | for x in virsh virt-install virt-customize systemctl dig wget
12 | do
13 | builtin type -P $x &> /dev/null || err "executable $x not found"
14 | done
15 | test -n "$(find /usr -type f -name libvirt_driver_network.so 2> /dev/null)" || \
16 | err "libvirt_driver_network.so not found"
17 | ok
18 |
19 | echo -n "====> Checking if the script/working directory already exists: "
20 | test -d "${SETUP_DIR}" && \
21 | err "Directory ${SETUP_DIR} already exists" \
22 | "" \
23 | "You can use --destroy to remove your existing installation" \
24 | "You can also use --setup-dir to specify a different directory for this installation"
25 | ok
26 |
27 | echo -n "====> Checking for pull-secret (${PULL_SEC_F}): "
28 | test -f "${PULL_SEC_F}" \
29 | && export PULL_SEC=$(cat ${PULL_SEC_F}) \
30 | || err "Pull secret not found." \
31 | "Please specify the pull secret file using -p or --pull-secret"
32 | ok
33 |
34 | echo -n "====> Checking if libvirt is running or enabled: "
35 | systemctl -q is-active libvirtd || systemctl -q is-enabled libvirtd || err "libvirtd is neither running nor enabled"
36 | ok
37 |
38 | echo -n "====> Checking if we have any existing leftover VMs: "
39 | existing=$(virsh list --all --name | grep -m1 "${CLUSTER_NAME}-lb\|${CLUSTER_NAME}-master-\|${CLUSTER_NAME}-worker-\|${CLUSTER_NAME}-bootstrap") || true
40 | test -z "$existing" || err "Found existing VM: $existing"
41 | ok
42 |
43 | echo -n "====> Checking if DNS service (dnsmasq or NetworkManager) is active: "
44 | test -d "/etc/NetworkManager/dnsmasq.d" -o -d "/etc/dnsmasq.d" || err "No dnsmasq found"
45 | if [ "${DNS_DIR}" == "/etc/NetworkManager/dnsmasq.d" ]
46 | then
47 | test -d "/etc/NetworkManager/dnsmasq.d" || err "/etc/NetworkManager/dnsmasq.d not found"
48 | DNS_SVC="NetworkManager"; DNS_CMD="reload";
49 | elif [ "${DNS_DIR}" == "/etc/dnsmasq.d" ]
50 | then
51 | test -d "/etc/dnsmasq.d" || err "/etc/dnsmasq.d not found"
52 | DNS_SVC="dnsmasq"; DNS_CMD="restart";
53 | else
54 | err "DNS_DIR (-z|--dns-dir), should be either /etc/dnsmasq.d or /etc/NetworkManager/dnsmasq.d"
55 | fi
56 | systemctl -q is-active $DNS_SVC || err "DNS_DIR points to $DNS_DIR but $DNS_SVC is not active"
57 | ok "${DNS_SVC}"
58 |
59 | if [ "${DNS_SVC}" == "NetworkManager" ]; then
60 | echo -n "====> Checking if dnsmasq is enabled in NetworkManager: "
61 | find /etc/NetworkManager/ -name "*.conf" -exec cat {} \; | grep -v "^#" | grep dnsmasq &> /dev/null \
62 | || err "DNS Directory is set to NetworkManager but dnsmasq is not enabled in NetworkManager" \
63 | "See: https://github.com/kxr/ocp4_setup_upi_kvm/wiki/Setting-Up-DNS"
64 | ok
65 | fi
66 |
67 | echo -n "====> Testing dnsmasq reload (systemctl ${DNS_CMD} ${DNS_SVC}): "
68 | systemctl $DNS_CMD $DNS_SVC || err "systemctl ${DNS_CMD} ${DNS_SVC} failed"
69 | ok
70 |
71 | echo -n "====> Testing libvirtd restart (systemctl restart libvirtd): "
72 | systemctl restart libvirtd || err "systemctl restart libvirtd failed"
73 | ok
74 |
75 | echo -n "====> Checking for any leftover dnsmasq config: "
76 | test -f "${DNS_DIR}/${CLUSTER_NAME}.conf" && err "Existing dnsmasq config file found: ${DNS_DIR}/${CLUSTER_NAME}.conf"
77 | ok
78 |
79 | echo -n "====> Checking for any leftover hosts file: "
80 | test -f "/etc/hosts.${CLUSTER_NAME}" && err "Existing hosts file found: /etc/hosts.${CLUSTER_NAME}"
81 | ok
82 |
83 | echo -n "====> Checking for any leftover/conflicting dns records: "
84 | for h in api api-int bootstrap master-1 master-2 master-3 etcd-0 etcd-1 etcd-2 worker-1 worker-2 test.apps; do
85 | res=$(dig +short "${h}.${CLUSTER_NAME}.${BASE_DOM}" @127.0.0.1) || err "Failed dig @127.0.0.1"
86 | test -z "${res}" || err "Found existing dns record for ${h}.${CLUSTER_NAME}.${BASE_DOM}: ${res}"
87 | done
88 | existing=$(cat /etc/hosts | grep -v "^#" | grep -w -m1 "${CLUSTER_NAME}\.${BASE_DOM}") || true
89 | test -z "$existing" || err "Found existing /etc/hosts records" "$existing"
90 | ok
91 |
92 |
--------------------------------------------------------------------------------
/.install_scripts/show_help.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo COLS=$COLUMNS
3 | cat <" || echo "")
118 |
119 | -k, --keep-bootstrap
120 | Flag to keep the bootstrap VM after the bootstrapping is completed.
121 | Set this if you want to keep the bootstrap VM post cluster installation. By default the script removes the bootstrap VM once the bootstrapping is finished.
122 | Default: $(test "${KEEP_BS}" = "yes" && echo "" || echo "")
123 |
124 | --autostart-vms
125 | Flag to set the cluster VMs to auto-start on reboot.
126 | Default: $(test "${AUTOSTART_VMS}" = "yes" && echo "" || echo "")
127 |
128 | -y, --yes
129 | Flag to assume yes/continue to all questions/checks.
130 | Set this for the script to be non-interactive and continue without asking for confirmation.
131 | Default: $(test "${YES}" = "yes" && echo "" || echo "")
132 |
133 | --destroy
134 | Flag to uninstall/destroy the cluster.
135 | Set this if you want the script to destroy everything it created.
136 | Use this option with the same options you used to install the cluster.
137 | Be careful: this deletes the setup directory, VMs, DNS entries and also the libvirt network (if created by the script using -N).
138 | Default: $(test "${DESTROY}" = "yes" && echo "" || echo "")
139 |
140 | Note: The default values for all these options can be changed in the .defaults.sh file.
141 |
142 | Examples:
143 |
144 | # Deploy OpenShift 4.3.12 cluster
145 | ./ocp4_setup_upi_kvm.sh --ocp-version 4.3.12
146 |
147 | # Deploy OpenShift 4.3.12 cluster with RHCOS 4.3.0
148 | ./ocp4_setup_upi_kvm.sh --ocp-version 4.3.12 --rhcos-version 4.3.0
149 |
150 | # Deploy latest OpenShift version with pull secret from a custom location
151 | ./ocp4_setup_upi_kvm.sh --pull-secret /home/knaeem/Downloads/pull-secret --ocp-version latest
152 |
153 | # Deploy OpenShift 4.2.latest with custom cluster name and domain
154 | ./ocp4_setup_upi_kvm.sh --cluster-name ocp43 --cluster-domain lab.test.com --ocp-version 4.2.latest
155 |
156 | # Deploy OpenShift 4.2.stable on new libvirt network (192.168.155.0/24)
157 | ./ocp4_setup_upi_kvm.sh --ocp-version 4.2.stable --libvirt-oct 155
158 |
159 | # Destroy the already installed cluster
160 | ./ocp4_setup_upi_kvm.sh --cluster-name ocp43 --cluster-domain lab.test.com --destroy
161 |
162 | EOF
163 |
--------------------------------------------------------------------------------
/.install_scripts/utils.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | err() {
4 | echo; echo;
5 | echo -e "\e[97m\e[101m[ERROR]\e[0m ${1}"; shift; echo;
6 | while [[ $# -gt 0 ]]; do
7 | echo " ${1}"
8 | shift
9 | done
10 | echo; exit 1;
11 | }
12 |
13 | ok() {
14 | test -z "$1" && echo " ok" || echo " ${1}"
15 | }
16 |
17 | check_if_we_can_continue() {
18 | if [ "${YES}" != "yes" ]; then
19 | echo; echo;
20 | while [[ $# -gt 0 ]]; do
21 | echo "[NOTE] ${1}"
22 | shift
23 | done
24 | echo -n "Press [Enter] to continue, [Ctrl]+C to abort: "; read userinput;
25 | fi
26 | }
27 |
28 | download() {
29 | # download [check|get] filename url
30 | test -n "${1}" && cmd="${1}" || err "Invalid download ${0} ${@}"
31 | test -n "${2}" && file="${2}" || err "Invalid download ${0} ${@}"
32 | test -n "${3}" && url="${3}" || err "Invalid download ${0} ${@}"
33 |
34 | mkdir -p "${CACHE_DIR}"
35 |
36 | if [ "${cmd}" == "check" ]
37 | then
38 | if [ -f "${CACHE_DIR}/${file}" ]; then
39 | echo "(reusing cached file ${file})"
40 | else
41 | timeout 10 curl -qs --head --fail "${url}" &> /dev/null && ok || err "${url} not reachable"
42 | fi
43 | elif [ "${cmd}" == "get" ]
44 | then
45 | if [ "${FRESH_DOWN}" == "yes" -a -f "${CACHE_DIR}/${file}" ]; then
46 | rm -f "${CACHE_DIR}/${file}" || err "Error removing ${CACHE_DIR}/${file}"
47 | fi
48 | if [ -f "${CACHE_DIR}/${file}" ]; then
49 | echo "(reusing cached file ${file})"
50 | else
51 | echo
52 | wget ${url} -O "${CACHE_DIR}/${file}.part" && mv "${CACHE_DIR}/${file}.part" "${CACHE_DIR}/${file}"
53 | test -f "${CACHE_DIR}/${file}" || err "Error dowloading ${file} from ${url}"
54 | fi
55 | else
56 | err "Invalid download ${0} ${@}"
57 | fi
58 | }
59 |
--------------------------------------------------------------------------------
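The download helper is used in two modes throughout the install scripts, as in these calls taken from version_check.sh and download_prepare.sh:

    download check "$CLIENT" "$CLIENT_URL"   # verify the URL is reachable or the file is already cached
    download get   "$CLIENT" "$CLIENT_URL"   # fetch into $CACHE_DIR, reusing the cache unless -X/--fresh-download is set
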
/.install_scripts/version_check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo
4 | echo "##########################################"
5 | echo "### OPENSHIFT/RHCOS VERSION/URL CHECK ###"
6 | echo "##########################################"
7 | echo
8 |
9 | # OCP4 INSTALL AND CLIENT FILES
10 |
11 | if [ "$OCP_VERSION" == "latest" -o "$OCP_VERSION" == "stable" ]; then
12 | urldir="$OCP_VERSION"
13 | else
14 | test "$(echo $OCP_VERSION | cut -d '.' -f1)" = "4" || err "Invalid OpenShift version $OCP_VERSION"
15 | OCP_VER=$(echo "$OCP_VERSION" | cut -d '.' -f1-2)
16 | OCP_MINOR=$(echo "$OCP_VERSION" | cut -d '.' -f3-)
17 | test -z "$OCP_MINOR" && OCP_MINOR="stable"
18 | if [ "$OCP_MINOR" == "latest" -o "$OCP_MINOR" == "stable" ]
19 | then
20 | urldir="${OCP_MINOR}-${OCP_VER}"
21 | else
22 | urldir="${OCP_VER}.${OCP_MINOR}"
23 | fi
24 | fi
25 | echo -n "====> Looking up OCP4 client for release $urldir: "
26 | CLIENT=$(curl -N --fail -qs "${OCP_MIRROR}/${urldir}/" | grep -m1 "client-linux" | sed 's/.*href="\(openshift-.*\)">.*/\1/')
27 | test -n "$CLIENT" || err "No client found in ${OCP_MIRROR}/${urldir}/"; ok "$CLIENT"
28 | CLIENT_URL="${OCP_MIRROR}/${urldir}/${CLIENT}"
29 | echo -n "====> Checking if Client URL is downloadable: "; download check "$CLIENT" "$CLIENT_URL";
30 |
31 | echo -n "====> Looking up OCP4 installer for release $urldir: "
32 | INSTALLER=$(curl -N --fail -qs "${OCP_MIRROR}/${urldir}/" | grep -m1 "install-linux" | sed 's/.*href="\(openshift-.*\)">.*/\1/')
33 | test -n "$INSTALLER" || err "No installer found in ${OCP_MIRROR}/${urldir}/"; ok "$INSTALLER"
34 | INSTALLER_URL="${OCP_MIRROR}/${urldir}/${INSTALLER}"
35 | echo -n "====> Checking if Installer URL is downloadable: "; download check "$INSTALLER" "$INSTALLER_URL";
36 |
37 | OCP_NORMALIZED_VER=$(echo "${INSTALLER}" | sed 's/.*-\(4\..*\)\.tar.*/\1/' )
38 |
39 | # RHCOS KERNEL, INITRAMFS AND IMAGE FILES
40 |
41 | if [ -z "$RHCOS_VERSION" ]; then
42 | RHCOS_VER=$(echo "${OCP_NORMALIZED_VER}" | cut -d '.' -f1-2 )
43 | RHCOS_MINOR="latest"
44 | else
45 | RHCOS_VER=$(echo "$RHCOS_VERSION" | cut -d '.' -f1-2)
46 | RHCOS_MINOR=$(echo "$RHCOS_VERSION" | cut -d '.' -f3)
47 | test -z "$RHCOS_MINOR" && RHCOS_MINOR="latest"
48 | fi
49 |
50 | if [ "$RHCOS_MINOR" == "latest" ]
51 | then
52 | urldir="$RHCOS_MINOR"
53 | else
54 | urldir="${RHCOS_VER}.${RHCOS_MINOR}"
55 | fi
56 |
57 | echo -n "====> Looking up RHCOS kernel for release $RHCOS_VER/$urldir: "
58 | KERNEL=$(curl -N --fail -qs "${RHCOS_MIRROR}/${RHCOS_VER}/${urldir}/" | grep -m1 "installer-kernel\|live-kernel" | sed 's/.*href="\(rhcos-.*\)">.*/\1/')
59 | test -n "$KERNEL" || err "No kernel found in ${RHCOS_MIRROR}/${RHCOS_VER}/${urldir}/"; ok "$KERNEL"
60 | KERNEL_URL="${RHCOS_MIRROR}/${RHCOS_VER}/${urldir}/${KERNEL}"
61 | echo -n "====> Checking if Kernel URL is downloadable: "; download check "$KERNEL" "$KERNEL_URL";
62 |
63 | echo -n "====> Looking up RHCOS initramfs for release $RHCOS_VER/$urldir: "
64 | INITRAMFS=$(curl -N --fail -qs ${RHCOS_MIRROR}/${RHCOS_VER}/${urldir}/ | grep -m1 "installer-initramfs\|live-initramfs" | sed 's/.*href="\(rhcos-.*\)">.*/\1/')
65 | test -n "$INITRAMFS" || err "No initramfs found in ${RHCOS_MIRROR}/${RHCOS_VER}/${urldir}/"; ok "$INITRAMFS"
66 | INITRAMFS_URL="$RHCOS_MIRROR/${RHCOS_VER}/${urldir}/${INITRAMFS}"
67 | echo -n "====> Checking if Initramfs URL is downloadable: "; download check "$INITRAMFS" "$INITRAMFS_URL";
68 |
69 | # Handling case of rhcos "live" (rhcos >= 4.6)
70 | if [[ "$KERNEL" =~ "live" && "$INITRAMFS" =~ "live" ]]; then
71 | RHCOS_LIVE="yes"
72 | elif [[ "$KERNEL" =~ "installer" && "$INITRAMFS" =~ "installer" ]]; then
73 | RHCOS_LIVE=""
74 | else
75 | err "Sorry, unhandled situation. Exiting"
76 | fi
77 |
78 | echo -n "====> Looking up RHCOS image for release $RHCOS_VER/$urldir: "
79 | if [ -n "$RHCOS_LIVE" ]; then
80 | IMAGE=$(curl -N --fail -qs ${RHCOS_MIRROR}/${RHCOS_VER}/${urldir}/ | grep -m1 "live-rootfs" | sed 's/.*href="\(rhcos-.*.img\)".*/\1/')
81 | else
82 | IMAGE=$(curl -N --fail -qs ${RHCOS_MIRROR}/${RHCOS_VER}/${urldir}/ | grep -m1 "metal" | sed 's/.*href="\(rhcos-.*.raw.gz\)".*/\1/')
83 | fi
84 | test -n "$IMAGE" || err "No image found in ${RHCOS_MIRROR}/${RHCOS_VER}/${urldir}/"; ok "$IMAGE"
85 | IMAGE_URL="$RHCOS_MIRROR/${RHCOS_VER}/${urldir}/${IMAGE}"
86 | echo -n "====> Checking if Image URL is downloadable: "; download check "$IMAGE" "$IMAGE_URL";
87 |
88 | RHCOS_NORMALIZED_VER=$(echo "${IMAGE}" | sed 's/.*-\(4\..*\)-x86.*/\1/')
89 |
90 | # CENTOS CLOUD IMAGE
91 | LB_IMG="${LB_IMG_URL##*/}"
92 | echo -n "====> Checking if Centos cloud image URL is downloadable: "; download check "$LB_IMG" "$LB_IMG_URL";
93 |
94 |
95 | echo
96 | echo
97 | echo " Red Hat OpenShift Version = $OCP_NORMALIZED_VER"
98 | echo
99 | echo " Red Hat CoreOS Version = $RHCOS_NORMALIZED_VER"
100 |
101 | check_if_we_can_continue
102 |
103 |
--------------------------------------------------------------------------------
/.post_scripts/add_node.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # https://github.com/kxr/ocp4_setup_upi_kvm
3 | set -e
4 |
5 | show_help() {
6 | echo
7 | echo "Usage: ${0} [OPTIONS]"
8 | echo
9 | cat << EOF | column -L -t -s '|' -N OPTION,DESCRIPTION -W DESCRIPTION
10 |
11 | --name NAME|The node name without the domain.
12 | |For example: If you specify storage-1, and your cluster name is "ocp4" and base domain is "local", the new node would be "storage-1.ocp4.local"
13 | |Default:
14 |
15 | -c, --cpu N|Number of CPUs to be attached to this node's VM.
16 | |Default: 2
17 |
18 | -m, --memory SIZE|Amount of Memory to be attached to this node's VM. Size in MB.
19 | |Default: 4096
20 |
21 | -a, --add-disk SIZE|You can add additional disks to this node. Size in GB.
22 | |This option can be specified multiple times. Disks are added in order; for example, "--add-disk 10 --add-disk 100" adds two disks (on top of the OS disk vda): first a 10GB disk (/dev/vdb), then a 100GB disk (/dev/vdc).
23 | |Default:
24 |
25 | -v, --vm-dir|The location where you want to store the VM Disks
26 | |By default the location used by the cluster VMs will be used.
27 |
28 | -N, --libvirt-oct OCTET|You can specify a 192.168.{OCTET}.0 subnet octet and this script will create a new libvirt network for this node.
29 | |The network will be named ocp-{OCTET}. If the libvirt network ocp-{OCTET} already exists, it will be used.
30 | |This can be useful if you want to add a node in different network than the one used by the cluster.
31 | |Default:
32 |
33 | -n, --libvirt-network NETWORK|The libvirt network to use. Select this option if you want to use an existing libvirt network.
34 | |By default the existing libvirt network used by the cluster will be used.
35 |
36 | EOF
37 |
38 | }
39 |
40 | err() {
41 | echo; echo;
42 | echo -e "\e[97m\e[101m[ERROR]\e[0m ${1}"; shift; echo;
43 | while [[ $# -gt 0 ]]; do echo " $1"; shift; done
44 | echo; exit 1;
45 | }
46 | ok() {
47 | test -z "$1" && echo "ok" || echo "$1"
48 | }
49 |
50 | SDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
51 | source ${SDIR}/env || err "${SDIR}/env not found."
52 |
53 |
54 | # Process Arguments
55 | while [[ $# -gt 0 ]]
56 | do
57 | key="$1"
58 | case $key in
59 | --name)
60 | NODE="$2"
61 | shift
62 | shift
63 | ;;
64 | -c|--cpu)
65 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --cpu"
66 | CPU="$2"
67 | shift
68 | shift
69 | ;;
70 | -m|--memory)
71 | test "$2" -gt "0" &>/dev/null || err "Invalid value $2 for --memory"
72 | MEM="$2"
73 | shift
74 | shift
75 | ;;
76 | -a|--add-disk)
77 | test "$2" -gt "0" &>/dev/null || err "Invalid disk size. Enter size in GB";
78 | ADD_DISK="${ADD_DISK} --disk ${VM_DIR}/${CLUSTER_NAME}-${NODE}-${2}GB-$(shuf -zer -n 5 {a..z}|tr -d '\0').qcow2,size=${2}"
79 | shift
80 | shift
81 | ;;
82 | -N|--libvirt-oct)
83 | VIR_NET_OCT="$2"
84 | test "$VIR_NET_OCT" -gt "0" -a "$VIR_NET_OCT" -lt "255" || err "Invalid subnet octet $VIR_NET_OCT"
85 | shift
86 | shift
87 | ;;
88 | -n|--libvirt-network)
89 | VIR_NET="$2"
90 | shift
91 | shift
92 | ;;
93 | -v|--vm-dir)
94 | VM_DIR="$2"
95 | shift
96 | shift
97 | ;;
98 | -h|--help)
99 | show_help
100 | exit
101 | ;;
102 | *)
103 | echo "ERROR: Invalid argument $key"
104 | exit 1
105 | esac
106 | done
107 |
108 | test -z "$NODE" && err "Please specify the node name using --name " \
109 | "see --help for more details"
110 | test -z "$CPU" && CPU="2"
111 | test -z "$MEM" && MEM="4096"
112 |
113 | # Checking if we are root
114 | test "$(whoami)" = "root" || err "Not running as root"
115 |
116 | echo -n "====> Checking if libvirt is running: "
117 | systemctl -q is-active libvirtd || err "libvirtd is not running"; ok
118 |
119 | echo -n "====> Checking libvirt network: "
120 | if [ -n "$VIR_NET_OCT" ]; then
121 | virsh net-uuid "ocp-${VIR_NET_OCT}" &> /dev/null
122 | if [ "$?" -eq "0" ]; then
123 | VIR_NET="ocp-${VIR_NET_OCT}"
124 | ok "re-using ocp-${VIR_NET_OCT}"
125 | unset VIR_NET_OCT
126 | else
127 | ok "will create ocp-${VIR_NET_OCT} (192.168.${VIR_NET_OCT}.0/24)"
128 | fi
129 | elif [ -n "$VIR_NET" ]; then
130 | virsh net-uuid "${VIR_NET}" &> /dev/null || \
131 | err "${VIR_NET} doesn't exist"
132 | ok "using $VIR_NET"
133 | else
134 | err "Sorry, unhandled situation. Exiting"
135 | fi
136 |
137 | if [ -n "$VIR_NET_OCT" ]; then
138 | echo -n "====> Creating libvirt network ocp-${VIR_NET_OCT} "
139 | /usr/bin/cp /usr/share/libvirt/networks/default.xml /tmp/new-net.xml > /dev/null || err "Network creation failed"
140 | sed -i "s/default/ocp-${VIR_NET_OCT}/" /tmp/new-net.xml
141 | sed -i "s/virbr0/ocp-${VIR_NET_OCT}/" /tmp/new-net.xml
142 | sed -i "s/122/${VIR_NET_OCT}/g" /tmp/new-net.xml
143 | virsh net-define /tmp/new-net.xml > /dev/null || err "virsh net-define failed"
144 | virsh net-autostart ocp-${VIR_NET_OCT} > /dev/null || err "virsh net-autostart failed"
145 | virsh net-start ocp-${VIR_NET_OCT} > /dev/null || err "virsh net-start failed"
146 | systemctl restart libvirtd > /dev/null || err "systemctl restart libvirtd failed"
147 | echo "ocp-${VIR_NET_OCT} created"
148 | VIR_NET="ocp-${VIR_NET_OCT}"
149 | fi
150 |
151 | cd ${SETUP_DIR}
152 |
153 |
154 | if [ -n "$RHCOS_LIVE" ]; then
155 | RHCOS_I_ARG="coreos.live.rootfs_url"
156 | else
157 | RHCOS_I_ARG="coreos.inst.image_url"
158 | fi
159 |
160 | echo -n "====> Creating ${NODE} VM: "
161 | virt-install --name ${CLUSTER_NAME}-${NODE} \
162 | --disk "${VM_DIR}/${CLUSTER_NAME}-${NODE}.qcow2,size=50" ${ADD_DISK} \
163 | --ram ${MEM} --cpu host --vcpus ${CPU} \
164 | --os-type linux --os-variant rhel7-unknown \
165 | --network network=${VIR_NET},model=virtio --noreboot --noautoconsole \
166 | --location rhcos-install/ \
167 | --extra-args "nomodeset rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda ${RHCOS_I_ARG}=http://${LBIP}:${WS_PORT}/${IMAGE} coreos.inst.ignition_url=http://${LBIP}:${WS_PORT}/worker.ign" > /dev/null || err "Creating ${NODE} vm failed "; ok
168 |
169 |
170 | echo "====> Waiting for RHCOS Installation to finish: "
171 | while rvms=$(virsh list --name | grep "${CLUSTER_NAME}-${NODE}" 2> /dev/null); do
172 | sleep 15
173 | echo " --> VMs with pending installation: $(echo "$rvms" | tr '\n' ' ')"
174 | done
175 |
176 | echo -n "====> Starting ${NODE} VM: "
177 | virsh start ${CLUSTER_NAME}-${NODE} > /dev/null || err "virsh start ${CLUSTER_NAME}-${NODE} failed"; ok
178 |
179 |
180 | echo -n "====> Waiting for ${NODE} to obtain IP address: "
181 | while true
182 | do
183 | sleep 5
184 | IP=$(virsh domifaddr "${CLUSTER_NAME}-${NODE}" | grep ipv4 | head -n1 | awk '{print $4}' | cut -d'/' -f1 2> /dev/null)
185 | test "$?" -eq "0" -a -n "$IP" && { echo "$IP"; break; }
186 | done
187 | MAC=$(virsh domifaddr "${CLUSTER_NAME}-${NODE}" | grep ipv4 | head -n1 | awk '{print $2}')
188 |
189 | echo -n " ==> Adding DHCP reservation: "
190 | virsh net-update ${VIR_NET} add-last ip-dhcp-host --xml "<host mac='${MAC}' ip='${IP}'/>" --live --config > /dev/null || \
191 | err "Adding DHCP reservation failed"; ok
192 |
193 | echo -n " ==> Adding /etc/hosts entry: "
194 | echo "$IP ${NODE}.${CLUSTER_NAME}.${BASE_DOM}" >> /etc/hosts || err "failed"; ok
195 |
196 | echo -n "====> Resstarting libvirt and dnsmasq: "
197 | systemctl restart libvirtd || err "systemctl restart libvirtd failed"
198 | systemctl $DNS_CMD $DNS_SVC || err "systemctl $DNS_CMD $DNS_SVC failed"; ok
199 |
200 | echo
201 | echo
202 | echo "NOTE: Please check the cluster for CSRs and approve them"
203 | echo
204 | echo " # oc get csr"
205 | echo "  # oc adm certificate approve <csr_name>"
206 |
--------------------------------------------------------------------------------
/.post_scripts/expose_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # https://github.com/kxr/ocp4_setup_upi_kvm
3 |
4 | ###############################################################################
5 | # expose_cluster.sh
6 | # This script exposes the OpenShift cluster load balancer outside the host,
7 | # either via firewalld port-forward rules or via a generated haproxy
8 | # configuration. Before you can run this script, you must have already
9 | # successfully installed your OpenShift cluster via the ocp4_setup_upi_kvm.sh script.
10 | ###############################################################################
11 |
12 | set -e
13 |
14 | err() {
15 | echo; echo;
16 | echo -e "\e[97m\e[101m[ERROR]\e[0m ${1}"; shift; echo;
17 | while [[ $# -gt 0 ]]; do echo " $1"; shift; done
18 | echo; exit 1;
19 | }
20 | ok() {
21 | test -z "$1" && echo "ok" || echo "$1"
22 | }
23 | SDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
24 | source ${SDIR}/env || err "${SDIR}/env not found."
25 |
26 | # Process Arguments
27 | while [[ $# -gt 0 ]]
28 | do
29 | key="$1"
30 | case $key in
31 | -m|--method)
32 | EXPOSE_METHOD="$2"
33 | shift
34 | shift
35 | ;;
36 | -h|--help)
37 | SHOW_HELP="yes"
38 | shift
39 | ;;
40 | *)
41 | echo "ERROR: Invalid argument $key"
42 | exit 1
43 | ;;
44 | esac
45 | done
46 |
47 | if [ "$SHOW_HELP" == "yes" ]; then
48 | echo
49 | echo "Usage: ${0} --method [ firewalld | haproxy ]"
50 | echo
51 | cat << EOF | column -L -t -s '|' -N OPTION,DESCRIPTION -W DESCRIPTION
52 |
53 | -m, --method NAME|Select the method with which you want to expose this cluster.
54 | |Valid options are "firewalld" and "haproxy"
55 | |
56 |
57 | EOF
58 | exit
59 | fi
60 |
61 | check_if_we_can_continue() {
62 | if [ "$YES" != "yes" ]; then
63 | echo;
64 | test -n "$1" && echo "[NOTE] $1"
65 | echo -n "Press enter to continue"; read x;
66 | fi
67 | }
68 |
69 | # Checking if we are root
70 | test "$(whoami)" = "root" || err "Not running as root"
71 |
72 | # Check if we have the --method set
73 | test -n "$EXPOSE_METHOD" || \
74 | err "Please set the expose method using --method" \
75 | "Run: '${0} --help' for details"
76 |
77 | # Check if we have the required variables from env file
78 | test -n "$CLUSTER_NAME" -a -n "$BASE_DOM" -a -n "$SETUP_DIR" -a -n "$VIR_NET" || \
79 | err "Unable to find existing cluster info"
80 |
81 |
82 | # --method firewalld
83 | if [ "$EXPOSE_METHOD" == "firewalld" ]; then
84 |
85 | # Checking if ip_forward is enabled
86 | echo -n "====> Checking if ip_forward is enabled: "
87 | IP_FWD=$(cat /proc/sys/net/ipv4/ip_forward)
88 | test "$IP_FWD" = "1" || \
89 | err "IP forwarding not enabled." "/proc/sys/net/ipv4/ip_forward has $IP_FWD"; ok
90 |
91 | # If method is firewall, firewall should be active
92 | echo -n "====> Checking if firewalld is active: "
93 | systemctl -q is-active firewalld || err "firewalld is not running"; ok
94 |
95 | # Check that we have the necessary firewall utility
96 | echo -n "====> Checking firewall-cmd: "
97 | test "$(which firewall-cmd)" || err "You do not have firewall-cmd in your PATH"; ok
98 |
99 | # Determine the interface
100 | echo -n "====> Determining the libvirt interface: "
101 | VIR_INT=$(virsh net-info ${VIR_NET} | grep Bridge | awk '{print $2}' 2> /dev/null) && \
102 | test -n "$VIR_INT" || \
103 | err "Unable to find interface for libvirt network"; ok
104 |
105 | # Checking if we have existing port forwarding
106 | echo -n "====> Checking if we have existing port forwarding: "
107 | EXIS_FWD=$(firewall-cmd --list-forward-ports | grep "^port=80:proto=tcp:\|^port=443:proto=tcp:\|^port=6443:proto=tcp:") || true
108 | test -z "$EXIS_FWD" || \
109 | {
110 | echo "Error"
111 | echo
112 | echo "# Existing port forwarding found which is conflicting"
113 | echo "# Please delete these rules:"
114 | echo
115 | for x in ${EXIS_FWD}; do
116 | echo "firewall-cmd --remove-forward-port='$x'"
117 | done
118 | echo "firewall-cmd --runtime-to-permanent"
119 | err ""
120 | }
121 | ok
122 |
123 | echo
124 | echo "#######################"
125 | echo "### FIREWALLD RULES ###"
126 | echo "#######################"
127 | echo
128 | echo "# This script will now try to add the following firewalld rules"
129 | echo "# firewall-cmd will not be run using --permanent, to avoid permanent lockdown"
130 | echo "# To make the rules permanent you can run 'firewall-cmd --runtime-to-permanent'"
131 | echo "# You can also press Ctrl+C now and run these commands manually if you want any customization"
132 | echo
133 | echo "firewall-cmd --add-forward-port=port=443:proto=tcp:toaddr=${LBIP}:toport=443"
134 | echo "firewall-cmd --add-forward-port=port=6443:proto=tcp:toaddr=${LBIP}:toport=6443"
135 | echo "firewall-cmd --add-forward-port=port=80:proto=tcp:toaddr=${LBIP}:toport=80"
136 | echo "firewall-cmd --direct --passthrough ipv4 -I FORWARD -i ${VIR_INT} -j ACCEPT"
137 | echo "firewall-cmd --direct --passthrough ipv4 -I FORWARD -o ${VIR_INT} -j ACCEPT"
138 | echo
139 | check_if_we_can_continue
140 |
141 | echo -n "====> Adding forward-port rule port=443:proto=tcp:toaddr=${LBIP}:toport=443: "
142 | firewall-cmd --add-forward-port=port=443:proto=tcp:toaddr=${LBIP}:toport=443 || echo "Failed"
143 |
144 | echo -n "====> Adding forward-port rule port=6443:proto=tcp:toaddr=${LBIP}:toport=6443: "
145 | firewall-cmd --add-forward-port=port=6443:proto=tcp:toaddr=${LBIP}:toport=6443 || echo "Failed"
146 |
147 | echo -n "====> Adding forward-port rule port=80:proto=tcp:toaddr=${LBIP}:toport=80: "
148 | firewall-cmd --add-forward-port=port=80:proto=tcp:toaddr=${LBIP}:toport=80 || echo "Failed"
149 |
150 | echo -n "====> Adding passthrough forwarding -I FORWARD -i ${VIR_INT}: "
151 | firewall-cmd --direct --passthrough ipv4 -I FORWARD -i ${VIR_INT} -j ACCEPT || echo "Failed"
152 |
153 | echo -n "====> Adding passthrough forwarding -I FORWARD -o ${VIR_INT}: "
154 | firewall-cmd --direct --passthrough ipv4 -I FORWARD -o ${VIR_INT} -j ACCEPT || echo "Failed"
155 |
156 |
157 | # --method haproxy
158 | elif [ "$EXPOSE_METHOD" == "haproxy" ]; then
159 |
160 | RANDSTRING=$(shuf -zer -n 4 {a..z} {0..9} | tr -d '\0')
161 | HAPROXY_CFG=/tmp/haproxy-${RANDSTRING}.cfg
162 |
163 | cat << EOF > ${HAPROXY_CFG}
164 | global
165 | log 127.0.0.1 local2
166 | chroot /var/lib/haproxy
167 | pidfile /var/run/haproxy.pid
168 | maxconn 4000
169 | user haproxy
170 | group haproxy
171 | daemon
172 | stats socket /var/lib/haproxy/stats
173 | ssl-default-bind-ciphers PROFILE=SYSTEM
174 | ssl-default-server-ciphers PROFILE=SYSTEM
175 | defaults
176 | log global
177 | option dontlognull
178 | option redispatch
179 | retries 3
180 | timeout http-request 10s
181 | timeout queue 1m
182 | timeout connect 10s
183 | timeout client 1m
184 | timeout server 1m
185 | timeout http-keep-alive 10s
186 | timeout check 10s
187 | maxconn 3000
188 | frontend fe-api
189 | bind *:6443
190 | mode tcp
191 | option tcplog
192 | tcp-request inspect-delay 10s
193 | tcp-request content accept if { req_ssl_hello_type 1 }
194 | use_backend ${CLUSTER_NAME}-api if { req.ssl_sni -m end api.${CLUSTER_NAME}.${BASE_DOM} }
195 | frontend fe-https
196 | bind *:443
197 | mode tcp
198 | option tcplog
199 | tcp-request inspect-delay 10s
200 | tcp-request content accept if { req_ssl_hello_type 1 }
201 | use_backend ${CLUSTER_NAME}-https if { req.ssl_sni -m end apps.${CLUSTER_NAME}.${BASE_DOM} }
202 | frontend fe-http
203 | bind *:80
204 | mode http
205 | option httplog
206 | use_backend ${CLUSTER_NAME}-http if { hdr(host) -m end apps.${CLUSTER_NAME}.${BASE_DOM} }
207 |
208 | backend ${CLUSTER_NAME}-api
209 | balance source
210 | mode tcp
211 | option ssl-hello-chk
212 | server main lb.${CLUSTER_NAME}.${BASE_DOM}:6443
213 | backend ${CLUSTER_NAME}-https
214 | balance source
215 | mode tcp
216 | option ssl-hello-chk
217 | server main lb.${CLUSTER_NAME}.${BASE_DOM}:443
218 | backend ${CLUSTER_NAME}-http
219 | balance source
220 | mode http
221 | server main lb.${CLUSTER_NAME}.${BASE_DOM}:80
222 | EOF
223 |
224 | echo
225 | echo "######################"
226 | echo "### HAPROXY CONFIG ###"
227 | echo "######################"
228 | echo
229 | echo "# haproxy configuration has been saved to: $HAPROXY_CFG Please review it before applying"
230 | echo "# To apply, simply move this config to haproxy. e.g:"
231 | echo
232 | echo " mv '$HAPROXY_CFG' '/etc/haproxy/haproxy.cfg'"
233 | echo
234 | echo "# haproxy can be used to front multiple clusters. If that is the case,"
235 | echo "# you only need to merge the 'use_backend' lines and the 'backend' blocks from this confiugration in haproxy.cfg"
236 | echo
237 | echo "# You will also need to open the ports (80,443 and 6443) e.g:"
238 | echo
239 | echo " firewall-cmd --add-service=http"
240 | echo " firewall-cmd --add-service=https"
241 | echo " firewall-cmd --add-port=6443/tcp"
242 | echo " firewall-cmd --runtime-to-permanent"
243 | echo
244 | echo "# If SELinux is in Enforcing mode, you need to tell it to treat port 6443 as a webport, e.g:"
245 | echo
246 | echo " semanage port -a -t http_port_t -p tcp 6443"
247 | echo
248 | echo
249 |
250 |
251 | ## TODO --method iptables
252 | #elif [ "$EXPOSE_METHOD" == "iptables" ]; then
253 | else
254 | err "Unkown method"
255 | fi
256 |
257 |
258 | echo
259 | echo
260 | echo "[NOTE]: When accessing this cluster from outside make sure that cluster FQDNs resolve from outside"
261 | echo
262 | echo " For basic api/console access, the following /etc/hosts entry should work:"
263 | echo
264 | echo "    <IP-of-this-host> api.${CLUSTER_NAME}.${BASE_DOM} console-openshift-console.apps.${CLUSTER_NAME}.${BASE_DOM} oauth-openshift.apps.${CLUSTER_NAME}.${BASE_DOM}"
265 | echo
266 |
267 | exit 0
268 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Automated OpenShift 4 Cluster Installation on KVM
2 |
3 | ### Prerequisites:
4 |
5 | - Internet-connected physical host running a modern Linux distribution
6 | - Virtualization enabled and Libvirt/KVM setup [(more details)](https://github.com/kxr/ocp4_setup_upi_kvm/wiki/Setup-KVM-Libvirt)
7 | - DNS on the host managed by dnsmasq or NetworkManager/dnsmasq [(more details)](https://github.com/kxr/ocp4_setup_upi_kvm/wiki/Setting-Up-DNS)
8 | - OpenShift 4 Pull secret (Download from [here](https://cloud.redhat.com/openshift/install/pull-secret))
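
A quick way to sanity-check the virtualization prerequisites on the host (a minimal sketch, assuming a systemd-based Linux host with libvirt already installed):

~~~
# CPU virtualization extensions should be enabled (expect a non-zero count)
grep -cE '(vmx|svm)' /proc/cpuinfo

# libvirtd should be running, and the libvirt network you plan to use should exist
systemctl is-active libvirtd
virsh net-list --all
~~~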
9 |
10 | ## Installing OpenShift 4 Cluster
11 |
12 | ### Demo:
13 |
14 | [](https://asciinema.org/a/bw6Wja2vBLrAkpKHTV0yGeuzo)
15 |
16 | ### Usage:
17 | ./ocp4_setup_upi_kvm.sh [OPTIONS]
18 |
19 |
20 | | Option |Description |
21 | | :------------ | :------------ |
22 | |______________________________||
23 | | -O, --ocp-version VERSION | You can set this to "latest", "stable" or a specific version like "4.1", "4.1.2", "4.1.latest", "4.1.stable" etc. <br> Default: stable |
24 | | -R, --rhcos-version VERSION | You can set a specific RHCOS version to use. For example "4.1.0", "4.2.latest" etc. <br> By default the RHCOS version is matched from the OpenShift version. For example, if you selected 4.1.2, RHCOS 4.1/latest will be used |
25 | | -p, --pull-secret FILE | Location of the pull secret file <br> Default: /root/pull-secret |
26 | | -c, --cluster-name NAME | OpenShift 4 cluster name <br> Default: ocp4 |
27 | | -d, --cluster-domain DOMAIN | OpenShift 4 cluster domain <br> Default: local |
28 | | -m, --masters N | Number of masters to deploy <br> Default: 3 |
29 | | -w, --workers N | Number of workers to deploy <br> Default: 2 |
30 | | --master-cpu N | Number of CPUs for the master VM(s) <br> Default: 4 |
31 | | --master-mem SIZE(MB) | RAM size (MB) of master VM(s) <br> Default: 16000 |
32 | | --worker-cpu N | Number of CPUs for the worker VM(s) <br> Default: 2 |
33 | | --worker-mem SIZE(MB) | RAM size (MB) of worker VM(s) <br> Default: 8000 |
34 | | --bootstrap-cpu N | Number of CPUs for the bootstrap VM <br> Default: 4 |
35 | | --bootstrap-mem SIZE(MB) | RAM size (MB) of bootstrap VM <br> Default: 16000 |
36 | | --lb-cpu N | Number of CPUs for the load balancer VM <br> Default: 1 |
37 | | --lb-mem SIZE(MB) | RAM size (MB) of load balancer VM <br> Default: 1024 |
38 | | -n, --libvirt-network NETWORK | The libvirt network to use. Select this option if you want to use an existing libvirt network <br> The libvirt network should already exist. If you want the script to create a separate network for this installation see: -N, --libvirt-oct <br> Default: default |
39 | | -N, --libvirt-oct OCTET | You can specify a 192.168.{OCTET}.0 subnet octet and this script will create a new libvirt network for the cluster <br> The network will be named ocp-{OCTET}. If the libvirt network ocp-{OCTET} already exists, it will be used. <br> Default: [not set] |
40 | | -v, --vm-dir | The location where you want to store the VM Disks <br> Default: /var/lib/libvirt/images |
41 | | -z, --dns-dir DIR | We expect the DNS on the host to be managed by dnsmasq. You can use NetworkManager's built-in dnsmasq or use a separate dnsmasq running on the host. If you are running a separate dnsmasq on the host, set this to "/etc/dnsmasq.d" <br> Default: /etc/NetworkManager/dnsmasq.d |
42 | | -s, --setup-dir DIR | The location where the script keeps all the files related to the installation <br> Default: /root/ocp4\_setup\_{CLUSTER_NAME} |
43 | | -x, --cache-dir DIR | To avoid unnecessary downloads we download the OpenShift/RHCOS files to a cache directory and reuse the files if they exist <br> This way you only download a file once and reuse it for future installs <br> You can force the script to download a fresh copy by using -X, --fresh-download <br> Default: /root/ocp4_downloads |
44 | | -X, --fresh-download | Set this if you want to force the script to download a fresh copy of the files instead of reusing the existing ones in the cache dir <br> Default: [not set] |
45 | | -k, --keep-bootstrap | Set this if you want to keep the bootstrap VM. By default the bootstrap VM is removed once the bootstrapping is finished <br> Default: [not set] |
46 | | --autostart-vms | Set this if you want the cluster VMs to auto-start on reboot <br> Default: [not set] |
47 | | -y, --yes | Set this for the script to be non-interactive and continue without asking for confirmation <br> Default: [not set] |
48 | | --destroy | Set this if you want the script to destroy everything it has created <br> Use this option with the same options you used to install the cluster <br> Be careful, this deletes the VMs, DNS entries and the libvirt network (if created by the script) <br> Default: [not set] |
49 |
50 |
51 | ### Examples
52 | # Deploy OpenShift 4.3.12 cluster
53 | ./ocp4_setup_upi_kvm.sh --ocp-version 4.3.12
54 | ./ocp4_setup_upi_kvm.sh -O 4.3.12
55 |
56 | # Deploy OpenShift 4.3.12 cluster with RHCOS 4.3.0
57 | ./ocp4_setup_upi_kvm.sh --ocp-version 4.3.12 --rhcos-version 4.3.0
58 | ./ocp4_setup_upi_kvm.sh -O 4.3.12 -R 4.3.0
59 |
60 | # Deploy latest OpenShift version with pull secret from a custom location
61 | ./ocp4_setup_upi_kvm.sh --pull-secret /home/knaeem/Downloads/pull-secret --ocp-version latest
62 | ./ocp4_setup_upi_kvm.sh -p /home/knaeem/Downloads/pull-secret -O latest
63 |
64 | # Deploy OpenShift 4.2.latest with custom cluster name and domain
65 | ./ocp4_setup_upi_kvm.sh --cluster-name ocp43 --cluster-domain lab.test.com --ocp-version 4.2.latest
66 | ./ocp4_setup_upi_kvm.sh -c ocp43 -d lab.test.com -O 4.2.latest
67 |
68 | # Deploy OpenShift 4.2.stable on new libvirt network (192.168.155.0/24)
69 | ./ocp4_setup_upi_kvm.sh --ocp-version 4.2.stable --libvirt-oct 155
70 | ./ocp4_setup_upi_kvm.sh -O 4.2.stable -N 155
71 |
72 |     # Destroy the already installed cluster
73 | ./ocp4_setup_upi_kvm.sh --cluster-name ocp43 --cluster-domain lab.test.com --destroy
74 | ./ocp4_setup_upi_kvm.sh -c ocp43 -d lab.test.com --destroy
75 |
76 | ___
77 |
78 | ## Adding Nodes
79 |
80 | Once the installation is successful, you will find an `add_node.sh` script in the `--setup-dir` (default: /root/ocp4\_setup\_{CLUSTER_NAME}). You can use it to add more nodes to the cluster after the installation.
81 |
82 | ### Usage:
83 | cd [setup-dir]
84 | ./add_node.sh --name [node-name] [OPTIONS]
85 |
86 | | Option |Description |
87 | | :------------ | :------------ |
88 | |______________________________||
89 | | --name NAME | The node name without the domain. <br> For example: If you specify storage-1, and your cluster name is "ocp4" and base domain is "local", the new node would be "storage-1.ocp4.local". <br> Default: [not set] [REQUIRED] |
90 | | -c, --cpu N | Number of CPUs to be attached to this node's VM. <br> Default: 2 |
91 | | -m, --memory SIZE(MB) | Amount of Memory to be attached to this node's VM. Size in MB. <br> Default: 4096 |
92 | | -a, --add-disk SIZE(GB) | You can add additional disks to this node. Size in GB. <br> This option can be specified multiple times. Disks are added in order, for example if you specify "--add-disk 10 --add-disk 100", two disks will be added (on top of the OS disk vda), the first of 10GB (/dev/vdb) and the second of 100GB (/dev/vdc). <br> Default: [not set] |
93 | | -v, --vm-dir | The location where you want to store the VM Disks. <br> By default the location used by the cluster VMs will be used. |
94 | | -N, --libvirt-oct OCTET | You can specify a 192.168.{OCTET}.0 subnet octet and this script will create a new libvirt network for this node. <br> The network will be named ocp-{OCTET}. If the libvirt network ocp-{OCTET} already exists, it will be used. <br> This can be useful if you want to add a node in a different network than the one used by the cluster. <br> Default: [not set] |
95 | | -n, --libvirt-network NETWORK | The libvirt network to use. Select this option if you want to use an existing libvirt network. <br> By default the existing libvirt network used by the cluster will be used. |
96 |
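For example, a hypothetical invocation that adds a node named storage-1 with 4 vCPUs, 8192 MB of RAM and two extra disks (the name and sizes are illustrative, not defaults):

    cd [setup-dir]
    ./add_node.sh --name storage-1 --cpu 4 --memory 8192 --add-disk 10 --add-disk 100
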
97 | ___
98 |
99 | ## Exposing the cluster outside the host/hypervisor
100 | Once the installation is successful, you will find an `expose_cluster.sh` script in the `--setup-dir` (default: /root/ocp4\_setup\_{CLUSTER_NAME}). You can use it to expose the cluster so it can be accessed from outside the host.
101 |
102 | ### Usage:
103 |
104 | cd [setup-dir]
105 | ./expose_cluster.sh --method [ firewalld | haproxy ]
106 |
107 | If you are running a single cluster on your bare metal machine, you can expose it via the firewalld method (port forwarding). If you want to host and access multiple clusters, you can use the haproxy method.
108 |
109 | ### DNS (External)
110 |
111 | Once you have exposed your cluster(s), you must ensure you have the proper DNS entries available to your external clients. One simple way to do this is to edit the `/etc/hosts` file on your client machines such that your exposed cluster endpoints are declared. The output of the `expose_cluster.sh` script will give you an example line you can use for your `/etc/hosts` file.
112 |
113 | You need to expose a minimum of three endpoints: the OpenShift console, the API endpoint, and the OAuth endpoint. For example, if you installed with the default names (i.e. the cluster name is "ocp4" and the base domain is "local") you will need to expose these three endpoints:
114 |
115 | * console-openshift-console.apps.ocp4.local
116 | * api.ocp4.local
117 | * oauth-openshift.apps.ocp4.local
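
For example, assuming the hypervisor is reachable from the client at 203.0.113.10 (an illustrative address, replace it with your own), the following append to the client's `/etc/hosts` covers those three endpoints:

~~~
echo "203.0.113.10 api.ocp4.local console-openshift-console.apps.ocp4.local oauth-openshift.apps.ocp4.local" >> /etc/hosts
~~~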
118 |
119 | If you will later configure OpenShift to expose its image registry (a typical dev use case that will allow you to push images directly into your cluster), you will need to expose this endpoint as well:
120 |
121 | * default-route-openshift-image-registry.apps.ocp4.local
122 |
123 | Finally, any custom Route resources you create in your OpenShift cluster will also need to be exposed via DNS.
124 |
125 | ### haproxy
126 |
127 | If you are exposing your cluster using haproxy and SELinux is in Enforcing mode (on the hypervisor), you need to tell it to treat port 6443 as a web port via `semanage port -a -t http_port_t -p tcp 6443`. Otherwise, SELinux will not let haproxy listen on port `6443`.
128 |
129 | Similarly, if firewalld is enabled, you need to open up the necessary ports via:
130 |
131 | ```
132 | firewall-cmd --add-service=http
133 | firewall-cmd --add-service=https
134 | firewall-cmd --add-port=6443/tcp
135 | ```
136 |
137 | The output of the `expose_cluster.sh --method haproxy` script will remind you about these additional configurations.
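
After moving the generated configuration into place, it can be useful to validate it and (re)start haproxy (a minimal sketch, assuming haproxy is installed as a system service):

~~~
haproxy -c -f /etc/haproxy/haproxy.cfg    # check the configuration syntax
systemctl enable --now haproxy            # or: systemctl restart haproxy
~~~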
138 |
139 | ___
140 |
141 | ## Auto Starting VMs
142 |
143 | By default, if you reboot the host/hypervisor, the VMs will not start up automatically. You can pass `--autostart-vms` when running the install script, which will mark the VMs to auto-start. To see which VMs are or are not set to auto-start, you can run `virsh list --all --name --autostart` or `virsh list --all --name --no-autostart` respectively.
144 |
145 | If you want to change/set the autostart behaviour, you can set the VMs to auto-start by running:
146 |
147 | ~~~
148 | for vm in $(virsh list --all --name --no-autostart | grep "<cluster-name>"); do
149 | virsh autostart ${vm}
150 | done
151 | ~~~
152 |
153 | Similarly, to disable the auto starting of VMs, you can run:
154 |
155 | ~~~
156 | for vm in $(virsh list --all --name --autostart | grep "<cluster-name>"); do
157 | virsh autostart --disable ${vm}
158 | done
159 | ~~~
160 |
161 | Note: Replace `<cluster-name>` with the cluster name or any matching string to filter the VMs that you want to set/un-set to auto-start.
162 |
163 | ___
164 |
165 | ## Errors While Waiting for clusterversion
166 |
167 | When the bootstrap process is complete, the script waits for clusterversion to become ready before the cluster installation is considered complete. During this phase the script just shows the status/message of the clusterversion operator. You can see different kinds of errors, which are normal. This is due to the nature of the operator reconciliation process. For example:
168 |
169 | ~~~
170 | ====> Waiting for clusterversion:
171 | --> Unable to apply 4.5.0-rc.6: an unknown error has occurred: MultipleErr ...
172 | --> Unable to apply 4.5.0-rc.6: an unknown error has occurred: MultipleErr ...
173 | --> Working towards 4.5.0-rc.6: 62% complete
174 | --> Working towards 4.5.0-rc.6: 62% complete
175 | --> Unable to apply 4.5.0-rc.6: an unknown error has occurred: MultipleErr ...
176 | --> Working towards 4.5.0-rc.6: 99% complete
177 | ~~~
178 |
179 | ~~~
180 | ====> Waiting for clusterversion:
181 | --> Working towards 4.3.12: 46% complete
182 | --> Unable to apply 4.3.12: an unknown error has occurred
183 | --> Working towards 4.3.12: 61% complete
184 | --> Unable to apply 4.3.12: an unknown error has occurred
185 | --> Unable to apply 4.3.12: an unknown error has occurred
186 | --> Unable to apply 4.3.12: an unknown error has occurred
187 | ~~~
188 |
189 | Just let it run and hopefully the clusterversion operator will reconcile and become ready eventually.
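
If you want to follow the progress yourself from another terminal, you can point `oc` at the cluster and query the clusterversion and cluster operators directly (the kubeconfig path below is a placeholder for the one generated under your setup dir):

~~~
export KUBECONFIG=<path-to-kubeconfig-under-setup-dir>
oc get clusterversion
oc get clusteroperators
~~~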
190 | ___
191 |
192 | ## Number of masters and workers
193 |
194 | ___
195 |
196 | ## [Setting up OCS](https://github.com/kxr/ocp4_setup_upi_kvm/wiki/Installing-OCS-4-(OpenShift-Container-Storage))
197 |
198 |
199 |
200 |
--------------------------------------------------------------------------------
/ocp4_setup_upi_kvm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # https://github.com/kxr/ocp4_setup_upi_kvm
3 |
4 | set -e
5 | export START_TS=$(date +%s)
6 | export SINV="${0} ${@}"
7 | export SDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
8 | export COLS="$(stty size | awk '{print $2}')"
9 |
10 | # Utility functions: err, ok, download, etc.
11 | source ${SDIR}/.install_scripts/utils.sh
12 |
13 | # Checking if we are root
14 | test "$(whoami)" = "root" || err "Not running as root"
15 |
16 | # Process Arguments
17 | source ${SDIR}/.defaults.sh
18 | source ${SDIR}/.install_scripts/process_args.sh ${@}
19 |
20 | # Destroy
21 | if [ "${DESTROY}" == "yes" ]; then
22 | source ${SDIR}/.install_scripts/destroy.sh
23 | exit 0
24 | fi
25 |
26 | # Dependencies & Sanity checks
27 | source ${SDIR}/.install_scripts/sanity_check.sh
28 |
29 | # Libvirt Network
30 | source ${SDIR}/.install_scripts/libvirt_network.sh
31 |
32 | # DNS Check
33 | source ${SDIR}/.install_scripts/dns_check.sh
34 |
35 | # Version check
36 | source ${SDIR}/.install_scripts/version_check.sh
37 |
38 | # Download & Prepare
39 | source ${SDIR}/.install_scripts/download_prepare.sh
40 |
41 | # Create LB VM
42 | source ${SDIR}/.install_scripts/create_lb.sh
43 |
44 | # Create Cluster Nodes
45 | source ${SDIR}/.install_scripts/create_nodes.sh
46 |
47 | # OpenShift Bootstrapping
48 | source ${SDIR}/.install_scripts/bootstrap.sh
49 |
50 | # OpenShift ClusterVersion
51 | source ${SDIR}/.install_scripts/clusterversion.sh
52 |
53 | # Generate env file and copy post scripts
54 | source ${SDIR}/.install_scripts/post.sh
55 |
56 |
--------------------------------------------------------------------------------