├── .gitignore ├── README.md ├── ignition ├── main.tf ├── output.tf ├── templates │ ├── 30-mtu │ ├── 99_01-post-deployment.yaml │ ├── 99_02-post-deployment.yaml │ ├── 99_03-post-deployment.yaml │ ├── 99_04-post-deployment.yaml │ ├── 99_05-post-deployment.yaml │ ├── 99_06-post-deployment.yaml │ ├── approve-csrs.sh │ ├── chrony.conf │ └── common.sh └── variables.tf ├── main.tf ├── media ├── topology.drawio └── topology.png ├── terraform.tfvars.example ├── variables.tf ├── versions.tf └── vm ├── ifcfg.tmpl ├── ignition.tf ├── main.tf ├── variables.tf └── versions.tf /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # .tfvars files 9 | *.tfvars 10 | 11 | # Mac stuff 12 | .DS_Store 13 | 14 | # openshift pull-secret 15 | pull-secret.txt 16 | 17 | # local 18 | makefile 19 | **/artifacts/* 20 | **/terraform.tfstate.d/* 21 | **/installer/* 22 | .terraform.lock.hcl 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenShift 4.6 UPI Deployment with Static IPs 2 | 3 | Deploy OpenShift 4.6 and later using static IP addresses for CoreOS nodes. The `ignition` module will inject code into the cluster that will automatically approve all node CSRs. This runs only once at cluster creation. You can delete the `ibm-post-deployment` namespace once your cluster is up and running. 4 | 5 | **NOTE**: This requires OpenShift 4.6 or later to deploy, if you're looking for 4.5 or earlier, take a look at the `pre-4.6` [branch](https://github.com/ibm-cloud-architecture/terraform-openshift4-vmware/tree/pre-4.6) 6 | 7 | **NOTE**: Requires terraform 0.13 or later. 8 | 9 | ## Architecture 10 | 11 | OpenShift 4.6 User-Provided Infrastructure 12 | 13 | ![topology](./media/topology.png) 14 | 15 | ## Prereqs 16 | 17 | 1. [DNS](https://docs.openshift.com/container-platform/4.6/installing/installing_vsphere/installing-vsphere.html#installation-dns-user-infra_installing-vsphere) needs to be configured for external cluster access. 18 | - api.`cluster_id`.`base_domain` points to `openshift_api_virtualip` 19 | - *.apps.`cluster_id`.`base_domain` points to `openshift_ingress_virtualip` 20 | - Point both of those DNS A or CNAME records to your LoadBalancers 21 | 2. [CoreOS OVA](http://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/) must be uploaded to vCenter as a template. 22 | 23 | ## Installation Process 24 | 25 | ```bash 26 | git clone https://github.com/ibm-cloud-architecture/terraform-openshift4-vmware 27 | cd terraform-openshift4-vmware 28 | ``` 29 | 30 | Update your `terraform.tfvars` with your environment values. 
See sample `terraform.tfvars.example` file for details 31 | 32 | ```bash 33 | terraform init 34 | terraform plan 35 | terraform apply 36 | ``` 37 | 38 | ## terraform variables 39 | 40 | | Variable | Description | Type | Default | 41 | | -------------------------------- | ------------------------------------------------------------ | ------ | ------- | 42 | | vsphere_server | FQDN or IP Address of your vSphere Server | string | - | 43 | | vsphere_user | vSphere username | string | - | 44 | | vsphere_password | vSphere password | string | - | 45 | | vsphere_datacenter | vSphere Datacenter where OpenShift will be deployed | string | - | 46 | | vsphere_cluster | vSphere Cluster where OpenShift will be deployed | string | - | 47 | | vsphere_datastore | vSphere Datastore for OpenShift nodes | string | - | 48 | | vsphere_folder | The relative path to the folder which should be used or created for VMs. | string | - | 49 | | vsphere_preexisting_folder | If false, creates a top-level folder with the name from vsphere_folder. | bool | false | 50 | | vsphere_resource_pool | The resource pool that should be used or created for VMs | string | - | 51 | | vsphere_preexisting_resourcepool | If false, creates a resource pool for OpenShift nodes | bool | - | 52 | | vm_template | Name of CoreOS OVA template from prereq #2 | string | - | 53 | | vm_network | vSphere Network for OpenShift nodes | string | - | 54 | | vm_dns_addresses | List of DNS servers to use for your OpenShift Nodes | list | 8.8.8.8, 8.8.4.4 | 55 | | vm_gateway | IP Address of default gateway. If not set, defaults to first host in machine_cidr | string | null | 56 | | cluster_id | This cluster id must be of max length 27 and must have only alphanumeric or hyphen characters. | string | - | 57 | | base_domain | Base domain for your OpenShift Cluster | string | - | 58 | | machine_cidr | CIDR for your CoreOS VMs in `subnet/mask` format. | string | - | 59 | | bootstrap_ip_address | IP Address for bootstrap node | string | - | 60 | | control_plane_ip_addresses | List of IP addresses for your control plane nodes | list | - | 61 | | control_plane_count | Number of control plane VMs to create | string | 3 | 62 | | control_plane_memory | Memory, in MB, to allocate to control plane VMs | string | 16384 | 63 | | control_plane_num_cpus | Number of CPUs to allocate for control plane VMs | string | 4 | 64 | | control_plane_disk_size | Disk Size, in GB, to allocate for control plane VMs | number | 120 | 65 | | compute_ip_addresses | List of IP addresses for your compute nodes | list | - | 66 | | compute_count | Number of compute VMs to create | string | 3| 67 | | compute_memory | Memory, in MB, to allocate to compute VMs | string | 8192 | 68 | | compute_num_cpus | Number of CPUs to allocate for compute VMs | string | 3 | 69 | | compute_disk_size | Disk Size, in GB, to allocate for compute VMs | number | 60 | 70 | | storage_ip_addresses | List of IP addresses for your storage nodes | list | `Empty` | 71 | | storage_count | Number of storage VMs to create | string | 0 | 72 | | storage_memory | Memory, in MB to allocate to storage VMs | string | 65536 | 73 | | storage_num_cpus | Number of CPUs to allocate for storage VMs | string | 16 | 74 | | storage_disk_size | Disk Size, in GB, to allocate for storage VMs | number | 120 | 75 | | openshift_pull_secret | Path to your OpenShift [pull secret](https://cloud.redhat.com/openshift/install/vsphere/user-provisioned) | string | - | 76 | | openshift_sdn | OpenShift SDN to use. 
Use `OVNKubernetes` for 4.12 and later. | string | OpenShiftSDN | 77 | | openshift_cluster_cidr | CIDR for pods in the OpenShift SDN | string | 10.128.0.0/14 | 78 | | openshift_service_cidr | CIDR for services in the OpenShift SDN | string | 172.30.0.0/16 | 79 | | openshift_host_prefix | Controls the number of pods to allocate to each node from the `openshift_cluster_cidr` CIDR. For example, 23 would allocate 2^(32-23) = 512 pods to each node. | string | 23 | 80 | | openshift_version | Version of OpenShift to install. 4.6 or later. | string | 4.6 | 81 | | create_openshift_vips | Create the ingressVIP and apiVIP on nodes (same as IPI). | bool | true | 82 | | openshift_api_virtualip | The IP Address that will be used for the `api` LoadBalanced endpoint. Must be on the same CIDR range as the master nodes | string | - | 83 | | openshift_ingress_virtualip | The IP Address that will be used for the `*.apps` LoadBalanced endpoint. Must be on the same CIDR range as the worker nodes | string | - | 84 | | ssh_public_key | Path to your ssh public key. If left blank we will generate one. | string | - | 85 | | openshift_worker_mtu | Set the NIC MTU value of your worker nodes | number | 1500 | 86 | | openshift_ntp_server | Set the default NTP server | string | - | 87 | | airgapped | Configuration for an AirGapped environment | map | see sample tfvars file | 88 | | proxy_config | Configuration for cluster-wide proxy | map | see sample tfvars file | 89 | | openshift_additional_trust_bundle | Path to file containing custom certificate authority | string | - | 90 | -------------------------------------------------------------------------------- /ignition/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | minor_version = split("-", split(".", var.openshift_version)[1])[0] 3 | } 4 | 5 | data "template_file" "install_config" { 6 | template = < 8 ? "v1" : "v1beta1" 89 | }) 90 | } 91 | 92 | data "template_file" "post_deployment_05" { 93 | template = templatefile("${path.module}/templates/99_05-post-deployment.yaml", { 94 | csr_common_secret = base64encode(file("${path.module}/templates/common.sh")) 95 | csr_approve_secret = base64encode(file("${path.module}/templates/approve-csrs.sh")) 96 | }) 97 | } 98 | 99 | data "template_file" "post_deployment_06" { 100 | template = templatefile("${path.module}/templates/99_06-post-deployment.yaml", { 101 | node_count = var.total_node_count 102 | }) 103 | } 104 | 105 | 106 | data "template_file" "mtu_script" { 107 | template = templatefile("${path.module}/templates/30-mtu", { 108 | mtu = var.worker_mtu 109 | interface = var.default_interface 110 | }) 111 | } 112 | 113 | data "template_file" "mtu_machineconfig_worker" { 114 | template = < 0 )); do 85 | arg=$1 86 | case $arg in 87 | -h|-help|--help ) usage 88 | trace $SCRIPT $LINENO "main" "END $SCRIPT" 89 | exit 0 90 | ;; 91 | 92 | --nodes|--node-count ) node_count=$2; shift 93 | ;; 94 | 95 | --wait-time ) wait_time=$2; shift 96 | ;; 97 | 98 | --wait-count ) wait_count=$2; shift 99 | ;; 100 | 101 | * ) usage; trace $SCRIPT $LINENO "main" "Unknown option: $arg in command line."
102 | exit 2 103 | ;; 104 | esac 105 | # shift to next key-value pair 106 | shift 107 | done 108 | 109 | if [ -z "$node_count" ]; then 110 | node_count=0 111 | fi 112 | 113 | if [ -z "$wait_time" ]; then 114 | wait_time=60 115 | fi 116 | 117 | if [ -z "$wait_count" ]; then 118 | wait_count=10 119 | fi 120 | 121 | if [ -e $PWD/oc ]; then 122 | export OC="./oc" 123 | else 124 | test -e $(which oc) && { 125 | export OC=$(which oc) 126 | } || { 127 | trace $SCRIPT $LINENO "main" "ERROR: oc not found in current working directory or path." 128 | exit 3 129 | } 130 | fi 131 | 132 | 133 | if [ -e "$PWD/auth/kubeconfig" ]; then 134 | export KUBECONFIG=$PWD/auth/kubeconfig 135 | else 136 | test -e $KUBECONFIG && { 137 | export KUBECONFIG 138 | } || { 139 | trace $SCRIPT $LINENO "main" "ERROR: kubeconfig file not found." 140 | echo "Get a copy of the cluster kubeconfig and put it here: $PWD/auth/kubeconfig" 141 | exit 4 142 | } 143 | fi 144 | 145 | if [ $node_count -gt 0 ]; then 146 | trace $SCRIPT $LINENO "main" "INFO: Invoking: approve-csrs node_count=$node_count wait_time=$wait_time wait_count=$wait_count..." 147 | approve-csrs $node_count $wait_time $wait_count 148 | else 149 | trace $SCRIPT $LINENO "main" "INFO: Invoking: approve-all-csrs wait_time=$wait_time wait_count=$wait_count..." 150 | approve-all-csrs $wait_time $wait_count 151 | fi 152 | 153 | trace $SCRIPT $LINENO "main" "END $SCRIPT" 154 | -------------------------------------------------------------------------------- /ignition/templates/chrony.conf: -------------------------------------------------------------------------------- 1 | server ${server} iburst 2 | driftfile /var/lib/chrony/drift 3 | makestep 1.0 3 4 | rtcsync 5 | logdir /var/log/chrony -------------------------------------------------------------------------------- /ignition/templates/common.sh: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # The trace() function has the following invocation form: 3 | # trace $file $LINENO $method "msg" 4 | # trace expects up to 2 "global" environment variables to be set: 5 | # $LOGFILE - the full path to the log file associated with 6 | # the script that is calling trace() 7 | # $CALLER_LOGFILE - the full path to the log file associated with the 8 | # caller of the script. This env var may be empty 9 | # in which case there is no caller log file. 10 | # This additional log file is intended to support 11 | # an aggregated log file. 12 | # 13 | function trace { 14 | local file=$1; shift 15 | local lineno=$1; shift 16 | local method=$1; shift 17 | 18 | ts=$(date +[%Y/%m/%d-%T]) 19 | echo "$ts $file:$method($lineno) $*" | tee -a $LOGFILE $CALLER_LOGFILE 20 | } 21 | 22 | # The roll_file() function takes the following argument(s) 23 | # 1. filePath - full path to the file to be "rolled" 24 | # 25 | # It is assumed the caller has write permission in the directory 26 | # where the file is located. 27 | # The roll_file() function adds a time stamp to the file name 28 | # immediately before the file extension, then moves the given 29 | # file to the new file with the time stamp in its name. 30 | # 31 | # The roll_file() function is intended to be used in scenarios such 32 | # as logging, where it is desirable to retain previous log files and 33 | # a new log file with the same name is to be created. 
34 | # 35 | function roll_file { 36 | local filePath="$1" 37 | local ts=$(date "+%Y-%m-%d_%H_%M_%s") 38 | # Extract the directory part of the path 39 | dir=${filePath%/*} 40 | # Extract the file name part of the path 41 | fileName=${filePath##*/} 42 | # Strip the extension from the file name 43 | name=${fileName%.*} 44 | # Extract the file name extension 45 | ext=${fileName##*.} 46 | newFilePath="${dir}/${name}_${ts}.${ext}" 47 | mv "$filePath" "$newFilePath" 48 | } 49 | 50 | 51 | # The general-prereqs() function checks the bash version, jq and other utilities 52 | function general-prereqs { 53 | local bash_major_version=${BASH_VERSION%%.*} 54 | local the_rest=${BASH_VERSION#*.} 55 | local bash_minor_version=${the_rest%%.*} 56 | 57 | [[ $bash_major_version -gt 4 || ( $bash_major_version -eq 4 && $bash_minor_version -ge 3 ) ]] || { 58 | echo "The version of bash, ${BASH_VERSION}, does not support associative arrays and by-name parameters." 59 | echo " Upgrade bash to the latest version." 60 | echo " To install the latest bash on MacOS, see https://itnext.io/upgrading-bash-on-macos-7138bd1066ba" 61 | exit 1 62 | } 63 | } 64 | 65 | 66 | function check-prereqs { 67 | general-prereqs 68 | } 69 | 70 | function get-compute-node-count { 71 | oc get nodes --selector=node-role.kubernetes.io/worker | awk 'NR>1' | wc -l | sed -e 's/^[ \t]*//' 72 | } 73 | 74 | # The approve-node-bootstrapper-csrs() approves the first round of CSRs that appear 75 | # for new nodes in an OCP cluster. All nodes need CSRs approved when a cluster is 76 | # first created. When nodes are added to the cluster, the CSRs need to be approved. 77 | # For each new node, a node-bootstrapper CSR needs to be approved. 78 | function approve-node-bootstrapper-csrs() { 79 | local node_count=$1 80 | local wait_time=${2:-60} 81 | local wait_count=${3:-5} 82 | local csrs_approved=0 wait=0 pending_csrs 83 | local file="common.sh" method="approve-node-bootstrapper-csrs" 84 | 85 | while [ $csrs_approved -lt $node_count ] && [ $wait -lt $wait_count ]; do 86 | pending_csrs=$($OC get csr | grep "Pending" | grep "node-bootstrapper" | wc -l) 87 | if [ $pending_csrs -eq 0 ]; then 88 | trace $file $LINENO $method "INFO: Waiting for pending CSRs." 89 | else 90 | pending_csrs=$($OC get csr | grep "Pending" | grep "node-bootstrapper" | awk '{print $1}' | tr '\n' ' ') 91 | for csr in $pending_csrs; do 92 | trace $file $LINENO $method "Approving CSR $csr..." 93 | $OC adm certificate approve $csr | tee -a $LOGFILE 94 | csrs_approved=$(( csrs_approved + 1 )) 95 | done 96 | fi 97 | sleep $wait_time 98 | wait=$(( wait + 1 )) 99 | done 100 | 101 | if [ $csrs_approved -eq $node_count ]; then 102 | trace $file $LINENO $method "INFO: Approved $csrs_approved/$node_count node-bootstrapper CSRs." 103 | else 104 | trace $file $LINENO $method "WARNING: Timed out waiting for pending node-bootstrapper CSRS. Approved $csrs_approved/$node_count node-bootstrapper CSRs" 105 | fi 106 | } 107 | 108 | 109 | # The approve-csrs() function is used to approve all the CSRs associated 110 | # with new nodes in an OCP cluster. All nodes need CSRs approved when a cluster is 111 | # first created. When nodes are added to the cluster, the CSRs need to be approved. 112 | # If the number of nodes for which CSRs need to be approved is unknown, use approve-all-csrs() 113 | # defined below. 
114 | function approve-csrs() { 115 | local node_count=$1 116 | local wait_time=${2:-60} 117 | local wait_count=${3:-5} 118 | declare file="common.sh" method="approve-csrs" 119 | 120 | trace $file $LINENO $method "INFO: Approving CSRs for ${node_count} nodes..." 121 | approve-node-bootstrapper-csrs $node_count $wait_time $wait_count 122 | 123 | local csrs_approved=0 wait=0 csr_node_name="" pending_csrs 124 | while [ $csrs_approved -lt $node_count ] && [ $wait -lt $wait_count ]; do 125 | pending_csrs=$($OC get csr | tail -n +2 | grep "Pending" | wc -l) 126 | if [ $pending_csrs -eq 0 ]; then 127 | trace $file $LINENO $method "INFO: Waiting for pending CSRs." 128 | else 129 | pending_csrs=$($OC get csr | grep "Pending" | awk '{print $1}' | tr '\n' ' ') 130 | for csr in $pending_csrs; do 131 | csr_node_name=$($OC get csr | grep $csr | awk '{print $3}') 132 | csr_node_name=${csr_node_name##*:} # get just the fqdn part of the CSR requestor 133 | trace $file $LINENO $method "INFO: Approving CSR $csr for node $csr_node_name..." 134 | $OC adm certificate approve $csr | tee -a $LOGFILE 135 | csrs_approved=$(( csrs_approved + 1 )) 136 | done 137 | fi 138 | sleep $wait_time 139 | wait=$(( wait + 1 )) 140 | done 141 | } 142 | 143 | # The approve-all-csrs() function is called when the number of CSRs to approve is unknown. 144 | # This function is in support of a cluster deployment process where the the number of nodes 145 | # in the new cluster has not been provided. The script sits in a loop for the given number 146 | # of wait_counts, sleeping each time for the given wait_time seconds before checking again 147 | # to see if any CSRs are pending. 148 | # If the number of nodes for which CSRs need to be approved is known, use approve-csrs() 149 | # defined above. It's more deterministic and exits as soon as its work is complete. 150 | # The approve-all-csrs() function completes after wait_time*wait_count seconds. 151 | function approve-all-csrs() { 152 | local wait_time=${1:-30} 153 | local wait_count=${2:-20} 154 | declare file="common.sh" method="approve-all-csrs" 155 | 156 | trace $file $LINENO $method "INFO: Approving all CSRs..." 157 | 158 | local csrs_approved=0 wait=0 csr_node_name="" pending_csrs 159 | while [ $wait -lt $wait_count ]; do 160 | pending_csrs=$($OC get csr | tail -n +2 | grep "Pending" | wc -l) 161 | if [ $pending_csrs -eq 0 ]; then 162 | trace $file $LINENO $method "INFO: Waiting for pending CSRs." 163 | else 164 | pending_csrs=$($OC get csr | grep "Pending" | awk '{print $1}' | tr '\n' ' ') 165 | for csr in $pending_csrs; do 166 | csr_node_name=$($OC get csr | grep $csr | awk '{print $3}') 167 | csr_node_name=${csr_node_name##*:} # get just the fqdn part of the CSR requestor 168 | trace $file $LINENO $method "INFO: Approving CSR $csr for node $csr_node_name..." 169 | $OC adm certificate approve $csr | tee -a $LOGFILE 170 | csrs_approved=$(( csrs_approved + 1 )) 171 | done 172 | fi 173 | sleep $wait_time 174 | wait=$(( wait + 1 )) 175 | done 176 | 177 | trace $file $LINENO $method "INFO: Approved ${csrs_approved} CSRs." 
178 | } 179 | -------------------------------------------------------------------------------- /ignition/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_domain" { 2 | type = string 3 | } 4 | 5 | variable "cluster_cidr" { 6 | type = string 7 | } 8 | 9 | variable "cluster_hostprefix" { 10 | type = string 11 | } 12 | 13 | variable "cluster_id" { 14 | type = string 15 | } 16 | 17 | variable "cluster_sdn" { 18 | type = string 19 | } 20 | 21 | variable "cluster_servicecidr" { 22 | type = string 23 | } 24 | 25 | variable "machine_cidr" { 26 | type = string 27 | } 28 | 29 | variable "control_plane_count" { 30 | type = string 31 | default = "3" 32 | } 33 | 34 | variable "control_plane_memory" { 35 | type = string 36 | default = "16384" 37 | } 38 | 39 | variable "control_plane_num_cpus" { 40 | type = string 41 | default = "4" 42 | } 43 | 44 | variable "control_plane_disk_size" { 45 | type = number 46 | default = 120 47 | } 48 | 49 | variable "pull_secret" { 50 | type = string 51 | } 52 | 53 | variable "ssh_public_key" { 54 | type = string 55 | } 56 | 57 | variable "vsphere_datacenter" { 58 | type = string 59 | } 60 | 61 | variable "vsphere_datastore" { 62 | type = string 63 | } 64 | 65 | variable "vsphere_password" { 66 | type = string 67 | } 68 | 69 | variable "vsphere_server" { 70 | type = string 71 | } 72 | 73 | variable "vsphere_network" { 74 | type = string 75 | } 76 | 77 | variable "vsphere_cluster" { 78 | type = string 79 | } 80 | 81 | variable "vsphere_username" { 82 | type = string 83 | } 84 | 85 | variable "vsphere_folder" { 86 | type = string 87 | } 88 | 89 | variable "openshift_version" { 90 | type = string 91 | } 92 | 93 | variable "total_node_count" { 94 | type = number 95 | } 96 | 97 | variable "api_vip" { 98 | type = string 99 | } 100 | 101 | variable "ingress_vip" { 102 | type = string 103 | } 104 | 105 | variable "worker_mtu" { 106 | type = number 107 | default = 1500 108 | } 109 | 110 | variable "default_interface" { 111 | type = string 112 | default = "ens192" 113 | } 114 | 115 | variable "ntp_server" { 116 | type = string 117 | default = "" 118 | } 119 | 120 | variable "proxy_config" { 121 | type = map(string) 122 | default = { 123 | enabled = false 124 | httpProxy = "" 125 | httpsProxy = "" 126 | noProxy = "" 127 | } 128 | } 129 | 130 | variable "trust_bundle" { 131 | type = string 132 | default = "" 133 | } 134 | 135 | variable "airgapped" { 136 | type = map(string) 137 | default = { 138 | enabled = false 139 | repository = "" 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # force local ignition provider binary 2 | # provider "ignition" { 3 | # version = "0.0.0" 4 | # } 5 | 6 | locals { 7 | cluster_domain = "${var.cluster_id}.${var.base_domain}" 8 | bootstrap_fqdns = ["bootstrap-0.${local.cluster_domain}"] 9 | control_plane_fqdns = [for idx in range(var.control_plane_count) : "control-plane-${idx}.${local.cluster_domain}"] 10 | compute_fqdns = [for idx in range(var.compute_count) : "compute-${idx}.${local.cluster_domain}"] 11 | storage_fqdns = [for idx in range(var.storage_count) : "storage-${idx}.${local.cluster_domain}"] 12 | ssh_public_key = var.ssh_public_key == "" ? chomp(tls_private_key.installkey[0].public_key_openssh) : chomp(file(pathexpand(var.ssh_public_key))) 13 | folder_path = var.vsphere_folder == "" ? 
var.cluster_id : var.vsphere_folder 14 | resource_pool_id = var.vsphere_preexisting_resourcepool ? data.vsphere_resource_pool.resource_pool[0].id : vsphere_resource_pool.resource_pool[0].id 15 | } 16 | 17 | provider "vsphere" { 18 | user = var.vsphere_user 19 | password = var.vsphere_password 20 | vsphere_server = var.vsphere_server 21 | allow_unverified_ssl = true 22 | } 23 | 24 | data "vsphere_datacenter" "dc" { 25 | name = var.vsphere_datacenter 26 | } 27 | 28 | data "vsphere_compute_cluster" "compute_cluster" { 29 | name = var.vsphere_cluster 30 | datacenter_id = data.vsphere_datacenter.dc.id 31 | } 32 | 33 | data "vsphere_datastore" "datastore" { 34 | name = var.vsphere_datastore 35 | datacenter_id = data.vsphere_datacenter.dc.id 36 | } 37 | 38 | data "vsphere_network" "network" { 39 | name = var.vm_network 40 | datacenter_id = data.vsphere_datacenter.dc.id 41 | } 42 | 43 | data "vsphere_virtual_machine" "template" { 44 | name = var.vm_template 45 | datacenter_id = data.vsphere_datacenter.dc.id 46 | } 47 | 48 | resource "vsphere_resource_pool" "resource_pool" { 49 | count = var.vsphere_preexisting_resourcepool ? 0 : 1 50 | 51 | name = var.vsphere_resource_pool == "" ? var.cluster_id : var.vsphere_resource_pool 52 | parent_resource_pool_id = data.vsphere_compute_cluster.compute_cluster.resource_pool_id 53 | } 54 | 55 | data "vsphere_resource_pool" "resource_pool" { 56 | count = var.vsphere_preexisting_resourcepool ? 1 : 0 57 | 58 | name = var.vsphere_resource_pool 59 | datacenter_id = data.vsphere_datacenter.dc.id 60 | } 61 | 62 | resource "vsphere_folder" "folder" { 63 | count = var.vsphere_preexisting_folder ? 0 : 1 64 | 65 | path = var.vsphere_folder == "" ? var.cluster_id : var.vsphere_folder 66 | type = "vm" 67 | datacenter_id = data.vsphere_datacenter.dc.id 68 | } 69 | 70 | resource "tls_private_key" "installkey" { 71 | count = var.ssh_public_key == "" ? 1 : 0 72 | 73 | algorithm = "RSA" 74 | rsa_bits = 4096 75 | } 76 | 77 | resource "local_file" "write_private_key" { 78 | count = var.ssh_public_key == "" ? 1 : 0 79 | 80 | content = tls_private_key.installkey[0].private_key_pem 81 | filename = "${path.root}/installer/${var.cluster_id}/sshkeys/openshift_rsa" 82 | file_permission = 0600 83 | } 84 | 85 | resource "local_file" "write_public_key" { 86 | count = var.ssh_public_key == "" ? 1 : 0 87 | 88 | content = tls_private_key.installkey[0].public_key_openssh 89 | filename = "${path.root}/installer/${var.cluster_id}/sshkeys/openshift_rsa.pub" 90 | file_permission = 0600 91 | } 92 | 93 | module "ignition" { 94 | source = "./ignition" 95 | ssh_public_key = local.ssh_public_key 96 | base_domain = var.base_domain 97 | cluster_id = var.cluster_id 98 | cluster_cidr = var.openshift_cluster_cidr 99 | cluster_hostprefix = var.openshift_host_prefix 100 | cluster_servicecidr = var.openshift_service_cidr 101 | cluster_sdn = var.openshift_sdn 102 | machine_cidr = var.machine_cidr 103 | vsphere_server = var.vsphere_server 104 | vsphere_username = var.vsphere_user 105 | vsphere_password = var.vsphere_password 106 | vsphere_datacenter = var.vsphere_datacenter 107 | vsphere_datastore = var.vsphere_datastore 108 | vsphere_cluster = var.vsphere_cluster 109 | vsphere_network = var.vm_network 110 | vsphere_folder = local.folder_path 111 | api_vip = var.create_openshift_vips ? var.openshift_api_virtualip : "" 112 | ingress_vip = var.create_openshift_vips ? 
var.openshift_ingress_virtualip : "" 113 | pull_secret = var.openshift_pull_secret 114 | openshift_version = var.openshift_version 115 | total_node_count = var.compute_count + var.storage_count 116 | worker_mtu = var.openshift_worker_mtu 117 | ntp_server = var.openshift_ntp_server 118 | airgapped = var.airgapped 119 | proxy_config = var.proxy_config 120 | trust_bundle = var.openshift_additional_trust_bundle 121 | } 122 | 123 | module "bootstrap" { 124 | source = "./vm" 125 | 126 | ignition = module.ignition.bootstrap_ignition 127 | 128 | hostnames_ip_addresses = zipmap( 129 | local.bootstrap_fqdns, 130 | [var.bootstrap_ip_address] 131 | ) 132 | 133 | resource_pool_id = local.resource_pool_id 134 | datastore_id = data.vsphere_datastore.datastore.id 135 | datacenter_id = data.vsphere_datacenter.dc.id 136 | network_id = data.vsphere_network.network.id 137 | folder_id = local.folder_path 138 | guest_id = data.vsphere_virtual_machine.template.guest_id 139 | template_uuid = data.vsphere_virtual_machine.template.id 140 | disk_thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned 141 | 142 | cluster_domain = local.cluster_domain 143 | machine_cidr = var.machine_cidr 144 | 145 | num_cpus = 2 146 | memory = 8192 147 | disk_size = 60 148 | dns_addresses = var.vm_dns_addresses 149 | vm_gateway = var.vm_gateway == null ? cidrhost(var.machine_cidr, 1) : var.vm_gateway 150 | } 151 | 152 | module "control_plane_vm" { 153 | source = "./vm" 154 | 155 | hostnames_ip_addresses = zipmap( 156 | local.control_plane_fqdns, 157 | var.control_plane_ip_addresses 158 | ) 159 | 160 | ignition = module.ignition.master_ignition 161 | 162 | resource_pool_id = local.resource_pool_id 163 | datastore_id = data.vsphere_datastore.datastore.id 164 | datacenter_id = data.vsphere_datacenter.dc.id 165 | network_id = data.vsphere_network.network.id 166 | folder_id = local.folder_path 167 | guest_id = data.vsphere_virtual_machine.template.guest_id 168 | template_uuid = data.vsphere_virtual_machine.template.id 169 | disk_thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned 170 | 171 | cluster_domain = local.cluster_domain 172 | machine_cidr = var.machine_cidr 173 | 174 | num_cpus = var.control_plane_num_cpus 175 | memory = var.control_plane_memory 176 | disk_size = var.control_plane_disk_size 177 | dns_addresses = var.vm_dns_addresses 178 | vm_gateway = var.vm_gateway == null ? cidrhost(var.machine_cidr, 1) : var.vm_gateway 179 | } 180 | 181 | module "compute_vm" { 182 | source = "./vm" 183 | 184 | hostnames_ip_addresses = zipmap( 185 | local.compute_fqdns, 186 | var.compute_ip_addresses 187 | ) 188 | 189 | ignition = module.ignition.worker_ignition 190 | 191 | resource_pool_id = local.resource_pool_id 192 | datastore_id = data.vsphere_datastore.datastore.id 193 | datacenter_id = data.vsphere_datacenter.dc.id 194 | network_id = data.vsphere_network.network.id 195 | folder_id = local.folder_path 196 | guest_id = data.vsphere_virtual_machine.template.guest_id 197 | template_uuid = data.vsphere_virtual_machine.template.id 198 | disk_thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned 199 | 200 | cluster_domain = local.cluster_domain 201 | machine_cidr = var.machine_cidr 202 | 203 | num_cpus = var.compute_num_cpus 204 | memory = var.compute_memory 205 | disk_size = var.compute_disk_size 206 | dns_addresses = var.vm_dns_addresses 207 | vm_gateway = var.vm_gateway == null ? 
cidrhost(var.machine_cidr, 1) : var.vm_gateway 208 | } 209 | 210 | module "storage_vm" { 211 | source = "./vm" 212 | 213 | hostnames_ip_addresses = zipmap( 214 | local.storage_fqdns, 215 | var.storage_ip_addresses 216 | ) 217 | 218 | ignition = module.ignition.worker_ignition 219 | 220 | resource_pool_id = local.resource_pool_id 221 | datastore_id = data.vsphere_datastore.datastore.id 222 | datacenter_id = data.vsphere_datacenter.dc.id 223 | network_id = data.vsphere_network.network.id 224 | folder_id = local.folder_path 225 | guest_id = data.vsphere_virtual_machine.template.guest_id 226 | template_uuid = data.vsphere_virtual_machine.template.id 227 | disk_thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned 228 | 229 | cluster_domain = local.cluster_domain 230 | machine_cidr = var.machine_cidr 231 | 232 | num_cpus = var.storage_num_cpus 233 | memory = var.storage_memory 234 | disk_size = var.storage_disk_size 235 | dns_addresses = var.vm_dns_addresses 236 | vm_gateway = var.vm_gateway == null ? cidrhost(var.machine_cidr, 1) : var.vm_gateway 237 | } 238 | 239 | -------------------------------------------------------------------------------- /media/topology.drawio: -------------------------------------------------------------------------------- 1 | 7Vxbb9s2FP41BraHGtRderSdtA2aYcZSbN1eDFqiZa2yKFB0bO/Xj5QoWaJoxW2c+AIVQSMeXkSdy3cOP8sZGJPV9hOB6fI3HKB4oINgOzDuBrquacBmv7hkV0hcSy8EIYmCQgT2gqfoPyRmltJ1FKBMyAoRxTimUdoU+jhJkE8bMkgI3jSHLXAcNAQpDFFL8OTDuC39Kwrosnwu29t3fEZRuBS3dnWn6FjBcrB4kmwJA7ypiYz7gTEhGNPiarWdoJgrr6mXjwd6q40RlNBjJnz4utllyH78+sn+PfKmXwJgf/mg6VaxzjOM1+KRBxN9MGJrAenCxyTFBFLEJAmiG0y+iyeju1JdBK+TAPE7agNjDIkvLMrMaYwDmC2rvowS/B1NcIxJPtX4aHl3ls16FlEcl/IEJ2z6OCQwiNhzSmK+4BRSikjChC7bilmtXFpL5/uIo5CPiNGC8hvghD6JPfOtPCNCI2bykRhGccqkbf0KlfPhaFsTCX1/QniFKNmxIaJXB8L2wvkrX9jsXclxhGxZ8yKtnAiF+4bV2nsLswth5B8yuN02uGzFUiGPcI7iKc4iGmGumDmmFK+YbpZ0FQvl1a3FrDjSLGMEVPZd5P8Oqru0kc/UjdiUcYqjhOaPb43ZD1PIBAwt/v/AYutNuGyY+y+T63KHEDSFKpmjErqOcllNuYdysCN35AJNsQdZVj7FfgX2IzSAyP0zKhSRh80SptxKq23IAXfoR5mPZxlcoKGPVym3FXrwubXG87C4UI1lmMrUPZzHMPk+C9Bz5PN4Iih7cUqG/DVBsyDJThMjriPFiOm0YsRShIhpvVWEmKAVEChgSUE0MaFLHOIExvd76XgPfNz592MeMffu3HT/Ikp3Ag/hmuJmHKFtRL/Vrv/mSzFvKVp3W7Fy3tjVGlNEIvbgPGQKWcKU8C2frLllWyxmGKVgv17e2tVb8ooHjZzhNfFRJ9gIW1JIQiQmj71/vkTh/F9Pg/Zn/QH+sQLeB4FBXM+dTkNQDGn03EzOKhcQU6c8hPbO5th209lsb+i5zVWKvYqJki9VO3kNADstAMYpSrJltOA7XWdM8T0g3zQgMzdh9euxiJt7hFy1nKA4MWXgtY4CXusEpYkSAwz3nLhbuE0de53XgK+EvG+Gu11o+iLqsnzwFrA7IgTuagMEcBxEZdfUmp7oas2TzAvjTRNIzlfs4GdxW60r77K8U79977QuxDu9pnda7gve2RxvOe7be6euONTZMT/sztlFyC8eMQxYUoeJzwuMopPdq+o/uugoyoOTVhxVHdMXHddxCoyZM83mpTcZ41jykcp+JyhUHF0q2k37vCdETxFtBziz3HQM8DlDGa8zqqjte8qsizIzdK9lbCVlZlru682txlZXAa6SEUNmxfT4x6/4ajgvVwDdainvWarFtoeu7nquZQGHBYTZUpINVLyirg9BbZpxAp2pT7lAlZGOU1q3EV6tSunYY9hDG7i2Z9mWrjmG67Y0adgKTTrGkA/3NAOYhum4HVXH67CmzRasoBJHVJmynQrlNNhKgY0sJ+U8toacBJttpynQ2mlQToHSBmuCVquxPJDuD2obzNMmXtM4Shhalp8LgcOgWKtlNkuWOp9SmFewG5b9SvAToKwrwFDOxnCTmcPCufktpWS6753hZJYStIpyKqCF7tbInrgH0L2bshGgnbHHiJLwMW/dGUBsXXWLKs+oS/sXYvxldK+Fk3vGaKo+V+nDqQ+nKw6nsrfMZRcTXlofXn143Up4VbF0MeHVLgbnGFNmC5j2EdZH2LtF2I8d0Q4fxfQfPYt55zyKtXkAZklmJT4zjSEzhByDCku1zKqZzv14JHM9aoPKniaZr1pKMvzpDXgOoqHzI6wu5ns0fWAj/nyY9rS3GuN72vtnaO+Z3vqEL4gISy6Fv2xQRruYcZilRSJaRFu+wNvBrGU0Ydayh5aK5qpehalHssHQ9c2YVU8RvGdmVi1gDp1aPmkryvK89wE8dRZq64z7/jp/LfcmspB3wLSdWei8RtGAKg29hu4+pIQbp7s1YLYUyV807xmE/nxzfgbBOBCUF8x3t7/e0YdTH07XFk4Xy3e3v0zTh1cfXlcaXmfkuztfAO3idh6SkB3csya/Myd7dqdnfHrG59oZn+NPxBXj03xDnjM+Tn4CdmxX02xDcep7d/anZKXOy/4YDUXZJhgCcBHsj9oV9JbOMooJ/xK7Dn7BKXc5GP/a0uN1UUCVZ1wmBaS2zIkZoIM6u
B0GSK3HngDqS+qzl9TdIX6J/I96xz390wfT1QfT+dkf9RP05E8fXLcSXO/H/bDm/o9eFd9/3f/pMOP+fw== -------------------------------------------------------------------------------- /media/topology.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/terraform-openshift4-vmware/ef10b93c9990ae14cecf37f62d3f96b33113f0e5/media/topology.png -------------------------------------------------------------------------------- /terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | // ID identifying the cluster to create. Use your username so that resources created can be tracked back to you. 2 | cluster_id = "upi46" 3 | 4 | // Base domain from which the cluster domain is a subdomain. 5 | base_domain = "ncolon.xyz" 6 | 7 | // Name of the vSphere server. The dev cluster is on "vcsa.vmware.devcluster.openshift.com". 8 | vsphere_server = "vcsa67.rtp.raleigh.ibm.com" 9 | 10 | // User on the vSphere server. 11 | vsphere_user = "administrator@vsphere.local" 12 | 13 | // Password of the user on the vSphere server. 14 | vsphere_password = "MySecretPa55w0rd" 15 | 16 | // Name of the vSphere cluster. The dev cluster is "devel". 17 | vsphere_cluster = "cluster01" 18 | 19 | // Name of the vSphere data center. The dev cluster is "dc1". 20 | vsphere_datacenter = "dc01" 21 | 22 | // Name of the vSphere data store to use for the VMs. The dev cluster uses "nvme-ds1". 23 | vsphere_datastore = "openshift" 24 | 25 | // Name of the RHCOS VM template to clone to create VMs for the cluster 26 | vm_template = "rhcos-4.6-template" 27 | 28 | // Name of the VM Network for your cluster nodes 29 | vm_network = "vdpg-192.168.100" 30 | 31 | // Name of the VM Network for the loadbalancer NIC. 32 | // loadbalancer_network = "vDPortGroup" 33 | 34 | // The machine_cidr where IP addresses will be assigned for cluster nodes. 35 | // Additionally, IPAM will assign IPs based on the network ID. 36 | machine_cidr = "192.168.100.0/24" 37 | 38 | // The number of control plane VMs to create. Default is 3. 39 | control_plane_count = 3 40 | 41 | // The number of compute VMs to create. Default is 3. 42 | compute_count = 3 43 | 44 | // Set bootstrap_ip, control_plane_ip, and compute_ip if you want to use static 45 | // IPs reserved by someone else, rather than the IPAM server. 46 | 47 | // The IP address to assign to the bootstrap VM. 48 | bootstrap_ip_address = "192.168.100.80" 49 | 50 | // The IP addresses to assign to the control plane VMs. The length of this list 51 | // must match the value of control_plane_count. 52 | control_plane_ip_addresses = ["192.168.100.81", "192.168.100.82", "192.168.100.83"] 53 | 54 | // The IP addresses to assign to the compute VMs. The length of this list must 55 | // match the value of compute_count. 56 | compute_ip_addresses = ["192.168.100.84", "192.168.100.85", "192.168.100.86"] 57 | 58 | // The IP addresses of your DNS servers for your OpenShift nodes 59 | vm_dns_addresses = ["9.42.106.2", "9.42.106.3"] 60 | 61 | // The IP address of the default gateway. If not set, it will use the first host of the machine_cidr range.
62 | // vm_gateway = "192.168.100.254" 63 | 64 | // Path to your OpenShift Pull Secret 65 | openshift_pull_secret = "~/.pull-secret" 66 | 67 | // Set to true (default) so that OpenShift self-hosts its own LoadBalancers (similar to IPI deployments) 68 | // If set to false, you must bring your own LoadBalancers and point the api endpoint to the masters and the apps endpoint to the workers 69 | create_openshift_vips = true 70 | 71 | // If create_openshift_vips is set to true, you must provide the IP addresses that will be used for the api and *.apps endpoints 72 | // These IP addresses MUST be on the same CIDR range as machine_cidr 73 | openshift_api_virtualip = "192.168.100.201" 74 | openshift_ingress_virtualip = "192.168.100.200" 75 | 76 | // The number of storage VMs to create. Default is 0. Set to 0 or 3 77 | // storage_count = 3 78 | 79 | 80 | // The IP addresses to assign to the storage VMs. The length of this list must 81 | // match the value of storage_count. 82 | //storage_ip_addresses = ["192.168.100.87", "192.168.100.88", "192.168.100.89"] 83 | 84 | // Set MTU for worker VMs 85 | // openshift_worker_mtu = 9000 86 | 87 | // Set NTP server 88 | // openshift_ntp_server = "vistime.rtp.raleigh.ibm.com" 89 | 90 | // AirGapped Configuration 91 | // Set enabled to true to configure your cluster for airgap install 92 | // Set repository to the hostname:port of your image registry 93 | // NOTE: 94 | // The registry has to be running on an SSL/TLS secured port. 95 | // If the registry certificate is signed by a certificate authority 96 | // which RHCOS does not trust by default, copy the certificate authority certificate 97 | // locally and update the openshift_additional_trust_bundle variable 98 | // https://docs.openshift.com/container-platform/4.7/installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.html#installation-creating-image-restricted_installing-restricted-networks-installer-provisioned-vsphere 99 | // https://github.com/openshift/installer/blob/master/docs/user/customization.md#image-content-sources 100 | // airgapped = { 101 | // enabled = true 102 | // repository = "myrepository.example.com:8443" 103 | // } 104 | 105 | // Proxy Configuration 106 | // Set enabled to true to configure your cluster-wide proxy at install time 107 | // https://github.com/openshift/installer/blob/master/docs/user/customization.md#proxy 108 | // proxy_config = { 109 | // enabled = true 110 | // httpProxy = "http://user:pass@proxy:port" 111 | // httpsProxy = "http://user:pass@proxy:port" 112 | // noProxy = "ip1, ip2, ip3, .example.com, cidr/mask" 113 | // } 114 | // NOTE: 115 | // If the proxy uses a custom CA to do traffic inspection 116 | // (also known as a man-in-the-middle proxy), 117 | // copy the custom CA certificate locally and update the 118 | // openshift_additional_trust_bundle variable 119 | 120 | // Custom CA certificate 121 | // NOTE: 122 | // If any component in your cluster will communicate with services 123 | // secured by SSL/TLS certificates signed by a certificate authority 124 | // which RHCOS does not trust by default, copy the certificate authority certificate 125 | // locally and update this variable. 126 | // 127 | // The certificate MUST have the CA:TRUE basicConstraints extension set.
128 | // https://github.com/openshift/installer/blob/master/docs/user/customization.md#additional-trust-bundle 129 | // openshift_additional_trust_bundle = "/path/to/customCA.crt" 130 | 131 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | ////// 2 | // vSphere variables 3 | ////// 4 | 5 | variable "vsphere_server" { 6 | type = string 7 | description = "This is the vSphere server for the environment." 8 | } 9 | 10 | variable "vsphere_user" { 11 | type = string 12 | description = "vSphere server user for the environment." 13 | } 14 | 15 | variable "vsphere_password" { 16 | type = string 17 | description = "vSphere server password" 18 | } 19 | 20 | variable "vsphere_cluster" { 21 | type = string 22 | description = "This is the name of the vSphere cluster." 23 | } 24 | 25 | variable "vsphere_datacenter" { 26 | type = string 27 | description = "This is the name of the vSphere data center." 28 | } 29 | 30 | variable "vsphere_datastore" { 31 | type = string 32 | description = "This is the name of the vSphere data store." 33 | } 34 | 35 | variable "vm_template" { 36 | type = string 37 | description = "This is the name of the VM template to clone." 38 | } 39 | 40 | variable "vm_network" { 41 | type = string 42 | description = "This is the name of the publicly accessible network for cluster ingress and access." 43 | default = "VM Network" 44 | } 45 | 46 | variable "vm_dns_addresses" { 47 | type = list(string) 48 | description = "List of DNS servers to use for your OpenShift Nodes" 49 | default = ["8.8.8.8", "8.8.4.4"] 50 | } 51 | 52 | variable "vm_gateway" { 53 | type = string 54 | description = "IP Address to use for VM default gateway. If not set, default is the first host in the CIDR range" 55 | default = null 56 | } 57 | 58 | ///////// 59 | // OpenShift cluster variables 60 | ///////// 61 | 62 | variable "cluster_id" { 63 | type = string 64 | description = "This cluster id must be of max length 27 and must have only alphanumeric or hyphen characters." 65 | } 66 | 67 | variable "base_domain" { 68 | type = string 69 | description = "The base DNS zone to add the sub zone to." 
70 | } 71 | 72 | variable "machine_cidr" { 73 | type = string 74 | } 75 | 76 | ///////// 77 | // Bootstrap machine variables 78 | ///////// 79 | variable "bootstrap_ip_address" { 80 | type = string 81 | default = "" 82 | } 83 | 84 | /////////// 85 | // control-plane machine variables 86 | /////////// 87 | 88 | variable "control_plane_count" { 89 | type = string 90 | default = "3" 91 | } 92 | 93 | variable "control_plane_ip_addresses" { 94 | type = list(string) 95 | default = [] 96 | } 97 | variable "control_plane_memory" { 98 | type = string 99 | default = "16384" 100 | } 101 | 102 | variable "control_plane_num_cpus" { 103 | type = string 104 | default = "4" 105 | } 106 | 107 | variable "control_plane_disk_size" { 108 | type = number 109 | default = 60 110 | } 111 | 112 | ////////// 113 | // compute machine variables 114 | ////////// 115 | 116 | 117 | variable "compute_count" { 118 | type = string 119 | default = "3" 120 | } 121 | 122 | variable "compute_ip_addresses" { 123 | type = list(string) 124 | default = [] 125 | } 126 | 127 | variable "compute_memory" { 128 | type = string 129 | default = "8192" 130 | } 131 | 132 | variable "compute_num_cpus" { 133 | type = string 134 | default = "4" 135 | } 136 | 137 | variable "compute_disk_size" { 138 | type = number 139 | default = 60 140 | } 141 | 142 | ////////// 143 | // storage machine variables 144 | ////////// 145 | 146 | variable "storage_count" { 147 | type = string 148 | default = "0" 149 | } 150 | 151 | variable "storage_ip_addresses" { 152 | type = list(string) 153 | default = [] 154 | } 155 | 156 | variable "storage_memory" { 157 | type = string 158 | default = "65536" 159 | } 160 | 161 | variable "storage_num_cpus" { 162 | type = string 163 | default = "16" 164 | } 165 | 166 | variable "storage_disk_size" { 167 | type = number 168 | default = 120 169 | } 170 | 171 | variable "openshift_api_virtualip" { 172 | type = string 173 | description = "Virtual IP used to access the cluster API." 174 | } 175 | 176 | variable "openshift_ingress_virtualip" { 177 | type = string 178 | description = "Virtual IP used for cluster ingress traffic." 179 | } 180 | 181 | variable "openshift_pull_secret" { 182 | type = string 183 | } 184 | 185 | variable "openshift_sdn" { 186 | type = string 187 | default = "OpenShiftSDN" 188 | } 189 | 190 | variable "openshift_cluster_cidr" { 191 | type = string 192 | default = "10.128.0.0/14" 193 | } 194 | 195 | variable "openshift_service_cidr" { 196 | type = string 197 | default = "172.30.0.0/16" 198 | } 199 | 200 | variable "openshift_host_prefix" { 201 | type = string 202 | default = 23 203 | } 204 | 205 | variable "openshift_version" { 206 | type = string 207 | description = "Specify the OpenShift version you want to deploy. Must be 4.6.x or later to use this automation" 208 | default = "4.6.16" 209 | } 210 | 211 | variable "create_openshift_vips" { 212 | type = bool 213 | # https://github.com/openshift/installer/blob/master/docs/user/vsphere/vips-dns.md 214 | description = "Deploy OpenShift with self contained LoadBalancer" 215 | default = true 216 | } 217 | 218 | variable "ssh_public_key" { 219 | type = string 220 | description = "Path to your ssh public key. If left blank we will generate one." 221 | default = "" 222 | } 223 | 224 | variable "vsphere_preexisting_folder" { 225 | type = bool 226 | description = "If false, creates a top-level folder with the name from vsphere_folder_rel_path." 
227 | default = false 228 | } 229 | 230 | variable "vsphere_folder" { 231 | type = string 232 | description = "The relative path to the folder which should be used or created for VMs." 233 | default = "" 234 | } 235 | 236 | variable "vsphere_preexisting_resourcepool" { 237 | description = "If false, creates a resource pool for OpenShift nodes." 238 | default = false 239 | } 240 | 241 | variable "vsphere_resource_pool" { 242 | type = string 243 | description = "The resource pool that should be used or created for VMs" 244 | default = "" 245 | } 246 | 247 | variable "openshift_worker_mtu" { 248 | type = number 249 | description = "The NIC MTU to be used for compute OpenShift VMs" 250 | default = 1500 251 | } 252 | 253 | variable "openshift_ntp_server" { 254 | type = string 255 | default = "" 256 | } 257 | 258 | variable "proxy_config" { 259 | type = map(string) 260 | default = { 261 | enabled = false 262 | httpProxy = "http://user:password@ip:port" 263 | httpsProxy = "http://user:password@ip:port" 264 | noProxy = "ip1,ip2,ip3,.example.com,cidr/mask" 265 | } 266 | } 267 | 268 | variable "openshift_additional_trust_bundle" { 269 | description = "path to a file with all your additional ca certificates" 270 | type = string 271 | default = "" 272 | } 273 | 274 | variable "airgapped" { 275 | type = map(string) 276 | default = { 277 | enabled = false 278 | repository = "" 279 | } 280 | } 281 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | local = { 4 | source = "hashicorp/local" 5 | } 6 | tls = { 7 | source = "hashicorp/tls" 8 | } 9 | vsphere = { 10 | source = "hashicorp/vsphere" 11 | } 12 | } 13 | required_version = ">= 0.13" 14 | } 15 | -------------------------------------------------------------------------------- /vm/ifcfg.tmpl: -------------------------------------------------------------------------------- 1 | TYPE=Ethernet 2 | BOOTPROTO=none 3 | NAME=ens192 4 | DEVICE=ens192 5 | ONBOOT=yes 6 | IPADDR=${ip_address} 7 | PREFIX=${element(split("/", machine_cidr), 1)} 8 | GATEWAY=${gateway} 9 | DOMAIN=${cluster_domain} 10 | %{ for index, ip in dns_addresses ~} 11 | DNS${index+1}=${ip} 12 | %{ endfor ~} 13 | -------------------------------------------------------------------------------- /vm/ignition.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | ignition_encoded = "data:text/plain;charset=utf-8;base64,${base64encode(var.ignition)}" 3 | } 4 | 5 | data "ignition_file" "hostname" { 6 | for_each = var.hostnames_ip_addresses 7 | 8 | path = "/etc/hostname" 9 | mode = "420" 10 | 11 | content { 12 | content = element(split(".", each.key), 0) 13 | } 14 | } 15 | 16 | data "ignition_file" "static_ip" { 17 | for_each = var.hostnames_ip_addresses 18 | 19 | path = "/etc/sysconfig/network-scripts/ifcfg-ens192" 20 | mode = "420" 21 | 22 | content { 23 | content = templatefile("${path.module}/ifcfg.tmpl", { 24 | dns_addresses = var.dns_addresses, 25 | machine_cidr = var.machine_cidr 26 | ip_address = each.value 27 | cluster_domain = var.cluster_domain 28 | gateway = var.vm_gateway 29 | }) 30 | } 31 | } 32 | 33 | data "ignition_config" "ign" { 34 | for_each = var.hostnames_ip_addresses 35 | 36 | merge { 37 | source = local.ignition_encoded 38 | } 39 | 40 | files = [ 41 | data.ignition_file.hostname[each.key].rendered, 42 | data.ignition_file.static_ip[each.key].rendered, 43 | ] 44 | } 
45 | 46 | -------------------------------------------------------------------------------- /vm/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | disks = compact(tolist([var.disk_size, var.extra_disk_size == 0 ? "" : var.extra_disk_size])) 3 | disk_sizes = zipmap( 4 | range(length(local.disks)), 5 | local.disks 6 | ) 7 | } 8 | 9 | resource "vsphere_virtual_machine" "vm" { 10 | for_each = var.hostnames_ip_addresses 11 | 12 | name = element(split(".", each.key), 0) 13 | 14 | resource_pool_id = var.resource_pool_id 15 | datastore_id = var.datastore_id 16 | num_cpus = var.num_cpus 17 | memory = var.memory 18 | guest_id = var.guest_id 19 | folder = var.folder_id 20 | enable_disk_uuid = "true" 21 | 22 | dynamic "disk" { 23 | for_each = local.disk_sizes 24 | content { 25 | label = "disk${disk.key}" 26 | size = disk.value 27 | thin_provisioned = var.disk_thin_provisioned 28 | unit_number = disk.key 29 | } 30 | } 31 | 32 | wait_for_guest_net_timeout = "0" 33 | wait_for_guest_net_routable = "false" 34 | 35 | nested_hv_enabled = var.nested_hv_enabled 36 | 37 | network_interface { 38 | network_id = var.network_id 39 | } 40 | 41 | clone { 42 | template_uuid = var.template_uuid 43 | } 44 | 45 | extra_config = { 46 | "guestinfo.ignition.config.data" = base64encode(var.ignition) 47 | "guestinfo.ignition.config.data.encoding" = "base64" 48 | "guestinfo.afterburn.initrd.network-kargs" = "ip=${each.value}::${var.vm_gateway}:${cidrnetmask(var.machine_cidr)}:${element(split(".", each.key), 0)}:ens192:none ${join(" ", formatlist("nameserver=%v", var.dns_addresses))}" 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /vm/variables.tf: -------------------------------------------------------------------------------- 1 | variable "hostnames_ip_addresses" { 2 | type = map(string) 3 | } 4 | 5 | variable "ignition" { 6 | type = string 7 | default = "" 8 | } 9 | 10 | variable "disk_thin_provisioned" { 11 | type = bool 12 | } 13 | 14 | variable "template_uuid" { 15 | type = string 16 | } 17 | 18 | variable "guest_id" { 19 | type = string 20 | } 21 | 22 | variable "resource_pool_id" { 23 | type = string 24 | } 25 | 26 | variable "folder_id" { 27 | type = string 28 | } 29 | 30 | variable "datastore_id" { 31 | type = string 32 | } 33 | 34 | variable "network_id" { 35 | type = string 36 | } 37 | 38 | variable "cluster_domain" { 39 | type = string 40 | } 41 | 42 | variable "datacenter_id" { 43 | type = string 44 | } 45 | 46 | variable "machine_cidr" { 47 | type = string 48 | } 49 | 50 | variable "memory" { 51 | type = string 52 | } 53 | 54 | variable "num_cpus" { 55 | type = string 56 | } 57 | 58 | variable "dns_addresses" { 59 | type = list(string) 60 | } 61 | 62 | variable "disk_size" { 63 | type = number 64 | default = 60 65 | } 66 | 67 | variable "extra_disk_size" { 68 | type = number 69 | default = 0 70 | } 71 | 72 | variable "nested_hv_enabled" { 73 | type = bool 74 | default = false 75 | } 76 | 77 | variable "vm_gateway" { 78 | type = string 79 | default = null 80 | } 81 | -------------------------------------------------------------------------------- /vm/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | template = { 4 | source = "hashicorp/template" 5 | } 6 | vsphere = { 7 | source = "hashicorp/vsphere" 8 | } 9 | ignition = { 10 | source = "community-terraform-providers/ignition" 11 | } 12 | } 13 | required_version = ">= 
0.13" 14 | } 15 | --------------------------------------------------------------------------------