├── .gitignore
├── AIRGAPPED.md
├── README.md
├── bootstrap
│   ├── main.tf
│   ├── outputs.tf
│   ├── variables.tf
│   └── versions.tf
├── dns
│   ├── dns.tf
│   ├── variables.tf
│   └── versions.tf
├── ignition
│   ├── ignition.tf
│   ├── output.tf
│   ├── scripts
│   │   ├── download.sh.tmpl
│   │   ├── ignition.sh.tmpl
│   │   └── manifests.sh.tmpl
│   ├── templates.tf
│   ├── variables.tf
│   └── versions.tf
├── main.tf
├── master
│   ├── master.tf
│   ├── outputs.tf
│   ├── variables.tf
│   └── versions.tf
├── media
│   └── topology.svg
├── outputs.tf
├── variables-azure.tf
├── versions.tf
└── vnet
    ├── common.tf
    ├── internal-lb.tf
    ├── nsg.tf
    ├── outputs.tf
    ├── public-lb.tf
    ├── variables.tf
    ├── versions.tf
    └── vnet.tf

/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 | 
4 | 
5 | 
6 | # .tfstate files
7 | *.tfstate
8 | *.tfstate.*
9 | .terraform.lock.hcl
10 | 
11 | # .tfvars files
12 | *.tfvars
13 | 
14 | # Mac stuff
15 | .DS_Store
16 | 
17 | # openshift pull-secret
18 | pull-secret
19 | 
20 | # local
21 | makefile
22 | **/artifacts/*
23 | **/terraform.state.d/*
24 | *.log
25 | 
26 | # installer-files
27 | **/installer-files/*
28 | 
29 | # optional files
30 | extras.tf
31 | vnet/nat-gateway.tf
32 | bastion.tf
33 | 
--------------------------------------------------------------------------------
/AIRGAPPED.md:
--------------------------------------------------------------------------------
1 | # Configuration for an AirGapped environment in Azure
2 | 
3 | This repository allows for a completely private, AirGapped implementation. To configure it, one prerequisite first needs to be met:
4 | 
5 | 1. Create an image registry. You can either create an image repository from scratch, or configure one using Azure Container Registry. The CoreOS VMs must be able to reach this repository.
6 | 
7 | ## Creating an internal image registry
8 | 
9 | Create an image registry by following Red Hat's [documentation](https://docs.openshift.com/container-platform/4.2/installing/installing_restricted_networks/installing-restricted-networks-preparations.html).
10 | Follow all steps up to Step 4 of [Mirroring the OpenShift Container Platform image repository](https://docs.openshift.com/container-platform/4.2/installing/installing_restricted_networks/installing-restricted-networks-preparations.html#installation-mirror-repository_installing-restricted-networks-preparations).
11 | 
12 | 
13 | ```bash
14 | $ export OCP_RELEASE="4.6.32-x86_64"
15 | $ export LOCAL_REGISTRY="openshiftrepo.example.com:443"
16 | $ export LOCAL_REPOSITORY="ocp4/openshift4"
17 | $ export PRODUCT_REPO='openshift-release-dev'
18 | $ export LOCAL_SECRET_JSON=''
19 | $ export RELEASE_NAME="ocp-release"
20 | 
21 | $ oc adm -a ${LOCAL_SECRET_JSON} release mirror \
22 |      --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} \
23 |      --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \
24 |      --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}
25 | info: Mirroring 103 images to openshiftrepo.example.com:443/ocp4/openshift4 ...
26 | openshiftrepo.example.com:443/
27 |   ocp4/openshift4
28 |     blobs:
29 | ...
30 | 
31 | sha256:1a69a2bf32e9c39c4395de6c7fbcfbe8f430eb23476a8b85036e67e60050ce53 openshiftrepo.example.com:443/ocp4/openshift4:4.6.32-cluster-authentication-operator
32 | info: Mirroring completed in 1m27.76s (8.084MB/s)
33 | 
34 | Success
35 | Update image:  openshiftrepo.example.com:443/ocp4/openshift4:4.6.32-x86_64
36 | Mirror prefix: openshiftrepo.example.com:443/ocp4/openshift4
37 | 
38 | To use the new mirrored repository to install, add the following section to the install-config.yaml:
39 | 
40 | imageContentSources:
41 | - mirrors:
42 |   - openshiftrepo.example.com:443/ocp4/openshift4
43 |   source: quay.io/openshift-release-dev/ocp-release
44 | - mirrors:
45 |   - openshiftrepo.example.com:443/ocp4/openshift4
46 |   source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
47 | 
48 | 
49 | To use the new mirrored repository for upgrades, use the following to create an ImageContentSourcePolicy:
50 | 
51 | apiVersion: operator.openshift.io/v1alpha1
52 | kind: ImageContentSourcePolicy
53 | metadata:
54 |   name: example
55 | spec:
56 |   repositoryDigestMirrors:
57 |   - mirrors:
58 |     - openshiftrepo.example.com:443/ocp4/openshift4
59 |     source: quay.io/openshift-release-dev/ocp-release
60 |   - mirrors:
61 |     - openshiftrepo.example.com:443/ocp4/openshift4
62 |     source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
63 | ```
64 | 
65 | ## Creating an [Azure Container Registry](https://azure.microsoft.com/en-us/services/container-registry/) Instance
66 | 
67 | On a separate resource group, [create an instance](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-portal) of the Azure Container Registry (ACR) service. You can use the same resource group where the public DNS zone is hosted. When selecting the ACR SKU, select Premium if you wish to restrict network access to specific networks inside your subscription. Otherwise, select Standard, which provides 100GB of storage. Basic only provides 10GB of storage and should not be used. Enable the Admin User to quickly get a username and password that can be used to generate the OpenShift pull secret for this repository (see the sketch below). You can find these values in the `Access Keys` configuration of the ACR.
68 | 
69 | Once the registry is created, follow the Red Hat documentation from [Creating a pull secret for your mirror registry](https://docs.openshift.com/container-platform/4.2/installing/installing_restricted_networks/installing-restricted-networks-preparations.html#installation-local-registry-pull-secret_installing-restricted-networks-preparations) up to Step 4 of [Mirroring the OpenShift Container Platform image repository](https://docs.openshift.com/container-platform/4.2/installing/installing_restricted_networks/installing-restricted-networks-preparations.html#installation-mirror-repository_installing-restricted-networks-preparations).
70 | 
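For reference, a minimal sketch of turning those Admin User credentials into a pull-secret file for the mirror step. The registry host, username, and password below are placeholders, not values from this repository:

```bash
# Build a docker-style auth entry from the ACR Admin User credentials.
ACR_HOST="example.azurecr.io"   # placeholder: your ACR login server
ACR_USER="exampleadmin"         # placeholder: Access Keys -> Username
ACR_PASS="examplepassword"      # placeholder: Access Keys -> password

# docker/podman registries expect base64("user:password") in the auth field.
AUTH=$(printf '%s:%s' "$ACR_USER" "$ACR_PASS" | base64 | tr -d '\n')
cat > pull-secret <<EOF
{"auths":{"$ACR_HOST":{"auth":"$AUTH"}}}
EOF
```

Merge this entry into the pull secret downloaded from Red Hat so that `oc adm release mirror` can authenticate against both quay.io and the ACR instance.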
71 | 
72 | # Airgapped Scenarios
73 | 
74 | ## Private Endpoints with Egress Provided by Azure Public LB
75 | When `azure_private` is set to true, the `api` and `*.apps` domains are configured on private load balancers. A public load balancer is created to provide egress access for the cluster, but no inbound access is allowed. If you want to use a mirrored registry, you can also include the `airgapped` variable in your terraform.tfvars file:
76 | 
77 | ```terraform
78 | azure_private = true
79 | airgapped = {
80 |   enabled    = true
81 |   repository = "example.azurecr.io/ocp4/openshift4"
82 | }
83 | ```
84 | 
85 | ## Private Endpoints with User Defined Routing
86 | In addition to the `azure_private` and `airgapped` variables, you can set other variables that ensure all communication to your cluster is handled via internal endpoints and no traffic goes through the Azure public network. You are responsible for configuring adequate access to the Internet in your VNET (via ExpressRoute, proxies, etc.). Set the `azure_outbound_user_defined_routing` and `azure_preexisting_network` variables to `true` and provide your VNET resource group, VNET name, and control plane and compute subnets:
87 | 
88 | ```terraform
89 | azure_private = true
90 | airgapped = {
91 |   enabled    = true
92 |   repository = "example.azurecr.io/ocp4/openshift4"
93 | }
94 | azure_outbound_user_defined_routing = true
95 | azure_preexisting_network           = true
96 | azure_network_resource_group_name   = "yourNetworkResourceGroup"
97 | azure_virtual_network               = "yourVNETName"
98 | azure_control_plane_subnet          = "yourControlPlaneSubnetName"
99 | azure_compute_subnet                = "yourComputeSubnetName"
100 | ```
101 | 
102 | This ensures that terraform generates the `install-config.yaml` and `ImageContentSourcePolicy` templates for a private, disconnected installation.
103 | 
104 | ## Proxied Environments
105 | When `proxy_config` is set, the cluster-wide proxy will be configured for your OCP cluster during install. You can specify your http and https proxies, any addresses that are not to be proxied, as well as the certificate trust bundle for your proxy. When `proxy_config.enabled` is set to true, your install-config.yaml will be auto-generated with the proper proxy configuration (see the sketch after this section):
106 | 
107 | ```terraform
108 | proxy_config = {
109 |   enabled               = true                                      # set to true to enable proxy configuration
110 |   httpProxy             = "http://user:password@proxy.example.com:80" # only supports http proxies at this time
111 |   httpsProxy            = "http://user:password@proxy.example.com:80" # only supports http proxies at this time
112 |   noProxy               = "ip1,ip2,ip3,.example.com,10.0.0.0/8"     # comma delimited values
113 |   additionalTrustBundle = "/path/to/trust/bundle.pem"               # set to "" for no additionalTrustBundle
114 | }
115 | ```
116 | 
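For orientation, this is roughly what the relevant stanzas of the generated `install-config.yaml` look like when both `proxy_config` and `airgapped` are enabled. Values are taken from the examples above; the remaining install-config fields are omitted, and the exact output is produced by the templates in `ignition/templates.tf`:

```yaml
# Sketch of the proxy and mirror stanzas rendered into install-config.yaml.
apiVersion: v1
proxy:
  httpProxy: http://user:password@proxy.example.com:80
  httpsProxy: http://user:password@proxy.example.com:80
  noProxy: ip1,ip2,ip3,.example.com,10.0.0.0/8
additionalTrustBundle: |
  -----BEGIN CERTIFICATE-----
  <contents of /path/to/trust/bundle.pem>
  -----END CERTIFICATE-----
imageContentSources:
- mirrors:
  - example.azurecr.io/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - example.azurecr.io/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
```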
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OpenShift 4 UPI on Azure Cloud
2 | 
3 | This [terraform](https://www.terraform.io) implementation will deploy OpenShift 4.x into an Azure VNET, with two subnets for control plane and worker nodes. Traffic to the master nodes is handled via a pair of load balancers, one for internal traffic and another for external API traffic. Application load balancing is handled by a third load balancer that talks to the router pods on the infra nodes. Worker, infra, and master nodes are deployed across 3 Availability Zones.
4 | 
5 | ![Topology](./media/topology.svg)
6 | 
7 | ## Prerequisites
8 | 
9 | 1. [Configure DNS](https://github.com/openshift/installer/blob/d0f7654bc4a0cf73392371962aef68cd9552b5dd/docs/user/azure/dnszone.md)
10 | 
11 | 2. [Create a Service Principal](https://github.com/openshift/installer/blob/d0f7654bc4a0cf73392371962aef68cd9552b5dd/docs/user/azure/credentials.md) with proper IAM roles (see the sketch after this list)
12 | 
13 | 3. [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)
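As a rough sketch of prerequisite #2 — the subscription ID and service principal name are placeholders, and the authoritative role requirements are in the installer documentation linked above:

```bash
# Create a service principal scoped to the target subscription.
az ad sp create-for-rbac --name ocp4-upi-sp --role Contributor \
  --scopes "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"

# The output's appId, password, and tenant map to azure_client_id,
# azure_client_secret, and azure_tenant_id in terraform.tfvars below.

# The installer docs also call for User Access Administrator on the subscription:
az role assignment create --assignee "<appId>" \
  --role "User Access Administrator" \
  --scope "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
```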
14 | 
15 | ## Minimal TFVARS file
16 | 
17 | ```terraform
18 | azure_region = "eastus2"
19 | cluster_name = "ocp46"
20 | 
21 | # From Prereq. Step #1
22 | base_domain                           = "azure.example.com"
23 | azure_base_domain_resource_group_name = "openshift4-common-rg"
24 | 
25 | # From Prereq. Step #2
26 | azure_subscription_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
27 | azure_tenant_id       = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
28 | azure_client_id       = "ZZZZZZZZ-ZZZZ-ZZZZ-ZZZZ-ZZZZZZZZZZZZ"
29 | azure_client_secret   = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAAA"
30 | ```
31 | 
32 | ## Customizable Variables
33 | 
34 | | Variable | Description | Default | Type |
35 | | -------- | ----------- | ------- | ---- |
36 | | azure_subscription_id | Subscription ID for Azure Account | - | string |
37 | | azure_tenant_id | Tenant ID for Azure Subscription | - | string |
38 | | azure_client_id | Application Client ID (from Prereq Step #2) | - | string |
39 | | azure_client_secret | Application Client Secret (from Prereq Step #2) | - | string |
40 | | azure_region | Azure Region to deploy to | - | string |
41 | | cluster_name | Cluster Identifier | - | string |
42 | | master_count | Number of master nodes to deploy | 3 | string |
43 | | worker_count | Number of worker nodes to deploy | 3 | string |
44 | | infra_count | Number of infra nodes to deploy | 0 | string |
45 | | machine_v4_cidrs | IPv4 CIDR for OpenShift VNET | \[10.0.0.0/16\] | list |
46 | | machine_v6_cidrs | IPv6 CIDR for OpenShift VNET | \[\] | list |
47 | | base_domain | DNS name for your deployment | - | string |
48 | | azure_base_domain_resource_group_name | Resource group where DNS is hosted. Must be in the same region. | - | string |
49 | | azure_bootstrap_vm_type | Size of bootstrap VM | Standard_D4s_v3 | string |
50 | | azure_master_vm_type | Size of master node VMs | Standard_D4s_v3 | string |
51 | | azure_infra_vm_type | Size of infra node VMs | Standard_D4s_v3 | string |
52 | | azure_worker_vm_type | Size of worker node VMs | Standard_D4s_v3 | string |
53 | | openshift_cluster_network_cidr | CIDR for Kubernetes pods | 10.128.0.0/14 | string |
54 | | openshift_cluster_network_host_prefix | Determines the number of pods a node can host. 23 gives you 510 pods per node. | 23 | string |
55 | | openshift_service_network_cidr | CIDR for Kubernetes services | 172.30.0.0/16 | string |
56 | | openshift_pull_secret | Filename that holds your OpenShift [pull-secret](https://cloud.redhat.com/openshift/install/azure/installer-provisioned) | - | string |
57 | | azure_master_root_volume_size | Size of master node root volume | 512 | string |
58 | | azure_worker_root_volume_size | Size of worker node root volume | 128 | string |
59 | | azure_infra_root_volume_size | Size of infra node root volume | 128 | string |
60 | | azure_master_root_volume_type | Storage type for master root volume | Premium_LRS | string |
61 | | openshift_version | Version of OpenShift to deploy | 4.6.13 | string |
62 | | bootstrap_completed | Control variable to delete bootstrap node after initialization | false | bool |
63 | | azure_private | If set to `true` will deploy `api` and `*.apps` endpoints as private LoadBalancers | - | bool |
64 | | azure_extra_tags | Extra Azure tags to be applied to created resources | {} | map |
65 | | airgapped | Configuration for an AirGapped environment | [AirGapped](AIRGAPPED.md) | map |
66 | | azure_environment | The target Azure cloud environment for the cluster | public | string |
67 | | azure_master_availability_zones | The availability zones in which to create the masters. The length of this list must match `master_count` | ["1","2","3"] | list |
68 | | azure_preexisting_network | Specifies whether an existing network should be used or a new one created for installation. | false | bool |
69 | | azure_resource_group_name | The name of the resource group for the cluster. If this is set, the cluster is installed to that existing resource group; otherwise a new resource group will be created using the cluster id. | - | string |
70 | | azure_network_resource_group_name | The name of the network resource group, either existing or to be created | `null` | string |
71 | | azure_virtual_network | The name of the virtual network, either existing or to be created | `null` | string |
72 | | azure_control_plane_subnet | The name of the subnet for the control plane, either existing or to be created | `null` | string |
73 | | azure_compute_subnet | The name of the subnet for worker nodes, either existing or to be created | `null` | string |
74 | | azure_emulate_single_stack_ipv6 | This determines whether a dual-stack cluster is configured to emulate single-stack IPv6 | false | bool |
75 | | azure_outbound_user_defined_routing | This determines whether user-defined routing will be used for egress to the Internet. When `false`, Standard LB will be used for egress to the Internet. | false | bool |
76 | | use_ipv4 | This determines whether your cluster will use IPv4 networking | true | bool |
77 | | use_ipv6 | This determines whether your cluster will use IPv6 networking | false | bool |
78 | | proxy_config | Configuration for cluster-wide proxy | [AirGapped](AIRGAPPED.md) | map |
79 | | openshift_ssh_key | Path to your own SSH Public Key. If none provided it will create one for you | - | string |
80 | | openshift_additional_trust_bundle | Path to your trusted CA bundle in pem format | - | string |
81 | | openshift_byo_dns | If set to true, we will not create Azure Public/Private DNS zones. **You'll need to manually create `api`, `api-int` and `*.apps` DNS records** | false | bool |
82 | 
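To make the table concrete, here is an illustrative (not prescriptive) `terraform.tfvars` combining several of these variables for a three-zone cluster with dedicated infra nodes. All values are examples; substitute your own environment's settings:

```terraform
# Illustrative values only -- every variable name comes from the table above.
azure_region                          = "eastus2"
cluster_name                          = "ocp46"
base_domain                           = "azure.example.com"
azure_base_domain_resource_group_name = "openshift4-common-rg"

master_count                    = 3
worker_count                    = 3
infra_count                     = 3
azure_worker_vm_type            = "Standard_D8s_v3"
azure_master_availability_zones = ["1", "2", "3"]

openshift_version     = "4.6.13"
openshift_pull_secret = "pull-secret"
azure_extra_tags = {
  owner = "example-team"
}
```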
83 | ## Deploy with Terraform
84 | 
85 | 1. Clone the GitHub repository
86 | 
87 | ```bash
88 | git clone git@github.com:ibm-cloud-architecture/terraform-openshift4-azure.git
89 | ```
90 | 
91 | 2. Create your `terraform.tfvars` file
92 | 
93 | 3. Deploy with terraform
94 | 
95 | ```bash
96 | terraform init
97 | terraform plan
98 | terraform apply
99 | ```
100 | 
101 | 4. Destroy the bootstrap node
102 | 
103 | ```bash
104 | TF_VAR_bootstrap_completed=true terraform apply
105 | ```
106 | 
107 | 5. Access your cluster
108 | 
109 | ```bash
110 | $ export KUBECONFIG=$PWD/installer-files/auth/kubeconfig
111 | $ oc get nodes
112 | NAME                                 STATUS   ROLES          AGE   VERSION
113 | fs2021-hv0eu-infra-eastus21-6kqlt    Ready    infra,worker   20m   v1.19.0+3b01205
114 | fs2021-hv0eu-infra-eastus22-m826l    Ready    infra,worker   20m   v1.19.0+3b01205
115 | fs2021-hv0eu-infra-eastus23-qf4kc    Ready    infra,worker   19m   v1.19.0+3b01205
116 | fs2021-hv0eu-master-0                Ready    master         30m   v1.19.0+3b01205
117 | fs2021-hv0eu-master-1                Ready    master         30m   v1.19.0+3b01205
118 | fs2021-hv0eu-master-2                Ready    master         30m   v1.19.0+3b01205
119 | fs2021-hv0eu-worker-eastus21-bw8nq   Ready    worker         19m   v1.19.0+3b01205
120 | fs2021-hv0eu-worker-eastus22-rtwwh   Ready    worker         20m   v1.19.0+3b01205
121 | fs2021-hv0eu-worker-eastus23-tsw44   Ready    worker         20m   v1.19.0+3b01205
122 | ```
123 | 
124 | ## Infra and Worker Node Deployment
125 | 
126 | Deployment of OpenShift worker and infra nodes is handled by the machine-api cluster operator.
127 | 
128 | ```bash
129 | $ oc get machineset -n openshift-machine-api
130 | NAME                           DESIRED   CURRENT   READY   AVAILABLE   AGE
131 | fs2021-hv0eu-infra-eastus21    1         1         1       1           35m
132 | fs2021-hv0eu-infra-eastus22    1         1         1       1           35m
133 | fs2021-hv0eu-infra-eastus23    1         1         1       1           35m
134 | fs2021-hv0eu-worker-eastus21   1         1         1       1           35m
135 | fs2021-hv0eu-worker-eastus22   1         1         1       1           35m
136 | fs2021-hv0eu-worker-eastus23   1         1         1       1           35m
137 | 
138 | $ oc get machines -n openshift-machine-api
139 | NAME                                 PHASE     TYPE              REGION    ZONE   AGE
140 | fs2021-hv0eu-infra-eastus21-6kqlt    Running   Standard_D4s_v3   eastus2   1      31m
141 | fs2021-hv0eu-infra-eastus22-m826l    Running   Standard_D4s_v3   eastus2   2      31m
142 | fs2021-hv0eu-infra-eastus23-qf4kc    Running   Standard_D4s_v3   eastus2   3      31m
143 | fs2021-hv0eu-master-0                Running   Standard_D8s_v3   eastus2   1      37m
144 | fs2021-hv0eu-master-1                Running   Standard_D8s_v3   eastus2   2      37m
145 | fs2021-hv0eu-master-2                Running   Standard_D8s_v3   eastus2   3      37m
146 | fs2021-hv0eu-worker-eastus21-bw8nq   Running   Standard_D8s_v3   eastus2   1      31m
147 | fs2021-hv0eu-worker-eastus22-rtwwh   Running   Standard_D8s_v3   eastus2   2      31m
148 | fs2021-hv0eu-worker-eastus23-tsw44   Running   Standard_D8s_v3   eastus2   3      31m
149 | ```
150 | 
151 | The infra nodes host the router/ingress pods, all the monitoring infrastructure, and the image registry.
--------------------------------------------------------------------------------
/bootstrap/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 |   bootstrap_nic_ip_v4_configuration_name = "bootstrap-nic-ip-v4"
3 |   bootstrap_nic_ip_v6_configuration_name = "bootstrap-nic-ip-v6"
4 | }
5 | 
6 | 
7 | resource "azurerm_public_ip" "bootstrap_public_ip_v4" {
8 |   count = var.private || ! var.use_ipv4 ? 0 : 1
9 | 
10 |   sku                 = "Standard"
11 |   location            = var.region
12 |   name                = "${var.cluster_id}-bootstrap-pip-v4"
13 |   resource_group_name = var.resource_group_name
14 |   allocation_method   = "Static"
15 | }
16 | 
17 | data "azurerm_public_ip" "bootstrap_public_ip_v4" {
18 |   count = var.private ? 0 : 1
19 | 
20 |   name                = azurerm_public_ip.bootstrap_public_ip_v4[0].name
21 |   resource_group_name = var.resource_group_name
22 | }
23 | 
24 | resource "azurerm_public_ip" "bootstrap_public_ip_v6" {
25 |   count = var.private || ! var.use_ipv6 ? 0 : 1
26 | 
27 |   sku                 = "Standard"
28 |   location            = var.region
29 |   name                = "${var.cluster_id}-bootstrap-pip-v6"
30 |   resource_group_name = var.resource_group_name
31 |   allocation_method   = "Static"
32 |   ip_version          = "IPv6"
33 | }
34 | 
35 | data "azurerm_public_ip" "bootstrap_public_ip_v6" {
36 |   count = var.private || ! var.use_ipv6 ? 0 : 1
37 | 
38 |   name                = azurerm_public_ip.bootstrap_public_ip_v6[0].name
39 |   resource_group_name = var.resource_group_name
40 | }
41 | 
42 | resource "azurerm_network_interface" "bootstrap" {
43 |   name                = "${var.cluster_id}-bootstrap-nic"
44 |   location            = var.region
45 |   resource_group_name = var.resource_group_name
46 | 
47 |   dynamic "ip_configuration" {
48 |     for_each = [for ip in [
49 |       {
50 |         // LIMITATION: azure does not allow an ipv6 address to be primary today
51 |         primary : var.use_ipv4,
52 |         name : local.bootstrap_nic_ip_v4_configuration_name,
53 |         ip_address_version : "IPv4",
54 |         public_ip_id : var.private ? null : azurerm_public_ip.bootstrap_public_ip_v4[0].id,
55 |         include : var.use_ipv4 || var.use_ipv6,
56 |       },
57 |       {
58 |         primary : ! var.use_ipv4,
59 |         name : local.bootstrap_nic_ip_v6_configuration_name,
60 |         ip_address_version : "IPv6",
61 |         public_ip_id : var.private || ! var.use_ipv6 ? null : azurerm_public_ip.bootstrap_public_ip_v6[0].id,
62 |         include : var.use_ipv6,
63 |       },
64 |       ] : {
65 |       primary : ip.primary
66 |       name : ip.name
67 |       ip_address_version : ip.ip_address_version
68 |       public_ip_id : ip.public_ip_id
69 |       include : ip.include
70 |     } if ip.include
71 |     ]
72 |     content {
73 |       primary                       = ip_configuration.value.primary
74 |       name                          = ip_configuration.value.name
75 |       subnet_id                     = var.subnet_id
76 |       private_ip_address_version    = ip_configuration.value.ip_address_version
77 |       private_ip_address_allocation = "Dynamic"
78 |       public_ip_address_id          = ip_configuration.value.public_ip_id
79 |     }
80 |   }
81 | }
82 | 
83 | resource "azurerm_network_interface_backend_address_pool_association" "public_lb_bootstrap_v4" {
84 |   // This is required because terraform cannot calculate counts during plan phase completely and therefore the `vnet/public-lb.tf`
85 |   // conditionals need to be recreated. See https://github.com/hashicorp/terraform/issues/12570
86 |   count = (! var.private || ! var.outbound_udr) ? 1 : 0
87 | 
88 |   network_interface_id    = azurerm_network_interface.bootstrap.id
89 |   backend_address_pool_id = var.elb_backend_pool_v4_id
90 |   ip_configuration_name   = local.bootstrap_nic_ip_v4_configuration_name
91 | }
92 | 
93 | resource "azurerm_network_interface_backend_address_pool_association" "public_lb_bootstrap_v6" {
94 |   // This is required because terraform cannot calculate counts during plan phase completely and therefore the `vnet/public-lb.tf`
95 |   // conditionals need to be recreated. See https://github.com/hashicorp/terraform/issues/12570
96 |   count = var.use_ipv6 && (! var.private || ! var.outbound_udr) ? 1 : 0
97 | 
98 |   network_interface_id    = azurerm_network_interface.bootstrap.id
99 |   backend_address_pool_id = var.elb_backend_pool_v6_id
100 |   ip_configuration_name   = local.bootstrap_nic_ip_v6_configuration_name
101 | }
102 | 
103 | resource "azurerm_network_interface_backend_address_pool_association" "internal_lb_bootstrap_v4" {
104 |   count = var.use_ipv4 ? 
1 : 0 105 | 106 | network_interface_id = azurerm_network_interface.bootstrap.id 107 | backend_address_pool_id = var.ilb_backend_pool_v4_id 108 | ip_configuration_name = local.bootstrap_nic_ip_v4_configuration_name 109 | } 110 | 111 | resource "azurerm_network_interface_backend_address_pool_association" "internal_lb_bootstrap_v6" { 112 | count = var.use_ipv6 ? 1 : 0 113 | 114 | network_interface_id = azurerm_network_interface.bootstrap.id 115 | backend_address_pool_id = var.ilb_backend_pool_v6_id 116 | ip_configuration_name = local.bootstrap_nic_ip_v6_configuration_name 117 | } 118 | 119 | resource "azurerm_linux_virtual_machine" "bootstrap" { 120 | name = "${var.cluster_id}-bootstrap" 121 | location = var.region 122 | resource_group_name = var.resource_group_name 123 | network_interface_ids = [azurerm_network_interface.bootstrap.id] 124 | size = var.vm_size 125 | admin_username = "core" 126 | # The password is normally applied by WALA (the Azure agent), but this 127 | # isn't installed in RHCOS. As a result, this password is never set. It is 128 | # included here because it is required by the Azure ARM API. 129 | admin_password = "NotActuallyApplied!" 130 | disable_password_authentication = false 131 | 132 | identity { 133 | type = "UserAssigned" 134 | identity_ids = [var.identity] 135 | } 136 | 137 | os_disk { 138 | name = "${var.cluster_id}-bootstrap_OSDisk" # os disk name needs to match cluster-api convention 139 | caching = "ReadWrite" 140 | storage_account_type = "Premium_LRS" 141 | disk_size_gb = 100 142 | } 143 | 144 | source_image_id = var.vm_image 145 | 146 | computer_name = "${var.cluster_id}-bootstrap-vm" 147 | custom_data = base64encode(var.ignition) 148 | 149 | boot_diagnostics { 150 | storage_account_uri = var.storage_account.primary_blob_endpoint 151 | } 152 | 153 | depends_on = [ 154 | azurerm_network_interface_backend_address_pool_association.public_lb_bootstrap_v4, 155 | azurerm_network_interface_backend_address_pool_association.public_lb_bootstrap_v6, 156 | azurerm_network_interface_backend_address_pool_association.internal_lb_bootstrap_v4, 157 | azurerm_network_interface_backend_address_pool_association.internal_lb_bootstrap_v6 158 | ] 159 | } 160 | 161 | resource "azurerm_network_security_rule" "bootstrap_ssh_in" { 162 | name = "bootstrap_ssh_in" 163 | priority = 103 164 | direction = "Inbound" 165 | access = "Allow" 166 | protocol = "Tcp" 167 | source_port_range = "*" 168 | destination_port_range = "22" 169 | source_address_prefix = "*" 170 | destination_address_prefix = "*" 171 | resource_group_name = var.resource_group_name 172 | network_security_group_name = var.nsg_name 173 | } 174 | -------------------------------------------------------------------------------- /bootstrap/outputs.tf: -------------------------------------------------------------------------------- 1 | output "bootstrap_public_ip" { 2 | value = var.private ? null : azurerm_public_ip.bootstrap_public_ip_v4[0].ip_address 3 | } 4 | -------------------------------------------------------------------------------- /bootstrap/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vm_size" { 2 | type = string 3 | description = "The SKU ID for the bootstrap node." 4 | } 5 | 6 | variable "vm_image" { 7 | type = string 8 | description = "The resource id of the vm image used for bootstrap." 9 | } 10 | 11 | variable "region" { 12 | type = string 13 | description = "The region for the deployment." 
14 | }
15 | 
16 | variable "resource_group_name" {
17 |   type        = string
18 |   description = "The resource group name for the deployment."
19 | }
20 | 
21 | variable "cluster_id" {
22 |   type        = string
23 |   description = "The identifier for the cluster."
24 | }
25 | 
26 | variable "identity" {
27 |   type        = string
28 |   description = "The user assigned identity id for the vm."
29 | }
30 | 
31 | variable "ignition" {
32 |   type        = string
33 |   description = "The content of the bootstrap ignition file."
34 | }
35 | 
36 | variable "subnet_id" {
37 |   type        = string
38 |   description = "The subnet ID for the bootstrap node."
39 | }
40 | 
41 | variable "elb_backend_pool_v4_id" {
42 |   type        = string
43 |   description = "The external load balancer backend pool ID, used to attach the bootstrap NIC."
44 | }
45 | 
46 | variable "elb_backend_pool_v6_id" {
47 |   type        = string
48 |   description = "The external load balancer backend pool ID for IPv6, used to attach the bootstrap NIC."
49 | }
50 | 
51 | variable "ilb_backend_pool_v4_id" {
52 |   type        = string
53 |   description = "The internal load balancer backend pool ID, used to attach the bootstrap NIC."
54 | }
55 | 
56 | variable "ilb_backend_pool_v6_id" {
57 |   type        = string
58 |   description = "The internal load balancer backend pool ID for IPv6, used to attach the bootstrap NIC."
59 | }
60 | 
61 | variable "storage_account" {
62 |   type        = any
63 |   description = "the storage account for the cluster. It can be used for boot diagnostics."
64 | }
65 | 
66 | variable "tags" {
67 |   type        = map(string)
68 |   default     = {}
69 |   description = "tags to be applied to created resources."
70 | }
71 | 
72 | variable "nsg_name" {
73 |   type        = string
74 |   description = "The network security group for the subnet."
75 | }
76 | 
77 | variable "private" {
78 |   type        = bool
79 |   description = "This value determines if this is a private cluster or not."
80 | }
81 | 
82 | variable "use_ipv4" {
83 |   type        = bool
84 |   description = "This value determines if this cluster should use IPv4 networking."
85 | }
86 | 
87 | variable "use_ipv6" {
88 |   type        = bool
89 |   description = "This value determines if this cluster should use IPv6 networking."
90 | }
91 | 
92 | variable "emulate_single_stack_ipv6" {
93 |   type        = bool
94 |   description = "This determines whether a dual-stack cluster is configured to emulate single-stack IPv6."
95 | }
96 | 
97 | variable "outbound_udr" {
98 |   type    = bool
99 |   default = false
100 | 
101 |   description = <
" from
3 |   api_external_name = "api.${replace(var.cluster_domain, ".${var.base_domain}", "")}"
4 | }
5 | 
6 | resource "azurerm_private_dns_zone" "private" {
7 |   name                = var.cluster_domain
8 |   resource_group_name = var.resource_group_name
9 | 
10 |   depends_on = [azurerm_dns_cname_record.api_external_v4, azurerm_dns_cname_record.api_external_v6]
11 | }
12 | 
13 | resource "azurerm_private_dns_zone_virtual_network_link" "network" {
14 |   name                  = "${var.cluster_id}-network-link"
15 |   resource_group_name   = var.resource_group_name
16 |   private_dns_zone_name = azurerm_private_dns_zone.private.name
17 |   virtual_network_id    = var.virtual_network_id
18 | }
19 | 
20 | resource "azurerm_private_dns_a_record" "apiint_internal" {
21 |   // TODO: internal LB should block v4 for better single stack emulation (&& ! var.emulate_single_stack_ipv6)
22 |   // but RHCoS initramfs can't do v6 and so fails to ignite. https://issues.redhat.com/browse/GRPA-1343
23 |   count = var.use_ipv4 ? 1 : 0
24 | 
25 |   name                = "api-int"
26 |   zone_name           = azurerm_private_dns_zone.private.name
27 |   resource_group_name = var.resource_group_name
28 |   ttl                 = 300
29 |   records             = [var.internal_lb_ipaddress_v4]
30 | }
31 | 
32 | resource "azurerm_private_dns_aaaa_record" "apiint_internal_v6" {
33 |   count = var.use_ipv6 ? 1 : 0
34 | 
35 |   name                = "api-int"
36 |   zone_name           = azurerm_private_dns_zone.private.name
37 |   resource_group_name = var.resource_group_name
38 |   ttl                 = 300
39 |   records             = [var.internal_lb_ipaddress_v6]
40 | }
41 | 
42 | resource "azurerm_private_dns_a_record" "api_internal" {
43 |   // TODO: internal LB should block v4 for better single stack emulation (&& ! var.emulate_single_stack_ipv6)
44 |   // but RHCoS initramfs can't do v6 and so fails to ignite. https://issues.redhat.com/browse/GRPA-1343
45 |   count = var.use_ipv4 ? 1 : 0
46 | 
47 |   name                = "api"
48 |   zone_name           = azurerm_private_dns_zone.private.name
49 |   resource_group_name = var.resource_group_name
50 |   ttl                 = 300
51 |   records             = [var.internal_lb_ipaddress_v4]
52 | }
53 | 
54 | resource "azurerm_private_dns_aaaa_record" "api_internal_v6" {
55 |   count = var.use_ipv6 ? 1 : 0
56 | 
57 |   name                = "api"
58 |   zone_name           = azurerm_private_dns_zone.private.name
59 |   resource_group_name = var.resource_group_name
60 |   ttl                 = 300
61 |   records             = [var.internal_lb_ipaddress_v6]
62 | }
63 | 
64 | resource "azurerm_dns_cname_record" "api_external_v4" {
65 |   count = var.private || !var.use_ipv4 ? 0 : 1
66 | 
67 |   name                = local.api_external_name
68 |   zone_name           = var.base_domain
69 |   resource_group_name = var.base_domain_resource_group_name
70 |   ttl                 = 300
71 |   record              = var.external_lb_fqdn_v4
72 | }
73 | 
74 | resource "azurerm_dns_cname_record" "api_external_v6" {
75 |   count = var.private || !var.use_ipv6 ? 0 : 1
76 | 
77 |   name                = "v6-${local.api_external_name}"
78 |   zone_name           = var.base_domain
79 |   resource_group_name = var.base_domain_resource_group_name
80 |   ttl                 = 300
81 |   record              = var.external_lb_fqdn_v6
82 | }
83 | 
--------------------------------------------------------------------------------
/dns/variables.tf:
--------------------------------------------------------------------------------
1 | variable "tags" {
2 |   type        = map(string)
3 |   default     = {}
4 |   description = "tags to be applied to created resources."
5 | }
6 | 
7 | variable "cluster_id" {
8 |   description = "The identifier for the cluster."
9 |   type        = string
10 | }
11 | 
12 | variable "cluster_domain" {
13 |   description = "The domain for the cluster that all DNS records must belong to"
14 |   type        = string
15 | }
16 | 
17 | variable "base_domain" {
18 |   description = "The base domain used for public records"
19 |   type        = string
20 | }
21 | 
22 | variable "base_domain_resource_group_name" {
23 |   description = "The resource group where the base domain is"
24 |   type        = string
25 | }
26 | 
27 | variable "external_lb_fqdn_v4" {
28 |   description = "External API's LB fqdn for IPv4"
29 |   type        = string
30 | }
31 | 
32 | variable "external_lb_fqdn_v6" {
33 |   description = "External API's LB fqdn for IPv6"
34 |   type        = string
35 | }
36 | 
37 | variable "internal_lb_ipaddress_v4" {
38 |   description = "Internal API's LB IP v4 address"
39 |   type        = string
40 | }
41 | 
42 | variable "internal_lb_ipaddress_v6" {
43 |   description = "Internal API's LB IP v6 address"
44 |   type        = string
45 | }
46 | 
47 | variable "virtual_network_id" {
48 |   description = "The ID for Virtual Network that will be linked to the Private DNS zone."
49 |   type        = string
50 | }
51 | 
52 | variable "resource_group_name" {
53 |   type        = string
54 |   description = "Resource group for the deployment"
55 | }
56 | 
57 | variable "private" {
58 |   type        = bool
59 |   description = "This value determines if this is a private cluster or not."
60 | }
61 | 
62 | variable "use_ipv4" {
63 |   type        = bool
64 |   description = "This value determines if this cluster should use IPv4 networking."
65 | }
66 | 
67 | variable "use_ipv6" {
68 |   type        = bool
69 |   description = "This value determines if this cluster should use IPv6 networking."
70 | }
71 | 
72 | variable "emulate_single_stack_ipv6" {
73 |   type        = bool
74 |   description = "This determines whether a dual-stack cluster is configured to emulate single-stack IPv6."
75 | }
76 | 
--------------------------------------------------------------------------------
/dns/versions.tf:
--------------------------------------------------------------------------------
1 | 
2 | terraform {
3 |   required_version = ">= 0.13"
4 |   required_providers {
5 |     azurerm = {
6 |       source = "hashicorp/azurerm"
7 |     }
8 |   }
9 | }
10 | 
--------------------------------------------------------------------------------
/ignition/ignition.tf:
--------------------------------------------------------------------------------
1 | resource "azurerm_storage_account" "ignition" {
2 |   name                     = "ignition${local.cluster_nr}"
3 |   resource_group_name      = var.resource_group_name
4 |   location                 = var.azure_region
5 |   account_tier             = "Standard"
6 |   account_replication_type = "LRS"
7 | }
8 | 
9 | data "azurerm_storage_account_sas" "ignition" {
10 |   connection_string = azurerm_storage_account.ignition.primary_connection_string
11 |   https_only        = true
12 | 
13 |   resource_types {
14 |     service   = false
15 |     container = false
16 |     object    = true
17 |   }
18 | 
19 |   services {
20 |     blob  = true
21 |     queue = false
22 |     table = false
23 |     file  = false
24 |   }
25 | 
26 |   start = timestamp()
27 | 
28 |   expiry = timeadd(timestamp(), "24h")
29 | 
30 |   permissions {
31 |     read    = true
32 |     list    = true
33 |     create  = false
34 |     add     = false
35 |     delete  = false
36 |     process = false
37 |     write   = false
38 |     update  = false
39 |   }
40 | }
41 | 
42 | resource "azurerm_storage_container" "ignition" {
43 |   name                  = "ignition"
44 |   storage_account_name  = azurerm_storage_account.ignition.name
45 |   container_access_type = "private"
46 | }
47 | 
48 | locals {
49 |   installer_workspace     = "${path.root}/installer-files/"
50 |   openshift_installer_url = "${var.openshift_installer_url}/${var.openshift_version}"
51 |   cluster_nr              = join("", split("-", var.cluster_id))
52 | }
53 | 
54 | resource "null_resource" "download_binaries" {
55 |   provisioner "local-exec" {
56 |     when = create
57 |     command = templatefile("${path.module}/scripts/download.sh.tmpl", {
58 |       installer_workspace  = local.installer_workspace
59 |       installer_url        = local.openshift_installer_url
60 |       airgapped_enabled    = var.airgapped["enabled"]
61 |       airgapped_repository = var.airgapped["repository"]
62 |       pull_secret          = var.openshift_pull_secret
63 |       openshift_version    = var.openshift_version
64 |       path_root            = path.root
65 |     })
66 |   }
67 | 
68 |   provisioner "local-exec" {
69 |     when    = destroy
70 |     command = "rm -rf ./installer-files"
71 |   }
72 | 
73 | }
74 | 
75 | 
76 | resource "null_resource" "generate_manifests" {
77 |   triggers = {
78 |     install_config = data.template_file.install_config_yaml.rendered
79 |   }
80 | 
81 |   depends_on = [
82 |     null_resource.download_binaries,
83 |     local_file.install_config_yaml,
84 |   ]
85 | 
86 |   provisioner "local-exec" {
87 |     command = 
templatefile("${path.module}/scripts/manifests.sh.tmpl", { 88 | installer_workspace = local.installer_workspace 89 | }) 90 | } 91 | } 92 | 93 | # see templates.tf for generation of yaml config files 94 | 95 | resource "null_resource" "generate_ignition" { 96 | depends_on = [ 97 | null_resource.download_binaries, 98 | local_file.install_config_yaml, 99 | null_resource.generate_manifests, 100 | local_file.cluster-infrastructure-02-config, 101 | local_file.cluster-dns-02-config, 102 | local_file.cloud-provider-config, 103 | local_file.openshift-cluster-api_master-machines, 104 | local_file.openshift-cluster-api_worker-machineset, 105 | local_file.openshift-cluster-api_infra-machineset, 106 | #local_file.ingresscontroller-default, 107 | local_file.cloud-creds-secret-kube-system, 108 | #local_file.cluster-scheduler-02-config, 109 | local_file.cluster-monitoring-configmap, 110 | #local_file.private-cluster-outbound-service, 111 | ] 112 | 113 | provisioner "local-exec" { 114 | command = templatefile("${path.module}/scripts/ignition.sh.tmpl", { 115 | installer_workspace = local.installer_workspace 116 | cluster_id = var.cluster_id 117 | }) 118 | } 119 | } 120 | 121 | resource "azurerm_storage_blob" "ignition-bootstrap" { 122 | name = "bootstrap.ign" 123 | source = "${local.installer_workspace}/bootstrap.ign" 124 | storage_account_name = azurerm_storage_account.ignition.name 125 | storage_container_name = azurerm_storage_container.ignition.name 126 | type = "Block" 127 | depends_on = [ 128 | null_resource.generate_ignition 129 | ] 130 | } 131 | 132 | resource "azurerm_storage_blob" "ignition-master" { 133 | name = "master.ign" 134 | source = "${local.installer_workspace}/master.ign" 135 | storage_account_name = azurerm_storage_account.ignition.name 136 | storage_container_name = azurerm_storage_container.ignition.name 137 | type = "Block" 138 | depends_on = [ 139 | null_resource.generate_ignition 140 | ] 141 | } 142 | 143 | resource "azurerm_storage_blob" "ignition-worker" { 144 | name = "worker.ign" 145 | source = "${local.installer_workspace}/worker.ign" 146 | storage_account_name = azurerm_storage_account.ignition.name 147 | storage_container_name = azurerm_storage_container.ignition.name 148 | type = "Block" 149 | depends_on = [ 150 | null_resource.generate_ignition 151 | ] 152 | } 153 | 154 | data "ignition_config" "master_redirect" { 155 | replace { 156 | source = "${azurerm_storage_blob.ignition-master.url}${data.azurerm_storage_account_sas.ignition.sas}" 157 | } 158 | } 159 | 160 | data "ignition_config" "bootstrap_redirect" { 161 | replace { 162 | source = "${azurerm_storage_blob.ignition-bootstrap.url}${data.azurerm_storage_account_sas.ignition.sas}" 163 | } 164 | } 165 | 166 | data "ignition_config" "worker_redirect" { 167 | replace { 168 | source = "${azurerm_storage_blob.ignition-worker.url}${data.azurerm_storage_account_sas.ignition.sas}" 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /ignition/output.tf: -------------------------------------------------------------------------------- 1 | output "bootstrap_ignition" { 2 | value = data.ignition_config.bootstrap_redirect.rendered 3 | } 4 | 5 | output "master_ignition" { 6 | value = data.ignition_config.master_redirect.rendered 7 | } 8 | -------------------------------------------------------------------------------- /ignition/scripts/download.sh.tmpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | test -e 
${installer_workspace} || mkdir -p ${installer_workspace} 4 | 5 | case $(uname -s) in 6 | Darwin) 7 | wget -r -l1 -np -nd -q ${installer_url} -P ${installer_workspace} -A 'openshift-install-mac-4*.tar.gz' 8 | tar zxvf ${installer_workspace}/openshift-install-mac-4*.tar.gz -C ${installer_workspace} 9 | wget -r -l1 -np -nd -q ${installer_url} -P ${installer_workspace} -A 'openshift-client-mac-4*.tar.gz' 10 | tar zxvf ${installer_workspace}/openshift-client-mac-4*.tar.gz -C ${installer_workspace} 11 | wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 -O ${installer_workspace}/jq > /dev/null 2>&1 12 | ;; 13 | Linux) 14 | wget -r -l1 -np -nd -q ${installer_url} -P ${installer_workspace} -A 'openshift-install-linux-4*.tar.gz' 15 | tar zxvf ${installer_workspace}/openshift-install-linux-4*.tar.gz -C ${installer_workspace} 16 | wget -r -l1 -np -nd -q ${installer_url} -P ${installer_workspace} -A 'openshift-client-linux-4*.tar.gz' 17 | tar zxvf ${installer_workspace}/openshift-client-linux-4*.tar.gz -C ${installer_workspace} 18 | wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 -O ${installer_workspace}/jq 19 | ;; 20 | *) 21 | exit 1;; 22 | esac 23 | chmod u+x ${installer_workspace}/jq 24 | rm -f ${installer_workspace}/*.tar.gz ${installer_workspace}/robots*.txt* ${installer_workspace}/README.md 25 | if [[ "${airgapped_enabled}" == "true" ]]; then 26 | ${installer_workspace}/oc adm release extract -a ${pull_secret} --command=openshift-install ${airgapped_repository}:${openshift_version}-x86_64 27 | mv ${path_root}/openshift-install ${installer_workspace} 28 | fi 29 | -------------------------------------------------------------------------------- /ignition/scripts/ignition.sh.tmpl: -------------------------------------------------------------------------------- 1 | ${installer_workspace}/openshift-install --dir=${installer_workspace} create ignition-configs --log-level=debug 2 | ${installer_workspace}/jq '.infraID="${cluster_id}"' ${installer_workspace}/metadata.json > _metadata.json 3 | mv _metadata.json ${installer_workspace}/metadata.json 4 | -------------------------------------------------------------------------------- /ignition/scripts/manifests.sh.tmpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ${installer_workspace}/openshift-install --dir=${installer_workspace} create manifests --log-level=debug 4 | rm ${installer_workspace}/openshift/99_openshift-cluster-api_worker-machineset-* 5 | rm ${installer_workspace}/openshift/99_openshift-cluster-api_master-machines-* 6 | -------------------------------------------------------------------------------- /ignition/templates.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "install_config_yaml" { 2 | template = < 1}zone: "${var.availability_zones[count.index]}"%{endif} 232 | EOF 233 | } 234 | 235 | resource "local_file" "openshift-cluster-api_master-machines" { 236 | count = var.master_count 237 | content = element(data.template_file.openshift-cluster-api_master-machines.*.rendered, count.index) 238 | filename = "${local.installer_workspace}/openshift/99_openshift-cluster-api_master-machines-${count.index}.yaml" 239 | depends_on = [ 240 | null_resource.download_binaries, 241 | null_resource.generate_manifests, 242 | ] 243 | } 244 | locals { 245 | zone_node_replicas = [for idx in range(length(var.availability_zones)) : floor(var.node_count / length(var.availability_zones)) + (idx + 1 
> (var.node_count % length(var.availability_zones)) ? 0 : 1)] 246 | zone_infra_replicas = [for idx in range(length(var.availability_zones)) : floor(var.infra_count / length(var.availability_zones)) + (idx + 1 > (var.infra_count % length(var.availability_zones)) ? 0 : 1)] 247 | } 248 | 249 | data "template_file" "openshift-cluster-api_worker-machineset" { 250 | count = length(var.availability_zones) 251 | template = < 1}zone: "${var.availability_zones[count.index]}"%{endif} 315 | EOF 316 | } 317 | 318 | resource "local_file" "openshift-cluster-api_worker-machineset" { 319 | count = length(var.availability_zones) 320 | content = element(data.template_file.openshift-cluster-api_worker-machineset.*.rendered, count.index) 321 | filename = "${local.installer_workspace}/openshift/99_openshift-cluster-api_worker-machineset-${count.index}.yaml" 322 | depends_on = [ 323 | null_resource.download_binaries, 324 | null_resource.generate_manifests, 325 | ] 326 | } 327 | 328 | data "template_file" "openshift-cluster-api_infra-machineset" { 329 | count = var.infra_count > 0 ? length(var.availability_zones) : 0 330 | template = < 1}zone: "${var.availability_zones[count.index]}"%{endif} 396 | EOF 397 | } 398 | 399 | resource "local_file" "openshift-cluster-api_infra-machineset" { 400 | count = var.infra_count > 0 ? length(var.availability_zones) : 0 401 | content = element(data.template_file.openshift-cluster-api_infra-machineset.*.rendered, count.index) 402 | filename = "${local.installer_workspace}/openshift/99_openshift-cluster-api_infra-machineset-${count.index}.yaml" 403 | depends_on = [ 404 | null_resource.download_binaries, 405 | null_resource.generate_manifests, 406 | ] 407 | } 408 | 409 | 410 | data "template_file" "cloud-creds-secret-kube-system" { 411 | template = < 0 ? 1 : 0 478 | content = data.template_file.cluster-monitoring-configmap.rendered 479 | filename = "${local.installer_workspace}/openshift/99_cluster-monitoring-configmap.yml" 480 | depends_on = [ 481 | null_resource.download_binaries, 482 | null_resource.generate_manifests, 483 | ] 484 | } 485 | 486 | 487 | data "template_file" "configure-image-registry-job-serviceaccount" { 488 | template = < 0 ? 1 : 0 499 | content = data.template_file.configure-image-registry-job-serviceaccount.rendered 500 | filename = "${local.installer_workspace}/openshift/99_configure-image-registry-job-serviceaccount.yml" 501 | depends_on = [ 502 | null_resource.download_binaries, 503 | null_resource.generate_manifests, 504 | ] 505 | } 506 | 507 | data "template_file" "configure-image-registry-job-clusterrole" { 508 | template = < 0 ? 1 : 0 523 | content = data.template_file.configure-image-registry-job-clusterrole.rendered 524 | filename = "${local.installer_workspace}/openshift/99_configure-image-registry-job-clusterrole.yml" 525 | depends_on = [ 526 | null_resource.download_binaries, 527 | null_resource.generate_manifests, 528 | ] 529 | } 530 | 531 | data "template_file" "configure-image-registry-job-clusterrolebinding" { 532 | template = < 0 ? 
1 : 0 550 | content = data.template_file.configure-image-registry-job-clusterrolebinding.rendered 551 | filename = "${local.installer_workspace}/openshift/99_configure-image-registry-job-clusterrolebinding.yml" 552 | depends_on = [ 553 | null_resource.download_binaries, 554 | null_resource.generate_manifests, 555 | ] 556 | } 557 | 558 | data "template_file" "configure-image-registry-job" { 559 | template = </dev/null 2>&1; do sleep 1;done;/usr/bin/oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\": {\"nodeSelector\": {\"node-role.kubernetes.io/infra\": \"\"}}}'"] 580 | restartPolicy: Never 581 | EOF 582 | } 583 | 584 | resource "local_file" "configure-image-registry-job" { 585 | count = var.infra_count > 0 ? 1 : 0 586 | content = data.template_file.configure-image-registry-job.rendered 587 | filename = "${local.installer_workspace}/openshift/99_configure-image-registry-job.yml" 588 | depends_on = [ 589 | null_resource.download_binaries, 590 | null_resource.generate_manifests, 591 | ] 592 | } 593 | 594 | data "template_file" "configure-ingress-job-serviceaccount" { 595 | template = < 0 ? 1 : 0 606 | content = data.template_file.configure-ingress-job-serviceaccount.rendered 607 | filename = "${local.installer_workspace}/openshift/99_configure-ingress-job-serviceaccount.yml" 608 | depends_on = [ 609 | null_resource.download_binaries, 610 | null_resource.generate_manifests, 611 | ] 612 | } 613 | 614 | data "template_file" "configure-ingress-job-clusterrole" { 615 | template = < 0 ? 1 : 0 630 | content = data.template_file.configure-ingress-job-clusterrole.rendered 631 | filename = "${local.installer_workspace}/openshift/99_configure-ingress-job-clusterrole.yml" 632 | depends_on = [ 633 | null_resource.download_binaries, 634 | null_resource.generate_manifests, 635 | ] 636 | } 637 | 638 | data "template_file" "configure-ingress-job-clusterrolebinding" { 639 | template = < 0 ? 1 : 0 657 | content = data.template_file.configure-ingress-job-clusterrolebinding.rendered 658 | filename = "${local.installer_workspace}/openshift/99_configure-ingress-job-clusterrolebinding.yml" 659 | depends_on = [ 660 | null_resource.download_binaries, 661 | null_resource.generate_manifests, 662 | ] 663 | } 664 | 665 | data "template_file" "configure-ingress-job" { 666 | template = </dev/null 2>&1; do sleep 1;done;/usr/bin/oc patch ingresscontrollers.operator.openshift.io default -n openshift-ingress-operator --type merge --patch '{\"spec\": {\"nodePlacement\": {\"nodeSelector\": {\"matchLabels\": {\"node-role.kubernetes.io/infra\": \"\"}}}}}'"] 687 | restartPolicy: Never 688 | EOF 689 | } 690 | 691 | resource "local_file" "configure-ingress-job" { 692 | count = var.infra_count > 0 ? 1 : 0 693 | content = data.template_file.configure-ingress-job.rendered 694 | filename = "${local.installer_workspace}/openshift/99_configure-ingress-job.yml" 695 | depends_on = [ 696 | null_resource.download_binaries, 697 | null_resource.generate_manifests, 698 | ] 699 | } 700 | 701 | 702 | data "template_file" "private-cluster-outbound-service" { 703 | template = < 1 ? var.availability_zones[count.index] : var.availability_zones[0] 90 | resource_group_name = var.resource_group_name 91 | network_interface_ids = [element(azurerm_network_interface.master.*.id, count.index)] 92 | size = var.vm_size 93 | admin_username = "core" 94 | # The password is normally applied by WALA (the Azure agent), but this 95 | # isn't installed in RHCOS. As a result, this password is never set. 
It is 96 | # included here because it is required by the Azure ARM API. 97 | admin_password = "NotActuallyApplied!" 98 | disable_password_authentication = false 99 | 100 | identity { 101 | type = "UserAssigned" 102 | identity_ids = [var.identity] 103 | } 104 | 105 | os_disk { 106 | name = "${var.cluster_id}-master-${count.index}_OSDisk" # os disk name needs to match cluster-api convention 107 | caching = "ReadOnly" 108 | storage_account_type = var.os_volume_type 109 | disk_size_gb = var.os_volume_size 110 | } 111 | 112 | source_image_id = var.vm_image 113 | 114 | //we don't provide a ssh key, because it is set with ignition. 115 | //it is required to provide at least 1 auth method to deploy a linux vm 116 | computer_name = "${var.cluster_id}-master-${count.index}" 117 | custom_data = base64encode(var.ignition) 118 | 119 | boot_diagnostics { 120 | storage_account_uri = var.storage_account.primary_blob_endpoint 121 | } 122 | } 123 | 124 | -------------------------------------------------------------------------------- /master/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ip_addresses" { 2 | value = azurerm_network_interface.master.*.private_ip_address 3 | } 4 | -------------------------------------------------------------------------------- /master/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | description = "The region for the deployment." 4 | } 5 | 6 | variable "resource_group_name" { 7 | type = string 8 | description = "The resource group name for the deployment." 9 | } 10 | 11 | variable "cluster_id" { 12 | type = string 13 | } 14 | 15 | variable "vm_size" { 16 | type = string 17 | } 18 | 19 | variable "vm_image" { 20 | type = string 21 | description = "The resource id of the vm image used for masters." 22 | } 23 | 24 | variable "identity" { 25 | type = string 26 | description = "The user assigned identity id for the vm." 27 | } 28 | 29 | variable "instance_count" { 30 | type = string 31 | } 32 | 33 | variable "elb_backend_pool_v4_id" { 34 | type = string 35 | } 36 | 37 | variable "elb_backend_pool_v6_id" { 38 | type = string 39 | } 40 | 41 | variable "ilb_backend_pool_v4_id" { 42 | type = string 43 | } 44 | 45 | variable "ilb_backend_pool_v6_id" { 46 | type = string 47 | } 48 | 49 | variable "ignition_master" { 50 | type = string 51 | default = "" 52 | } 53 | 54 | variable "kubeconfig_content" { 55 | type = string 56 | default = "" 57 | } 58 | 59 | variable "subnet_id" { 60 | type = string 61 | description = "The subnet to attach the masters to." 62 | } 63 | 64 | variable "os_volume_type" { 65 | type = string 66 | description = "The type of the volume for the root block device." 67 | } 68 | 69 | variable "os_volume_size" { 70 | type = string 71 | description = "The size of the volume in gigabytes for the root block device." 72 | } 73 | 74 | variable "tags" { 75 | type = map(string) 76 | default = {} 77 | description = "tags to be applied to created resources." 78 | } 79 | 80 | variable "storage_account" { 81 | type = any 82 | description = "the storage account for the cluster. It can be used for boot diagnostics." 83 | } 84 | 85 | variable "ignition" { 86 | type = string 87 | } 88 | 89 | variable "availability_zones" { 90 | type = list(string) 91 | description = "List of the availability zones in which to create the masters. The length of this list must match instance_count." 
92 | }
93 | 
94 | variable "private" {
95 |   type        = bool
96 |   description = "This value determines if this is a private cluster or not."
97 | }
98 | 
99 | variable "use_ipv4" {
100 |   type        = bool
101 |   description = "This value determines if this cluster should use IPv4 networking."
102 | }
103 | 
104 | variable "use_ipv6" {
105 |   type        = bool
106 |   description = "This value determines if this cluster should use IPv6 networking."
107 | }
108 | 
109 | variable "emulate_single_stack_ipv6" {
110 |   type        = bool
111 |   description = "This determines whether a dual-stack cluster is configured to emulate single-stack IPv6."
112 | }
113 | 
114 | variable "outbound_udr" {
115 |   type    = bool
116 |   default = false
117 | 
118 |   description = <
[elided in this dump: the remainder of master/variables.tf, master/versions.tf, and the source of /media/topology.svg — a topology diagram of the cluster VNet showing the public and internal load balancers (public-lb-ip-v4, internal-lb-ip-v4, api-internal and sint probes), the bootstrap, master, and worker VMs, and the cluster NSG]
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_id" {
2 |   value = local.cluster_id
3 | }
4 | 
5 | output "resource_group" {
6 |   value = data.azurerm_resource_group.main.name
7 | }
8 | 
9 | output "bootstrap_public_ip" {
10 |   value = module.bootstrap.bootstrap_public_ip
11 | }
12 | 
13 | output "api-int-ipaddress" {
14 |   value = var.openshift_byo_dns ? module.vnet.internal_lb_ip_v4_address : null
15 | }
16 | 
17 | output "api-ipaddress" {
18 |   value = var.openshift_byo_dns ? module.vnet.public_lb_ip_v4_address : null
19 | }
20 | 
--------------------------------------------------------------------------------
/variables-azure.tf:
--------------------------------------------------------------------------------
1 | variable "azure_config_version" {
2 |   description = <
> 1
240 |     error_message = "The worker_count value must be greater than 1."
241 | } 242 | } 243 | 244 | variable "infra_count" { 245 | type = string 246 | default = 0 247 | } 248 | 249 | variable "azure_infra_vm_type" { 250 | type = string 251 | default = "Standard_D4s_v3" 252 | } 253 | 254 | variable "azure_worker_vm_type" { 255 | type = string 256 | default = "Standard_D8s_v3" 257 | } 258 | 259 | variable "airgapped" { 260 | type = map(string) 261 | default = { 262 | enabled = false 263 | repository = "" 264 | } 265 | } 266 | 267 | variable "proxy_config" { 268 | type = map(string) 269 | default = { 270 | enabled = false 271 | httpProxy = "http://user:password@ip:port" 272 | httpsProxy = "http://user:password@ip:port" 273 | noProxy = "ip1,ip2,ip3,.example.com,cidr/mask" 274 | } 275 | } 276 | 277 | variable "openshift_additional_trust_bundle" { 278 | description = "path to a file with all your additional ca certificates" 279 | type = string 280 | default = "" 281 | } 282 | 283 | variable "openshift_ssh_key" { 284 | description = "SSH Public Key to use for OpenShift Installation" 285 | type = string 286 | default = "" 287 | } 288 | 289 | variable "openshift_byo_dns" { 290 | description = "Do not deploy any public or private DNS zone into Azure" 291 | type = bool 292 | default = false 293 | } 294 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.13" 4 | required_providers { 5 | azurerm = { 6 | source = "hashicorp/azurerm" 7 | } 8 | local = { 9 | source = "hashicorp/local" 10 | } 11 | random = { 12 | source = "hashicorp/random" 13 | } 14 | tls = { 15 | source = "hashicorp/tls" 16 | } 17 | template = { 18 | source = "hashicorp/template" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /vnet/common.tf: -------------------------------------------------------------------------------- 1 | # Canonical internal state definitions for this module. 2 | # read only: only locals and data source definitions allowed. No resources or module blocks in this file 3 | 4 | data "azurerm_subnet" "preexisting_master_subnet" { 5 | count = var.preexisting_network ? 1 : 0 6 | 7 | resource_group_name = var.network_resource_group_name 8 | virtual_network_name = var.virtual_network_name 9 | name = var.master_subnet 10 | } 11 | 12 | data "azurerm_subnet" "preexisting_worker_subnet" { 13 | count = var.preexisting_network ? 1 : 0 14 | 15 | resource_group_name = var.network_resource_group_name 16 | virtual_network_name = var.virtual_network_name 17 | name = var.worker_subnet 18 | } 19 | 20 | data "azurerm_virtual_network" "preexisting_virtual_network" { 21 | count = var.preexisting_network ? 1 : 0 22 | 23 | resource_group_name = var.network_resource_group_name 24 | name = var.virtual_network_name 25 | } 26 | 27 | // Only reference data sources which are guaranteed to exist at any time (above) in this locals{} block 28 | locals { 29 | master_subnet_cidr_v4 = var.use_ipv4 ? cidrsubnet(var.vnet_v4_cidrs[0], 3, 0) : null #master subnet is a smaller subnet within the vnet. i.e from /21 to /24 30 | master_subnet_cidr_v6 = var.use_ipv6 ? cidrsubnet(var.vnet_v6_cidrs[0], 16, 0) : null #master subnet is a smaller subnet within the vnet. i.e from /48 to /64 31 | 32 | worker_subnet_cidr_v4 = var.use_ipv4 ? cidrsubnet(var.vnet_v4_cidrs[0], 3, 1) : null #node subnet is a smaller subnet within the vnet. i.e from /21 to /24 33 | worker_subnet_cidr_v6 = var.use_ipv6 ? 
-------------------------------------------------------------------------------- /vnet/internal-lb.tf: --------------------------------------------------------------------------------
1 | locals {
2 |   internal_lb_frontend_ip_v4_configuration_name = "internal-lb-ip-v4"
3 |   internal_lb_frontend_ip_v6_configuration_name = "internal-lb-ip-v6"
4 | }
5 |
6 | resource "azurerm_lb" "internal" {
7 |   sku                 = "Standard"
8 |   name                = "${var.cluster_id}-internal"
9 |   resource_group_name = var.resource_group_name
10 |   location            = var.region
11 |
12 |   dynamic "frontend_ip_configuration" {
13 |     for_each = [for ip in [
14 |       // TODO: internal LB should block v4 for better single stack emulation (&& ! var.emulate_single_stack_ipv6)
15 |       // but RHCoS initramfs can't do v6 and so fails to ignite. https://issues.redhat.com/browse/GRPA-1343
16 |       { name : local.internal_lb_frontend_ip_v4_configuration_name, ipv6 : false, include : var.use_ipv4 },
17 |       { name : local.internal_lb_frontend_ip_v6_configuration_name, ipv6 : true, include : var.use_ipv6 },
18 |       ] : {
19 |       name : ip.name
20 |       ipv6 : ip.ipv6
21 |       include : ip.include
22 |     } if ip.include
23 |     ]
24 |
25 |     content {
26 |       name                          = frontend_ip_configuration.value.name
27 |       subnet_id                     = local.master_subnet_id
28 |       private_ip_address_version    = frontend_ip_configuration.value.ipv6 ? "IPv6" : "IPv4"
29 |       # WORKAROUND: Allocate a high ipv6 internal LB address to avoid the race with NIC allocation (a master and the LB
30 |       # were being assigned the same IP dynamically). Issue is being tracked as a support ticket to Azure.
31 |       private_ip_address_allocation = frontend_ip_configuration.value.ipv6 ? "Static" : "Dynamic"
32 |       private_ip_address            = frontend_ip_configuration.value.ipv6 ? cidrhost(local.master_subnet_cidr_v6, -2) : null
33 |     }
34 |   }
35 | }
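The WORKAROUND above pins the IPv6 frontend to a fixed high address so it cannot collide with dynamically assigned master NICs. `cidrhost(prefix, -1)` is the last address in a range, so `-2` is the one just below it; with a hypothetical /64 master subnet:

```hcl
# cidrhost("fd00::/64", -2)  => "fd00::ffff:ffff:ffff:fffe"
```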
36 |
37 | resource "azurerm_lb_backend_address_pool" "internal_lb_controlplane_pool_v4" {
38 |   count = var.use_ipv4 ? 1 : 0
39 |
40 |   resource_group_name = var.resource_group_name
41 |   loadbalancer_id     = azurerm_lb.internal.id
42 |   name                = var.cluster_id
43 | }
44 |
45 | resource "azurerm_lb_backend_address_pool" "internal_lb_controlplane_pool_v6" {
46 |   count = var.use_ipv6 ? 1 : 0
47 |
48 |   resource_group_name = var.resource_group_name
49 |   loadbalancer_id     = azurerm_lb.internal.id
50 |   name                = "${var.cluster_id}-IPv6"
51 | }
52 |
53 | resource "azurerm_lb_rule" "internal_lb_rule_api_internal_v4" {
54 |   count = var.use_ipv4 ? 1 : 0
55 |
56 |   name                           = "api-internal-v4"
57 |   resource_group_name            = var.resource_group_name
58 |   protocol                       = "Tcp"
59 |   backend_address_pool_id        = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v4[0].id
60 |   loadbalancer_id                = azurerm_lb.internal.id
61 |   frontend_port                  = 6443
62 |   backend_port                   = 6443
63 |   frontend_ip_configuration_name = local.internal_lb_frontend_ip_v4_configuration_name
64 |   enable_floating_ip             = false
65 |   idle_timeout_in_minutes        = 30
66 |   load_distribution              = "Default"
67 |   probe_id                       = azurerm_lb_probe.internal_lb_probe_api_internal.id
68 | }
69 |
70 | resource "azurerm_lb_rule" "internal_lb_rule_api_internal_v6" {
71 |   count = var.use_ipv6 ? 1 : 0
72 |
73 |   name                           = "api-internal-v6"
74 |   resource_group_name            = var.resource_group_name
75 |   protocol                       = "Tcp"
76 |   backend_address_pool_id        = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id
77 |   loadbalancer_id                = azurerm_lb.internal.id
78 |   frontend_port                  = 6443
79 |   backend_port                   = 6443
80 |   frontend_ip_configuration_name = local.internal_lb_frontend_ip_v6_configuration_name
81 |   enable_floating_ip             = false
82 |   idle_timeout_in_minutes        = 30
83 |   load_distribution              = "Default"
84 |   probe_id                       = azurerm_lb_probe.internal_lb_probe_api_internal.id
85 | }
86 |
87 | resource "azurerm_lb_rule" "internal_lb_rule_sint_v4" {
88 |   count = var.use_ipv4 ? 1 : 0
89 |
90 |   name                           = "sint-v4"
91 |   resource_group_name            = var.resource_group_name
92 |   protocol                       = "Tcp"
93 |   backend_address_pool_id        = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v4[0].id
94 |   loadbalancer_id                = azurerm_lb.internal.id
95 |   frontend_port                  = 22623
96 |   backend_port                   = 22623
97 |   frontend_ip_configuration_name = local.internal_lb_frontend_ip_v4_configuration_name
98 |   enable_floating_ip             = false
99 |   idle_timeout_in_minutes        = 30
100 |   load_distribution              = "Default"
101 |   probe_id                       = azurerm_lb_probe.internal_lb_probe_sint.id
102 | }
103 |
104 | resource "azurerm_lb_rule" "internal_lb_rule_sint_v6" {
105 |   count = var.use_ipv6 ? 1 : 0
106 |
107 |   name                           = "sint-v6"
108 |   resource_group_name            = var.resource_group_name
109 |   protocol                       = "Tcp"
110 |   backend_address_pool_id        = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id
111 |   loadbalancer_id                = azurerm_lb.internal.id
112 |   frontend_port                  = 22623
113 |   backend_port                   = 22623
114 |   frontend_ip_configuration_name = local.internal_lb_frontend_ip_v6_configuration_name
115 |   enable_floating_ip             = false
116 |   idle_timeout_in_minutes        = 30
117 |   load_distribution              = "Default"
118 |   probe_id                       = azurerm_lb_probe.internal_lb_probe_sint.id
119 | }
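Here 6443 fronts the Kubernetes API server and 22623 fronts the Machine Config Server, which serves ignition configs to booting nodes. One compatibility note: `versions.tf` leaves the azurerm provider unconstrained, while the pools and rules above still pass `resource_group_name`, an argument the 3.x provider removed from load balancer child resources. If `terraform plan` rejects that argument, one option is pinning the provider to the 2.x line; a sketch, where the exact constraint is an assumption:

```hcl
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~> 2.99" # assumed: last 2.x series, which still accepts resource_group_name here
    }
  }
}
```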
120 |
121 | resource "azurerm_lb_probe" "internal_lb_probe_sint" {
122 |   name                = "sint-probe"
123 |   resource_group_name = var.resource_group_name
124 |   interval_in_seconds = 5
125 |   number_of_probes    = 2
126 |   loadbalancer_id     = azurerm_lb.internal.id
127 |   port                = 22623
128 |   protocol            = "HTTPS"
129 |   request_path        = "/healthz"
130 | }
131 |
132 | resource "azurerm_lb_probe" "internal_lb_probe_api_internal" {
133 |   name                = "api-internal-probe"
134 |   resource_group_name = var.resource_group_name
135 |   interval_in_seconds = 5
136 |   number_of_probes    = 2
137 |   loadbalancer_id     = azurerm_lb.internal.id
138 |   port                = 6443
139 |   protocol            = "HTTPS"
140 |   request_path        = "/readyz"
141 | }
142 |
-------------------------------------------------------------------------------- /vnet/nsg.tf: --------------------------------------------------------------------------------
1 | resource "azurerm_network_security_group" "cluster" {
2 |   name                = "${var.cluster_id}-nsg"
3 |   location            = var.region
4 |   resource_group_name = var.resource_group_name
5 | }
6 |
7 | resource "azurerm_subnet_network_security_group_association" "master" {
8 |   count = var.preexisting_network ? 0 : 1
9 |
10 |   subnet_id                 = azurerm_subnet.master_subnet[0].id
11 |   network_security_group_id = azurerm_network_security_group.cluster.id
12 | }
13 |
14 | resource "azurerm_subnet_network_security_group_association" "worker" {
15 |   count = var.preexisting_network ? 0 : 1
16 |
17 |   subnet_id                 = azurerm_subnet.worker_subnet[0].id
18 |   network_security_group_id = azurerm_network_security_group.cluster.id
19 | }
20 |
21 | resource "azurerm_network_security_rule" "apiserver_in" {
22 |   name                        = "apiserver_in"
23 |   priority                    = 101
24 |   direction                   = "Inbound"
25 |   access                      = "Allow"
26 |   protocol                    = "Tcp"
27 |   source_port_range           = "*"
28 |   destination_port_range      = "6443"
29 |   source_address_prefix       = "*"
30 |   destination_address_prefix  = "*"
31 |   resource_group_name         = var.resource_group_name
32 |   network_security_group_name = azurerm_network_security_group.cluster.name
33 | }
34 |
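Note that `apiserver_in` admits TCP/6443 from any source. For a private or airgapped deployment you may prefer to scope it to a known range; a hedged variant of the same rule (the source CIDR is a hypothetical on-premises range, not something this repo defines):

```hcl
resource "azurerm_network_security_rule" "apiserver_in" {
  name                        = "apiserver_in"
  priority                    = 101
  direction                   = "Inbound"
  access                      = "Allow"
  protocol                    = "Tcp"
  source_port_range           = "*"
  destination_port_range      = "6443"
  source_address_prefix       = "10.20.0.0/16" # hypothetical corporate network range
  destination_address_prefix  = "*"
  resource_group_name         = var.resource_group_name
  network_security_group_name = azurerm_network_security_group.cluster.name
}
```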
-------------------------------------------------------------------------------- /vnet/outputs.tf: --------------------------------------------------------------------------------
1 | output "public_lb_backend_pool_v4_id" {
2 |   value = local.need_public_ipv4 ? azurerm_lb_backend_address_pool.public_lb_pool_v4[0].id : null
3 | }
4 |
5 | output "public_lb_backend_pool_v6_id" {
6 |   value = local.need_public_ipv6 ? azurerm_lb_backend_address_pool.public_lb_pool_v6[0].id : null
7 | }
8 |
9 | output "internal_lb_backend_pool_v4_id" {
10 |   value = var.use_ipv4 ? azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v4[0].id : null
11 | }
12 |
13 | output "internal_lb_backend_pool_v6_id" {
14 |   value = var.use_ipv6 ? azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id : null
15 | }
16 |
17 | output "public_lb_id" {
18 |   value = var.private ? null : azurerm_lb.public.id
19 | }
20 |
21 | output "public_lb_pip_v4_fqdn" {
22 |   value = local.need_public_ipv4 ? data.azurerm_public_ip.cluster_public_ip_v4[0].fqdn : null
23 | }
24 |
25 | output "public_lb_pip_v6_fqdn" {
26 |   value = local.need_public_ipv6 ? data.azurerm_public_ip.cluster_public_ip_v6[0].fqdn : null
27 | }
28 |
29 | output "public_lb_ip_v4_address" {
30 |   value = local.need_public_ipv4 ? data.azurerm_public_ip.cluster_public_ip_v4[0].ip_address : null
31 | }
32 |
33 | output "public_lb_ip_v6_address" {
34 |   value = local.need_public_ipv6 ? data.azurerm_public_ip.cluster_public_ip_v6[0].ip_address : null
35 | }
36 |
37 | output "internal_lb_ip_v4_address" {
38 |   value = var.use_ipv4 ? azurerm_lb.internal.private_ip_addresses[0] : null
39 | }
40 |
41 | output "internal_lb_ip_v6_address" {
42 |   // TODO: internal LB should block v4 for better single stack emulation (&& ! var.emulate_single_stack_ipv6)
43 |   // but RHCoS initramfs can't do v6 and so fails to ignite. https://issues.redhat.com/browse/GRPA-1343
44 |   value = var.use_ipv6 ? azurerm_lb.internal.private_ip_addresses[1] : null
45 | }
46 |
47 | output "cluster_nsg_name" {
48 |   value = azurerm_network_security_group.cluster.name
49 | }
50 |
51 | output "virtual_network_id" {
52 |   value = local.virtual_network_id
53 | }
54 |
55 | output "master_subnet_id" {
56 |   value = local.master_subnet_id
57 | }
58 |
59 | output "worker_subnet_id" {
60 |   value = local.worker_subnet_id
61 | }
62 |
63 | output "private" {
64 |   value = var.private
65 | }
66 |
-------------------------------------------------------------------------------- /vnet/public-lb.tf: --------------------------------------------------------------------------------
1 | locals {
2 |   public_lb_frontend_ip_v4_configuration_name = "public-lb-ip-v4"
3 |   public_lb_frontend_ip_v6_configuration_name = "public-lb-ip-v6"
4 | }
5 |
6 | locals {
7 |   // DEBUG: Azure apparently requires dual stack LB for v6
8 |   need_public_ipv4 = ! var.private || ! var.outbound_udr
9 |
10 |   need_public_ipv6 = var.use_ipv6 && (! var.private || ! var.outbound_udr)
11 | }
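Spelled out, the two conditionals above reduce to the following decision table (a reading aid, not code from this module):

```hcl
# private | outbound_udr | need_public_ipv4
# false   | false        | true   -- public cluster, egress via the public LB
# false   | true         | true   -- public cluster with UDR egress; public API frontend still required
# true    | false        | true   -- private cluster, but the public LB is kept for outbound rules only
# true    | true         | false  -- private cluster with UDR egress; no public IPv4 at all
#
# need_public_ipv6 is the same condition AND-ed with use_ipv6.
```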
12 |
13 |
14 | resource "azurerm_public_ip" "cluster_public_ip_v4" {
15 |   count = local.need_public_ipv4 ? 1 : 0
16 |
17 |   sku                 = "Standard"
18 |   location            = var.region
19 |   name                = "${var.cluster_id}-pip-v4"
20 |   resource_group_name = var.resource_group_name
21 |   allocation_method   = "Static"
22 |   domain_name_label   = var.dns_label
23 | }
24 |
25 | data "azurerm_public_ip" "cluster_public_ip_v4" {
26 |   // DEBUG: Azure apparently requires dual stack LB for v6
27 |   count = local.need_public_ipv4 ? 1 : 0
28 |
29 |   name                = azurerm_public_ip.cluster_public_ip_v4[0].name
30 |   resource_group_name = var.resource_group_name
31 | }
32 |
33 |
34 | resource "azurerm_public_ip" "cluster_public_ip_v6" {
35 |   count = local.need_public_ipv6 ? 1 : 0
36 |
37 |   ip_version          = "IPv6"
38 |   sku                 = "Standard"
39 |   location            = var.region
40 |   name                = "${var.cluster_id}-pip-v6"
41 |   resource_group_name = var.resource_group_name
42 |   allocation_method   = "Static"
43 |   domain_name_label   = var.dns_label
44 | }
45 |
46 | data "azurerm_public_ip" "cluster_public_ip_v6" {
47 |   count = local.need_public_ipv6 ? 1 : 0
48 |
49 |   name                = azurerm_public_ip.cluster_public_ip_v6[0].name
50 |   resource_group_name = var.resource_group_name
51 | }
52 |
53 | resource "azurerm_lb" "public" {
54 |   sku                 = "Standard"
55 |   name                = var.cluster_id
56 |   resource_group_name = var.resource_group_name
57 |   location            = var.region
58 |
59 |   dynamic "frontend_ip_configuration" {
60 |     for_each = [for ip in [
61 |       // DEBUG: Azure apparently requires dual stack LB for external load balancers v6
62 |       {
63 |         name : local.public_lb_frontend_ip_v4_configuration_name,
64 |         value : local.need_public_ipv4 ? azurerm_public_ip.cluster_public_ip_v4[0].id : null,
65 |         include : local.need_public_ipv4,
66 |         ipv6 : false,
67 |       },
68 |       {
69 |         name : local.public_lb_frontend_ip_v6_configuration_name,
70 |         value : local.need_public_ipv6 ? azurerm_public_ip.cluster_public_ip_v6[0].id : null,
71 |         include : local.need_public_ipv6,
72 |         ipv6 : true,
73 |       },
74 |       ] : {
75 |       name : ip.name
76 |       value : ip.value
77 |       ipv6 : ip.ipv6
78 |       include : ip.include
79 |     } if ip.include
80 |     ]
81 |
82 |     content {
83 |       name                          = frontend_ip_configuration.value.name
84 |       public_ip_address_id          = frontend_ip_configuration.value.value
85 |       private_ip_address_version    = frontend_ip_configuration.value.ipv6 ? "IPv6" : "IPv4"
86 |       private_ip_address_allocation = "Dynamic"
87 |     }
88 |   }
89 | }
90 |
91 | // The backends are only created when frontend configuration exists, because of the following error from the Azure API:
92 | // ```
93 | // Load Balancer /subscriptions/xx/resourceGroups/xx/providers/Microsoft.Network/loadBalancers/xx-public-lb does not have Frontend IP Configuration,
94 | // but it has other child resources. This setup is not supported.
95 | // ```
96 | resource "azurerm_lb_backend_address_pool" "public_lb_pool_v4" {
97 |   count = local.need_public_ipv4 ? 1 : 0
98 |
99 |   resource_group_name = var.resource_group_name
100 |   loadbalancer_id     = azurerm_lb.public.id
101 |   name                = var.cluster_id
102 | }
103 |
104 | resource "azurerm_lb_backend_address_pool" "public_lb_pool_v6" {
105 |   count = local.need_public_ipv6 ? 1 : 0
106 |
107 |   resource_group_name = var.resource_group_name
108 |   loadbalancer_id     = azurerm_lb.public.id
109 |   name                = "${var.cluster_id}-IPv6"
110 | }
111 |
112 | resource "azurerm_lb_rule" "public_lb_rule_api_internal_v4" {
113 |   count = var.use_ipv4 && ! var.private ? 1 : 0
114 |
115 |   name                           = "api-internal-v4"
116 |   resource_group_name            = var.resource_group_name
117 |   protocol                       = "Tcp"
118 |   backend_address_pool_id        = azurerm_lb_backend_address_pool.public_lb_pool_v4[0].id
119 |   loadbalancer_id                = azurerm_lb.public.id
120 |   frontend_port                  = 6443
121 |   backend_port                   = 6443
122 |   frontend_ip_configuration_name = local.public_lb_frontend_ip_v4_configuration_name
123 |   enable_floating_ip             = false
124 |   idle_timeout_in_minutes        = 30
125 |   load_distribution              = "Default"
126 |   probe_id                       = azurerm_lb_probe.public_lb_probe_api_internal[0].id
127 | }
128 |
129 | resource "azurerm_lb_rule" "public_lb_rule_api_internal_v6" {
130 |   count = var.use_ipv6 && ! var.private ? 1 : 0
131 |
132 |   name                           = "api-internal-v6"
133 |   resource_group_name            = var.resource_group_name
134 |   protocol                       = "Tcp"
135 |   backend_address_pool_id        = azurerm_lb_backend_address_pool.public_lb_pool_v6[0].id
136 |   loadbalancer_id                = azurerm_lb.public.id
137 |   frontend_port                  = 6443
138 |   backend_port                   = 6443
139 |   frontend_ip_configuration_name = local.public_lb_frontend_ip_v6_configuration_name
140 |   enable_floating_ip             = false
141 |   idle_timeout_in_minutes        = 30
142 |   load_distribution              = "Default"
143 |   probe_id                       = azurerm_lb_probe.public_lb_probe_api_internal[0].id
144 | }
145 |
146 | resource "azurerm_lb_outbound_rule" "public_lb_outbound_rule_v4" {
147 |   count = var.use_ipv4 && var.private && ! var.outbound_udr ? 1 : 0
148 |
149 |   name                    = "outbound-rule-v4"
150 |   resource_group_name     = var.resource_group_name
151 |   loadbalancer_id         = azurerm_lb.public.id
152 |   backend_address_pool_id = azurerm_lb_backend_address_pool.public_lb_pool_v4[0].id
153 |   protocol                = "All"
154 |
155 |   frontend_ip_configuration {
156 |     name = local.public_lb_frontend_ip_v4_configuration_name
157 |   }
158 | }
159 |
160 | resource "azurerm_lb_outbound_rule" "public_lb_outbound_rule_v6" {
161 |   count = var.use_ipv6 && var.private && ! var.outbound_udr ? 1 : 0
162 |
163 |   name                    = "outbound-rule-v6"
164 |   resource_group_name     = var.resource_group_name
165 |   loadbalancer_id         = azurerm_lb.public.id
166 |   backend_address_pool_id = azurerm_lb_backend_address_pool.public_lb_pool_v6[0].id
167 |   protocol                = "All"
168 |
169 |   frontend_ip_configuration {
170 |     name = local.public_lb_frontend_ip_v6_configuration_name
171 |   }
172 | }
173 |
174 | resource "azurerm_lb_probe" "public_lb_probe_api_internal" {
175 |   count = var.private ? 0 : 1
176 |
177 |   name                = "api-internal-probe"
178 |   resource_group_name = var.resource_group_name
179 |   interval_in_seconds = 5
180 |   number_of_probes    = 2
181 |   loadbalancer_id     = azurerm_lb.public.id
182 |   port                = 6443
183 |   protocol            = "HTTPS"
184 |   request_path        = "/readyz"
185 | }
186 |
-------------------------------------------------------------------------------- /vnet/variables.tf: --------------------------------------------------------------------------------
1 | variable "vnet_v4_cidrs" {
2 |   type = list(string)
3 | }
4 |
5 | variable "vnet_v6_cidrs" {
6 |   type = list(string)
7 | }
8 |
9 | variable "resource_group_name" {
10 |   type        = string
11 |   description = "Resource group for the deployment"
12 | }
13 |
14 | variable "cluster_id" {
15 |   type = string
16 | }
17 |
18 | variable "region" {
19 |   type        = string
20 |   description = "The target Azure region for the cluster."
21 | }
22 |
23 | variable "tags" {
24 |   type        = map(string)
25 |   default     = {}
26 |   description = "Azure tags to be applied to created resources."
27 | }
28 |
29 | variable "dns_label" {
30 |   type        = string
31 |   description = "The label used to build the dns name. i.e.