├── examples ├── with_acr │ ├── outputs.tf │ ├── main_override.tf │ ├── variables.tf │ ├── providers.tf │ └── main.tf ├── with_acr_v4 │ ├── outputs.tf │ ├── data.tf │ ├── main_override.tf │ ├── variables.tf │ ├── providers_override.tf │ ├── providers.tf │ └── main.tf ├── multiple_node_pools │ ├── outputs.tf │ ├── main_override.tf │ ├── variables.tf │ ├── providers.tf │ ├── main.tf │ └── README.md ├── multiple_node_pools_v4 │ ├── outputs.tf │ ├── providers_override.tf │ ├── main_override.tf │ ├── variables.tf │ ├── providers.tf │ └── main.tf ├── application_gateway_ingress_v4 │ ├── data.tf │ ├── outputs.tf │ ├── main_override.tf │ ├── providers_override.tf │ ├── variables.tf │ ├── providers.tf │ ├── k8s_workload.tf │ └── main.tf ├── uai_and_assign_role_on_subnet_v4 │ ├── data.tf │ ├── main_override.tf │ ├── variables.tf │ ├── providers_override.tf │ ├── providers.tf │ └── main.tf ├── startup │ ├── main_override.tf │ ├── variables.tf │ ├── providers.tf │ ├── outputs.tf │ ├── disk_encryption_set.tf │ └── main.tf ├── named_cluster │ ├── main_override.tf │ ├── outputs.tf │ ├── variables.tf │ ├── providers.tf │ ├── kms.tf │ ├── disk_encryption_set.tf │ ├── key_vault.tf │ └── main.tf ├── without_monitor │ ├── main_override.tf │ ├── outputs.tf │ ├── variables.tf │ ├── providers.tf │ ├── main.tf │ └── disk_encryption_set.tf ├── startup_v4 │ ├── main_override.tf │ ├── providers_override.tf │ ├── variables.tf │ ├── providers.tf │ ├── outputs.tf │ ├── disk_encryption_set.tf │ └── main.tf ├── without_monitor_v4 │ ├── main_override.tf │ ├── outputs.tf │ ├── providers_override.tf │ ├── variables.tf │ ├── providers.tf │ ├── main.tf │ └── disk_encryption_set.tf ├── named_cluster_v4 │ ├── main_override.tf │ ├── outputs.tf │ ├── providers_override.tf │ ├── variables.tf │ ├── providers.tf │ ├── kms.tf │ ├── disk_encryption_set.tf │ ├── key_vault.tf │ └── main.tf ├── uai_and_assign_role_on_subnet │ ├── variables.tf │ ├── providers.tf │ └── main.tf └── application_gateway_ingress │ ├── 
outputs.tf │ ├── variables.tf │ ├── providers.tf │ ├── k8s_workload.tf │ └── main.tf ├── CHANGELOG.md ├── GNUmakefile ├── main_override.tf ├── unit-test-fixture ├── alt_locals.tf ├── outputs.tf └── locals.tf ├── v4 ├── versions_override.tf ├── versions.tf ├── v4_variables.tf ├── extra_node_pool_override.tf ├── locals.tf ├── log_analytics.tf └── role_assignments.tf ├── CODE_OF_CONDUCT.md ├── versions.tf ├── NoticeOnUpgradeTov6.0.md ├── NoticeOnUpgradeTov9.0.md ├── CHANGELOG-v4.md ├── extra_node_pool_override.tf ├── LICENSE ├── SECURITY.md ├── tfvmmakefile ├── NoticeOnUpgradeTov10.0.md ├── CHANGELOG-v8.md ├── NoticeOnUpgradeTov7.0.md ├── CHANGELOG-v5.md ├── NoticeOnUpgradeTov5.0.md ├── NoticeOnUpgradeTov8.0.md ├── locals.tf ├── log_analytics.tf ├── test ├── upgrade │ └── upgrade_test.go └── go.mod ├── role_assignments.tf └── CHANGELOG-v9.md /examples/with_acr/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/with_acr_v4/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/multiple_node_pools/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/multiple_node_pools_v4/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/with_acr_v4/data.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "this" {} -------------------------------------------------------------------------------- /examples/application_gateway_ingress_v4/data.tf: 
-------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "this" {} -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet_v4/data.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "this" {} -------------------------------------------------------------------------------- /examples/startup/main_override.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_subnet" "test" { 2 | enforce_private_link_endpoint_network_policies = true 3 | } -------------------------------------------------------------------------------- /examples/named_cluster/main_override.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_subnet" "test" { 2 | enforce_private_link_endpoint_network_policies = true 3 | } -------------------------------------------------------------------------------- /examples/without_monitor/main_override.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_subnet" "test" { 2 | enforce_private_link_endpoint_network_policies = true 3 | } -------------------------------------------------------------------------------- /examples/startup_v4/main_override.tf: -------------------------------------------------------------------------------- 1 | module "aks" { 2 | source = "../../v4" 3 | rbac_aad_tenant_id = data.azurerm_client_config.current.tenant_id 4 | } -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet_v4/main_override.tf: -------------------------------------------------------------------------------- 1 | module "aks" { 2 | source = "../../v4" 3 | rbac_aad_tenant_id = data.azurerm_client_config.this.tenant_id 4 | } 
-------------------------------------------------------------------------------- /examples/without_monitor_v4/main_override.tf: -------------------------------------------------------------------------------- 1 | module "aks_without_monitor" { 2 | source = "../../v4" 3 | rbac_aad_tenant_id = data.azurerm_client_config.current.tenant_id 4 | } -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Important Notice 4 | 5 | * fix: add back `private_cluster_enabled` variable by @tobiasehlert [#667](https://github.com/Azure/terraform-azurerm-aks/pull/667) 6 | -------------------------------------------------------------------------------- /examples/named_cluster_v4/main_override.tf: -------------------------------------------------------------------------------- 1 | 2 | module "aks_cluster_name" { 3 | source = "../../v4" 4 | rbac_aad_tenant_id = data.azurerm_client_config.current.tenant_id 5 | } 6 | -------------------------------------------------------------------------------- /GNUmakefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | 3 | $(shell curl -H 'Cache-Control: no-cache, no-store' -sSL "https://raw.githubusercontent.com/Azure/tfmod-scaffold/refs/heads/main/GNUmakefile" -o tfvmmakefile) 4 | -include tfvmmakefile -------------------------------------------------------------------------------- /main_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: azurerm_resource_tag 2 | 3 | resource "azurerm_kubernetes_cluster" "main" { 4 | automatic_channel_upgrade = var.automatic_channel_upgrade 5 | node_os_channel_upgrade = var.node_os_channel_upgrade 6 | } -------------------------------------------------------------------------------- /examples/named_cluster/outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "test_aks_named_id" { 2 | value = module.aks_cluster_name.aks_id 3 | } 4 | 5 | output "test_aks_named_identity" { 6 | sensitive = true 7 | value = try(module.aks_cluster_name.cluster_identity, "") 8 | } 9 | -------------------------------------------------------------------------------- /examples/named_cluster_v4/outputs.tf: -------------------------------------------------------------------------------- 1 | output "test_aks_named_id" { 2 | value = module.aks_cluster_name.aks_id 3 | } 4 | 5 | output "test_aks_named_identity" { 6 | sensitive = true 7 | value = try(module.aks_cluster_name.cluster_identity, "") 8 | } 9 | -------------------------------------------------------------------------------- /examples/with_acr_v4/main_override.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_container_registry" "example" { 2 | retention_policy_in_days = 7 3 | } 4 | 5 | module "aks" { 6 | source = "../../v4" 7 | rbac_aad_tenant_id = data.azurerm_client_config.this.tenant_id 8 | } -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet/variables.tf: -------------------------------------------------------------------------------- 1 | variable "kubernetes_cluster_name" { 2 | default = "myAks" 3 | } 4 | 5 | variable "location" { 6 | default = "eastus" 7 | } 8 | 9 | variable "resource_group_name" { 10 | default = "tfmod-aks" 11 | } 12 | -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet_v4/variables.tf: -------------------------------------------------------------------------------- 1 | variable "kubernetes_cluster_name" { 2 | default = "myAks" 3 | } 4 | 5 | variable "location" { 6 | default = "eastus" 7 | } 8 | 9 | variable "resource_group_name" { 10 | default = "tfmod-aks" 11 | } 12 | 
-------------------------------------------------------------------------------- /examples/multiple_node_pools/main_override.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_subnet" "default_node_pool_subnet" { 2 | enforce_private_link_endpoint_network_policies = true 3 | } 4 | 5 | resource "azurerm_subnet" "node_pool_subnet" { 6 | enforce_private_link_endpoint_network_policies = true 7 | } -------------------------------------------------------------------------------- /examples/with_acr/main_override.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_subnet" "test" { 2 | enforce_private_link_endpoint_network_policies = true 3 | } 4 | 5 | resource "azurerm_container_registry" "example" { 6 | retention_policy { 7 | days = 7 8 | enabled = true 9 | } 10 | } -------------------------------------------------------------------------------- /examples/without_monitor/outputs.tf: -------------------------------------------------------------------------------- 1 | output "test_aks_without_monitor_id" { 2 | value = module.aks_without_monitor.aks_id 3 | } 4 | 5 | output "test_aks_without_monitor_identity" { 6 | sensitive = true 7 | value = try(module.aks_without_monitor.cluster_identity, "") 8 | } 9 | -------------------------------------------------------------------------------- /examples/without_monitor_v4/outputs.tf: -------------------------------------------------------------------------------- 1 | output "test_aks_without_monitor_id" { 2 | value = module.aks_without_monitor.aks_id 3 | } 4 | 5 | output "test_aks_without_monitor_identity" { 6 | sensitive = true 7 | value = try(module.aks_without_monitor.cluster_identity, "") 8 | } 9 | -------------------------------------------------------------------------------- /examples/application_gateway_ingress/outputs.tf: -------------------------------------------------------------------------------- 1 | output 
"ingress_endpoint" { 2 | depends_on = [time_sleep.wait_for_ingress] 3 | value = try("http://${data.kubernetes_ingress_v1.ing.status[0].load_balancer[0].ingress[0].ip}", "if it's not a http url, you need further investigation") 4 | } 5 | -------------------------------------------------------------------------------- /examples/application_gateway_ingress_v4/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ingress_endpoint" { 2 | depends_on = [time_sleep.wait_for_ingress] 3 | value = try("http://${data.kubernetes_ingress_v1.ing.status[0].load_balancer[0].ingress[0].ip}", "if it's not a http url, you need further investigation") 4 | } 5 | -------------------------------------------------------------------------------- /examples/application_gateway_ingress_v4/main_override.tf: -------------------------------------------------------------------------------- 1 | module "aks" { 2 | #checkov:skip=CKV_AZURE_141:We enable admin account here so we can provision K8s resources directly in this simple example 3 | source = "../../v4" 4 | rbac_aad_tenant_id = data.azurerm_client_config.this.tenant_id 5 | } -------------------------------------------------------------------------------- /examples/with_acr/variables.tf: -------------------------------------------------------------------------------- 1 | variable "create_resource_group" { 2 | type = bool 3 | default = true 4 | nullable = false 5 | } 6 | 7 | variable "location" { 8 | default = "eastus" 9 | } 10 | 11 | variable "resource_group_name" { 12 | type = string 13 | default = null 14 | } 15 | -------------------------------------------------------------------------------- /examples/with_acr_v4/variables.tf: -------------------------------------------------------------------------------- 1 | variable "create_resource_group" { 2 | type = bool 3 | default = true 4 | nullable = false 5 | } 6 | 7 | variable "location" { 8 | default = "eastus" 9 | } 10 | 11 | variable 
"resource_group_name" { 12 | type = string 13 | default = null 14 | } 15 | -------------------------------------------------------------------------------- /examples/with_acr_v4/providers_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: terraform_required_version_declaration 2 | 3 | terraform { 4 | required_providers { 5 | azurerm = { 6 | source = "hashicorp/azurerm" 7 | version = "~> 4.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.3.2" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /examples/multiple_node_pools_v4/providers_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: terraform_required_version_declaration 2 | 3 | terraform { 4 | required_providers { 5 | azurerm = { 6 | source = "hashicorp/azurerm" 7 | version = "~> 4.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.3.2" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet_v4/providers_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: terraform_required_version_declaration 2 | 3 | terraform { 4 | required_providers { 5 | azurerm = { 6 | source = "hashicorp/azurerm" 7 | version = "~> 4.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.3.2" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /unit-test-fixture/alt_locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | azurerm_log_analytics_workspace_id = "azurerm_log_analytics_workspace_id" 3 | azurerm_log_analytics_workspace_name = "azurerm_log_analytics_workspace_name" 4 | azurerm_log_analytics_workspace_location = var.location 5 
| azurerm_log_analytics_workspace_resource_group_name = var.resource_group_name 6 | } -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | random = { 9 | source = "hashicorp/random" 10 | version = "3.3.2" 11 | } 12 | } 13 | } 14 | 15 | provider "azurerm" { 16 | features {} 17 | } 18 | 19 | provider "random" {} -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet_v4/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | random = { 9 | source = "hashicorp/random" 10 | version = "3.3.2" 11 | } 12 | } 13 | } 14 | 15 | provider "azurerm" { 16 | features {} 17 | } 18 | 19 | provider "random" {} -------------------------------------------------------------------------------- /examples/startup_v4/providers_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: terraform_required_version_declaration 2 | 3 | terraform { 4 | required_providers { 5 | azurerm = { 6 | source = "hashicorp/azurerm" 7 | version = "~> 4.0" 8 | } 9 | curl = { 10 | source = "anschoewe/curl" 11 | version = "1.0.2" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.3.2" 16 | } 17 | } 18 | } -------------------------------------------------------------------------------- /examples/named_cluster_v4/providers_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: 
terraform_required_version_declaration 2 | 3 | terraform { 4 | required_providers { 5 | azurerm = { 6 | source = "hashicorp/azurerm" 7 | version = "~> 4.0" 8 | } 9 | curl = { 10 | source = "anschoewe/curl" 11 | version = "1.0.2" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.3.2" 16 | } 17 | } 18 | } -------------------------------------------------------------------------------- /examples/without_monitor_v4/providers_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: terraform_required_version_declaration 2 | 3 | terraform { 4 | required_providers { 5 | azurerm = { 6 | source = "hashicorp/azurerm" 7 | version = "~> 4.0" 8 | } 9 | curl = { 10 | source = "anschoewe/curl" 11 | version = "1.0.2" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.3.2" 16 | } 17 | } 18 | } -------------------------------------------------------------------------------- /examples/multiple_node_pools_v4/main_override.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_subnet" "default_node_pool_subnet" { 2 | private_endpoint_network_policies = "Disabled" 3 | private_link_service_network_policies_enabled = true 4 | } 5 | 6 | resource "azurerm_subnet" "node_pool_subnet" { 7 | private_endpoint_network_policies = "Disabled" 8 | private_link_service_network_policies_enabled = true 9 | } 10 | 11 | module "aks" { 12 | source = "../../v4" 13 | } -------------------------------------------------------------------------------- /v4/versions_override.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | azapi = { 4 | source = "Azure/azapi" 5 | version = ">=2.0, < 3.0" 6 | } 7 | azurerm = { 8 | source = "hashicorp/azurerm" 9 | version = ">= 4.16.0, < 5.0" 10 | } 11 | null = { 12 | source = "hashicorp/null" 13 | version = ">= 3.0" 14 | } 15 | tls = { 16 | 
source = "hashicorp/tls" 17 | version = ">= 3.1" 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | This code of conduct outlines expectations for participation in Microsoft-managed open source communities, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community. 4 | 5 | Please read the full text at [https://opensource.microsoft.com/codeofconduct/](https://opensource.microsoft.com/codeofconduct/) 6 | -------------------------------------------------------------------------------- /examples/multiple_node_pools/variables.tf: -------------------------------------------------------------------------------- 1 | variable "create_resource_group" { 2 | type = bool 3 | default = true 4 | nullable = false 5 | } 6 | 7 | variable "location" { 8 | default = "centralus" 9 | } 10 | 11 | variable "resource_group_name" { 12 | type = string 13 | default = null 14 | } 15 | 16 | variable "kubernetes_version" { 17 | type = string 18 | default = null 19 | } 20 | 21 | variable "orchestrator_version" { 22 | type = string 23 | default = null 24 | } 25 | -------------------------------------------------------------------------------- /examples/multiple_node_pools_v4/variables.tf: -------------------------------------------------------------------------------- 1 | variable "create_resource_group" { 2 | type = bool 3 | default = true 4 | nullable = false 5 | } 6 | 7 | variable "location" { 8 | default = "centralus" 9 | } 10 | 11 | variable "resource_group_name" { 12 | type = string 13 | default = null 14 | } 15 | 16 | variable "kubernetes_version" { 17 | type = string 18 | default = null 19 | } 20 | 21 | variable "orchestrator_version" { 22 | type = string 23 | 
default = null 24 | } 25 | -------------------------------------------------------------------------------- /examples/with_acr/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | random = { 9 | source = "hashicorp/random" 10 | version = "3.3.2" 11 | } 12 | } 13 | } 14 | 15 | provider "azurerm" { 16 | features { 17 | resource_group { 18 | prevent_deletion_if_contains_resources = false 19 | } 20 | } 21 | } 22 | 23 | provider "random" {} -------------------------------------------------------------------------------- /examples/with_acr_v4/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | random = { 9 | source = "hashicorp/random" 10 | version = "3.3.2" 11 | } 12 | } 13 | } 14 | 15 | provider "azurerm" { 16 | features { 17 | resource_group { 18 | prevent_deletion_if_contains_resources = false 19 | } 20 | } 21 | } 22 | 23 | provider "random" {} -------------------------------------------------------------------------------- /examples/multiple_node_pools/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | random = { 9 | source = "hashicorp/random" 10 | version = "3.3.2" 11 | } 12 | } 13 | } 14 | 15 | provider "azurerm" { 16 | features { 17 | resource_group { 18 | prevent_deletion_if_contains_resources = false 19 | } 20 | } 21 | } 22 | 23 | provider "random" {} -------------------------------------------------------------------------------- 
/examples/multiple_node_pools_v4/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | random = { 9 | source = "hashicorp/random" 10 | version = "3.3.2" 11 | } 12 | } 13 | } 14 | 15 | provider "azurerm" { 16 | features { 17 | resource_group { 18 | prevent_deletion_if_contains_resources = false 19 | } 20 | } 21 | } 22 | 23 | provider "random" {} -------------------------------------------------------------------------------- /examples/without_monitor/variables.tf: -------------------------------------------------------------------------------- 1 | variable "create_resource_group" { 2 | type = bool 3 | default = true 4 | nullable = false 5 | } 6 | 7 | variable "key_vault_firewall_bypass_ip_cidr" { 8 | type = string 9 | default = null 10 | } 11 | 12 | variable "location" { 13 | default = "eastus" 14 | } 15 | 16 | variable "managed_identity_principal_id" { 17 | type = string 18 | default = null 19 | } 20 | 21 | variable "resource_group_name" { 22 | type = string 23 | default = null 24 | } 25 | -------------------------------------------------------------------------------- /examples/without_monitor_v4/variables.tf: -------------------------------------------------------------------------------- 1 | variable "create_resource_group" { 2 | type = bool 3 | default = true 4 | nullable = false 5 | } 6 | 7 | variable "key_vault_firewall_bypass_ip_cidr" { 8 | type = string 9 | default = null 10 | } 11 | 12 | variable "location" { 13 | default = "eastus" 14 | } 15 | 16 | variable "managed_identity_principal_id" { 17 | type = string 18 | default = null 19 | } 20 | 21 | variable "resource_group_name" { 22 | type = string 23 | default = null 24 | } 25 | -------------------------------------------------------------------------------- 
/examples/application_gateway_ingress_v4/providers_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: terraform_required_version_declaration 2 | 3 | terraform { 4 | required_providers { 5 | azurerm = { 6 | source = "hashicorp/azurerm" 7 | version = "~> 4.0" 8 | } 9 | kubernetes = { 10 | source = "hashicorp/kubernetes" 11 | version = "2.22.0" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.3.2" 16 | } 17 | time = { 18 | source = "hashicorp/time" 19 | version = "0.9.1" 20 | } 21 | } 22 | } -------------------------------------------------------------------------------- /examples/startup/variables.tf: -------------------------------------------------------------------------------- 1 | variable "client_id" { 2 | } 3 | 4 | variable "client_secret" { 5 | } 6 | 7 | variable "create_resource_group" { 8 | type = bool 9 | default = true 10 | nullable = false 11 | } 12 | 13 | variable "key_vault_firewall_bypass_ip_cidr" { 14 | type = string 15 | default = null 16 | } 17 | 18 | variable "location" { 19 | default = "eastus" 20 | } 21 | 22 | variable "managed_identity_principal_id" { 23 | type = string 24 | default = null 25 | } 26 | 27 | variable "resource_group_name" { 28 | type = string 29 | default = null 30 | } 31 | -------------------------------------------------------------------------------- /examples/startup_v4/variables.tf: -------------------------------------------------------------------------------- 1 | variable "client_id" { 2 | } 3 | 4 | variable "client_secret" { 5 | } 6 | 7 | variable "create_resource_group" { 8 | type = bool 9 | default = true 10 | nullable = false 11 | } 12 | 13 | variable "key_vault_firewall_bypass_ip_cidr" { 14 | type = string 15 | default = null 16 | } 17 | 18 | variable "location" { 19 | default = "eastus" 20 | } 21 | 22 | variable "managed_identity_principal_id" { 23 | type = string 24 | default = null 25 | } 26 | 27 | variable 
"resource_group_name" { 28 | type = string 29 | default = null 30 | } 31 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3" 3 | 4 | required_providers { 5 | azapi = { 6 | source = "Azure/azapi" 7 | version = ">=2.0, < 3.0" 8 | } 9 | azurerm = { 10 | source = "hashicorp/azurerm" 11 | version = ">= 3.107.0, < 4.0" 12 | } 13 | null = { 14 | source = "hashicorp/null" 15 | version = ">= 3.0" 16 | } 17 | time = { 18 | source = "hashicorp/time" 19 | version = ">= 0.5" 20 | } 21 | tls = { 22 | source = "hashicorp/tls" 23 | version = ">= 3.1" 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /examples/named_cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "create_resource_group" { 2 | type = bool 3 | default = true 4 | nullable = false 5 | } 6 | 7 | variable "key_vault_firewall_bypass_ip_cidr" { 8 | type = string 9 | default = null 10 | } 11 | 12 | variable "location" { 13 | default = "eastus" 14 | } 15 | 16 | variable "log_analytics_workspace_location" { 17 | default = null 18 | } 19 | 20 | variable "managed_identity_principal_id" { 21 | type = string 22 | default = null 23 | } 24 | 25 | variable "resource_group_name" { 26 | type = string 27 | default = null 28 | } 29 | -------------------------------------------------------------------------------- /examples/named_cluster_v4/variables.tf: -------------------------------------------------------------------------------- 1 | variable "create_resource_group" { 2 | type = bool 3 | default = true 4 | nullable = false 5 | } 6 | 7 | variable "key_vault_firewall_bypass_ip_cidr" { 8 | type = string 9 | default = null 10 | } 11 | 12 | variable "location" { 13 | default = "eastus" 14 | } 15 | 16 | variable "log_analytics_workspace_location" 
{ 17 | default = null 18 | } 19 | 20 | variable "managed_identity_principal_id" { 21 | type = string 22 | default = null 23 | } 24 | 25 | variable "resource_group_name" { 26 | type = string 27 | default = null 28 | } 29 | -------------------------------------------------------------------------------- /v4/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3" 3 | 4 | required_providers { 5 | azapi = { 6 | source = "Azure/azapi" 7 | version = ">=2.0, < 3.0" 8 | } 9 | azurerm = { 10 | source = "hashicorp/azurerm" 11 | version = ">= 3.107.0, < 4.0" 12 | } 13 | null = { 14 | source = "hashicorp/null" 15 | version = ">= 3.0" 16 | } 17 | time = { 18 | source = "hashicorp/time" 19 | version = ">= 0.5" 20 | } 21 | tls = { 22 | source = "hashicorp/tls" 23 | version = ">= 3.1" 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /NoticeOnUpgradeTov6.0.md: -------------------------------------------------------------------------------- 1 | # Notice on Upgrade to v6.x 2 | 3 | We've added a CI pipeline for this module to speed up our code review and to enforce a high code quality standard, if you want to contribute by submitting a pull request, please read [Pre-Commit & Pr-Check & Test](#Pre-Commit--Pr-Check--Test) section, or your pull request might be rejected by CI pipeline. 4 | 5 | A pull request will be reviewed when it has passed Pre Pull Request Check in the pipeline, and will be merged when it has passed the acceptance tests. Once the ci Pipeline failed, please read the pipeline's output, thanks for your cooperation. 
6 | -------------------------------------------------------------------------------- /v4/v4_variables.tf: -------------------------------------------------------------------------------- 1 | variable "upgrade_override" { 2 | type = object({ 3 | force_upgrade_enabled = bool 4 | effective_until = optional(string) 5 | }) 6 | default = null 7 | description = <<-EOT 8 | `force_upgrade_enabled` - (Required) Whether to force upgrade the cluster. Possible values are `true` or `false`. 9 | `effective_until` - (Optional) Specifies the duration, in RFC 3339 format (e.g., `2025-10-01T13:00:00Z`), the upgrade_override values are effective. This field must be set for the `upgrade_override` values to take effect. The date-time must be within the next 30 days. 10 | EOT 11 | } 12 | -------------------------------------------------------------------------------- /examples/application_gateway_ingress/variables.tf: -------------------------------------------------------------------------------- 1 | variable "bring_your_own_vnet" { 2 | type = bool 3 | default = true 4 | } 5 | 6 | variable "create_resource_group" { 7 | type = bool 8 | default = true 9 | nullable = false 10 | } 11 | 12 | variable "create_role_assignments_for_application_gateway" { 13 | type = bool 14 | default = true 15 | } 16 | 17 | variable "location" { 18 | default = "eastus" 19 | } 20 | 21 | variable "resource_group_name" { 22 | type = string 23 | default = null 24 | } 25 | 26 | variable "use_brown_field_application_gateway" { 27 | type = bool 28 | default = false 29 | } 30 | -------------------------------------------------------------------------------- /examples/application_gateway_ingress_v4/variables.tf: -------------------------------------------------------------------------------- 1 | variable "bring_your_own_vnet" { 2 | type = bool 3 | default = true 4 | } 5 | 6 | variable "create_resource_group" { 7 | type = bool 8 | default = true 9 | nullable = false 10 | } 11 | 12 | variable 
"create_role_assignments_for_application_gateway" { 13 | type = bool 14 | default = true 15 | } 16 | 17 | variable "location" { 18 | default = "eastus" 19 | } 20 | 21 | variable "resource_group_name" { 22 | type = string 23 | default = null 24 | } 25 | 26 | variable "use_brown_field_application_gateway" { 27 | type = bool 28 | default = false 29 | } 30 | -------------------------------------------------------------------------------- /NoticeOnUpgradeTov9.0.md: -------------------------------------------------------------------------------- 1 | # Notice on Upgrade to v9.x 2 | 3 | ## New default value for variable `agents_pool_max_surge` 4 | 5 | variable `agents_pool_max_surge` now has default value `10%`. This change might cause configuration drift. If you want to keep the old value, please set it explicitly in your configuration. 6 | 7 | ## API version for `azapi_update_resource` resource has been upgraded from `Microsoft.ContainerService/managedClusters@2023-01-02-preview` to `Microsoft.ContainerService/managedClusters@2024-02-01`. 8 | 9 | After a test, it won't affect the existing Terraform state and cause configuration drift. The upgrade is caused by the retirement of original API. 
10 | -------------------------------------------------------------------------------- /v4/extra_node_pool_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: azurerm_resource_tag 2 | 3 | resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { 4 | auto_scaling_enabled = each.value.enable_auto_scaling 5 | host_encryption_enabled = each.value.enable_host_encryption 6 | node_public_ip_enabled = each.value.enable_node_public_ip 7 | temporary_name_for_rotation = each.value.temporary_name_for_rotation 8 | } 9 | 10 | resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { 11 | auto_scaling_enabled = each.value.enable_auto_scaling 12 | host_encryption_enabled = each.value.enable_host_encryption 13 | node_public_ip_enabled = each.value.enable_node_public_ip 14 | } -------------------------------------------------------------------------------- /examples/startup/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | curl = { 9 | source = "anschoewe/curl" 10 | version = "1.0.2" 11 | } 12 | random = { 13 | source = "hashicorp/random" 14 | version = "3.3.2" 15 | } 16 | } 17 | } 18 | 19 | provider "azurerm" { 20 | features { 21 | key_vault { 22 | purge_soft_delete_on_destroy = false 23 | purge_soft_deleted_keys_on_destroy = false 24 | recover_soft_deleted_key_vaults = false 25 | } 26 | resource_group { 27 | prevent_deletion_if_contains_resources = false 28 | } 29 | } 30 | } 31 | 32 | provider "curl" {} 33 | 34 | provider "random" {} -------------------------------------------------------------------------------- /examples/startup_v4/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | 
required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | curl = { 9 | source = "anschoewe/curl" 10 | version = "1.0.2" 11 | } 12 | random = { 13 | source = "hashicorp/random" 14 | version = "3.3.2" 15 | } 16 | } 17 | } 18 | 19 | provider "azurerm" { 20 | features { 21 | key_vault { 22 | purge_soft_delete_on_destroy = false 23 | purge_soft_deleted_keys_on_destroy = false 24 | recover_soft_deleted_key_vaults = false 25 | } 26 | resource_group { 27 | prevent_deletion_if_contains_resources = false 28 | } 29 | } 30 | } 31 | 32 | provider "curl" {} 33 | 34 | provider "random" {} -------------------------------------------------------------------------------- /examples/named_cluster/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">=3.51.0, < 4.0" 7 | } 8 | curl = { 9 | source = "anschoewe/curl" 10 | version = "1.0.2" 11 | } 12 | random = { 13 | source = "hashicorp/random" 14 | version = "3.3.2" 15 | } 16 | } 17 | } 18 | 19 | provider "azurerm" { 20 | features { 21 | key_vault { 22 | purge_soft_delete_on_destroy = false 23 | purge_soft_deleted_keys_on_destroy = false 24 | recover_soft_deleted_key_vaults = false 25 | } 26 | resource_group { 27 | prevent_deletion_if_contains_resources = false 28 | } 29 | } 30 | } 31 | 32 | provider "curl" {} 33 | 34 | provider "random" {} -------------------------------------------------------------------------------- /examples/named_cluster_v4/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">=3.51.0, < 4.0" 7 | } 8 | curl = { 9 | source = "anschoewe/curl" 10 | version = "1.0.2" 11 | } 12 | random = 
{ 13 | source = "hashicorp/random" 14 | version = "3.3.2" 15 | } 16 | } 17 | } 18 | 19 | provider "azurerm" { 20 | features { 21 | key_vault { 22 | purge_soft_delete_on_destroy = false 23 | purge_soft_deleted_keys_on_destroy = false 24 | recover_soft_deleted_key_vaults = false 25 | } 26 | resource_group { 27 | prevent_deletion_if_contains_resources = false 28 | } 29 | } 30 | } 31 | 32 | provider "curl" {} 33 | 34 | provider "random" {} -------------------------------------------------------------------------------- /examples/without_monitor/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | curl = { 9 | source = "anschoewe/curl" 10 | version = "1.0.2" 11 | } 12 | random = { 13 | source = "hashicorp/random" 14 | version = "3.3.2" 15 | } 16 | } 17 | } 18 | 19 | provider "azurerm" { 20 | features { 21 | key_vault { 22 | purge_soft_delete_on_destroy = false 23 | purge_soft_deleted_keys_on_destroy = false 24 | recover_soft_deleted_key_vaults = false 25 | } 26 | resource_group { 27 | prevent_deletion_if_contains_resources = false 28 | } 29 | } 30 | } 31 | 32 | provider "curl" {} 33 | 34 | provider "random" {} -------------------------------------------------------------------------------- /examples/without_monitor_v4/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | curl = { 9 | source = "anschoewe/curl" 10 | version = "1.0.2" 11 | } 12 | random = { 13 | source = "hashicorp/random" 14 | version = "3.3.2" 15 | } 16 | } 17 | } 18 | 19 | provider "azurerm" { 20 | features { 21 | key_vault { 22 | purge_soft_delete_on_destroy = false 23 | 
purge_soft_deleted_keys_on_destroy = false 24 | recover_soft_deleted_key_vaults = false 25 | } 26 | resource_group { 27 | prevent_deletion_if_contains_resources = false 28 | } 29 | } 30 | } 31 | 32 | provider "curl" {} 33 | 34 | provider "random" {} -------------------------------------------------------------------------------- /unit-test-fixture/outputs.tf: -------------------------------------------------------------------------------- 1 | output "create_analytics_solution" { 2 | value = local.create_analytics_solution 3 | } 4 | 5 | output "create_analytics_workspace" { 6 | value = local.create_analytics_workspace 7 | } 8 | 9 | output "log_analytics_workspace" { 10 | value = local.log_analytics_workspace 11 | } 12 | 13 | output "automatic_channel_upgrade_check" { 14 | value = local.automatic_channel_upgrade_check 15 | } 16 | 17 | output "auto_scaler_profile_scale_down_delay_after_delete" { 18 | value = local.auto_scaler_profile_scale_down_delay_after_delete 19 | } 20 | 21 | output "auto_scaler_profile_scan_interval" { 22 | value = var.auto_scaler_profile_scan_interval 23 | } 24 | 25 | output "query_datasource_for_log_analytics_workspace_location" { 26 | value = local.query_datasource_for_log_analytics_workspace_location 27 | } -------------------------------------------------------------------------------- /CHANGELOG-v4.md: -------------------------------------------------------------------------------- 1 | ## 4.15.0 (May 06, 2022) 2 | 3 | ENHANCEMENTS: 4 | 5 | * Added output for `kube_admin_config_raw` ([#146](https://github.com/Azure/terraform-azurerm-aks/pull/146)) 6 | * Include `node_resource_group` as variable ([#136](https://github.com/Azure/terraform-azurerm-aks/pull/136)) 7 | 8 | BUG FIXES: 9 | 10 | ## 4.16.0 (June 02, 2022) 11 | 12 | ENHANCEMENTS: 13 | 14 | * Added output for `addon_profile` ([#151](https://github.com/Azure/terraform-azurerm-aks/pull/151)) 15 | * Adding Microsoft SECURITY.MD 
([#167](https://github.com/Azure/terraform-azurerm-aks/pull/167)) 16 | * Added variable `os_disk_type` for default node pools ([#169](https://github.com/Azure/terraform-azurerm-aks/pull/169)) 17 | 18 | BUG FIXES: 19 | 20 | * Trivial fix to the example in the README ([#166](https://github.com/Azure/terraform-azurerm-aks/pull/166)) 21 | -------------------------------------------------------------------------------- /extra_node_pool_override.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore-file: azurerm_resource_tag 2 | 3 | resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { 4 | custom_ca_trust_enabled = each.value.custom_ca_trust_enabled 5 | enable_auto_scaling = each.value.enable_auto_scaling 6 | enable_host_encryption = each.value.enable_host_encryption 7 | enable_node_public_ip = each.value.enable_node_public_ip 8 | message_of_the_day = each.value.message_of_the_day 9 | } 10 | 11 | resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { 12 | custom_ca_trust_enabled = each.value.custom_ca_trust_enabled 13 | enable_auto_scaling = each.value.enable_auto_scaling 14 | enable_host_encryption = each.value.enable_host_encryption 15 | enable_node_public_ip = each.value.enable_node_public_ip 16 | message_of_the_day = each.value.message_of_the_day 17 | } -------------------------------------------------------------------------------- /examples/application_gateway_ingress/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | kubernetes = { 9 | source = "hashicorp/kubernetes" 10 | version = "2.22.0" 11 | } 12 | random = { 13 | source = "hashicorp/random" 14 | version = "3.3.2" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.9.1" 19 | } 20 | } 
21 | } 22 | 23 | provider "azurerm" { 24 | features { 25 | resource_group { 26 | prevent_deletion_if_contains_resources = false 27 | } 28 | } 29 | } 30 | 31 | # DO NOT DO THIS IN PRODUCTION ENVIRONMENT 32 | provider "kubernetes" { 33 | host = module.aks.admin_host 34 | client_certificate = base64decode(module.aks.admin_client_certificate) 35 | client_key = base64decode(module.aks.admin_client_key) 36 | cluster_ca_certificate = base64decode(module.aks.admin_cluster_ca_certificate) 37 | } 38 | 39 | provider "random" {} -------------------------------------------------------------------------------- /examples/application_gateway_ingress_v4/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">= 3.51, < 4.0" 7 | } 8 | kubernetes = { 9 | source = "hashicorp/kubernetes" 10 | version = "2.22.0" 11 | } 12 | random = { 13 | source = "hashicorp/random" 14 | version = "3.3.2" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.9.1" 19 | } 20 | } 21 | } 22 | 23 | provider "azurerm" { 24 | features { 25 | resource_group { 26 | prevent_deletion_if_contains_resources = false 27 | } 28 | } 29 | } 30 | 31 | # DO NOT DO THIS IN PRODUCTION ENVIRONMENT 32 | provider "kubernetes" { 33 | host = module.aks.admin_host 34 | client_certificate = base64decode(module.aks.admin_client_certificate) 35 | client_key = base64decode(module.aks.admin_client_key) 36 | cluster_ca_certificate = base64decode(module.aks.admin_cluster_ca_certificate) 37 | } 38 | 39 | provider "random" {} -------------------------------------------------------------------------------- /examples/named_cluster/kms.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_key_vault_key" "kms" { 2 | key_opts = [ 3 | "decrypt", 4 | "encrypt", 5 | "sign", 6 | "unwrapKey", 7 | "verify", 8 
| "wrapKey", 9 | ] 10 | key_type = "RSA" 11 | key_vault_id = azurerm_key_vault.des_vault.id 12 | name = "etcd-encryption" 13 | expiration_date = timeadd("${formatdate("YYYY-MM-DD", timestamp())}T00:00:00Z", "168h") 14 | key_size = 2048 15 | 16 | depends_on = [ 17 | azurerm_key_vault_access_policy.current_user 18 | ] 19 | 20 | lifecycle { 21 | ignore_changes = [expiration_date] 22 | } 23 | } 24 | 25 | resource "azurerm_key_vault_access_policy" "kms" { 26 | key_vault_id = azurerm_key_vault.des_vault.id 27 | object_id = azurerm_user_assigned_identity.test.principal_id 28 | tenant_id = azurerm_user_assigned_identity.test.tenant_id 29 | key_permissions = [ 30 | "Decrypt", 31 | "Encrypt", 32 | ] 33 | } 34 | 35 | resource "azurerm_role_assignment" "kms" { 36 | principal_id = azurerm_user_assigned_identity.test.principal_id 37 | scope = azurerm_key_vault.des_vault.id 38 | role_definition_name = "Key Vault Contributor" 39 | } 40 | -------------------------------------------------------------------------------- /examples/named_cluster_v4/kms.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_key_vault_key" "kms" { 2 | key_opts = [ 3 | "decrypt", 4 | "encrypt", 5 | "sign", 6 | "unwrapKey", 7 | "verify", 8 | "wrapKey", 9 | ] 10 | key_type = "RSA" 11 | key_vault_id = azurerm_key_vault.des_vault.id 12 | name = "etcd-encryption" 13 | expiration_date = timeadd("${formatdate("YYYY-MM-DD", timestamp())}T00:00:00Z", "168h") 14 | key_size = 2048 15 | 16 | depends_on = [ 17 | azurerm_key_vault_access_policy.current_user 18 | ] 19 | 20 | lifecycle { 21 | ignore_changes = [expiration_date] 22 | } 23 | } 24 | 25 | resource "azurerm_key_vault_access_policy" "kms" { 26 | key_vault_id = azurerm_key_vault.des_vault.id 27 | object_id = azurerm_user_assigned_identity.test.principal_id 28 | tenant_id = azurerm_user_assigned_identity.test.tenant_id 29 | key_permissions = [ 30 | "Decrypt", 31 | "Encrypt", 32 | ] 33 | } 34 | 35 | resource 
"azurerm_role_assignment" "kms" { 36 | principal_id = azurerm_user_assigned_identity.test.principal_id 37 | scope = azurerm_key_vault.des_vault.id 38 | role_definition_name = "Key Vault Contributor" 39 | } 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. All rights reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /examples/named_cluster/disk_encryption_set.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_key_vault_key" "des_key" { 2 | key_opts = [ 3 | "decrypt", 4 | "encrypt", 5 | "sign", 6 | "unwrapKey", 7 | "verify", 8 | "wrapKey", 9 | ] 10 | key_type = "RSA-HSM" 11 | key_vault_id = azurerm_key_vault.des_vault.id 12 | name = "des-key" 13 | expiration_date = timeadd("${formatdate("YYYY-MM-DD", timestamp())}T00:00:00Z", "168h") 14 | key_size = 2048 15 | 16 | depends_on = [ 17 | azurerm_key_vault_access_policy.current_user 18 | ] 19 | 20 | lifecycle { 21 | ignore_changes = [expiration_date] 22 | } 23 | } 24 | 25 | resource "azurerm_disk_encryption_set" "des" { 26 | key_vault_key_id = azurerm_key_vault_key.des_key.id 27 | location = local.resource_group.location 28 | name = "des" 29 | resource_group_name = local.resource_group.name 30 | 31 | identity { 32 | type = "SystemAssigned" 33 | } 34 | } 35 | 36 | resource "azurerm_key_vault_access_policy" "des" { 37 | key_vault_id = azurerm_key_vault.des_vault.id 38 | object_id = azurerm_disk_encryption_set.des.identity[0].principal_id 39 | tenant_id = azurerm_disk_encryption_set.des.identity[0].tenant_id 40 | key_permissions = [ 41 | "Get", 42 | "WrapKey", 43 | "UnwrapKey" 44 | ] 45 | } 46 | -------------------------------------------------------------------------------- /examples/named_cluster_v4/disk_encryption_set.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_key_vault_key" "des_key" { 2 | key_opts = [ 3 | "decrypt", 4 | "encrypt", 5 | "sign", 6 | 
"unwrapKey", 7 | "verify", 8 | "wrapKey", 9 | ] 10 | key_type = "RSA-HSM" 11 | key_vault_id = azurerm_key_vault.des_vault.id 12 | name = "des-key" 13 | expiration_date = timeadd("${formatdate("YYYY-MM-DD", timestamp())}T00:00:00Z", "168h") 14 | key_size = 2048 15 | 16 | depends_on = [ 17 | azurerm_key_vault_access_policy.current_user 18 | ] 19 | 20 | lifecycle { 21 | ignore_changes = [expiration_date] 22 | } 23 | } 24 | 25 | resource "azurerm_disk_encryption_set" "des" { 26 | key_vault_key_id = azurerm_key_vault_key.des_key.id 27 | location = local.resource_group.location 28 | name = "des" 29 | resource_group_name = local.resource_group.name 30 | 31 | identity { 32 | type = "SystemAssigned" 33 | } 34 | } 35 | 36 | resource "azurerm_key_vault_access_policy" "des" { 37 | key_vault_id = azurerm_key_vault.des_vault.id 38 | object_id = azurerm_disk_encryption_set.des.identity[0].principal_id 39 | tenant_id = azurerm_disk_encryption_set.des.identity[0].tenant_id 40 | key_permissions = [ 41 | "Get", 42 | "WrapKey", 43 | "UnwrapKey" 44 | ] 45 | } 46 | -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "this" {} 2 | 3 | resource "azurerm_resource_group" "rg" { 4 | location = var.location 5 | name = "${var.resource_group_name}-${random_pet.this.id}" 6 | } 7 | 8 | resource "azurerm_virtual_network" "vnet" { 9 | address_space = ["192.168.0.0/16"] 10 | location = var.location 11 | name = "vnet" 12 | resource_group_name = azurerm_resource_group.rg.name 13 | } 14 | 15 | resource "azurerm_subnet" "subnet" { 16 | address_prefixes = ["192.168.0.0/24"] 17 | name = "subnet" 18 | resource_group_name = azurerm_resource_group.rg.name 19 | virtual_network_name = azurerm_virtual_network.vnet.name 20 | } 21 | 22 | resource "azurerm_user_assigned_identity" "main" { 23 | location = 
azurerm_resource_group.rg.location 24 | name = "uami-${var.kubernetes_cluster_name}" 25 | resource_group_name = azurerm_resource_group.rg.name 26 | } 27 | 28 | module "aks" { 29 | source = "../../" 30 | 31 | location = azurerm_resource_group.rg.location 32 | cluster_name = var.kubernetes_cluster_name 33 | prefix = var.kubernetes_cluster_name 34 | resource_group_name = azurerm_resource_group.rg.name 35 | identity_ids = [azurerm_user_assigned_identity.main.id] 36 | identity_type = "UserAssigned" 37 | vnet_subnet = { 38 | id = azurerm_subnet.subnet.id 39 | } 40 | network_contributor_role_assigned_subnet_ids = { 41 | vnet_subnet = azurerm_subnet.subnet.id 42 | } 43 | } -------------------------------------------------------------------------------- /examples/uai_and_assign_role_on_subnet_v4/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "this" {} 2 | 3 | resource "azurerm_resource_group" "rg" { 4 | location = var.location 5 | name = "${var.resource_group_name}-${random_pet.this.id}" 6 | } 7 | 8 | resource "azurerm_virtual_network" "vnet" { 9 | address_space = ["192.168.0.0/16"] 10 | location = var.location 11 | name = "vnet" 12 | resource_group_name = azurerm_resource_group.rg.name 13 | } 14 | 15 | resource "azurerm_subnet" "subnet" { 16 | address_prefixes = ["192.168.0.0/24"] 17 | name = "subnet" 18 | resource_group_name = azurerm_resource_group.rg.name 19 | virtual_network_name = azurerm_virtual_network.vnet.name 20 | } 21 | 22 | resource "azurerm_user_assigned_identity" "main" { 23 | location = azurerm_resource_group.rg.location 24 | name = "uami-${var.kubernetes_cluster_name}" 25 | resource_group_name = azurerm_resource_group.rg.name 26 | } 27 | 28 | module "aks" { 29 | source = "../../" 30 | 31 | location = azurerm_resource_group.rg.location 32 | cluster_name = var.kubernetes_cluster_name 33 | prefix = var.kubernetes_cluster_name 34 | resource_group_name = azurerm_resource_group.rg.name 35 | 
identity_ids = [azurerm_user_assigned_identity.main.id] 36 | identity_type = "UserAssigned" 37 | vnet_subnet = { 38 | id = azurerm_subnet.subnet.id 39 | } 40 | network_contributor_role_assigned_subnet_ids = { 41 | vnet_subnet = azurerm_subnet.subnet.id 42 | } 43 | } -------------------------------------------------------------------------------- /examples/startup/outputs.tf: -------------------------------------------------------------------------------- 1 | output "test_admin_client_certificate" { 2 | sensitive = true 3 | value = module.aks.admin_client_certificate 4 | } 5 | 6 | output "test_admin_client_key" { 7 | sensitive = true 8 | value = module.aks.admin_client_key 9 | } 10 | 11 | output "test_admin_cluster_ca_certificate" { 12 | sensitive = true 13 | value = module.aks.admin_client_certificate 14 | } 15 | 16 | output "test_admin_host" { 17 | sensitive = true 18 | value = module.aks.admin_host 19 | } 20 | 21 | output "test_admin_password" { 22 | sensitive = true 23 | value = module.aks.admin_password 24 | } 25 | 26 | output "test_admin_username" { 27 | sensitive = true 28 | value = module.aks.admin_username 29 | } 30 | 31 | output "test_aks_id" { 32 | value = module.aks.aks_id 33 | } 34 | 35 | output "test_client_certificate" { 36 | sensitive = true 37 | value = module.aks.client_certificate 38 | } 39 | 40 | output "test_client_key" { 41 | sensitive = true 42 | value = module.aks.client_key 43 | } 44 | 45 | output "test_cluster_ca_certificate" { 46 | sensitive = true 47 | value = module.aks.client_certificate 48 | } 49 | 50 | output "test_cluster_portal_fqdn" { 51 | value = module.aks.cluster_portal_fqdn 52 | } 53 | 54 | output "test_host" { 55 | sensitive = true 56 | value = module.aks.host 57 | } 58 | 59 | output "test_kube_raw" { 60 | sensitive = true 61 | value = module.aks.kube_config_raw 62 | } 63 | 64 | output "test_password" { 65 | sensitive = true 66 | value = module.aks.password 67 | } 68 | 69 | output "test_username" { 70 | sensitive = true 71 | 
value = module.aks.username 72 | } 73 | -------------------------------------------------------------------------------- /examples/startup_v4/outputs.tf: -------------------------------------------------------------------------------- 1 | output "test_admin_client_certificate" { 2 | sensitive = true 3 | value = module.aks.admin_client_certificate 4 | } 5 | 6 | output "test_admin_client_key" { 7 | sensitive = true 8 | value = module.aks.admin_client_key 9 | } 10 | 11 | output "test_admin_cluster_ca_certificate" { 12 | sensitive = true 13 | value = module.aks.admin_client_certificate 14 | } 15 | 16 | output "test_admin_host" { 17 | sensitive = true 18 | value = module.aks.admin_host 19 | } 20 | 21 | output "test_admin_password" { 22 | sensitive = true 23 | value = module.aks.admin_password 24 | } 25 | 26 | output "test_admin_username" { 27 | sensitive = true 28 | value = module.aks.admin_username 29 | } 30 | 31 | output "test_aks_id" { 32 | value = module.aks.aks_id 33 | } 34 | 35 | output "test_client_certificate" { 36 | sensitive = true 37 | value = module.aks.client_certificate 38 | } 39 | 40 | output "test_client_key" { 41 | sensitive = true 42 | value = module.aks.client_key 43 | } 44 | 45 | output "test_cluster_ca_certificate" { 46 | sensitive = true 47 | value = module.aks.client_certificate 48 | } 49 | 50 | output "test_cluster_portal_fqdn" { 51 | value = module.aks.cluster_portal_fqdn 52 | } 53 | 54 | output "test_host" { 55 | sensitive = true 56 | value = module.aks.host 57 | } 58 | 59 | output "test_kube_raw" { 60 | sensitive = true 61 | value = module.aks.kube_config_raw 62 | } 63 | 64 | output "test_password" { 65 | sensitive = true 66 | value = module.aks.password 67 | } 68 | 69 | output "test_username" { 70 | sensitive = true 71 | value = module.aks.username 72 | } 73 | -------------------------------------------------------------------------------- /examples/named_cluster/key_vault.tf: 
-------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "current" {} 2 | 3 | resource "random_string" "key_vault_prefix" { 4 | length = 6 5 | numeric = false 6 | special = false 7 | upper = false 8 | } 9 | 10 | module "public_ip" { 11 | source = "lonegunmanb/public-ip/lonegunmanb" 12 | version = "0.1.0" 13 | } 14 | 15 | locals { 16 | # We cannot use coalesce here because it's not short-circuit and public_ip's index will cause error 17 | public_ip = var.key_vault_firewall_bypass_ip_cidr == null ? module.public_ip.public_ip : var.key_vault_firewall_bypass_ip_cidr 18 | } 19 | 20 | resource "azurerm_key_vault" "des_vault" { 21 | location = local.resource_group.location 22 | name = "${random_string.key_vault_prefix.result}-des-keyvault" 23 | resource_group_name = local.resource_group.name 24 | sku_name = "premium" 25 | tenant_id = data.azurerm_client_config.current.tenant_id 26 | enabled_for_disk_encryption = true 27 | purge_protection_enabled = true 28 | soft_delete_retention_days = 7 29 | 30 | network_acls { 31 | bypass = "AzureServices" 32 | default_action = "Allow" 33 | ip_rules = [local.public_ip] 34 | } 35 | } 36 | 37 | resource "azurerm_key_vault_access_policy" "current_user" { 38 | key_vault_id = azurerm_key_vault.des_vault.id 39 | object_id = coalesce(var.managed_identity_principal_id, data.azurerm_client_config.current.object_id) 40 | tenant_id = data.azurerm_client_config.current.tenant_id 41 | key_permissions = [ 42 | "Get", 43 | "Create", 44 | "Delete", 45 | "GetRotationPolicy", 46 | "Recover", 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /examples/named_cluster_v4/key_vault.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "current" {} 2 | 3 | resource "random_string" "key_vault_prefix" { 4 | length = 6 5 | numeric = false 6 | special = false 7 | upper = false 8 | } 9 | 10 | 
module "public_ip" { 11 | source = "lonegunmanb/public-ip/lonegunmanb" 12 | version = "0.1.0" 13 | } 14 | 15 | locals { 16 | # We cannot use coalesce here because it's not short-circuit and public_ip's index will cause error 17 | public_ip = var.key_vault_firewall_bypass_ip_cidr == null ? module.public_ip.public_ip : var.key_vault_firewall_bypass_ip_cidr 18 | } 19 | 20 | resource "azurerm_key_vault" "des_vault" { 21 | location = local.resource_group.location 22 | name = "${random_string.key_vault_prefix.result}-des-keyvault" 23 | resource_group_name = local.resource_group.name 24 | sku_name = "premium" 25 | tenant_id = data.azurerm_client_config.current.tenant_id 26 | enabled_for_disk_encryption = true 27 | purge_protection_enabled = true 28 | soft_delete_retention_days = 7 29 | 30 | network_acls { 31 | bypass = "AzureServices" 32 | default_action = "Allow" 33 | ip_rules = [local.public_ip] 34 | } 35 | } 36 | 37 | resource "azurerm_key_vault_access_policy" "current_user" { 38 | key_vault_id = azurerm_key_vault.des_vault.id 39 | object_id = coalesce(var.managed_identity_principal_id, data.azurerm_client_config.current.object_id) 40 | tenant_id = data.azurerm_client_config.current.tenant_id 41 | key_permissions = [ 42 | "Get", 43 | "Create", 44 | "Delete", 45 | "GetRotationPolicy", 46 | "Recover", 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /examples/without_monitor/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | count = var.create_resource_group ? 1 : 0 7 | 8 | location = var.location 9 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 10 | } 11 | 12 | locals { 13 | resource_group = { 14 | name = var.create_resource_group ? 
azurerm_resource_group.main[0].name : var.resource_group_name 15 | location = var.location 16 | } 17 | } 18 | 19 | resource "azurerm_virtual_network" "test" { 20 | address_space = ["10.52.0.0/16"] 21 | location = local.resource_group.location 22 | name = "${random_id.prefix.hex}-vn" 23 | resource_group_name = local.resource_group.name 24 | } 25 | 26 | resource "azurerm_subnet" "test" { 27 | address_prefixes = ["10.52.0.0/24"] 28 | name = "${random_id.prefix.hex}-sn" 29 | resource_group_name = local.resource_group.name 30 | virtual_network_name = azurerm_virtual_network.test.name 31 | } 32 | 33 | resource "azurerm_dns_zone" "aks_web_app_routing" { 34 | name = "fakeaks.com" 35 | resource_group_name = local.resource_group.name 36 | } 37 | 38 | module "aks_without_monitor" { 39 | source = "../.." 40 | 41 | prefix = "prefix2-${random_id.prefix.hex}" 42 | resource_group_name = local.resource_group.name 43 | location = local.resource_group.location 44 | admin_username = null 45 | azure_policy_enabled = true 46 | disk_encryption_set_id = azurerm_disk_encryption_set.des.id 47 | #checkov:skip=CKV_AZURE_4:The logging is turn off for demo purpose. DO NOT DO THIS IN PRODUCTION ENVIRONMENT! 48 | log_analytics_workspace_enabled = false 49 | net_profile_pod_cidr = "10.1.0.0/16" 50 | private_cluster_enabled = true 51 | role_based_access_control_enabled = true 52 | web_app_routing = { 53 | dns_zone_ids = [azurerm_dns_zone.aks_web_app_routing.id] 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /examples/without_monitor_v4/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | count = var.create_resource_group ? 
1 : 0 7 | 8 | location = var.location 9 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 10 | } 11 | 12 | locals { 13 | resource_group = { 14 | name = var.create_resource_group ? azurerm_resource_group.main[0].name : var.resource_group_name 15 | location = var.location 16 | } 17 | } 18 | 19 | resource "azurerm_virtual_network" "test" { 20 | address_space = ["10.52.0.0/16"] 21 | location = local.resource_group.location 22 | name = "${random_id.prefix.hex}-vn" 23 | resource_group_name = local.resource_group.name 24 | } 25 | 26 | resource "azurerm_subnet" "test" { 27 | address_prefixes = ["10.52.0.0/24"] 28 | name = "${random_id.prefix.hex}-sn" 29 | resource_group_name = local.resource_group.name 30 | virtual_network_name = azurerm_virtual_network.test.name 31 | } 32 | 33 | resource "azurerm_dns_zone" "aks_web_app_routing" { 34 | name = "fakeaks.com" 35 | resource_group_name = local.resource_group.name 36 | } 37 | 38 | module "aks_without_monitor" { 39 | source = "../.." 40 | 41 | prefix = "prefix2-${random_id.prefix.hex}" 42 | resource_group_name = local.resource_group.name 43 | location = local.resource_group.location 44 | admin_username = null 45 | azure_policy_enabled = true 46 | disk_encryption_set_id = azurerm_disk_encryption_set.des.id 47 | #checkov:skip=CKV_AZURE_4:The logging is turn off for demo purpose. DO NOT DO THIS IN PRODUCTION ENVIRONMENT! 
48 | log_analytics_workspace_enabled = false 49 | net_profile_pod_cidr = "10.1.0.0/16" 50 | private_cluster_enabled = true 51 | role_based_access_control_enabled = true 52 | web_app_routing = { 53 | dns_zone_ids = [azurerm_dns_zone.aks_web_app_routing.id] 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /examples/with_acr/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | count = var.create_resource_group ? 1 : 0 7 | 8 | location = var.location 9 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 10 | } 11 | 12 | locals { 13 | resource_group = { 14 | name = var.create_resource_group ? azurerm_resource_group.main[0].name : var.resource_group_name 15 | location = var.location 16 | } 17 | } 18 | 19 | resource "azurerm_virtual_network" "test" { 20 | address_space = ["10.52.0.0/16"] 21 | location = local.resource_group.location 22 | name = "${random_id.prefix.hex}-vn" 23 | resource_group_name = local.resource_group.name 24 | } 25 | 26 | resource "azurerm_subnet" "test" { 27 | address_prefixes = ["10.52.0.0/24"] 28 | name = "${random_id.prefix.hex}-sn" 29 | resource_group_name = local.resource_group.name 30 | virtual_network_name = azurerm_virtual_network.test.name 31 | } 32 | 33 | resource "random_string" "acr_suffix" { 34 | length = 8 35 | numeric = true 36 | special = false 37 | upper = false 38 | } 39 | 40 | resource "azurerm_container_registry" "example" { 41 | location = local.resource_group.location 42 | name = "aksacrtest${random_string.acr_suffix.result}" 43 | resource_group_name = local.resource_group.name 44 | sku = "Premium" 45 | } 46 | 47 | module "aks" { 48 | source = "../.." 
49 | 50 | prefix = "prefix-${random_id.prefix.hex}" 51 | resource_group_name = local.resource_group.name 52 | location = local.resource_group.location 53 | kubernetes_version = "1.30" # don't specify the patch version! 54 | automatic_channel_upgrade = "patch" 55 | attached_acr_id_map = { 56 | example = azurerm_container_registry.example.id 57 | } 58 | network_plugin = "azure" 59 | network_policy = "azure" 60 | os_disk_size_gb = 60 61 | rbac_aad = false 62 | sku_tier = "Standard" 63 | vnet_subnet = { 64 | id = azurerm_subnet.test.id 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /examples/with_acr_v4/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | count = var.create_resource_group ? 1 : 0 7 | 8 | location = var.location 9 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 10 | } 11 | 12 | locals { 13 | resource_group = { 14 | name = var.create_resource_group ? 
azurerm_resource_group.main[0].name : var.resource_group_name 15 | location = var.location 16 | } 17 | } 18 | 19 | resource "azurerm_virtual_network" "test" { 20 | address_space = ["10.52.0.0/16"] 21 | location = local.resource_group.location 22 | name = "${random_id.prefix.hex}-vn" 23 | resource_group_name = local.resource_group.name 24 | } 25 | 26 | resource "azurerm_subnet" "test" { 27 | address_prefixes = ["10.52.0.0/24"] 28 | name = "${random_id.prefix.hex}-sn" 29 | resource_group_name = local.resource_group.name 30 | virtual_network_name = azurerm_virtual_network.test.name 31 | } 32 | 33 | resource "random_string" "acr_suffix" { 34 | length = 8 35 | numeric = true 36 | special = false 37 | upper = false 38 | } 39 | 40 | resource "azurerm_container_registry" "example" { 41 | location = local.resource_group.location 42 | name = "aksacrtest${random_string.acr_suffix.result}" 43 | resource_group_name = local.resource_group.name 44 | sku = "Premium" 45 | } 46 | 47 | module "aks" { 48 | source = "../.." 49 | 50 | prefix = "prefix-${random_id.prefix.hex}" 51 | resource_group_name = local.resource_group.name 52 | location = local.resource_group.location 53 | kubernetes_version = "1.30" # don't specify the patch version! 54 | automatic_channel_upgrade = "patch" 55 | attached_acr_id_map = { 56 | example = azurerm_container_registry.example.id 57 | } 58 | network_plugin = "azure" 59 | network_policy = "azure" 60 | os_disk_size_gb = 60 61 | rbac_aad = false 62 | sku_tier = "Standard" 63 | vnet_subnet = { 64 | id = azurerm_subnet.test.id 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /examples/multiple_node_pools/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | count = var.create_resource_group ? 
1 : 0 7 | 8 | location = var.location 9 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 10 | } 11 | 12 | locals { 13 | resource_group = { 14 | name = var.create_resource_group ? azurerm_resource_group.main[0].name : var.resource_group_name 15 | location = var.location 16 | } 17 | } 18 | 19 | resource "azurerm_virtual_network" "test" { 20 | address_space = ["10.52.0.0/16"] 21 | location = local.resource_group.location 22 | name = "${random_id.prefix.hex}-vn" 23 | resource_group_name = local.resource_group.name 24 | } 25 | 26 | resource "azurerm_subnet" "default_node_pool_subnet" { 27 | address_prefixes = ["10.52.0.0/24"] 28 | name = "${random_id.prefix.hex}-defaultsn" 29 | resource_group_name = local.resource_group.name 30 | virtual_network_name = azurerm_virtual_network.test.name 31 | } 32 | 33 | resource "azurerm_subnet" "node_pool_subnet" { 34 | count = 3 35 | address_prefixes = ["10.52.${count.index + 1}.0/24"] 36 | name = "${random_id.prefix.hex}-sn${count.index}" 37 | resource_group_name = local.resource_group.name 38 | virtual_network_name = azurerm_virtual_network.test.name 39 | } 40 | 41 | locals { 42 | nodes = { 43 | for i in range(3) : "worker${i}" => { 44 | name = substr("worker${i}${random_id.prefix.hex}", 0, 8) 45 | vm_size = "Standard_D2s_v3" 46 | node_count = 1 47 | vnet_subnet = { id = azurerm_subnet.node_pool_subnet[i].id } 48 | create_before_destroy = i % 2 == 0 49 | } 50 | } 51 | } 52 | 53 | module "aks" { 54 | source = "../.." 
55 | 56 | prefix = "prefix-${random_id.prefix.hex}" 57 | resource_group_name = local.resource_group.name 58 | location = local.resource_group.location 59 | os_disk_size_gb = 60 60 | rbac_aad = true 61 | sku_tier = "Standard" 62 | private_cluster_enabled = false 63 | vnet_subnet = { 64 | id = azurerm_subnet.default_node_pool_subnet.id 65 | } 66 | node_pools = local.nodes 67 | kubernetes_version = var.kubernetes_version 68 | orchestrator_version = var.orchestrator_version 69 | create_role_assignment_network_contributor = true 70 | } 71 | -------------------------------------------------------------------------------- /examples/multiple_node_pools_v4/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | count = var.create_resource_group ? 1 : 0 7 | 8 | location = var.location 9 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 10 | } 11 | 12 | locals { 13 | resource_group = { 14 | name = var.create_resource_group ? 
azurerm_resource_group.main[0].name : var.resource_group_name 15 | location = var.location 16 | } 17 | } 18 | 19 | resource "azurerm_virtual_network" "test" { 20 | address_space = ["10.52.0.0/16"] 21 | location = local.resource_group.location 22 | name = "${random_id.prefix.hex}-vn" 23 | resource_group_name = local.resource_group.name 24 | } 25 | 26 | resource "azurerm_subnet" "default_node_pool_subnet" { 27 | address_prefixes = ["10.52.0.0/24"] 28 | name = "${random_id.prefix.hex}-defaultsn" 29 | resource_group_name = local.resource_group.name 30 | virtual_network_name = azurerm_virtual_network.test.name 31 | } 32 | 33 | resource "azurerm_subnet" "node_pool_subnet" { 34 | count = 3 35 | address_prefixes = ["10.52.${count.index + 1}.0/24"] 36 | name = "${random_id.prefix.hex}-sn${count.index}" 37 | resource_group_name = local.resource_group.name 38 | virtual_network_name = azurerm_virtual_network.test.name 39 | } 40 | 41 | locals { 42 | nodes = { 43 | for i in range(3) : "worker${i}" => { 44 | name = substr("worker${i}${random_id.prefix.hex}", 0, 8) 45 | vm_size = "Standard_D2s_v3" 46 | node_count = 1 47 | vnet_subnet = { id = azurerm_subnet.node_pool_subnet[i].id } 48 | create_before_destroy = i % 2 == 0 49 | } 50 | } 51 | } 52 | 53 | module "aks" { 54 | source = "../.." 
55 | 56 | prefix = "prefix-${random_id.prefix.hex}" 57 | resource_group_name = local.resource_group.name 58 | location = local.resource_group.location 59 | os_disk_size_gb = 60 60 | rbac_aad = true 61 | sku_tier = "Standard" 62 | private_cluster_enabled = false 63 | vnet_subnet = { 64 | id = azurerm_subnet.default_node_pool_subnet.id 65 | } 66 | node_pools = local.nodes 67 | kubernetes_version = var.kubernetes_version 68 | orchestrator_version = var.orchestrator_version 69 | create_role_assignment_network_contributor = true 70 | } 71 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). 
If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
40 | 41 | 42 | -------------------------------------------------------------------------------- /examples/application_gateway_ingress/k8s_workload.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_namespace_v1" "example" { 2 | metadata { 3 | name = "example" 4 | } 5 | 6 | depends_on = [module.aks] 7 | } 8 | 9 | resource "kubernetes_pod" "aspnet_app" { 10 | #checkov:skip=CKV_K8S_8:We don't need readiness probe for this simple example. 11 | #checkov:skip=CKV_K8S_9:We don't need readiness probe for this simple example. 12 | #checkov:skip=CKV_K8S_22:readOnlyRootFilesystem would block our pod from working 13 | #checkov:skip=CKV_K8S_28:capabilities would block our pod from working 14 | metadata { 15 | labels = { 16 | app = "aspnetapp" 17 | } 18 | name = "aspnetapp" 19 | namespace = kubernetes_namespace_v1.example.metadata[0].name 20 | } 21 | spec { 22 | container { 23 | name = "aspnetapp-image" 24 | image = "mcr.microsoft.com/dotnet/samples@sha256:7070894cc10d2b1e68e72057cca22040c5984cfae2ec3e079e34cf0a4da7fcea" 25 | image_pull_policy = "Always" 26 | 27 | port { 28 | container_port = 80 29 | protocol = "TCP" 30 | } 31 | resources { 32 | limits = { 33 | cpu = "250m" 34 | memory = "256Mi" 35 | } 36 | requests = { 37 | cpu = "250m" 38 | memory = "256Mi" 39 | } 40 | } 41 | security_context {} 42 | } 43 | } 44 | } 45 | 46 | resource "kubernetes_service" "svc" { 47 | metadata { 48 | name = "aspnetapp" 49 | namespace = kubernetes_namespace_v1.example.metadata[0].name 50 | } 51 | spec { 52 | selector = { 53 | app = "aspnetapp" 54 | } 55 | 56 | port { 57 | port = 80 58 | protocol = "TCP" 59 | target_port = 80 60 | } 61 | } 62 | } 63 | 64 | resource "kubernetes_ingress_v1" "ing" { 65 | metadata { 66 | annotations = { 67 | "kubernetes.io/ingress.class" : "azure/application-gateway" 68 | } 69 | name = "aspnetapp" 70 | namespace = kubernetes_namespace_v1.example.metadata[0].name 71 | } 72 | spec { 73 | rule { 74 | http { 75 
| path { 76 | path = "/" 77 | path_type = "Exact" 78 | 79 | backend { 80 | service { 81 | name = "aspnetapp" 82 | 83 | port { 84 | number = 80 85 | } 86 | } 87 | } 88 | } 89 | } 90 | } 91 | } 92 | 93 | depends_on = [ 94 | module.aks, 95 | ] 96 | } 97 | 98 | resource "time_sleep" "wait_for_ingress" { 99 | create_duration = "15m" 100 | 101 | depends_on = [kubernetes_ingress_v1.ing] 102 | } 103 | 104 | data "kubernetes_ingress_v1" "ing" { 105 | metadata { 106 | name = "aspnetapp" 107 | namespace = kubernetes_namespace_v1.example.metadata[0].name 108 | } 109 | 110 | depends_on = [time_sleep.wait_for_ingress] 111 | } -------------------------------------------------------------------------------- /examples/application_gateway_ingress_v4/k8s_workload.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_namespace_v1" "example" { 2 | metadata { 3 | name = "example" 4 | } 5 | 6 | depends_on = [module.aks] 7 | } 8 | 9 | resource "kubernetes_pod" "aspnet_app" { 10 | #checkov:skip=CKV_K8S_8:We don't need readiness probe for this simple example. 11 | #checkov:skip=CKV_K8S_9:We don't need readiness probe for this simple example. 
12 | #checkov:skip=CKV_K8S_22:readOnlyRootFilesystem would block our pod from working 13 | #checkov:skip=CKV_K8S_28:capabilities would block our pod from working 14 | metadata { 15 | labels = { 16 | app = "aspnetapp" 17 | } 18 | name = "aspnetapp" 19 | namespace = kubernetes_namespace_v1.example.metadata[0].name 20 | } 21 | spec { 22 | container { 23 | name = "aspnetapp-image" 24 | image = "mcr.microsoft.com/dotnet/samples@sha256:7070894cc10d2b1e68e72057cca22040c5984cfae2ec3e079e34cf0a4da7fcea" 25 | image_pull_policy = "Always" 26 | 27 | port { 28 | container_port = 80 29 | protocol = "TCP" 30 | } 31 | resources { 32 | limits = { 33 | cpu = "250m" 34 | memory = "256Mi" 35 | } 36 | requests = { 37 | cpu = "250m" 38 | memory = "256Mi" 39 | } 40 | } 41 | security_context {} 42 | } 43 | } 44 | } 45 | 46 | resource "kubernetes_service" "svc" { 47 | metadata { 48 | name = "aspnetapp" 49 | namespace = kubernetes_namespace_v1.example.metadata[0].name 50 | } 51 | spec { 52 | selector = { 53 | app = "aspnetapp" 54 | } 55 | 56 | port { 57 | port = 80 58 | protocol = "TCP" 59 | target_port = 80 60 | } 61 | } 62 | } 63 | 64 | resource "kubernetes_ingress_v1" "ing" { 65 | metadata { 66 | annotations = { 67 | "kubernetes.io/ingress.class" : "azure/application-gateway" 68 | } 69 | name = "aspnetapp" 70 | namespace = kubernetes_namespace_v1.example.metadata[0].name 71 | } 72 | spec { 73 | rule { 74 | http { 75 | path { 76 | path = "/" 77 | path_type = "Exact" 78 | 79 | backend { 80 | service { 81 | name = "aspnetapp" 82 | 83 | port { 84 | number = 80 85 | } 86 | } 87 | } 88 | } 89 | } 90 | } 91 | } 92 | 93 | depends_on = [ 94 | module.aks, 95 | ] 96 | } 97 | 98 | resource "time_sleep" "wait_for_ingress" { 99 | create_duration = "15m" 100 | 101 | depends_on = [kubernetes_ingress_v1.ing] 102 | } 103 | 104 | data "kubernetes_ingress_v1" "ing" { 105 | metadata { 106 | name = "aspnetapp" 107 | namespace = kubernetes_namespace_v1.example.metadata[0].name 108 | } 109 | 110 | depends_on = 
[time_sleep.wait_for_ingress] 111 | } -------------------------------------------------------------------------------- /examples/startup/disk_encryption_set.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "current" {} 2 | 3 | resource "random_string" "key_vault_prefix" { 4 | length = 6 5 | numeric = false 6 | special = false 7 | upper = false 8 | } 9 | 10 | module "public_ip" { 11 | source = "lonegunmanb/public-ip/lonegunmanb" 12 | version = "0.1.0" 13 | } 14 | 15 | locals { 16 | # We cannot use coalesce here because it's not short-circuit and public_ip's index will cause error 17 | public_ip = var.key_vault_firewall_bypass_ip_cidr == null ? module.public_ip.public_ip : var.key_vault_firewall_bypass_ip_cidr 18 | } 19 | 20 | resource "azurerm_key_vault" "des_vault" { 21 | location = local.resource_group.location 22 | name = "${random_string.key_vault_prefix.result}-des-keyvault" 23 | resource_group_name = local.resource_group.name 24 | sku_name = "premium" 25 | tenant_id = data.azurerm_client_config.current.tenant_id 26 | enabled_for_disk_encryption = true 27 | purge_protection_enabled = true 28 | soft_delete_retention_days = 7 29 | 30 | network_acls { 31 | bypass = "AzureServices" 32 | default_action = "Deny" 33 | ip_rules = [local.public_ip] 34 | } 35 | } 36 | 37 | resource "azurerm_key_vault_key" "des_key" { 38 | key_opts = [ 39 | "decrypt", 40 | "encrypt", 41 | "sign", 42 | "unwrapKey", 43 | "verify", 44 | "wrapKey", 45 | ] 46 | key_type = "RSA-HSM" 47 | key_vault_id = azurerm_key_vault.des_vault.id 48 | name = "des-key" 49 | expiration_date = timeadd("${formatdate("YYYY-MM-DD", timestamp())}T00:00:00Z", "168h") 50 | key_size = 2048 51 | 52 | depends_on = [ 53 | azurerm_key_vault_access_policy.current_user 54 | ] 55 | 56 | lifecycle { 57 | ignore_changes = [expiration_date] 58 | } 59 | } 60 | 61 | resource "azurerm_disk_encryption_set" "des" { 62 | location = local.resource_group.location 63 | 
name = "des" 64 | resource_group_name = local.resource_group.name 65 | key_vault_key_id = azurerm_key_vault_key.des_key.id 66 | 67 | identity { 68 | type = "SystemAssigned" 69 | } 70 | } 71 | 72 | resource "azurerm_key_vault_access_policy" "des" { 73 | key_vault_id = azurerm_key_vault.des_vault.id 74 | object_id = azurerm_disk_encryption_set.des.identity[0].principal_id 75 | tenant_id = azurerm_disk_encryption_set.des.identity[0].tenant_id 76 | key_permissions = [ 77 | "Get", 78 | "WrapKey", 79 | "UnwrapKey" 80 | ] 81 | } 82 | 83 | resource "azurerm_key_vault_access_policy" "current_user" { 84 | key_vault_id = azurerm_key_vault.des_vault.id 85 | object_id = coalesce(var.managed_identity_principal_id, data.azurerm_client_config.current.object_id) 86 | tenant_id = data.azurerm_client_config.current.tenant_id 87 | key_permissions = [ 88 | "Get", 89 | "Create", 90 | "Delete", 91 | "GetRotationPolicy", 92 | "Recover", 93 | ] 94 | } -------------------------------------------------------------------------------- /examples/startup_v4/disk_encryption_set.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "current" {} 2 | 3 | resource "random_string" "key_vault_prefix" { 4 | length = 6 5 | numeric = false 6 | special = false 7 | upper = false 8 | } 9 | 10 | module "public_ip" { 11 | source = "lonegunmanb/public-ip/lonegunmanb" 12 | version = "0.1.0" 13 | } 14 | 15 | locals { 16 | # We cannot use coalesce here because it's not short-circuit and public_ip's index will cause error 17 | public_ip = var.key_vault_firewall_bypass_ip_cidr == null ? 
module.public_ip.public_ip : var.key_vault_firewall_bypass_ip_cidr 18 | } 19 | 20 | resource "azurerm_key_vault" "des_vault" { 21 | location = local.resource_group.location 22 | name = "${random_string.key_vault_prefix.result}-des-keyvault" 23 | resource_group_name = local.resource_group.name 24 | sku_name = "premium" 25 | tenant_id = data.azurerm_client_config.current.tenant_id 26 | enabled_for_disk_encryption = true 27 | purge_protection_enabled = true 28 | soft_delete_retention_days = 7 29 | 30 | network_acls { 31 | bypass = "AzureServices" 32 | default_action = "Deny" 33 | ip_rules = [local.public_ip] 34 | } 35 | } 36 | 37 | resource "azurerm_key_vault_key" "des_key" { 38 | key_opts = [ 39 | "decrypt", 40 | "encrypt", 41 | "sign", 42 | "unwrapKey", 43 | "verify", 44 | "wrapKey", 45 | ] 46 | key_type = "RSA-HSM" 47 | key_vault_id = azurerm_key_vault.des_vault.id 48 | name = "des-key" 49 | expiration_date = timeadd("${formatdate("YYYY-MM-DD", timestamp())}T00:00:00Z", "168h") 50 | key_size = 2048 51 | 52 | depends_on = [ 53 | azurerm_key_vault_access_policy.current_user 54 | ] 55 | 56 | lifecycle { 57 | ignore_changes = [expiration_date] 58 | } 59 | } 60 | 61 | resource "azurerm_disk_encryption_set" "des" { 62 | location = local.resource_group.location 63 | name = "des" 64 | resource_group_name = local.resource_group.name 65 | key_vault_key_id = azurerm_key_vault_key.des_key.id 66 | 67 | identity { 68 | type = "SystemAssigned" 69 | } 70 | } 71 | 72 | resource "azurerm_key_vault_access_policy" "des" { 73 | key_vault_id = azurerm_key_vault.des_vault.id 74 | object_id = azurerm_disk_encryption_set.des.identity[0].principal_id 75 | tenant_id = azurerm_disk_encryption_set.des.identity[0].tenant_id 76 | key_permissions = [ 77 | "Get", 78 | "WrapKey", 79 | "UnwrapKey" 80 | ] 81 | } 82 | 83 | resource "azurerm_key_vault_access_policy" "current_user" { 84 | key_vault_id = azurerm_key_vault.des_vault.id 85 | object_id = coalesce(var.managed_identity_principal_id, 
data.azurerm_client_config.current.object_id) 86 | tenant_id = data.azurerm_client_config.current.tenant_id 87 | key_permissions = [ 88 | "Get", 89 | "Create", 90 | "Delete", 91 | "GetRotationPolicy", 92 | "Recover", 93 | ] 94 | } -------------------------------------------------------------------------------- /examples/without_monitor/disk_encryption_set.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "current" {} 2 | 3 | resource "random_string" "key_vault_prefix" { 4 | length = 6 5 | numeric = false 6 | special = false 7 | upper = false 8 | } 9 | 10 | module "public_ip" { 11 | count = var.key_vault_firewall_bypass_ip_cidr == null ? 1 : 0 12 | 13 | source = "lonegunmanb/public-ip/lonegunmanb" 14 | version = "0.1.0" 15 | } 16 | 17 | locals { 18 | # We cannot use coalesce here because it's not short-circuit and public_ip's index will cause error 19 | public_ip = try(module.public_ip[0].public_ip, var.key_vault_firewall_bypass_ip_cidr) 20 | } 21 | 22 | resource "azurerm_key_vault" "des_vault" { 23 | location = local.resource_group.location 24 | name = "${random_string.key_vault_prefix.result}-des-keyvault" 25 | resource_group_name = local.resource_group.name 26 | sku_name = "premium" 27 | tenant_id = data.azurerm_client_config.current.tenant_id 28 | enabled_for_disk_encryption = true 29 | purge_protection_enabled = true 30 | soft_delete_retention_days = 7 31 | 32 | network_acls { 33 | bypass = "AzureServices" 34 | default_action = "Deny" 35 | ip_rules = [local.public_ip] 36 | } 37 | } 38 | 39 | resource "azurerm_key_vault_key" "des_key" { 40 | key_opts = [ 41 | "decrypt", 42 | "encrypt", 43 | "sign", 44 | "unwrapKey", 45 | "verify", 46 | "wrapKey", 47 | ] 48 | key_type = "RSA-HSM" 49 | key_vault_id = azurerm_key_vault.des_vault.id 50 | name = "des-key" 51 | expiration_date = timeadd("${formatdate("YYYY-MM-DD", timestamp())}T00:00:00Z", "168h") 52 | key_size = 2048 53 | 54 | depends_on = [ 55 | 
azurerm_key_vault_access_policy.current_user 56 | ] 57 | 58 | lifecycle { 59 | ignore_changes = [expiration_date] 60 | } 61 | } 62 | 63 | resource "azurerm_disk_encryption_set" "des" { 64 | key_vault_key_id = azurerm_key_vault_key.des_key.id 65 | location = local.resource_group.location 66 | name = "des" 67 | resource_group_name = local.resource_group.name 68 | 69 | identity { 70 | type = "SystemAssigned" 71 | } 72 | } 73 | 74 | resource "azurerm_key_vault_access_policy" "des" { 75 | key_vault_id = azurerm_key_vault.des_vault.id 76 | object_id = azurerm_disk_encryption_set.des.identity[0].principal_id 77 | tenant_id = azurerm_disk_encryption_set.des.identity[0].tenant_id 78 | key_permissions = [ 79 | "Get", 80 | "WrapKey", 81 | "UnwrapKey" 82 | ] 83 | } 84 | 85 | resource "azurerm_key_vault_access_policy" "current_user" { 86 | key_vault_id = azurerm_key_vault.des_vault.id 87 | object_id = coalesce(var.managed_identity_principal_id, data.azurerm_client_config.current.object_id) 88 | tenant_id = data.azurerm_client_config.current.tenant_id 89 | key_permissions = [ 90 | "Get", 91 | "Create", 92 | "Delete", 93 | "GetRotationPolicy", 94 | "Recover", 95 | ] 96 | } -------------------------------------------------------------------------------- /examples/without_monitor_v4/disk_encryption_set.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "current" {} 2 | 3 | resource "random_string" "key_vault_prefix" { 4 | length = 6 5 | numeric = false 6 | special = false 7 | upper = false 8 | } 9 | 10 | module "public_ip" { 11 | count = var.key_vault_firewall_bypass_ip_cidr == null ? 
1 : 0 12 | 13 | source = "lonegunmanb/public-ip/lonegunmanb" 14 | version = "0.1.0" 15 | } 16 | 17 | locals { 18 | # We cannot use coalesce here because it's not short-circuit and public_ip's index will cause error 19 | public_ip = try(module.public_ip[0].public_ip, var.key_vault_firewall_bypass_ip_cidr) 20 | } 21 | 22 | resource "azurerm_key_vault" "des_vault" { 23 | location = local.resource_group.location 24 | name = "${random_string.key_vault_prefix.result}-des-keyvault" 25 | resource_group_name = local.resource_group.name 26 | sku_name = "premium" 27 | tenant_id = data.azurerm_client_config.current.tenant_id 28 | enabled_for_disk_encryption = true 29 | purge_protection_enabled = true 30 | soft_delete_retention_days = 7 31 | 32 | network_acls { 33 | bypass = "AzureServices" 34 | default_action = "Deny" 35 | ip_rules = [local.public_ip] 36 | } 37 | } 38 | 39 | resource "azurerm_key_vault_key" "des_key" { 40 | key_opts = [ 41 | "decrypt", 42 | "encrypt", 43 | "sign", 44 | "unwrapKey", 45 | "verify", 46 | "wrapKey", 47 | ] 48 | key_type = "RSA-HSM" 49 | key_vault_id = azurerm_key_vault.des_vault.id 50 | name = "des-key" 51 | expiration_date = timeadd("${formatdate("YYYY-MM-DD", timestamp())}T00:00:00Z", "168h") 52 | key_size = 2048 53 | 54 | depends_on = [ 55 | azurerm_key_vault_access_policy.current_user 56 | ] 57 | 58 | lifecycle { 59 | ignore_changes = [expiration_date] 60 | } 61 | } 62 | 63 | resource "azurerm_disk_encryption_set" "des" { 64 | key_vault_key_id = azurerm_key_vault_key.des_key.id 65 | location = local.resource_group.location 66 | name = "des" 67 | resource_group_name = local.resource_group.name 68 | 69 | identity { 70 | type = "SystemAssigned" 71 | } 72 | } 73 | 74 | resource "azurerm_key_vault_access_policy" "des" { 75 | key_vault_id = azurerm_key_vault.des_vault.id 76 | object_id = azurerm_disk_encryption_set.des.identity[0].principal_id 77 | tenant_id = azurerm_disk_encryption_set.des.identity[0].tenant_id 78 | key_permissions = [ 79 | "Get", 
80 | "WrapKey", 81 | "UnwrapKey" 82 | ] 83 | } 84 | 85 | resource "azurerm_key_vault_access_policy" "current_user" { 86 | key_vault_id = azurerm_key_vault.des_vault.id 87 | object_id = coalesce(var.managed_identity_principal_id, data.azurerm_client_config.current.object_id) 88 | tenant_id = data.azurerm_client_config.current.tenant_id 89 | key_permissions = [ 90 | "Get", 91 | "Create", 92 | "Delete", 93 | "GetRotationPolicy", 94 | "Recover", 95 | ] 96 | } -------------------------------------------------------------------------------- /tfvmmakefile: -------------------------------------------------------------------------------- 1 | REMOTE_SCRIPT := "https://raw.githubusercontent.com/Azure/tfmod-scaffold/main/scripts" 2 | 3 | fmt: 4 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fmt.sh" | bash 5 | 6 | fumpt: 7 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumpt.sh" | bash 8 | 9 | gosec: 10 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gosec.sh" | bash 11 | 12 | tffmt: 13 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/tffmt.sh" | bash 14 | 15 | tffmtcheck: 16 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-fmt.sh" | bash 17 | 18 | tfvalidatecheck: 19 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-validate.sh" | bash 20 | 21 | terrafmtcheck: 22 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt-check.sh" | bash 23 | 24 | gofmtcheck: 25 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gofmtcheck.sh" | bash 26 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumptcheck.sh" | bash 27 | 28 | golint: 29 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-golangci-lint.sh" | bash 30 | 31 | tflint: 32 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-tflint.sh" | bash 33 | 34 | lint: golint tflint gosec 35 | 36 | 
checkovcheck: 37 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovcheck.sh" | bash 38 | 39 | checkovplancheck: 40 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovplancheck.sh" | bash 41 | 42 | fmtcheck: gofmtcheck tfvalidatecheck tffmtcheck terrafmtcheck 43 | 44 | pr-check: depscheck fmtcheck lint unit-test checkovcheck 45 | 46 | unit-test: 47 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-unit-test.sh" | bash 48 | 49 | e2e-test: 50 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-e2e-test.sh" | bash 51 | 52 | version-upgrade-test: 53 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/version-upgrade-test.sh" | bash 54 | 55 | terrafmt: 56 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt.sh" | bash 57 | 58 | pre-commit: tffmt terrafmt depsensure fmt fumpt generate 59 | 60 | depsensure: 61 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-ensure.sh" | bash 62 | 63 | depscheck: 64 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-check.sh" | bash 65 | 66 | generate: 67 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/generate.sh" | bash 68 | 69 | gencheck: 70 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gencheck.sh" | bash 71 | 72 | yor-tag: 73 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/yor-tag.sh" | bash 74 | 75 | autofix: 76 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/autofix.sh" | bash 77 | 78 | test: fmtcheck 79 | @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-gradually-deprecated.sh" | bash 80 | @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-test.sh" | bash 81 | 82 | build-test: 83 | curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/build-test.sh" | bash 84 | 85 | .PHONY: fmt 
fmtcheck pr-check fumpt gosec tffmt tffmtcheck tfvalidatecheck terrafmtcheck gofmtcheck golint tflint lint checkovcheck checkovplancheck unit-test e2e-test version-upgrade-test terrafmt pre-commit depsensure depscheck generate gencheck yor-tag autofix test build-test -------------------------------------------------------------------------------- /NoticeOnUpgradeTov10.0.md: -------------------------------------------------------------------------------- 1 | # Notice on Upgrade to v10.x 2 | 3 | ## AzAPI provider version constraint has been updated to `>=2.0, < 3.0`. 4 | 5 | ## [`var.web_app_routing` type change](https://github.com/Azure/terraform-azurerm-aks/pull/606) 6 | 7 | `var.web_app_routing.dns_zone_id` has been replaced by `var.web_app_routing.dns_zone_ids`. The new variable is a list of DNS zone IDs. This change allows for the specification of multiple DNS zones for routing. 8 | 9 | ## [`data.azurerm_resource_group.main` in this module has been removed, `var.location` is a required variable now.](https://github.com/Azure/terraform-azurerm-aks/pull/644) 10 | 11 | ## [Create log analytics workspace would also create required monitor data collection rule now](https://github.com/Azure/terraform-azurerm-aks/pull/623) 12 | 13 | The changes in this pull request introduce support for a Data Collection Rule (DCR) for Azure Monitor Container Insights in the Terraform module. 14 | 15 | ## [`CHANGELOG.md` file is no longer maintained, please read release note in GitHub repository instead](https://github.com/Azure/terraform-azurerm-aks/pull/651) 16 | 17 | [New release notes](https://github.com/Azure/terraform-azurerm-aks/releases). 
18 | 19 | ## [The following variables have been removed:](https://github.com/Azure/terraform-azurerm-aks/pull/652) 20 | 21 | * `agents_taints` 22 | * `api_server_subnet_id` 23 | * `private_cluster_enabled` 24 | * `rbac_aad_client_app_id` 25 | * `rbac_aad_managed` 26 | * `rbac_aad_server_app_id` 27 | * `rbac_aad_server_app_secret` 28 | 29 | ## `var.pod_subnet_id` has been replaced by `var.pod_subnet.id` 30 | 31 | ## `var.vnet_subnet_id` has been replaced by `var.vnet_subnet.id` 32 | 33 | ## `var.node_pools.pod_subnet_id` has been replaced by `var.node_pools.pod_subnet.id` 34 | 35 | ## `var.node_pools.vnet_subnet_id` has been replaced by `var.node_pools.vnet_subnet.id` 36 | 37 | ## `azurerm_role_assignment.network_contributor` will be re-created 38 | 39 | Since `for_each`'s target has been changed from a set of strings to a map of objects to avoid "Known after apply" values in the iterator, we have to re-create the `azurerm_role_assignment.network_contributor` resource. This will cause the role assignment to be removed and re-added, which may result in a brief period of time where the role assignment is not present. 40 | 41 | ## When `var.create_role_assignment_network_contributor` is `true`, you must set a different subnet for each node pool, including the default pool; otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself. 42 | 43 | ## `var.client_secret` is now `sensitive` 44 | 45 | ## New interval between cluster creation and Kubernetes version upgrade 46 | 47 | New variable `interval_before_cluster_update` was added. Sometimes when we tried to update the cluster's Kubernetes version after cluster creation, we got the error `Operation is not allowed because there's an in progress update managed cluster operation on the managed cluster started`. A `time_sleep` was added to avoid such a potential conflict. You can set this variable to `null` to bypass the sleep. 
48 | 49 | ## @zioproto is no longer a maintainer of this module 50 | 51 | For personal reasons, @zioproto is no longer a maintainer of this module. I want to express my sincere gratitude for his contributions and support over the years. His dedication and hard work are invaluable to this module. 52 | 53 | THANK YOU @zioproto ! 54 | -------------------------------------------------------------------------------- /CHANGELOG-v8.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | **Merged pull requests:** 4 | 5 | - Add support for nodepool's `gpu_instance` [\#519](https://github.com/Azure/terraform-azurerm-aks/pull/519) ([lonegunmanb](https://github.com/lonegunmanb)) 6 | - Bump github.com/Azure/terraform-module-test-helper from 0.17.0 to 0.18.0 in /test [\#516](https://github.com/Azure/terraform-azurerm-aks/pull/516) ([dependabot[bot]](https://github.com/apps/dependabot)) 7 | - Add upgrade notice document [\#513](https://github.com/Azure/terraform-azurerm-aks/pull/513) ([lonegunmanb](https://github.com/lonegunmanb)) 8 | - Add retry when the ingress is not ready [\#510](https://github.com/Azure/terraform-azurerm-aks/pull/510) ([lonegunmanb](https://github.com/lonegunmanb)) 9 | - Add support for `support_plan` and `Premium` sku tier. 
[\#508](https://github.com/Azure/terraform-azurerm-aks/pull/508) ([ecklm](https://github.com/ecklm)) 10 | - Refactor code, split monolith tf config into multiple files [\#494](https://github.com/Azure/terraform-azurerm-aks/pull/494) ([lonegunmanb](https://github.com/lonegunmanb)) 11 | - Remove `var.http_application_routing_enabled` [\#493](https://github.com/Azure/terraform-azurerm-aks/pull/493) ([lonegunmanb](https://github.com/lonegunmanb)) 12 | - feat\(`http_proxy_config`\): Add `http_proxy_config` [\#492](https://github.com/Azure/terraform-azurerm-aks/pull/492) ([lonegunmanb](https://github.com/lonegunmanb)) 13 | - Remove `public_network_access_enabled` entirely [\#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) ([lonegunmanb](https://github.com/lonegunmanb)) 14 | - Ignore deprecated attribute `public_network_access_enabled` [\#485](https://github.com/Azure/terraform-azurerm-aks/pull/485) ([ishuar](https://github.com/ishuar)) 15 | - feat: enable precondition on `default_node_pool` for autoscaling with node pool type [\#484](https://github.com/Azure/terraform-azurerm-aks/pull/484) ([ishuar](https://github.com/ishuar)) 16 | - Add web\_app\_routing\_identity block to outputs [\#481](https://github.com/Azure/terraform-azurerm-aks/pull/481) ([bonddim](https://github.com/bonddim)) 17 | - Add support for `kubelet_identity` nested block [\#479](https://github.com/Azure/terraform-azurerm-aks/pull/479) ([lonegunmanb](https://github.com/lonegunmanb)) 18 | - Prepare for v8.0 [\#462](https://github.com/Azure/terraform-azurerm-aks/pull/462) ([lonegunmanb](https://github.com/lonegunmanb)) 19 | - Remove precondition on extra node pool which prevent using windows pool with overlay [\#512](https://github.com/Azure/terraform-azurerm-aks/pull/512) ([lonegunmanb](https://github.com/lonegunmanb)) 20 | - Add support for `maintenance_window_auto_upgrade` [\#505](https://github.com/Azure/terraform-azurerm-aks/pull/505) ([skolobov](https://github.com/skolobov)) 21 | - Let 
the users decide whether adding a random suffix in cluster and pool's name or not. [\#496](https://github.com/Azure/terraform-azurerm-aks/pull/496) ([lonegunmanb](https://github.com/lonegunmanb)) 22 | - Add role assignments for ingress application gateway and corresponding example [\#426](https://github.com/Azure/terraform-azurerm-aks/pull/426) ([lonegunmanb](https://github.com/lonegunmanb)) 23 | - Add support for workload\_autoscaler\_profile settings [\#404](https://github.com/Azure/terraform-azurerm-aks/pull/404) ([bonddim](https://github.com/bonddim)) 24 | 25 | 26 | 27 | \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* 28 | -------------------------------------------------------------------------------- /NoticeOnUpgradeTov7.0.md: -------------------------------------------------------------------------------- 1 | # Notice on Upgrade to v7.x 2 | 3 | ## Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard` 4 | 5 | AzureRM's minimum version is `>= 3.51, < 4.0` now. 6 | [`var.sku_tier` cannot be set to `Paid` anymore](https://github.com/hashicorp/terraform-provider-azurerm/issues/20887), now possible values are `Free` and `Standard`. 7 | 8 | ## Ignore changes on `kubernetes_version` from outside of Terraform 9 | 10 | Related issue: #335 11 | 12 | Two new resources would be created when upgrading from v6.x to v7.x: 13 | 14 | * `null_resource.kubernetes_version_keeper` 15 | * `azapi_update_resource.aks_cluster_post_create` 16 | 17 | `azurerm_kubernetes_cluster.main` resource would ignore change on `kubernetes_version` from outside of Terraform in case AKS cluster's patch version has been upgraded automatically. 
18 | When you change `var.kubernetes_version`'s value, it would trigger a re-creation of `null_resource.kubernetes_version_keeper` and re-creation of `azapi_update_resource.aks_cluster_post_create`, which would upgrade the AKS cluster's `kubernetes_version`. 19 | 20 | `azapi` provider is required to be configured in your Terraform configuration. 21 | 22 | ## Fix #315 by amending missing `linux_os_config` block 23 | 24 | In v6.0, `default_node_pool.linux_os_config` block won't be added to `azurerm_kubernetes_cluster.main` resource when `var.enable_auto_scaling` is `true`. This bug has been fixed in v7.0.0 so you might see a diff on `azurerm_kubernetes_cluster.main` resource. 25 | 26 | ## Wrap `log_analytics_solution_id` to an object to fix #263. 27 | 28 | `var.log_analytics_solution_id` is now an object with `id` attribute. This change is to fix #263. 29 | 30 | ## Remove unused net_profile_docker_bridge_cidr 31 | 32 | `var.net_profile_docker_bridge_cidr` has been [deprecated](https://github.com/hashicorp/terraform-provider-azurerm/issues/18119) and is not used in the module anymore and has been removed. 33 | 34 | ## Add `create_before_destroy=true` to node pools #357 35 | 36 | Now `azurerm_kubernetes_cluster_node_pool.node_pool` resource has `create_before_destroy=true` to avoid downtime when upgrading node pools. Users must be aware that there would be a "random" suffix added into pool's name, this suffix's length is `4`, so your previous node pool's name `nodepool1` would be `nodepool1xxxx`. This suffix is calculated from node pool's config, the same configuration would lead to the same suffix. You might need to shorten your node pool's name because of this new added suffix. 37 | 38 | To enable this feature, we've also added new `null_resource.pool_name_keeper` to track node pool's name in case you've changed the name. 
39 | 40 | ## Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` #361 41 | 42 | As the [document](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#public_network_access_enabled) described: 43 | 44 | >When `public_network_access_enabled` is set to true, `0.0.0.0/32` must be added to `authorized_ip_ranges` in the `api_server_access_profile` block. 45 | 46 | We'll add the `api_server_access_profile` nested block after AzureRM provider's v4.0, but starting from v7.0 we'll enforce such pre-condition check. 47 | 48 | ## Add `depends_on` to `azurerm_kubernetes_cluster_node_pool` resources #418 49 | 50 | If you have `azurerm_kubernetes_cluster_node_pool` resources not managed with this module (`var.node_pools`) you 51 | must have an explicit `depends_on` on those resources to avoid conflicting node pool operations. 52 | See issue #418 for more details. 53 | -------------------------------------------------------------------------------- /examples/named_cluster/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | count = var.create_resource_group ? 1 : 0 7 | 8 | location = var.location 9 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 10 | } 11 | 12 | locals { 13 | resource_group = { 14 | name = var.create_resource_group ? 
azurerm_resource_group.main[0].name : var.resource_group_name 15 | location = var.location 16 | } 17 | } 18 | 19 | resource "azurerm_virtual_network" "test" { 20 | address_space = ["10.52.0.0/16"] 21 | location = local.resource_group.location 22 | name = "${random_id.prefix.hex}-vn" 23 | resource_group_name = local.resource_group.name 24 | } 25 | 26 | resource "azurerm_subnet" "test" { 27 | address_prefixes = ["10.52.0.0/24"] 28 | name = "${random_id.prefix.hex}-sn" 29 | resource_group_name = local.resource_group.name 30 | virtual_network_name = azurerm_virtual_network.test.name 31 | } 32 | 33 | resource "azurerm_user_assigned_identity" "test" { 34 | location = local.resource_group.location 35 | name = "${random_id.prefix.hex}-identity" 36 | resource_group_name = local.resource_group.name 37 | } 38 | 39 | # Just for demo purpose, not necessary to named cluster. 40 | resource "azurerm_log_analytics_workspace" "main" { 41 | location = coalesce(var.log_analytics_workspace_location, local.resource_group.location) 42 | name = "prefix-workspace" 43 | resource_group_name = local.resource_group.name 44 | retention_in_days = 30 45 | sku = "PerGB2018" 46 | } 47 | 48 | resource "azurerm_log_analytics_solution" "main" { 49 | location = coalesce(var.log_analytics_workspace_location, local.resource_group.location) 50 | resource_group_name = local.resource_group.name 51 | solution_name = "ContainerInsights" 52 | workspace_name = azurerm_log_analytics_workspace.main.name 53 | workspace_resource_id = azurerm_log_analytics_workspace.main.id 54 | 55 | plan { 56 | product = "OMSGallery/ContainerInsights" 57 | publisher = "Microsoft" 58 | } 59 | } 60 | 61 | module "aks_cluster_name" { 62 | source = "../.." 
63 | 64 | prefix = "prefix" 65 | resource_group_name = local.resource_group.name 66 | admin_username = null 67 | azure_policy_enabled = true 68 | cluster_log_analytics_workspace_name = "test-cluster" 69 | cluster_name = "test-cluster" 70 | disk_encryption_set_id = azurerm_disk_encryption_set.des.id 71 | identity_ids = [azurerm_user_assigned_identity.test.id] 72 | identity_type = "UserAssigned" 73 | log_analytics_solution = { 74 | id = azurerm_log_analytics_solution.main.id 75 | } 76 | log_analytics_workspace_enabled = true 77 | log_analytics_workspace = { 78 | id = azurerm_log_analytics_workspace.main.id 79 | name = azurerm_log_analytics_workspace.main.name 80 | } 81 | location = local.resource_group.location 82 | maintenance_window = { 83 | allowed = [ 84 | { 85 | day = "Sunday", 86 | hours = [22, 23] 87 | }, 88 | ] 89 | not_allowed = [] 90 | } 91 | net_profile_pod_cidr = "10.1.0.0/16" 92 | private_cluster_enabled = true 93 | rbac_aad = true 94 | role_based_access_control_enabled = true 95 | 96 | # KMS etcd encryption 97 | kms_enabled = true 98 | kms_key_vault_key_id = azurerm_key_vault_key.kms.id 99 | kms_key_vault_network_access = "Public" 100 | 101 | depends_on = [ 102 | azurerm_key_vault_access_policy.kms, 103 | azurerm_role_assignment.kms 104 | ] 105 | } 106 | -------------------------------------------------------------------------------- /examples/named_cluster_v4/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | count = var.create_resource_group ? 1 : 0 7 | 8 | location = var.location 9 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 10 | } 11 | 12 | locals { 13 | resource_group = { 14 | name = var.create_resource_group ? 
azurerm_resource_group.main[0].name : var.resource_group_name 15 | location = var.location 16 | } 17 | } 18 | 19 | resource "azurerm_virtual_network" "test" { 20 | address_space = ["10.52.0.0/16"] 21 | location = local.resource_group.location 22 | name = "${random_id.prefix.hex}-vn" 23 | resource_group_name = local.resource_group.name 24 | } 25 | 26 | resource "azurerm_subnet" "test" { 27 | address_prefixes = ["10.52.0.0/24"] 28 | name = "${random_id.prefix.hex}-sn" 29 | resource_group_name = local.resource_group.name 30 | virtual_network_name = azurerm_virtual_network.test.name 31 | } 32 | 33 | resource "azurerm_user_assigned_identity" "test" { 34 | location = local.resource_group.location 35 | name = "${random_id.prefix.hex}-identity" 36 | resource_group_name = local.resource_group.name 37 | } 38 | 39 | # Just for demo purpose, not necessary to named cluster. 40 | resource "azurerm_log_analytics_workspace" "main" { 41 | location = coalesce(var.log_analytics_workspace_location, local.resource_group.location) 42 | name = "prefix-workspace" 43 | resource_group_name = local.resource_group.name 44 | retention_in_days = 30 45 | sku = "PerGB2018" 46 | } 47 | 48 | resource "azurerm_log_analytics_solution" "main" { 49 | location = coalesce(var.log_analytics_workspace_location, local.resource_group.location) 50 | resource_group_name = local.resource_group.name 51 | solution_name = "ContainerInsights" 52 | workspace_name = azurerm_log_analytics_workspace.main.name 53 | workspace_resource_id = azurerm_log_analytics_workspace.main.id 54 | 55 | plan { 56 | product = "OMSGallery/ContainerInsights" 57 | publisher = "Microsoft" 58 | } 59 | } 60 | 61 | module "aks_cluster_name" { 62 | source = "../.." 
63 | 64 | prefix = "prefix" 65 | resource_group_name = local.resource_group.name 66 | admin_username = null 67 | azure_policy_enabled = true 68 | cluster_log_analytics_workspace_name = "test-cluster" 69 | cluster_name = "test-cluster" 70 | disk_encryption_set_id = azurerm_disk_encryption_set.des.id 71 | identity_ids = [azurerm_user_assigned_identity.test.id] 72 | identity_type = "UserAssigned" 73 | log_analytics_solution = { 74 | id = azurerm_log_analytics_solution.main.id 75 | } 76 | log_analytics_workspace_enabled = true 77 | log_analytics_workspace = { 78 | id = azurerm_log_analytics_workspace.main.id 79 | name = azurerm_log_analytics_workspace.main.name 80 | } 81 | location = local.resource_group.location 82 | maintenance_window = { 83 | allowed = [ 84 | { 85 | day = "Sunday", 86 | hours = [22, 23] 87 | }, 88 | ] 89 | not_allowed = [] 90 | } 91 | net_profile_pod_cidr = "10.1.0.0/16" 92 | private_cluster_enabled = true 93 | rbac_aad = true 94 | role_based_access_control_enabled = true 95 | 96 | # KMS etcd encryption 97 | kms_enabled = true 98 | kms_key_vault_key_id = azurerm_key_vault_key.kms.id 99 | kms_key_vault_network_access = "Public" 100 | 101 | depends_on = [ 102 | azurerm_key_vault_access_policy.kms, 103 | azurerm_role_assignment.kms 104 | ] 105 | } 106 | -------------------------------------------------------------------------------- /examples/startup/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "random_id" "name" { 6 | byte_length = 8 7 | } 8 | 9 | resource "azurerm_resource_group" "main" { 10 | count = var.create_resource_group ? 1 : 0 11 | 12 | location = var.location 13 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 14 | } 15 | 16 | locals { 17 | resource_group = { 18 | name = var.create_resource_group ? 
azurerm_resource_group.main[0].name : var.resource_group_name 19 | location = var.location 20 | } 21 | } 22 | 23 | resource "azurerm_virtual_network" "test" { 24 | address_space = ["10.52.0.0/16"] 25 | location = local.resource_group.location 26 | name = "${random_id.prefix.hex}-vn" 27 | resource_group_name = local.resource_group.name 28 | } 29 | 30 | resource "azurerm_subnet" "test" { 31 | address_prefixes = ["10.52.0.0/24"] 32 | name = "${random_id.prefix.hex}-sn" 33 | resource_group_name = local.resource_group.name 34 | virtual_network_name = azurerm_virtual_network.test.name 35 | } 36 | 37 | module "aks" { 38 | source = "../.." 39 | 40 | location = local.resource_group.location 41 | prefix = random_id.name.hex 42 | resource_group_name = local.resource_group.name 43 | kubernetes_version = "1.30" # don't specify the patch version! 44 | automatic_channel_upgrade = "patch" 45 | agents_availability_zones = ["1", "2"] 46 | agents_count = null 47 | agents_max_count = 2 48 | agents_max_pods = 100 49 | agents_min_count = 1 50 | agents_pool_name = "testnodepool" 51 | agents_pool_linux_os_configs = [ 52 | { 53 | transparent_huge_page_enabled = "always" 54 | sysctl_configs = [ 55 | { 56 | fs_aio_max_nr = 65536 57 | fs_file_max = 100000 58 | fs_inotify_max_user_watches = 1000000 59 | } 60 | ] 61 | } 62 | ] 63 | agents_type = "VirtualMachineScaleSets" 64 | azure_policy_enabled = true 65 | client_id = var.client_id 66 | client_secret = var.client_secret 67 | confidential_computing = { 68 | sgx_quote_helper_enabled = true 69 | } 70 | disk_encryption_set_id = azurerm_disk_encryption_set.des.id 71 | enable_auto_scaling = true 72 | enable_host_encryption = true 73 | green_field_application_gateway_for_ingress = { 74 | name = "${random_id.prefix.hex}-agw" 75 | subnet_cidr = "10.52.1.0/24" 76 | } 77 | local_account_disabled = true 78 | log_analytics_workspace_enabled = true 79 | cluster_log_analytics_workspace_name = random_id.name.hex 80 | maintenance_window = { 81 | allowed = [ 
82 | { 83 | day = "Sunday", 84 | hours = [22, 23] 85 | }, 86 | ] 87 | not_allowed = [ 88 | { 89 | start = "2035-01-01T20:00:00Z", 90 | end = "2035-01-01T21:00:00Z" 91 | }, 92 | ] 93 | } 94 | maintenance_window_node_os = { 95 | frequency = "Daily" 96 | interval = 1 97 | start_time = "07:00" 98 | utc_offset = "+01:00" 99 | duration = 16 100 | } 101 | net_profile_dns_service_ip = "10.0.0.10" 102 | net_profile_service_cidr = "10.0.0.0/16" 103 | network_plugin = "azure" 104 | network_policy = "azure" 105 | node_os_channel_upgrade = "NodeImage" 106 | os_disk_size_gb = 60 107 | private_cluster_enabled = true 108 | rbac_aad = true 109 | role_based_access_control_enabled = true 110 | sku_tier = "Standard" 111 | vnet_subnet = { 112 | id = azurerm_subnet.test.id 113 | } 114 | 115 | agents_labels = { 116 | "node1" : "label1" 117 | } 118 | agents_tags = { 119 | "Agent" : "agentTag" 120 | } 121 | depends_on = [ 122 | azurerm_subnet.test, 123 | ] 124 | } 125 | -------------------------------------------------------------------------------- /examples/startup_v4/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "random_id" "name" { 6 | byte_length = 8 7 | } 8 | 9 | resource "azurerm_resource_group" "main" { 10 | count = var.create_resource_group ? 1 : 0 11 | 12 | location = var.location 13 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 14 | } 15 | 16 | locals { 17 | resource_group = { 18 | name = var.create_resource_group ? 
azurerm_resource_group.main[0].name : var.resource_group_name 19 | location = var.location 20 | } 21 | } 22 | 23 | resource "azurerm_virtual_network" "test" { 24 | address_space = ["10.52.0.0/16"] 25 | location = local.resource_group.location 26 | name = "${random_id.prefix.hex}-vn" 27 | resource_group_name = local.resource_group.name 28 | } 29 | 30 | resource "azurerm_subnet" "test" { 31 | address_prefixes = ["10.52.0.0/24"] 32 | name = "${random_id.prefix.hex}-sn" 33 | resource_group_name = local.resource_group.name 34 | virtual_network_name = azurerm_virtual_network.test.name 35 | } 36 | 37 | module "aks" { 38 | source = "../.." 39 | 40 | location = local.resource_group.location 41 | prefix = random_id.name.hex 42 | resource_group_name = local.resource_group.name 43 | kubernetes_version = "1.30" # don't specify the patch version! 44 | automatic_channel_upgrade = "patch" 45 | agents_availability_zones = ["1", "2"] 46 | agents_count = null 47 | agents_max_count = 2 48 | agents_max_pods = 100 49 | agents_min_count = 1 50 | agents_pool_name = "testnodepool" 51 | agents_pool_linux_os_configs = [ 52 | { 53 | transparent_huge_page_enabled = "always" 54 | sysctl_configs = [ 55 | { 56 | fs_aio_max_nr = 65536 57 | fs_file_max = 100000 58 | fs_inotify_max_user_watches = 1000000 59 | } 60 | ] 61 | } 62 | ] 63 | agents_type = "VirtualMachineScaleSets" 64 | azure_policy_enabled = true 65 | client_id = var.client_id 66 | client_secret = var.client_secret 67 | confidential_computing = { 68 | sgx_quote_helper_enabled = true 69 | } 70 | disk_encryption_set_id = azurerm_disk_encryption_set.des.id 71 | enable_auto_scaling = true 72 | enable_host_encryption = true 73 | green_field_application_gateway_for_ingress = { 74 | name = "${random_id.prefix.hex}-agw" 75 | subnet_cidr = "10.52.1.0/24" 76 | } 77 | local_account_disabled = true 78 | log_analytics_workspace_enabled = true 79 | cluster_log_analytics_workspace_name = random_id.name.hex 80 | maintenance_window = { 81 | allowed = [ 
82 | { 83 | day = "Sunday", 84 | hours = [22, 23] 85 | }, 86 | ] 87 | not_allowed = [ 88 | { 89 | start = "2035-01-01T20:00:00Z", 90 | end = "2035-01-01T21:00:00Z" 91 | }, 92 | ] 93 | } 94 | maintenance_window_node_os = { 95 | frequency = "Daily" 96 | interval = 1 97 | start_time = "07:00" 98 | utc_offset = "+01:00" 99 | duration = 16 100 | } 101 | net_profile_dns_service_ip = "10.0.0.10" 102 | net_profile_service_cidr = "10.0.0.0/16" 103 | network_plugin = "azure" 104 | network_policy = "azure" 105 | node_os_channel_upgrade = "NodeImage" 106 | os_disk_size_gb = 60 107 | private_cluster_enabled = true 108 | rbac_aad = true 109 | role_based_access_control_enabled = true 110 | sku_tier = "Standard" 111 | vnet_subnet = { 112 | id = azurerm_subnet.test.id 113 | } 114 | 115 | agents_labels = { 116 | "node1" : "label1" 117 | } 118 | agents_tags = { 119 | "Agent" : "agentTag" 120 | } 121 | depends_on = [ 122 | azurerm_subnet.test, 123 | ] 124 | } 125 | -------------------------------------------------------------------------------- /CHANGELOG-v5.md: -------------------------------------------------------------------------------- 1 | ## 5.0.0 (July 14, 2022) 2 | 3 | ENHANCEMENTS: 4 | 5 | * Variable `enable_kube_dashboard` has been removed as [#181](https://github.com/Azure/terraform-azurerm-aks/issues/181) described. ([#187](https://github.com/Azure/terraform-azurerm-aks/pull/187)) 6 | * Add new variable `location` so we can define location for the resources explicitly. ([#172](https://github.com/Azure/terraform-azurerm-aks/pull/172)) 7 | * Bump AzureRM Provider version to 3.3.0. ([#157](https://github.com/Azure/terraform-azurerm-aks/pull/157)) 8 | * Add new variable `private_dns_zone_id` to make argument `private_dns_zone_id` configurable. ([#174](https://github.com/Azure/terraform-azurerm-aks/pull/174)) 9 | * Add new variable `open_service_mesh_enabled` to make argument `open_service_mesh_enabled` configurable. 
([#132](https://github.com/Azure/terraform-azurerm-aks/pull/132)) 10 | * Remove `addon_profile` in the outputs since the block has been removed from provider 3.x. Extract embedded blocks inside `addon_profile` block into standalone outputs. ([#188](https://github.com/Azure/terraform-azurerm-aks/pull/188)) 11 | * Add `nullable = true` to some variables to simplify the conditional expressions. ([#193](https://github.com/Azure/terraform-azurerm-aks/pull/193)) 12 | * Add new variable `oidc_issuer_enabled` to make argument `oidc_issuer_enabled` configurable. ([#205](https://github.com/Azure/terraform-azurerm-aks/pull/205) 13 | * Add new output `oidc_issuer_url` to expose the created issuer URL from the module. [#206](https://github.com/Azure/terraform-azurerm-aks/pull/206)) 14 | * Turn monitoring on in the test code. ([#201](https://github.com/Azure/terraform-azurerm-aks/pull/201)) 15 | * Add new variables `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` to make arguments `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` configurable. ([#149](https://github.com/Azure/terraform-azurerm-aks/pull/149)) 16 | * Remove `module.ssh-key` and moves resource `tls_private_key` inside the module to root directory, then outputs tls keys. ([#189](https://github.com/Azure/terraform-azurerm-aks/pull/189)) 17 | * Add new variables `rbac_aad_azure_rbac_enabled` and `rbac_aad_tenant_id` to make arguments in `azure_active_directory_role_based_access_control` configurable. ([#199](https://github.com/Azure/terraform-azurerm-aks/pull/199)) 18 | * Add `count` meta-argument to resource `tls_private_key` to avoid the unnecessary creation. ([#209](https://github.com/Azure/terraform-azurerm-aks/pull/209)) 19 | * Add new variable `only_critical_addons_enabled` to make argument `only_critical_addons_enabled` in block `default_node_pool` configurable. 
([#129](https://github.com/Azure/terraform-azurerm-aks/pull/129)) 20 | * Add support for the argument `key_vault_secrets_provider`. ([#214](https://github.com/Azure/terraform-azurerm-aks/pull/214)) 21 | * Provides a way to attach existing Log Analytics Workspace to AKS through Container Insights. ([#213](https://github.com/Azure/terraform-azurerm-aks/pull/213)) 22 | * Add new variable `local_account_disabled` to make argument `local_account_disabled` configurable. ([#218](https://github.com/Azure/terraform-azurerm-aks/pull/218)) 23 | * Set argument `private_cluster_enabled` to `true` in the test code. ([#219](https://github.com/Azure/terraform-azurerm-aks/pull/219)) 24 | * Add new variable `disk_encryption_set_id` to make argument `disk_encryption_set_id` configurable. Create resource `azurerm_disk_encryption_set` in the test code to turn disk encryption on for the cluster. ([#195](https://github.com/Azure/terraform-azurerm-aks/pull/195)) 25 | * Add new variable `api_server_authorized_ip_ranges` to make argument `api_server_authorized_ip_ranges` configurable. ([#220](https://github.com/Azure/terraform-azurerm-aks/pull/220)) 26 | * Rename output `system_assigned_identity` to `cluster_identity` since it could be user assigned identity. Remove the index inside output's value expression. ([#197](https://github.com/Azure/terraform-azurerm-aks/pull/197)) 27 | * Rename `var.enable_azure_policy` to `var.azure_policy_enabled` to meet the naming convention. Set `azure_policy_enabled` to `true` in test fixture code. ([#203](https://github.com/Azure/terraform-azurerm-aks/pull/203)) 28 | 29 | BUG FIXES: 30 | 31 | * Change the incorrect description of variable `tags`. 
([#175](https://github.com/Azure/terraform-azurerm-aks/pull/175)) 32 | -------------------------------------------------------------------------------- /NoticeOnUpgradeTov5.0.md: -------------------------------------------------------------------------------- 1 | # Notice on Upgrade to v5.x 2 | 3 | V5.0.0 is a major version upgrade and a lot of breaking changes have been introduced. Extreme caution must be taken during the upgrade to avoid resource replacement and downtime by accident. 4 | 5 | Running the `terraform plan` first to inspect the plan is strongly advised. 6 | 7 | ## Terraform and terraform-provider-azurerm version restrictions 8 | 9 | Now Terraform core's lowest version is v1.2.0 and terraform-provider-azurerm's lowest version is v3.21.0. 10 | 11 | ## variable `user_assigned_identity_id` has been renamed. 12 | 13 | Variable `user_assigned_identity_id` has been renamed to `identity_ids` and its type has been changed from `string` to `list(string)`. 14 | 15 | ## `addon_profile` in outputs is no longer available. 
16 | 17 | It has been broken into the following new outputs: 18 | 19 | * `aci_connector_linux` 20 | * `aci_connector_linux_enabled` 21 | * `azure_policy_enabled` 22 | * `http_application_routing_enabled` 23 | * `ingress_application_gateway` 24 | * `ingress_application_gateway_enabled` 25 | * `key_vault_secrets_provider` 26 | * `key_vault_secrets_provider_enabled` 27 | * `oms_agent` 28 | * `oms_agent_enabled` 29 | * `open_service_mesh_enabled` 30 | 31 | ## The following variables have been renamed from `enable_xxx` to `xxx_enabled` 32 | 33 | * `enable_azure_policy` has been renamed to `azure_policy_enabled` 34 | * `enable_http_application_routing` has been renamed to `http_application_routing_enabled` 35 | * `enable_ingress_application_gateway` has been renamed to `ingress_application_gateway_enabled` 36 | * `enable_log_analytics_workspace` has been renamed to `log_analytics_workspace_enabled` 37 | * `enable_open_service_mesh` has been renamed to `open_service_mesh_enabled` 38 | * `enable_role_based_access_control` has been renamed to `role_based_access_control_enabled` 39 | 40 | ## `nullable = true` has been added to the following variables so setting them to `null` explicitly will use the default value 41 | 42 | * `log_analytics_workspace_enable` 43 | * `os_disk_type` 44 | * `private_cluster_enabled` 45 | * `rbac_aad_managed` 46 | * `rbac_aad_admin_group_object_ids` 47 | * `network_policy` 48 | * `enable_node_public_ip` 49 | 50 | ## `var.admin_username`'s default value has been removed 51 | 52 | In v4.x `var.admin_username` has a default value `azureuser` and has been removed in V5.0.0. Since the `admin_username` argument in `linux_profile` block is a ForceNew argument, any value change to this argument will trigger a Kubernetes cluster replacement **SO THE EXTREME CAUTION MUST BE TAKEN**. The module's callers must set `var.admin_username` to `azureuser` explicitly if they didn't set it before. 
53 | 54 | ## `module.ssh-key` has been removed 55 | 56 | The file named `private_ssh_key` which contains the tls private key will be deleted since the `local_file` resource has been removed. Now the private key is exported via `generated_cluster_private_ssh_key` in output and the corresponding public key is exported via `generated_cluster_public_ssh_key` in output. 57 | 58 | A `moved` block has been added to relocate the existing `tls_private_key` resource to the new address. If the `var.admin_username` is not `null`, no action is needed. 59 | 60 | Resource `tls_private_key`'s creation now is conditional. Users may see the destruction of existing `tls_private_key` in the generated plan if `var.admin_username` is `null`. 61 | 62 | ## `system_assigned_identity` in the output has been renamed to `cluster_identity` 63 | 64 | The `system_assigned_identity` was: 65 | 66 | ```hcl 67 | output "system_assigned_identity" { 68 | value = azurerm_kubernetes_cluster.main.identity 69 | } 70 | ``` 71 | 72 | Now it has been renamed to `cluster_identity`, and the block has been changed to: 73 | 74 | ```hcl 75 | output "cluster_identity" { 76 | description = "The `azurerm_kubernetes_cluster`'s `identity` block." 77 | value = try(azurerm_kubernetes_cluster.main.identity[0], null) 78 | } 79 | ``` 80 | 81 | The callers who used to read the cluster's identity block need to remove the index in their expression, from `module.aks.system_assigned_identity[0]` to `module.aks.cluster_identity`. 82 | 83 | ## The following outputs are now sensitive. 
All outputs referenced them must be declared as sensitive too 84 | 85 | * `client_certificate` 86 | * `client_key` 87 | * `cluster_ca_certificate` 88 | * `generated_cluster_private_ssh_key` 89 | * `host` 90 | * `kube_admin_config_raw` 91 | * `kube_config_raw` 92 | * `password` 93 | * `username` 94 | -------------------------------------------------------------------------------- /examples/multiple_node_pools/README.md: -------------------------------------------------------------------------------- 1 | # Testing the upgrade scenario 2 | 3 | You can use this example to manually test the upgrade scenario. 4 | 5 | See existing AKS versions: 6 | 7 | ``` 8 | % az aks get-versions --location centralus 9 | KubernetesVersion Upgrades 10 | ------------------- ------------------------ 11 | 1.28.3 None available 12 | 1.28.0 1.28.3 13 | 1.27.7 1.28.0, 1.28.3 14 | 1.27.3 1.27.7, 1.28.0, 1.28.3 15 | 1.26.10 1.27.3, 1.27.7 16 | 1.26.6 1.26.10, 1.27.3, 1.27.7 17 | 1.25.15 1.26.6, 1.26.10 18 | 1.25.11 1.25.15, 1.26.6, 1.26.10 19 | ``` 20 | 21 | In this example we test an upgrade from 1.26.10 to 1.27.7. 
22 | 23 | ## Create the AKS cluster at version 1.26.10: 24 | 25 | ``` 26 | terraform init -upgrade 27 | terraform apply -var="kubernetes_version=1.26.10" -var="orchestrator_version=1.26.10" 28 | ``` 29 | 30 | Verify the AKS cluster version: 31 | 32 | ``` 33 | az aks list -o table # check AKS version 34 | az aks get-credentials --resource-group --name 35 | kubectl version # check api server version 36 | kubectl get nodes # check nodes version 37 | ``` 38 | 39 | In the `az aks list` output you will have `KubernetesVersion` and `CurrentKubernetesVersion` both at 1.26.10 40 | 41 | ## Upgrade the AKS cluster control plane only to version 1.27.7 42 | 43 | ``` 44 | terraform apply -var="kubernetes_version=1.27.7" -var="orchestrator_version=1.26.10" 45 | ``` 46 | 47 | Check the new versions: 48 | 49 | 50 | ``` 51 | az aks list -o table # check AKS version 52 | kubectl version # check api server version 53 | kubectl get nodes # check nodes version 54 | ``` 55 | 56 | In the `az aks list` output you will have `KubernetesVersion` and `CurrentKubernetesVersion` both at 1.27.7 57 | The control plane version will be 1.27.7 and the nodes will be 1.26.10. 58 | 59 | ## Upgrade the AKS cluster node pools to version 1.27.7 60 | 61 | ``` 62 | terraform apply -var="kubernetes_version=1.27.7" -var="orchestrator_version=1.27.7" 63 | ``` 64 | 65 | Check the new versions: 66 | 67 | ``` 68 | az aks list -o table # check AKS version 69 | kubectl version # check api server version 70 | kubectl get nodes # check nodes version 71 | ``` 72 | 73 | In the `az aks list` output you will have `KubernetesVersion` and `CurrentKubernetesVersion` both at 1.27.7 74 | The control plane version will be 1.27.7 and the nodes will be 1.27.7. 75 | 76 | ## Note on Issue #465 77 | 78 | The current implementation does not allow to upgrade `var.kubernetes_version` and `var.orchestrator_version` at the same time. 
79 | 80 | We can test at this point a simultaneous upgrade to 1.28.3: 81 | 82 | ``` 83 | terraform apply -var="kubernetes_version=1.28.3" -var="orchestrator_version=1.28.3" 84 | ``` 85 | This will generate a plan where the azure_kubernetes_cluster resource is updated in place and the system node pool is updated. 86 | 87 | ``` 88 | # module.aks.azurerm_kubernetes_cluster.main will be updated in-place 89 | ~ resource "azurerm_kubernetes_cluster" "main" { 90 | id = "/subscriptions//resourceGroups/4c273d71bc7898d6-rg/providers/Microsoft.ContainerService/managedClusters/prefix-4c273d71bc7898d6-aks" 91 | name = "prefix-4c273d71bc7898d6-aks" 92 | tags = {} 93 | # (29 unchanged attributes hidden) 94 | 95 | ~ default_node_pool { 96 | name = "nodepool" 97 | ~ orchestrator_version = "1.27.7" -> "1.28.3" 98 | tags = {} 99 | # (22 unchanged attributes hidden) 100 | } 101 | 102 | # (4 unchanged blocks hidden) 103 | } 104 | ``` 105 | 106 | that will fail with the following error: 107 | 108 | ``` 109 | │ Error: updating Default Node Pool Agent Pool (Subscription: "" 110 | │ Resource Group Name: "4c273d71bc7898d6-rg" 111 | │ Managed Cluster Name: "prefix-4c273d71bc7898d6-aks" 112 | │ Agent Pool Name: "nodepool") performing CreateOrUpdate: agentpools.AgentPoolsClient#CreateOrUpdate: Failure sending request: StatusCode=0 -- Original Error: Code="NodePoolMcVersionIncompatible" Message="Node pool version 1.28.3 and control plane version 1.27.7 are incompatible. Minor version of node pool version 28 is bigger than control plane version 27. For more information, please check https://aka.ms/aks/UpgradeVersionRules" 113 | ``` 114 | -------------------------------------------------------------------------------- /NoticeOnUpgradeTov8.0.md: -------------------------------------------------------------------------------- 1 | # Notice on Upgrade to v8.x 2 | 3 | ## New variable `cluster_name_random_suffix` 4 | 5 | 1. A new variable `cluster_name_random_suffix` is added. 
This allows users to decide whether they want to add a random suffix to a cluster's name. This is particularly useful when Terraform needs to recreate a resource that cannot be updated in-place, as it avoids naming conflicts. Because of [#357](https://github.com/Azure/terraform-azurerm-aks/pull/357), the `azurerm_kubernetes_cluster` resource now has `create_before_destroy = true`, so we cannot turn this feature off. If you want to recreate this cluster by one apply without any trouble, please turn this random naming suffix on to avoid the naming conflict. 6 | 7 | 2. The `create_before_destroy` attribute is added to the `node_pools` variable as an object field. This attribute determines whether a new node pool should be created before the old one is destroyed during updates. By default, it is set to `true`. 8 | 9 | 3. The naming of extra node pools has been updated. Now, a random UUID is used as the seed for the random suffix in the name of the node pool, instead of the JSON-encoded value of the node pool. **This naming suffix only applies to extra node pools that create before destroy.** 10 | 11 | You're recommended to set `var.cluster_name_random_suffix` to `true` explicitly, and you'll see a random suffix in your cluster's name. If you don't like this suffix, please remember that a new cluster with the same name would now be created before the old one has been deleted. If you do want to recreate the cluster, please run `terraform destroy` first. 12 | 13 | ## Remove `var.http_application_routing_enabled` 14 | 15 | According to the [document](https://learn.microsoft.com/en-us/azure/aks/http-application-routing), HTTP application routing add-on for AKS has been retired so we have to remove this feature from this module. 16 | 17 | 1. The variable `http_application_routing_enabled` has been removed from the module. This variable was previously used to enable HTTP Application Routing Addon. 18 | 19 | 2. 
The `http_application_routing_enabled` output has been removed from `outputs.tf`. This output was previously used to display whether HTTP Application Routing was enabled. 20 | 21 | 3. The `http_application_routing_enabled` attribute has been removed from the `azurerm_kubernetes_cluster` resource in `main.tf`. This attribute was previously used to enable HTTP Application Routing for the Kubernetes cluster. 22 | 23 | 4. The `http_application_routing_enabled` attribute has been added to the `ignore_changes` lifecycle block of the `azurerm_kubernetes_cluster` resource in `main.tf`. This means changes to this attribute will not trigger the resource to be updated. 24 | 25 | These changes mean that users of this module will no longer be able to enable HTTP Application Routing through this module. 26 | 27 | The new feature for the Ingress in AKS is [Managed NGINX ingress with the application routing add-on](https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default), you can enable this with `var.web_app_routing`. 28 | 29 | Users who were using this feature, please read this [Migrate document](https://learn.microsoft.com/en-us/azure/aks/app-routing-migration). 30 | 31 | ## Remove `public_network_access_enabled` entirely 32 | 33 | According to this [announcement](https://github.com/Azure/AKS/issues/3690), now public network access for AKS is no longer supported. 34 | 35 | The primary impact [#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) is the complete removal of the `public_network_access_enabled` variable from the module. 36 | 37 | 1. The `public_network_access_enabled` variable has been removed from the `variables.tf` file. This means that the module no longer supports the configuration of public network access at the Kubernetes cluster level. 38 | 39 | 2. 
The `public_network_access_enabled` variable has also been removed from the `main.tf` file and all example files (`application_gateway_ingress/main.tf`, `multiple_node_pools/main.tf`, `named_cluster/main.tf`, `startup/main.tf`, `with_acr/main.tf`, `without_monitor/main.tf`). This indicates that the module no longer uses this variable in the creation of the Azure Kubernetes Service (AKS) resource. 40 | 41 | 3. The `public_network_access_enabled` has been added into `azurerm_kubernetes_cluster`'s `ignore_changes` list. Any change to this attribute won't trigger update. 42 | 43 | ## Add role assignments for ingress application gateway 44 | 45 | The `variables.tf` file is updated with new variables related to the application gateway for ingress, including `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress`. 46 | 47 | The `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress` variables are used to configure the Application Gateway Ingress for the Azure Kubernetes Service (AKS) in the Terraform module. 48 | 49 | 1. `brown_field_application_gateway_for_ingress`: This variable is used when you want to use an existing Application Gateway as the ingress for the AKS cluster. It is an object that contains the ID of the Application Gateway (`id`) and the ID of the Subnet (`subnet_id`) which the Application Gateway is connected to. If this variable is set, the module will not create a new Application Gateway and will use the existing one instead. 50 | 51 | 2. `green_field_application_gateway_for_ingress`: This variable is used when you want the module to create a new Application Gateway for the AKS cluster. 
It is an object that contains the name of the Application Gateway to be used or created in the Nodepool Resource Group (`name`), the subnet CIDR to be used to create an Application Gateway (`subnet_cidr`), and the ID of the subnet on which to create an Application Gateway (`subnet_id`). If this variable is set, the module will create a new Application Gateway with the provided configuration. 52 | 53 | 3. `create_role_assignments_for_application_gateway`: This is a boolean variable that determines whether to create the corresponding role assignments for the application gateway or not. By default, it is set to `true`. Role assignments are necessary for the Application Gateway to function correctly with the AKS cluster. If set to `true`, the module will create the necessary role assignments on the Application Gateway. 54 | -------------------------------------------------------------------------------- /locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Abstract if auto_scaler_profile_scale_down_delay_after_delete is not set or null we should use the scan_interval. 3 | auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete 4 | # automatic upgrades are either: 5 | # - null 6 | # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null 7 | # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null 8 | automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? 
true : ( 9 | (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) || 10 | (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null) 11 | ) 12 | cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks") 13 | # Abstract the decision whether to create an Analytics Workspace or not. 14 | create_analytics_solution = var.log_analytics_workspace_enabled && var.log_analytics_solution == null 15 | create_analytics_workspace = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null 16 | default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), []) 17 | # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1 18 | existing_application_gateway_for_ingress_id = try(var.brown_field_application_gateway_for_ingress.id, null) 19 | existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4] 20 | existing_application_gateway_segments_for_ingress = var.brown_field_application_gateway_for_ingress == null ? 
null : split("/", local.existing_application_gateway_for_ingress_id) 21 | existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null) 22 | # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1 23 | existing_application_gateway_subnet_segments = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), []) 24 | existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null) 25 | existing_application_gateway_subnet_vnet_name = try(local.existing_application_gateway_subnet_segments[8], null) 26 | existing_application_gateway_subscription_id_for_ingress = try(local.existing_application_gateway_segments_for_ingress[2], null) 27 | ingress_application_gateway_enabled = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress 28 | # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null. 29 | # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled 30 | # is set to `true`. 31 | log_analytics_workspace = var.log_analytics_workspace_enabled ? ( 32 | # The Log Analytics Workspace should be enabled: 33 | var.log_analytics_workspace == null ? { 34 | # `log_analytics_workspace_enabled` is `true` but `log_analytics_workspace` was not supplied. 35 | # Create an `azurerm_log_analytics_workspace` resource and use that. 36 | id = local.azurerm_log_analytics_workspace_id 37 | name = local.azurerm_log_analytics_workspace_name 38 | location = local.azurerm_log_analytics_workspace_location 39 | resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name 40 | } : { 41 | # `log_analytics_workspace` is supplied. Let's use that. 
42 | id = var.log_analytics_workspace.id 43 | name = var.log_analytics_workspace.name 44 | location = var.log_analytics_workspace.location 45 | # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1 46 | resource_group_name = split("/", var.log_analytics_workspace.id)[4] 47 | } 48 | ) : null # Finally, the Log Analytics Workspace should be disabled. 49 | node_pools_create_after_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy != true } 50 | node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true } 51 | private_dns_zone_name = try(reverse(split("/", var.private_dns_zone_id))[0], null) 52 | query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false) 53 | subnet_ids = [for _, s in local.subnets : s.id] 54 | subnets = merge({ for k, v in merge( 55 | [ 56 | for key, pool in var.node_pools : { 57 | "${key}-vnet-subnet" : pool.vnet_subnet, 58 | "${key}-pod-subnet" : pool.pod_subnet, 59 | } 60 | ]...) : k => v if v != null }, var.vnet_subnet == null ? 
{} : { 61 | "vnet-subnet" : { 62 | id = var.vnet_subnet.id 63 | } 64 | }) 65 | # subnet_ids = for id in local.potential_subnet_ids : id if id != null 66 | use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null 67 | use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null 68 | valid_private_dns_zone_regexs = [ 69 | "private\\.[a-z0-9]+\\.azmk8s\\.io", 70 | "privatelink\\.[a-z0-9]+\\.azmk8s\\.io", 71 | "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z]+\\.azmk8s\\.io", 72 | "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z]+\\.azmk8s\\.io", 73 | ] 74 | } 75 | -------------------------------------------------------------------------------- /v4/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Abstract if auto_scaler_profile_scale_down_delay_after_delete is not set or null we should use the scan_interval. 3 | auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete 4 | # automatic upgrades are either: 5 | # - null 6 | # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null 7 | # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null 8 | automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? 
true : ( 9 | (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) || 10 | (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null) 11 | ) 12 | cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks") 13 | # Abstract the decision whether to create an Analytics Workspace or not. 14 | create_analytics_solution = var.log_analytics_workspace_enabled && var.log_analytics_solution == null 15 | create_analytics_workspace = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null 16 | default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), []) 17 | # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1 18 | existing_application_gateway_for_ingress_id = try(var.brown_field_application_gateway_for_ingress.id, null) 19 | existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4] 20 | existing_application_gateway_segments_for_ingress = var.brown_field_application_gateway_for_ingress == null ? 
null : split("/", local.existing_application_gateway_for_ingress_id) 21 | existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null) 22 | # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1 23 | existing_application_gateway_subnet_segments = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), []) 24 | existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null) 25 | existing_application_gateway_subnet_vnet_name = try(local.existing_application_gateway_subnet_segments[8], null) 26 | existing_application_gateway_subscription_id_for_ingress = try(local.existing_application_gateway_segments_for_ingress[2], null) 27 | ingress_application_gateway_enabled = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress 28 | # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null. 29 | # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled 30 | # is set to `true`. 31 | log_analytics_workspace = var.log_analytics_workspace_enabled ? ( 32 | # The Log Analytics Workspace should be enabled: 33 | var.log_analytics_workspace == null ? { 34 | # `log_analytics_workspace_enabled` is `true` but `log_analytics_workspace` was not supplied. 35 | # Create an `azurerm_log_analytics_workspace` resource and use that. 36 | id = local.azurerm_log_analytics_workspace_id 37 | name = local.azurerm_log_analytics_workspace_name 38 | location = local.azurerm_log_analytics_workspace_location 39 | resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name 40 | } : { 41 | # `log_analytics_workspace` is supplied. Let's use that. 
42 | id = var.log_analytics_workspace.id 43 | name = var.log_analytics_workspace.name 44 | location = var.log_analytics_workspace.location 45 | # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1 46 | resource_group_name = split("/", var.log_analytics_workspace.id)[4] 47 | } 48 | ) : null # Finally, the Log Analytics Workspace should be disabled. 49 | node_pools_create_after_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy != true } 50 | node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true } 51 | private_dns_zone_name = try(reverse(split("/", var.private_dns_zone_id))[0], null) 52 | query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false) 53 | subnet_ids = [for _, s in local.subnets : s.id] 54 | subnets = merge({ for k, v in merge( 55 | [ 56 | for key, pool in var.node_pools : { 57 | "${key}-vnet-subnet" : pool.vnet_subnet, 58 | "${key}-pod-subnet" : pool.pod_subnet, 59 | } 60 | ]...) : k => v if v != null }, var.vnet_subnet == null ? 
{} : {
    "vnet-subnet" : {
      id = var.vnet_subnet.id
    }
  })
  # subnet_ids = for id in local.potential_subnet_ids : id if id != null
  use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null
  use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null
  valid_private_dns_zone_regexs = [
    "private\\.[a-z0-9]+\\.azmk8s\\.io",
    "privatelink\\.[a-z0-9]+\\.azmk8s\\.io",
    "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z]+\\.azmk8s\\.io",
    "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z]+\\.azmk8s\\.io",
  ]
}
-------------------------------------------------------------------------------- /unit-test-fixture/locals.tf: --------------------------------------------------------------------------------
locals {
  # Fall back to the scan interval when scale_down_delay_after_delete is not set.
  auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete
  # automatic upgrades are either:
  # - null
  # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null
  # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null
  automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? true : (
    (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) ||
    (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null)
  )
  cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks")
  # Abstract the decision whether to create an Analytics Workspace / Solution or not.
  create_analytics_solution        = var.log_analytics_workspace_enabled && var.log_analytics_solution == null
  create_analytics_workspace       = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null
  default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), [])
  # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1
  existing_application_gateway_for_ingress_id             = try(var.brown_field_application_gateway_for_ingress.id, null)
  existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4]
  existing_application_gateway_segments_for_ingress       = var.brown_field_application_gateway_for_ingress == null ? null : split("/", local.existing_application_gateway_for_ingress_id)
  existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null)
  # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1
  existing_application_gateway_subnet_segments                    = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), [])
  existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null)
  existing_application_gateway_subnet_vnet_name                   = try(local.existing_application_gateway_subnet_segments[8], null)
  existing_application_gateway_subscription_id_for_ingress        = try(local.existing_application_gateway_segments_for_ingress[2], null)
  ingress_application_gateway_enabled                             = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress
  # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null.
  # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled
  # is set to `true`.
  log_analytics_workspace = var.log_analytics_workspace_enabled ? (
    var.log_analytics_workspace == null ? {
      # Enabled but no workspace supplied: use the azurerm_log_analytics_workspace resource created by this module.
      id                  = local.azurerm_log_analytics_workspace_id
      name                = local.azurerm_log_analytics_workspace_name
      location            = local.azurerm_log_analytics_workspace_location
      resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name
      } : {
      # A workspace was supplied; derive the resource group from its id.
      # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1
      id                  = var.log_analytics_workspace.id
      name                = var.log_analytics_workspace.name
      location            = var.log_analytics_workspace.location
      resource_group_name = split("/", var.log_analytics_workspace.id)[4]
    }
  ) : null # Finally, the Log Analytics Workspace should be disabled.
  node_pools_create_after_destroy  = { for k, p in var.node_pools : k => p if p.create_before_destroy != true }
  node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true }
  private_dns_zone_name            = try(reverse(split("/", var.private_dns_zone_id))[0], null)
  # Query the data source only when a workspace was supplied without its location.
  query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false)
  subnet_ids                                            = [for _, s in local.subnets : s.id]
  # All vnet/pod subnets used by the node pools, plus the default node pool's subnet when supplied.
  subnets = merge({ for k, v in merge(
    [
      for key, pool in var.node_pools : {
        "${key}-vnet-subnet" : pool.vnet_subnet,
        "${key}-pod-subnet" : pool.pod_subnet,
      }
  ]...) : k => v if v != null }, var.vnet_subnet == null ? {} : {
    "vnet-subnet" : {
      id = var.vnet_subnet.id
    }
  })
  # subnet_ids = for id in local.potential_subnet_ids : id if id != null
  use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null
  use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null
  valid_private_dns_zone_regexs = [
    "private\\.[a-z0-9]+\\.azmk8s\\.io",
    "privatelink\\.[a-z0-9]+\\.azmk8s\\.io",
    "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z]+\\.azmk8s\\.io",
    "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z]+\\.azmk8s\\.io",
  ]
}
-------------------------------------------------------------------------------- /log_analytics.tf: --------------------------------------------------------------------------------
resource "azurerm_log_analytics_workspace" "main" {
  count = local.create_analytics_workspace ? 1 : 0

  location                                = var.location
  name                                    = try(coalesce(var.cluster_log_analytics_workspace_name, trim("${var.prefix}-workspace", "-")), "aks-workspace")
  resource_group_name                     = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name)
  allow_resource_only_permissions         = var.log_analytics_workspace_allow_resource_only_permissions
  cmk_for_query_forced                    = var.log_analytics_workspace_cmk_for_query_forced
  daily_quota_gb                          = var.log_analytics_workspace_daily_quota_gb
  data_collection_rule_id                 = var.log_analytics_workspace_data_collection_rule_id
  immediate_data_purge_on_30_days_enabled = var.log_analytics_workspace_immediate_data_purge_on_30_days_enabled
  internet_ingestion_enabled              = var.log_analytics_workspace_internet_ingestion_enabled
  internet_query_enabled                  = var.log_analytics_workspace_internet_query_enabled
  local_authentication_disabled           = var.log_analytics_workspace_local_authentication_disabled
  reservation_capacity_in_gb_per_day      = var.log_analytics_workspace_reservation_capacity_in_gb_per_day
  retention_in_days                       =
var.log_retention_in_days
  sku                                     = var.log_analytics_workspace_sku
  tags                                    = var.tags

  dynamic "identity" {
    for_each = var.log_analytics_workspace_identity == null ? [] : [var.log_analytics_workspace_identity]

    content {
      type         = identity.value.type
      identity_ids = identity.value.identity_ids
    }
  }

  lifecycle {
    precondition {
      condition     = can(coalesce(var.cluster_log_analytics_workspace_name, var.prefix))
      error_message = "You must set one of `var.cluster_log_analytics_workspace_name` and `var.prefix` to create `azurerm_log_analytics_workspace.main`."
    }
  }
}

# Null when the workspace is not created by this module.
locals {
  azurerm_log_analytics_workspace_id                  = try(azurerm_log_analytics_workspace.main[0].id, null)
  azurerm_log_analytics_workspace_location            = try(azurerm_log_analytics_workspace.main[0].location, null)
  azurerm_log_analytics_workspace_name                = try(azurerm_log_analytics_workspace.main[0].name, null)
  azurerm_log_analytics_workspace_resource_group_name = try(azurerm_log_analytics_workspace.main[0].resource_group_name, null)
}

# Looks up the supplied workspace; only queried when the caller did not provide its location.
data "azurerm_log_analytics_workspace" "main" {
  count = local.query_datasource_for_log_analytics_workspace_location ? 1 : 0

  name                = var.log_analytics_workspace.name
  resource_group_name = local.log_analytics_workspace.resource_group_name
}

resource "azurerm_log_analytics_solution" "main" {
  count = local.create_analytics_solution ? 1 : 0

  location              = coalesce(local.log_analytics_workspace.location, try(data.azurerm_log_analytics_workspace.main[0].location, null))
  resource_group_name   = local.log_analytics_workspace.resource_group_name
  solution_name         = "ContainerInsights"
  workspace_name        = local.log_analytics_workspace.name
  workspace_resource_id = local.log_analytics_workspace.id
  tags                  = var.tags

  plan {
    product   = "OMSGallery/ContainerInsights"
    publisher = "Microsoft"
  }
}

locals {
  dcr_location = try(coalesce(try(local.log_analytics_workspace.location, null), try(data.azurerm_log_analytics_workspace.main[0].location, null)), null)
}

resource "azurerm_monitor_data_collection_rule" "dcr" {
  count = (local.create_analytics_workspace && var.oms_agent_enabled) ? 1 : 0

  location            = local.dcr_location
  name                = "MSCI-${local.dcr_location}-${azurerm_kubernetes_cluster.main.name}"
  resource_group_name = var.resource_group_name
  description         = "DCR for Azure Monitor Container Insights"
  tags                = var.tags

  data_flow {
    destinations = [local.log_analytics_workspace.name]
    streams      = var.monitor_data_collection_rule_extensions_streams
  }
  data_flow {
    destinations = [local.log_analytics_workspace.name]
    streams      = ["Microsoft-Syslog"]
  }
  destinations {
    log_analytics {
      name                  = local.log_analytics_workspace.name
      workspace_resource_id = local.log_analytics_workspace.id
    }
  }
  data_sources {
    extension {
      extension_name = "ContainerInsights"
      name           = "ContainerInsightsExtension"
      streams        = var.monitor_data_collection_rule_extensions_streams
      extension_json = jsonencode({
        "dataCollectionSettings" : {
          interval               = var.data_collection_settings.data_collection_interval
          namespaceFilteringMode = var.data_collection_settings.namespace_filtering_mode_for_data_collection
          namespaces             = var.data_collection_settings.namespaces_for_data_collection
          enableContainerLogV2   = var.data_collection_settings.container_log_v2_enabled
        }
      })
    }
    syslog {
      facility_names = var.monitor_data_collection_rule_data_sources_syslog_facilities
      log_levels     = var.monitor_data_collection_rule_data_sources_syslog_levels
      name           = "sysLogsDataSource"
      streams        = ["Microsoft-Syslog"]
    }
  }
}

resource "azurerm_monitor_data_collection_rule_association" "dcra" {
  count = (local.create_analytics_workspace && var.oms_agent_enabled) ? 1 : 0

  target_resource_id      = azurerm_kubernetes_cluster.main.id
  data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr[0].id
  description             = "Association of container insights data collection rule. Deleting this association will break the data collection for this AKS Cluster."
  name                    = "ContainerInsightsExtension"
}
-------------------------------------------------------------------------------- /v4/log_analytics.tf: --------------------------------------------------------------------------------
resource "azurerm_log_analytics_workspace" "main" {
  count = local.create_analytics_workspace ?
1 : 0

  location                                = var.location
  name                                    = try(coalesce(var.cluster_log_analytics_workspace_name, trim("${var.prefix}-workspace", "-")), "aks-workspace")
  resource_group_name                     = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name)
  allow_resource_only_permissions         = var.log_analytics_workspace_allow_resource_only_permissions
  cmk_for_query_forced                    = var.log_analytics_workspace_cmk_for_query_forced
  daily_quota_gb                          = var.log_analytics_workspace_daily_quota_gb
  data_collection_rule_id                 = var.log_analytics_workspace_data_collection_rule_id
  immediate_data_purge_on_30_days_enabled = var.log_analytics_workspace_immediate_data_purge_on_30_days_enabled
  internet_ingestion_enabled              = var.log_analytics_workspace_internet_ingestion_enabled
  internet_query_enabled                  = var.log_analytics_workspace_internet_query_enabled
  local_authentication_disabled           = var.log_analytics_workspace_local_authentication_disabled
  reservation_capacity_in_gb_per_day      = var.log_analytics_workspace_reservation_capacity_in_gb_per_day
  retention_in_days                       = var.log_retention_in_days
  sku                                     = var.log_analytics_workspace_sku
  tags                                    = var.tags

  dynamic "identity" {
    for_each = var.log_analytics_workspace_identity == null ? [] : [var.log_analytics_workspace_identity]

    content {
      type         = identity.value.type
      identity_ids = identity.value.identity_ids
    }
  }

  lifecycle {
    precondition {
      condition     = can(coalesce(var.cluster_log_analytics_workspace_name, var.prefix))
      error_message = "You must set one of `var.cluster_log_analytics_workspace_name` and `var.prefix` to create `azurerm_log_analytics_workspace.main`."
    }
  }
}

# Null when the workspace is not created by this module.
locals {
  azurerm_log_analytics_workspace_id                  = try(azurerm_log_analytics_workspace.main[0].id, null)
  azurerm_log_analytics_workspace_location            = try(azurerm_log_analytics_workspace.main[0].location, null)
  azurerm_log_analytics_workspace_name                = try(azurerm_log_analytics_workspace.main[0].name, null)
  azurerm_log_analytics_workspace_resource_group_name = try(azurerm_log_analytics_workspace.main[0].resource_group_name, null)
}

# Looks up the supplied workspace; only queried when the caller did not provide its location.
data "azurerm_log_analytics_workspace" "main" {
  count = local.query_datasource_for_log_analytics_workspace_location ? 1 : 0

  name                = var.log_analytics_workspace.name
  resource_group_name = local.log_analytics_workspace.resource_group_name
}

resource "azurerm_log_analytics_solution" "main" {
  count = local.create_analytics_solution ? 1 : 0

  location              = coalesce(local.log_analytics_workspace.location, try(data.azurerm_log_analytics_workspace.main[0].location, null))
  resource_group_name   = local.log_analytics_workspace.resource_group_name
  solution_name         = "ContainerInsights"
  workspace_name        = local.log_analytics_workspace.name
  workspace_resource_id = local.log_analytics_workspace.id
  tags                  = var.tags

  plan {
    product   = "OMSGallery/ContainerInsights"
    publisher = "Microsoft"
  }
}

locals {
  dcr_location = try(coalesce(try(local.log_analytics_workspace.location, null), try(data.azurerm_log_analytics_workspace.main[0].location, null)), null)
}

resource "azurerm_monitor_data_collection_rule" "dcr" {
  count = (local.create_analytics_workspace && var.oms_agent_enabled) ? 1 : 0

  location            = local.dcr_location
  name                = "MSCI-${local.dcr_location}-${azurerm_kubernetes_cluster.main.name}"
  resource_group_name = var.resource_group_name
  description         = "DCR for Azure Monitor Container Insights"
  tags                = var.tags

  data_flow {
    destinations = [local.log_analytics_workspace.name]
    streams      = var.monitor_data_collection_rule_extensions_streams
  }
  data_flow {
    destinations = [local.log_analytics_workspace.name]
    streams      = ["Microsoft-Syslog"]
  }
  destinations {
    log_analytics {
      name                  = local.log_analytics_workspace.name
      workspace_resource_id = local.log_analytics_workspace.id
    }
  }
  data_sources {
    extension {
      extension_name = "ContainerInsights"
      name           = "ContainerInsightsExtension"
      streams        = var.monitor_data_collection_rule_extensions_streams
      extension_json = jsonencode({
        "dataCollectionSettings" : {
          interval               = var.data_collection_settings.data_collection_interval
          namespaceFilteringMode = var.data_collection_settings.namespace_filtering_mode_for_data_collection
          namespaces             = var.data_collection_settings.namespaces_for_data_collection
          enableContainerLogV2   = var.data_collection_settings.container_log_v2_enabled
        }
      })
    }
    syslog {
      facility_names = var.monitor_data_collection_rule_data_sources_syslog_facilities
      log_levels     = var.monitor_data_collection_rule_data_sources_syslog_levels
      name           = "sysLogsDataSource"
      streams        = ["Microsoft-Syslog"]
    }
  }
}

resource "azurerm_monitor_data_collection_rule_association" "dcra" {
  count = (local.create_analytics_workspace && var.oms_agent_enabled) ? 1 : 0

  target_resource_id      = azurerm_kubernetes_cluster.main.id
  data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr[0].id
  description             = "Association of container insights data collection rule. 
Deleting this association will break the data collection for this AKS Cluster." 123 | name = "ContainerInsightsExtension" 124 | } -------------------------------------------------------------------------------- /test/upgrade/upgrade_test.go: -------------------------------------------------------------------------------- 1 | package upgrade 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/require" 12 | 13 | test_helper "github.com/Azure/terraform-module-test-helper" 14 | "github.com/gruntwork-io/terratest/modules/terraform" 15 | ) 16 | 17 | func TestExampleUpgrade_startup(t *testing.T) { 18 | t.Parallel() 19 | currentRoot, err := test_helper.GetCurrentModuleRootPath() 20 | if err != nil { 21 | t.FailNow() 22 | } 23 | currentMajorVersion, err := test_helper.GetCurrentMajorVersionFromEnv() 24 | if err != nil { 25 | t.FailNow() 26 | } 27 | vars := map[string]interface{}{ 28 | "client_id": "", 29 | "client_secret": "", 30 | } 31 | managedIdentityId := os.Getenv("MSI_ID") 32 | if managedIdentityId != "" { 33 | vars["managed_identity_principal_id"] = managedIdentityId 34 | } 35 | test_helper.ModuleUpgradeTest(t, "Azure", "terraform-azurerm-aks", "examples/startup", currentRoot, terraform.Options{ 36 | Upgrade: true, 37 | Vars: vars, 38 | }, currentMajorVersion) 39 | } 40 | 41 | func TestExampleUpgrade_without_monitor(t *testing.T) { 42 | t.Parallel() 43 | currentRoot, err := test_helper.GetCurrentModuleRootPath() 44 | if err != nil { 45 | t.FailNow() 46 | } 47 | currentMajorVersion, err := test_helper.GetCurrentMajorVersionFromEnv() 48 | if err != nil { 49 | t.FailNow() 50 | } 51 | var vars map[string]interface{} 52 | managedIdentityId := os.Getenv("MSI_ID") 53 | if managedIdentityId != "" { 54 | vars = map[string]interface{}{ 55 | "managed_identity_principal_id": managedIdentityId, 56 | } 57 | } 58 | test_helper.ModuleUpgradeTest(t, "Azure", "terraform-azurerm-aks", 
"examples/without_monitor", currentRoot, terraform.Options{ 59 | Upgrade: true, 60 | Vars: vars, 61 | }, currentMajorVersion) 62 | } 63 | 64 | func TestExampleUpgrade_named_cluster(t *testing.T) { 65 | t.Parallel() 66 | currentRoot, err := test_helper.GetCurrentModuleRootPath() 67 | if err != nil { 68 | t.FailNow() 69 | } 70 | currentMajorVersion, err := test_helper.GetCurrentMajorVersionFromEnv() 71 | if err != nil { 72 | t.FailNow() 73 | } 74 | var vars map[string]interface{} 75 | managedIdentityId := os.Getenv("MSI_ID") 76 | if managedIdentityId != "" { 77 | vars = map[string]interface{}{ 78 | "managed_identity_principal_id": managedIdentityId, 79 | } 80 | } 81 | test_helper.ModuleUpgradeTest(t, "Azure", "terraform-azurerm-aks", "examples/named_cluster", currentRoot, terraform.Options{ 82 | Upgrade: true, 83 | Vars: vars, 84 | }, currentMajorVersion) 85 | } 86 | 87 | func TestExampleUpgrade(t *testing.T) { 88 | t.Parallel() 89 | examples := []string{ 90 | "examples/with_acr", 91 | "examples/multiple_node_pools", 92 | } 93 | for _, e := range examples { 94 | example := e 95 | t.Run(example, func(t *testing.T) { 96 | t.Parallel() 97 | currentRoot, err := test_helper.GetCurrentModuleRootPath() 98 | if err != nil { 99 | t.FailNow() 100 | } 101 | currentMajorVersion, err := test_helper.GetCurrentMajorVersionFromEnv() 102 | if err != nil { 103 | t.FailNow() 104 | } 105 | test_helper.ModuleUpgradeTest(t, "Azure", "terraform-azurerm-aks", example, currentRoot, terraform.Options{ 106 | Upgrade: true, 107 | }, currentMajorVersion) 108 | }) 109 | } 110 | } 111 | 112 | func TestExampleUpgrade_applicationGw(t *testing.T) { 113 | t.Parallel() 114 | useExistingAppGw := []struct { 115 | useBrownFieldAppGw bool 116 | bringYourOwnVnet bool 117 | createRoleBindingForAppGw bool 118 | }{ 119 | { 120 | bringYourOwnVnet: true, 121 | useBrownFieldAppGw: true, 122 | createRoleBindingForAppGw: true, 123 | }, 124 | { 125 | bringYourOwnVnet: true, 126 | useBrownFieldAppGw: false, 127 | 
createRoleBindingForAppGw: true, 128 | }, 129 | { 130 | bringYourOwnVnet: false, 131 | useBrownFieldAppGw: false, 132 | createRoleBindingForAppGw: false, 133 | }, 134 | } 135 | for _, u := range useExistingAppGw { 136 | t.Run(fmt.Sprintf("useExistingAppGw %t %t %t", u.bringYourOwnVnet, u.useBrownFieldAppGw, u.createRoleBindingForAppGw), func(t *testing.T) { 137 | t.Parallel() 138 | currentRoot, err := test_helper.GetCurrentModuleRootPath() 139 | if err != nil { 140 | t.FailNow() 141 | } 142 | currentMajorVersion, err := test_helper.GetCurrentMajorVersionFromEnv() 143 | if err != nil { 144 | t.FailNow() 145 | } 146 | test_helper.ModuleUpgradeTest(t, "Azure", "terraform-azurerm-aks", "examples/application_gateway_ingress", currentRoot, terraform.Options{ 147 | Upgrade: true, 148 | Vars: map[string]interface{}{ 149 | "bring_your_own_vnet": u.bringYourOwnVnet, 150 | "use_brown_field_application_gateway": u.useBrownFieldAppGw, 151 | "create_role_assignments_for_application_gateway": u.createRoleBindingForAppGw, 152 | }, 153 | MaxRetries: 20, 154 | TimeBetweenRetries: time.Minute, 155 | RetryableTerraformErrors: map[string]string{ 156 | ".*is empty list of object.*": "the ingress hasn't been created, need more time", 157 | }, 158 | }, currentMajorVersion) 159 | }) 160 | } 161 | } 162 | 163 | func TestExamplesForV4(t *testing.T) { 164 | t.Parallel() 165 | examples, err := os.ReadDir("../../examples") 166 | require.NoError(t, err) 167 | currentRoot, err := test_helper.GetCurrentModuleRootPath() 168 | if err != nil { 169 | t.FailNow() 170 | } 171 | currentMajorVersion, err := test_helper.GetCurrentMajorVersionFromEnv() 172 | if err != nil { 173 | t.FailNow() 174 | } 175 | for _, example := range examples { 176 | if !example.IsDir() { 177 | continue 178 | } 179 | if !strings.HasSuffix(example.Name(), "_v4") { 180 | continue 181 | } 182 | t.Run(example.Name(), func(t *testing.T) { 183 | t.Parallel() 184 | tmp, err := os.MkdirTemp("", "") 185 | require.NoError(t, err) 186 | 
defer func() { 187 | _ = os.RemoveAll(tmp) 188 | }() 189 | tfvars := filepath.Join(tmp, "terraform.tfvars") 190 | require.NoError(t, os.WriteFile(tfvars, []byte(` 191 | client_id = "" 192 | client_secret = "" 193 | `), 0o600)) 194 | test_helper.ModuleUpgradeTest(t, "Azure", "terraform-azurerm-aks", fmt.Sprintf("examples/%s", example.Name()), currentRoot, terraform.Options{ 195 | VarFiles: []string{tfvars}, 196 | }, currentMajorVersion) 197 | }) 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /role_assignments.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_role_assignment" "acr" { 2 | for_each = var.attached_acr_id_map 3 | 4 | principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id 5 | scope = each.value 6 | role_definition_name = "AcrPull" 7 | skip_service_principal_aad_check = true 8 | } 9 | 10 | # /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testIdentity 11 | data "azurerm_user_assigned_identity" "cluster_identity" { 12 | count = (var.client_id == "" || nonsensitive(var.client_secret) == "") && var.identity_type == "UserAssigned" ? 1 : 0 13 | 14 | name = split("/", var.identity_ids[0])[8] 15 | resource_group_name = split("/", var.identity_ids[0])[4] 16 | } 17 | 18 | # The AKS cluster identity has the Contributor role on the AKS second resource group (MC_myResourceGroup_myAKSCluster_eastus) 19 | # However when using a custom VNET, the AKS cluster identity needs the Network Contributor role on the VNET subnets 20 | # used by the system node pool and by any additional node pools. 
# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites
# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites
# https://github.com/Azure/terraform-azurerm-aks/issues/178
resource "azurerm_role_assignment" "network_contributor" {
  for_each = var.create_role_assignment_network_contributor && (var.client_id == "" || nonsensitive(var.client_secret) == "") ? local.subnets : {}

  principal_id         = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id)
  scope                = each.value.id
  role_definition_name = "Network Contributor"

  lifecycle {
    precondition {
      condition     = length(var.network_contributor_role_assigned_subnet_ids) == 0
      error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`."
    }
  }
}

resource "azurerm_role_assignment" "network_contributor_on_subnet" {
  for_each = var.network_contributor_role_assigned_subnet_ids

  principal_id         = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id)
  scope                = each.value
  role_definition_name = "Network Contributor"

  lifecycle {
    precondition {
      condition     = !var.create_role_assignment_network_contributor
      error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`."
    }
  }
}

data "azurerm_client_config" "this" {}

# Resolve the VNET that hosts the existing (brown-field) Application Gateway's subnet.
data "azurerm_virtual_network" "application_gateway_vnet" {
  count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0

  name                = local.existing_application_gateway_subnet_vnet_name
  resource_group_name = local.existing_application_gateway_subnet_resource_group_name
}

resource "azurerm_role_assignment" "application_gateway_existing_vnet_network_contributor" {
  count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0

  principal_id         = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
  scope                = data.azurerm_virtual_network.application_gateway_vnet[0].id
  role_definition_name = "Network Contributor"

  lifecycle {
    precondition {
      condition = data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subnet_subscription_id_for_ingress
      # Fixed stale reference: the module's variable is `var.create_role_assignments_for_application_gateway`
      # (used in this resource's `count`), not `var.application_gateway_for_ingress.create_role_assignments`.
      error_message = "Application Gateway's subnet must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`."
    }
  }
}

resource "azurerm_role_assignment" "application_gateway_byo_vnet_network_contributor" {
  count = var.create_role_assignments_for_application_gateway && local.use_green_field_gw_for_ingress ? 1 : 0

  principal_id         = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
  scope                = join("/", slice(local.default_nodepool_subnet_segments, 0, length(local.default_nodepool_subnet_segments) - 2))
  role_definition_name = "Network Contributor"

  lifecycle {
    precondition {
      condition     = var.green_field_application_gateway_for_ingress == null || !(var.create_role_assignments_for_application_gateway && var.vnet_subnet == null)
      error_message = "When `var.vnet_subnet` is `null`, you must set `var.create_role_assignments_for_application_gateway` to `false`, or set `var.green_field_application_gateway_for_ingress` to `null`."
    }
  }
}

resource "azurerm_role_assignment" "existing_application_gateway_contributor" {
  count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0

  principal_id         = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
  scope                = var.brown_field_application_gateway_for_ingress.id
  role_definition_name = "Contributor"

  lifecycle {
    precondition {
      condition     = var.brown_field_application_gateway_for_ingress == null ? true : data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subscription_id_for_ingress
      error_message = "Application Gateway must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`."
    }
  }
}

data "azurerm_resource_group" "ingress_gw" {
  count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0

  name = local.existing_application_gateway_resource_group_for_ingress
}

data "azurerm_resource_group" "aks_rg" {
  count = var.create_role_assignments_for_application_gateway ? 1 : 0

  name = var.resource_group_name
}

resource "azurerm_role_assignment" "application_gateway_resource_group_reader" {
  count = var.create_role_assignments_for_application_gateway && local.ingress_application_gateway_enabled ? 1 : 0

  principal_id         = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
  scope                = local.use_brown_field_gw_for_ingress ?
data.azurerm_resource_group.ingress_gw[0].id : data.azurerm_resource_group.aks_rg[0].id
  role_definition_name = "Reader"
}
-------------------------------------------------------------------------------- /v4/role_assignments.tf: --------------------------------------------------------------------------------
resource "azurerm_role_assignment" "acr" {
  for_each = var.attached_acr_id_map

  principal_id                     = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id
  scope                            = each.value
  role_definition_name             = "AcrPull"
  skip_service_principal_aad_check = true
}

# /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testIdentity
data "azurerm_user_assigned_identity" "cluster_identity" {
  count = (var.client_id == "" || nonsensitive(var.client_secret) == "") && var.identity_type == "UserAssigned" ? 1 : 0

  name                = split("/", var.identity_ids[0])[8]
  resource_group_name = split("/", var.identity_ids[0])[4]
}

# The AKS cluster identity has the Contributor role on the AKS second resource group (MC_myResourceGroup_myAKSCluster_eastus)
# However when using a custom VNET, the AKS cluster identity needs the Network Contributor role on the VNET subnets
# used by the system node pool and by any additional node pools.
# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites
# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites
# https://github.com/Azure/terraform-azurerm-aks/issues/178
resource "azurerm_role_assignment" "network_contributor" {
  for_each = var.create_role_assignment_network_contributor && (var.client_id == "" || nonsensitive(var.client_secret) == "") ? local.subnets : {}

  principal_id         = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id)
  scope                = each.value.id
  role_definition_name = "Network Contributor"

  lifecycle {
    precondition {
      condition     = length(var.network_contributor_role_assigned_subnet_ids) == 0
      error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`."
    }
  }
}

resource "azurerm_role_assignment" "network_contributor_on_subnet" {
  for_each = var.network_contributor_role_assigned_subnet_ids

  principal_id         = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id)
  scope                = each.value
  role_definition_name = "Network Contributor"

  lifecycle {
    precondition {
      condition     = !var.create_role_assignment_network_contributor
      error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`."
    }
  }
}

data "azurerm_client_config" "this" {}

# Resolve the VNET that hosts the existing (brown-field) Application Gateway's subnet.
data "azurerm_virtual_network" "application_gateway_vnet" {
  count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0

  name                = local.existing_application_gateway_subnet_vnet_name
  resource_group_name = local.existing_application_gateway_subnet_resource_group_name
}

resource "azurerm_role_assignment" "application_gateway_existing_vnet_network_contributor" {
  count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0

  principal_id         = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
  scope                = data.azurerm_virtual_network.application_gateway_vnet[0].id
  role_definition_name = "Network Contributor"

  lifecycle {
    precondition {
      condition = data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subnet_subscription_id_for_ingress
      # Fixed stale reference: the module's variable is `var.create_role_assignments_for_application_gateway`
      # (used in this resource's `count`), not `var.application_gateway_for_ingress.create_role_assignments`.
      error_message = "Application Gateway's subnet must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`."
    }
  }
}

resource "azurerm_role_assignment" "application_gateway_byo_vnet_network_contributor" {
  count = var.create_role_assignments_for_application_gateway && local.use_green_field_gw_for_ingress ? 1 : 0

  principal_id         = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
  scope                = join("/", slice(local.default_nodepool_subnet_segments, 0, length(local.default_nodepool_subnet_segments) - 2))
  role_definition_name = "Network Contributor"

  lifecycle {
    precondition {
      condition     = var.green_field_application_gateway_for_ingress == null || !(var.create_role_assignments_for_application_gateway && var.vnet_subnet == null)
      error_message = "When `var.vnet_subnet` is `null`, you must set `var.create_role_assignments_for_application_gateway` to `false`, or set `var.green_field_application_gateway_for_ingress` to `null`."
    }
  }
}

resource "azurerm_role_assignment" "existing_application_gateway_contributor" {
  count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ?
1 : 0 95 | 96 | principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id 97 | scope = var.brown_field_application_gateway_for_ingress.id 98 | role_definition_name = "Contributor" 99 | 100 | lifecycle { 101 | precondition { 102 | condition = var.brown_field_application_gateway_for_ingress == null ? true : data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subscription_id_for_ingress 103 | error_message = "Application Gateway must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`." 104 | } 105 | } 106 | } 107 | 108 | data "azurerm_resource_group" "ingress_gw" { 109 | count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 110 | 111 | name = local.existing_application_gateway_resource_group_for_ingress 112 | } 113 | 114 | data "azurerm_resource_group" "aks_rg" { 115 | count = var.create_role_assignments_for_application_gateway ? 1 : 0 116 | 117 | name = var.resource_group_name 118 | } 119 | 120 | resource "azurerm_role_assignment" "application_gateway_resource_group_reader" { 121 | count = var.create_role_assignments_for_application_gateway && local.ingress_application_gateway_enabled ? 1 : 0 122 | 123 | principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id 124 | scope = local.use_brown_field_gw_for_ingress ? 
data.azurerm_resource_group.ingress_gw[0].id : data.azurerm_resource_group.aks_rg[0].id 125 | role_definition_name = "Reader" 126 | } 127 | -------------------------------------------------------------------------------- /examples/application_gateway_ingress/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "random_id" "name" { 6 | byte_length = 8 7 | } 8 | 9 | resource "azurerm_resource_group" "main" { 10 | count = var.create_resource_group ? 1 : 0 11 | 12 | location = var.location 13 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 14 | } 15 | 16 | locals { 17 | resource_group = { 18 | name = var.create_resource_group ? azurerm_resource_group.main[0].name : var.resource_group_name 19 | location = var.location 20 | } 21 | } 22 | 23 | resource "azurerm_virtual_network" "test" { 24 | count = var.bring_your_own_vnet ? 1 : 0 25 | 26 | address_space = ["10.52.0.0/16"] 27 | location = local.resource_group.location 28 | name = "${random_id.prefix.hex}-vn" 29 | resource_group_name = local.resource_group.name 30 | } 31 | 32 | resource "azurerm_subnet" "test" { 33 | count = var.bring_your_own_vnet ? 1 : 0 34 | 35 | address_prefixes = ["10.52.0.0/24"] 36 | name = "${random_id.prefix.hex}-sn" 37 | resource_group_name = local.resource_group.name 38 | virtual_network_name = azurerm_virtual_network.test[0].name 39 | } 40 | 41 | locals { 42 | appgw_cidr = !var.use_brown_field_application_gateway && !var.bring_your_own_vnet ? "10.225.0.0/16" : "10.52.1.0/24" 43 | } 44 | 45 | resource "azurerm_subnet" "appgw" { 46 | count = var.use_brown_field_application_gateway && var.bring_your_own_vnet ? 
1 : 0 47 | 48 | address_prefixes = [local.appgw_cidr] 49 | name = "${random_id.prefix.hex}-gw" 50 | resource_group_name = local.resource_group.name 51 | virtual_network_name = azurerm_virtual_network.test[0].name 52 | } 53 | 54 | # Locals block for hardcoded names 55 | locals { 56 | backend_address_pool_name = try("${azurerm_virtual_network.test[0].name}-beap", "") 57 | frontend_ip_configuration_name = try("${azurerm_virtual_network.test[0].name}-feip", "") 58 | frontend_port_name = try("${azurerm_virtual_network.test[0].name}-feport", "") 59 | http_setting_name = try("${azurerm_virtual_network.test[0].name}-be-htst", "") 60 | listener_name = try("${azurerm_virtual_network.test[0].name}-httplstn", "") 61 | request_routing_rule_name = try("${azurerm_virtual_network.test[0].name}-rqrt", "") 62 | } 63 | 64 | resource "azurerm_public_ip" "pip" { 65 | count = var.use_brown_field_application_gateway && var.bring_your_own_vnet ? 1 : 0 66 | 67 | allocation_method = "Static" 68 | location = local.resource_group.location 69 | name = "appgw-pip" 70 | resource_group_name = local.resource_group.name 71 | sku = "Standard" 72 | } 73 | 74 | resource "azurerm_application_gateway" "appgw" { 75 | count = var.use_brown_field_application_gateway && var.bring_your_own_vnet ? 
1 : 0 76 | 77 | location = local.resource_group.location 78 | #checkov:skip=CKV_AZURE_120:We don't need the WAF for this simple example 79 | name = "ingress" 80 | resource_group_name = local.resource_group.name 81 | 82 | backend_address_pool { 83 | name = local.backend_address_pool_name 84 | } 85 | backend_http_settings { 86 | cookie_based_affinity = "Disabled" 87 | name = local.http_setting_name 88 | port = 80 89 | protocol = "Http" 90 | request_timeout = 1 91 | } 92 | frontend_ip_configuration { 93 | name = local.frontend_ip_configuration_name 94 | public_ip_address_id = azurerm_public_ip.pip[0].id 95 | } 96 | frontend_port { 97 | name = local.frontend_port_name 98 | port = 80 99 | } 100 | gateway_ip_configuration { 101 | name = "appGatewayIpConfig" 102 | subnet_id = azurerm_subnet.appgw[0].id 103 | } 104 | http_listener { 105 | frontend_ip_configuration_name = local.frontend_ip_configuration_name 106 | frontend_port_name = local.frontend_port_name 107 | name = local.listener_name 108 | protocol = "Http" 109 | } 110 | request_routing_rule { 111 | http_listener_name = local.listener_name 112 | name = local.request_routing_rule_name 113 | rule_type = "Basic" 114 | backend_address_pool_name = local.backend_address_pool_name 115 | backend_http_settings_name = local.http_setting_name 116 | priority = 1 117 | } 118 | sku { 119 | name = "Standard_v2" 120 | tier = "Standard_v2" 121 | capacity = 1 122 | } 123 | 124 | lifecycle { 125 | ignore_changes = [ 126 | tags, 127 | backend_address_pool, 128 | backend_http_settings, 129 | http_listener, 130 | probe, 131 | request_routing_rule, 132 | url_path_map, 133 | ] 134 | } 135 | } 136 | 137 | module "aks" { 138 | #checkov:skip=CKV_AZURE_141:We enable admin account here so we can provision K8s resources directly in this simple example 139 | source = "../.." 
140 | 141 | prefix = random_id.name.hex 142 | resource_group_name = local.resource_group.name 143 | location = local.resource_group.location 144 | kubernetes_version = "1.30" # don't specify the patch version! 145 | automatic_channel_upgrade = "patch" 146 | agents_availability_zones = ["1", "2"] 147 | agents_count = null 148 | agents_max_count = 2 149 | agents_max_pods = 100 150 | agents_min_count = 1 151 | agents_pool_name = "testnodepool" 152 | agents_pool_linux_os_configs = [ 153 | { 154 | transparent_huge_page_enabled = "always" 155 | sysctl_configs = [ 156 | { 157 | fs_aio_max_nr = 65536 158 | fs_file_max = 100000 159 | fs_inotify_max_user_watches = 1000000 160 | } 161 | ] 162 | } 163 | ] 164 | agents_type = "VirtualMachineScaleSets" 165 | azure_policy_enabled = true 166 | enable_auto_scaling = true 167 | enable_host_encryption = true 168 | green_field_application_gateway_for_ingress = var.use_brown_field_application_gateway ? null : { 169 | name = "ingress" 170 | subnet_cidr = local.appgw_cidr 171 | } 172 | brown_field_application_gateway_for_ingress = var.use_brown_field_application_gateway ? { 173 | id = azurerm_application_gateway.appgw[0].id 174 | subnet_id = azurerm_subnet.appgw[0].id 175 | } : null 176 | create_role_assignments_for_application_gateway = var.create_role_assignments_for_application_gateway 177 | local_account_disabled = false 178 | log_analytics_workspace_enabled = false 179 | net_profile_dns_service_ip = "10.0.0.10" 180 | net_profile_service_cidr = "10.0.0.0/16" 181 | network_plugin = "azure" 182 | network_policy = "azure" 183 | os_disk_size_gb = 60 184 | private_cluster_enabled = false 185 | role_based_access_control_enabled = true 186 | rbac_aad = true 187 | sku_tier = "Standard" 188 | vnet_subnet = var.bring_your_own_vnet ? 
{ 189 | id = azurerm_subnet.test[0].id 190 | } : null 191 | depends_on = [ 192 | azurerm_subnet.test, 193 | ] 194 | } -------------------------------------------------------------------------------- /examples/application_gateway_ingress_v4/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "prefix" { 2 | byte_length = 8 3 | } 4 | 5 | resource "random_id" "name" { 6 | byte_length = 8 7 | } 8 | 9 | resource "azurerm_resource_group" "main" { 10 | count = var.create_resource_group ? 1 : 0 11 | 12 | location = var.location 13 | name = coalesce(var.resource_group_name, "${random_id.prefix.hex}-rg") 14 | } 15 | 16 | locals { 17 | resource_group = { 18 | name = var.create_resource_group ? azurerm_resource_group.main[0].name : var.resource_group_name 19 | location = var.location 20 | } 21 | } 22 | 23 | resource "azurerm_virtual_network" "test" { 24 | count = var.bring_your_own_vnet ? 1 : 0 25 | 26 | address_space = ["10.52.0.0/16"] 27 | location = local.resource_group.location 28 | name = "${random_id.prefix.hex}-vn" 29 | resource_group_name = local.resource_group.name 30 | } 31 | 32 | resource "azurerm_subnet" "test" { 33 | count = var.bring_your_own_vnet ? 1 : 0 34 | 35 | address_prefixes = ["10.52.0.0/24"] 36 | name = "${random_id.prefix.hex}-sn" 37 | resource_group_name = local.resource_group.name 38 | virtual_network_name = azurerm_virtual_network.test[0].name 39 | } 40 | 41 | locals { 42 | appgw_cidr = !var.use_brown_field_application_gateway && !var.bring_your_own_vnet ? "10.225.0.0/16" : "10.52.1.0/24" 43 | } 44 | 45 | resource "azurerm_subnet" "appgw" { 46 | count = var.use_brown_field_application_gateway && var.bring_your_own_vnet ? 
1 : 0 47 | 48 | address_prefixes = [local.appgw_cidr] 49 | name = "${random_id.prefix.hex}-gw" 50 | resource_group_name = local.resource_group.name 51 | virtual_network_name = azurerm_virtual_network.test[0].name 52 | } 53 | 54 | # Locals block for hardcoded names 55 | locals { 56 | backend_address_pool_name = try("${azurerm_virtual_network.test[0].name}-beap", "") 57 | frontend_ip_configuration_name = try("${azurerm_virtual_network.test[0].name}-feip", "") 58 | frontend_port_name = try("${azurerm_virtual_network.test[0].name}-feport", "") 59 | http_setting_name = try("${azurerm_virtual_network.test[0].name}-be-htst", "") 60 | listener_name = try("${azurerm_virtual_network.test[0].name}-httplstn", "") 61 | request_routing_rule_name = try("${azurerm_virtual_network.test[0].name}-rqrt", "") 62 | } 63 | 64 | resource "azurerm_public_ip" "pip" { 65 | count = var.use_brown_field_application_gateway && var.bring_your_own_vnet ? 1 : 0 66 | 67 | allocation_method = "Static" 68 | location = local.resource_group.location 69 | name = "appgw-pip" 70 | resource_group_name = local.resource_group.name 71 | sku = "Standard" 72 | } 73 | 74 | resource "azurerm_application_gateway" "appgw" { 75 | count = var.use_brown_field_application_gateway && var.bring_your_own_vnet ? 
1 : 0 76 | 77 | location = local.resource_group.location 78 | #checkov:skip=CKV_AZURE_120:We don't need the WAF for this simple example 79 | name = "ingress" 80 | resource_group_name = local.resource_group.name 81 | 82 | backend_address_pool { 83 | name = local.backend_address_pool_name 84 | } 85 | backend_http_settings { 86 | cookie_based_affinity = "Disabled" 87 | name = local.http_setting_name 88 | port = 80 89 | protocol = "Http" 90 | request_timeout = 1 91 | } 92 | frontend_ip_configuration { 93 | name = local.frontend_ip_configuration_name 94 | public_ip_address_id = azurerm_public_ip.pip[0].id 95 | } 96 | frontend_port { 97 | name = local.frontend_port_name 98 | port = 80 99 | } 100 | gateway_ip_configuration { 101 | name = "appGatewayIpConfig" 102 | subnet_id = azurerm_subnet.appgw[0].id 103 | } 104 | http_listener { 105 | frontend_ip_configuration_name = local.frontend_ip_configuration_name 106 | frontend_port_name = local.frontend_port_name 107 | name = local.listener_name 108 | protocol = "Http" 109 | } 110 | request_routing_rule { 111 | http_listener_name = local.listener_name 112 | name = local.request_routing_rule_name 113 | rule_type = "Basic" 114 | backend_address_pool_name = local.backend_address_pool_name 115 | backend_http_settings_name = local.http_setting_name 116 | priority = 1 117 | } 118 | sku { 119 | name = "Standard_v2" 120 | tier = "Standard_v2" 121 | capacity = 1 122 | } 123 | 124 | lifecycle { 125 | ignore_changes = [ 126 | tags, 127 | backend_address_pool, 128 | backend_http_settings, 129 | http_listener, 130 | probe, 131 | request_routing_rule, 132 | url_path_map, 133 | ] 134 | } 135 | } 136 | 137 | module "aks" { 138 | #checkov:skip=CKV_AZURE_141:We enable admin account here so we can provision K8s resources directly in this simple example 139 | source = "../.." 
140 | 141 | prefix = random_id.name.hex 142 | resource_group_name = local.resource_group.name 143 | location = local.resource_group.location 144 | kubernetes_version = "1.30" # don't specify the patch version! 145 | automatic_channel_upgrade = "patch" 146 | agents_availability_zones = ["1", "2"] 147 | agents_count = null 148 | agents_max_count = 2 149 | agents_max_pods = 100 150 | agents_min_count = 1 151 | agents_pool_name = "testnodepool" 152 | agents_pool_linux_os_configs = [ 153 | { 154 | transparent_huge_page_enabled = "always" 155 | sysctl_configs = [ 156 | { 157 | fs_aio_max_nr = 65536 158 | fs_file_max = 100000 159 | fs_inotify_max_user_watches = 1000000 160 | } 161 | ] 162 | } 163 | ] 164 | agents_type = "VirtualMachineScaleSets" 165 | azure_policy_enabled = true 166 | enable_auto_scaling = true 167 | enable_host_encryption = true 168 | green_field_application_gateway_for_ingress = var.use_brown_field_application_gateway ? null : { 169 | name = "ingress" 170 | subnet_cidr = local.appgw_cidr 171 | } 172 | brown_field_application_gateway_for_ingress = var.use_brown_field_application_gateway ? { 173 | id = azurerm_application_gateway.appgw[0].id 174 | subnet_id = azurerm_subnet.appgw[0].id 175 | } : null 176 | create_role_assignments_for_application_gateway = var.create_role_assignments_for_application_gateway 177 | local_account_disabled = false 178 | log_analytics_workspace_enabled = false 179 | net_profile_dns_service_ip = "10.0.0.10" 180 | net_profile_service_cidr = "10.0.0.0/16" 181 | network_plugin = "azure" 182 | network_policy = "azure" 183 | os_disk_size_gb = 60 184 | private_cluster_enabled = false 185 | role_based_access_control_enabled = true 186 | rbac_aad = true 187 | sku_tier = "Standard" 188 | vnet_subnet = var.bring_your_own_vnet ? 
{ 189 | id = azurerm_subnet.test[0].id 190 | } : null 191 | depends_on = [ 192 | azurerm_subnet.test, 193 | ] 194 | } -------------------------------------------------------------------------------- /CHANGELOG-v9.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## [9.4.1](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.1) (2025-02-05) 4 | 5 | **Merged pull requests:** 6 | 7 | - Revert changes of `9.4.0` [\#635](https://github.com/Azure/terraform-azurerm-aks/pull/635) ([lonegunmanb](https://github.com/lonegunmanb)) 8 | 9 | ## [9.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.0) (2025-02-05) 10 | 11 | **Merged pull requests:** 12 | 13 | - Bump azapi provider to \>=2.0, \< 3.0 [\#632](https://github.com/Azure/terraform-azurerm-aks/pull/632) ([zioproto](https://github.com/zioproto)) 14 | - Dependabot 624 626 [\#627](https://github.com/Azure/terraform-azurerm-aks/pull/627) ([zioproto](https://github.com/zioproto)) 15 | - Bump github.com/Azure/terraform-module-test-helper from 0.28.0 to 0.30.0 in /test [\#626](https://github.com/Azure/terraform-azurerm-aks/pull/626) ([dependabot[bot]](https://github.com/apps/dependabot)) 16 | - Bump github.com/gruntwork-io/terratest from 0.48.0 to 0.48.1 in /test [\#624](https://github.com/Azure/terraform-azurerm-aks/pull/624) ([dependabot[bot]](https://github.com/apps/dependabot)) 17 | - Dependabot changes from PR 609 619 620 [\#621](https://github.com/Azure/terraform-azurerm-aks/pull/621) ([zioproto](https://github.com/zioproto)) 18 | - Bump github.com/Azure/terraform-module-test-helper from 0.27.0 to 0.28.0 in /test [\#620](https://github.com/Azure/terraform-azurerm-aks/pull/620) ([dependabot[bot]](https://github.com/apps/dependabot)) 19 | - Bump github.com/gruntwork-io/terratest from 0.47.2 to 0.48.0 in /test [\#619](https://github.com/Azure/terraform-azurerm-aks/pull/619) ([dependabot[bot]](https://github.com/apps/dependabot)) 20 | - Bump 
golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#616](https://github.com/Azure/terraform-azurerm-aks/pull/616) ([lonegunmanb](https://github.com/lonegunmanb)) 21 | - Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#615](https://github.com/Azure/terraform-azurerm-aks/pull/615) ([dependabot[bot]](https://github.com/apps/dependabot)) 22 | - Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 in /test [\#609](https://github.com/Azure/terraform-azurerm-aks/pull/609) ([dependabot[bot]](https://github.com/apps/dependabot)) 23 | 24 | ## [9.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.3.0) (2024-12-11) 25 | 26 | **Merged pull requests:** 27 | 28 | - Support of oms\_agent\_enabled add-on [\#613](https://github.com/Azure/terraform-azurerm-aks/pull/613) ([lonegunmanb](https://github.com/lonegunmanb)) 29 | - Implement node\_network\_profile for default node pool [\#598](https://github.com/Azure/terraform-azurerm-aks/pull/598) ([zioproto](https://github.com/zioproto)) 30 | - Bump examples to AKS 1.30 [\#595](https://github.com/Azure/terraform-azurerm-aks/pull/595) ([zioproto](https://github.com/zioproto)) 31 | - Add `v4` sub-folder so this module could run with AzureRM provider both `v3` and `v4`. 
[\#594](https://github.com/Azure/terraform-azurerm-aks/pull/594) ([lonegunmanb](https://github.com/lonegunmanb)) 32 | 33 | ## [9.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.2.0) (2024-11-07) 34 | 35 | **Merged pull requests:** 36 | 37 | - Make the Azure Key Vault public because private Key Vault requires preview API [\#599](https://github.com/Azure/terraform-azurerm-aks/pull/599) ([zioproto](https://github.com/zioproto)) 38 | - Bump github.com/Azure/terraform-module-test-helper from 0.25.0 to 0.26.0 in /test [\#593](https://github.com/Azure/terraform-azurerm-aks/pull/593) ([lonegunmanb](https://github.com/lonegunmanb)) 39 | - Use oidc as authentication method [\#592](https://github.com/Azure/terraform-azurerm-aks/pull/592) ([lonegunmanb](https://github.com/lonegunmanb)) 40 | - Update README.md [\#589](https://github.com/Azure/terraform-azurerm-aks/pull/589) ([shailwx](https://github.com/shailwx)) 41 | - Add `cost_analysis_enabled` option [\#583](https://github.com/Azure/terraform-azurerm-aks/pull/583) ([artificial-aidan](https://github.com/artificial-aidan)) 42 | - Bump github.com/Azure/terraform-module-test-helper from 0.24.0 to 0.25.0 in /test [\#581](https://github.com/Azure/terraform-azurerm-aks/pull/581) ([dependabot[bot]](https://github.com/apps/dependabot)) 43 | - Bump github.com/gruntwork-io/terratest from 0.46.15 to 0.47.0 in /test [\#579](https://github.com/Azure/terraform-azurerm-aks/pull/579) ([dependabot[bot]](https://github.com/apps/dependabot)) 44 | - Bump github.com/Azure/terraform-module-test-helper from 0.22.0 to 0.24.0 in /test [\#574](https://github.com/Azure/terraform-azurerm-aks/pull/574) ([dependabot[bot]](https://github.com/apps/dependabot)) 45 | - Bump github.com/hashicorp/go-retryablehttp from 0.7.5 to 0.7.7 in /test [\#562](https://github.com/Azure/terraform-azurerm-aks/pull/562) ([dependabot[bot]](https://github.com/apps/dependabot)) 46 | 47 | ## [9.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.1.0) 
(2024-07-04) 48 | 49 | **Merged pull requests:** 50 | 51 | - Downgrade next major version back to v9 [\#577](https://github.com/Azure/terraform-azurerm-aks/pull/577) ([lonegunmanb](https://github.com/lonegunmanb)) 52 | - Restore devcontainer [\#576](https://github.com/Azure/terraform-azurerm-aks/pull/576) ([zioproto](https://github.com/zioproto)) 53 | - set drainTimeoutInMinutes default value to null [\#575](https://github.com/Azure/terraform-azurerm-aks/pull/575) ([zioproto](https://github.com/zioproto)) 54 | - fix README.md format [\#570](https://github.com/Azure/terraform-azurerm-aks/pull/570) ([joaoestrela](https://github.com/joaoestrela)) 55 | - Bump github.com/hashicorp/go-getter from 1.7.4 to 1.7.5 in /test [\#569](https://github.com/Azure/terraform-azurerm-aks/pull/569) ([dependabot[bot]](https://github.com/apps/dependabot)) 56 | - Start new Changelog file for v10 [\#567](https://github.com/Azure/terraform-azurerm-aks/pull/567) ([zioproto](https://github.com/zioproto)) 57 | - fixed inaccurate variable descriptions for azure cni in overlay mode [\#566](https://github.com/Azure/terraform-azurerm-aks/pull/566) ([Xelef2000](https://github.com/Xelef2000)) 58 | - add drain\_timeout\_in\_minutes and node\_soak\_duration\_in\_minutes [\#564](https://github.com/Azure/terraform-azurerm-aks/pull/564) ([zioproto](https://github.com/zioproto)) 59 | 60 | ## [9.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.0.0) (2024-06-07) 61 | 62 | **Merged pull requests:** 63 | 64 | - Compromise on e2e tests involving ingress, since it's not stable [\#558](https://github.com/Azure/terraform-azurerm-aks/pull/558) ([lonegunmanb](https://github.com/lonegunmanb)) 65 | - Add weekly-codeql action [\#555](https://github.com/Azure/terraform-azurerm-aks/pull/555) ([lonegunmanb](https://github.com/lonegunmanb)) 66 | - Change default value for `var.agents_pool_max_surge` to 10% [\#554](https://github.com/Azure/terraform-azurerm-aks/pull/554) 
([lonegunmanb](https://github.com/lonegunmanb)) 67 | - Update Microsoft.ContainerService managedClusters API version to 2024-02-01 [\#552](https://github.com/Azure/terraform-azurerm-aks/pull/552) ([olofmattsson-inriver](https://github.com/olofmattsson-inriver)) 68 | - Bump github.com/Azure/terraform-module-test-helper from 0.19.0 to 0.22.0 in /test [\#549](https://github.com/Azure/terraform-azurerm-aks/pull/549) ([dependabot[bot]](https://github.com/apps/dependabot)) 69 | - Amending log analytics attributes [\#548](https://github.com/Azure/terraform-azurerm-aks/pull/548) ([lonegunmanb](https://github.com/lonegunmanb)) 70 | - bump k8s version for example since 1.26 has been deprecated [\#540](https://github.com/Azure/terraform-azurerm-aks/pull/540) ([lonegunmanb](https://github.com/lonegunmanb)) 71 | - fix\(typo\): typo in output variable [\#537](https://github.com/Azure/terraform-azurerm-aks/pull/537) ([mbaykara](https://github.com/mbaykara)) 72 | - Bump github.com/Azure/terraform-module-test-helper from 0.18.0 to 0.19.0 in /test [\#521](https://github.com/Azure/terraform-azurerm-aks/pull/521) ([dependabot[bot]](https://github.com/apps/dependabot)) 73 | 74 | 75 | 76 | \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* 77 | -------------------------------------------------------------------------------- /test/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/Azure/terraform-azurerm-aks 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.24.1 6 | 7 | require ( 8 | github.com/Azure/terraform-module-test-helper v0.32.0 9 | github.com/gruntwork-io/terratest v0.48.2 10 | github.com/hashicorp/go-retryablehttp v0.7.7 11 | github.com/stretchr/testify v1.10.0 12 | ) 13 | 14 | require ( 15 | filippo.io/edwards25519 v1.1.0 // indirect 16 | github.com/agext/levenshtein v1.2.3 // indirect 17 | github.com/ahmetb/go-linq/v3 v3.2.0 
// indirect 18 | github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect 19 | github.com/aws/aws-sdk-go-v2 v1.32.5 // indirect 20 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect 21 | github.com/aws/aws-sdk-go-v2/config v1.28.5 // indirect 22 | github.com/aws/aws-sdk-go-v2/credentials v1.17.46 // indirect 23 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.20 // indirect 24 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.41 // indirect 25 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect 26 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect 27 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect 28 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.24 // indirect 29 | github.com/aws/aws-sdk-go-v2/service/acm v1.30.6 // indirect 30 | github.com/aws/aws-sdk-go-v2/service/autoscaling v1.51.0 // indirect 31 | github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.44.0 // indirect 32 | github.com/aws/aws-sdk-go-v2/service/dynamodb v1.37.1 // indirect 33 | github.com/aws/aws-sdk-go-v2/service/ec2 v1.193.0 // indirect 34 | github.com/aws/aws-sdk-go-v2/service/ecr v1.36.6 // indirect 35 | github.com/aws/aws-sdk-go-v2/service/ecs v1.52.0 // indirect 36 | github.com/aws/aws-sdk-go-v2/service/iam v1.38.1 // indirect 37 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect 38 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.5 // indirect 39 | github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.5 // indirect 40 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5 // indirect 41 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5 // indirect 42 | github.com/aws/aws-sdk-go-v2/service/kms v1.37.6 // indirect 43 | github.com/aws/aws-sdk-go-v2/service/lambda v1.69.0 // indirect 44 | github.com/aws/aws-sdk-go-v2/service/rds v1.91.0 // indirect 45 | github.com/aws/aws-sdk-go-v2/service/route53 
v1.46.2 // indirect 46 | github.com/aws/aws-sdk-go-v2/service/s3 v1.69.0 // indirect 47 | github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.6 // indirect 48 | github.com/aws/aws-sdk-go-v2/service/sns v1.33.6 // indirect 49 | github.com/aws/aws-sdk-go-v2/service/sqs v1.37.1 // indirect 50 | github.com/aws/aws-sdk-go-v2/service/ssm v1.56.0 // indirect 51 | github.com/aws/aws-sdk-go-v2/service/sso v1.24.6 // indirect 52 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.5 // indirect 53 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.1 // indirect 54 | github.com/aws/smithy-go v1.22.1 // indirect 55 | github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect 56 | github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect 57 | github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect 58 | github.com/davecgh/go-spew v1.1.1 // indirect 59 | github.com/emicklei/go-restful/v3 v3.9.0 // indirect 60 | github.com/go-errors/errors v1.5.1 // indirect 61 | github.com/go-logr/logr v1.4.2 // indirect 62 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 63 | github.com/go-openapi/jsonreference v0.20.2 // indirect 64 | github.com/go-openapi/swag v0.22.3 // indirect 65 | github.com/go-sql-driver/mysql v1.8.1 // indirect 66 | github.com/gogo/protobuf v1.3.2 // indirect 67 | github.com/golang/protobuf v1.5.4 // indirect 68 | github.com/google/gnostic-models v0.6.8 // indirect 69 | github.com/google/go-cmp v0.6.0 // indirect 70 | github.com/google/go-github/v42 v42.0.0 // indirect 71 | github.com/google/go-querystring v1.1.0 // indirect 72 | github.com/google/gofuzz v1.2.0 // indirect 73 | github.com/google/uuid v1.6.0 // indirect 74 | github.com/gruntwork-io/go-commons v0.17.2 // indirect 75 | github.com/hashicorp/errwrap v1.0.0 // indirect 76 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 77 | github.com/hashicorp/go-getter/v2 v2.2.3 // indirect 78 | github.com/hashicorp/go-multierror v1.1.1 // indirect 79 | 
github.com/hashicorp/go-safetemp v1.0.0 // indirect 80 | github.com/hashicorp/go-version v1.7.0 // indirect 81 | github.com/hashicorp/hcl v1.0.0 // indirect 82 | github.com/hashicorp/hcl/v2 v2.23.0 // indirect 83 | github.com/hashicorp/terraform-config-inspect v0.0.0-20250203082807-efaa306e97b4 // indirect 84 | github.com/hashicorp/terraform-json v0.24.0 // indirect 85 | github.com/imdario/mergo v0.3.11 // indirect 86 | github.com/jackc/pgpassfile v1.0.0 // indirect 87 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 88 | github.com/jackc/pgx/v5 v5.7.1 // indirect 89 | github.com/jackc/puddle/v2 v2.2.2 // indirect 90 | github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a // indirect 91 | github.com/jmespath/go-jmespath v0.4.0 // indirect 92 | github.com/josharian/intern v1.0.0 // indirect 93 | github.com/json-iterator/go v1.1.12 // indirect 94 | github.com/klauspost/compress v1.16.5 // indirect 95 | github.com/lonegunmanb/tfmodredirector v0.1.0 // indirect 96 | github.com/magodo/hclgrep v0.0.0-20220303061548-1b2b24c7caf6 // indirect 97 | github.com/mailru/easyjson v0.7.7 // indirect 98 | github.com/mattn/go-zglob v0.0.3 // indirect 99 | github.com/minamijoyo/hcledit v0.2.6 // indirect 100 | github.com/mitchellh/go-homedir v1.1.0 // indirect 101 | github.com/mitchellh/go-testing-interface v1.14.1 // indirect 102 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect 103 | github.com/moby/spdystream v0.2.0 // indirect 104 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 105 | github.com/modern-go/reflect2 v1.0.2 // indirect 106 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 107 | github.com/pmezard/go-difflib v1.0.0 // indirect 108 | github.com/pquerna/otp v1.4.0 // indirect 109 | github.com/r3labs/diff/v3 v3.0.1 // indirect 110 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 111 | github.com/spf13/afero v1.12.0 // indirect 112 | github.com/spf13/pflag v1.0.5 
// indirect 113 | github.com/thanhpk/randstr v1.0.6 // indirect 114 | github.com/tmccombs/hcl2json v0.6.4 // indirect 115 | github.com/ulikunitz/xz v0.5.11 // indirect 116 | github.com/urfave/cli/v2 v2.10.3 // indirect 117 | github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect 118 | github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect 119 | github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect 120 | github.com/zclconf/go-cty v1.15.1 // indirect 121 | golang.org/x/crypto v0.36.0 // indirect 122 | golang.org/x/exp v0.0.0-20221106115401-f9659909a136 // indirect 123 | golang.org/x/mod v0.24.0 // indirect 124 | golang.org/x/net v0.38.0 // indirect 125 | golang.org/x/oauth2 v0.28.0 // indirect 126 | golang.org/x/sync v0.12.0 // indirect 127 | golang.org/x/sys v0.31.0 // indirect 128 | golang.org/x/term v0.30.0 // indirect 129 | golang.org/x/text v0.23.0 // indirect 130 | golang.org/x/time v0.11.0 // indirect 131 | golang.org/x/tools v0.31.0 // indirect 132 | google.golang.org/protobuf v1.36.1 // indirect 133 | gopkg.in/inf.v0 v0.9.1 // indirect 134 | gopkg.in/yaml.v2 v2.4.0 // indirect 135 | gopkg.in/yaml.v3 v3.0.1 // indirect 136 | k8s.io/api v0.28.4 // indirect 137 | k8s.io/apimachinery v0.28.4 // indirect 138 | k8s.io/client-go v0.28.4 // indirect 139 | k8s.io/klog/v2 v2.100.1 // indirect 140 | k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect 141 | k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect 142 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 143 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 144 | sigs.k8s.io/yaml v1.3.0 // indirect 145 | ) 146 | --------------------------------------------------------------------------------