19 | {{ end }}
20 | Log in with username {{ .Values.admin.user}}.
21 | To find out the password, run the following command:
22 |
23 | echo "DefectDojo {{ .Values.admin.user}} password: $(kubectl \
24 | get secret {{ $fullName }} \
25 | --namespace={{ .Release.Namespace }} \
26 | --output jsonpath='{.data.DD_ADMIN_PASSWORD}' \
27 | | base64 --decode)"
28 |
--------------------------------------------------------------------------------
/modules/defectdojo/templates/django-service.yaml:
--------------------------------------------------------------------------------
1 | {{- $fullName := include "defectdojo.fullname" . -}}
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: {{ $fullName }}-django
6 | labels:
7 | defectdojo.org/component: django
8 | app.kubernetes.io/name: {{ include "defectdojo.name" . }}
9 | app.kubernetes.io/instance: {{ .Release.Name }}
10 | app.kubernetes.io/managed-by: {{ .Release.Service }}
11 | helm.sh/chart: {{ include "defectdojo.chart" . }}
12 | spec:
13 | selector:
14 | defectdojo.org/component: django
15 | app.kubernetes.io/name: {{ include "defectdojo.name" . }}
16 | app.kubernetes.io/instance: {{ .Release.Name }}
17 | ports:
18 | - name: http
19 | protocol: TCP
20 | {{- if .Values.django.nginx.tls.enabled }}
21 | port: 443
22 | targetPort: 8443
23 | {{- else }}
24 | port: 80
25 | targetPort: 8080
26 | {{- end }}
27 | {{- if .Values.django.serviceType }}
28 | type: {{ .Values.django.serviceType }}
29 | {{- end }}
30 | {{- if .Values.gke.useGKEIngress }}
31 | type: NodePort
32 | {{- end }}
33 |
--------------------------------------------------------------------------------
/modules/defectdojo/templates/extra-secret.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.extraSecrets -}}
2 | {{- $fullName := include "defectdojo.fullname" . -}}
3 | apiVersion: v1
4 | kind: Secret
5 | metadata:
6 | name: {{ $fullName }}-extrasecrets
7 | labels:
8 | app.kubernetes.io/name: {{ include "defectdojo.name" . }}
9 | app.kubernetes.io/instance: {{ .Release.Name }}
10 | app.kubernetes.io/managed-by: {{ .Release.Service }}
11 | helm.sh/chart: {{ include "defectdojo.chart" . }}
12 | type: Opaque
13 | data:
14 | {{- range $key, $value := .Values.extraSecrets }}
15 | {{ $key | indent 2}}: {{ $value | b64enc }}
16 | {{- end }}
17 | {{- end }}
18 |
--------------------------------------------------------------------------------
/modules/defectdojo/templates/gke-managed-certificate.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.gke.useManagedCertificate }}
2 | {{- $fullName := include "defectdojo.fullname" . -}}
3 | apiVersion: networking.gke.io/v1
4 | kind: ManagedCertificate
5 | metadata:
6 | name: {{ $fullName }}-django
7 | spec:
8 | domains:
9 | - {{ .Values.host }}
10 | {{- end }}
11 |
--------------------------------------------------------------------------------
/modules/defectdojo/templates/media-pvc.yaml:
--------------------------------------------------------------------------------
1 | {{- $fullName := include "django.pvc_name" $ -}}
2 | {{ with .Values.django.mediaPersistentVolume }}
3 | {{- if and .enabled (eq .type "pvc") .persistentVolumeClaim.create }}
4 | apiVersion: v1
5 | kind: PersistentVolumeClaim
6 | metadata:
7 | labels:
8 | defectdojo.org/component: django
9 | app.kubernetes.io/name: {{ include "defectdojo.name" $ }}
10 | app.kubernetes.io/instance: {{ $.Release.Name }}
11 | app.kubernetes.io/managed-by: {{ $.Release.Service }}
12 | helm.sh/chart: {{ include "defectdojo.chart" $ }}
13 | name: {{ $fullName }}
14 | spec:
15 | accessModes:
16 | {{- toYaml .persistentVolumeClaim.accessModes | nindent 4 }}
17 | resources:
18 | requests:
19 | storage: {{ .persistentVolumeClaim.size }}
20 | {{- if .persistentVolumeClaim.storage_class_name }}
21 | storageClassName: {{ .persistentVolumeClaim.storage_class_name }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
--------------------------------------------------------------------------------
/modules/defectdojo/templates/sa.yaml:
--------------------------------------------------------------------------------
1 | {{- $fullName := include "defectdojo.fullname" . -}}
2 | kind: ServiceAccount
3 | apiVersion: v1
4 | metadata:
5 | name: {{ $fullName }}
6 | labels:
7 | app.kubernetes.io/name: {{ include "defectdojo.name" . }}
8 | app.kubernetes.io/instance: {{ .Release.Name }}
9 | app.kubernetes.io/managed-by: {{ .Release.Service }}
10 | helm.sh/chart: {{ include "defectdojo.chart" . }}
11 | annotations:
12 | helm.sh/resource-policy: keep
13 | helm.sh/hook: "pre-install"
14 | helm.sh/hook-delete-policy: "before-hook-creation"
15 | {{- with .Values.annotations }}
16 | {{ toYaml . | nindent 4 }}
17 | {{- end }}
18 | {{- if ne .Values.gke.workloadIdentityEmail "" }}
19 | iam.gke.io/gcp-service-account: {{ .Values.gke.workloadIdentityEmail }}
20 | {{- end }}
21 |
--------------------------------------------------------------------------------
/modules/defectdojo/templates/secret-postgresql-ha-pgpool.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.createPostgresqlHaPgpoolSecret -}}
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: {{ .Values.postgresqlha.global.pgpool.existingSecret }}
6 | labels:
7 | app.kubernetes.io/name: {{ include "defectdojo.name" . }}
8 | app.kubernetes.io/instance: {{ .Release.Name }}
9 | app.kubernetes.io/managed-by: {{ .Release.Service }}
10 | helm.sh/chart: {{ include "defectdojo.chart" . }}
11 | annotations:
12 | helm.sh/resource-policy: keep
13 | helm.sh/hook: "pre-install"
14 | helm.sh/hook-delete-policy: "before-hook-creation"
15 | type: Opaque
16 | data:
17 | {{- if .Values.postgresqlha.pgpool.adminPassword }}
18 | admin-password: {{ .Values.postgresqlha.pgpool.adminPassword | b64enc | quote }}
19 | {{- else }}
20 | {{- $pgpoolRandomPassword := randAlphaNum 16 | b64enc | quote }}
21 | admin-password: {{ $pgpoolRandomPassword }}
22 | {{- end }}
23 | {{- end }}
24 |
--------------------------------------------------------------------------------
/modules/defectdojo/templates/secret-rabbitmq.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.createRabbitMqSecret -}}
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: {{ .Values.rabbitmq.auth.existingPasswordSecret }}
6 | labels:
7 | app.kubernetes.io/name: {{ include "defectdojo.name" . }}
8 | app.kubernetes.io/instance: {{ .Release.Name }}
9 | app.kubernetes.io/managed-by: {{ .Release.Service }}
10 | helm.sh/chart: {{ include "defectdojo.chart" . }}
11 | annotations:
12 | helm.sh/resource-policy: keep
13 | helm.sh/hook: "pre-install"
14 | helm.sh/hook-delete-policy: "before-hook-creation"
15 | type: Opaque
16 | data:
17 | {{- if .Values.rabbitmq.auth.password }}
18 | rabbitmq-password: {{ .Values.rabbitmq.auth.password | b64enc | quote }}
19 | {{- else }}
20 | rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }}
21 | {{- end}}
22 | {{- if .Values.rabbitmq.auth.erlangCookie }}
23 | rabbitmq-erlang-cookie: {{ .Values.rabbitmq.auth.erlangCookie | b64enc | quote }}
24 | {{- else }}
25 | rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }}
26 | {{- end }}
27 | {{- end }}
28 |
--------------------------------------------------------------------------------
/modules/defectdojo/templates/secret-redis.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.createRedisSecret -}}
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: {{ .Values.redis.auth.existingSecret }}
6 | labels:
7 | app.kubernetes.io/name: {{ include "defectdojo.name" . }}
8 | app.kubernetes.io/instance: {{ .Release.Name }}
9 | app.kubernetes.io/managed-by: {{ .Release.Service }}
10 | helm.sh/chart: {{ include "defectdojo.chart" . }}
11 | annotations:
12 | helm.sh/resource-policy: keep
13 | helm.sh/hook: "pre-install"
14 | helm.sh/hook-delete-policy: "before-hook-creation"
15 | type: Opaque
16 | data:
17 | {{- if .Values.redis.auth.password }}
18 | {{ .Values.redis.auth.existingSecretPasswordKey }}: {{ .Values.redis.auth.password | b64enc | quote }}
19 | {{- else }}
20 | {{ .Values.redis.auth.existingSecretPasswordKey }}: {{ randAlphaNum 10 | b64enc | quote }}
21 | {{- end }}
22 | {{- end }}
23 |
--------------------------------------------------------------------------------
/modules/external-secret/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_iam_policy_document" "external_secrets" {
2 | statement {
3 | actions = ["ssm:GetParameter"]
4 | resources = var.external_secrets_ssm_parameter_arns
5 | }
6 |
7 | statement {
8 | actions = [
9 | "secretsmanager:GetResourcePolicy",
10 | "secretsmanager:GetSecretValue",
11 | "secretsmanager:DescribeSecret",
12 | "secretsmanager:ListSecretVersionIds",
13 | ]
14 | resources = var.external_secrets_secrets_manager_arns
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/modules/external-secret/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 | manage_via_gitops = var.manage_via_gitops
4 | set_values = local.set_values
5 | helm_config = local.helm_config
6 | irsa_config = local.irsa_config
7 | addon_context = var.addon_context
8 | }
9 |
10 | resource "aws_iam_policy" "external_secrets" {
11 | name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa"
12 | path = var.addon_context.irsa_iam_role_path
13 | description = "Provides permissions for External Secrets to retrieve secrets from AWS SSM and AWS Secrets Manager"
14 | policy = data.aws_iam_policy_document.external_secrets.json
15 | }
16 |
--------------------------------------------------------------------------------
/modules/external-secret/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/external-secret/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/falco/main.tf:
--------------------------------------------------------------------------------
1 | resource "kubernetes_namespace" "falco" {
2 | count = var.falco_enabled ? 1 : 0
3 | metadata {
4 | name = "falco"
5 | }
6 | }
7 |
8 | resource "helm_release" "falco" {
9 | count = var.falco_enabled ? 1 : 0
10 | depends_on = [kubernetes_namespace.falco]
11 | name = "falco"
12 | namespace = "falco"
13 | chart = "falco"
14 | repository = "https://falcosecurity.github.io/charts"
15 | timeout = 600
16 | version = var.chart_version
17 | values = [
18 | templatefile("${path.module}/values.yaml", {
19 | slack_webhook = var.slack_webhook
20 | })
21 | ]
22 | }
23 |
--------------------------------------------------------------------------------
/modules/falco/output.tf:
--------------------------------------------------------------------------------
1 | output "falco_namespace" {
2 | value = try(kubernetes_namespace.falco[0].metadata[0].name, null)
3 | description = "The namespace where Falco is deployed"
4 | }
5 |
6 | output "falco_release" {
7 | value = try(helm_release.falco[0].name, null)
8 | description = "The Helm release name for Falco"
9 | }
10 |
--------------------------------------------------------------------------------
/modules/falco/variable.tf:
--------------------------------------------------------------------------------
1 | variable "falco_enabled" {
2 | description = "Enable or disable Falco deployment"
3 | type = bool
4 | default = true
5 | }
6 |
7 | variable "slack_webhook" {
8 | description = "Slack webhook URL for Falco alerts"
9 | type = string
10 | default = ""
11 | }
12 |
13 | variable "version" {
14 | description = "Helm Chart version of Falco"
15 | type = string
16 | default = ""
17 | }
18 |
--------------------------------------------------------------------------------
/modules/falco/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.12.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 1.11.1"
8 | }
9 | helm = {
10 | source = "hashicorp/helm"
11 | version = ">= 1.0.0"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/helm-addon/outputs.tf:
--------------------------------------------------------------------------------
1 | output "helm_release" {
2 | description = "Map of attributes of the Helm release created without sensitive outputs"
3 | value = try({ for k, v in helm_release.addon[0] : k => v if k != "repository_password" }, {})
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = try(helm_release.addon[0].metadata, null)
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = try(module.irsa[0].irsa_iam_role_arn, null)
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = try(module.irsa[0].irsa_iam_role_name, null)
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = try(coalesce(try(module.irsa[0].service_account, null), lookup(var.irsa_config, "kubernetes_service_account", null)), null)
24 | }
25 |
--------------------------------------------------------------------------------
/modules/helm-addon/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm chart config. Repository and version required. See https://registry.terraform.io/providers/hashicorp/helm/latest/docs"
3 | type = any
4 | }
5 |
6 | variable "set_values" {
7 | description = "Forced set values"
8 | type = any
9 | default = []
10 | }
11 |
12 | variable "set_sensitive_values" {
13 | description = "Forced set_sensitive values"
14 | type = any
15 | default = []
16 | }
17 |
18 | variable "manage_via_gitops" {
19 | description = "Determines if the add-on should be managed via GitOps"
20 | type = bool
21 | default = false
22 | }
23 |
24 | variable "irsa_iam_role_name" {
25 | description = "IAM role name for IRSA"
26 | type = string
27 | default = ""
28 | }
29 |
30 | variable "irsa_config" {
31 | description = "Input configuration for IRSA module"
32 | type = any
33 | default = {}
34 | }
35 |
36 | variable "addon_context" {
37 | description = "Input configuration for the addon"
38 | type = any
39 | }
40 |
--------------------------------------------------------------------------------
/modules/helm-addon/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | helm = {
6 | source = "hashicorp/helm"
7 | version = ">= 2.4.1"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/ingress-nginx/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/ingress-nginx/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/irsa/outputs.tf:
--------------------------------------------------------------------------------
1 | output "irsa_iam_role_arn" {
2 | description = "IAM role ARN for your service account"
3 | value = try(aws_iam_role.irsa[0].arn, null)
4 | }
5 |
6 | output "irsa_iam_role_name" {
7 | description = "IAM role name for your service account"
8 | value = try(aws_iam_role.irsa[0].name, null)
9 | }
10 |
11 | output "namespace" {
12 | description = "IRSA Namespace"
13 | value = try(kubernetes_namespace_v1.irsa[0].id, var.kubernetes_namespace)
14 | }
15 |
16 | output "service_account" {
17 | description = "IRSA Service Account"
18 | value = try(kubernetes_service_account_v1.irsa[0].id, var.kubernetes_service_account)
19 | }
20 |
--------------------------------------------------------------------------------
/modules/irsa/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.10"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/karpenter/config/karpenter.yaml:
--------------------------------------------------------------------------------
1 | nodeSelector:
2 | kubernetes.io/os: linux
3 |
4 | settings:
5 | clusterName: ${eks_cluster_id}
6 | clusterEndpoint: ${eks_cluster_endpoint}
7 | eksControlPlane: false
8 | featureGates:
9 | spotToSpotConsolidation: true
10 | nodeRepair: true
11 |
12 | controller:
13 | containerName: controller
14 | resources:
15 | requests:
16 | cpu: 50m
17 | memory: 200Mi
18 | limits:
19 | cpu: 100m
20 | memory: 400Mi
21 |
22 | podAnnotations:
23 | co.elastic.logs/enabled: "true"
24 |
25 | affinity:
26 | nodeAffinity:
27 | requiredDuringSchedulingIgnoredDuringExecution:
28 | nodeSelectorTerms:
29 | - matchExpressions:
30 | - key: "Addons-Services"
31 | operator: In
32 | values:
33 | - "true"
34 |
35 | serviceMonitor:
36 | # -- Specifies whether a ServiceMonitor should be created.
37 | enabled: ${enable_service_monitor}
38 |
39 | service:
40 | # -- Additional annotations for the Service.
41 | annotations: {}
42 |
43 | schedulerName: default-scheduler
44 |
--------------------------------------------------------------------------------
/modules/karpenter/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/karpenter/scripts/patch_karpenter_crds.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | echo "Patching Karpenter CRDs with Helm labels and annotations..."
6 |
7 | # Define CRD names
8 | CRDS=(
9 | "ec2nodeclasses.karpenter.k8s.aws"
10 | "nodepools.karpenter.sh"
11 | "nodeclaims.karpenter.sh"
12 | )
13 |
14 | # Define Helm release name (should match Terraform Helm release)
15 | HELM_RELEASE="karpenter-crd" # Make sure this matches your Helm release name
16 | KARPENTER_NAMESPACE="default" # Change if using a different namespace
17 |
18 | # Apply Helm labels and annotations if the CRD exists
19 | for CRD in "${CRDS[@]}"; do
20 | if kubectl get crd "$CRD" > /dev/null 2>&1; then
21 | echo "Patching $CRD..."
22 | kubectl label crd "$CRD" app.kubernetes.io/managed-by=Helm --overwrite
23 | kubectl annotate crd "$CRD" meta.helm.sh/release-name="$HELM_RELEASE" --overwrite
24 | kubectl annotate crd "$CRD" meta.helm.sh/release-namespace="$KARPENTER_NAMESPACE" --overwrite
25 | else
26 | echo "CRD $CRD does not exist. Skipping..."
27 | fi
28 | done
29 |
30 | echo "Karpenter CRDs patching completed!"
31 |
--------------------------------------------------------------------------------
/modules/karpenter/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | helm = {
10 | source = "hashicorp/helm"
11 | version = ">= 2.0.0, < 4.0.0"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/keda/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 | manage_via_gitops = var.manage_via_gitops
4 | set_values = local.set_values
5 | helm_config = local.helm_config
6 | irsa_config = local.irsa_config
7 | addon_context = var.addon_context
8 | }
9 |
10 | resource "aws_iam_policy" "keda_irsa" {
11 | description = "KEDA IAM role policy for SQS and CloudWatch"
12 | name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa"
13 | path = var.addon_context.irsa_iam_role_path
14 | policy = data.aws_iam_policy_document.keda_irsa.json
15 | tags = var.addon_context.tags
16 | }
17 |
--------------------------------------------------------------------------------
/modules/keda/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/keda/values.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | limits:
3 | cpu: 200m
4 | memory: 100Mi
5 | requests:
6 | cpu: 100m
7 | memory: 50Mi
8 |
9 | nodeSelector:
10 | kubernetes.io/os: linux
11 |
--------------------------------------------------------------------------------
/modules/keda/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/kubernetes-dashboard/outputs.tf:
--------------------------------------------------------------------------------
1 | output "k8s-dashboard-admin-token" {
2 | value = nonsensitive(kubernetes_secret_v1.admin-user.data.token)
3 | }
4 |
5 | output "k8s-dashboard-read-only-token" {
6 | value = nonsensitive(kubernetes_secret_v1.dashboard_read_only_sa_token.data.token)
7 | }
8 |
--------------------------------------------------------------------------------
/modules/kubernetes-dashboard/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | helm = {
10 | source = "hashicorp/helm"
11 | version = "~> 2.0"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/metrics-server-vpa/README.md:
--------------------------------------------------------------------------------
1 | # metrics-server-vpa
2 |
3 |
4 | ## Requirements
5 |
6 | No requirements.
7 |
8 | ## Providers
9 |
10 | | Name | Version |
11 | |------|---------|
12 | | [helm](#provider\_helm) | n/a |
13 |
14 | ## Modules
15 |
16 | No modules.
17 |
18 | ## Resources
19 |
20 | | Name | Type |
21 | |------|------|
22 | | [helm_release.metrics-server-vpa](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
23 |
24 | ## Inputs
25 |
26 | | Name | Description | Type | Default | Required |
27 | |------|-------------|------|---------|:--------:|
28 | | [metrics\_server\_vpa\_config](#input\_metrics\_server\_vpa\_config) | Configuration for the VPA that manages the metrics-server deployment | `any` | <pre>{<br>  "maxCPU": "100m",<br>  "maxMemory": "500Mi",<br>  "metricsServerDeploymentName": "metrics-server",<br>  "minCPU": "25m",<br>  "minMemory": "150Mi"<br>}</pre> | no |
29 |
30 | ## Outputs
31 |
32 | No outputs.
33 |
34 |
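35 | ## Usage
36 |
37 | A minimal usage sketch; the module source path and the override values shown here are illustrative, not taken from a published example:
38 |
39 | ```hcl
40 | module "metrics_server_vpa" {
41 |   # Source path is illustrative; adjust it to where this module lives in your configuration.
42 |   source = "./modules/metrics-server-vpa"
43 |
44 |   metrics_server_vpa_config = {
45 |     minCPU                      = "25m"
46 |     maxCPU                      = "100m"
47 |     minMemory                   = "150Mi"
48 |     maxMemory                   = "500Mi"
49 |     metricsServerDeploymentName = "metrics-server"
50 |   }
51 | }
52 | ```
53 |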
--------------------------------------------------------------------------------
/modules/metrics-server-vpa/config/values.yaml:
--------------------------------------------------------------------------------
1 | metricsServerDeploymentName: ${metricsServerDeploymentName}
2 | minCPU: ${minCPU}
3 | minMemory: ${minMemory}
4 | maxCPU: ${maxCPU}
5 | maxMemory: ${maxMemory}
6 |
--------------------------------------------------------------------------------
/modules/metrics-server-vpa/main.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "metrics-server-vpa" {
2 | name = "metricsservervpa"
3 | namespace = "kube-system"
4 | chart = "${path.module}/metrics-server-vpa/"
5 | timeout = 600
6 | values = [
7 | templatefile("${path.module}/config/values.yaml", {
8 | minCPU = var.metrics_server_vpa_config.minCPU,
9 | minMemory = var.metrics_server_vpa_config.minMemory,
10 | maxCPU = var.metrics_server_vpa_config.maxCPU,
11 | maxMemory = var.metrics_server_vpa_config.maxMemory,
12 | metricsServerDeploymentName = var.metrics_server_vpa_config.metricsServerDeploymentName
13 | })
14 | ]
15 | }
16 |
--------------------------------------------------------------------------------
/modules/metrics-server-vpa/metrics-server-vpa/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | appVersion: "1.0"
3 | description: A Helm chart that adds a VPA for metrics-server
4 | name: metricsservervpa
5 | version: 1.0.0
6 |
--------------------------------------------------------------------------------
/modules/metrics-server-vpa/metrics-server-vpa/templates/vpa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "autoscaling.k8s.io/v1"
2 | kind: VerticalPodAutoscaler
3 | metadata:
4 | name: metrics-server-vpa
5 | namespace: kube-system
6 | spec:
7 | targetRef:
8 | apiVersion: "apps/v1"
9 | kind: Deployment
10 | name: {{ .Values.metricsServerDeploymentName}}
11 | updatePolicy:
12 | updateMode: "Auto"
13 | resourcePolicy:
14 | containerPolicies:
15 | - containerName: '*'
16 | minAllowed:
17 | cpu: {{ .Values.minCPU}}
18 | memory: {{ .Values.minMemory}}
19 | maxAllowed:
20 | cpu: {{ .Values.maxCPU}}
21 | memory: {{ .Values.maxMemory}}
22 | controlledResources: ["cpu", "memory"]
23 |
--------------------------------------------------------------------------------
/modules/metrics-server-vpa/variables.tf:
--------------------------------------------------------------------------------
1 | variable "metrics_server_vpa_config" {
2 | description = "Configuration for the VPA that manages the metrics-server deployment"
3 | default = {
4 |
5 | minCPU = "25m"
6 | maxCPU = "100m"
7 | minMemory = "150Mi"
8 | maxMemory = "500Mi"
9 | metricsServerDeploymentName = "metrics-server"
10 | }
11 | type = any
12 | }
13 |
--------------------------------------------------------------------------------
/modules/metrics-server/config/metrics_server.yaml:
--------------------------------------------------------------------------------
1 | ## Node affinity for particular node in which labels key is "Infra-Services" and value is "true"
2 |
3 | affinity:
4 | nodeAffinity:
5 | requiredDuringSchedulingIgnoredDuringExecution:
6 | nodeSelectorTerms:
7 | - matchExpressions:
8 | - key: "Addons-Services"
9 | operator: In
10 | values:
11 | - "true"
12 |
13 | ## Particular args to be passed in deployment
14 |
15 | extraArgs:
16 | - --kubelet-preferred-address-types=InternalIP
17 | - --v=2
18 |
19 | apiService:
20 | create: true
21 |
22 | ## Using limits and requests
23 |
24 | resources:
25 | limits:
26 | cpu: 60m
27 | memory: 200Mi
28 | requests:
29 | cpu: 30m
30 | memory: 100Mi
31 |
32 | podAnnotations:
33 | co.elastic.logs/enabled: "true"
34 |
35 | replicas: 2
36 |
--------------------------------------------------------------------------------
/modules/metrics-server/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "metrics-server"
3 |
4 | # https://github.com/kubernetes-sigs/metrics-server/blob/master/charts/metrics-server/Chart.yaml
5 | default_helm_config = {
6 | name = local.name
7 | chart = local.name
8 | repository = "https://kubernetes-sigs.github.io/metrics-server/"
9 | version = try(var.helm_config.version, null)
10 | namespace = "kube-system"
11 | description = "Metric server helm Chart deployment configuration"
12 | }
13 |
14 | helm_config = merge(
15 | local.default_helm_config,
16 | var.helm_config,
17 | {
18 | values = concat([file("${path.module}/config/metrics_server.yaml")], try([var.helm_config.values[0]], []))
19 | }
20 | )
21 |
22 | argocd_gitops_config = {
23 | enable = true
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/modules/metrics-server/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 |
4 | manage_via_gitops = var.manage_via_gitops
5 | helm_config = local.helm_config
6 | addon_context = var.addon_context
7 |
8 | depends_on = [kubernetes_namespace_v1.this]
9 | }
10 |
11 | resource "kubernetes_namespace_v1" "this" {
12 | count = try(local.helm_config["create_namespace"], true) && local.helm_config["namespace"] != "kube-system" ? 1 : 0
13 |
14 | metadata {
15 | name = local.helm_config["namespace"]
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/modules/metrics-server/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/metrics-server/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for Metrics Server"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps"
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | })
26 | }
27 |
28 | variable "addon_version" {
29 | description = "Helm Chart version for Metrics Server"
30 | type = string
31 | default = ""
32 | }
33 |
--------------------------------------------------------------------------------
/modules/metrics-server/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/reloader/config/reloader.yaml:
--------------------------------------------------------------------------------
1 | reloader:
2 | deployment:
3 | affinity:
4 | nodeAffinity:
5 | requiredDuringSchedulingIgnoredDuringExecution:
6 | nodeSelectorTerms:
7 | - matchExpressions:
8 | - key: "Addons-Services"
9 | operator: In
10 | values:
11 | - "true"
12 |
13 | resources:
14 | limits:
15 | cpu: "100m"
16 | memory: "240Mi"
17 | requests:
18 | cpu: "50m"
19 | memory: "120Mi"
20 |
21 | pod:
22 | annotations:
23 | co.elastic.logs/enabled: "true"
24 |
25 | serviceMonitor:
26 | enabled: ${enable_service_monitor}
27 |
--------------------------------------------------------------------------------
/modules/reloader/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "reloader"
3 |
4 | argocd_gitops_config = {
5 | enable = true
6 | serviceAccountName = local.name
7 | }
8 |
9 | template_values = templatefile("${path.module}/config/reloader.yaml", {
10 | enable_service_monitor = try(var.helm_config.enable_service_monitor, false)
11 | })
12 | }
13 |
14 | module "helm_addon" {
15 | source = "../helm-addon"
16 |
17 | # https://github.com/stakater/Reloader/blob/master/deployments/kubernetes/chart/reloader/Chart.yaml
18 | helm_config = merge(
19 | {
20 | name = local.name
21 | chart = local.name
22 | repository = "https://stakater.github.io/stakater-charts"
23 | version = var.addon_version
24 | namespace = local.name
25 | create_namespace = true
26 | description = "Reloader Helm Chart deployment configuration"
27 | },
28 | var.helm_config,
29 | {
30 | values = concat([local.template_values], try([var.helm_config.values[0]], []))
31 | }
32 | )
33 |
34 | manage_via_gitops = var.manage_via_gitops
35 | addon_context = var.addon_context
36 | }
37 |
--------------------------------------------------------------------------------
/modules/reloader/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/reloader/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for Reloader."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | irsa_iam_role_path = string
26 | irsa_iam_permissions_boundary = string
27 | })
28 | }
29 |
30 | variable "addon_version" {
31 | description = "reloader helm chart version"
32 | type = string
33 | default = ""
34 | }
35 |
--------------------------------------------------------------------------------
/modules/reloader/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | }
4 |
--------------------------------------------------------------------------------
/modules/service-monitor-crd/README.md:
--------------------------------------------------------------------------------
1 | # service_monitor_crd
2 |
3 |
4 | ## Requirements
5 |
6 | | Name | Version |
7 | |------|---------|
8 | | [terraform](#requirement\_terraform) | >= 0.12.26 |
9 | | [helm](#requirement\_helm) | ~> 2.0 |
10 |
11 | ## Providers
12 |
13 | | Name | Version |
14 | |------|---------|
15 | | [helm](#provider\_helm) | ~> 2.0 |
16 |
17 | ## Modules
18 |
19 | No modules.
20 |
21 | ## Resources
22 |
23 | | Name | Type |
24 | |------|------|
25 | | [helm_release.service-monitor-crd](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
26 |
27 | ## Inputs
28 |
29 | No inputs.
30 |
31 | ## Outputs
32 |
33 | No outputs.
34 |
35 |
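36 | ## Usage
37 |
38 | The module takes no inputs; it installs the bundled `service_monitor` chart with the Helm provider's defaults. A minimal usage sketch (the source path is illustrative):
39 |
40 | ```hcl
41 | module "service_monitor_crd" {
42 |   # Source path is illustrative; point it at this module's directory.
43 |   source = "./modules/service-monitor-crd"
44 | }
45 | ```
46 |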
--------------------------------------------------------------------------------
/modules/service-monitor-crd/main.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "service-monitor-crd" {
2 | name = "service-monitor-crd"
3 | chart = "${path.module}/service_monitor/"
4 | timeout = 600
5 | }
6 |
--------------------------------------------------------------------------------
/modules/service-monitor-crd/service_monitor/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/modules/service-monitor-crd/service_monitor/values.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/service-monitor-crd/service_monitor/values.yaml
--------------------------------------------------------------------------------
/modules/service-monitor-crd/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.12.26"
3 |
4 | required_providers {
5 | helm = {
6 | source = "hashicorp/helm"
7 | version = "~> 2.0"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/velero/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_partition" "current" {}
2 | data "aws_caller_identity" "current" {}
3 | data "aws_region" "current" {}
4 |
5 | resource "time_sleep" "dataplane" {
6 | create_duration = "10s"
7 |
8 | triggers = {
9 | data_plane_wait_arn = var.data_plane_wait_arn # this waits for the data plane to be ready
10 | eks_cluster_id = var.eks_cluster_id # this ties it to downstream resources
11 | }
12 | }
13 |
14 | data "aws_eks_cluster" "eks_cluster" {
15 | name = time_sleep.dataplane.triggers["eks_cluster_id"] # this makes downstream resources wait for data plane to be ready
16 | }
17 |
--------------------------------------------------------------------------------
/modules/velero/delete-snapshot.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import datetime
3 |
4 | ec = boto3.client('ec2', '${region}')
5 |
6 | def lambda_handler(event, context):
7 |     # List all EBS snapshots created by Velero backups (tagged with velero.io/backup)
8 |     reservations = ec.describe_snapshots(Filters=[{'Name': 'tag-key', 'Values': ['velero.io/backup']}])
9 |     retention = ${retention_period_in_days}
10 |     now = datetime.datetime.now(datetime.timezone.utc)
11 |     for snapshot in reservations['Snapshots']:
12 |         print("Checking snapshot %s which was created on %s" % (snapshot['SnapshotId'], snapshot['StartTime']))
13 |         # StartTime is a timezone-aware datetime, so the age in days can be computed directly
14 |         age_in_days = (now - snapshot['StartTime']).days
15 |         if age_in_days > retention:
16 |             print("The snapshot is older than the retention of %d days. Deleting now." % retention)
17 |             ec.delete_snapshot(SnapshotId=snapshot['SnapshotId'])
18 |         else:
19 |             print("Snapshot is newer than the configured retention of %d days, so we keep it." % retention)
20 |
--------------------------------------------------------------------------------
/modules/velero/delete-snapshot.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/velero/delete-snapshot.zip
--------------------------------------------------------------------------------
/modules/velero/velero-data/helm/values.yaml:
--------------------------------------------------------------------------------
1 | initContainers:
2 | - name: velero-plugin-for-aws-2
3 | image: velero/velero-plugin-for-aws:v1.10.0
4 | imagePullPolicy: IfNotPresent
5 | volumeMounts:
6 | - mountPath: /target
7 | name: plugins
8 |
9 | configuration:
10 | backupStorageLocation:
11 | - name: default
12 | provider: aws
13 | bucket: ${bucket}
14 | config:
15 | region: ${region}
16 | volumeSnapshotLocation:
17 | - name: default
18 | provider: aws
19 | config:
20 | region: ${region}
21 |
22 | credentials:
23 | useSecret: false
24 |
25 | podAnnotations:
26 | co.elastic.logs/enabled: "true"
27 |
28 | affinity:
29 | nodeAffinity:
30 | requiredDuringSchedulingIgnoredDuringExecution:
31 | nodeSelectorTerms:
32 | - matchExpressions:
33 | - key: "Addons-Services"
34 | operator: In
35 | values:
36 | - "true"
37 |
38 | resources:
39 | requests:
40 | cpu: 10m
41 | memory: 128Mi
42 | limits:
43 | cpu: 500m
44 | memory: 512Mi
45 |
--------------------------------------------------------------------------------
/modules/velero/velero-data/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/velero/velero-data/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | time = {
10 | source = "hashicorp/time"
11 | version = ">= 0.8"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/velero/velero_job/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/modules/velero/velero_job/templates/backup_job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: velero.io/v1
2 | kind: Schedule
3 | metadata:
4 | name: {{ .Values.velero_backup_name }}
5 | namespace: velero
6 | spec:
7 | schedule: "{{ .Values.schedule_cron_time }}"
8 | template:
9 | includedNamespaces:
10 | - '{{ .Values.namespaces }}'
11 | includedResources:
12 | - '*'
13 | includeClusterResources: true
14 | snapshotVolumes: true
15 | storageLocation: default
16 | volumeSnapshotLocations:
17 | - default
18 | ttl: 24h0m0s
19 | status:
20 | phase: "Enabled"
21 | lastBackup:
22 | validationErrors:
23 |
--------------------------------------------------------------------------------
/modules/velero/velero_job/values.yaml:
--------------------------------------------------------------------------------
1 | velero_backup_name: cluster-backup
2 | schedule_cron_time: "*/10 * * * *"
3 | namespaces: "*"
4 |
--------------------------------------------------------------------------------
/modules/velero/velero_notification/values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 | registry: ghcr.io
3 | repository: kubeshop/botkube
4 | pullPolicy: IfNotPresent
5 | tag: v1.10.0
6 |
7 | sources:
8 | 'k8s-all-events':
9 | displayName: "Backup Status"
10 | botkube/kubernetes:
11 | config:
12 | namespaces: &k8s-events-namespaces
13 | event:
14 | types:
15 | - all
16 | resources:
17 | - type: velero.io/v1/backups
18 | event:
19 | types:
20 | - all
21 | updateSetting:
22 | includeDiff: true
23 | fields:
24 | - status.phase
25 |
26 | communications:
27 | 'default-group':
28 | socketSlack:
29 | enabled: true
30 | channels:
31 | 'default':
32 | name: '${slack_channel_name}'
33 | bindings:
34 | executors:
35 | - k8s-default-tools
36 | sources:
37 | - k8s-all-events
38 | botToken: '${slack_botToken}'
39 | appToken: '${slack_appToken}'
40 |
41 | settings:
42 | clusterName: '${cluster_id}'
43 |
--------------------------------------------------------------------------------
/modules/velero/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.12.26"
3 |
4 | required_providers {
5 | template = {
6 | source = "hashicorp/template"
7 | version = "~> 2.2"
8 | }
9 | local = {
10 | source = "hashicorp/local"
11 | version = "~> 2.1"
12 | }
13 | archive = {
14 | source = "hashicorp/archive"
15 | version = "~> 2.0"
16 | }
17 | aws = {
18 | source = "hashicorp/aws"
19 | version = ">= 3.0.0"
20 | }
21 | helm = {
22 | source = "hashicorp/helm"
23 | version = "~> 2.0"
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/modules/vpa-crds/README.md:
--------------------------------------------------------------------------------
1 | # vpa-crds
2 |
3 |
4 | ## Requirements
5 |
6 | | Name | Version |
7 | |------|---------|
8 | | [terraform](#requirement\_terraform) | >= 0.12.26 |
9 | | [helm](#requirement\_helm) | ~> 2.0 |
10 |
11 | ## Providers
12 |
13 | | Name | Version |
14 | |------|---------|
15 | | [helm](#provider\_helm) | ~> 2.0 |
16 |
17 | ## Modules
18 |
19 | No modules.
20 |
21 | ## Resources
22 |
23 | | Name | Type |
24 | |------|------|
25 | | [helm_release.vpa-crds](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
26 |
27 | ## Inputs
28 |
29 | | Name | Description | Type | Default | Required |
30 | |------|-------------|------|---------|:--------:|
31 | | [chart\_version](#input\_chart\_version) | chart version for VPA | `string` | `"9.9.0"` | no |
32 | | [helm-config](#input\_helm-config) | vpa config from user end | `any` | `{}` | no |
33 |
34 | ## Outputs
35 |
36 | No outputs.
37 |
38 |
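39 | ## Usage
40 |
41 | A minimal usage sketch; the source path is illustrative, and `helm-config` is an optional free-form map of chart value overrides:
42 |
43 | ```hcl
44 | module "vpa_crds" {
45 |   # Source path is illustrative; adjust it to your layout.
46 |   source        = "./modules/vpa-crds"
47 |   chart_version = "9.9.0"
48 |
49 |   # Optional overrides passed through to the vertical-pod-autoscaler chart values.
50 |   helm-config = {}
51 | }
52 | ```
53 |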
--------------------------------------------------------------------------------
/modules/vpa-crds/main.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "vpa-crds" {
2 | name = "vertical-pod-autoscaler"
3 | namespace = "kube-system"
4 | repository = "https://cowboysysop.github.io/charts/"
5 | chart = "vertical-pod-autoscaler"
6 | version = var.chart_version
7 | timeout = 600
8 | values = [
9 | file("${path.module}/config/values.yaml"),
10 | yamlencode(var.helm-config)
11 | ]
12 | }
13 |
--------------------------------------------------------------------------------
/modules/vpa-crds/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm-config" {
2 | description = "vpa config from user end"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "chart_version" {
8 | description = "chart version for VPA"
9 | type = string
10 | default = "9.9.0"
11 | }
12 |
--------------------------------------------------------------------------------
/modules/vpa-crds/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.12.26"
3 |
4 | required_providers {
5 | helm = {
6 | source = "hashicorp/helm"
7 | version = "~> 2.0"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-cloudwatch-metrics/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 | manage_via_gitops = var.manage_via_gitops
4 | set_values = local.set_values
5 | helm_config = local.helm_config
6 | irsa_config = local.irsa_config
7 | addon_context = var.addon_context
8 | }
9 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-cloudwatch-metrics/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-cloudwatch-metrics/values.yaml:
--------------------------------------------------------------------------------
1 | clusterName: ${eks_cluster_id}
2 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-cloudwatch-metrics/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for aws-cloudwatch-metrics."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "irsa_policies" {
14 | description = "Additional IAM policies for a IAM role for service accounts"
15 | type = list(string)
16 | default = []
17 | }
18 |
19 | variable "addon_context" {
20 | description = "Input configuration for the addon"
21 | type = object({
22 | aws_caller_identity_account_id = string
23 | aws_caller_identity_arn = string
24 | aws_eks_cluster_endpoint = string
25 | aws_partition_id = string
26 | aws_region_name = string
27 | eks_cluster_id = string
28 | eks_oidc_issuer_url = string
29 | eks_oidc_provider_arn = string
30 | tags = map(string)
31 | irsa_iam_role_path = string
32 | irsa_iam_permissions_boundary = string
33 | })
34 | }
35 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-cloudwatch-metrics/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.10"
12 | }
13 | helm = {
14 | source = "hashicorp/helm"
15 | version = ">= 2.4.1"
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-coredns/outputs.tf:
--------------------------------------------------------------------------------
1 | output "release_metadata" {
2 | description = "Map of attributes of the Helm release metadata"
3 | value = try(module.helm_addon[0].release_metadata, null)
4 | }
5 |
6 | output "irsa_arn" {
7 | description = "IAM role ARN for the service account"
8 | value = try(module.helm_addon[0].irsa_arn, null)
9 | }
10 |
11 | output "irsa_name" {
12 | description = "IAM role name for the service account"
13 | value = try(module.helm_addon[0].irsa_name, null)
14 | }
15 |
16 | output "service_account" {
17 | description = "Name of Kubernetes service account"
18 | value = try(module.helm_addon[0].service_account, null)
19 | }
20 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-coredns/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.10"
8 | }
9 | null = {
10 | source = "hashicorp/null"
11 | version = ">= 3.0"
12 | }
13 | time = {
14 | source = "hashicorp/time"
15 | version = ">= 0.8"
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-efs-csi-driver/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? {
4 | enable = true
5 | serviceAccountName = local.service_account
6 | } : null
7 | }
8 |
9 | output "release_metadata" {
10 | description = "Map of attributes of the Helm release metadata"
11 | value = module.helm_addon.release_metadata
12 | }
13 |
14 | output "irsa_arn" {
15 | description = "IAM role ARN for the service account"
16 | value = module.helm_addon.irsa_arn
17 | }
18 |
19 | output "irsa_name" {
20 | description = "IAM role name for the service account"
21 | value = module.helm_addon.irsa_name
22 | }
23 |
24 | output "service_account" {
25 | description = "Name of Kubernetes service account"
26 | value = module.helm_addon.service_account
27 | }
28 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-efs-csi-driver/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-kube-proxy/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "kube-proxy"
3 | }
4 |
5 | data "aws_eks_addon_version" "this" {
6 | addon_name = local.name
7 | kubernetes_version = var.addon_config.kubernetes_version
8 | most_recent = try(var.addon_config.most_recent, false)
9 | }
10 |
11 | resource "aws_eks_addon" "kube_proxy" {
12 | cluster_name = var.addon_context.eks_cluster_id
13 | addon_name = local.name
14 | addon_version = try(var.addon_config.addon_version, data.aws_eks_addon_version.this.version)
15 | resolve_conflicts = try(var.addon_config.resolve_conflicts, "OVERWRITE")
16 | service_account_role_arn = try(var.addon_config.service_account_role_arn, null)
17 | preserve = try(var.addon_config.preserve, true)
18 |
19 | tags = merge(
20 | var.addon_context.tags,
21 | try(var.addon_config.tags, {})
22 | )
23 | }
24 |
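A hedged usage sketch of this archived module follows; the module path, version strings, and local.addon_context are illustrative assumptions, with the input shapes taken from variables.tf below.

    # Illustrative only: wiring assumed from the variables declared below.
    module "kube_proxy" {
      source = "./modules/z-archieve/aws-kube-proxy"

      addon_config = {
        kubernetes_version = "1.28"                # illustrative cluster version for the add-on version lookup
        addon_version      = "v1.28.2-eksbuild.2"  # illustrative pin; omit to use the data-source result
        resolve_conflicts  = "OVERWRITE"
      }

      addon_context = local.addon_context          # assumed object matching the addon_context type below
    }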
--------------------------------------------------------------------------------
/modules/z-archieve/aws-kube-proxy/outputs.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/z-archieve/aws-kube-proxy/outputs.tf
--------------------------------------------------------------------------------
/modules/z-archieve/aws-kube-proxy/variables.tf:
--------------------------------------------------------------------------------
1 | variable "addon_config" {
2 | description = "Amazon EKS Managed Add-on config for Kube Proxy"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "addon_context" {
8 | description = "Input configuration for the addon"
9 | type = object({
10 | aws_caller_identity_account_id = string
11 | aws_caller_identity_arn = string
12 | aws_eks_cluster_endpoint = string
13 | aws_partition_id = string
14 | aws_region_name = string
15 | eks_cluster_id = string
16 | eks_oidc_issuer_url = string
17 | eks_oidc_provider_arn = string
18 | tags = map(string)
19 | })
20 | }
21 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-kube-proxy/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-load-balancer-controller/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 | manage_via_gitops = var.manage_via_gitops
4 | set_values = local.set_values
5 | helm_config = local.helm_config
6 | irsa_config = local.irsa_config
7 | addon_context = var.addon_context
8 | }
9 |
10 | resource "aws_iam_policy" "aws_load_balancer_controller" {
11 | name = "${var.addon_context.eks_cluster_id}-lb-irsa"
12 |   description = "Allows the AWS Load Balancer Controller to manage ALBs and NLBs"
13 | policy = data.aws_iam_policy_document.aws_lb.json
14 | tags = var.addon_context.tags
15 | }
16 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-load-balancer-controller/outputs.tf:
--------------------------------------------------------------------------------
1 | output "ingress_namespace" {
2 | description = "AWS LoadBalancer Controller Ingress Namespace"
3 | value = local.helm_config["namespace"]
4 | }
5 |
6 | output "ingress_name" {
7 | description = "AWS LoadBalancer Controller Ingress Name"
8 | value = local.helm_config["name"]
9 | }
10 |
11 | output "argocd_gitops_config" {
12 | description = "Configuration used for managing the add-on with ArgoCD"
13 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
14 | }
15 |
16 | output "release_metadata" {
17 | description = "Map of attributes of the Helm release metadata"
18 | value = module.helm_addon.release_metadata
19 | }
20 |
21 | output "irsa_arn" {
22 | description = "IAM role ARN for the service account"
23 | value = module.helm_addon.irsa_arn
24 | }
25 |
26 | output "irsa_name" {
27 | description = "IAM role name for the service account"
28 | value = module.helm_addon.irsa_name
29 | }
30 |
31 | output "service_account" {
32 | description = "Name of Kubernetes service account"
33 | value = module.helm_addon.service_account
34 | }
35 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-load-balancer-controller/values.yaml:
--------------------------------------------------------------------------------
1 | clusterName: ${eks_cluster_id}
2 | region: ${aws_region}
3 | image:
4 | repository: ${repository}
5 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-load-balancer-controller/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for the aws_load_balancer_controller."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon."
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | irsa_iam_role_path = string
26 | irsa_iam_permissions_boundary = string
27 | default_repository = string
28 | })
29 | }
30 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-load-balancer-controller/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-node-termination-handler/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_iam_policy_document" "aws_node_termination_handler_queue_policy_document" {
2 | statement {
3 | actions = [
4 | "sqs:SendMessage"
5 | ]
6 | principals {
7 | type = "Service"
8 | identifiers = [
9 | "events.amazonaws.com",
10 | "sqs.amazonaws.com"
11 | ]
12 | }
13 | resources = [
14 | aws_sqs_queue.aws_node_termination_handler_queue.arn
15 | ]
16 | }
17 | }
18 |
19 | data "aws_iam_policy_document" "irsa_policy" {
20 | statement {
21 | actions = [
22 | "autoscaling:CompleteLifecycleAction",
23 | "autoscaling:DescribeAutoScalingInstances",
24 | "autoscaling:DescribeTags",
25 | "ec2:DescribeInstances",
26 | "sqs:DeleteMessage",
27 | "sqs:ReceiveMessage",
28 | ]
29 | resources = ["*"]
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-node-termination-handler/outputs.tf:
--------------------------------------------------------------------------------
1 | output "release_metadata" {
2 | description = "Map of attributes of the Helm release metadata"
3 | value = module.helm_addon.release_metadata
4 | }
5 |
6 | output "irsa_arn" {
7 | description = "IAM role ARN for the service account"
8 | value = module.helm_addon.irsa_arn
9 | }
10 |
11 | output "irsa_name" {
12 | description = "IAM role name for the service account"
13 | value = module.helm_addon.irsa_name
14 | }
15 |
16 | output "service_account" {
17 | description = "Name of Kubernetes service account"
18 | value = module.helm_addon.service_account
19 | }
20 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-node-termination-handler/values.yaml:
--------------------------------------------------------------------------------
1 | enableSqsTerminationDraining: true
2 | enablePrometheusServer: true
3 | %{ if length(autoscaling_group_names) == 0 ~}
4 | checkASGTagBeforeDraining: false
5 | %{ endif ~}
6 |
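As a hedged sketch (the module's main.tf is not shown here, so the local name below is illustrative), this values template would typically be rendered with Terraform's templatefile(), mirroring the pattern used in crossplane/locals.tf later in this dump:

    locals {
      # Renders the values.yaml above; with an empty autoscaling_group_names list the
      # %{ if } directive emits "checkASGTagBeforeDraining: false", otherwise the
      # line is dropped from the rendered values.
      nth_values = [templatefile("${path.module}/values.yaml", {
        autoscaling_group_names = var.autoscaling_group_names
      })]
    }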
--------------------------------------------------------------------------------
/modules/z-archieve/aws-node-termination-handler/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "AWS Node Termination Handler Helm Chart Configuration"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "autoscaling_group_names" {
8 | description = "EKS Node Group ASG names"
9 | type = list(string)
10 | }
11 |
12 | variable "addon_context" {
13 | description = "Input configuration for the addon"
14 | type = object({
15 | aws_caller_identity_account_id = string
16 | aws_caller_identity_arn = string
17 | aws_eks_cluster_endpoint = string
18 | aws_partition_id = string
19 | aws_region_name = string
20 | eks_cluster_id = string
21 | eks_oidc_issuer_url = string
22 | eks_oidc_provider_arn = string
23 | tags = map(string)
24 | irsa_iam_role_path = string
25 | irsa_iam_permissions_boundary = string
26 | })
27 | }
28 |
29 | variable "irsa_policies" {
30 |   description = "Additional IAM policies for an IAM role for service accounts"
31 | type = list(string)
32 | default = []
33 | }
34 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-node-termination-handler/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-privateca-issuer/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_iam_policy_document" "aws_privateca_issuer" {
2 | statement {
3 | effect = "Allow"
4 | resources = [var.aws_privateca_acmca_arn]
5 | actions = [
6 | "acm-pca:DescribeCertificateAuthority",
7 | "acm-pca:GetCertificate",
8 | "acm-pca:IssueCertificate",
9 | ]
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-privateca-issuer/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 | manage_via_gitops = var.manage_via_gitops
4 | set_values = local.set_values
5 | helm_config = local.helm_config
6 | irsa_config = local.irsa_config
7 | addon_context = var.addon_context
8 | }
9 |
10 | resource "aws_iam_policy" "aws_privateca_issuer" {
11 | description = "AWS PCA issuer IAM policy"
12 | name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa"
13 | policy = data.aws_iam_policy_document.aws_privateca_issuer.json
14 | tags = var.addon_context.tags
15 | }
16 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-privateca-issuer/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/aws-privateca-issuer/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager-csi-driver/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 |
4 | # https://github.com/cert-manager/csi-driver/blob/main/deploy/charts/csi-driver/Chart.yaml
5 | helm_config = merge(
6 | {
7 | name = "cert-manager-csi-driver"
8 | chart = "cert-manager-csi-driver"
9 | repository = "https://charts.jetstack.io"
10 | version = "v0.4.2"
11 | namespace = "cert-manager"
12 | description = "Cert Manager CSI Driver Add-on"
13 | },
14 | var.helm_config
15 | )
16 |
17 | manage_via_gitops = var.manage_via_gitops
18 | addon_context = var.addon_context
19 | }
20 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager-csi-driver/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager-csi-driver/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for Cert-Manager CSI Driver."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | irsa_iam_role_path = string
26 | irsa_iam_permissions_boundary = string
27 | })
28 | }
29 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager-csi-driver/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | }
4 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager-istio-csr/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 | helm_config = merge(
4 | {
5 | name = "cert-manager-istio-csr"
6 | chart = "cert-manager-istio-csr"
7 | repository = "https://charts.jetstack.io"
8 | version = "v0.5.0"
9 | namespace = "cert-manager"
10 | create_namespace = false
11 | description = "Cert-manager-istio-csr Helm Chart deployment configuration"
12 | },
13 | var.helm_config
14 | )
15 | manage_via_gitops = var.manage_via_gitops
16 | addon_context = var.addon_context
17 | }
18 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager-istio-csr/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager-istio-csr/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm Config for istio-csr."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | irsa_iam_role_path = string
26 | irsa_iam_permissions_boundary = string
27 | })
28 | }
29 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager-istio-csr/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | }
4 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/cert-manager-ca/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: cert-manager-ca
3 | description: A Helm chart to install a Cert Manager CA
4 | type: application
5 | version: 0.2.0
6 | appVersion: v0.1.0
7 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/cert-manager-ca/templates/certificate.yaml:
--------------------------------------------------------------------------------
1 | {{- range .Values.clusterIssuers }}
2 | {{- if eq .type "CA" }}
3 | apiVersion: cert-manager.io/v1
4 | kind: Certificate
5 | metadata:
6 | name: {{ .name }}
7 | namespace: {{ $.Release.Namespace }}
8 | spec:
9 | isCA: true
10 | commonName: {{ .name }}
11 | secretName: {{ .secretName }}
12 | {{- with .privateKey }}
13 | privateKey:
14 | {{- toYaml . | nindent 4 }}
15 | {{- end }}
16 | {{- with .issuer }}
17 | issuerRef:
18 | {{- toYaml . | nindent 4 }}
19 | {{- end }}
20 | {{- end }}
21 | {{- end }}
22 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/cert-manager-ca/templates/clusterissuers.yaml:
--------------------------------------------------------------------------------
1 | {{- range .Values.clusterIssuers }}
2 | ---
3 | apiVersion: cert-manager.io/v1
4 | kind: ClusterIssuer
5 | metadata:
6 | name: {{ .name }}
7 | spec:
8 | {{- if eq .type "selfSigned" }}
9 | selfSigned: {}
10 | {{- else if eq .type "CA" }}
11 | ca:
12 | secretName: {{ .secretName }}
13 | {{- end }}
14 | {{- end }}
15 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/cert-manager-ca/values.yaml:
--------------------------------------------------------------------------------
1 | clusterIssuers:
2 | - name: cert-manager-selfsigned
3 | type: selfSigned
4 | - name: cert-manager-ca
5 | type: CA
6 | secretName: cert-manager-ca-root
7 | privateKey:
8 | algorithm: ECDSA
9 | size: 256
10 | issuer:
11 | name: cert-manager-selfsigned
12 | kind: ClusterIssuer
13 | group: cert-manager.io
14 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/cert-manager-letsencrypt/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: cert-manager-letsencrypt
3 | description: Cert Manager Cluster Issuers for Let's Encrypt certificates with DNS01 protocol
4 | type: application
5 | version: 0.1.0
6 | appVersion: v0.1.0
7 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/cert-manager-letsencrypt/templates/clusterissuer-production.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: ClusterIssuer
3 | metadata:
4 | name: {{ .Release.Name }}-production-route53
5 | labels:
6 | ca: letsencrypt
7 | environment: production
8 | solver: dns01
9 | provider: route53
10 | spec:
11 | acme:
12 | {{- if .Values.email }}
13 | email: {{ .Values.email }}
14 | {{- end }}
15 | server: https://acme-v02.api.letsencrypt.org/directory
16 | preferredChain: ISRG Root X1
17 | privateKeySecretRef:
18 | name: {{ .Release.Name }}-production-route53
19 | solvers:
20 | - dns01:
21 | route53:
22 | region: {{ .Values.region | default "global" }}
23 | {{- if .Values.dnsZones }}
24 | selector:
25 | dnsZones:
26 | {{- .Values.dnsZones | toYaml | nindent 12 }}
27 | {{- end }}
28 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/cert-manager-letsencrypt/templates/clusterissuer-staging.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: ClusterIssuer
3 | metadata:
4 | name: {{ .Release.Name }}-staging-route53
5 | labels:
6 | ca: letsencrypt
7 | environment: staging
8 | solver: dns01
9 | provider: route53
10 | spec:
11 | acme:
12 | {{- if .Values.email }}
13 | email: {{ .Values.email }}
14 | {{- end }}
15 | server: https://acme-staging-v02.api.letsencrypt.org/directory
16 | preferredChain: ISRG Root X1
17 | privateKeySecretRef:
18 | name: {{ .Release.Name }}-staging-route53
19 | solvers:
20 | - dns01:
21 | route53:
22 | region: {{ .Values.region | default "global" }}
23 | {{- if .Values.dnsZones }}
24 | selector:
25 | dnsZones:
26 | {{- .Values.dnsZones | toYaml | nindent 12 }}
27 | {{- end }}
28 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/cert-manager-letsencrypt/values.yaml:
--------------------------------------------------------------------------------
1 | # email: user@example.com
2 |
3 | # region: global
4 |
5 | # dnsZones:
6 | # - domain.name
7 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_route53_zone" "selected" {
2 | for_each = toset(var.domain_names)
3 |
4 | name = each.key
5 | }
6 |
7 | data "aws_iam_policy_document" "cert_manager_iam_policy_document" {
8 | statement {
9 | effect = "Allow"
10 | resources = ["arn:${var.addon_context.aws_partition_id}:route53:::change/*"]
11 | actions = ["route53:GetChange"]
12 | }
13 |
14 | dynamic "statement" {
15 | for_each = { for k, v in toset(var.domain_names) : k => data.aws_route53_zone.selected[k].arn }
16 |
17 | content {
18 | effect = "Allow"
19 | resources = [statement.value]
20 | actions = [
21 |         "route53:ChangeResourceRecordSets",
22 |         "route53:ListResourceRecordSets"
23 | ]
24 | }
25 | }
26 |
27 | statement {
28 | effect = "Allow"
29 | resources = ["*"]
30 | actions = ["route53:ListHostedZonesByName"]
31 | }
32 | }
33 |
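For clarity, a small illustrative sketch of what the dynamic-statement input above evaluates to (the domain and zone ID are made up): the for expression maps each domain name to its hosted-zone ARN, and the dynamic block then emits one Allow statement per zone.

    locals {
      example_domains   = ["example.com"]  # illustrative
      example_zone_arns = { for d in toset(local.example_domains) : d => "arn:aws:route53:::hostedzone/EXAMPLEZONE" }
      # => { "example.com" = "arn:aws:route53:::hostedzone/EXAMPLEZONE" }
    }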
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "eks_cluster_id" {
7 | description = "Current AWS EKS Cluster ID"
8 | value = var.addon_context.eks_cluster_id
9 | }
10 |
11 | output "release_metadata" {
12 | description = "Map of attributes of the Helm release metadata"
13 | value = module.helm_addon.release_metadata
14 | }
15 |
16 | output "irsa_arn" {
17 | description = "IAM role ARN for the service account"
18 | value = module.helm_addon.irsa_arn
19 | }
20 |
21 | output "irsa_name" {
22 | description = "IAM role name for the service account"
23 | value = module.helm_addon.irsa_name
24 | }
25 |
26 | output "service_account" {
27 | description = "Name of Kubernetes service account"
28 | value = module.helm_addon.service_account
29 | }
30 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/values.yaml:
--------------------------------------------------------------------------------
1 | extraArgs:
2 | - --enable-certificate-owner-ref=true
3 |
4 | installCRDs: true
5 |
--------------------------------------------------------------------------------
/modules/z-archieve/cert-manager/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.10"
8 | }
9 | helm = {
10 | source = "hashicorp/helm"
11 | version = ">= 2.4.1"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-autoscaler/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? {
4 | enable = true
5 | serviceAccountName = local.service_account
6 | } : null
7 | }
8 |
9 | output "release_metadata" {
10 | description = "Map of attributes of the Helm release metadata"
11 | value = module.helm_addon.release_metadata
12 | }
13 |
14 | output "irsa_arn" {
15 | description = "IAM role ARN for the service account"
16 | value = module.helm_addon.irsa_arn
17 | }
18 |
19 | output "service_account" {
20 | description = "Name of Kubernetes service account"
21 | value = module.helm_addon.service_account
22 | }
23 |
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-autoscaler/values.yaml:
--------------------------------------------------------------------------------
1 | awsRegion: ${aws_region}
2 |
3 | autoDiscovery:
4 | clusterName: ${eks_cluster_id}
5 | extraArgs:
6 | aws-use-static-instance-list: true
7 |
8 | image:
9 | tag: ${image_tag}
10 |
11 | resources:
12 | limits:
13 | cpu: 200m
14 | memory: 512Mi
15 | requests:
16 | cpu: 200m
17 | memory: 512Mi
18 |
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-autoscaler/variables.tf:
--------------------------------------------------------------------------------
1 | variable "eks_cluster_version" {
2 |   description = "The Kubernetes version of the cluster, used to select the matching cluster-autoscaler image version"
3 | type = string
4 | }
5 |
6 | variable "helm_config" {
7 | description = "Cluster Autoscaler Helm Config"
8 | type = any
9 | default = {}
10 | }
11 |
12 | variable "manage_via_gitops" {
13 | description = "Determines if the add-on should be managed via GitOps."
14 | type = bool
15 | default = false
16 | }
17 |
18 | variable "addon_context" {
19 | description = "Input configuration for the addon"
20 | type = object({
21 | aws_caller_identity_account_id = string
22 | aws_caller_identity_arn = string
23 | aws_eks_cluster_endpoint = string
24 | aws_partition_id = string
25 | aws_region_name = string
26 | eks_cluster_id = string
27 | eks_oidc_issuer_url = string
28 | eks_oidc_provider_arn = string
29 | tags = map(string)
30 | irsa_iam_role_path = string
31 | irsa_iam_permissions_boundary = string
32 | })
33 | }
34 |
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-autoscaler/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-proportional-autoscaler/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 |
4 | # https://github.com/kubernetes-sigs/cluster-proportional-autoscaler/blob/master/charts/cluster-proportional-autoscaler/Chart.yaml
5 | helm_config = merge(
6 | {
7 | name = "cluster-proportional-autoscaler"
8 | chart = "cluster-proportional-autoscaler"
9 | repository = "https://kubernetes-sigs.github.io/cluster-proportional-autoscaler"
10 | version = "1.1.0"
11 | namespace = "kube-system"
12 | values = [templatefile("${path.module}/values.yaml", {
13 | operating_system = "linux"
14 | })]
15 | description = "Cluster Proportional Autoscaler Helm Chart"
16 | },
17 | var.helm_config
18 | )
19 |
20 | manage_via_gitops = var.manage_via_gitops
21 | addon_context = var.addon_context
22 | }
23 |
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-proportional-autoscaler/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-proportional-autoscaler/values.yaml:
--------------------------------------------------------------------------------
1 | # Formula for controlling the replicas. Adjust according to your needs
2 | # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) )
3 | # replicas = min(replicas, max)
4 | # replicas = max(replicas, min)
5 | config:
6 | linear:
7 | coresPerReplica: 256
8 | nodesPerReplica: 16
9 | min: 1
10 | max: 100
11 | preventSinglePointFailure: true
12 | includeUnschedulableNodes: true
13 |
14 | # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive).
15 | # The following option should be defined in a user-defined values.yaml via var.helm_config
16 |
17 | #options:
18 | # target:
19 |
20 | podSecurityContext:
21 | seccompProfile:
22 | type: RuntimeDefault
23 | supplementalGroups: [ 65534 ]
24 | fsGroup: 65534
25 |
26 | nodeSelector:
27 | kubernetes.io/os: ${operating_system}
28 |
29 | resources:
30 | limits:
31 | cpu: 100m
32 | memory: 128Mi
33 | requests:
34 | cpu: 100m
35 | memory: 128Mi
36 |
37 | tolerations:
38 | - key: "CriticalAddonsOnly"
39 | operator: "Exists"
40 |
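A quick worked example of the linear-mode formula in the comments above, expressed as a Terraform sketch with illustrative cluster numbers:

    locals {
      cores               = 64                       # illustrative cluster totals
      nodes               = 8
      replicas_from_cores = ceil(local.cores / 256)  # ceil(0.25) = 1
      replicas_from_nodes = ceil(local.nodes / 16)   # ceil(0.5)  = 1
      # clamp between min = 1 and max = 100 from the config above
      replicas = max(min(max(local.replicas_from_cores, local.replicas_from_nodes), 100), 1)  # = 1
    }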
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-proportional-autoscaler/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 |   description = "Helm provider config for the Cluster Proportional Autoscaler."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | })
26 | }
27 |
--------------------------------------------------------------------------------
/modules/z-archieve/cluster-proportional-autoscaler/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | }
4 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/aws-provider/aws-controller-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: pkg.crossplane.io/v1alpha1
3 | kind: ControllerConfig
4 | metadata:
5 | name: aws-controller-config
6 | annotations:
7 | eks.amazonaws.com/role-arn: ${iam-role-arn}
8 | spec:
9 | podSecurityContext:
10 | fsGroup: 2000
11 | args:
12 | - --debug
13 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/aws-provider/aws-provider-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: aws.crossplane.io/v1beta1
3 | kind: ProviderConfig
4 | metadata:
5 | name: aws-provider-config
6 | spec:
7 | credentials:
8 | source: InjectedIdentity
9 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/aws-provider/aws-provider.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: pkg.crossplane.io/v1
3 | kind: Provider
4 | metadata:
5 | name: ${aws-provider-name}
6 | spec:
7 | package: xpkg.upbound.io/crossplane-contrib/provider-aws:${coalesce(provider-aws-version, "v0.33.0")}
8 | controllerConfigRef:
9 | name: aws-controller-config
10 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/aws-provider/jet-aws-controller-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: pkg.crossplane.io/v1alpha1
3 | kind: ControllerConfig
4 | metadata:
5 | name: jet-aws-controller-config
6 | annotations:
7 | eks.amazonaws.com/role-arn: ${iam-role-arn}
8 | spec:
9 | podSecurityContext:
10 | fsGroup: 2000
11 | args:
12 | - --debug
13 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/aws-provider/jet-aws-provider-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: aws.jet.crossplane.io/v1alpha1
3 | kind: ProviderConfig
4 | metadata:
5 | name: jet-aws-provider-config
6 | spec:
7 | credentials:
8 | source: InjectedIdentity
9 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/aws-provider/jet-aws-provider.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: pkg.crossplane.io/v1
3 | kind: Provider
4 | metadata:
5 | name: ${aws-provider-name}
6 | spec:
7 | package: crossplane/provider-jet-aws:${provider-aws-version}
8 | controllerConfigRef:
9 | name: jet-aws-controller-config
10 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_iam_policy_document" "s3_policy" {
2 | statement {
3 | sid = "VisualEditor0"
4 | effect = "Allow"
5 | resources = ["arn:${var.addon_context.aws_partition_id}:s3:::*"]
6 |
7 | actions = [
8 | "s3:CreateBucket",
9 | "s3:DeleteBucket",
10 | "s3:DeleteObject",
11 | "s3:DeleteObjectVersion",
12 | "s3:Get*",
13 | "s3:ListBucket",
14 | "s3:Put*",
15 | ]
16 | }
17 |
18 | statement {
19 | sid = "VisualEditor1"
20 | effect = "Allow"
21 | resources = ["*"]
22 | actions = ["s3:ListAllMyBuckets"]
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/kubernetes-provider/kubernetes-controller-clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: ${kubernetes-serviceaccount-name}
5 | subjects:
6 | - kind: ServiceAccount
7 | name: ${kubernetes-serviceaccount-name}
8 | namespace: ${namespace}
9 | roleRef:
10 | kind: ClusterRole
11 | name: cluster-admin
12 | apiGroup: rbac.authorization.k8s.io
13 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/kubernetes-provider/kubernetes-controller-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: pkg.crossplane.io/v1alpha1
2 | kind: ControllerConfig
3 | metadata:
4 | name: kubernetes-controller-config
5 | spec:
6 | serviceAccountName: ${kubernetes-serviceaccount-name}
7 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/kubernetes-provider/kubernetes-provider-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kubernetes.crossplane.io/v1alpha1
3 | kind: ProviderConfig
4 | metadata:
5 | name: kubernetes-provider-config
6 | spec:
7 | credentials:
8 | source: InjectedIdentity
9 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/kubernetes-provider/kubernetes-provider.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: pkg.crossplane.io/v1
3 | kind: Provider
4 | metadata:
5 | name: ${kubernetes-provider-name}
6 | spec:
7 | package: xpkg.upbound.io/crossplane-contrib/provider-kubernetes:${coalesce(provider-kubernetes-version, "v0.5.0")}
8 | controllerConfigRef:
9 | name: kubernetes-controller-config
10 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | namespace = try(var.helm_config.namespace, "crossplane-system")
3 |
4 | # https://github.com/crossplane/crossplane/blob/master/cluster/charts/crossplane/Chart.yaml
5 | default_helm_config = {
6 | name = "crossplane"
7 | chart = "crossplane"
8 | repository = "https://charts.crossplane.io/stable/"
9 | version = "1.10.1"
10 | namespace = local.namespace
11 | description = "Crossplane Helm chart"
12 | values = local.default_helm_values
13 | }
14 |
15 | helm_config = merge(
16 | local.default_helm_config,
17 | var.helm_config
18 | )
19 |
20 | default_helm_values = [templatefile("${path.module}/values.yaml", {
21 | operating-system = "linux"
22 | })]
23 |
24 | aws_provider_sa = "aws-provider"
25 | jet_aws_provider_sa = "jet-aws-provider"
26 | kubernetes_provider_sa = try(var.helm_config.service_account, "kubernetes-provider")
27 | aws_current_account_id = var.account_id
28 | aws_current_partition = var.aws_partition
29 | }
30 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/outputs.tf:
--------------------------------------------------------------------------------
1 | output "release_metadata" {
2 | description = "Map of attributes of the Helm release metadata"
3 | value = module.helm_addon.release_metadata
4 | }
5 |
6 | output "irsa_arn" {
7 | description = "IAM role ARN for the service account"
8 | value = module.helm_addon.irsa_arn
9 | }
10 |
11 | output "irsa_name" {
12 | description = "IAM role name for the service account"
13 | value = module.helm_addon.irsa_name
14 | }
15 |
16 | output "service_account" {
17 | description = "Name of Kubernetes service account"
18 | value = module.helm_addon.service_account
19 | }
20 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/values.yaml:
--------------------------------------------------------------------------------
1 | nodeSelector:
2 | kubernetes.io/os: ${operating-system}
3 |
--------------------------------------------------------------------------------
/modules/z-archieve/crossplane/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.10"
12 | }
13 | kubectl = {
14 | source = "gavinbunney/kubectl"
15 | version = ">= 1.14"
16 | }
17 | time = {
18 | source = "hashicorp/time"
19 | version = ">= 0.7"
20 | }
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/modules/z-archieve/csi-secrets-store-provider-aws/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = try(var.helm_config.name, "csi-secrets-store-provider-aws")
3 | namespace = try(var.helm_config.namespace, local.name)
4 | }
5 |
6 | resource "kubernetes_namespace_v1" "csi_secrets_store_provider_aws" {
7 | metadata {
8 | name = local.namespace
9 | }
10 | }
11 |
12 | module "helm_addon" {
13 | source = "../helm-addon"
14 |
15 | # https://github.com/aws/eks-charts/blob/master/stable/csi-secrets-store-provider-aws/Chart.yaml
16 | helm_config = merge(
17 | {
18 | name = local.name
19 | chart = local.name
20 | repository = "https://aws.github.io/eks-charts"
21 | version = "0.0.3"
22 | namespace = kubernetes_namespace_v1.csi_secrets_store_provider_aws.metadata[0].name
23 | description = "A Helm chart to install the Secrets Store CSI Driver and the AWS Key Management Service Provider inside a Kubernetes cluster."
24 | },
25 | var.helm_config
26 | )
27 |
28 | manage_via_gitops = var.manage_via_gitops
29 | addon_context = var.addon_context
30 | }
31 |
--------------------------------------------------------------------------------
/modules/z-archieve/csi-secrets-store-provider-aws/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/csi-secrets-store-provider-aws/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "CSI Secrets Store Provider AWS Helm Configurations"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps"
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | irsa_iam_role_path = string
26 | irsa_iam_permissions_boundary = string
27 | })
28 | }
29 |
--------------------------------------------------------------------------------
/modules/z-archieve/csi-secrets-store-provider-aws/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/data.tf:
--------------------------------------------------------------------------------
1 | # data "aws_partition" "current" {}
2 | # data "aws_caller_identity" "current" {}
3 | # data "aws_region" "current" {}
4 |
5 | # resource "time_sleep" "dataplane" {
6 | # create_duration = "10s"
7 |
8 | # triggers = {
9 | # data_plane_wait_arn = var.data_plane_wait_arn # this waits for the data plane to be ready
10 | # eks_cluster_id = var.eks_cluster_id # this ties it to downstream resources
11 | # }
12 | # }
13 |
14 | # data "aws_eks_cluster" "eks_cluster" {
15 | # # this makes downstream resources wait for data plane to be ready
16 | # name = time_sleep.dataplane.triggers["eks_cluster_id"]
17 | # }
18 |
--------------------------------------------------------------------------------
/modules/z-archieve/external-dns/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with GitOps"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/external-dns/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/external-secrets/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_iam_policy_document" "external_secrets" {
2 | statement {
3 | actions = ["ssm:GetParameter"]
4 | resources = var.external_secrets_ssm_parameter_arns
5 | }
6 |
7 | statement {
8 | actions = [
9 | "secretsmanager:GetResourcePolicy",
10 | "secretsmanager:GetSecretValue",
11 | "secretsmanager:DescribeSecret",
12 | "secretsmanager:ListSecretVersionIds",
13 | ]
14 | resources = var.external_secrets_secrets_manager_arns
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/modules/z-archieve/external-secrets/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 | manage_via_gitops = var.manage_via_gitops
4 | set_values = local.set_values
5 | helm_config = local.helm_config
6 | irsa_config = local.irsa_config
7 | addon_context = var.addon_context
8 | }
9 |
10 | resource "aws_iam_policy" "external_secrets" {
11 | name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa"
12 | path = var.addon_context.irsa_iam_role_path
13 |   description = "Provides permissions for External Secrets to retrieve secrets from AWS SSM and AWS Secrets Manager"
14 | policy = data.aws_iam_policy_document.external_secrets.json
15 | }
16 |
--------------------------------------------------------------------------------
/modules/z-archieve/external-secrets/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/external-secrets/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/helm-addon/outputs.tf:
--------------------------------------------------------------------------------
1 | output "helm_release" {
2 | description = "Map of attributes of the Helm release created without sensitive outputs"
3 | value = try({ for k, v in helm_release.addon : k => v if k != "repository_password" }, {})
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = try(helm_release.addon[0].metadata, null)
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = try(module.irsa[0].irsa_iam_role_arn, null)
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = try(module.irsa[0].irsa_iam_role_name, null)
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = try(coalesce(try(module.irsa[0].service_account, null), lookup(var.irsa_config, "kubernetes_service_account", null)), null)
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/helm-addon/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm chart config. Repository and version required. See https://registry.terraform.io/providers/hashicorp/helm/latest/docs"
3 | type = any
4 | }
5 |
6 | variable "set_values" {
7 | description = "Forced set values"
8 | type = any
9 | default = []
10 | }
11 |
12 | variable "set_sensitive_values" {
13 | description = "Forced set_sensitive values"
14 | type = any
15 | default = []
16 | }
17 |
18 | variable "manage_via_gitops" {
19 | description = "Determines if the add-on should be managed via GitOps"
20 | type = bool
21 | default = false
22 | }
23 |
24 | variable "irsa_iam_role_name" {
25 | description = "IAM role name for IRSA"
26 | type = string
27 | default = ""
28 | }
29 |
30 | variable "irsa_config" {
31 | description = "Input configuration for IRSA module"
32 | type = any
33 | default = {}
34 | }
35 |
36 | variable "addon_context" {
37 | description = "Input configuration for the addon"
38 | type = any
39 | }
40 |
--------------------------------------------------------------------------------
/modules/z-archieve/helm-addon/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | helm = {
6 | source = "hashicorp/helm"
7 | version = ">= 2.4.1"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/ingress-nginx/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = try(var.helm_config.name, "ingress-nginx")
3 | namespace = try(var.helm_config.namespace, local.name)
4 | }
5 |
6 | resource "kubernetes_namespace_v1" "this" {
7 | count = try(var.helm_config.create_namespace, true) && local.namespace != "kube-system" ? 1 : 0
8 |
9 | metadata {
10 | name = local.namespace
11 | }
12 | }
13 |
14 | module "helm_addon" {
15 | source = "../helm-addon"
16 |
17 | helm_config = merge(
18 | {
19 | name = local.name
20 | chart = local.name
21 | repository = "https://kubernetes.github.io/ingress-nginx"
22 | version = "4.9.1"
23 | namespace = try(kubernetes_namespace_v1.this[0].metadata[0].name, local.namespace)
24 |       description = "The NGINX Ingress Controller Helm chart deployment configuration"
25 | },
26 | var.helm_config
27 | )
28 |
29 | manage_via_gitops = var.manage_via_gitops
30 | addon_context = var.addon_context
31 | }
32 |
--------------------------------------------------------------------------------
/modules/z-archieve/ingress-nginx/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/ingress-nginx/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Ingress NGINX Helm Configuration"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | })
26 | }
27 |
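Note: addon_context is the same object shape used across the archived modules. A minimal sketch (assumes a hypothetical var.eks_cluster_id input; not part of the archived code) of how a caller might populate it from standard AWS data sources:

data "aws_caller_identity" "current" {}
data "aws_partition" "current" {}
data "aws_region" "current" {}

data "aws_eks_cluster" "this" {
  name = var.eks_cluster_id # hypothetical input variable
}

locals {
  addon_context = {
    aws_caller_identity_account_id = data.aws_caller_identity.current.account_id
    aws_caller_identity_arn        = data.aws_caller_identity.current.arn
    aws_eks_cluster_endpoint       = data.aws_eks_cluster.this.endpoint
    aws_partition_id               = data.aws_partition.current.partition
    aws_region_name                = data.aws_region.current.name
    eks_cluster_id                 = data.aws_eks_cluster.this.id
    eks_oidc_issuer_url            = data.aws_eks_cluster.this.identity[0].oidc[0].issuer
    # OIDC provider ARN derived from the cluster's issuer URL
    eks_oidc_provider_arn          = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${replace(data.aws_eks_cluster.this.identity[0].oidc[0].issuer, "https://", "")}"
    tags                           = {}
  }
}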
--------------------------------------------------------------------------------
/modules/z-archieve/ingress-nginx/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/helm/istio-ingress.yaml:
--------------------------------------------------------------------------------
1 | # nodeSelector:
2 | # Addons-Services: true
3 | resources:
4 | limits:
5 | cpu: 20m
6 | memory: 200Mi
7 | requests:
8 | cpu: 10m
9 | memory: 100Mi
10 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/helm/values.yaml:
--------------------------------------------------------------------------------
1 | pilot:
2 | resources:
3 | limits:
4 | cpu: 100m
5 | memory: 200Mi
6 | requests:
7 | cpu: 50m
8 | memory: 100Mi
9 | affinity:
10 | nodeAffinity:
11 | requiredDuringSchedulingIgnoredDuringExecution:
12 | nodeSelectorTerms:
13 | - matchExpressions:
14 | - key: "Addons-Services"
15 | operator: In
16 | values:
17 | - "true"
18 |
19 | # gateways:
20 | # istio-ingressgateway:
21 | # nodeSelector:
22 | # Addons-Services: true
23 | # resources:
24 | # limits:
25 | # cpu: 100m
26 | # memory: 200Mi
27 | # requests:
28 | # cpu: 10m
29 | # memory: 100Mi
30 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/istio-observability/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/istio-observability/templates/clusterissuer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: ClusterIssuer
3 | metadata:
4 | name: letsencrypt-istio
5 | namespace: istio-system
6 | spec:
7 | acme:
8 | email: {{ .Values.clusterIssuer.email }}
9 | server: https://acme-v02.api.letsencrypt.org/directory
10 | privateKeySecretRef:
11 | name: letsencrypt-istio
12 | solvers:
13 | - http01:
14 | ingress:
15 | class: istio
16 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/istio-observability/templates/enable-access-logs.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.accessLogging.enabled -}}
2 | apiVersion: telemetry.istio.io/v1alpha1
3 | kind: Telemetry
4 | metadata:
5 | name: mesh-default
6 | namespace: istio-system
7 | spec:
8 | accessLogging:
9 | - providers:
10 | - name: envoy
11 | {{- end }}
12 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/istio-observability/templates/service-monitor-control-plane.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.monitoring.enabled -}}
2 | apiVersion: monitoring.coreos.com/v1
3 | kind: ServiceMonitor
4 | metadata:
5 | name: prometheus-oper-istio-controlplane
6 | labels:
7 | release: prometheus-operator
8 | monitoring: istio-controlplane
9 | spec:
10 | jobLabel: istio
11 | selector:
12 | matchExpressions:
13 | - {key: istio, operator: In, values: [mixer,pilot,galley,citadel,sidecar-injector]}
14 | namespaceSelector:
15 | matchNames:
16 | - istio-system
17 | endpoints:
18 | - port: http-monitoring
19 | interval: 15s
20 | {{- end }}
21 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/istio-observability/templates/service-monitor-dataplane.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.monitoring.enabled -}}
2 | apiVersion: monitoring.coreos.com/v1
3 | kind: ServiceMonitor
4 | metadata:
5 | name: prometheus-oper-istio-dataplane
6 | labels:
7 | monitoring: istio-dataplane
8 | release: prometheus-operator
9 | spec:
10 | selector:
11 | matchExpressions:
12 | - {key: istio-prometheus-ignore, operator: DoesNotExist}
13 | namespaceSelector:
14 | any: true
15 | jobLabel: envoy-stats
16 | endpoints:
17 | - path: /stats/prometheus
18 | targetPort: http-envoy-prom
19 | interval: 15s
20 | relabelings:
21 | - sourceLabels: [__meta_kubernetes_pod_container_port_name]
22 | action: keep
23 | regex: '.*-envoy-prom'
24 | - action: labelmap
25 | regex: "__meta_kubernetes_pod_label_(.+)"
26 | - sourceLabels: [__meta_kubernetes_namespace]
27 | action: replace
28 | targetLabel: namespace
29 | - sourceLabels: [__meta_kubernetes_pod_name]
30 | action: replace
31 | targetLabel: pod_name
32 | {{- end }}
33 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/istio-observability/values.yaml:
--------------------------------------------------------------------------------
1 | accessLogging:
2 | enabled: "${envoy_access_logs_enabled}"
3 |
4 | monitoring:
5 | enabled: "${prometheus_monitoring_enabled}"
6 |
7 | clusterIssuer:
8 | email: "${cert_manager_letsencrypt_email}"
9 |
--------------------------------------------------------------------------------
/modules/z-archieve/istio/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.12.26"
3 | required_providers {
4 | aws = {
5 | source = "hashicorp/aws"
6 | version = ">= 3.43.0"
7 | }
8 | kubernetes = {
9 | source = "hashicorp/kubernetes"
10 | version = ">= 2.0.2"
11 | }
12 | helm = {
13 | source = "hashicorp/helm"
14 | version = ">= 2.0.2"
15 | }
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/modules/z-archieve/karpenter-provisioner/config/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/modules/z-archieve/karpenter-provisioner/config/ipv4-values.yaml:
--------------------------------------------------------------------------------
1 | private_subnet_selector_key: ${private_subnet_selector_key}
2 | private_subnet_selector_value: "${private_subnet_selector_value}"
3 | security_group_selector_key: ${security_group_selector_key}
4 | security_group_selector_value: "${security_group_selector_value}"
5 | karpenter_ec2_capacity_type: "${instance_capacity_type}"
6 | karpenter_ec2_instance_family: "${ec2_instance_family}"
7 | karpenter_ec2_instance_type: "${ec2_instance_type}"
8 | excluded_karpenter_ec2_instance_type: "${excluded_instance_type}"
9 | provisioner_name: "${provisioner_name}"
10 | instance_capacity_type: "${instance_capacity_type}"
11 | kms_key_id: "${kms_key_id}"
12 | ec2_node_name: "${ec2_node_name}"
13 |
14 |
15 |
16 | spec:
17 | labels:
18 | ${karpenter_label}: "true"
19 | eks.amazonaws.com/nodegroup: "Services-ng"
20 |
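Note: this file is a Terraform template; the ${...} placeholders are supplied via templatefile() by the provisioner module. A minimal rendering sketch (all values illustrative, not part of the archived code):

locals {
  karpenter_ipv4_values = templatefile("${path.module}/config/ipv4-values.yaml", {
    private_subnet_selector_key   = "karpenter.sh/discovery" # illustrative selector
    private_subnet_selector_value = "my-cluster"
    security_group_selector_key   = "karpenter.sh/discovery"
    security_group_selector_value = "my-cluster"
    instance_capacity_type        = "spot"
    ec2_instance_family           = "t3"
    ec2_instance_type             = "t3.medium"
    excluded_instance_type        = "nano"
    provisioner_name              = "default"
    kms_key_id                    = ""
    ec2_node_name                 = "karpenter-node"
    karpenter_label               = "karpenter-enabled"
  })
}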
--------------------------------------------------------------------------------
/modules/z-archieve/karpenter-provisioner/config/ipv6-values.yaml:
--------------------------------------------------------------------------------
1 | private_subnet_selector_key: ${private_subnet_selector_key}
2 | private_subnet_selector_value: "${private_subnet_selector_value}"
3 | security_group_selector_key: ${security_group_selector_key}
4 | security_group_selector_value: "${security_group_selector_value}"
5 | karpenter_ec2_capacity_type: "${instance_capacity_type}"
6 | karpenter_ec2_instance_family: "${ec2_instance_family}"
7 | karpenter_ec2_instance_type: "${ec2_instance_type}"
8 | excluded_karpenter_ec2_instance_type: "${excluded_instance_type}"
9 | provisioner_name: "${provisioner_name}"
10 | karpenter_instance_hypervisor: "${instance_hypervisor}"
11 | instance_capacity_type: "${instance_capacity_type}"
12 | kms_key_id: "${kms_key_id}"
13 | ec2_node_name: "${ec2_node_name}"
14 |
15 |
16 | spec:
17 | labels:
18 | ${karpenter_label}: "true"
19 | eks.amazonaws.com/nodegroup: "Services-ng"
20 |
--------------------------------------------------------------------------------
/modules/z-archieve/karpenter-provisioner/tfsec.yaml:
--------------------------------------------------------------------------------
1 | exclude:
2 | - aws-iam-no-policy-wildcards # Wildcards required in addon IAM policies
3 | - aws-vpc-no-excessive-port-access # VPC settings left up to user implementation for recommended practices
4 | - aws-vpc-no-public-ingress-acl # VPC settings left up to user implementation for recommended practices
5 | - aws-eks-no-public-cluster-access-to-cidr # Public access enabled for better example usability, users are recommended to disable if possible
6 | - aws-eks-no-public-cluster-access # Public access enabled for better example usability, users are recommended to disable if possible
7 | - aws-eks-encrypt-secrets # Module defaults to encrypting secrets with CMK, but this is not hardcoded and therefore a spurious error
8 | - aws-vpc-no-public-egress-sgr # Added in v1.22
9 | - aws-ec2-no-public-egress-sgr
10 | - aws-ec2-no-public-ingress-sgr
11 | - aws-ec2-enforce-http-token-imds
12 | - aws-ec2-no-public-ip-subnet # VPN IP
13 | - aws-ec2-require-vpc-flow-logs-for-all-vpcs # disabled flow logs by default
14 |
--------------------------------------------------------------------------------
/modules/z-archieve/karpenter-provisioner/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.12.26"
3 | required_providers {
4 | helm = {
5 | source = "hashicorp/helm"
6 |       version = ">= 2.0.0" # adjust the version constraint as needed
7 | }
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/modules/z-archieve/karpenter/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 | manage_via_gitops = var.manage_via_gitops
4 | helm_config = local.helm_config
5 | set_values = local.set_values
6 | irsa_config = local.irsa_config
7 | addon_context = var.addon_context
8 | }
9 |
10 | resource "aws_iam_policy" "karpenter" {
11 | name = "${var.addon_context.eks_cluster_id}-karpenter"
12 | description = "IAM Policy for Karpenter"
13 | policy = data.aws_iam_policy_document.karpenter.json
14 | }
15 |
16 | resource "aws_iam_policy" "karpenter-spot" {
17 | name = "${var.addon_context.eks_cluster_id}-karpenter-spot"
18 |   description = "IAM Policy for the Karpenter spot service-linked role"
19 | policy = data.aws_iam_policy_document.karpenter-spot-service-linked-policy.json
20 | }
21 |
--------------------------------------------------------------------------------
/modules/z-archieve/karpenter/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/karpenter/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubecost/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 |
4 | # https://github.com/kubecost/cost-analyzer-helm-chart/blob/develop/cost-analyzer/Chart.yaml
5 | helm_config = merge(
6 | {
7 | name = "kubecost"
8 | chart = "cost-analyzer"
9 | repository = "oci://public.ecr.aws/kubecost"
10 | version = "1.97.0"
11 | namespace = "kubecost"
12 | values = [file("${path.module}/values.yaml")]
13 | create_namespace = true
14 | description = "Kubecost Helm Chart deployment configuration"
15 | },
16 | var.helm_config
17 | )
18 |
19 | manage_via_gitops = var.manage_via_gitops
20 | addon_context = var.addon_context
21 | }
22 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubecost/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubecost/values.yaml:
--------------------------------------------------------------------------------
1 | # https://github.com/kubecost/cost-analyzer-helm-chart/blob/master/cost-analyzer/values-eks-cost-monitoring.yaml
2 | global:
3 | grafana:
4 | enabled: false
5 | proxy: false
6 |
7 | imageVersion: prod-1.97.0
8 | kubecostFrontend:
9 | image: public.ecr.aws/kubecost/frontend
10 |
11 | kubecostModel:
12 | image: public.ecr.aws/kubecost/cost-model
13 |
14 | kubecostMetrics:
15 | emitPodAnnotations: true
16 | emitNamespaceAnnotations: true
17 |
18 | prometheus:
19 | server:
20 | image:
21 | repository: public.ecr.aws/kubecost/prometheus
22 | tag: v2.35.0
23 |
24 | configmapReload:
25 | prometheus:
26 | image:
27 | repository: public.ecr.aws/bitnami/configmap-reload
28 | tag: 0.7.1
29 |
30 | reporting:
31 | productAnalytics: false
32 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubecost/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 |   description = "Helm config for Kubecost."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | irsa_iam_role_path = string
26 | irsa_iam_permissions_boundary = string
27 | })
28 | }
29 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubecost/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | }
4 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubernetes-dashboard/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "kubernetes-dashboard"
3 |
4 | # https://github.com/kubernetes/dashboard/blob/master/charts/helm-chart/kubernetes-dashboard/Chart.yaml
5 | default_helm_config = {
6 | name = local.name
7 | chart = local.name
8 | repository = "https://kubernetes.github.io/dashboard/"
9 | version = "5.11.0"
10 | namespace = local.name
11 | description = "Kubernetes Dashboard Helm Chart"
12 | }
13 |
14 | helm_config = merge(
15 | local.default_helm_config,
16 | var.helm_config
17 | )
18 |
19 | argocd_gitops_config = {
20 | enable = true
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubernetes-dashboard/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 |
4 | manage_via_gitops = var.manage_via_gitops
5 | helm_config = local.helm_config
6 | addon_context = var.addon_context
7 |
8 | depends_on = [kubernetes_namespace_v1.this]
9 | }
10 |
11 | resource "kubernetes_namespace_v1" "this" {
12 | count = try(local.helm_config["create_namespace"], true) && local.helm_config["namespace"] != "kube-system" ? 1 : 0
13 |
14 | metadata {
15 | name = local.helm_config["namespace"]
16 | labels = {
17 | "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints"
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubernetes-dashboard/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubernetes-dashboard/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for the Kubernetes Dashboard"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps"
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | })
26 | }
27 |
--------------------------------------------------------------------------------
/modules/z-archieve/kubernetes-dashboard/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/metrics-server/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "metrics-server"
3 |
4 | # https://github.com/kubernetes-sigs/metrics-server/blob/master/charts/metrics-server/Chart.yaml
5 | default_helm_config = {
6 | name = local.name
7 | chart = local.name
8 | repository = "https://kubernetes-sigs.github.io/metrics-server/"
9 | version = "3.11.0"
10 | namespace = "kube-system"
11 |     description = "Metrics Server Helm chart deployment configuration"
12 | }
13 |
14 | helm_config = merge(
15 | local.default_helm_config,
16 | var.helm_config
17 | )
18 |
19 | argocd_gitops_config = {
20 | enable = true
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/modules/z-archieve/metrics-server/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "../helm-addon"
3 |
4 | manage_via_gitops = var.manage_via_gitops
5 | helm_config = local.helm_config
6 | addon_context = var.addon_context
7 |
8 | depends_on = [kubernetes_namespace_v1.this]
9 | }
10 |
11 | resource "kubernetes_namespace_v1" "this" {
12 | count = try(local.helm_config["create_namespace"], true) && local.helm_config["namespace"] != "kube-system" ? 1 : 0
13 |
14 | metadata {
15 | name = local.helm_config["namespace"]
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/modules/z-archieve/metrics-server/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/metrics-server/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for Metrics Server"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps"
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | })
26 | }
27 |
--------------------------------------------------------------------------------
/modules/z-archieve/metrics-server/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/reloader/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "reloader"
3 |
4 | argocd_gitops_config = {
5 | enable = true
6 | serviceAccountName = local.name
7 | }
8 | }
9 |
10 | module "helm_addon" {
11 | source = "../helm-addon"
12 |
13 | # https://github.com/stakater/Reloader/blob/master/deployments/kubernetes/chart/reloader/Chart.yaml
14 | helm_config = merge(
15 | {
16 | name = local.name
17 | chart = local.name
18 | repository = "https://stakater.github.io/stakater-charts"
19 | version = "v1.0.63"
20 | namespace = local.name
21 | create_namespace = true
22 | description = "Reloader Helm Chart deployment configuration"
23 | },
24 | var.helm_config
25 | )
26 |
27 | manage_via_gitops = var.manage_via_gitops
28 | addon_context = var.addon_context
29 | }
30 |
--------------------------------------------------------------------------------
/modules/z-archieve/reloader/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/reloader/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for Reloader."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | irsa_iam_role_path = string
26 | irsa_iam_permissions_boundary = string
27 | })
28 | }
29 |
--------------------------------------------------------------------------------
/modules/z-archieve/reloader/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | }
4 |
--------------------------------------------------------------------------------
/modules/z-archieve/secrets-store-csi-driver/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "secrets-store-csi-driver"
3 |
4 | # https://github.com/kubernetes-sigs/secrets-store-csi-driver/blob/main/charts/secrets-store-csi-driver/Chart.yaml
5 | default_helm_config = {
6 | name = local.name
7 | chart = local.name
8 | repository = "https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts"
9 | version = "1.2.4"
10 | namespace = local.name
11 | description = "A Helm chart to install the Secrets Store CSI Driver"
12 | }
13 |
14 | helm_config = merge(
15 | local.default_helm_config,
16 | var.helm_config
17 | )
18 |
19 | argocd_gitops_config = {
20 | enable = true
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/modules/z-archieve/secrets-store-csi-driver/main.tf:
--------------------------------------------------------------------------------
1 | resource "kubernetes_namespace_v1" "secrets_store_csi_driver" {
2 | metadata {
3 | name = local.name
4 |
5 | labels = {
6 | "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints"
7 | }
8 | }
9 | }
10 |
11 | module "helm_addon" {
12 | source = "../helm-addon"
13 | manage_via_gitops = var.manage_via_gitops
14 | helm_config = local.helm_config
15 | addon_context = var.addon_context
16 |
17 | depends_on = [kubernetes_namespace_v1.secrets_store_csi_driver]
18 | }
19 |
--------------------------------------------------------------------------------
/modules/z-archieve/secrets-store-csi-driver/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/secrets-store-csi-driver/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | type = any
3 | default = {}
4 |   description = "Secrets Store CSI Driver Helm configuration"
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | type = bool
9 | default = false
10 | description = "Determines if the add-on should be managed via GitOps."
11 | }
12 |
13 | variable "addon_context" {
14 | type = object({
15 | aws_caller_identity_account_id = string
16 | aws_caller_identity_arn = string
17 | aws_eks_cluster_endpoint = string
18 | aws_partition_id = string
19 | aws_region_name = string
20 | eks_cluster_id = string
21 | eks_oidc_issuer_url = string
22 | eks_oidc_provider_arn = string
23 | tags = map(string)
24 | irsa_iam_role_path = string
25 | irsa_iam_permissions_boundary = string
26 | })
27 | description = "Input configuration for the addon"
28 | }
29 |
--------------------------------------------------------------------------------
/modules/z-archieve/secrets-store-csi-driver/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/strimzi-kafka-operator/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "strimzi"
3 | default_helm_config = {
4 | name = local.name
5 | chart = "strimzi-kafka-operator"
6 | repository = "https://strimzi.io/charts/"
7 | version = "0.31.1"
8 | namespace = local.name
9 | create_namespace = true
10 | values = [templatefile("${path.module}/values.yaml", {})]
11 | description = "Strimzi - Apache Kafka on Kubernetes"
12 | }
13 | helm_config = merge(local.default_helm_config, var.helm_config)
14 | }
15 |
16 | #-------------------------------------------------
17 | # Strimzi Kafka Helm Add-on
18 | #-------------------------------------------------
19 | module "helm_addon" {
20 | source = "../helm-addon"
21 | helm_config = local.helm_config
22 | addon_context = var.addon_context
23 | manage_via_gitops = var.manage_via_gitops
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/strimzi-kafka-operator/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/strimzi-kafka-operator/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for strimzi-kafka-operator.
2 |
3 | resources:
4 | limits:
5 | memory: 1Gi
6 | cpu: 1000m
7 | requests:
8 | memory: 1Gi
9 | cpu: 1000m
10 |
--------------------------------------------------------------------------------
/modules/z-archieve/strimzi-kafka-operator/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 |   description = "Helm provider config for the Strimzi Kafka operator."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps."
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | irsa_iam_role_path = string
26 | irsa_iam_permissions_boundary = string
27 | })
28 | }
29 |
--------------------------------------------------------------------------------
/modules/z-archieve/strimzi-kafka-operator/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | }
4 |
--------------------------------------------------------------------------------
/modules/z-archieve/tetrate-istio/locals_tid.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | tetrate_istio_distribution_helm_config = {
3 | description = "Tetrate Istio Distribution - Simple, safe enterprise-grade Istio distribution"
4 | }
5 |
6 | tetrate_istio_distribution_helm_values = {
7 | cni = tolist([yamlencode({
8 | "global" : {
9 | "hub" : "containers.istio.tetratelabs.com",
10 | "tag" : "${lookup(var.cni_helm_config, "version", local.default_helm_config.version)}-tetratefips-v0",
11 | }
12 | })])
13 | istiod = tolist([yamlencode({
14 | "global" : {
15 | "hub" : "containers.istio.tetratelabs.com",
16 | "tag" : "${lookup(var.istiod_helm_config, "version", local.default_helm_config.version)}-tetratefips-v0",
17 | }
18 | })])
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/modules/z-archieve/tetrate-istio/main.tf:
--------------------------------------------------------------------------------
1 | module "base" {
2 | source = "../helm-addon"
3 |
4 | count = var.install_base ? 1 : 0
5 |
6 | manage_via_gitops = var.manage_via_gitops
7 | helm_config = local.base_helm_config
8 | addon_context = var.addon_context
9 | }
10 |
11 | module "cni" {
12 | source = "../helm-addon"
13 |
14 | count = var.install_cni ? 1 : 0
15 |
16 | manage_via_gitops = var.manage_via_gitops
17 | helm_config = local.cni_helm_config
18 | addon_context = var.addon_context
19 |
20 | depends_on = [module.base]
21 | }
22 |
23 | module "istiod" {
24 | source = "../helm-addon"
25 |
26 | count = var.install_istiod ? 1 : 0
27 |
28 | manage_via_gitops = var.manage_via_gitops
29 | helm_config = local.istiod_helm_config
30 | addon_context = var.addon_context
31 |
32 | depends_on = [module.cni]
33 | }
34 |
35 | module "gateway" {
36 | source = "../helm-addon"
37 |
38 | count = var.install_gateway ? 1 : 0
39 |
40 | manage_via_gitops = var.manage_via_gitops
41 | helm_config = local.gateway_helm_config
42 | addon_context = var.addon_context
43 |
44 | depends_on = [module.istiod]
45 | }
46 |
--------------------------------------------------------------------------------
/modules/z-archieve/tetrate-istio/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
--------------------------------------------------------------------------------
/modules/z-archieve/tetrate-istio/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | }
4 |
--------------------------------------------------------------------------------
/modules/z-archieve/velero/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/velero/values.yaml:
--------------------------------------------------------------------------------
1 | initContainers:
2 | - name: velero-plugin-for-csi
3 | image: velero/velero-plugin-for-csi:v0.7.0
4 | volumeMounts:
5 | - mountPath: /target
6 | name: plugins
7 | - name: velero-plugin-for-aws
8 | image: velero/velero-plugin-for-aws:v1.9.0
9 | imagePullPolicy: IfNotPresent
10 | volumeMounts:
11 | - mountPath: /target
12 | name: plugins
13 |
14 | configuration:
15 | backupStorageLocation:
16 | - name: default
17 | provider: aws
18 | bucket: ${bucket}
19 | config:
20 | region: ${region}
21 | volumeSnapshotLocation:
22 | - name: default
23 | provider: aws
24 | config:
25 | region: ${region}
26 |
27 | credentials:
28 | useSecret: false
29 |
--------------------------------------------------------------------------------
/modules/z-archieve/velero/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/z-archieve/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | time = {
10 | source = "hashicorp/time"
11 | version = ">= 0.8"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/z-archieve/vpa/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = try(var.helm_config.name, "vpa")
3 | namespace = try(var.helm_config.namespace, local.name)
4 | }
5 |
6 | resource "kubernetes_namespace_v1" "vpa" {
7 | count = try(var.helm_config.create_namespace, true) && local.namespace != "kube-system" ? 1 : 0
8 |
9 | metadata {
10 | name = local.namespace
11 | }
12 | }
13 |
14 | module "helm_addon" {
15 | source = "../helm-addon"
16 |
17 | # https://github.com/FairwindsOps/charts/blob/master/stable/vpa/Chart.yaml
18 | helm_config = merge(
19 | {
20 | name = local.name
21 | chart = local.name
22 | repository = "https://charts.fairwinds.com/stable"
23 | version = "1.5.0"
24 | namespace = try(kubernetes_namespace_v1.vpa[0].metadata[0].name, local.namespace)
25 | description = "Kubernetes Vertical Pod Autoscaler"
26 | },
27 | var.helm_config
28 | )
29 |
30 | manage_via_gitops = var.manage_via_gitops
31 | addon_context = var.addon_context
32 | }
33 |
--------------------------------------------------------------------------------
/modules/z-archieve/vpa/outputs.tf:
--------------------------------------------------------------------------------
1 | output "argocd_gitops_config" {
2 | description = "Configuration used for managing the add-on with ArgoCD"
3 | value = var.manage_via_gitops ? { enable = true } : null
4 | }
5 |
6 | output "release_metadata" {
7 | description = "Map of attributes of the Helm release metadata"
8 | value = module.helm_addon.release_metadata
9 | }
10 |
11 | output "irsa_arn" {
12 | description = "IAM role ARN for the service account"
13 | value = module.helm_addon.irsa_arn
14 | }
15 |
16 | output "irsa_name" {
17 | description = "IAM role name for the service account"
18 | value = module.helm_addon.irsa_name
19 | }
20 |
21 | output "service_account" {
22 | description = "Name of Kubernetes service account"
23 | value = module.helm_addon.service_account
24 | }
25 |
--------------------------------------------------------------------------------
/modules/z-archieve/vpa/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for VPA"
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "manage_via_gitops" {
8 | description = "Determines if the add-on should be managed via GitOps"
9 | type = bool
10 | default = false
11 | }
12 |
13 | variable "addon_context" {
14 | description = "Input configuration for the addon"
15 | type = object({
16 | aws_caller_identity_account_id = string
17 | aws_caller_identity_arn = string
18 | aws_eks_cluster_endpoint = string
19 | aws_partition_id = string
20 | aws_region_name = string
21 | eks_cluster_id = string
22 | eks_oidc_issuer_url = string
23 | eks_oidc_provider_arn = string
24 | tags = map(string)
25 | })
26 | }
27 |
--------------------------------------------------------------------------------
/modules/z-archieve/vpa/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | kubernetes = {
6 | source = "hashicorp/kubernetes"
7 | version = ">= 2.10"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/tfsec.yaml:
--------------------------------------------------------------------------------
1 | exclude:
2 | - aws-iam-no-policy-wildcards # Wildcards required in addon IAM policies
3 | - aws-vpc-no-excessive-port-access # VPC settings left up to user implementation for recommended practices
4 | - aws-vpc-no-public-ingress-acl # VPC settings left up to user implementation for recommended practices
5 | - aws-eks-no-public-cluster-access-to-cidr # Public access enabled for better example usability, users are recommended to disable if possible
6 | - aws-eks-no-public-cluster-access # Public access enabled for better example usability, users are recommended to disable if possible
7 | - aws-eks-encrypt-secrets # Module defaults to encrypting secrets with CMK, but this is not hardcoded and therefore a spurious error
8 | - aws-vpc-no-public-egress-sgr # Added in v1.22
9 | - aws-ec2-no-public-egress-sgr
10 | - aws-ec2-no-public-ingress-sgr
11 | - aws-ec2-enforce-http-token-imds
12 | - aws-ec2-no-public-ip-subnet # VPN IP
13 | - aws-ec2-require-vpc-flow-logs-for-all-vpcs # disabled flow logs by default
14 |
--------------------------------------------------------------------------------
/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 | required_providers {
4 | aws = {
5 | source = "hashicorp/aws"
6 | version = ">= 4.23"
7 | }
8 | kubernetes = {
9 | source = "hashicorp/kubernetes"
10 | version = ">= 2.13"
11 | }
12 | helm = {
13 | source = "hashicorp/helm"
14 | version = ">= 2.6"
15 | }
16 | time = {
17 | source = "hashicorp/time"
18 | version = ">= 0.6.0"
19 | }
20 | random = {
21 | source = "hashicorp/random"
22 | version = ">= 3.0.0"
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------