├── .gitignore ├── .pre-commit-config.yaml ├── .tflint.hcl ├── IAM.md ├── LICENSE ├── README.md ├── data.tf ├── examples └── complete │ ├── README.md │ ├── config │ ├── argo-rollout.yaml │ ├── argocd-workflow.yaml │ ├── argocd.yaml │ ├── aws-alb.yaml │ ├── aws-node-termination-handler.yaml │ ├── cert-manager.yaml │ ├── cluster-autoscaler.yaml │ ├── cluster-proportional-autoscaler.yaml │ ├── coredns-hpa.yaml │ ├── ebs-csi.yaml │ ├── external-secret.yaml │ ├── ingress-nginx.yaml │ ├── istio.yaml │ ├── karpenter.yaml │ ├── keda.yaml │ ├── kubernetes-dashboard.yaml │ ├── metrics-server.yaml │ ├── reloader.yaml │ ├── velero.yaml │ └── vpa-crd.yaml │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── terraform.destroy.sh │ └── version.tf ├── locals.tf ├── main.tf ├── modules ├── argo-rollout │ ├── README.md │ ├── config │ │ └── argo-rollout.yaml │ ├── main.tf │ ├── output.tf │ └── variable.tf ├── argocd-projects │ ├── README.md │ ├── argo-project │ │ ├── Chart.yaml │ │ ├── templates │ │ │ └── project.yaml │ │ └── values.yaml │ ├── main.tf │ └── variable.tf ├── argocd-workflow │ ├── README.md │ ├── config │ │ └── argocd-workflow.yaml │ ├── main.tf │ ├── output.tf │ └── variable.tf ├── argocd │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── main.tf │ ├── output.tf │ └── variables.tf ├── aws-ebs-csi-driver │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── data.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── aws-ebs-storage-class │ ├── README.md │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── aws-efs-csi-driver │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── aws-efs-filesystem-with-storage-class │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── aws-load-balancer-controller │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── aws-node-termination-handler │ 
├── README.md │ ├── config │ │ ├── aws_nth.yaml │ │ └── values.yaml │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── aws-vpc-cni │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── cert-manager-le-http-issuer │ ├── README.md │ ├── config │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── ClusterIssuer-istio.yaml │ │ │ ├── ClusterIssuer-prod.yaml │ │ │ └── ClusterIssuer-staging.yaml │ │ └── values.yaml │ ├── main.tf │ ├── variable.tf │ └── versions.tf ├── cert-manager │ ├── README.md │ ├── config │ │ ├── cert-manager-ca │ │ │ ├── Chart.yaml │ │ │ ├── templates │ │ │ │ ├── certificate.yaml │ │ │ │ └── clusterissuers.yaml │ │ │ └── values.yaml │ │ ├── cert-manager-letsencrypt │ │ │ ├── Chart.yaml │ │ │ ├── templates │ │ │ │ ├── clusterissuer-production.yaml │ │ │ │ └── clusterissuer-staging.yaml │ │ │ └── values.yaml │ │ └── values.yaml │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── cluster-autoscaler │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── cluster-proportional-autoscaler │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── core-dns-hpa │ ├── README.md │ ├── config │ │ ├── Chart.yaml │ │ ├── templates │ │ │ └── hpa.yaml │ │ └── values.yaml │ ├── main.tf │ ├── output.tf │ ├── variable.tf │ └── versions.tf ├── defectdojo │ ├── .helmignore │ ├── Chart.lock │ ├── Chart.yaml │ ├── charts │ │ ├── mysql-9.1.8.tgz │ │ ├── postgresql-11.6.26.tgz │ │ ├── postgresql-ha-9.1.9.tgz │ │ ├── rabbitmq-11.2.2.tgz │ │ └── redis-16.12.3.tgz │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── celery-beat-deployment.yaml │ │ ├── celery-worker-deployment.yaml │ │ ├── configmap.yaml │ │ ├── django-deployment.yaml │ │ ├── django-ingress.yaml │ │ ├── django-service.yaml │ │ ├── 
extra-secret.yaml │ │ ├── gke-managed-certificate.yaml │ │ ├── initializer-job.yaml │ │ ├── media-pvc.yaml │ │ ├── network-policy.yaml │ │ ├── sa.yaml │ │ ├── secret-mysql.yaml │ │ ├── secret-postgresql-ha-pgpool.yaml │ │ ├── secret-postgresql-ha.yaml │ │ ├── secret-postgresql.yaml │ │ ├── secret-rabbitmq.yaml │ │ ├── secret-redis.yaml │ │ ├── secret.yaml │ │ └── tests │ │ │ └── unit-tests.yaml │ └── values.yaml ├── external-secret │ ├── README.md │ ├── config │ │ └── external-secret.yaml │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── falco │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── main.tf │ ├── output.tf │ ├── variable.tf │ └── versions.tf ├── helm-addon │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── ingress-nginx │ ├── README.md │ ├── config │ │ ├── ingress_nginx.yaml │ │ └── ingress_nginx_ipv6.yaml │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── irsa │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── karpenter │ ├── README.md │ ├── config │ │ └── karpenter.yaml │ ├── data.tf │ ├── example │ │ ├── node_class_al2_ami.yaml │ │ ├── node_class_bottlerocker_amis.yaml │ │ └── node_pool.yaml │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── scripts │ │ └── patch_karpenter_crds.sh │ ├── variables.tf │ └── versions.tf ├── keda │ ├── README.md │ ├── config │ │ ├── keda.yaml │ │ └── values.yaml │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf ├── kubeclarity │ └── values.yaml ├── kubernetes-dashboard │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── metrics-server-vpa │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── main.tf │ ├── metrics-server-vpa │ │ ├── Chart.yaml │ │ └── templates │ │ │ └── vpa.yaml │ └── variables.tf ├── metrics-server │ ├── README.md │ ├── 
config │ │ └── metrics_server.yaml │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── reloader │ ├── README.md │ ├── config │ │ └── reloader.yaml │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── service-monitor-crd │ ├── README.md │ ├── main.tf │ ├── service_monitor │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ │ └── crd.yaml │ │ └── values.yaml │ └── versions.tf ├── velero │ ├── README.md │ ├── data.tf │ ├── delete-snapshot.py │ ├── delete-snapshot.zip │ ├── helm │ │ └── values.yaml │ ├── locals.tf │ ├── main.tf │ ├── variable.tf │ ├── velero-data │ │ ├── README.md │ │ ├── helm │ │ │ └── values.yaml │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── velero_job │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ │ └── backup_job.yaml │ │ └── values.yaml │ ├── velero_notification │ │ └── values.yaml │ └── versions.tf ├── vpa-crds │ ├── README.md │ ├── config │ │ └── values.yaml │ ├── main.tf │ ├── variables.tf │ └── versions.tf └── z-archieve │ ├── README.md │ ├── aws-cloudwatch-metrics │ ├── README.md │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── aws-coredns │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── aws-efs-csi-driver │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── aws-kube-proxy │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── aws-load-balancer-controller │ ├── README.md │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── aws-node-termination-handler │ ├── README.md │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── aws-privateca-issuer │ ├── README.md │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── 
variables.tf │ └── versions.tf │ ├── cert-manager-csi-driver │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── cert-manager-istio-csr │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── cert-manager │ ├── README.md │ ├── cert-manager-ca │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── certificate.yaml │ │ │ └── clusterissuers.yaml │ │ └── values.yaml │ ├── cert-manager-letsencrypt │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── clusterissuer-production.yaml │ │ │ └── clusterissuer-staging.yaml │ │ └── values.yaml │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── cluster-autoscaler │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── cluster-proportional-autoscaler │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── crossplane │ ├── README.md │ ├── aws-provider │ │ ├── aws-controller-config.yaml │ │ ├── aws-provider-config.yaml │ │ ├── aws-provider.yaml │ │ ├── jet-aws-controller-config.yaml │ │ ├── jet-aws-provider-config.yaml │ │ └── jet-aws-provider.yaml │ ├── data.tf │ ├── kubernetes-provider │ │ ├── kubernetes-controller-clusterrolebinding.yaml │ │ ├── kubernetes-controller-config.yaml │ │ ├── kubernetes-provider-config.yaml │ │ └── kubernetes-provider.yaml │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── csi-secrets-store-provider-aws │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── data.tf │ ├── external-dns │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── external-secrets │ ├── README.md │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── helm-addon │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ 
└── versions.tf │ ├── ingress-nginx │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── istio │ ├── README.md │ ├── config │ │ └── istio.yaml │ ├── helm │ │ ├── istio-ingress.yaml │ │ ├── values.yaml │ │ └── values │ │ │ └── istiod │ │ │ └── values.yaml │ ├── istio-observability │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── clusterissuer.yaml │ │ │ ├── enable-access-logs.yaml │ │ │ ├── service-monitor-control-plane.yaml │ │ │ └── service-monitor-dataplane.yaml │ │ └── values.yaml │ ├── main.tf │ ├── variables.tf │ └── versions.tf │ ├── karpenter-provisioner │ ├── README.md │ ├── config │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── ipv4-values.yaml │ │ ├── ipv6-values.yaml │ │ └── templates │ │ │ └── provisioner.yaml │ ├── main.tf │ ├── tfsec.yaml │ ├── variable.tf │ └── versions.tf │ ├── karpenter │ ├── README.md │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── kubecost │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── kubernetes-dashboard │ ├── README.md │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── locals.tf │ ├── main.tf │ ├── metrics-server │ ├── README.md │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── outputs.tf │ ├── reloader │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── secrets-store-csi-driver │ ├── README.md │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── strimzi-kafka-operator │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── variables.tf │ └── versions.tf │ ├── tetrate-istio │ ├── README.md │ ├── locals.tf │ ├── locals_tid.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── variables.tf │ ├── velero │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── values.yaml │ ├── 
variables.tf │ └── versions.tf │ ├── versions.tf │ └── vpa │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── outputs.tf ├── tfsec.yaml ├── variables.tf └── versions.tf /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | .terraform.lock.hcl 26 | # Include override files you do wish to add to version control using negated pattern 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | # Ignore CLI configuration files 33 | .terraformrc 34 | terraform.rc 35 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.1.0 4 | hooks: 5 | - id: trailing-whitespace 6 | args: ['--markdown-linebreak-ext=md'] 7 | - id: end-of-file-fixer 8 | - id: check-merge-conflict 9 | - id: detect-private-key 10 | - id: detect-aws-credentials 11 | args: ['--allow-missing-credentials'] 12 | - repo: https://github.com/antonbabenko/pre-commit-terraform 13 | rev: v1.77.0 14 | hooks: 15 | - id: 
terraform_fmt 16 | - id: terraform_docs 17 | args: 18 | - '--args=--lockfile=false' 19 | - --hook-config=--add-to-existing-file=true 20 | - --hook-config=--create-file-if-not-exist=true 21 | 22 | - id: terraform_tflint 23 | args: 24 | - --args=--config=.tflint.hcl 25 | - id: terraform_tfsec 26 | files: ^examples/ # only scan `examples/*` which are the implementation 27 | args: 28 | - --args=--config-file=__GIT_WORKING_DIR__/tfsec.yaml 29 | - --args=--concise-output 30 | -------------------------------------------------------------------------------- /data.tf: -------------------------------------------------------------------------------- 1 | data "aws_partition" "current" {} 2 | data "aws_caller_identity" "current" {} 3 | 4 | resource "time_sleep" "dataplane" { 5 | create_duration = "10s" 6 | 7 | triggers = { 8 | data_plane_wait_arn = var.data_plane_wait_arn # this waits for the data plane to be ready 9 | eks_cluster_id = var.eks_cluster_name # this ties it to downstream resources 10 | } 11 | } 12 | 13 | data "aws_eks_cluster" "eks_cluster" { 14 | # this makes downstream resources wait for data plane to be ready 15 | name = time_sleep.dataplane.triggers["eks_cluster_id"] 16 | } 17 | -------------------------------------------------------------------------------- /examples/complete/config/argo-rollout.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | affinity: 3 | nodeAffinity: 4 | requiredDuringSchedulingIgnoredDuringExecution: 5 | nodeSelectorTerms: 6 | - matchExpressions: 7 | - key: "Addons-Services" 8 | operator: In 9 | values: 10 | - "true" 11 | resources: 12 | limits: 13 | cpu: 20m 14 | memory: 100Mi 15 | requests: 16 | cpu: 10m 17 | memory: 50Mi 18 | 19 | dashboard: 20 | affinity: 21 | nodeAffinity: 22 | requiredDuringSchedulingIgnoredDuringExecution: 23 | nodeSelectorTerms: 24 | - matchExpressions: 25 | - key: "Addons-Services" 26 | operator: In 27 | values: 28 | - "true" 29 | resources: 30 | limits: 
31 | cpu: 20m 32 | memory: 100Mi 33 | requests: 34 | cpu: 10m 35 | memory: 50Mi 36 | -------------------------------------------------------------------------------- /examples/complete/config/argocd-workflow.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | affinity: 3 | nodeAffinity: 4 | requiredDuringSchedulingIgnoredDuringExecution: 5 | nodeSelectorTerms: 6 | - matchExpressions: 7 | - key: "Addons-Services" 8 | operator: In 9 | values: 10 | - "true" 11 | 12 | executor: 13 | resources: 14 | limits: 15 | cpu: 20m 16 | memory: 100Mi 17 | requests: 18 | cpu: 10m 19 | memory: 50Mi 20 | 21 | server: 22 | resources: 23 | limits: 24 | cpu: 20m 25 | memory: 100Mi 26 | requests: 27 | cpu: 10m 28 | memory: 50Mi 29 | 30 | logging: 31 | resources: 32 | limits: 33 | cpu: 20m 34 | memory: 100Mi 35 | requests: 36 | cpu: 10m 37 | memory: 50Mi 38 | 39 | mainContainer: 40 | resources: 41 | limits: 42 | cpu: 20m 43 | memory: 100Mi 44 | requests: 45 | cpu: 10m 46 | memory: 50Mi 47 | -------------------------------------------------------------------------------- /examples/complete/config/aws-alb.yaml: -------------------------------------------------------------------------------- 1 | ## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" 2 | 3 | affinity: 4 | nodeAffinity: 5 | requiredDuringSchedulingIgnoredDuringExecution: 6 | nodeSelectorTerms: 7 | - matchExpressions: 8 | - key: "Addons-Services" 9 | operator: In 10 | values: 11 | - "true" 12 | 13 | ## Using limits and requests 14 | 15 | resources: 16 | limits: 17 | cpu: 100m 18 | memory: 200Mi 19 | requests: 20 | cpu: 50m 21 | memory: 100Mi 22 | -------------------------------------------------------------------------------- /examples/complete/config/aws-node-termination-handler.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | limits: 3 | cpu: 20m 4 | memory: 60Mi 5 | 
requests: 6 | cpu: 10m 7 | memory: 30Mi 8 | -------------------------------------------------------------------------------- /examples/complete/config/cluster-autoscaler.yaml: -------------------------------------------------------------------------------- 1 | ## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" 2 | 3 | affinity: 4 | nodeAffinity: 5 | requiredDuringSchedulingIgnoredDuringExecution: 6 | nodeSelectorTerms: 7 | - matchExpressions: 8 | - key: "Addons-Services" 9 | operator: In 10 | values: 11 | - "true" 12 | 13 | ## Using limits and requests 14 | 15 | resources: 16 | limits: 17 | cpu: 50m 18 | memory: 200Mi 19 | requests: 20 | cpu: 20m 21 | memory: 100Mi 22 | -------------------------------------------------------------------------------- /examples/complete/config/cluster-proportional-autoscaler.yaml: -------------------------------------------------------------------------------- 1 | # Formula for controlling the replicas. Adjust according to your needs 2 | # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) ) 3 | # replicas = min(replicas, max) 4 | # replicas = max(replicas, min) 5 | config: 6 | linear: 7 | coresPerReplica: 2 8 | nodesPerReplica: 1 9 | min: 1 10 | max: 20 11 | preventSinglePointFailure: true 12 | includeUnschedulableNodes: true 13 | 14 | # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive). 
15 | # The following option should be defined in user defined values.yaml using var.helm_config 16 | 17 | options: 18 | target: deployment/coredns 19 | 20 | resources: 21 | limits: 22 | cpu: 100m 23 | memory: 150Mi 24 | requests: 25 | cpu: 50m 26 | memory: 75Mi 27 | 28 | affinity: 29 | nodeAffinity: 30 | requiredDuringSchedulingIgnoredDuringExecution: 31 | nodeSelectorTerms: 32 | - matchExpressions: 33 | - key: "Addons-Services" 34 | operator: In 35 | values: 36 | - "true" 37 | -------------------------------------------------------------------------------- /examples/complete/config/coredns-hpa.yaml: -------------------------------------------------------------------------------- 1 | affinity: 2 | nodeAffinity: 3 | requiredDuringSchedulingIgnoredDuringExecution: 4 | nodeSelectorTerms: 5 | - matchExpressions: 6 | - key: "Addons-Services" 7 | operator: In 8 | values: 9 | - "true" 10 | 11 | resources: 12 | limits: 13 | cpu: 200m 14 | memory: 200Mi 15 | requests: 16 | cpu: 100m 17 | memory: 100Mi 18 | -------------------------------------------------------------------------------- /examples/complete/config/ebs-csi.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | resources: 3 | requests: 4 | cpu: 10m 5 | memory: 40Mi 6 | limits: 7 | cpu: 20m 8 | memory: 80Mi 9 | 10 | node: 11 | resources: 12 | requests: 13 | cpu: 10m 14 | memory: 40Mi 15 | limits: 16 | cpu: 20m 17 | memory: 80Mi 18 | -------------------------------------------------------------------------------- /examples/complete/config/ingress-nginx.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | resources: 3 | limits: 4 | cpu: 100m 5 | memory: 400Mi 6 | requests: 7 | cpu: 50m 8 | memory: 200Mi 9 | 10 | affinity: 11 | nodeAffinity: 12 | requiredDuringSchedulingIgnoredDuringExecution: 13 | nodeSelectorTerms: 14 | - matchExpressions: 15 | - key: "Addons-Services" 16 | operator: "In" 17 | values: 18 | - 
"true" 19 | -------------------------------------------------------------------------------- /examples/complete/config/istio.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | defaultResources: 3 | requests: 4 | cpu: 10m 5 | memory: 100Mi 6 | limits: 7 | cpu: 20m 8 | memory: 200Mi 9 | 10 | proxy: 11 | resources: 12 | requests: 13 | cpu: 10m 14 | memory: 80Mi 15 | limits: 16 | cpu: 20m 17 | memory: 160Mi 18 | 19 | pilot: 20 | resources: 21 | limits: 22 | cpu: 100m 23 | memory: 200Mi 24 | requests: 25 | cpu: 50m 26 | memory: 100Mi 27 | 28 | affinity: 29 | nodeAffinity: 30 | requiredDuringSchedulingIgnoredDuringExecution: 31 | nodeSelectorTerms: 32 | - matchExpressions: 33 | - key: "Addons-Services" 34 | operator: In 35 | values: 36 | - "true" 37 | -------------------------------------------------------------------------------- /examples/complete/config/karpenter.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | containerName: controller 3 | resources: 4 | requests: 5 | cpu: 50m 6 | memory: 200Mi 7 | limits: 8 | cpu: 100m 9 | memory: 400Mi 10 | 11 | affinity: 12 | nodeAffinity: 13 | requiredDuringSchedulingIgnoredDuringExecution: 14 | nodeSelectorTerms: 15 | - matchExpressions: 16 | - key: "Addons-Services" 17 | operator: In 18 | values: 19 | - "true" 20 | 21 | settings: 22 | eksControlPlane: false 23 | featureGates: 24 | spotToSpotConsolidation: true 25 | nodeRepair: true 26 | 27 | service: 28 | # -- Additional annotations for the Service. 
29 | annotations: {} 30 | 31 | schedulerName: default-scheduler 32 | -------------------------------------------------------------------------------- /examples/complete/config/kubernetes-dashboard.yaml: -------------------------------------------------------------------------------- 1 | app: 2 | affinity: 3 | nodeAffinity: 4 | requiredDuringSchedulingIgnoredDuringExecution: 5 | nodeSelectorTerms: 6 | - matchExpressions: 7 | - key: "Addons-Services" 8 | operator: In 9 | values: 10 | - "true" 11 | 12 | auth: 13 | service: 14 | resources: 15 | requests: 16 | cpu: 100m 17 | memory: 200Mi 18 | limits: 19 | cpu: 250m 20 | memory: 400Mi 21 | 22 | api: 23 | service: 24 | resources: 25 | requests: 26 | cpu: 100m 27 | memory: 200Mi 28 | limits: 29 | cpu: 250m 30 | memory: 400Mi 31 | 32 | web: 33 | service: 34 | resources: 35 | requests: 36 | cpu: 100m 37 | memory: 200Mi 38 | limits: 39 | cpu: 250m 40 | memory: 400Mi 41 | 42 | 43 | metricsScraper: 44 | service: 45 | resources: 46 | requests: 47 | cpu: 100m 48 | memory: 200Mi 49 | limits: 50 | cpu: 250m 51 | memory: 400Mi 52 | -------------------------------------------------------------------------------- /examples/complete/config/metrics-server.yaml: -------------------------------------------------------------------------------- 1 | ## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" 2 | 3 | affinity: 4 | nodeAffinity: 5 | requiredDuringSchedulingIgnoredDuringExecution: 6 | nodeSelectorTerms: 7 | - matchExpressions: 8 | - key: "Addons-Services" 9 | operator: In 10 | values: 11 | - "true" 12 | 13 | ## Using limits and requests 14 | 15 | resources: 16 | limits: 17 | cpu: 60m 18 | memory: 200Mi 19 | requests: 20 | cpu: 30m 21 | memory: 100Mi 22 | -------------------------------------------------------------------------------- /examples/complete/config/reloader.yaml: -------------------------------------------------------------------------------- 1 | reloader: 2 | deployment: 3 | 
affinity: 4 | nodeAffinity: 5 | requiredDuringSchedulingIgnoredDuringExecution: 6 | nodeSelectorTerms: 7 | - matchExpressions: 8 | - key: "Addons-Services" 9 | operator: In 10 | values: 11 | - "true" 12 | 13 | resources: 14 | limits: 15 | cpu: "100m" 16 | memory: "240Mi" 17 | requests: 18 | cpu: "50m" 19 | memory: "120Mi" 20 | -------------------------------------------------------------------------------- /examples/complete/config/velero.yaml: -------------------------------------------------------------------------------- 1 | affinity: 2 | nodeAffinity: 3 | requiredDuringSchedulingIgnoredDuringExecution: 4 | nodeSelectorTerms: 5 | - matchExpressions: 6 | - key: "Addons-Services" 7 | operator: In 8 | values: 9 | - "true" 10 | 11 | resources: 12 | requests: 13 | cpu: 10m 14 | memory: 128Mi 15 | limits: 16 | cpu: 20m 17 | memory: 256Mi 18 | -------------------------------------------------------------------------------- /examples/complete/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | default_tags { 4 | tags = local.additional_tags 5 | } 6 | } 7 | 8 | data "aws_eks_cluster" "cluster" { 9 | name = "" 10 | } 11 | 12 | data "aws_eks_cluster_auth" "cluster" { 13 | name = "" 14 | } 15 | 16 | provider "kubernetes" { 17 | host = data.aws_eks_cluster.cluster.endpoint 18 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) 19 | token = data.aws_eks_cluster_auth.cluster.token 20 | } 21 | 22 | provider "helm" { 23 | kubernetes { 24 | host = data.aws_eks_cluster.cluster.endpoint 25 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) 26 | token = data.aws_eks_cluster_auth.cluster.token 27 | } 28 | } 29 | 30 | provider "kubectl" { 31 | host = data.aws_eks_cluster.cluster.endpoint 32 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) 33 | 
token = data.aws_eks_cluster_auth.cluster.token 34 | load_config_file = false 35 | } 36 | -------------------------------------------------------------------------------- /examples/complete/version.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 3.43.0" 7 | } 8 | kubernetes = { 9 | source = "hashicorp/kubernetes" 10 | version = ">= 2.0.2" 11 | } 12 | kubectl = { 13 | source = "gavinbunney/kubectl" 14 | version = ">= 1.7.0" 15 | } 16 | helm = { 17 | source = "hashicorp/helm" 18 | version = ">= 2.0.0" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /modules/argo-rollout/output.tf: -------------------------------------------------------------------------------- 1 | output "argorollout_credentials" { 2 | value = var.argorollout_config.enable_dashboard == true && var.argorollout_config.argorollout_ingress_load_balancer == "nlb" ? { 3 | username = "admin" 4 | password = nonsensitive(random_password.argorollout_password[0].result) 5 | hostname = var.argorollout_config.hostname 6 | } : { hostname = var.argorollout_config.hostname } 7 | } 8 | -------------------------------------------------------------------------------- /modules/argo-rollout/variable.tf: -------------------------------------------------------------------------------- 1 | variable "namespace" { 2 | type = string 3 | default = "argocd" 4 | description = "Name of the Kubernetes namespace where the Argocd deployment will be deployed." 
5 | } 6 | 7 | variable "argorollout_config" { 8 | type = any 9 | 10 | default = { 11 | values = {} 12 | namespace = "" 13 | hostname = "" 14 | enable_dashboard = false 15 | argorollout_ingress_load_balancer = "" 16 | private_alb_enabled = false 17 | alb_acm_certificate_arn = "" 18 | subnet_ids = "" 19 | } 20 | description = "Specify the configuration settings for Argocd-Rollout, including the hostname, and custom YAML values." 21 | } 22 | 23 | variable "chart_version" { 24 | default = "2.38.0" 25 | type = string 26 | description = "Argo rollout chart version" 27 | } 28 | -------------------------------------------------------------------------------- /modules/argocd-projects/README.md: -------------------------------------------------------------------------------- 1 | # argocd-projects 2 | 3 | 4 | ## Requirements 5 | 6 | No requirements. 7 | 8 | ## Providers 9 | 10 | | Name | Version | 11 | |------|---------| 12 | | [helm](#provider\_helm) | n/a | 13 | 14 | ## Modules 15 | 16 | No modules. 17 | 18 | ## Resources 19 | 20 | | Name | Type | 21 | |------|------| 22 | | [helm_release.argo_project](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | 23 | 24 | ## Inputs 25 | 26 | | Name | Description | Type | Default | Required | 27 | |------|-------------|------|---------|:--------:| 28 | | [name](#input\_name) | Name of argo-project | `string` | `""` | no | 29 | | [namespace](#input\_namespace) | Namespace on which argocd-project will get deployed | `string` | `""` | no | 30 | 31 | ## Outputs 32 | 33 | No outputs. 
34 | 35 | -------------------------------------------------------------------------------- /modules/argocd-projects/argo-project/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: argo-projects 3 | description: A Helm chart for Kubernetes 4 | type: application 5 | version: 0.1.0 6 | appVersion: "1.16.0" 7 | -------------------------------------------------------------------------------- /modules/argocd-projects/argo-project/templates/project.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: AppProject 3 | metadata: 4 | name: {{ .Values.name }} 5 | namespace: {{ .Values.namespace }} 6 | spec: 7 | clusterResourceWhitelist: 8 | - group: '*' 9 | kind: '*' 10 | destinations: 11 | - namespace: '*' 12 | server: '*' 13 | sourceRepos: 14 | - '*' 15 | sourceNamespaces: 16 | - '*' 17 | -------------------------------------------------------------------------------- /modules/argocd-projects/argo-project/values.yaml: -------------------------------------------------------------------------------- 1 | namespace: ${namespace} 2 | name: ${name} 3 | -------------------------------------------------------------------------------- /modules/argocd-projects/main.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "argo_project" { 2 | name = "argo-project" 3 | chart = "${path.module}/argo-project" 4 | values = [templatefile("${path.module}/argo-project/values.yaml", { 5 | namespace = var.namespace 6 | name = var.name 7 | })] 8 | namespace = var.namespace 9 | } 10 | -------------------------------------------------------------------------------- /modules/argocd-projects/variable.tf: -------------------------------------------------------------------------------- 1 | variable "namespace" { 2 | description = "Namespace on which argocd-project will get deployed" 3 | default = "" 
# ArgoCD connection details for consumers of this module.
# NOTE(review): the admin password is wrapped in nonsensitive(), so it is
# shown in plain text in plan/apply output and stored unmasked in state —
# confirm this exposure is intended before reuse.
output "argocd" {
  description = "Argocd_Info"
  value = {
    username = "admin",
    password = nonsensitive(data.kubernetes_secret.argocd-secret.data.password),
    url      = var.argocd_config.hostname
  }
}
20 | } 21 | 22 | variable "namespace" { 23 | type = string 24 | default = "argocd" 25 | description = "Name of the Kubernetes namespace where the Argocd deployment will be deployed." 26 | } 27 | 28 | variable "ingress_class_name" { 29 | type = string 30 | default = "nginx" 31 | description = "Enter ingress class name which is created in EKS cluster" 32 | } 33 | -------------------------------------------------------------------------------- /modules/aws-ebs-csi-driver/config/values.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | resources: 3 | requests: 4 | cpu: 10m 5 | memory: 40Mi 6 | limits: 7 | cpu: 20m 8 | memory: 80Mi 9 | 10 | 11 | node: 12 | resources: 13 | requests: 14 | cpu: 10m 15 | memory: 40Mi 16 | limits: 17 | cpu: 20m 18 | memory: 80Mi 19 | -------------------------------------------------------------------------------- /modules/aws-ebs-csi-driver/outputs.tf: -------------------------------------------------------------------------------- 1 | output "release_metadata" { 2 | description = "Map of attributes of the Helm release metadata" 3 | value = try(module.helm_addon[0].release_metadata, null) 4 | } 5 | 6 | output "irsa_arn" { 7 | description = "IAM role ARN for the service account" 8 | value = try(module.helm_addon[0].irsa_arn, null) 9 | } 10 | 11 | output "irsa_name" { 12 | description = "IAM role name for the service account" 13 | value = try(module.helm_addon[0].irsa_name, null) 14 | } 15 | 16 | output "service_account" { 17 | description = "Name of Kubernetes service account" 18 | value = try(module.helm_addon[0].service_account, null) 19 | } 20 | -------------------------------------------------------------------------------- /modules/aws-ebs-csi-driver/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 
# Single-AZ gp3 EBS StorageClass, optionally encrypted with a customer KMS
# key. Created only when var.single_az_ebs_gp3_storage_class is true.
resource "kubernetes_storage_class_v1" "single_az_sc" {
  count = var.single_az_ebs_gp3_storage_class ? 1 : 0
  metadata {
    name = var.single_az_ebs_gp3_storage_class_name
  }
  # NOTE(review): the in-tree kubernetes.io/aws-ebs provisioner does not
  # support the gp3 volume type — gp3 requires the EBS CSI driver
  # (ebs.csi.aws.com). Confirm which provisioner this cluster actually uses.
  storage_provisioner    = "kubernetes.io/aws-ebs"
  reclaim_policy         = "Retain"
  allow_volume_expansion = true
  volume_binding_mode    = "WaitForFirstConsumer"
  parameters = merge(
    {
      type      = "gp3"
      encrypted = true
      # Fixed key casing: the provisioner reads "kmsKeyId"; the previous
      # "kmskeyId" spelling was silently ignored, so volumes fell back to the
      # account-default EBS key instead of var.kms_key_id.
      kmsKeyId = var.kms_key_id != "" ? var.kms_key_id : null
      # assumes var.availability_zone is a single AZ name despite its
      # "List of Azs" description — TODO confirm with callers.
      zone = var.availability_zone
    },
    { for k, v in var.tags_all : "tagSpecification_${k}" => "${k}=${v}" }
  )
}
`map('BusinessUnit', 'XYZ')`)"
10 | } 11 | -------------------------------------------------------------------------------- /modules/aws-efs-filesystem-with-storage-class/outputs.tf: -------------------------------------------------------------------------------- 1 | output "id" { 2 | value = var.enabled ? join("", aws_efs_file_system.default[*].id) : null 3 | description = "EFS ID" 4 | } 5 | -------------------------------------------------------------------------------- /modules/aws-efs-filesystem-with-storage-class/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 3.43.0" 7 | } 8 | kubernetes = { 9 | source = "hashicorp/kubernetes" 10 | version = ">= 2.0.2" 11 | } 12 | helm = { 13 | source = "hashicorp/helm" 14 | version = ">= 2.0.2" 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /modules/aws-load-balancer-controller/config/values.yaml: -------------------------------------------------------------------------------- 1 | clusterName: ${eks_cluster_id} 2 | region: ${aws_region} 3 | image: 4 | repository: ${repository} 5 | 6 | ingressClass: ${load_balancer_controller_name} 7 | ingressClassParams: 8 | name: ${load_balancer_controller_name} 9 | create: true 10 | webhookServiceName: ${load_balancer_controller_name}-webhook-service 11 | 12 | enableServiceMutatorWebhook: false 13 | 14 | nameOverride: ${load_balancer_controller_name} 15 | 16 | affinity: 17 | nodeAffinity: 18 | requiredDuringSchedulingIgnoredDuringExecution: 19 | nodeSelectorTerms: 20 | - matchExpressions: 21 | - key: "Addons-Services" 22 | operator: In 23 | values: 24 | - "true" 25 | 26 | resources: 27 | limits: 28 | cpu: 100m 29 | memory: 200Mi 30 | requests: 31 | cpu: 50m 32 | memory: 100Mi 33 | -------------------------------------------------------------------------------- 
# Installs the AWS Load Balancer Controller chart through the shared
# helm-addon wrapper, wiring in the helm/set values and IRSA config computed
# in this module's locals.tf.
module "helm_addon" {
  source            = "../helm-addon"
  manage_via_gitops = var.manage_via_gitops
  set_values        = local.set_values
  helm_config       = local.helm_config
  irsa_config       = local.irsa_config
  addon_context     = var.addon_context
}

# IAM policy granted to the controller's IRSA role; the policy document
# itself comes from data.aws_iam_policy_document.aws_lb (see data.tf).
resource "aws_iam_policy" "aws_load_balancer_controller" {
  name        = "${var.addon_context.eks_cluster_id}-lb-irsa-${local.name}"
  description = "Allows lb controller to manage ALB and NLB"
  policy      = data.aws_iam_policy_document.aws_lb.json
  tags        = var.addon_context.tags
}
local.argocd_gitops_config : null 14 | } 15 | 16 | output "release_metadata" { 17 | description = "Map of attributes of the Helm release metadata" 18 | value = module.helm_addon.release_metadata 19 | } 20 | 21 | output "irsa_arn" { 22 | description = "IAM role ARN for the service account" 23 | value = module.helm_addon.irsa_arn 24 | } 25 | 26 | output "irsa_name" { 27 | description = "IAM role name for the service account" 28 | value = module.helm_addon.irsa_name 29 | } 30 | 31 | output "service_account" { 32 | description = "Name of Kubernetes service account" 33 | value = module.helm_addon.service_account 34 | } 35 | -------------------------------------------------------------------------------- /modules/aws-load-balancer-controller/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/aws-node-termination-handler/config/aws_nth.yaml: -------------------------------------------------------------------------------- 1 | enableSqsTerminationDraining: false 2 | enablePrometheusServer: true 3 | 4 | podAnnotations: 5 | co.elastic.logs/enabled: "true" 6 | 7 | resources: 8 | limits: 9 | cpu: 16m 10 | memory: 60Mi 11 | requests: 12 | cpu: 8m 13 | memory: 30Mi 14 | -------------------------------------------------------------------------------- /modules/aws-node-termination-handler/config/values.yaml: -------------------------------------------------------------------------------- 1 | enableSqsTerminationDraining: true 2 | enablePrometheusServer: true 3 | %{ if length(autoscaling_group_names) == 0 ~} 4 | checkASGTagBeforeDraining: false 5 | %{ endif ~} 6 | -------------------------------------------------------------------------------- 
# SQS queue policy allowing the EventBridge and SQS service principals to
# deliver events to the NTH queue. Created only when notifications are
# enabled (the queue itself is also count-gated, hence the [0] index).
data "aws_iam_policy_document" "aws_node_termination_handler_queue_policy_document" {
  count = var.enable_notifications ? 1 : 0
  statement {
    actions = [
      "sqs:SendMessage"
    ]
    principals {
      type = "Service"
      identifiers = [
        "events.amazonaws.com",
        "sqs.amazonaws.com"
      ]
    }
    resources = [
      aws_sqs_queue.aws_node_termination_handler_queue[0].arn
    ]
  }
}

# Permissions for the NTH pod via IRSA: complete ASG lifecycle hooks, describe
# instances/tags, and consume messages from the termination queue.
# NOTE(review): resources = ["*"] — these could be scoped to the queue/ASG
# ARNs; confirm the wildcard is intentional.
data "aws_iam_policy_document" "irsa_policy" {
  statement {
    actions = [
      "autoscaling:CompleteLifecycleAction",
      "autoscaling:DescribeAutoScalingInstances",
      "autoscaling:DescribeTags",
      "ec2:DescribeInstances",
      "sqs:DeleteMessage",
      "sqs:ReceiveMessage",
    ]
    resources = ["*"]
  }
}
"hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/aws-vpc-cni/outputs.tf: -------------------------------------------------------------------------------- 1 | output "irsa_arn" { 2 | description = "IAM role ARN for the service account" 3 | value = try(module.irsa_addon[0].irsa_iam_role_arn, null) 4 | } 5 | 6 | output "irsa_name" { 7 | description = "IAM role name for the service account" 8 | value = try(module.irsa_addon[0].irsa_iam_role_name, null) 9 | } 10 | -------------------------------------------------------------------------------- /modules/aws-vpc-cni/variables.tf: -------------------------------------------------------------------------------- 1 | variable "addon_config" { 2 | description = "Amazon EKS Managed Add-on" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "enable_ipv6" { 8 | description = "Enable IPV6 CNI policy" 9 | type = any 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /modules/aws-vpc-cni/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.10" 8 | } 9 | kubectl = { 10 | source = "gavinbunney/kubectl" 11 | version = ">= 1.7.0" 12 | } 13 | } 14 | } 15 | 
-------------------------------------------------------------------------------- /modules/cert-manager-le-http-issuer/config/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cert-manager-le-http 3 | description: A Helm chart to install a Cert Manager CA 4 | type: application 5 | version: 0.1.0 6 | appVersion: v0.1.0 7 | -------------------------------------------------------------------------------- /modules/cert-manager-le-http-issuer/config/templates/ClusterIssuer-istio.yaml: -------------------------------------------------------------------------------- 1 | # apiVersion: cert-manager.io/v1 2 | # kind: ClusterIssuer 3 | # metadata: 4 | # name: letsencrypt-istio 5 | # namespace: istio-system 6 | # spec: 7 | # acme: 8 | # # The ACME server URL 9 | # server: https://acme-v02.api.letsencrypt.org/directory 10 | # # Email address used for ACME registration 11 | # email: {{ .Values.email }} 12 | # # Name of a secret used to store the ACME account private key 13 | # privateKeySecretRef: 14 | # name: letsencrypt-istio 15 | # # Enable the HTTP-01 challenge provider 16 | # solvers: 17 | # - http01: 18 | # ingress: 19 | # class: istio 20 | -------------------------------------------------------------------------------- /modules/cert-manager-le-http-issuer/config/templates/ClusterIssuer-prod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-prod 5 | spec: 6 | acme: 7 | # The ACME server URL 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | # Email address used for ACME registration 10 | email: {{ .Values.email }} 11 | # Name of a secret used to store the ACME account private key 12 | privateKeySecretRef: 13 | name: letsencrypt-prod 14 | # Enable the HTTP-01 challenge provider 15 | solvers: 16 | - http01: 17 | ingress: 18 | class: {{ .Values.ingressClass }} 19 | 
# Installs the local chart in ./config that defines the Let's Encrypt HTTP-01
# ClusterIssuers (letsencrypt-prod and letsencrypt-staging-http).
resource "helm_release" "cert-manager-le-http-issuer" {
  name    = "cert-manager-le-http-issuer"
  chart   = "${path.module}/config/"
  version = "0.1.0"
  # Render the ingress class into the chart values; the issuers reference it
  # for solving HTTP-01 challenges.
  values = [templatefile("${path.module}/config/values.yaml", {
    ingress_class_name = var.ingress_class_name
  })]
  # The ACME registration e-mail is supplied via set{} rather than the values
  # file (values.yaml keeps `email` commented out).
  set {
    name  = "email"
    value = var.cert_manager_letsencrypt_email
    type  = "string"
  }
}
for Let's Encrypt notifications" 4 | type = string 5 | } 6 | 7 | variable "ingress_class_name" { 8 | description = "Enter the specific ingress class name" 9 | type = string 10 | } 11 | -------------------------------------------------------------------------------- /modules/cert-manager-le-http-issuer/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.0" 3 | 4 | required_providers { 5 | helm = { 6 | source = "hashicorp/helm" 7 | version = ">= 1.0.0" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/cert-manager/config/cert-manager-ca/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cert-manager-ca 3 | description: A Helm chart to install a Cert Manager CA 4 | type: application 5 | version: 0.2.0 6 | appVersion: v0.1.0 7 | -------------------------------------------------------------------------------- /modules/cert-manager/config/cert-manager-ca/templates/certificate.yaml: -------------------------------------------------------------------------------- 1 | {{- range .Values.clusterIssuers }} 2 | {{- if eq .type "CA" }} 3 | apiVersion: cert-manager.io/v1 4 | kind: Certificate 5 | metadata: 6 | name: {{ .name }} 7 | namespace: {{ $.Release.Namespace }} 8 | spec: 9 | isCA: true 10 | commonName: {{ .name }} 11 | secretName: {{ .secretName }} 12 | {{- with .privateKey }} 13 | privateKey: 14 | {{- toYaml . | nindent 4 }} 15 | {{- end }} 16 | {{- with .issuer }} 17 | issuerRef: 18 | {{- toYaml . 
| nindent 4 }} 19 | {{- end }} 20 | {{- end }} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /modules/cert-manager/config/cert-manager-ca/templates/clusterissuers.yaml: -------------------------------------------------------------------------------- 1 | {{- range .Values.clusterIssuers }} 2 | --- 3 | apiVersion: cert-manager.io/v1 4 | kind: ClusterIssuer 5 | metadata: 6 | name: {{ .name }} 7 | spec: 8 | {{- if eq .type "selfSigned" }} 9 | selfSigned: {} 10 | {{- else if eq .type "CA" }} 11 | ca: 12 | secretName: {{ .secretName }} 13 | {{- end }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /modules/cert-manager/config/cert-manager-ca/values.yaml: -------------------------------------------------------------------------------- 1 | clusterIssuers: 2 | - name: cert-manager-selfsigned 3 | type: selfSigned 4 | - name: cert-manager-ca 5 | type: CA 6 | secretName: cert-manager-ca-root 7 | privateKey: 8 | algorithm: ECDSA 9 | size: 256 10 | issuer: 11 | name: cert-manager-selfsigned 12 | kind: ClusterIssuer 13 | group: cert-manager.io 14 | -------------------------------------------------------------------------------- /modules/cert-manager/config/cert-manager-letsencrypt/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cert-manager-letsencrypt 3 | description: Cert Manager Cluster Issuers for Let's Encrypt certificates with DNS01 protocol 4 | type: application 5 | version: 0.1.0 6 | appVersion: v0.1.0 7 | -------------------------------------------------------------------------------- /modules/cert-manager/config/cert-manager-letsencrypt/templates/clusterissuer-production.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: {{ .Release.Name }}-production-route53 5 | labels: 
6 | ca: letsencrypt 7 | environment: production 8 | solver: dns01 9 | provider: route53 10 | spec: 11 | acme: 12 | {{- if .Values.email }} 13 | email: {{ .Values.email }} 14 | {{- end }} 15 | server: https://acme-v02.api.letsencrypt.org/directory 16 | preferredChain: ISRG Root X1 17 | privateKeySecretRef: 18 | name: {{ .Release.Name }}-production-route53 19 | solvers: 20 | - dns01: 21 | route53: 22 | region: {{ .Values.region | default "global" }} 23 | {{- if .Values.dnsZones }} 24 | selector: 25 | dnsZones: 26 | {{- .Values.dnsZones | toYaml | nindent 12 }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /modules/cert-manager/config/cert-manager-letsencrypt/templates/clusterissuer-staging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: {{ .Release.Name }}-staging-route53 5 | labels: 6 | ca: letsencrypt 7 | environment: staging 8 | solver: dns01 9 | provider: route53 10 | spec: 11 | acme: 12 | {{- if .Values.email }} 13 | email: {{ .Values.email }} 14 | {{- end }} 15 | server: https://acme-staging-v02.api.letsencrypt.org/directory 16 | preferredChain: ISRG Root X1 17 | privateKeySecretRef: 18 | name: {{ .Release.Name }}-staging-route53 19 | solvers: 20 | - dns01: 21 | route53: 22 | region: {{ .Values.region | default "global" }} 23 | {{- if .Values.dnsZones }} 24 | selector: 25 | dnsZones: 26 | {{- .Values.dnsZones | toYaml | nindent 12 }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /modules/cert-manager/config/cert-manager-letsencrypt/values.yaml: -------------------------------------------------------------------------------- 1 | # email: user@example.com 2 | 3 | # region: global 4 | 5 | # dnsZones: 6 | # - domain.name 7 | -------------------------------------------------------------------------------- /modules/cert-manager/data.tf: 
# Look up the hosted zone for every domain cert-manager should manage, so the
# IAM policy below can be scoped to exactly those zones.
data "aws_route53_zone" "selected" {
  for_each = toset(var.domain_names)

  name = each.key
}

# IAM policy for cert-manager's Route 53 DNS-01 solver.
data "aws_iam_policy_document" "cert_manager_iam_policy_document" {
  # Poll the status of record-change batches.
  statement {
    effect    = "Allow"
    resources = ["arn:${var.addon_context.aws_partition_id}:route53:::change/*"]
    actions   = ["route53:GetChange"]
  }

  # One statement per hosted zone, scoped to that zone's ARN.
  dynamic "statement" {
    for_each = { for k, v in toset(var.domain_names) : k => data.aws_route53_zone.selected[k].arn }

    content {
      effect    = "Allow"
      resources = [statement.value]
      # Fixed action-name casing (was "ChangeresourceRecordSets" /
      # "ListresourceRecordSets") to match the canonical Route 53 action
      # names ChangeResourceRecordSets / ListResourceRecordSets.
      actions = [
        "route53:ChangeResourceRecordSets",
        "route53:ListResourceRecordSets"
      ]
    }
  }

  # Needed by cert-manager to find the zone ID for a given domain.
  statement {
    effect    = "Allow"
    resources = ["*"]
    actions   = ["route53:ListHostedZonesByName"]
  }
}
local.argocd_gitops_config : null 4 | } 5 | 6 | output "eks_cluster_id" { 7 | description = "Current AWS EKS Cluster ID" 8 | value = var.addon_context.eks_cluster_id 9 | } 10 | 11 | output "release_metadata" { 12 | description = "Map of attributes of the Helm release metadata" 13 | value = module.helm_addon.release_metadata 14 | } 15 | 16 | output "irsa_arn" { 17 | description = "IAM role ARN for the service account" 18 | value = module.helm_addon.irsa_arn 19 | } 20 | 21 | output "irsa_name" { 22 | description = "IAM role name for the service account" 23 | value = module.helm_addon.irsa_name 24 | } 25 | 26 | output "service_account" { 27 | description = "Name of Kubernetes service account" 28 | value = module.helm_addon.service_account 29 | } 30 | -------------------------------------------------------------------------------- /modules/cert-manager/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.10" 8 | } 9 | helm = { 10 | source = "hashicorp/helm" 11 | version = ">= 2.4.1" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
locals {
  name      = "cluster-proportional-autoscaler"
  namespace = "kube-system"

  # Baseline chart values shipped with this module.
  default_helm_values = templatefile("${path.module}/config/values.yaml", {})

  default_helm_config = {
    name        = local.name
    chart       = local.name
    repository  = "https://kubernetes-sigs.github.io/cluster-proportional-autoscaler"
    version     = var.chart_version
    namespace   = local.namespace
    description = "Cluster Proportional Autoscaler Helm Chart"
    values      = local.default_helm_values
  }

  # Caller-supplied helm_config overrides the defaults, but `values` is
  # always a two-element list: module defaults first, user overrides second
  # (later entries win on merge inside Helm).
  helm_config = merge(
    local.default_helm_config,
    var.helm_config,
    {
      values = [
        local.default_helm_values, # Values from config folder
        # try(): var.helm_config defaults to {}, so indexing
        # var.helm_config.values[0] directly made every plan fail unless the
        # caller explicitly passed a values list.
        try(var.helm_config.values[0], "") # Values from the variable
      ]
    }
  )
}
/modules/cluster-proportional-autoscaler/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | helm_config = local.helm_config 4 | manage_via_gitops = var.manage_via_gitops 5 | addon_context = var.addon_context 6 | } 7 | -------------------------------------------------------------------------------- /modules/cluster-proportional-autoscaler/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? { enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/cluster-proportional-autoscaler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for cluster proportional autoscaler" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 
9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | }) 26 | } 27 | 28 | variable "chart_version" { 29 | description = "Helm chart version for the cluster-proportional-autoscaler chart" 30 | type = string 31 | default = "1.1.0" 32 | } 33 | -------------------------------------------------------------------------------- /modules/cluster-proportional-autoscaler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/core-dns-hpa/config/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: "1.11.1" 3 | description: A Helm chart that adds an HPA for CoreDNS 4 | name: corednshpa 5 | version: 1.31.0 6 | -------------------------------------------------------------------------------- /modules/core-dns-hpa/config/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: core-dns-hpa-cpu-memory 5 | namespace: kube-system 6 | spec: 7 | scaleTargetRef: 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | name: {{ .Values.corednsdeploymentname}} 11 | minReplicas: {{ .Values.minReplicas}} 12 | maxReplicas: {{ .Values.maxReplicas}} 13 | metrics: 14 | - type: Resource 15 | resource: 16 | name: cpu 17 | target: 18 | type: Utilization 19 | averageUtilization: {{ .Values.targetCPUUtilizationPercentage}} 20 | - type: Resource 21 |
resource: 22 | name: memory 23 | target: 24 | type: Utilization 25 | averageUtilization: {{ .Values.targetMemoryUtilizationPercentage}} 26 | -------------------------------------------------------------------------------- /modules/core-dns-hpa/config/values.yaml: -------------------------------------------------------------------------------- 1 | corednsdeploymentname: "coredns" 2 | minReplicas: 2 3 | maxReplicas: 10 4 | targetCPUUtilizationPercentage: 80 5 | targetMemoryUtilizationPercentage: 80 6 | 7 | affinity: 8 | nodeAffinity: 9 | requiredDuringSchedulingIgnoredDuringExecution: 10 | nodeSelectorTerms: 11 | - matchExpressions: 12 | - key: "Addons-Services" 13 | operator: In 14 | values: 15 | - "true" 16 | 17 | resources: 18 | limits: 19 | cpu: 300m 20 | memory: 200Mi 21 | requests: 22 | cpu: 100m 23 | memory: 100Mi 24 | -------------------------------------------------------------------------------- /modules/core-dns-hpa/main.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "coredns-hpa" { 2 | name = "corednshpa" 3 | namespace = "kube-system" 4 | chart = "${path.module}/config" 5 | timeout = 600 6 | values = concat( 7 | [file("${path.module}/config/values.yaml")], var.helm_config.values 8 | ) 9 | } 10 | -------------------------------------------------------------------------------- /modules/core-dns-hpa/output.tf: -------------------------------------------------------------------------------- 1 | output "helm_release_status" { 2 | description = "Status of the Helm release" 3 | value = helm_release.coredns-hpa.status 4 | } 5 | -------------------------------------------------------------------------------- /modules/core-dns-hpa/variable.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Path to the values.yaml file that overwrites the default values" 3 | type = any 4 | default = {} 5 | } 6 | 
-------------------------------------------------------------------------------- /modules/core-dns-hpa/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.0" 3 | 4 | required_providers { 5 | helm = { 6 | source = "hashicorp/helm" 7 | version = ">= 1.0.0" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/defectdojo/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /modules/defectdojo/Chart.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: mysql 3 | repository: https://charts.bitnami.com/bitnami 4 | version: 9.1.8 5 | - name: postgresql 6 | repository: https://charts.bitnami.com/bitnami 7 | version: 11.6.26 8 | - name: postgresql-ha 9 | repository: https://charts.bitnami.com/bitnami 10 | version: 9.1.9 11 | - name: rabbitmq 12 | repository: https://charts.bitnami.com/bitnami 13 | version: 11.2.2 14 | - name: redis 15 | repository: https://charts.bitnami.com/bitnami 16 | version: 16.12.3 17 | digest: sha256:f53ebb0cea44dfbb72ac96ae98680848acd5e17a0947a728e5646460d0da4ef9 18 | generated: "2023-03-06T17:08:53.379497544Z" 19 | -------------------------------------------------------------------------------- /modules/defectdojo/Chart.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: "2.24.4" 3 | description: A Helm chart for Kubernetes to install DefectDojo 4 | name: defectdojo 5 | version: 1.6.78 6 | icon: https://www.defectdojo.org/img/favicon.ico 7 | maintainers: 8 | - name: madchap 9 | email: defectdojo-project@owasp.org 10 | url: https://github.com/DefectDojo/django-DefectDojo 11 | dependencies: 12 | - name: mysql 13 | version: ~9.1.7 14 | repository: "@bitnami" 15 | condition: mysql.enabled 16 | - name: postgresql 17 | version: ~11.6.5 18 | repository: "@bitnami" 19 | condition: postgresql.enabled 20 | - name: postgresql-ha 21 | version: ~9.1.5 22 | repository: "@bitnami" 23 | alias: postgresqlha 24 | condition: postgresqlha.enabled 25 | - name: rabbitmq 26 | version: ~11.2.0 27 | repository: "@bitnami" 28 | condition: rabbitmq.enabled 29 | - name: redis 30 | version: ~16.12.0 31 | repository: "@bitnami" 32 | condition: redis.enabled 33 | -------------------------------------------------------------------------------- /modules/defectdojo/charts/mysql-9.1.8.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/defectdojo/charts/mysql-9.1.8.tgz -------------------------------------------------------------------------------- /modules/defectdojo/charts/postgresql-11.6.26.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/defectdojo/charts/postgresql-11.6.26.tgz -------------------------------------------------------------------------------- /modules/defectdojo/charts/postgresql-ha-9.1.9.tgz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/defectdojo/charts/postgresql-ha-9.1.9.tgz -------------------------------------------------------------------------------- /modules/defectdojo/charts/rabbitmq-11.2.2.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/defectdojo/charts/rabbitmq-11.2.2.tgz -------------------------------------------------------------------------------- /modules/defectdojo/charts/redis-16.12.3.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/defectdojo/charts/redis-16.12.3.tgz -------------------------------------------------------------------------------- /modules/defectdojo/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "defectdojo.fullname" . -}} 2 | DefectDojo has been installed. 3 | {{ if .Values.django.ingress.enabled }} 4 | To use it, go to . 5 | {{ else }} 6 | To be able to access it, set up an ingress or access the service directly by 7 | running the following command: 8 | 9 | kubectl port-forward --namespace={{ .Release.Namespace }} \ 10 | service/{{ .Release.Name }}-django 8080:80 11 | {{ if ne "localhost" .Values.host }} 12 | As you set your host value to {{ .Values.host }}, make sure that it resolves to 13 | the localhost IP address, e.g. by adding the following two lines to /etc/hosts: 14 | 15 | ::1 {{ .Values.host }} 16 | 127.0.0.1 {{ .Values.host }} 17 | {{ end }} 18 | To access DefectDojo, go to . 19 | {{ end }} 20 | Log in with username {{ .Values.admin.user}}. 
21 | To find out the password, run the following command: 22 | 23 | echo "DefectDojo {{ .Values.admin.user}} password: $(kubectl \ 24 | get secret {{ $fullName }} \ 25 | --namespace={{ .Release.Namespace }} \ 26 | --output jsonpath='{.data.DD_ADMIN_PASSWORD}' \ 27 | | base64 --decode)" 28 | -------------------------------------------------------------------------------- /modules/defectdojo/templates/django-service.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "defectdojo.fullname" . -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ $fullName }}-django 6 | labels: 7 | defectdojo.org/component: django 8 | app.kubernetes.io/name: {{ include "defectdojo.name" . }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | helm.sh/chart: {{ include "defectdojo.chart" . }} 12 | spec: 13 | selector: 14 | defectdojo.org/component: django 15 | app.kubernetes.io/name: {{ include "defectdojo.name" . }} 16 | app.kubernetes.io/instance: {{ .Release.Name }} 17 | ports: 18 | - name: http 19 | protocol: TCP 20 | {{- if .Values.django.nginx.tls.enabled }} 21 | port: 443 22 | targetPort: 8443 23 | {{- else }} 24 | port: 80 25 | targetPort: 8080 26 | {{- end }} 27 | {{- if .Values.django.serviceType }} 28 | type: {{ .Values.django.serviceType }} 29 | {{- end }} 30 | {{- if .Values.gke.useGKEIngress }} 31 | type: NodePort 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /modules/defectdojo/templates/extra-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.extraSecrets -}} 2 | {{- $fullName := include "defectdojo.fullname" . -}} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: {{ $fullName }}-extrasecrets 7 | labels: 8 | app.kubernetes.io/name: {{ include "defectdojo.name" . 
}} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | helm.sh/chart: {{ include "defectdojo.chart" . }} 12 | type: Opaque 13 | data: 14 | {{- range $key, $value := .Values.extraSecrets }} 15 | {{ $key | indent 2}}: {{ $value | b64enc }} 16 | {{- end }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /modules/defectdojo/templates/gke-managed-certificate.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.gke.useManagedCertificate }} 2 | {{- $fullName := include "defectdojo.fullname" . -}} 3 | apiVersion: networking.gke.io/v1 4 | kind: ManagedCertificate 5 | metadata: 6 | name: {{ $fullName }}-django 7 | spec: 8 | domains: 9 | - {{ .Values.host }} 10 | {{- end }} 11 | -------------------------------------------------------------------------------- /modules/defectdojo/templates/media-pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "django.pvc_name" $ -}} 2 | {{ with .Values.django.mediaPersistentVolume }} 3 | {{- if and .enabled (eq .type "pvc") .persistentVolumeClaim.create }} 4 | apiVersion: v1 5 | kind: PersistentVolumeClaim 6 | metadata: 7 | labels: 8 | defectdojo.org/component: django 9 | app.kubernetes.io/name: {{ include "defectdojo.name" $ }} 10 | app.kubernetes.io/instance: {{ $.Release.Name }} 11 | app.kubernetes.io/managed-by: {{ $.Release.Service }} 12 | helm.sh/chart: {{ include "defectdojo.chart" $ }} 13 | name: {{ $fullName }} 14 | spec: 15 | accessModes: 16 | {{- toYaml .persistentVolumeClaim.accessModes |nindent 4 }} 17 | resources: 18 | requests: 19 | storage: {{ .persistentVolumeClaim.size }} 20 | {{- if .persistentVolumeClaim.storage_class_name }} 21 | storage_class_name: {{ .persistentVolumeClaim.storage_class_name }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 
-------------------------------------------------------------------------------- /modules/defectdojo/templates/sa.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "defectdojo.fullname" . -}} 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: {{ $fullName }} 6 | labels: 7 | app.kubernetes.io/name: {{ include "defectdojo.name" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | helm.sh/chart: {{ include "defectdojo.chart" . }} 11 | annotations: 12 | helm.sh/resource-policy: keep 13 | helm.sh/hook: "pre-install" 14 | helm.sh/hook-delete-policy: "before-hook-creation" 15 | {{- with .Values.annotations }} 16 | {{ toYaml . | nindent 4 }} 17 | {{- end }} 18 | {{- if ne .Values.gke.workloadIdentityEmail "" }} 19 | iam.gke.io/gcp-service-account: {{ .Values.gke.workloadIdentityEmail }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /modules/defectdojo/templates/secret-postgresql-ha-pgpool.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.createPostgresqlHaPgpoolSecret -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ .Values.postgresqlha.global.pgpool.existingSecret }} 6 | labels: 7 | app.kubernetes.io/name: {{ include "defectdojo.name" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | helm.sh/chart: {{ include "defectdojo.chart" . 
}} 11 | annotations: 12 | helm.sh/resource-policy: keep 13 | helm.sh/hook: "pre-install" 14 | helm.sh/hook-delete-policy: "before-hook-creation" 15 | type: Opaque 16 | data: 17 | {{- if .Values.postgresqlha.pgpool.adminPassword }} 18 | admin-password: {{ .Values.postgresqlha.pgpool.adminPassword | b64enc | quote }} 19 | {{- else }} 20 | {{- $pgpoolRandomPassword := randAlphaNum 16 | b64enc | quote }} 21 | admin-password: {{ $pgpoolRandomPassword }} 22 | {{- end }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /modules/defectdojo/templates/secret-rabbitmq.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.createRabbitMqSecret -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ .Values.rabbitmq.auth.existingPasswordSecret }} 6 | labels: 7 | app.kubernetes.io/name: {{ include "defectdojo.name" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | helm.sh/chart: {{ include "defectdojo.chart" . 
}} 11 | annotations: 12 | helm.sh/resource-policy: keep 13 | helm.sh/hook: "pre-install" 14 | helm.sh/hook-delete-policy: "before-hook-creation" 15 | type: Opaque 16 | data: 17 | {{- if .Values.rabbitmq.auth.password }} 18 | rabbitmq-password: {{ .Values.rabbitmq.auth.password | b64enc | quote }} 19 | {{- else }} 20 | rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} 21 | {{- end}} 22 | {{- if .Values.rabbitmq.auth.erlangCookie }} 23 | rabbitmq-erlang-cookie: {{ .Values.rabbitmq.auth.erlangCookie | b64enc | quote }} 24 | {{- else }} 25 | rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} 26 | {{- end }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /modules/defectdojo/templates/secret-redis.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.createRedisSecret -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ .Values.redis.auth.existingSecret }} 6 | labels: 7 | app.kubernetes.io/name: {{ include "defectdojo.name" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | helm.sh/chart: {{ include "defectdojo.chart" . 
}} 11 | annotations: 12 | helm.sh/resource-policy: keep 13 | helm.sh/hook: "pre-install" 14 | helm.sh/hook-delete-policy: "before-hook-creation" 15 | type: Opaque 16 | data: 17 | {{- if .Values.redis.auth.password }} 18 | {{ .Values.redis.auth.existingSecretPasswordKey }}: {{ .Values.redis.auth.password | b64enc | quote }} 19 | {{- else }} 20 | {{ .Values.redis.auth.existingSecretPasswordKey }}: {{ randAlphaNum 10 | b64enc | quote }} 21 | {{- end }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /modules/external-secret/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "external_secrets" { 2 | statement { 3 | actions = ["ssm:GetParameter"] 4 | resources = var.external_secrets_ssm_parameter_arns 5 | } 6 | 7 | statement { 8 | actions = [ 9 | "secretsmanager:GetResourcePolicy", 10 | "secretsmanager:GetSecretValue", 11 | "secretsmanager:DescribeSecret", 12 | "secretsmanager:ListSecretVersionIds", 13 | ] 14 | resources = var.external_secrets_secrets_manager_arns 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /modules/external-secret/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | manage_via_gitops = var.manage_via_gitops 4 | set_values = local.set_values 5 | helm_config = local.helm_config 6 | irsa_config = local.irsa_config 7 | addon_context = var.addon_context 8 | } 9 | 10 | resource "aws_iam_policy" "external_secrets" { 11 | name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa" 12 | path = var.addon_context.irsa_iam_role_path 13 | description = "Provides permissions to for External Secrets to retrieve secrets from AWS SSM and AWS Secrets Manager" 14 | policy = data.aws_iam_policy_document.external_secrets.json 15 | } 16 | 
-------------------------------------------------------------------------------- /modules/external-secret/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/external-secret/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/falco/main.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_namespace" "falco" { 2 | count = var.falco_enabled ? 1 : 0 3 | metadata { 4 | name = "falco" 5 | } 6 | } 7 | 8 | resource "helm_release" "falco" { 9 | count = var.falco_enabled ? 
1 : 0 10 | depends_on = [kubernetes_namespace.falco] 11 | name = "falco" 12 | namespace = "falco" 13 | chart = "falco" 14 | repository = "https://falcosecurity.github.io/charts" 15 | timeout = 600 16 | version = var.chart_version 17 | values = [ 18 | templatefile("${path.module}/values.yaml", { 19 | slack_webhook = var.slack_webhook 20 | }) 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /modules/falco/output.tf: -------------------------------------------------------------------------------- 1 | output "falco_namespace" { 2 | # Resource uses count, so it must be indexed; try() yields null when falco_enabled = false. 3 | value = try(kubernetes_namespace.falco[0].metadata[0].name, null) 4 | description = "The namespace where Falco is deployed" 5 | } 6 | 7 | output "falco_release" { 8 | # Resource uses count, so it must be indexed; try() yields null when falco_enabled = false. 9 | value = try(helm_release.falco[0].name, null) 10 | description = "The Helm release name for Falco" 11 | } 12 | -------------------------------------------------------------------------------- /modules/falco/variable.tf: -------------------------------------------------------------------------------- 1 | variable "falco_enabled" { 2 | description = "Enable or disable Falco deployment" 3 | type = bool 4 | default = true 5 | } 6 | 7 | variable "slack_webhook" { 8 | description = "Slack webhook URL for Falco alerts" 9 | type = string 10 | default = "" 11 | } 12 | 13 | # Renamed from "version": that name is reserved for meta-arguments in Terraform >= 0.12 14 | # and fails validation; "chart_version" matches the sibling addon modules. 15 | variable "chart_version" { 16 | description = "Helm Chart version of Falco" 17 | type = string 18 | default = "" 19 | } 20 | -------------------------------------------------------------------------------- /modules/falco/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = "hashicorp/kubernetes" 7 | version = ">= 1.11.1" 8 | } 9 | helm = { 10 | source = "hashicorp/helm" 11 | version = ">= 1.0.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/helm-addon/outputs.tf:
-------------------------------------------------------------------------------- 1 | output "helm_release" { 2 | description = "Map of attributes of the Helm release created without sensitive outputs" 3 | value = try({ for k, v in helm_release.addon : k => v if k != "repository_password" }, {}) 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = try(helm_release.addon[0].metadata, null) 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = try(module.irsa[0].irsa_iam_role_arn, null) 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = try(module.irsa[0].irsa_iam_role_name, null) 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = try(coalesce(try(module.irsa[0].service_account, null), lookup(var.irsa_config, "kubernetes_service_account", null)), null) 24 | } 25 | -------------------------------------------------------------------------------- /modules/helm-addon/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm chart config. Repository and version required. 
See https://registry.terraform.io/providers/hashicorp/helm/latest/docs" 3 | type = any 4 | } 5 | 6 | variable "set_values" { 7 | description = "Forced set values" 8 | type = any 9 | default = [] 10 | } 11 | 12 | variable "set_sensitive_values" { 13 | description = "Forced set_sensitive values" 14 | type = any 15 | default = [] 16 | } 17 | 18 | variable "manage_via_gitops" { 19 | description = "Determines if the add-on should be managed via GitOps" 20 | type = bool 21 | default = false 22 | } 23 | 24 | variable "irsa_iam_role_name" { 25 | description = "IAM role name for IRSA" 26 | type = string 27 | default = "" 28 | } 29 | 30 | variable "irsa_config" { 31 | description = "Input configuration for IRSA module" 32 | type = any 33 | default = {} 34 | } 35 | 36 | variable "addon_context" { 37 | description = "Input configuration for the addon" 38 | type = any 39 | } 40 | -------------------------------------------------------------------------------- /modules/helm-addon/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | helm = { 6 | source = "hashicorp/helm" 7 | version = ">= 2.4.1" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/ingress-nginx/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
{ enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/ingress-nginx/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = "hashicorp/kubernetes" 7 | version = ">= 2.10" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/irsa/outputs.tf: -------------------------------------------------------------------------------- 1 | output "irsa_iam_role_arn" { 2 | description = "IAM role ARN for your service account" 3 | value = try(aws_iam_role.irsa[0].arn, null) 4 | } 5 | 6 | output "irsa_iam_role_name" { 7 | description = "IAM role name for your service account" 8 | value = try(aws_iam_role.irsa[0].name, null) 9 | } 10 | 11 | output "namespace" { 12 | description = "IRSA Namespace" 13 | value = try(kubernetes_namespace_v1.irsa[0].id, var.kubernetes_namespace) 14 | } 15 | 16 | output "service_account" { 17 | description = "IRSA Service Account" 18 | value = try(kubernetes_service_account_v1.irsa[0].id, var.kubernetes_service_account) 19 | } 20 | -------------------------------------------------------------------------------- /modules/irsa/versions.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | kubernetes = { 10 | source = "hashicorp/kubernetes" 11 | version = ">= 2.10" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/karpenter/config/karpenter.yaml: -------------------------------------------------------------------------------- 1 | nodeSelector: 2 | kubernetes.io/os: linux 3 | 4 | settings: 5 | clusterName: ${eks_cluster_id} 6 | clusterEndpoint: ${eks_cluster_endpoint} 7 | eksControlPlane: false 8 | featureGates: 9 | spotToSpotConsolidation: true 10 | nodeRepair: true 11 | 12 | controller: 13 | containerName: controller 14 | resources: 15 | requests: 16 | cpu: 50m 17 | memory: 200Mi 18 | limits: 19 | cpu: 100m 20 | memory: 400Mi 21 | 22 | podAnnotations: 23 | co.elastic.logs/enabled: "true" 24 | 25 | affinity: 26 | nodeAffinity: 27 | requiredDuringSchedulingIgnoredDuringExecution: 28 | nodeSelectorTerms: 29 | - matchExpressions: 30 | - key: "Addons-Services" 31 | operator: In 32 | values: 33 | - "true" 34 | 35 | serviceMonitor: 36 | # -- Specifies whether a ServiceMonitor should be created. 37 | enabled: ${enable_service_monitor} 38 | 39 | service: 40 | # -- Additional annotations for the Service. 41 | annotations: {} 42 | 43 | schedulerName: default-scheduler 44 | -------------------------------------------------------------------------------- /modules/karpenter/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/karpenter/scripts/patch_karpenter_crds.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "Patching Karpenter CRDs with Helm labels and annotations..." 6 | 7 | # Define CRD names 8 | CRDS=( 9 | "ec2nodeclasses.karpenter.k8s.aws" 10 | "nodepools.karpenter.sh" 11 | "nodeclaims.karpenter.sh" 12 | ) 13 | 14 | # Define Helm release name (should match Terraform Helm release) 15 | HELM_RELEASE="karpenter-crd" # Make sure this matches your Helm release name 16 | KARPENTER_NAMESPACE="default" # Change if using a different namespace 17 | 18 | # Apply Helm labels and annotations if the CRD exists 19 | for CRD in "${CRDS[@]}"; do 20 | if kubectl get crd "$CRD" > /dev/null 2>&1; then 21 | echo "Patching $CRD..." 22 | kubectl label crd "$CRD" app.kubernetes.io/managed-by=Helm --overwrite 23 | kubectl annotate crd "$CRD" meta.helm.sh/release-name="$HELM_RELEASE" --overwrite 24 | kubectl annotate crd "$CRD" meta.helm.sh/release-namespace="$KARPENTER_NAMESPACE" --overwrite 25 | else 26 | echo "CRD $CRD does not exist. Skipping..." 27 | fi 28 | done 29 | 30 | echo "Karpenter CRDs patching completed!" 
31 | -------------------------------------------------------------------------------- /modules/karpenter/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | helm = { 10 | source = "hashicorp/helm" 11 | version = ">= 2.0.0, < 4.0.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/keda/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | manage_via_gitops = var.manage_via_gitops 4 | set_values = local.set_values 5 | helm_config = local.helm_config 6 | irsa_config = local.irsa_config 7 | addon_context = var.addon_context 8 | } 9 | 10 | resource "aws_iam_policy" "keda_irsa" { 11 | description = "KEDA IAM role policy for SQS and CloudWatch" 12 | name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa" 13 | path = var.addon_context.irsa_iam_role_path 14 | policy = data.aws_iam_policy_document.keda_irsa.json 15 | tags = var.addon_context.tags 16 | } 17 | -------------------------------------------------------------------------------- /modules/keda/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/keda/values.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | limits: 3 | cpu: 200m 4 | memory: 100Mi 5 | requests: 6 | cpu: 100m 7 | memory: 50Mi 8 | 9 | nodeSelector: 10 | kubernetes.io/os: linux 11 | -------------------------------------------------------------------------------- /modules/keda/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/kubernetes-dashboard/outputs.tf: -------------------------------------------------------------------------------- 1 | output "k8s-dashboard-admin-token" { 2 | value = nonsensitive(kubernetes_secret_v1.admin-user.data.token) 3 | } 4 | 5 | output "k8s-dashboard-read-only-token" { 6 | value = nonsensitive(kubernetes_secret_v1.dashboard_read_only_sa_token.data.token) 7 | } 8 | -------------------------------------------------------------------------------- /modules/kubernetes-dashboard/versions.tf: -------------------------------------------------------------------------------- 1 
| terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = "hashicorp/kubernetes" 7 | version = ">= 2.10" 8 | } 9 | helm = { 10 | source = "hashicorp/helm" 11 | version = "~> 2.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/metrics-server-vpa/README.md: -------------------------------------------------------------------------------- 1 | # metrics-server-vpa 2 | 3 | 4 | ## Requirements 5 | 6 | No requirements. 7 | 8 | ## Providers 9 | 10 | | Name | Version | 11 | |------|---------| 12 | | [helm](#provider\_helm) | n/a | 13 | 14 | ## Modules 15 | 16 | No modules. 17 | 18 | ## Resources 19 | 20 | | Name | Type | 21 | |------|------| 22 | | [helm_release.metrics-server-vpa](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | 23 | 24 | ## Inputs 25 | 26 | | Name | Description | Type | Default | Required | 27 | |------|-------------|------|---------|:--------:| 28 | | [metrics\_server\_vpa\_config](#input\_metrics\_server\_vpa\_config) | Configuration to provide settings of vpa over metrics server | `any` |
<pre>{<br>  "maxCPU": "100m",<br>  "maxMemory": "500Mi",<br>  "metricsServerDeploymentName": "metrics-server",<br>  "minCPU": "25m",<br>  "minMemory": "150Mi"<br>}</pre>
| no | 29 | 30 | ## Outputs 31 | 32 | No outputs. 33 | 34 | -------------------------------------------------------------------------------- /modules/metrics-server-vpa/config/values.yaml: -------------------------------------------------------------------------------- 1 | metricsServerDeploymentName: ${metricsServerDeploymentName} 2 | minCPU: ${minCPU} 3 | minMemory: ${minMemory} 4 | maxCPU: ${maxCPU} 5 | maxMemory: ${maxMemory} 6 | -------------------------------------------------------------------------------- /modules/metrics-server-vpa/main.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "metrics-server-vpa" { 2 | name = "metricsservervpa" 3 | namespace = "kube-system" 4 | chart = "${path.module}/metrics-server-vpa/" 5 | timeout = 600 6 | values = [ 7 | templatefile("${path.module}/config/values.yaml", { 8 | minCPU = var.metrics_server_vpa_config.minCPU, 9 | minMemory = var.metrics_server_vpa_config.minMemory, 10 | maxCPU = var.metrics_server_vpa_config.maxCPU, 11 | maxMemory = var.metrics_server_vpa_config.maxMemory, 12 | metricsServerDeploymentName = var.metrics_server_vpa_config.metricsServerDeploymentName 13 | }) 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /modules/metrics-server-vpa/metrics-server-vpa/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: "1.0" 3 | description: A Helm chart add vpa on metrics-server 4 | name: metricsservervpa 5 | version: 1.0.0 6 | -------------------------------------------------------------------------------- /modules/metrics-server-vpa/metrics-server-vpa/templates/vpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "autoscaling.k8s.io/v1" 2 | kind: VerticalPodAutoscaler 3 | metadata: 4 | name: metrics-server-vpa 5 | namespace: kube-system 6 | spec: 7 | targetRef: 8 | 
apiVersion: "apps/v1" 9 | kind: Deployment 10 | name: {{ .Values.metricsServerDeploymentName}} 11 | updatePolicy: 12 | updateMode: "Auto" 13 | resourcePolicy: 14 | containerPolicies: 15 | - containerName: '*' 16 | minAllowed: 17 | cpu: {{ .Values.minCPU}} 18 | memory: {{ .Values.minMemory}} 19 | maxAllowed: 20 | cpu: {{ .Values.maxCPU}} 21 | memory: {{ .Values.maxMemory}} 22 | controlledResources: ["cpu", "memory"] 23 | -------------------------------------------------------------------------------- /modules/metrics-server-vpa/variables.tf: -------------------------------------------------------------------------------- 1 | variable "metrics_server_vpa_config" { 2 | description = "Configuration to provide settings of vpa over metrics server" 3 | default = { 4 | 5 | minCPU = "25m" 6 | maxCPU = "100m" 7 | minMemory = "150Mi" 8 | maxMemory = "500Mi" 9 | metricsServerDeploymentName = "metrics-server" 10 | } 11 | type = any 12 | } 13 | -------------------------------------------------------------------------------- /modules/metrics-server/config/metrics_server.yaml: -------------------------------------------------------------------------------- 1 | ## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" 2 | 3 | affinity: 4 | nodeAffinity: 5 | requiredDuringSchedulingIgnoredDuringExecution: 6 | nodeSelectorTerms: 7 | - matchExpressions: 8 | - key: "Addons-Services" 9 | operator: In 10 | values: 11 | - "true" 12 | 13 | ## Particular args to be passed in deployment 14 | 15 | extraArgs: 16 | - --kubelet-preferred-address-types=InternalIP 17 | - --v=2 18 | 19 | apiService: 20 | create: true 21 | 22 | ## Using limits and requests 23 | 24 | resources: 25 | limits: 26 | cpu: 60m 27 | memory: 200Mi 28 | requests: 29 | cpu: 30m 30 | memory: 100Mi 31 | 32 | podAnnotations: 33 | co.elastic.logs/enabled: "true" 34 | 35 | replicas: 2 36 | -------------------------------------------------------------------------------- 
/modules/metrics-server/locals.tf:
--------------------------------------------------------------------------------
# Local configuration for the metrics-server Helm add-on.
locals {
  name = "metrics-server"

  # https://github.com/kubernetes-sigs/metrics-server/blob/master/charts/metrics-server/Chart.yaml
  default_helm_config = {
    name       = local.name
    chart      = local.name
    repository = "https://kubernetes-sigs.github.io/metrics-server/"
    # FIX: var.helm_config defaults to {}, so a bare var.helm_config.version
    # lookup errors out when the caller supplies nothing. try() falls back to
    # null (= latest chart version) while preserving caller overrides.
    version     = try(var.helm_config.version, null)
    namespace   = "kube-system"
    description = "Metric server helm Chart deployment configuration"
  }

  helm_config = merge(
    local.default_helm_config,
    var.helm_config,
    {
      # Module-bundled values come first; caller-supplied values (if any) are
      # appended so they take precedence. FIX: try() avoids an index error on
      # var.helm_config.values[0] when no values are passed.
      values = concat(
        [file("${path.module}/config/metrics_server.yaml")],
        try([var.helm_config.values[0]], [])
      )
    }
  )

  argocd_gitops_config = {
    enable = true
  }
}
--------------------------------------------------------------------------------
/modules/metrics-server/main.tf:
--------------------------------------------------------------------------------
# Deploy metrics-server via the shared helm-addon wrapper module.
module "helm_addon" {
  source = "../helm-addon"

  manage_via_gitops = var.manage_via_gitops
  helm_config       = local.helm_config
  addon_context     = var.addon_context

  # Ensure a custom namespace (if requested) exists before the release installs.
  depends_on = [kubernetes_namespace_v1.this]
}

# Create the target namespace only when it is not kube-system and the caller
# has not disabled namespace creation.
resource "kubernetes_namespace_v1" "this" {
  count = try(local.helm_config["create_namespace"], true) && local.helm_config["namespace"] != "kube-system" ? 1 : 0

  metadata {
    name = local.helm_config["namespace"]
  }
}
--------------------------------------------------------------------------------
/modules/metrics-server/outputs.tf:
--------------------------------------------------------------------------------
output "argocd_gitops_config" {
  description = "Configuration used for managing the add-on with ArgoCD"
  value       = var.manage_via_gitops ?
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/metrics-server/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for Metrics Server" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps" 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | }) 26 | } 27 | 28 | variable "addon_version" { 29 | description = "Helm Chart version for Metrics Server" 30 | type = string 31 | default = "" 32 | } 33 | -------------------------------------------------------------------------------- /modules/metrics-server/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = 
"hashicorp/kubernetes" 7 | version = ">= 2.10" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/reloader/config/reloader.yaml: -------------------------------------------------------------------------------- 1 | reloader: 2 | deployment: 3 | affinity: 4 | nodeAffinity: 5 | requiredDuringSchedulingIgnoredDuringExecution: 6 | nodeSelectorTerms: 7 | - matchExpressions: 8 | - key: "Addons-Services" 9 | operator: In 10 | values: 11 | - "true" 12 | 13 | resources: 14 | limits: 15 | cpu: "100m" 16 | memory: "240Mi" 17 | requests: 18 | cpu: "50m" 19 | memory: "120Mi" 20 | 21 | pod: 22 | annotations: 23 | co.elastic.logs/enabled: "true" 24 | 25 | serviceMonitor: 26 | enabled: ${enable_service_monitor} 27 | -------------------------------------------------------------------------------- /modules/reloader/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = "reloader" 3 | 4 | argocd_gitops_config = { 5 | enable = true 6 | serviceAccountName = local.name 7 | } 8 | 9 | template_values = templatefile("${path.module}/config/reloader.yaml", { 10 | enable_service_monitor = var.helm_config.enable_service_monitor 11 | }) 12 | } 13 | 14 | module "helm_addon" { 15 | source = "../helm-addon" 16 | 17 | # https://github.com/stakater/Reloader/blob/master/deployments/kubernetes/chart/reloader/Chart.yaml 18 | helm_config = merge( 19 | { 20 | name = local.name 21 | chart = local.name 22 | repository = "https://stakater.github.io/stakater-charts" 23 | version = var.addon_version 24 | namespace = local.name 25 | create_namespace = true 26 | description = "Reloader Helm Chart deployment configuration" 27 | }, 28 | var.helm_config, 29 | { 30 | values = [local.template_values, var.helm_config.values[0]] 31 | } 32 | ) 33 | 34 | manage_via_gitops = var.manage_via_gitops 35 | addon_context = var.addon_context 36 | } 37 | 
-------------------------------------------------------------------------------- /modules/reloader/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/reloader/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for Reloader." 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 
9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | }) 28 | } 29 | 30 | variable "addon_version" { 31 | description = "reloader helm chart version" 32 | type = string 33 | default = "" 34 | } 35 | -------------------------------------------------------------------------------- /modules/reloader/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/service-monitor-crd/README.md: -------------------------------------------------------------------------------- 1 | # service_monitor_crd 2 | 3 | 4 | ## Requirements 5 | 6 | | Name | Version | 7 | |------|---------| 8 | | [terraform](#requirement\_terraform) | >= 0.12.26 | 9 | | [helm](#requirement\_helm) | ~> 2.0 | 10 | 11 | ## Providers 12 | 13 | | Name | Version | 14 | |------|---------| 15 | | [helm](#provider\_helm) | ~> 2.0 | 16 | 17 | ## Modules 18 | 19 | No modules. 20 | 21 | ## Resources 22 | 23 | | Name | Type | 24 | |------|------| 25 | | [helm_release.service-monitor-crd](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | 26 | 27 | ## Inputs 28 | 29 | No inputs. 30 | 31 | ## Outputs 32 | 33 | No outputs. 
34 | 35 | -------------------------------------------------------------------------------- /modules/service-monitor-crd/main.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "service-monitor-crd" { 2 | name = "service-monitor-crd" 3 | chart = "${path.module}/service_monitor/" 4 | timeout = 600 5 | } 6 | -------------------------------------------------------------------------------- /modules/service-monitor-crd/service_monitor/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /modules/service-monitor-crd/service_monitor/values.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/service-monitor-crd/service_monitor/values.yaml -------------------------------------------------------------------------------- /modules/service-monitor-crd/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.26" 3 | 4 | required_providers { 5 | helm = { 6 | source = "hashicorp/helm" 7 | version = "~> 2.0" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/velero/data.tf: -------------------------------------------------------------------------------- 1 
| data "aws_partition" "current" {} 2 | data "aws_caller_identity" "current" {} 3 | data "aws_region" "current" {} 4 | 5 | resource "time_sleep" "dataplane" { 6 | create_duration = "10s" 7 | 8 | triggers = { 9 | data_plane_wait_arn = var.data_plane_wait_arn # this waits for the data plane to be ready 10 | eks_cluster_id = var.eks_cluster_id # this ties it to downstream resources 11 | } 12 | } 13 | 14 | data "aws_eks_cluster" "eks_cluster" { 15 | name = time_sleep.dataplane.triggers["eks_cluster_id"] # this makes downstream resources wait for data plane to be ready 16 | } 17 | -------------------------------------------------------------------------------- /modules/velero/delete-snapshot.py: -------------------------------------------------------------------------------- 1 | import json 2 | import boto3 3 | import collections 4 | import datetime 5 | 6 | ec = boto3.client('ec2','${region}') 7 | def lambda_handler(event, context): 8 | reservations = ec.describe_snapshots( Filters=[ {'Name': 'tag-key', 'Values': ['velero.io/backup']},] ) 9 | print(reservations) 10 | now = datetime.datetime.today().strftime('%Y%m%d') 11 | print (now) 12 | current = int(now) 13 | retention = ${retention_period_in_days} 14 | for snapshot in reservations['Snapshots']: 15 | print ("Checking snapshot %s which was created on %s" % (snapshot['SnapshotId'],snapshot['StartTime'])) 16 | snapshotDate = snapshot['StartTime'].strftime('%Y%m%d') 17 | print(snapshotDate) 18 | snaptime = int(snapshotDate) 19 | print (snaptime) 20 | delete_date = (current - snaptime) 21 | print (delete_date) 22 | if delete_date > retention: 23 | print ("The snapshot is older than retention days. 
Deleting Now") 24 | ec.delete_snapshot(SnapshotId= snapshot['SnapshotId']) 25 | else: 26 | print ("Snapshot is newer than configured retention of %d days so we keep it" % (retention)) 27 | -------------------------------------------------------------------------------- /modules/velero/delete-snapshot.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/velero/delete-snapshot.zip -------------------------------------------------------------------------------- /modules/velero/velero-data/helm/values.yaml: -------------------------------------------------------------------------------- 1 | initContainers: 2 | - name: velero-plugin-for-aws-2 3 | image: velero/velero-plugin-for-aws:v1.10.0 4 | imagePullPolicy: IfNotPresent 5 | volumeMounts: 6 | - mountPath: /target 7 | name: plugins 8 | 9 | configuration: 10 | backupStorageLocation: 11 | - name: default 12 | provider: aws 13 | bucket: ${bucket} 14 | config: 15 | region: ${region} 16 | volumeSnapshotLocation: 17 | - name: default 18 | provider: aws 19 | config: 20 | region: ${region} 21 | 22 | credentials: 23 | useSecret: false 24 | 25 | podAnnotations: 26 | co.elastic.logs/enabled: "true" 27 | 28 | affinity: 29 | nodeAffinity: 30 | requiredDuringSchedulingIgnoredDuringExecution: 31 | nodeSelectorTerms: 32 | - matchExpressions: 33 | - key: "Addons-Services" 34 | operator: In 35 | values: 36 | - "true" 37 | 38 | resources: 39 | requests: 40 | cpu: 10m 41 | memory: 128Mi 42 | limits: 43 | cpu: 500m 44 | memory: 512Mi 45 | -------------------------------------------------------------------------------- /modules/velero/velero-data/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/velero/velero-data/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | time = { 10 | source = "hashicorp/time" 11 | version = ">= 0.8" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/velero/velero_job/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /modules/velero/velero_job/templates/backup_job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: velero.io/v1 2 | kind: Schedule 3 | metadata: 4 | name: {{ .Values.velero_backup_name }} 5 | namespace: velero 6 | spec: 7 | schedule: "{{ .Values.schedule_cron_time }}" 8 | template: 9 | includedNamespaces: 10 | - '{{ .Values.namespaces }}' 11 | includedResources: 12 | - '*' 13 | includeClusterResources: true 14 | snapshotVolumes: true 15 | storageLocation: default 16 | volumeSnapshotLocations: 17 | - default 18 | ttl: 24h0m0s 19 | status: 20 | phase: "Enabled" 21 | lastBackup: 22 | validationErrors: 23 | -------------------------------------------------------------------------------- /modules/velero/velero_job/values.yaml: -------------------------------------------------------------------------------- 1 | velero_backup_name: cluster_backup 2 | schedule_cron_time: "*/10 * * * *" 3 | namespaces: "*" 4 | -------------------------------------------------------------------------------- /modules/velero/velero_notification/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | registry: ghcr.io 3 | repository: kubeshop/botkube 4 | pullPolicy: IfNotPresent 5 | tag: v1.10.0 6 | 7 | sources: 8 | 'k8s-all-events': 9 | displayName: "Backup Status" 10 | botkube/kubernetes: 11 | config: 12 | namespaces: &k8s-events-namespaces 13 | event: 14 | types: 15 | - all 16 | resources: 17 | - type: velero.io/v1/backups 18 | event: 19 | types: 20 | - all 21 | updateSetting: 22 | includeDiff: true 23 | fields: 24 | - status.phase 
25 | 26 | communications: 27 | 'default-group': 28 | socketSlack: 29 | enabled: true 30 | channels: 31 | 'default': 32 | name: '${slack_channel_name}' 33 | bindings: 34 | executors: 35 | - k8s-default-tools 36 | sources: 37 | - k8s-all-events 38 | botToken: '${slack_botToken}' 39 | appToken: '${slack_appToken}' 40 | 41 | settings: 42 | clusterName: '${cluster_id}' 43 | -------------------------------------------------------------------------------- /modules/velero/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.26" 3 | 4 | required_providers { 5 | template = { 6 | source = "hashicorp/template" 7 | version = "~> 2.2" 8 | } 9 | local = { 10 | source = "hashicorp/local" 11 | version = "~> 2.1" 12 | } 13 | archive = { 14 | source = "hashicorp/archive" 15 | version = "~> 2.0" 16 | } 17 | aws = { 18 | source = "hashicorp/aws" 19 | version = ">= 3.0.0" 20 | } 21 | helm = { 22 | source = "hashicorp/helm" 23 | version = "~> 2.0" 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /modules/vpa-crds/README.md: -------------------------------------------------------------------------------- 1 | # vpa-crds 2 | 3 | 4 | ## Requirements 5 | 6 | | Name | Version | 7 | |------|---------| 8 | | [terraform](#requirement\_terraform) | >= 0.12.26 | 9 | | [helm](#requirement\_helm) | ~> 2.0 | 10 | 11 | ## Providers 12 | 13 | | Name | Version | 14 | |------|---------| 15 | | [helm](#provider\_helm) | ~> 2.0 | 16 | 17 | ## Modules 18 | 19 | No modules. 
20 | 21 | ## Resources 22 | 23 | | Name | Type | 24 | |------|------| 25 | | [helm_release.vpa-crds](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | 26 | 27 | ## Inputs 28 | 29 | | Name | Description | Type | Default | Required | 30 | |------|-------------|------|---------|:--------:| 31 | | [chart\_version](#input\_chart\_version) | chart version for VPA | `string` | `"9.9.0"` | no | 32 | | [helm-config](#input\_helm-config) | vpa config from user end | `any` | `{}` | no | 33 | 34 | ## Outputs 35 | 36 | No outputs. 37 | 38 | -------------------------------------------------------------------------------- /modules/vpa-crds/main.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "vpa-crds" { 2 | name = "vertical-pod-autoscaler" 3 | namespace = "kube-system" 4 | repository = "https://cowboysysop.github.io/charts/" 5 | chart = "vertical-pod-autoscaler" 6 | version = var.chart_version 7 | timeout = 600 8 | values = [ 9 | file("${path.module}/config/values.yaml"), 10 | var.helm-config 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /modules/vpa-crds/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm-config" { 2 | description = "vpa config from user end" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "chart_version" { 8 | description = "chart version for VPA" 9 | type = string 10 | default = "9.9.0" 11 | } 12 | -------------------------------------------------------------------------------- /modules/vpa-crds/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.26" 3 | 4 | required_providers { 5 | helm = { 6 | source = "hashicorp/helm" 7 | version = "~> 2.0" 8 | } 9 | } 10 | } 11 | 
-------------------------------------------------------------------------------- /modules/z-archieve/aws-cloudwatch-metrics/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | manage_via_gitops = var.manage_via_gitops 4 | set_values = local.set_values 5 | helm_config = local.helm_config 6 | irsa_config = local.irsa_config 7 | addon_context = var.addon_context 8 | } 9 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-cloudwatch-metrics/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-cloudwatch-metrics/values.yaml: -------------------------------------------------------------------------------- 1 | clusterName: ${eks_cluster_id} 2 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-cloudwatch-metrics/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config 
aws-cloudwatch-metrics." 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "irsa_policies" { 14 | description = "Additional IAM policies for a IAM role for service accounts" 15 | type = list(string) 16 | default = [] 17 | } 18 | 19 | variable "addon_context" { 20 | description = "Input configuration for the addon" 21 | type = object({ 22 | aws_caller_identity_account_id = string 23 | aws_caller_identity_arn = string 24 | aws_eks_cluster_endpoint = string 25 | aws_partition_id = string 26 | aws_region_name = string 27 | eks_cluster_id = string 28 | eks_oidc_issuer_url = string 29 | eks_oidc_provider_arn = string 30 | tags = map(string) 31 | irsa_iam_role_path = string 32 | irsa_iam_permissions_boundary = string 33 | }) 34 | } 35 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-cloudwatch-metrics/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | kubernetes = { 10 | source = "hashicorp/kubernetes" 11 | version = ">= 2.10" 12 | } 13 | helm = { 14 | source = "hashicorp/helm" 15 | version = ">= 2.4.1" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-coredns/outputs.tf: -------------------------------------------------------------------------------- 1 | output "release_metadata" { 2 | description = "Map of attributes of the Helm release metadata" 3 | value = try(module.helm_addon[0].release_metadata, null) 4 | } 5 | 6 | output "irsa_arn" { 7 | description = "IAM role ARN for the service account" 8 | value = try(module.helm_addon[0].irsa_arn, null) 9 | } 10 | 11 | output 
"irsa_name" { 12 | description = "IAM role name for the service account" 13 | value = try(module.helm_addon[0].irsa_name, null) 14 | } 15 | 16 | output "service_account" { 17 | description = "Name of Kubernetes service account" 18 | value = try(module.helm_addon[0].service_account, null) 19 | } 20 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-coredns/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.10" 8 | } 9 | null = { 10 | source = "hashicorp/null" 11 | version = ">= 3.0" 12 | } 13 | time = { 14 | source = "hashicorp/time" 15 | version = ">= 0.8" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-efs-csi-driver/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
{ 4 | enable = true 5 | serviceAccountName = local.service_account 6 | } : null 7 | } 8 | 9 | output "release_metadata" { 10 | description = "Map of attributes of the Helm release metadata" 11 | value = module.helm_addon.release_metadata 12 | } 13 | 14 | output "irsa_arn" { 15 | description = "IAM role ARN for the service account" 16 | value = module.helm_addon.irsa_arn 17 | } 18 | 19 | output "irsa_name" { 20 | description = "IAM role name for the service account" 21 | value = module.helm_addon.irsa_name 22 | } 23 | 24 | output "service_account" { 25 | description = "Name of Kubernetes service account" 26 | value = module.helm_addon.service_account 27 | } 28 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-efs-csi-driver/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-kube-proxy/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = "kube-proxy" 3 | } 4 | 5 | data "aws_eks_addon_version" "this" { 6 | addon_name = local.name 7 | kubernetes_version = var.addon_config.kubernetes_version 8 | most_recent = try(var.addon_config.most_recent, false) 9 | } 10 | 11 | resource "aws_eks_addon" "kube_proxy" { 12 | cluster_name = var.addon_context.eks_cluster_id 13 | addon_name = local.name 14 | addon_version = try(var.addon_config.addon_version, data.aws_eks_addon_version.this.version) 15 | resolve_conflicts = try(var.addon_config.resolve_conflicts, "OVERWRITE") 16 | service_account_role_arn = try(var.addon_config.service_account_role_arn, null) 17 | preserve = try(var.addon_config.preserve, true) 18 | 19 | tags = merge( 20 | var.addon_context.tags, 
21 | try(var.addon_config.tags, {}) 22 | ) 23 | } 24 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-kube-proxy/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/squareops/terraform-aws-eks-addons/51fe798de588f268b5169fbc3cc2b4de5145f0d8/modules/z-archieve/aws-kube-proxy/outputs.tf -------------------------------------------------------------------------------- /modules/z-archieve/aws-kube-proxy/variables.tf: -------------------------------------------------------------------------------- 1 | variable "addon_config" { 2 | description = "Amazon EKS Managed Add-on config for Kube Proxy" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "addon_context" { 8 | description = "Input configuration for the addon" 9 | type = object({ 10 | aws_caller_identity_account_id = string 11 | aws_caller_identity_arn = string 12 | aws_eks_cluster_endpoint = string 13 | aws_partition_id = string 14 | aws_region_name = string 15 | eks_cluster_id = string 16 | eks_oidc_issuer_url = string 17 | eks_oidc_provider_arn = string 18 | tags = map(string) 19 | }) 20 | } 21 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-kube-proxy/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.10" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-load-balancer-controller/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | manage_via_gitops = var.manage_via_gitops 4 | set_values = local.set_values 5 | helm_config = local.helm_config 6 | irsa_config = 
local.irsa_config 7 | addon_context = var.addon_context 8 | } 9 | 10 | resource "aws_iam_policy" "aws_load_balancer_controller" { 11 | name = "${var.addon_context.eks_cluster_id}-lb-irsa" 12 | description = "Allows lb controller to manage ALB and NLB" 13 | policy = data.aws_iam_policy_document.aws_lb.json 14 | tags = var.addon_context.tags 15 | } 16 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-load-balancer-controller/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ingress_namespace" { 2 | description = "AWS LoadBalancer Controller Ingress Namespace" 3 | value = local.helm_config["namespace"] 4 | } 5 | 6 | output "ingress_name" { 7 | description = "AWS LoadBalancer Controller Ingress Name" 8 | value = local.helm_config["name"] 9 | } 10 | 11 | output "argocd_gitops_config" { 12 | description = "Configuration used for managing the add-on with ArgoCD" 13 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 14 | } 15 | 16 | output "release_metadata" { 17 | description = "Map of attributes of the Helm release metadata" 18 | value = module.helm_addon.release_metadata 19 | } 20 | 21 | output "irsa_arn" { 22 | description = "IAM role ARN for the service account" 23 | value = module.helm_addon.irsa_arn 24 | } 25 | 26 | output "irsa_name" { 27 | description = "IAM role name for the service account" 28 | value = module.helm_addon.irsa_name 29 | } 30 | 31 | output "service_account" { 32 | description = "Name of Kubernetes service account" 33 | value = module.helm_addon.service_account 34 | } 35 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-load-balancer-controller/values.yaml: -------------------------------------------------------------------------------- 1 | clusterName: ${eks_cluster_id} 2 | region: ${aws_region} 3 | image: 4 | repository: ${repository} 5 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-load-balancer-controller/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for the aws_load_balancer_controller." 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon." 
15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | default_repository = string 28 | }) 29 | } 30 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-load-balancer-controller/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-node-termination-handler/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "aws_node_termination_handler_queue_policy_document" { 2 | statement { 3 | actions = [ 4 | "sqs:SendMessage" 5 | ] 6 | principals { 7 | type = "Service" 8 | identifiers = [ 9 | "events.amazonaws.com", 10 | "sqs.amazonaws.com" 11 | ] 12 | } 13 | resources = [ 14 | aws_sqs_queue.aws_node_termination_handler_queue.arn 15 | ] 16 | } 17 | } 18 | 19 | data "aws_iam_policy_document" "irsa_policy" { 20 | statement { 21 | actions = [ 22 | "autoscaling:CompleteLifecycleAction", 23 | "autoscaling:DescribeAutoScalingInstances", 24 | "autoscaling:DescribeTags", 25 | "ec2:DescribeInstances", 26 | "sqs:DeleteMessage", 27 | "sqs:ReceiveMessage", 28 | ] 29 | resources = ["*"] 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-node-termination-handler/outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "release_metadata" { 2 | description = "Map of attributes of the Helm release metadata" 3 | value = module.helm_addon.release_metadata 4 | } 5 | 6 | output "irsa_arn" { 7 | description = "IAM role ARN for the service account" 8 | value = module.helm_addon.irsa_arn 9 | } 10 | 11 | output "irsa_name" { 12 | description = "IAM role name for the service account" 13 | value = module.helm_addon.irsa_name 14 | } 15 | 16 | output "service_account" { 17 | description = "Name of Kubernetes service account" 18 | value = module.helm_addon.service_account 19 | } 20 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-node-termination-handler/values.yaml: -------------------------------------------------------------------------------- 1 | enableSqsTerminationDraining: true 2 | enablePrometheusServer: true 3 | %{ if length(autoscaling_group_names) == 0 ~} 4 | checkASGTagBeforeDraining: false 5 | %{ endif ~} 6 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-node-termination-handler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "AWS Node Termination Handler Helm Chart Configuration" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "autoscaling_group_names" { 8 | description = "EKS Node Group ASG names" 9 | type = list(string) 10 | } 11 | 12 | variable "addon_context" { 13 | description = "Input configuration for the addon" 14 | type = object({ 15 | aws_caller_identity_account_id = string 16 | aws_caller_identity_arn = string 17 | aws_eks_cluster_endpoint = string 18 | aws_partition_id = string 19 | aws_region_name = string 20 | eks_cluster_id = string 21 | eks_oidc_issuer_url = string 22 | eks_oidc_provider_arn = string 23 | tags = map(string) 24 | irsa_iam_role_path = string 25 | 
irsa_iam_permissions_boundary = string 26 | }) 27 | } 28 | 29 | variable "irsa_policies" { 30 | description = "Additional IAM policies for a IAM role for service accounts" 31 | type = list(string) 32 | default = [] 33 | } 34 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-node-termination-handler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-privateca-issuer/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "aws_privateca_issuer" { 2 | statement { 3 | effect = "Allow" 4 | resources = [var.aws_privateca_acmca_arn] 5 | actions = [ 6 | "acm-pca:DescribeCertificateAuthority", 7 | "acm-pca:GetCertificate", 8 | "acm-pca:IssueCertificate", 9 | ] 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-privateca-issuer/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | manage_via_gitops = var.manage_via_gitops 4 | set_values = local.set_values 5 | helm_config = local.helm_config 6 | irsa_config = local.irsa_config 7 | addon_context = var.addon_context 8 | } 9 | 10 | resource "aws_iam_policy" "aws_privateca_issuer" { 11 | description = "AWS PCA issuer IAM policy" 12 | name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa" 13 | policy = data.aws_iam_policy_document.aws_privateca_issuer.json 14 | tags = var.addon_context.tags 15 | } 16 | -------------------------------------------------------------------------------- 
/modules/z-archieve/aws-privateca-issuer/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/aws-privateca-issuer/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager-csi-driver/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | 4 | # https://github.com/cert-manager/csi-driver/blob/main/deploy/charts/csi-driver/Chart.yaml 5 | helm_config = merge( 6 | { 7 | name = "cert-manager-csi-driver" 8 | chart = "cert-manager-csi-driver" 9 | repository = "https://charts.jetstack.io" 10 | version = "v0.4.2" 11 | namespace = "cert-manager" 12 | description = "Cert Manager CSI Driver Add-on" 13 | }, 14 | var.helm_config 15 | ) 16 | 17 | manage_via_gitops = 
var.manage_via_gitops 18 | addon_context = var.addon_context 19 | } 20 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager-csi-driver/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? { enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager-csi-driver/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for Cert-Manager CSI Driver." 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 
9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager-csi-driver/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager-istio-csr/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | helm_config = merge( 4 | { 5 | name = "cert-manager-istio-csr" 6 | chart = "cert-manager-istio-csr" 7 | repository = "https://charts.jetstack.io" 8 | version = "v0.5.0" 9 | namespace = "cert-manager" 10 | create_namespace = false 11 | description = "Cert-manager-istio-csr Helm Chart deployment configuration" 12 | }, 13 | var.helm_config 14 | ) 15 | manage_via_gitops = var.manage_via_gitops 16 | addon_context = var.addon_context 17 | } 18 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager-istio-csr/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
{ enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager-istio-csr/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm Config for istio-csr." 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 
9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager-istio-csr/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/cert-manager-ca/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cert-manager-ca 3 | description: A Helm chart to install a Cert Manager CA 4 | type: application 5 | version: 0.2.0 6 | appVersion: v0.1.0 7 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/cert-manager-ca/templates/certificate.yaml: -------------------------------------------------------------------------------- 1 | {{- range .Values.clusterIssuers }} 2 | {{- if eq .type "CA" }} 3 | apiVersion: cert-manager.io/v1 4 | kind: Certificate 5 | metadata: 6 | name: {{ .name }} 7 | namespace: {{ $.Release.Namespace }} 8 | spec: 9 | isCA: true 10 | commonName: {{ .name }} 11 | secretName: {{ .secretName }} 12 | {{- with .privateKey }} 13 | privateKey: 14 | {{- toYaml . | nindent 4 }} 15 | {{- end }} 16 | {{- with .issuer }} 17 | issuerRef: 18 | {{- toYaml . 
| nindent 4 }} 19 | {{- end }} 20 | {{- end }} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/cert-manager-ca/templates/clusterissuers.yaml: -------------------------------------------------------------------------------- 1 | {{- range .Values.clusterIssuers }} 2 | --- 3 | apiVersion: cert-manager.io/v1 4 | kind: ClusterIssuer 5 | metadata: 6 | name: {{ .name }} 7 | spec: 8 | {{- if eq .type "selfSigned" }} 9 | selfSigned: {} 10 | {{- else if eq .type "CA" }} 11 | ca: 12 | secretName: {{ .secretName }} 13 | {{- end }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/cert-manager-ca/values.yaml: -------------------------------------------------------------------------------- 1 | clusterIssuers: 2 | - name: cert-manager-selfsigned 3 | type: selfSigned 4 | - name: cert-manager-ca 5 | type: CA 6 | secretName: cert-manager-ca-root 7 | privateKey: 8 | algorithm: ECDSA 9 | size: 256 10 | issuer: 11 | name: cert-manager-selfsigned 12 | kind: ClusterIssuer 13 | group: cert-manager.io 14 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/cert-manager-letsencrypt/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cert-manager-letsencrypt 3 | description: Cert Manager Cluster Issuers for Let's Encrypt certificates with DNS01 protocol 4 | type: application 5 | version: 0.1.0 6 | appVersion: v0.1.0 7 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/cert-manager-letsencrypt/templates/clusterissuer-production.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: {{ .Release.Name 
}}-production-route53 5 | labels: 6 | ca: letsencrypt 7 | environment: production 8 | solver: dns01 9 | provider: route53 10 | spec: 11 | acme: 12 | {{- if .Values.email }} 13 | email: {{ .Values.email }} 14 | {{- end }} 15 | server: https://acme-v02.api.letsencrypt.org/directory 16 | preferredChain: ISRG Root X1 17 | privateKeySecretRef: 18 | name: {{ .Release.Name }}-production-route53 19 | solvers: 20 | - dns01: 21 | route53: 22 | region: {{ .Values.region | default "global" }} 23 | {{- if .Values.dnsZones }} 24 | selector: 25 | dnsZones: 26 | {{- .Values.dnsZones | toYaml | nindent 12 }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/cert-manager-letsencrypt/templates/clusterissuer-staging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: {{ .Release.Name }}-staging-route53 5 | labels: 6 | ca: letsencrypt 7 | environment: staging 8 | solver: dns01 9 | provider: route53 10 | spec: 11 | acme: 12 | {{- if .Values.email }} 13 | email: {{ .Values.email }} 14 | {{- end }} 15 | server: https://acme-staging-v02.api.letsencrypt.org/directory 16 | preferredChain: ISRG Root X1 17 | privateKeySecretRef: 18 | name: {{ .Release.Name }}-staging-route53 19 | solvers: 20 | - dns01: 21 | route53: 22 | region: {{ .Values.region | default "global" }} 23 | {{- if .Values.dnsZones }} 24 | selector: 25 | dnsZones: 26 | {{- .Values.dnsZones | toYaml | nindent 12 }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/cert-manager-letsencrypt/values.yaml: -------------------------------------------------------------------------------- 1 | # email: user@example.com 2 | 3 | # region: global 4 | 5 | # dnsZones: 6 | # - domain.name 7 | 
-------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "selected" { 2 | for_each = toset(var.domain_names) 3 | 4 | name = each.key 5 | } 6 | 7 | data "aws_iam_policy_document" "cert_manager_iam_policy_document" { 8 | statement { 9 | effect = "Allow" 10 | resources = ["arn:${var.addon_context.aws_partition_id}:route53:::change/*"] 11 | actions = ["route53:GetChange"] 12 | } 13 | 14 | dynamic "statement" { 15 | for_each = { for k, v in toset(var.domain_names) : k => data.aws_route53_zone.selected[k].arn } 16 | 17 | content { 18 | effect = "Allow" 19 | resources = [statement.value] 20 | actions = [ 21 | "route53:ChangeresourceRecordSets", 22 | "route53:ListresourceRecordSets" 23 | ] 24 | } 25 | } 26 | 27 | statement { 28 | effect = "Allow" 29 | resources = ["*"] 30 | actions = ["route53:ListHostedZonesByName"] 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "eks_cluster_id" { 7 | description = "Current AWS EKS Cluster ID" 8 | value = var.addon_context.eks_cluster_id 9 | } 10 | 11 | output "release_metadata" { 12 | description = "Map of attributes of the Helm release metadata" 13 | value = module.helm_addon.release_metadata 14 | } 15 | 16 | output "irsa_arn" { 17 | description = "IAM role ARN for the service account" 18 | value = module.helm_addon.irsa_arn 19 | } 20 | 21 | output "irsa_name" { 22 | description = "IAM role name for the service account" 23 | value = module.helm_addon.irsa_name 24 | } 25 | 26 | output "service_account" { 27 | description = "Name of Kubernetes service account" 28 | value = module.helm_addon.service_account 29 | } 30 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/values.yaml: -------------------------------------------------------------------------------- 1 | extraArgs: 2 | - --enable-certificate-owner-ref=true 3 | 4 | installCRDs: true 5 | -------------------------------------------------------------------------------- /modules/z-archieve/cert-manager/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.10" 8 | } 9 | helm = { 10 | source = "hashicorp/helm" 11 | version = ">= 2.4.1" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/z-archieve/cluster-autoscaler/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
{ 4 | enable = true 5 | serviceAccountName = local.service_account 6 | } : null 7 | } 8 | 9 | output "release_metadata" { 10 | description = "Map of attributes of the Helm release metadata" 11 | value = module.helm_addon.release_metadata 12 | } 13 | 14 | output "irsa_arn" { 15 | description = "IAM role ARN for the service account" 16 | value = module.helm_addon.irsa_arn 17 | } 18 | 19 | output "service_account" { 20 | description = "Name of Kubernetes service account" 21 | value = module.helm_addon.service_account 22 | } 23 | -------------------------------------------------------------------------------- /modules/z-archieve/cluster-autoscaler/values.yaml: -------------------------------------------------------------------------------- 1 | awsRegion: ${aws_region} 2 | 3 | autoDiscovery: 4 | clusterName: ${eks_cluster_id} 5 | extraArgs: 6 | aws-use-static-instance-list: true 7 | 8 | image: 9 | tag: ${image_tag} 10 | 11 | resources: 12 | limits: 13 | cpu: 200m 14 | memory: 512Mi 15 | requests: 16 | cpu: 200m 17 | memory: 512Mi 18 | -------------------------------------------------------------------------------- /modules/z-archieve/cluster-autoscaler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "eks_cluster_version" { 2 | description = "The Kubernetes version for the cluster - used to match appropriate version for image used" 3 | type = string 4 | } 5 | 6 | variable "helm_config" { 7 | description = "Cluster Autoscaler Helm Config" 8 | type = any 9 | default = {} 10 | } 11 | 12 | variable "manage_via_gitops" { 13 | description = "Determines if the add-on should be managed via GitOps." 
14 | type = bool 15 | default = false 16 | } 17 | 18 | variable "addon_context" { 19 | description = "Input configuration for the addon" 20 | type = object({ 21 | aws_caller_identity_account_id = string 22 | aws_caller_identity_arn = string 23 | aws_eks_cluster_endpoint = string 24 | aws_partition_id = string 25 | aws_region_name = string 26 | eks_cluster_id = string 27 | eks_oidc_issuer_url = string 28 | eks_oidc_provider_arn = string 29 | tags = map(string) 30 | irsa_iam_role_path = string 31 | irsa_iam_permissions_boundary = string 32 | }) 33 | } 34 | -------------------------------------------------------------------------------- /modules/z-archieve/cluster-autoscaler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/cluster-proportional-autoscaler/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | 4 | # https://github.com/kubernetes-sigs/cluster-proportional-autoscaler/blob/master/charts/cluster-proportional-autoscaler/Chart.yaml 5 | helm_config = merge( 6 | { 7 | name = "cluster-proportional-autoscaler" 8 | chart = "cluster-proportional-autoscaler" 9 | repository = "https://kubernetes-sigs.github.io/cluster-proportional-autoscaler" 10 | version = "1.1.0" 11 | namespace = "kube-system" 12 | values = [templatefile("${path.module}/values.yaml", { 13 | operating_system = "linux" 14 | })] 15 | description = "Cluster Proportional Autoscaler Helm Chart" 16 | }, 17 | var.helm_config 18 | ) 19 | 20 | manage_via_gitops = var.manage_via_gitops 21 | addon_context = var.addon_context 22 | } 23 | 
-------------------------------------------------------------------------------- /modules/z-archieve/cluster-proportional-autoscaler/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? { enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/cluster-proportional-autoscaler/values.yaml: -------------------------------------------------------------------------------- 1 | # Formula for controlling the replicas. Adjust according to your needs 2 | # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) ) 3 | # replicas = min(replicas, max) 4 | # replicas = max(replicas, min) 5 | config: 6 | linear: 7 | coresPerReplica: 256 8 | nodesPerReplica: 16 9 | min: 1 10 | max: 100 11 | preventSinglePointFailure: true 12 | includeUnschedulableNodes: true 13 | 14 | # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive). 
15 | # The following option should be defined in user defined values.yaml using var.helm_config 16 | 17 | #options: 18 | # target: 19 | 20 | podSecurityContext: 21 | seccompProfile: 22 | type: RuntimeDefault 23 | supplementalGroups: [ 65534 ] 24 | fsGroup: 65534 25 | 26 | nodeSelector: 27 | kubernetes.io/os: ${operating_system} 28 | 29 | resources: 30 | limits: 31 | cpu: 100m 32 | memory: 128Mi 33 | requests: 34 | cpu: 100m 35 | memory: 128Mi 36 | 37 | tolerations: 38 | - key: "CriticalAddonsOnly" 39 | operator: "Exists" 40 | -------------------------------------------------------------------------------- /modules/z-archieve/cluster-proportional-autoscaler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for the Karpenter" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | }) 26 | } 27 | -------------------------------------------------------------------------------- /modules/z-archieve/cluster-proportional-autoscaler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/aws-provider/aws-controller-config.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: pkg.crossplane.io/v1alpha1 3 | kind: ControllerConfig 4 | metadata: 5 | name: aws-controller-config 6 | annotations: 7 | eks.amazonaws.com/role-arn: ${iam-role-arn} 8 | spec: 9 | podSecurityContext: 10 | fsGroup: 2000 11 | args: 12 | - --debug 13 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/aws-provider/aws-provider-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: aws.crossplane.io/v1beta1 3 | kind: ProviderConfig 4 | metadata: 5 | name: aws-provider-config 6 | spec: 7 | credentials: 8 | source: InjectedIdentity 9 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/aws-provider/aws-provider.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: pkg.crossplane.io/v1 3 | kind: Provider 4 | metadata: 5 | name: ${aws-provider-name} 6 | spec: 7 | package: xpkg.upbound.io/crossplane-contrib/provider-aws:${coalesce(provider-aws-version, "v0.33.0")} 8 | controllerConfigRef: 9 | name: aws-controller-config 10 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/aws-provider/jet-aws-controller-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: pkg.crossplane.io/v1alpha1 3 | kind: ControllerConfig 4 | metadata: 5 | name: jet-aws-controller-config 6 | annotations: 7 | eks.amazonaws.com/role-arn: ${iam-role-arn} 8 | spec: 9 | podSecurityContext: 10 | fsGroup: 2000 11 | args: 12 | - --debug 13 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/aws-provider/jet-aws-provider-config.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: aws.jet.crossplane.io/v1alpha1 3 | kind: ProviderConfig 4 | metadata: 5 | name: jet-aws-provider-config 6 | spec: 7 | credentials: 8 | source: InjectedIdentity 9 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/aws-provider/jet-aws-provider.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: pkg.crossplane.io/v1 3 | kind: Provider 4 | metadata: 5 | name: ${aws-provider-name} 6 | spec: 7 | package: crossplane/provider-jet-aws:${provider-aws-version} 8 | controllerConfigRef: 9 | name: jet-aws-controller-config 10 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "s3_policy" { 2 | statement { 3 | sid = "VisualEditor0" 4 | effect = "Allow" 5 | resources = ["arn:${var.addon_context.aws_partition_id}:s3:::*"] 6 | 7 | actions = [ 8 | "s3:CreateBucket", 9 | "s3:DeleteBucket", 10 | "s3:DeleteObject", 11 | "s3:DeleteObjectVersion", 12 | "s3:Get*", 13 | "s3:ListBucket", 14 | "s3:Put*", 15 | ] 16 | } 17 | 18 | statement { 19 | sid = "VisualEditor1" 20 | effect = "Allow" 21 | resources = ["*"] 22 | actions = ["s3:ListAllMyBuckets"] 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/kubernetes-provider/kubernetes-controller-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ${kubernetes-serviceaccount-name} 5 | subjects: 6 | - kind: ServiceAccount 7 | name: ${kubernetes-serviceaccount-name} 8 | namespace: ${namespace} 9 | roleRef: 10 | 
kind: ClusterRole 11 | name: cluster-admin 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/kubernetes-provider/kubernetes-controller-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1alpha1 2 | kind: ControllerConfig 3 | metadata: 4 | name: kubernetes-controller-config 5 | spec: 6 | serviceAccountName: ${kubernetes-serviceaccount-name} 7 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/kubernetes-provider/kubernetes-provider-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kubernetes.crossplane.io/v1alpha1 3 | kind: ProviderConfig 4 | metadata: 5 | name: kubernetes-provider-config 6 | spec: 7 | credentials: 8 | source: InjectedIdentity 9 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/kubernetes-provider/kubernetes-provider.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: pkg.crossplane.io/v1 3 | kind: Provider 4 | metadata: 5 | name: ${kubernetes-provider-name} 6 | spec: 7 | package: xpkg.upbound.io/crossplane-contrib/provider-kubernetes:${coalesce(provider-kubernetes-version, "v0.5.0")} 8 | controllerConfigRef: 9 | name: kubernetes-controller-config 10 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | namespace = try(var.helm_config.namespace, "crossplane-system") 3 | 4 | # https://github.com/crossplane/crossplane/blob/master/cluster/charts/crossplane/Chart.yaml 5 | default_helm_config = { 6 | name = "crossplane" 7 | chart = "crossplane" 8 | 
repository = "https://charts.crossplane.io/stable/" 9 | version = "1.10.1" 10 | namespace = local.namespace 11 | description = "Crossplane Helm chart" 12 | values = local.default_helm_values 13 | } 14 | 15 | helm_config = merge( 16 | local.default_helm_config, 17 | var.helm_config 18 | ) 19 | 20 | default_helm_values = [templatefile("${path.module}/values.yaml", { 21 | operating-system = "linux" 22 | })] 23 | 24 | aws_provider_sa = "aws-provider" 25 | jet_aws_provider_sa = "jet-aws-provider" 26 | kubernetes_provider_sa = try(var.helm_config.service_account, "kubernetes-provider") 27 | aws_current_account_id = var.account_id 28 | aws_current_partition = var.aws_partition 29 | } 30 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/outputs.tf: -------------------------------------------------------------------------------- 1 | output "release_metadata" { 2 | description = "Map of attributes of the Helm release metadata" 3 | value = module.helm_addon.release_metadata 4 | } 5 | 6 | output "irsa_arn" { 7 | description = "IAM role ARN for the service account" 8 | value = module.helm_addon.irsa_arn 9 | } 10 | 11 | output "irsa_name" { 12 | description = "IAM role name for the service account" 13 | value = module.helm_addon.irsa_name 14 | } 15 | 16 | output "service_account" { 17 | description = "Name of Kubernetes service account" 18 | value = module.helm_addon.service_account 19 | } 20 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/values.yaml: -------------------------------------------------------------------------------- 1 | nodeSelector: 2 | kubernetes.io/os: ${operating-system} 3 | -------------------------------------------------------------------------------- /modules/z-archieve/crossplane/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 
| required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | kubernetes = { 10 | source = "hashicorp/kubernetes" 11 | version = ">= 2.10" 12 | } 13 | kubectl = { 14 | source = "gavinbunney/kubectl" 15 | version = ">= 1.14" 16 | } 17 | time = { 18 | source = "hashicorp/time" 19 | version = ">= 0.7" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /modules/z-archieve/csi-secrets-store-provider-aws/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = try(var.helm_config.name, "csi-secrets-store-provider-aws") 3 | namespace = try(var.helm_config.namespace, local.name) 4 | } 5 | 6 | resource "kubernetes_namespace_v1" "csi_secrets_store_provider_aws" { 7 | metadata { 8 | name = local.namespace 9 | } 10 | } 11 | 12 | module "helm_addon" { 13 | source = "../helm-addon" 14 | 15 | # https://github.com/aws/eks-charts/blob/master/stable/csi-secrets-store-provider-aws/Chart.yaml 16 | helm_config = merge( 17 | { 18 | name = local.name 19 | chart = local.name 20 | repository = "https://aws.github.io/eks-charts" 21 | version = "0.0.3" 22 | namespace = kubernetes_namespace_v1.csi_secrets_store_provider_aws.metadata[0].name 23 | description = "A Helm chart to install the Secrets Store CSI Driver and the AWS Key Management Service Provider inside a Kubernetes cluster." 24 | }, 25 | var.helm_config 26 | ) 27 | 28 | manage_via_gitops = var.manage_via_gitops 29 | addon_context = var.addon_context 30 | } 31 | -------------------------------------------------------------------------------- /modules/z-archieve/csi-secrets-store-provider-aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
{ enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/csi-secrets-store-provider-aws/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "CSI Secrets Store Provider AWS Helm Configurations" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps" 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /modules/z-archieve/csi-secrets-store-provider-aws/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = 
"hashicorp/kubernetes" 7 | version = ">= 2.10" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/data.tf: -------------------------------------------------------------------------------- 1 | # data "aws_partition" "current" {} 2 | # data "aws_caller_identity" "current" {} 3 | # data "aws_region" "current" {} 4 | 5 | # resource "time_sleep" "dataplane" { 6 | # create_duration = "10s" 7 | 8 | # triggers = { 9 | # data_plane_wait_arn = var.data_plane_wait_arn # this waits for the data plane to be ready 10 | # eks_cluster_id = var.eks_cluster_id # this ties it to downstream resources 11 | # } 12 | # } 13 | 14 | # data "aws_eks_cluster" "eks_cluster" { 15 | # # this makes downstream resources wait for data plane to be ready 16 | # name = time_sleep.dataplane.triggers["eks_cluster_id"] 17 | # } 18 | -------------------------------------------------------------------------------- /modules/z-archieve/external-dns/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with GitOps" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/external-dns/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/external-secrets/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "external_secrets" { 2 | statement { 3 | actions = ["ssm:GetParameter"] 4 | resources = var.external_secrets_ssm_parameter_arns 5 | } 6 | 7 | statement { 8 | actions = [ 9 | "secretsmanager:GetResourcePolicy", 10 | "secretsmanager:GetSecretValue", 11 | "secretsmanager:DescribeSecret", 12 | "secretsmanager:ListSecretVersionIds", 13 | ] 14 | resources = var.external_secrets_secrets_manager_arns 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /modules/z-archieve/external-secrets/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | manage_via_gitops = var.manage_via_gitops 4 | set_values = local.set_values 5 
| helm_config = local.helm_config 6 | irsa_config = local.irsa_config 7 | addon_context = var.addon_context 8 | } 9 | 10 | resource "aws_iam_policy" "external_secrets" { 11 | name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa" 12 | path = var.addon_context.irsa_iam_role_path 13 | description = "Provides permissions to for External Secrets to retrieve secrets from AWS SSM and AWS Secrets Manager" 14 | policy = data.aws_iam_policy_document.external_secrets.json 15 | } 16 | -------------------------------------------------------------------------------- /modules/z-archieve/external-secrets/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/external-secrets/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/helm-addon/outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "helm_release" { 2 | description = "Map of attributes of the Helm release created without sensitive outputs" 3 | value = try({ for k, v in helm_release.addon : k => v if k != "repository_password" }, {}) 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = try(helm_release.addon[0].metadata, null) 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = try(module.irsa[0].irsa_iam_role_arn, null) 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = try(module.irsa[0].irsa_iam_role_name, null) 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = try(coalesce(try(module.irsa[0].service_account, null), lookup(var.irsa_config, "kubernetes_service_account", null)), null) 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/helm-addon/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm chart config. Repository and version required. 
See https://registry.terraform.io/providers/hashicorp/helm/latest/docs" 3 | type = any 4 | } 5 | 6 | variable "set_values" { 7 | description = "Forced set values" 8 | type = any 9 | default = [] 10 | } 11 | 12 | variable "set_sensitive_values" { 13 | description = "Forced set_sensitive values" 14 | type = any 15 | default = [] 16 | } 17 | 18 | variable "manage_via_gitops" { 19 | description = "Determines if the add-on should be managed via GitOps" 20 | type = bool 21 | default = false 22 | } 23 | 24 | variable "irsa_iam_role_name" { 25 | description = "IAM role name for IRSA" 26 | type = string 27 | default = "" 28 | } 29 | 30 | variable "irsa_config" { 31 | description = "Input configuration for IRSA module" 32 | type = any 33 | default = {} 34 | } 35 | 36 | variable "addon_context" { 37 | description = "Input configuration for the addon" 38 | type = any 39 | } 40 | -------------------------------------------------------------------------------- /modules/z-archieve/helm-addon/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | helm = { 6 | source = "hashicorp/helm" 7 | version = ">= 2.4.1" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/ingress-nginx/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = try(var.helm_config.name, "ingress-nginx") 3 | namespace = try(var.helm_config.namespace, local.name) 4 | } 5 | 6 | resource "kubernetes_namespace_v1" "this" { 7 | count = try(var.helm_config.create_namespace, true) && local.namespace != "kube-system" ? 
1 : 0 8 | 9 | metadata { 10 | name = local.namespace 11 | } 12 | } 13 | 14 | module "helm_addon" { 15 | source = "../helm-addon" 16 | 17 | helm_config = merge( 18 | { 19 | name = local.name 20 | chart = local.name 21 | repository = "https://kubernetes.github.io/ingress-nginx" 22 | version = "4.9.1" 23 | namespace = try(kubernetes_namespace_v1.this[0].metadata[0].name, local.namespace) 24 | description = "The NGINX HelmChart Ingress Controller deployment configuration" 25 | }, 26 | var.helm_config 27 | ) 28 | 29 | manage_via_gitops = var.manage_via_gitops 30 | addon_context = var.addon_context 31 | } 32 | -------------------------------------------------------------------------------- /modules/z-archieve/ingress-nginx/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? { enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/ingress-nginx/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Ingress NGINX Helm Configuration" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should 
be managed via GitOps." 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | }) 26 | } 27 | -------------------------------------------------------------------------------- /modules/z-archieve/ingress-nginx/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = "hashicorp/kubernetes" 7 | version = ">= 2.10" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/helm/istio-ingress.yaml: -------------------------------------------------------------------------------- 1 | # nodeSelector: 2 | # Addons-Services: true 3 | resources: 4 | limits: 5 | cpu: 20m 6 | memory: 200Mi 7 | requests: 8 | cpu: 10m 9 | memory: 100Mi 10 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/helm/values.yaml: -------------------------------------------------------------------------------- 1 | pilot: 2 | resources: 3 | limits: 4 | cpu: 100m 5 | memory: 200Mi 6 | requests: 7 | cpu: 50m 8 | memory: 100Mi 9 | affinity: 10 | nodeAffinity: 11 | requiredDuringSchedulingIgnoredDuringExecution: 12 | nodeSelectorTerms: 13 | - matchExpressions: 14 | - key: "Addons-Services" 15 | operator: In 16 | values: 17 | - "true" 18 | 19 | # gateways: 20 | # istio-ingressgateway: 21 | # nodeSelector: 22 | # Addons-Services: true 23 | # resources: 24 | # limits: 25 | # cpu: 100m 26 | # memory: 200Mi 27 | # requests: 28 | # cpu: 
10m 29 | # memory: 100Mi 30 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/istio-observability/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/istio-observability/templates/clusterissuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-istio 5 | namespace: istio-system 6 | spec: 7 | acme: 8 | email: {{ .Values.clusterIssuer.email }} 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | privateKeySecretRef: 11 | name: letsencrypt-istio 12 | solvers: 13 | - http01: 14 | ingress: 15 | class: istio 16 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/istio-observability/templates/enable-access-logs.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.accessLogging.enabled -}} 2 | apiVersion: telemetry.istio.io/v1alpha1 3 | kind: Telemetry 4 | metadata: 5 | name: mesh-default 6 | namespace: istio-system 7 | spec: 8 | accessLogging: 9 | - providers: 10 | - name: envoy 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/istio-observability/templates/service-monitor-control-plane.yaml: 
-------------------------------------------------------------------------------- 1 | {{- if .Values.monitoring.enabled -}} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: prometheus-oper-istio-controlplane 6 | labels: 7 | release: prometheus-operator 8 | monitoring: istio-controlplane 9 | spec: 10 | jobLabel: istio 11 | selector: 12 | matchExpressions: 13 | - {key: istio, operator: In, values: [mixer,pilot,galley,citadel,sidecar-injector]} 14 | namespaceSelector: 15 | matchNames: 16 | - istio-system 17 | endpoints: 18 | - port: http-monitoring 19 | interval: 15s 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/istio-observability/templates/service-monitor-dataplane.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.monitoring.enabled -}} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: prometheus-oper-istio-dataplane 6 | labels: 7 | monitoring: istio-dataplane 8 | release: prometheus-operator 9 | spec: 10 | selector: 11 | matchExpressions: 12 | - {key: istio-prometheus-ignore, operator: DoesNotExist} 13 | namespaceSelector: 14 | any: true 15 | jobLabel: envoy-stats 16 | endpoints: 17 | - path: /stats/prometheus 18 | targetPort: http-envoy-prom 19 | interval: 15s 20 | relabelings: 21 | - sourceLabels: [__meta_kubernetes_pod_container_port_name] 22 | action: keep 23 | regex: '.*-envoy-prom' 24 | - action: labelmap 25 | regex: "__meta_kubernetes_pod_label_(.+)" 26 | - sourceLabels: [__meta_kubernetes_namespace] 27 | action: replace 28 | targetLabel: namespace 29 | - sourceLabels: [__meta_kubernetes_pod_name] 30 | action: replace 31 | targetLabel: pod_name 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/istio-observability/values.yaml: 
-------------------------------------------------------------------------------- 1 | accessLogging: 2 | enabled: "${envoy_access_logs_enabled}" 3 | 4 | monitoring: 5 | enabled: "${prometheus_monitoring_enabled}" 6 | 7 | clusterIssuer: 8 | email: "${cert_manager_letsencrypt_email}" 9 | -------------------------------------------------------------------------------- /modules/z-archieve/istio/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.26" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 3.43.0" 7 | } 8 | kubernetes = { 9 | source = "hashicorp/kubernetes" 10 | version = ">= 2.0.2" 11 | } 12 | helm = { 13 | source = "hashicorp/helm" 14 | version = ">= 2.0.2" 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /modules/z-archieve/karpenter-provisioner/config/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /modules/z-archieve/karpenter-provisioner/config/ipv4-values.yaml: -------------------------------------------------------------------------------- 1 | private_subnet_selector_key: ${private_subnet_selector_key} 2 | private_subnet_selector_value: "${private_subnet_selector_value}" 3 | security_group_selector_key: ${security_group_selector_key} 4 | security_group_selector_value: "${security_group_selector_value}" 5 | karpenter_ec2_capacity_type: "${instance_capacity_type}" 6 | karpenter_ec2_instance_family: "${ec2_instance_family}" 7 | karpenter_ec2_instance_type: "${ec2_instance_type}" 8 | excluded_karpenter_ec2_instance_type: "${excluded_instance_type}" 9 | provisioner_name: "${provisioner_name}" 10 | instance_capacity_type: "${instance_capacity_type}" 11 | kms_key_id: "${kms_key_id}" 12 | ec2_node_name: "${ec2_node_name}" 13 | 14 | 15 | 16 | spec: 17 | labels: 18 | ${karpenter_label}: "true" 19 | eks.amazonaws.com/nodegroup: "Services-ng" 20 | -------------------------------------------------------------------------------- /modules/z-archieve/karpenter-provisioner/config/ipv6-values.yaml: -------------------------------------------------------------------------------- 1 | private_subnet_selector_key: ${private_subnet_selector_key} 2 | private_subnet_selector_value: "${private_subnet_selector_value}" 3 | security_group_selector_key: ${security_group_selector_key} 4 | security_group_selector_value: "${security_group_selector_value}" 5 | karpenter_ec2_capacity_type: "${instance_capacity_type}" 6 | karpenter_ec2_instance_family: "${ec2_instance_family}" 7 | karpenter_ec2_instance_type: "${ec2_instance_type}" 8 | 
excluded_karpenter_ec2_instance_type: "${excluded_instance_type}" 9 | provisioner_name: "${provisioner_name}" 10 | karpenter_instance_hypervisor: "${instance_hypervisor}" 11 | instance_capacity_type: "${instance_capacity_type}" 12 | kms_key_id: "${kms_key_id}" 13 | ec2_node_name: "${ec2_node_name}" 14 | 15 | 16 | spec: 17 | labels: 18 | ${karpenter_label}: "true" 19 | eks.amazonaws.com/nodegroup: "Services-ng" 20 | -------------------------------------------------------------------------------- /modules/z-archieve/karpenter-provisioner/tfsec.yaml: -------------------------------------------------------------------------------- 1 | exclude: 2 | - aws-iam-no-policy-wildcards # Wildcards required in addon IAM policies 3 | - aws-vpc-no-excessive-port-access # VPC settings left up to user implementation for recommended practices 4 | - aws-vpc-no-public-ingress-acl # VPC settings left up to user implementation for recommended practices 5 | - aws-eks-no-public-cluster-access-to-cidr # Public access enabled for better example usability, users are recommended to disable if possible 6 | - aws-eks-no-public-cluster-access # Public access enabled for better example usability, users are recommended to disable if possible 7 | - aws-eks-encrypt-secrets # Module defaults to encrypting secrets with CMK, but this is not hardcoded and therefore a spurious error 8 | - aws-vpc-no-public-egress-sgr # Added in v1.22 9 | - aws-ec2-no-public-egress-sgr 10 | - aws-ec2-no-public-ingress-sgr 11 | - aws-ec2-enforce-http-token-imds 12 | - aws-ec2-no-public-ip-subnet # VPN IP 13 | - aws-ec2-require-vpc-flow-logs-for-all-vpcs # disabled flow logs by default 14 | -------------------------------------------------------------------------------- /modules/z-archieve/karpenter-provisioner/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12.26" 3 | required_providers { 4 | helm = { 5 | source = "hashicorp/helm" 6 | 
version = ">= 2.0.0" # Adjust version as per your requirement 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /modules/z-archieve/karpenter/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | manage_via_gitops = var.manage_via_gitops 4 | helm_config = local.helm_config 5 | set_values = local.set_values 6 | irsa_config = local.irsa_config 7 | addon_context = var.addon_context 8 | } 9 | 10 | resource "aws_iam_policy" "karpenter" { 11 | name = "${var.addon_context.eks_cluster_id}-karpenter" 12 | description = "IAM Policy for Karpenter" 13 | policy = data.aws_iam_policy_document.karpenter.json 14 | } 15 | 16 | resource "aws_iam_policy" "karpenter-spot" { 17 | name = "${var.addon_context.eks_cluster_id}-karpenter-spot" 18 | description = "IAM Policy for Karpenter" 19 | policy = data.aws_iam_policy_document.karpenter-spot-service-linked-policy.json 20 | } 21 | -------------------------------------------------------------------------------- /modules/z-archieve/karpenter/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/karpenter/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/kubecost/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | 4 | # https://github.com/kubecost/cost-analyzer-helm-chart/blob/develop/cost-analyzer/Chart.yaml 5 | helm_config = merge( 6 | { 7 | name = "kubecost" 8 | chart = "cost-analyzer" 9 | repository = "oci://public.ecr.aws/kubecost" 10 | version = "1.97.0" 11 | namespace = "kubecost" 12 | values = [file("${path.module}/values.yaml")] 13 | create_namespace = true 14 | description = "Kubecost Helm Chart deployment configuration" 15 | }, 16 | var.helm_config 17 | ) 18 | 19 | manage_via_gitops = var.manage_via_gitops 20 | addon_context = var.addon_context 21 | } 22 | -------------------------------------------------------------------------------- /modules/z-archieve/kubecost/outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? { enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/kubecost/values.yaml: -------------------------------------------------------------------------------- 1 | # https://github.com/kubecost/cost-analyzer-helm-chart/blob/master/cost-analyzer/values-eks-cost-monitoring.yaml 2 | global: 3 | grafana: 4 | enabled: false 5 | proxy: false 6 | 7 | imageVersion: prod-1.97.0 8 | kubecostFrontend: 9 | image: public.ecr.aws/kubecost/frontend 10 | 11 | kubecostModel: 12 | image: public.ecr.aws/kubecost/cost-model 13 | 14 | kubecostMetrics: 15 | emitPodAnnotations: true 16 | emitNamespaceAnnotations: true 17 | 18 | prometheus: 19 | server: 20 | image: 21 | repository: public.ecr.aws/kubecost/prometheus 22 | tag: v2.35.0 23 | 24 | configmapReload: 25 | prometheus: 26 | image: 27 | repository: public.ecr.aws/bitnami/configmap-reload 28 | tag: 0.7.1 29 | 30 | reporting: 31 | productAnalytics: false 32 | -------------------------------------------------------------------------------- /modules/z-archieve/kubecost/variables.tf: -------------------------------------------------------------------------------- 1 | 
variable "helm_config" { 2 | description = "Helm Config for kubecost." 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /modules/z-archieve/kubecost/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/z-archieve/kubernetes-dashboard/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = "kubernetes-dashboard" 3 | 4 | # https://github.com/kubernetes/dashboard/blob/master/charts/helm-chart/kubernetes-dashboard/Chart.yaml 5 | default_helm_config = { 6 | name = local.name 7 | chart = local.name 8 | repository = "https://kubernetes.github.io/dashboard/" 9 | version = "5.11.0" 10 | namespace = local.name 11 | description = "Kubernetes Dashboard Helm Chart" 12 | } 13 | 14 | helm_config = merge( 15 | local.default_helm_config, 16 | var.helm_config 17 | ) 18 | 19 | argocd_gitops_config = { 20 | enable = true 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /modules/z-archieve/kubernetes-dashboard/main.tf: 
-------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | 4 | manage_via_gitops = var.manage_via_gitops 5 | helm_config = local.helm_config 6 | addon_context = var.addon_context 7 | 8 | depends_on = [kubernetes_namespace_v1.this] 9 | } 10 | 11 | resource "kubernetes_namespace_v1" "this" { 12 | count = try(local.helm_config["create_namespace"], true) && local.helm_config["namespace"] != "kube-system" ? 1 : 0 13 | 14 | metadata { 15 | name = local.helm_config["namespace"] 16 | labels = { 17 | "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /modules/z-archieve/kubernetes-dashboard/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/kubernetes-dashboard/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for the Kubernetes Dashboard" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps" 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | }) 26 | } 27 | -------------------------------------------------------------------------------- /modules/z-archieve/kubernetes-dashboard/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = "hashicorp/kubernetes" 7 | version = ">= 2.10" 8 | } 9 | } 10 | } 11 | 
-------------------------------------------------------------------------------- /modules/z-archieve/metrics-server/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = "metrics-server" 3 | 4 | # https://github.com/kubernetes-sigs/metrics-server/blob/master/charts/metrics-server/Chart.yaml 5 | default_helm_config = { 6 | name = local.name 7 | chart = local.name 8 | repository = "https://kubernetes-sigs.github.io/metrics-server/" 9 | version = "3.11.0" 10 | namespace = "kube-system" 11 | description = "Metric server helm Chart deployment configuration" 12 | } 13 | 14 | helm_config = merge( 15 | local.default_helm_config, 16 | var.helm_config 17 | ) 18 | 19 | argocd_gitops_config = { 20 | enable = true 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /modules/z-archieve/metrics-server/main.tf: -------------------------------------------------------------------------------- 1 | module "helm_addon" { 2 | source = "../helm-addon" 3 | 4 | manage_via_gitops = var.manage_via_gitops 5 | helm_config = local.helm_config 6 | addon_context = var.addon_context 7 | 8 | depends_on = [kubernetes_namespace_v1.this] 9 | } 10 | 11 | resource "kubernetes_namespace_v1" "this" { 12 | count = try(local.helm_config["create_namespace"], true) && local.helm_config["namespace"] != "kube-system" ? 1 : 0 13 | 14 | metadata { 15 | name = local.helm_config["namespace"] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /modules/z-archieve/metrics-server/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/metrics-server/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for Metrics Server" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps" 9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | }) 26 | } 27 | -------------------------------------------------------------------------------- /modules/z-archieve/metrics-server/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = "hashicorp/kubernetes" 7 | version = ">= 2.10" 8 | } 9 | } 10 | } 11 | 
-------------------------------------------------------------------------------- /modules/z-archieve/reloader/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = "reloader" 3 | 4 | argocd_gitops_config = { 5 | enable = true 6 | serviceAccountName = local.name 7 | } 8 | } 9 | 10 | module "helm_addon" { 11 | source = "../helm-addon" 12 | 13 | # https://github.com/stakater/Reloader/blob/master/deployments/kubernetes/chart/reloader/Chart.yaml 14 | helm_config = merge( 15 | { 16 | name = local.name 17 | chart = local.name 18 | repository = "https://stakater.github.io/stakater-charts" 19 | version = "v1.0.63" 20 | namespace = local.name 21 | create_namespace = true 22 | description = "Reloader Helm Chart deployment configuration" 23 | }, 24 | var.helm_config 25 | ) 26 | 27 | manage_via_gitops = var.manage_via_gitops 28 | addon_context = var.addon_context 29 | } 30 | -------------------------------------------------------------------------------- /modules/z-archieve/reloader/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/reloader/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for Reloader." 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 
9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /modules/z-archieve/reloader/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/z-archieve/secrets-store-csi-driver/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = "secrets-store-csi-driver" 3 | 4 | # https://github.com/kubernetes-sigs/secrets-store-csi-driver/blob/main/charts/secrets-store-csi-driver/Chart.yaml 5 | default_helm_config = { 6 | name = local.name 7 | chart = local.name 8 | repository = "https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts" 9 | version = "1.2.4" 10 | namespace = local.name 11 | description = "A Helm chart to install the Secrets Store CSI Driver" 12 | } 13 | 14 | helm_config = merge( 15 | local.default_helm_config, 16 | var.helm_config 17 | ) 18 | 19 | argocd_gitops_config = { 20 | enable = true 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /modules/z-archieve/secrets-store-csi-driver/main.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_namespace_v1" "secrets_store_csi_driver" { 2 | metadata { 3 | name = local.name 4 | 5 | 
labels = { 6 | "app.kubernetes.io/managed-by" = "terraform-aws-eks-blueprints" 7 | } 8 | } 9 | } 10 | 11 | module "helm_addon" { 12 | source = "../helm-addon" 13 | manage_via_gitops = var.manage_via_gitops 14 | helm_config = local.helm_config 15 | addon_context = var.addon_context 16 | 17 | depends_on = [kubernetes_namespace_v1.secrets_store_csi_driver] 18 | } 19 | -------------------------------------------------------------------------------- /modules/z-archieve/secrets-store-csi-driver/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/secrets-store-csi-driver/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | type = any 3 | default = {} 4 | description = "CSI Secrets Store Provider Helm Configurations" 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | type = bool 9 | default = false 10 | description = "Determines if the add-on should be managed via GitOps." 
11 | } 12 | 13 | variable "addon_context" { 14 | type = object({ 15 | aws_caller_identity_account_id = string 16 | aws_caller_identity_arn = string 17 | aws_eks_cluster_endpoint = string 18 | aws_partition_id = string 19 | aws_region_name = string 20 | eks_cluster_id = string 21 | eks_oidc_issuer_url = string 22 | eks_oidc_provider_arn = string 23 | tags = map(string) 24 | irsa_iam_role_path = string 25 | irsa_iam_permissions_boundary = string 26 | }) 27 | description = "Input configuration for the addon" 28 | } 29 | -------------------------------------------------------------------------------- /modules/z-archieve/secrets-store-csi-driver/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | kubernetes = { 6 | source = "hashicorp/kubernetes" 7 | version = ">= 2.10" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/strimzi-kafka-operator/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = "strimzi" 3 | default_helm_config = { 4 | name = local.name 5 | chart = "strimzi-kafka-operator" 6 | repository = "https://strimzi.io/charts/" 7 | version = "0.31.1" 8 | namespace = local.name 9 | create_namespace = true 10 | values = [templatefile("${path.module}/values.yaml", {})] 11 | description = "Strimzi - Apache Kafka on Kubernetes" 12 | } 13 | helm_config = merge(local.default_helm_config, var.helm_config) 14 | } 15 | 16 | #------------------------------------------------- 17 | # Strimzi Kafka Helm Add-on 18 | #------------------------------------------------- 19 | module "helm_addon" { 20 | source = "../helm-addon" 21 | helm_config = local.helm_config 22 | addon_context = var.addon_context 23 | manage_via_gitops = var.manage_via_gitops 24 | } 25 | 
-------------------------------------------------------------------------------- /modules/z-archieve/strimzi-kafka-operator/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? { enable = true } : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/strimzi-kafka-operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for strimzi-kafka-operator. 2 | 3 | resources: 4 | limits: 5 | memory: 1Gi 6 | cpu: 1000m 7 | requests: 8 | memory: 1Gi 9 | cpu: 1000m 10 | -------------------------------------------------------------------------------- /modules/z-archieve/strimzi-kafka-operator/variables.tf: -------------------------------------------------------------------------------- 1 | variable "helm_config" { 2 | description = "Helm provider config for the kafka." 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "manage_via_gitops" { 8 | description = "Determines if the add-on should be managed via GitOps." 
9 | type = bool 10 | default = false 11 | } 12 | 13 | variable "addon_context" { 14 | description = "Input configuration for the addon" 15 | type = object({ 16 | aws_caller_identity_account_id = string 17 | aws_caller_identity_arn = string 18 | aws_eks_cluster_endpoint = string 19 | aws_partition_id = string 20 | aws_region_name = string 21 | eks_cluster_id = string 22 | eks_oidc_issuer_url = string 23 | eks_oidc_provider_arn = string 24 | tags = map(string) 25 | irsa_iam_role_path = string 26 | irsa_iam_permissions_boundary = string 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /modules/z-archieve/strimzi-kafka-operator/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/z-archieve/tetrate-istio/locals_tid.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tetrate_istio_distribution_helm_config = { 3 | description = "Tetrate Istio Distribution - Simple, safe enterprise-grade Istio distribution" 4 | } 5 | 6 | tetrate_istio_distribution_helm_values = { 7 | cni = tolist([yamlencode({ 8 | "global" : { 9 | "hub" : "containers.istio.tetratelabs.com", 10 | "tag" : "${lookup(var.cni_helm_config, "version", local.default_helm_config.version)}-tetratefips-v0", 11 | } 12 | })]) 13 | istiod = tolist([yamlencode({ 14 | "global" : { 15 | "hub" : "containers.istio.tetratelabs.com", 16 | "tag" : "${lookup(var.istiod_helm_config, "version", local.default_helm_config.version)}-tetratefips-v0", 17 | } 18 | })]) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /modules/z-archieve/tetrate-istio/main.tf: -------------------------------------------------------------------------------- 1 | module "base" { 2 | source = "../helm-addon" 3 | 4 | 
count = var.install_base ? 1 : 0 5 | 6 | manage_via_gitops = var.manage_via_gitops 7 | helm_config = local.base_helm_config 8 | addon_context = var.addon_context 9 | } 10 | 11 | module "cni" { 12 | source = "../helm-addon" 13 | 14 | count = var.install_cni ? 1 : 0 15 | 16 | manage_via_gitops = var.manage_via_gitops 17 | helm_config = local.cni_helm_config 18 | addon_context = var.addon_context 19 | 20 | depends_on = [module.base] 21 | } 22 | 23 | module "istiod" { 24 | source = "../helm-addon" 25 | 26 | count = var.install_istiod ? 1 : 0 27 | 28 | manage_via_gitops = var.manage_via_gitops 29 | helm_config = local.istiod_helm_config 30 | addon_context = var.addon_context 31 | 32 | depends_on = [module.cni] 33 | } 34 | 35 | module "gateway" { 36 | source = "../helm-addon" 37 | 38 | count = var.install_gateway ? 1 : 0 39 | 40 | manage_via_gitops = var.manage_via_gitops 41 | helm_config = local.gateway_helm_config 42 | addon_context = var.addon_context 43 | 44 | depends_on = [module.istiod] 45 | } 46 | -------------------------------------------------------------------------------- /modules/z-archieve/tetrate-istio/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? 
local.argocd_gitops_config : null 4 | } 5 | -------------------------------------------------------------------------------- /modules/z-archieve/tetrate-istio/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | } 4 | -------------------------------------------------------------------------------- /modules/z-archieve/velero/outputs.tf: -------------------------------------------------------------------------------- 1 | output "argocd_gitops_config" { 2 | description = "Configuration used for managing the add-on with ArgoCD" 3 | value = var.manage_via_gitops ? local.argocd_gitops_config : null 4 | } 5 | 6 | output "release_metadata" { 7 | description = "Map of attributes of the Helm release metadata" 8 | value = module.helm_addon.release_metadata 9 | } 10 | 11 | output "irsa_arn" { 12 | description = "IAM role ARN for the service account" 13 | value = module.helm_addon.irsa_arn 14 | } 15 | 16 | output "irsa_name" { 17 | description = "IAM role name for the service account" 18 | value = module.helm_addon.irsa_name 19 | } 20 | 21 | output "service_account" { 22 | description = "Name of Kubernetes service account" 23 | value = module.helm_addon.service_account 24 | } 25 | -------------------------------------------------------------------------------- /modules/z-archieve/velero/values.yaml: -------------------------------------------------------------------------------- 1 | initContainers: 2 | - name: velero-plugin-for-csi 3 | image: velero/velero-plugin-for-csi:v0.7.0 4 | volumeMounts: 5 | - mountPath: /target 6 | name: plugins 7 | - name: velero-plugin-for-aws 8 | image: velero/velero-plugin-for-aws:v1.9.0 9 | imagePullPolicy: IfNotPresent 10 | volumeMounts: 11 | - mountPath: /target 12 | name: plugins 13 | 14 | configuration: 15 | backupStorageLocation: 16 | - name: default 17 | provider: aws 18 | bucket: ${bucket} 19 | config: 20 | region: ${region} 21 | 
volumeSnapshotLocation: 22 | - name: default 23 | provider: aws 24 | config: 25 | region: ${region} 26 | 27 | credentials: 28 | useSecret: false 29 | -------------------------------------------------------------------------------- /modules/z-archieve/velero/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/z-archieve/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 3.72" 8 | } 9 | time = { 10 | source = "hashicorp/time" 11 | version = ">= 0.8" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/z-archieve/vpa/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = try(var.helm_config.name, "vpa") 3 | namespace = try(var.helm_config.namespace, local.name) 4 | } 5 | 6 | resource "kubernetes_namespace_v1" "vpa" { 7 | count = try(var.helm_config.create_namespace, true) && local.namespace != "kube-system" ? 
1 : 0

  metadata {
    name = local.namespace
  }
}

module "helm_addon" {
  source = "../helm-addon"

  # Chart defaults below; anything the caller puts in var.helm_config wins
  # because it is merged last.
  # https://github.com/FairwindsOps/charts/blob/master/stable/vpa/Chart.yaml
  helm_config = merge(
    {
      name       = local.name
      chart      = local.name
      repository = "https://charts.fairwinds.com/stable"
      version    = "1.5.0"
      # Prefer the namespace created above; fall back to the local when the
      # namespace resource was not created (count = 0).
      namespace   = try(kubernetes_namespace_v1.vpa[0].metadata[0].name, local.namespace)
      description = "Kubernetes Vertical Pod Autoscaler"
    },
    var.helm_config
  )

  manage_via_gitops = var.manage_via_gitops
  addon_context     = var.addon_context
}

--------------------------------------------------------------------------------
/modules/z-archieve/vpa/outputs.tf:
--------------------------------------------------------------------------------

output "argocd_gitops_config" {
  description = "Configuration used for managing the add-on with ArgoCD"
  value       = var.manage_via_gitops ? { enable = true } : null
}

output "release_metadata" {
  description = "Map of attributes of the Helm release metadata"
  value       = module.helm_addon.release_metadata
}

output "irsa_arn" {
  description = "IAM role ARN for the service account"
  value       = module.helm_addon.irsa_arn
}

output "irsa_name" {
  description = "IAM role name for the service account"
  value       = module.helm_addon.irsa_name
}

output "service_account" {
  description = "Name of Kubernetes service account"
  value       = module.helm_addon.service_account
}

--------------------------------------------------------------------------------
/modules/z-archieve/vpa/variables.tf:
--------------------------------------------------------------------------------

variable "helm_config" {
  description = "Helm provider config for VPA"
  type        = any
  default     = {}
}

variable "manage_via_gitops" {
  description = "Determines if
the add-on should be managed via GitOps"
  type        = bool
  default     = false
}

variable "addon_context" {
  description = "Input configuration for the addon"
  type = object({
    aws_caller_identity_account_id = string
    aws_caller_identity_arn        = string
    aws_eks_cluster_endpoint       = string
    aws_partition_id               = string
    aws_region_name                = string
    eks_cluster_id                 = string
    eks_oidc_issuer_url            = string
    eks_oidc_provider_arn          = string
    tags                           = map(string)
  })
}

--------------------------------------------------------------------------------
/modules/z-archieve/vpa/versions.tf:
--------------------------------------------------------------------------------

terraform {
  required_version = ">= 1.0.0"

  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.10"
    }
  }
}

--------------------------------------------------------------------------------
/tfsec.yaml:
--------------------------------------------------------------------------------

# tfsec rule exclusions; each entry documents why the check is suppressed.
exclude:
  - aws-iam-no-policy-wildcards # Wildcards required in addon IAM policies
  - aws-vpc-no-excessive-port-access # VPC settings left up to user implementation for recommended practices
  - aws-vpc-no-public-ingress-acl # VPC settings left up to user implementation for recommended practices
  - aws-eks-no-public-cluster-access-to-cidr # Public access enabled for better example usability, users are recommended to disable if possible
  - aws-eks-no-public-cluster-access # Public access enabled for better example usability, users are recommended to disable if possible
  - aws-eks-encrypt-secrets # Module defaults to encrypting secrets with CMK, but this is not hardcoded and therefore a spurious error
  - aws-vpc-no-public-egress-sgr # Added in v1.22
  - aws-ec2-no-public-egress-sgr
  - aws-ec2-no-public-ingress-sgr
  - aws-ec2-enforce-http-token-imds
  -
aws-ec2-no-public-ip-subnet # VPN IP
  - aws-ec2-require-vpc-flow-logs-for-all-vpcs # disabled flow logs by default

--------------------------------------------------------------------------------
/versions.tf:
--------------------------------------------------------------------------------

terraform {
  required_version = ">= 1.0.0"

  # Minimum provider versions for the root module.
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.23"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.13"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.6"
    }
    time = {
      source  = "hashicorp/time"
      version = ">= 0.6.0"
    }
    random = {
      source  = "hashicorp/random"
      version = ">= 3.0.0"
    }
  }
}

--------------------------------------------------------------------------------