├── network.tf ├── modules ├── app-mesh │ ├── outputs.tf │ ├── labels.tf │ ├── variables.tf │ ├── main.tf │ └── README.md ├── prometheus │ ├── outputs.tf │ ├── labels.tf │ ├── main.tf │ ├── variables.tf │ └── README.md ├── karpenter │ ├── outputs.tf │ ├── labels.tf │ ├── variables.tf │ ├── README.md │ └── main.tf ├── lb-controller │ ├── outputs.tf │ ├── labels.tf │ ├── variables.tf │ ├── main.tf │ └── README.md ├── metrics-server │ ├── outputs.tf │ ├── labels.tf │ ├── main.tf │ ├── variables.tf │ └── README.md ├── cluster-autoscaler │ ├── outputs.tf │ ├── charts │ │ └── cluster-autoscaler │ │ │ ├── templates │ │ │ ├── serviceaccount.yaml │ │ │ ├── ingress.yaml │ │ │ ├── _helpers.tpl │ │ │ └── rbac.yaml │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ ├── labels.tf │ ├── variables.tf │ └── main.tf ├── node-termination-handler │ ├── outputs.tf │ ├── labels.tf │ ├── main.tf │ └── variables.tf ├── alb-ingress │ ├── outputs.tf │ ├── labels.tf │ └── variables.tf ├── ecr │ ├── versions.tf │ ├── default.tf │ ├── labels.tf │ ├── outputs.tf │ ├── variables.tf │ └── main.tf ├── iam-role-for-serviceaccount │ ├── versions.tf │ ├── labels.tf │ ├── outputs.tf │ ├── main.tf │ ├── variables.tf │ └── README.md ├── addon │ ├── outputs.tf │ ├── default.tf │ ├── labels.tf │ ├── variables.tf │ ├── main.tf │ └── README.md └── container-insights │ ├── outputs.tf │ ├── labels.tf │ ├── variables.tf │ ├── README.md │ └── main.tf ├── examples ├── app-mesh │ ├── modules │ │ └── codebuild │ │ │ ├── default.tf │ │ │ ├── app │ │ │ ├── Gemfile │ │ │ ├── modules │ │ │ │ ├── hostname.rb │ │ │ │ ├── restaurant.rb │ │ │ │ ├── getstats.rb │ │ │ │ ├── getvotes.rb │ │ │ │ ├── restaurantsdbread.rb │ │ │ │ ├── pageviews.rb │ │ │ │ └── restaurantsdbupdate.rb │ │ │ ├── startup.sh │ │ │ ├── README.md │ │ │ └── Dockerfile │ │ │ ├── outputs.tf │ │ │ ├── variables.tf │ │ │ ├── buildspec.yaml │ │ │ └── main.tf │ ├── default.auto.tfvars │ ├── fixture.tc1.tfvars │ ├── outputs.tf │ ├── variables.tf 
│ ├── main.tf │ └── README.md ├── emr │ ├── labels.tf │ ├── templates │ │ ├── delete-emr-virtual-cluster.tpl │ │ ├── create-emr-virtual-cluster.tpl │ │ └── create-emr-virtual-cluster-request.tpl │ ├── default.auto.tfvars │ ├── variables.tf │ ├── outputs.tf │ └── main.tf ├── addon │ ├── labels.tf │ ├── outputs.tf │ ├── default.auto.tfvars │ ├── variables.tf │ ├── main.tf │ └── README.md ├── arm64 │ ├── app │ │ ├── Dockerfile │ │ └── app.js │ ├── buildspec-docker.yaml │ ├── tc3.tfvars │ ├── default.auto.tfvars │ ├── tc2.tfvars │ ├── outputs.tf │ ├── templates │ │ └── hello-nodejs.tpl │ ├── tc1.tfvars │ ├── buildspec-manifest.yaml │ ├── main.tf │ ├── variables.tf │ └── awscb.tf ├── ecr │ ├── app │ │ ├── Dockerfile │ │ └── app.js │ ├── templates │ │ ├── build.tpl │ │ └── hello-nodejs.tpl │ ├── tc1.tfvars │ ├── default.auto.tfvars │ ├── outputs.tf │ ├── main.tf │ ├── variables.tf │ └── README.md ├── kubeflow │ ├── templates │ │ ├── kfuninst.tpl │ │ └── kfinst.tpl │ ├── outputs.tf │ ├── default.auto.tfvars │ ├── variables.tf │ └── main.tf ├── fargate │ ├── default.auto.tfvars │ ├── tc1.tfvars │ ├── tc2.tfvars │ ├── outputs.tf │ ├── manifests │ │ └── hello-kubernetes.yaml │ ├── main.tf │ ├── variables.tf │ └── README.md ├── cw │ ├── fixture.tc1.tfvars │ ├── fixture.tc2.tfvars │ ├── fixture.tc3.tfvars │ ├── outputs.tf │ ├── default.auto.tfvars │ ├── main.tf │ ├── variables.tf │ └── README.md ├── irsa │ ├── default.auto.tfvars │ ├── fixture.tc1.tfvars │ ├── outputs.tf │ ├── main.tf │ └── variables.tf ├── autoscaling │ ├── default.auto.tfvars │ ├── fixture.tc2.tfvars │ ├── fixture.tc5.tfvars │ ├── fixture.tc1.tfvars │ ├── fixture.tc3.tfvars │ ├── outputs.tf │ ├── fixture.tc4.tfvars │ ├── manifests │ │ └── php-apache.yaml │ ├── variables.tf │ └── main.tf ├── lb │ ├── outputs.tf │ ├── default.auto.tfvars │ ├── fixture.tc2.tfvars │ ├── fixture.tc1.tfvars │ ├── main.tf │ └── variables.tf ├── spot │ ├── default.auto.tfvars │ ├── fixture.tc2.tfvars │ ├── outputs.tf │ ├── 
fixture.tc1.tfvars │ ├── variables.tf │ ├── main.tf │ └── README.md └── bottlerocket │ ├── default.auto.tfvars │ ├── outputs.tf │ ├── fixture.tc2.tfvars │ ├── main.tf │ ├── fixture.tc1.tfvars │ ├── variables.tf │ └── README.md ├── versions.tf ├── images ├── aws-cw-cpu-alarm.png ├── ecr-vpc-endpoints.png ├── aws-ec2-lbc-game-2048.png ├── bottlerocket-features.png ├── aws-am-sidecar-pattern.png ├── aws-am-yelb-architecture.png ├── aws-am-yelb-screenshot.png ├── aws-emr-on-eks-diagram.png ├── kubeflow-setup-workspace.png ├── aws-am-traffic-management.png ├── aws-cw-container-insights.png ├── kubeflow-platform-overview.png ├── aws-cw-container-insights-cpu.png ├── aws-ecr-multi-arch-manifest.png ├── kubeflow-dashboard-first-look.png ├── aws-cw-container-insights-disk.png ├── aws-ecr-multi-arch-build-pipeline.png ├── aws-cw-container-insights-cpu-map-view.png ├── bottlerocket-security-first-container-host-os.png └── aws-chaos-engineering-workshop-eks-architecture.png ├── CODEOWNER ├── defaults.tf ├── .github └── workflows │ └── stale.yaml ├── templates └── bottlerocket.tpl ├── .gitignore ├── LICENSE ├── labels.tf ├── variables.tf ├── outputs.tf └── script └── update-kubeconfig.sh /network.tf: -------------------------------------------------------------------------------- 1 | ## virtual private cloud 2 | 3 | -------------------------------------------------------------------------------- /modules/app-mesh/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | -------------------------------------------------------------------------------- /modules/prometheus/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | -------------------------------------------------------------------------------- /modules/karpenter/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | 3 | 
-------------------------------------------------------------------------------- /modules/lb-controller/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | -------------------------------------------------------------------------------- /modules/metrics-server/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | 3 | -------------------------------------------------------------------------------- /modules/node-termination-handler/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | -------------------------------------------------------------------------------- /modules/alb-ingress/outputs.tf: -------------------------------------------------------------------------------- 1 | ### deprecated 2 | # output variables 3 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/default.tf: -------------------------------------------------------------------------------- 1 | # default variables 2 | 3 | locals {} 4 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | ## requirements 2 | 3 | terraform { 4 | required_version = ">= 0.13" 5 | } 6 | -------------------------------------------------------------------------------- /modules/ecr/versions.tf: -------------------------------------------------------------------------------- 1 | ## requirements 2 | 3 | terraform { 4 | required_version = ">= 0.13" 5 | } 6 | 
-------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | gem 'pg' 3 | gem 'redis' 4 | -------------------------------------------------------------------------------- /examples/emr/labels.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | default-tags = merge( 3 | { "terraform.io" = "managed" }, 4 | ) 5 | } 6 | -------------------------------------------------------------------------------- /images/aws-cw-cpu-alarm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-cw-cpu-alarm.png -------------------------------------------------------------------------------- /examples/addon/labels.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | default-tags = merge( 3 | { "terraform.io" = "managed" }, 4 | ) 5 | } 6 | -------------------------------------------------------------------------------- /images/ecr-vpc-endpoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/ecr-vpc-endpoints.png -------------------------------------------------------------------------------- /images/aws-ec2-lbc-game-2048.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-ec2-lbc-game-2048.png -------------------------------------------------------------------------------- /images/bottlerocket-features.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/bottlerocket-features.png -------------------------------------------------------------------------------- /images/aws-am-sidecar-pattern.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-am-sidecar-pattern.png -------------------------------------------------------------------------------- /images/aws-am-yelb-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-am-yelb-architecture.png -------------------------------------------------------------------------------- /images/aws-am-yelb-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-am-yelb-screenshot.png -------------------------------------------------------------------------------- /images/aws-emr-on-eks-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-emr-on-eks-diagram.png -------------------------------------------------------------------------------- /images/kubeflow-setup-workspace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/kubeflow-setup-workspace.png -------------------------------------------------------------------------------- /modules/iam-role-for-serviceaccount/versions.tf: -------------------------------------------------------------------------------- 1 | ## requirements 2 | 3 | terraform { 4 | required_version = ">= 0.13" 5 | } 6 | -------------------------------------------------------------------------------- 
/images/aws-am-traffic-management.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-am-traffic-management.png -------------------------------------------------------------------------------- /images/aws-cw-container-insights.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-cw-container-insights.png -------------------------------------------------------------------------------- /images/kubeflow-platform-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/kubeflow-platform-overview.png -------------------------------------------------------------------------------- /modules/addon/outputs.tf: -------------------------------------------------------------------------------- 1 | output "addon" { 2 | description = "Attributes of eks addon" 3 | value = aws_eks_addon.addon 4 | } 5 | -------------------------------------------------------------------------------- /images/aws-cw-container-insights-cpu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-cw-container-insights-cpu.png -------------------------------------------------------------------------------- /images/aws-ecr-multi-arch-manifest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-ecr-multi-arch-manifest.png -------------------------------------------------------------------------------- /images/kubeflow-dashboard-first-look.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/kubeflow-dashboard-first-look.png -------------------------------------------------------------------------------- /images/aws-cw-container-insights-disk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-cw-container-insights-disk.png -------------------------------------------------------------------------------- /images/aws-ecr-multi-arch-build-pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-ecr-multi-arch-build-pipeline.png -------------------------------------------------------------------------------- /images/aws-cw-container-insights-cpu-map-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-cw-container-insights-cpu-map-view.png -------------------------------------------------------------------------------- /modules/addon/default.tf: -------------------------------------------------------------------------------- 1 | # default variables 2 | 3 | locals { 4 | default_addon_config = { 5 | name = "vpc-cni" 6 | version = null 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /examples/arm64/app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | WORKDIR /usr/src/app 3 | COPY package*.json app.js ./ 4 | RUN npm install 5 | EXPOSE 3000 6 | CMD ["node", "app.js"] 7 | -------------------------------------------------------------------------------- /examples/ecr/app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | WORKDIR /usr/src/app 3 | COPY package*.json app.js ./ 4 | 
RUN npm install 5 | EXPOSE 3000 6 | CMD ["node", "app.js"] 7 | -------------------------------------------------------------------------------- /images/bottlerocket-security-first-container-host-os.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/bottlerocket-security-first-container-host-os.png -------------------------------------------------------------------------------- /images/aws-chaos-engineering-workshop-eks-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ann-felix/terraform-aws-eks/HEAD/images/aws-chaos-engineering-workshop-eks-architecture.png -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/modules/hostname.rb: -------------------------------------------------------------------------------- 1 | require 'socket' 2 | 3 | def hostname() 4 | hostnamedata = 'ApplicationVersion2' 5 | return hostnamedata 6 | end 7 | -------------------------------------------------------------------------------- /modules/addon/labels.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name-tag = { "Name" = lookup(var.addon_config, "name", local.default_addon_config.name) } 3 | default-tags = merge( 4 | { "terraform.io" = "managed" }, 5 | local.name-tag 6 | ) 7 | } 8 | -------------------------------------------------------------------------------- /examples/kubeflow/templates/kfuninst.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export WORK_DIR=$${PWD} 4 | 5 | export CONFIG_FILE=$WORK_DIR/kfctl_aws.yaml 6 | 7 | kfctl delete -V -f $CONFIG_FILE 8 | 9 | unset CONFIG_FILE 10 | unset WORK_DIR 11 | -------------------------------------------------------------------------------- 
/examples/ecr/templates/build.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin ${ecr_uri} 4 | docker build -t app . 5 | docker tag app:latest ${ecr_uri} 6 | docker push ${ecr_uri} 7 | -------------------------------------------------------------------------------- /examples/emr/templates/delete-emr-virtual-cluster.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | OUTPUT='.emr_cli_result' 3 | 4 | while read id; do 5 | aws emr-containers delete-virtual-cluster --region "${aws_region}" --output text --id $${id} 6 | done < $${OUTPUT} 7 | rm $${OUTPUT} 8 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/outputs.tf: -------------------------------------------------------------------------------- 1 | resource "local_file" "manifest" { 2 | content = templatefile("${path.module}/templates/yelb.tpl", { 3 | ecr_uri = module.ecr.url 4 | }) 5 | filename = "${path.cwd}/yelb.yaml" 6 | file_permission = "0400" 7 | } -------------------------------------------------------------------------------- /examples/emr/templates/create-emr-virtual-cluster.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | OUTPUT='.emr_cli_result' 3 | 4 | aws emr-containers create-virtual-cluster --region "${aws_region}" --output text \ 5 | --cli-input-json file://create-emr-virtual-cluster-request.json \ 6 | --query 'id' 2>&1 | tee -a $${OUTPUT} 7 | -------------------------------------------------------------------------------- /modules/addon/variables.tf: -------------------------------------------------------------------------------- 1 | ### addon 2 | variable "addon_config" { 3 | description = "EKS Add-on configuration" 4 | default = {} 5 | } 6 | 7 | ### tags 8 | 
variable "tags" { 9 | description = "The key-value maps for tagging" 10 | type = map(string) 11 | default = {} 12 | } 13 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/modules/restaurant.rb: -------------------------------------------------------------------------------- 1 | require_relative 'restaurantsdbread' 2 | require_relative 'restaurantsdbupdate' 3 | 4 | def restaurantsupdate(restaurant) 5 | restaurantsdbupdate(restaurant) 6 | restaurantcount = restaurantsdbread(restaurant) 7 | return restaurantcount 8 | end 9 | -------------------------------------------------------------------------------- /examples/kubeflow/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig" { 2 | description = "Bash script to update the kubeconfig file for the EKS cluster" 3 | value = module.eks.kubeconfig 4 | } 5 | 6 | output "features" { 7 | description = "Features configurations of the AWS EKS cluster" 8 | value = module.eks.features 9 | } 10 | -------------------------------------------------------------------------------- /examples/emr/templates/create-emr-virtual-cluster-request.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "name": "${emr_name}", 3 | "containerProvider": { 4 | "type": "EKS", 5 | "id": "${eks_name}", 6 | "info": { 7 | "eksInfo": { 8 | "namespace": "default" 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/variables.tf: -------------------------------------------------------------------------------- 1 | ### description 2 | variable "name" { 3 | description = "The logical name of the module instance" 4 | type = string 5 | } 6 | 7 | ### tags 8 | variable "tags" { 9 | description = "The key-value maps for tagging" 10 | type = map(string) 11 | default = {} 12 | } 13 | 
-------------------------------------------------------------------------------- /CODEOWNER: -------------------------------------------------------------------------------- 1 | # This is a comment. 2 | # Each line is a file pattern followed by one or more owners. 3 | 4 | # These owners will be the default owners for everything in 5 | # the repo. Unless a later match takes precedence, 6 | # @global-owner1 and @global-owner2 will be requested for 7 | # review when someone opens a pull request. 8 | * @Young-ook 9 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/modules/getstats.rb: -------------------------------------------------------------------------------- 1 | require_relative 'hostname' 2 | require_relative 'pageviews' 3 | 4 | def getstats() 5 | hostname = hostname() 6 | pageviews = pageviews() 7 | stats = '{"hostname": "' + hostname + '"' + ", " + '"pageviews":' + pageviews + "}" 8 | return stats 9 | end 10 | -------------------------------------------------------------------------------- /modules/addon/main.tf: -------------------------------------------------------------------------------- 1 | ## eks addon 2 | 3 | resource "aws_eks_addon" "addon" { 4 | addon_name = lookup(var.addon_config, "name", local.default_addon_config.name) 5 | addon_version = lookup(var.addon_config, "version", local.default_addon_config.version) 6 | cluster_name = lookup(var.addon_config, "eks_name") 7 | tags = var.tags 8 | } 9 | -------------------------------------------------------------------------------- /examples/fargate/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | name = "eks-fargate" 4 | tags = { 5 | env = "dev" 6 | } 7 | kubernetes_version = "1.21" 8 | fargate_profiles = [ 9 | { 10 | name = "default" 11 | namespace = "default" 12 | }, 13 | ] 
14 | -------------------------------------------------------------------------------- /examples/fargate/tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | name = "eks-fargate-tc1" 4 | tags = { 5 | env = "dev" 6 | test = "tc1" 7 | } 8 | kubernetes_version = "1.21" 9 | fargate_profiles = [ 10 | { 11 | name = "default" 12 | namespace = "default" 13 | }, 14 | ] 15 | -------------------------------------------------------------------------------- /examples/addon/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | description = "The attributes of Amazon VPC" 3 | value = module.vpc.vpc 4 | } 5 | 6 | output "kubeconfig" { 7 | description = "Bash script to update kubeconfig file" 8 | value = module.eks.kubeconfig 9 | } 10 | 11 | output "addons" { 12 | description = "EKS addons" 13 | value = module.addons 14 | } 15 | -------------------------------------------------------------------------------- /examples/cw/fixture.tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | name = "eks-cw-tc1" 4 | tags = { 5 | env = "dev" 6 | test = "tc1" 7 | metrics = "true" 8 | logs = "false" 9 | } 10 | kubernetes_version = "1.20" 11 | enable_cw = { 12 | enable_metrics = true 13 | enable_logs = false 14 | } 15 | -------------------------------------------------------------------------------- /examples/cw/fixture.tc2.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | name = "eks-cw-tc2" 4 | tags = { 5 | env = "dev" 6 | test = "tc2" 7 | metrics = "false" 8 | logs = "true" 9 | } 10 | kubernetes_version = 
"1.20" 11 | enable_cw = { 12 | enable_metrics = false 13 | enable_logs = true 14 | } 15 | -------------------------------------------------------------------------------- /examples/cw/fixture.tc3.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | name = "eks-cw-tc3" 4 | tags = { 5 | env = "dev" 6 | test = "tc3" 7 | metrics = "true" 8 | logs = "true" 9 | } 10 | kubernetes_version = "1.20" 11 | enable_cw = { 12 | enable_metrics = true 13 | enable_logs = true 14 | } 15 | -------------------------------------------------------------------------------- /modules/ecr/default.tf: -------------------------------------------------------------------------------- 1 | # default variables 2 | 3 | locals { 4 | default_lifecycle_policy = { 5 | rules = [{ 6 | rulePriority = 1 7 | description = "Only keep 2 images" 8 | selection = { 9 | tagStatus = "any" 10 | countType = "imageCountMoreThan" 11 | countNumber = 2 12 | } 13 | action = { 14 | type = "expire" 15 | } 16 | }] 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /defaults.tf: -------------------------------------------------------------------------------- 1 | ### default values 2 | 3 | locals { 4 | default_eks_config = { 5 | ami_type = "AL2_x86_64" 6 | instance_type = "t3.medium" 7 | capacity_type = null # allowed values: ON_DEMAND, SPOT, and default is null 8 | } 9 | default_bottlerocket_config = { 10 | admin_container_enabled = false 11 | admin_container_superpowered = false 12 | admin_container_source = "" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/karpenter/labels.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "suffix" { 2 | length = 5 3 | upper = false 4 | lower = true 5 | number = false 6 | special 
= false 7 | } 8 | 9 | locals { 10 | suffix = var.petname ? random_string.suffix.result : "" 11 | name = join("-", compact(["karpenter", local.suffix])) 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "Name" = local.name }, 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /examples/ecr/app/app.js: -------------------------------------------------------------------------------- 1 | // Hello World sample app. 2 | const http = require('http'); 3 | 4 | const port = 3000; 5 | 6 | const server = http.createServer((req, res) => { 7 | res.statusCode = 200; 8 | res.setHeader('Content-Type', 'text/plain'); 9 | res.end(`Hello World. This processor architecture is ${process.arch}`); 10 | }); 11 | 12 | server.listen(port, () => { 13 | console.log(`Server running on processor architecture ${process.arch}`); 14 | }); 15 | -------------------------------------------------------------------------------- /examples/arm64/app/app.js: -------------------------------------------------------------------------------- 1 | // Hello World sample app. 2 | const http = require('http'); 3 | 4 | const port = 3000; 5 | 6 | const server = http.createServer((req, res) => { 7 | res.statusCode = 200; 8 | res.setHeader('Content-Type', 'text/plain'); 9 | res.end(`Hello World. This processor architecture is ${process.arch}`); 10 | }); 11 | 12 | server.listen(port, () => { 13 | console.log(`Server running on processor architecture ${process.arch}`); 14 | }); 15 | -------------------------------------------------------------------------------- /modules/app-mesh/labels.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "appmesh-suffix" { 2 | length = 5 3 | upper = false 4 | lower = true 5 | number = false 6 | special = false 7 | } 8 | 9 | locals { 10 | suffix = var.petname ? 
random_string.appmesh-suffix.result : "" 11 | name = join("-", compact([var.cluster_name, "app-mesh", local.suffix])) 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "Name" = local.name }, 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/charts/cluster-autoscaler/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "cluster-autoscaler.serviceAccountName" . }} 6 | labels: 7 | {{- include "cluster-autoscaler.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /.github/workflows/stale.yaml: -------------------------------------------------------------------------------- 1 | name: 'Close stale issues and PRs' 2 | on: 3 | schedule: 4 | - cron: '30 1 * * *' 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v4 11 | with: 12 | stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 
13 | days-before-stale: 30 14 | days-before-close: 5 15 | -------------------------------------------------------------------------------- /examples/emr/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | use_default_vpc = true 4 | name = "eks-emr" 5 | tags = { 6 | env = "prod" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [ 10 | { 11 | name = "spark" 12 | desired_size = 3 13 | min_size = 3 14 | max_size = 9 15 | instance_type = "m5.large" 16 | } 17 | ] 18 | -------------------------------------------------------------------------------- /modules/metrics-server/labels.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "metrics-suffix" { 2 | length = 5 3 | upper = false 4 | lower = true 5 | number = false 6 | special = false 7 | } 8 | 9 | locals { 10 | suffix = var.petname ? 
random_string.metrics-suffix.result : "" 11 | name = join("-", compact([var.cluster_name, "metrics-server", local.suffix])) 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "Name" = local.name }, 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /examples/app-mesh/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2c", "ap-northeast-2d"] 3 | use_default_vpc = true 4 | name = "eks-appmesh" 5 | tags = { 6 | env = "dev" 7 | } 8 | kubernetes_version = "1.20" 9 | managed_node_groups = [ 10 | { 11 | name = "default" 12 | min_size = 1 13 | max_size = 3 14 | desired_size = 1 15 | instance_type = "t3.large" 16 | } 17 | ] 18 | -------------------------------------------------------------------------------- /modules/lb-controller/labels.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "lbc-suffix" { 2 | length = 5 3 | upper = false 4 | lower = true 5 | number = false 6 | special = false 7 | } 8 | 9 | locals { 10 | suffix = var.petname ? random_string.lbc-suffix.result : "" 11 | name = join("-", compact([var.cluster_name, "aws-load-balancer-controller", local.suffix])) 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "Name" = local.name }, 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /modules/prometheus/labels.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "prometheus-suffix" { 2 | length = 5 3 | upper = false 4 | lower = true 5 | number = false 6 | special = false 7 | } 8 | 9 | locals { 10 | suffix = var.petname ? 
random_string.prometheus-suffix.result : "" 11 | name = join("-", compact([var.cluster_name, "prometheus-server", local.suffix])) 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "Name" = local.name }, 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /examples/irsa/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | name = "eks-irsa" 2 | tags = { 3 | env = "dev" 4 | } 5 | aws_region = "ap-northeast-2" 6 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 7 | cidr = "10.1.0.0/16" 8 | enable_igw = true 9 | enable_ngw = true 10 | single_ngw = true 11 | kubernetes_version = "1.21" 12 | fargate_profiles = [ 13 | { 14 | name = "default" 15 | namespace = "default" 16 | }, 17 | ] 18 | -------------------------------------------------------------------------------- /modules/iam-role-for-serviceaccount/labels.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "irsa-suffix" { 2 | length = 12 3 | upper = false 4 | lower = true 5 | number = true 6 | special = false 7 | } 8 | 9 | locals { 10 | suffix = random_string.irsa-suffix.result 11 | name = var.name == null ? 
substr(join("-", ["irsa", local.suffix]), 0, 64) : substr(var.name, 0, 64) 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "Name" = local.name }, 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /templates/bottlerocket.tpl: -------------------------------------------------------------------------------- 1 | [settings.kubernetes] 2 | cluster-name = "${cluster_name}" 3 | api-server = "${cluster_endpoint}" 4 | cluster-certificate = "${cluster_ca_data}" 5 | 6 | [settings.host-containers.admin] 7 | enabled = ${admin_container_enabled} 8 | superpowered = ${admin_container_superpowered} 9 | %{ if admin_container_source != "" } 10 | source = "${admin_container_source}" 11 | %{ endif } 12 | 13 | [settings.host-containers.control] 14 | enabled = ${control_container_enabled} 15 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/labels.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "autoscaler-suffix" { 2 | length = 5 3 | upper = false 4 | lower = true 5 | number = false 6 | special = false 7 | } 8 | 9 | locals { 10 | suffix = var.petname ? 
random_string.autoscaler-suffix.result : "" 11 | name = join("-", compact([var.cluster_name, "cluster-autoscaler", local.suffix])) 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "Name" = local.name }, 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/buildspec.yaml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | phases: 3 | pre_build: 4 | commands: 5 | - cd examples/app-mesh/modules/codebuild/app/ 6 | - $(aws ecr get-login --no-include-email) 7 | - echo $TAG 8 | - IMAGE_URI="$REPOSITORY_URI:$TAG" 9 | build: 10 | commands: 11 | - echo Build a container image started on `date` 12 | - docker build --tag "$IMAGE_URI" . 13 | post_build: 14 | commands: 15 | - docker push "$IMAGE_URI" 16 | -------------------------------------------------------------------------------- /examples/kubeflow/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | name = "eks-kubeflow" 4 | tags = { 5 | env = "dev" 6 | } 7 | kubernetes_version = "1.21" 8 | managed_node_groups = [ 9 | { 10 | name = "default" 11 | min_size = 1 12 | max_size = 9 13 | desired_size = 7 14 | instance_type = "t3.small" 15 | } 16 | ] 17 | node_groups = [] 18 | fargate_profiles = [] 19 | -------------------------------------------------------------------------------- /modules/alb-ingress/labels.tf: -------------------------------------------------------------------------------- 1 | ### deprecated 2 | resource "random_string" "albingress-suffix" { 3 | length = 5 4 | upper = false 5 | lower = true 6 | number = false 7 | special = false 8 | } 9 | 10 | locals { 11 | suffix = var.petname ? 
random_string.albingress-suffix.result : "" 12 | name = join("-", compact([var.cluster_name, "alb-ingress", local.suffix])) 13 | default-tags = merge( 14 | { "terraform.io" = "managed" }, 15 | { "Name" = local.name }, 16 | ) 17 | } 18 | -------------------------------------------------------------------------------- /modules/container-insights/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | 3 | output "helm" { 4 | description = "The generated attributes of helm packages" 5 | value = zipmap( 6 | ["metrics", "logs"], 7 | [helm_release.metrics, helm_release.logs] 8 | ) 9 | } 10 | 11 | output "features" { 12 | description = "Features configurations for cloudwatch container insights" 13 | value = { 14 | "metrics_enabled" = local.metrics_enabled 15 | "logs_enabled" = local.logs_enabled 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/app-mesh/fixture.tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | name = "eks-appmesh-tc1" 4 | tags = { 5 | env = "dev" 6 | test = "tc1" 7 | } 8 | kubernetes_version = "1.20" 9 | managed_node_groups = [ 10 | { 11 | name = "default" 12 | min_size = 1 13 | max_size = 3 14 | desired_size = 1 15 | ami_type = "AL2_x86_64" 16 | instance_type = "t3.large" 17 | } 18 | ] 19 | -------------------------------------------------------------------------------- /examples/arm64/buildspec-docker.yaml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | phases: 3 | install: 4 | commands: 5 | - yum update -y 6 | pre_build: 7 | commands: 8 | - cd examples/arm64/app/ 9 | - $(aws ecr get-login --no-include-email) 10 | - echo $TAG 11 | - IMAGE_URI="$REPOSITORY_URI:$TAG" 12 | build: 13 | commands: 14 | - echo Build started on `date` 15 | 
- docker build --tag $IMAGE_URI . 16 | post_build: 17 | commands: 18 | - docker push $IMAGE_URI 19 | -------------------------------------------------------------------------------- /examples/addon/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | use_default_vpc = true 4 | name = "eks-addon" 5 | tags = { 6 | env = "dev" 7 | } 8 | enable_ssm = true 9 | kubernetes_version = "1.21" 10 | managed_node_groups = [ 11 | { 12 | name = "default" 13 | desired_size = 1 14 | min_size = 1 15 | max_size = 1 16 | instance_type = "m5.large" 17 | } 18 | ] 19 | -------------------------------------------------------------------------------- /examples/arm64/tc3.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | name = "eks-arm64-tc3" 4 | tags = { 5 | env = "dev" 6 | test = "tc3" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [ 10 | { 11 | name = "default" 12 | min_size = 1 13 | max_size = 3 14 | desired_size = 1 15 | instance_type = "m6g.medium" 16 | ami_type = "AL2_ARM_64" 17 | } 18 | ] 19 | node_groups = [] 20 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/charts/cluster-autoscaler/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /examples/autoscaling/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | use_default_vpc = true 4 | name = "eks-autoscaling" 5 | tags = { 6 | env = "dev" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [ 10 | { 11 | name = "default" 12 | desired_size = 2 13 | max_size = 6 14 | instance_type = "t3.small" 15 | } 16 | ] 17 | node_groups = [] 18 | fargate_profiles = [] 19 | -------------------------------------------------------------------------------- /modules/node-termination-handler/labels.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "node-termination-handler-suffix" { 2 | length = 5 3 | upper = false 4 | lower = true 5 | number = false 6 | special = false 7 | } 8 | 9 | locals { 10 | suffix = var.petname ? 
random_string.node-termination-handler-suffix.result : "" 11 | # Fix copy-paste bug: component label must be the handler's own name, not "app-mesh" (matches sibling modules' convention) name = join("-", compact([var.cluster_name, "aws-node-termination-handler", local.suffix])) 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "Name" = local.name }, 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /examples/autoscaling/fixture.tc2.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | name = "eks-autoscaling-tc2" 4 | tags = { 5 | env = "dev" 6 | test = "tc2" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [ 10 | { 11 | name = "default" 12 | min_size = 1 13 | max_size = 3 14 | desired_size = 1 15 | instance_type = "t3.small" 16 | } 17 | ] 18 | node_groups = [] 19 | fargate_profiles = [] 20 | -------------------------------------------------------------------------------- /examples/arm64/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | use_default_vpc = true 4 | name = "eks-arm64" 5 | tags = { 6 | env = "dev" 7 | arch = "arm64" 8 | } 9 | kubernetes_version = "1.21" 10 | managed_node_groups = [ 11 | { 12 | name = "arm64" 13 | min_size = 1 14 | max_size = 1 15 | desired_size = 1 16 | instance_type = "m6g.medium" 17 | ami_type = "AL2_ARM_64" 18 | } 19 | ] 20 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/startup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # when the variable is populated a search domain entry is added to resolv.conf at startup 4 | # this is needed for the ECS service discovery given the app works by calling host names and not FQDNs 5 | # a search domain can't be added to the 
container when using the awsvpc mode 6 | # and the awsvpc mode is needed for A records (bridge only supports SRV records) 7 | if [ $SEARCH_DOMAIN ]; then echo "search ${SEARCH_DOMAIN}" >> /etc/resolv.conf; fi 8 | 9 | ruby /app/yelb-appserver.rb -o 0.0.0.0 10 | -------------------------------------------------------------------------------- /examples/fargate/tc2.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2c", "ap-northeast-2b"] 3 | name = "eks-fargate-tc2" 4 | tags = { 5 | env = "dev" 6 | test = "tc2" 7 | } 8 | kubernetes_version = "1.21" 9 | fargate_profiles = [ 10 | { 11 | name = "hello" 12 | namespace = "hello" 13 | }, 14 | ] 15 | managed_node_groups = [ 16 | { 17 | name = "hello" 18 | min_size = 1 19 | max_size = 1 20 | desired_size = 1 21 | instance_type = "t3.small" 22 | } 23 | ] 24 | -------------------------------------------------------------------------------- /examples/ecr/tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | cidr = "10.0.0.0/16" 4 | name = "eks-ecr-tc1" 5 | tags = { 6 | env = "dev" 7 | platform = "fargate" 8 | test = "tc1" 9 | } 10 | enable_igw = true 11 | enable_ngw = true 12 | single_ngw = true 13 | kubernetes_version = "1.21" 14 | managed_node_groups = [] 15 | node_groups = [] 16 | fargate_profiles = [ 17 | { 18 | name = "hello" 19 | namespace = "hello" 20 | }, 21 | ] 22 | -------------------------------------------------------------------------------- /examples/lb/outputs.tf: -------------------------------------------------------------------------------- 1 | output "eks" { 2 | description = "The generated AWS EKS cluster" 3 | value = module.eks.cluster 4 | } 5 | 6 | output "role" { 7 | description = "The generated role of the EKS node group" 8 | value = module.eks.role 
9 | } 10 | 11 | output "kubeconfig" { 12 | description = "Bash script to update the kubeconfig file for the EKS cluster" 13 | value = module.eks.kubeconfig 14 | } 15 | 16 | output "features" { 17 | description = "Features configurations of the AWS EKS cluster" 18 | value = module.eks.features 19 | } 20 | -------------------------------------------------------------------------------- /examples/fargate/outputs.tf: -------------------------------------------------------------------------------- 1 | output "eks" { 2 | description = "The generated AWS EKS cluster" 3 | value = module.eks.cluster 4 | } 5 | 6 | output "role" { 7 | description = "The generated role of the EKS node group" 8 | value = module.eks.role 9 | } 10 | 11 | output "kubeconfig" { 12 | description = "Bash script to update the kubeconfig file for the EKS cluster" 13 | value = module.eks.kubeconfig 14 | } 15 | 16 | output "features" { 17 | description = "Features configurations of the AWS EKS cluster" 18 | value = module.eks.features 19 | } 20 | -------------------------------------------------------------------------------- /examples/irsa/fixture.tc1.tfvars: -------------------------------------------------------------------------------- 1 | name = "eks-irsa-tc1" 2 | tags = { 3 | env = "dev" 4 | test = "tc1" 5 | nodegroup = "true" 6 | } 7 | aws_region = "ap-northeast-1" 8 | azs = ["ap-northeast-1a", "ap-northeast-1d", "ap-northeast-1c"] 9 | cidr = "10.1.0.0/16" 10 | enable_igw = true 11 | enable_ngw = true 12 | single_ngw = true 13 | kubernetes_version = "1.21" 14 | managed_node_groups = [ 15 | { 16 | name = "default" 17 | desired_size = 1 18 | instance_type = "t3.large" 19 | } 20 | ] 21 | -------------------------------------------------------------------------------- /examples/autoscaling/fixture.tc5.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | name = 
"eks-autoscaling-tc5" 4 | use_default_vpc = false 5 | tags = { 6 | env = "dev" 7 | test = "tc5" 8 | ssm_managed = "enabled" 9 | fargate = "enabled" 10 | } 11 | kubernetes_version = "1.21" 12 | enable_ssm = true 13 | managed_node_groups = [ 14 | ] 15 | node_groups = [] 16 | fargate_profiles = [ 17 | { 18 | name = "default" 19 | namespace = "default" 20 | }, 21 | ] 22 | -------------------------------------------------------------------------------- /examples/ecr/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | cidr = "10.0.0.0/16" 4 | name = "eks-ecr" 5 | tags = { 6 | env = "dev" 7 | platform = "ec2" 8 | } 9 | enable_igw = true 10 | enable_ngw = true 11 | single_ngw = true 12 | kubernetes_version = "1.21" 13 | managed_node_groups = [ 14 | { 15 | name = "hello" 16 | desired_size = 1 17 | instance_type = "t3.medium" 18 | } 19 | ] 20 | node_groups = [] 21 | fargate_profiles = [] 22 | -------------------------------------------------------------------------------- /examples/lb/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | cidr = "10.1.0.0/16" 4 | enable_igw = true 5 | enable_ngw = true 6 | single_ngw = true 7 | name = "eks-lbc" 8 | tags = { 9 | env = "dev" 10 | } 11 | kubernetes_version = "1.21" 12 | managed_node_groups = [ 13 | { 14 | name = "game-2048" 15 | min_size = 1 16 | max_size = 3 17 | desired_size = 1 18 | instance_type = "t3.large" 19 | } 20 | ] 21 | node_groups = [] 22 | fargate_profiles = [] 23 | -------------------------------------------------------------------------------- /examples/autoscaling/fixture.tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = 
["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | use_default_vpc = false 4 | name = "eks-autoscaling-tc1" 5 | tags = { 6 | env = "dev" 7 | test = "tc1" 8 | } 9 | kubernetes_version = "1.21" 10 | enable_ssm = true 11 | managed_node_groups = [] 12 | node_groups = [ 13 | { 14 | name = "default" 15 | min_size = 1 16 | max_size = 3 17 | desired_size = 1 18 | instance_type = "t3.small" 19 | } 20 | ] 21 | fargate_profiles = [] 22 | -------------------------------------------------------------------------------- /modules/ecr/labels.tf: -------------------------------------------------------------------------------- 1 | # name and description 2 | resource "random_string" "suffix" { 3 | length = 5 4 | upper = false 5 | lower = true 6 | number = false 7 | special = false 8 | } 9 | 10 | locals { 11 | suffix = random_string.suffix.result 12 | name = join("-", compact([var.namespace, (var.name == "" ? local.suffix : var.name)])) 13 | repo-name = join("/", compact([var.namespace, (var.name == "" ? 
local.suffix : var.name)])) 14 | name-tag = { "Name" = local.repo-name } 15 | default-tags = merge( 16 | { "terraform.io" = "managed" }, 17 | local.name-tag 18 | ) 19 | } 20 | -------------------------------------------------------------------------------- /examples/spot/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | use_default_vpc = true 4 | name = "eks-spot" 5 | tags = { 6 | env = "dev" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [] 10 | node_groups = [ 11 | { 12 | name = "spot" 13 | desired_size = 1 14 | instance_type = "t3.large" 15 | instances_distribution = { 16 | spot_allocation_strategy = "lowest-price" 17 | spot_max_price = "0.036" 18 | } 19 | } 20 | ] 21 | fargate_profiles = [] 22 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/modules/getvotes.rb: -------------------------------------------------------------------------------- 1 | require_relative 'restaurantsdbread' 2 | require_relative 'restaurantsdbupdate' 3 | 4 | def getvotes() 5 | outback = restaurantsdbread("outback") 6 | ihop = restaurantsdbread("ihop") 7 | bucadibeppo = restaurantsdbread("bucadibeppo") 8 | chipotle = restaurantsdbread("chipotle") 9 | votes = '[{"name": "outback", "value": ' + outback + '},' + '{"name": "bucadibeppo", "value": ' + bucadibeppo + '},' + '{"name": "ihop", "value": ' + ihop + '}, ' + '{"name": "chipotle", "value": ' + chipotle + '}]' 10 | return votes 11 | end 12 | -------------------------------------------------------------------------------- /examples/autoscaling/fixture.tc3.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | use_default_vpc = false 4 | name = 
"eks-autoscaling-tc3" 5 | tags = { 6 | env = "dev" 7 | test = "tc3" 8 | ssm_managed = "enabled" 9 | } 10 | kubernetes_version = "1.21" 11 | enable_ssm = true 12 | managed_node_groups = [ 13 | { 14 | name = "default" 15 | min_size = 1 16 | max_size = 3 17 | desired_size = 1 18 | instance_type = "t3.small" 19 | } 20 | ] 21 | node_groups = [] 22 | fargate_profiles = [] 23 | -------------------------------------------------------------------------------- /examples/arm64/tc2.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "us-west-2" 2 | azs = ["us-west-2a", "us-west-2b", "us-west-2c"] 3 | name = "eks-x86-arm64-tc2" 4 | tags = { 5 | env = "dev" 6 | test = "tc2" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [] 10 | node_groups = [ 11 | { 12 | name = "arm64" 13 | min_size = 1 14 | max_size = 3 15 | desired_size = 1 16 | instance_type = "m6g.medium" 17 | ami_type = "AL2_ARM_64" 18 | }, 19 | { 20 | name = "x86" 21 | min_size = 1 22 | max_size = 3 23 | desired_size = 1 24 | instance_type = "t3.small" 25 | } 26 | ] 27 | -------------------------------------------------------------------------------- /examples/bottlerocket/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | use_default_vpc = true 4 | name = "eks-bottlerocket" 5 | tags = { 6 | env = "dev" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [] 10 | node_groups = [ 11 | { 12 | name = "bottlerocket" 13 | instance_type = "t3.small" 14 | ami_type = "BOTTLEROCKET_x86_64" 15 | }, 16 | ] 17 | 18 | 19 | # allowed values for 'ami_type' 20 | # - AL2_x86_64 21 | # - AL2_x86_64_GPU 22 | # - AL2_ARM_64 23 | # - CUSTOM 24 | # - BOTTLEROCKET_ARM_64 25 | # - BOTTLEROCKET_x86_64 26 | -------------------------------------------------------------------------------- 
/examples/spot/fixture.tc2.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2c", "ap-northeast-2d"] 3 | name = "eks-mng-mix-tc2" 4 | tags = { 5 | env = "dev" 6 | test = "tc2" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [ 10 | { 11 | name = "on-demand" 12 | capacity_type = "ON_DEMAND" # allowed values: ON_DEMAND, SPOT 13 | instance_type = "t3.medium" 14 | desired_size = 1 15 | }, 16 | { 17 | name = "spot" 18 | capacity_type = "SPOT" 19 | instance_type = "t3.medium" 20 | desired_size = 1 21 | } 22 | ] 23 | node_groups = [] 24 | fargate_profiles = [] 25 | -------------------------------------------------------------------------------- /examples/spot/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | description = "The attributes of Amazon VPC" 3 | value = module.vpc.vpc 4 | } 5 | 6 | output "eks" { 7 | description = "The generated AWS EKS cluster" 8 | value = module.eks.cluster 9 | } 10 | 11 | output "role" { 12 | description = "The generated role of the EKS node group" 13 | value = module.eks.role 14 | } 15 | 16 | output "kubeconfig" { 17 | description = "Bash script to update the kubeconfig file for the EKS cluster" 18 | value = module.eks.kubeconfig 19 | } 20 | 21 | output "features" { 22 | description = "Features configurations of the AWS EKS cluster" 23 | value = module.eks.features 24 | } 25 | -------------------------------------------------------------------------------- /examples/app-mesh/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | description = "The attributes of Amazon VPC" 3 | value = module.vpc.vpc 4 | } 5 | 6 | output "eks" { 7 | description = "The generated AWS EKS cluster" 8 | value = module.eks.cluster 9 | } 10 | 11 | output "role" { 12 | description = "The generated role of the 
EKS node group" 13 | value = module.eks.role 14 | } 15 | 16 | output "kubeconfig" { 17 | description = "Bash script to update the kubeconfig file for the EKS cluster" 18 | value = module.eks.kubeconfig 19 | } 20 | 21 | output "features" { 22 | description = "Features configurations of the AWS EKS cluster" 23 | value = module.eks.features 24 | } 25 | -------------------------------------------------------------------------------- /examples/arm64/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | description = "The attributes of Amazon VPC" 3 | value = module.vpc.vpc 4 | } 5 | 6 | output "eks" { 7 | description = "The generated AWS EKS cluster" 8 | value = module.eks.cluster 9 | } 10 | 11 | output "role" { 12 | description = "The generated role of the EKS node group" 13 | value = module.eks.role 14 | } 15 | 16 | output "kubeconfig" { 17 | description = "Bash script to update the kubeconfig file for the EKS cluster" 18 | value = module.eks.kubeconfig 19 | } 20 | 21 | output "features" { 22 | description = "Features configurations of the AWS EKS cluster" 23 | value = module.eks.features 24 | } 25 | -------------------------------------------------------------------------------- /examples/autoscaling/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | description = "The attributes of Amazon VPC" 3 | value = module.vpc.vpc 4 | } 5 | 6 | output "eks" { 7 | description = "The generated AWS EKS cluster" 8 | value = module.eks.cluster 9 | } 10 | 11 | output "role" { 12 | description = "The generated role of the EKS node group" 13 | value = module.eks.role 14 | } 15 | 16 | output "kubeconfig" { 17 | description = "Bash script to update the kubeconfig file for the EKS cluster" 18 | value = module.eks.kubeconfig 19 | } 20 | 21 | output "features" { 22 | description = "Features configurations of the AWS EKS cluster" 23 | value = module.eks.features 24 
| } 25 | -------------------------------------------------------------------------------- /examples/bottlerocket/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | description = "The attributes of Amazon VPC" 3 | value = module.vpc.vpc 4 | } 5 | 6 | output "eks" { 7 | description = "The generated AWS EKS cluster" 8 | value = module.eks.cluster 9 | } 10 | 11 | output "role" { 12 | description = "The generated role of the EKS node group" 13 | value = module.eks.role 14 | } 15 | 16 | output "kubeconfig" { 17 | description = "Bash script to update the kubeconfig file for the EKS cluster" 18 | value = module.eks.kubeconfig 19 | } 20 | 21 | output "features" { 22 | description = "Features configurations of the AWS EKS cluster" 23 | value = module.eks.features 24 | } 25 | -------------------------------------------------------------------------------- /examples/fargate/manifests/hello-kubernetes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-kubernetes 5 | spec: 6 | ports: 7 | - port: 80 8 | targetPort: 8080 9 | selector: 10 | app: hello-kubernetes 11 | --- 12 | apiVersion: apps/v1 13 | kind: Deployment 14 | metadata: 15 | name: hello-kubernetes 16 | spec: 17 | replicas: 3 18 | selector: 19 | matchLabels: 20 | app: hello-kubernetes 21 | template: 22 | metadata: 23 | labels: 24 | app: hello-kubernetes 25 | spec: 26 | containers: 27 | - name: hello-kubernetes 28 | image: paulbouwer/hello-kubernetes:1.8 29 | ports: 30 | - containerPort: 8080 31 | -------------------------------------------------------------------------------- /examples/lb/fixture.tc2.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | cidr = "10.1.0.0/16" 4 | enable_igw = true 5 | enable_ngw = true 
6 | single_ngw = true 7 | name = "eks-lbc-tc2-fargate" 8 | tags = { 9 | env = "dev" 10 | test = "tc2" 11 | } 12 | kubernetes_version = "1.21" 13 | managed_node_groups = [] 14 | node_groups = [] 15 | fargate_profiles = [ 16 | { 17 | name = "game-2048" 18 | namespace = "game-2048" 19 | }, 20 | { 21 | name = "default" 22 | namespace = "default" 23 | }, 24 | { 25 | name = "kube-system" 26 | namespace = "kube-system" 27 | }, 28 | ] 29 | -------------------------------------------------------------------------------- /examples/arm64/templates/hello-nodejs.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-nodejs-svc 5 | labels: 6 | app: hello-nodejs 7 | spec: 8 | ports: 9 | - port: 80 10 | selector: 11 | app: hello-nodejs 12 | --- 13 | apiVersion: apps/v1 14 | kind: Deployment 15 | metadata: 16 | name: hello-nodejs 17 | labels: 18 | app: hello-nodejs 19 | spec: 20 | replicas: 3 21 | selector: 22 | matchLabels: 23 | app: hello-nodejs 24 | template: 25 | metadata: 26 | labels: 27 | app: hello-nodejs 28 | spec: 29 | containers: 30 | - name: hello-nodejs 31 | image: ${ecr_uri} 32 | ports: 33 | - containerPort: 80 34 | -------------------------------------------------------------------------------- /examples/ecr/templates/hello-nodejs.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-nodejs-svc 5 | labels: 6 | app: hello-nodejs 7 | spec: 8 | ports: 9 | - port: 80 10 | selector: 11 | app: hello-nodejs 12 | --- 13 | apiVersion: apps/v1 14 | kind: Deployment 15 | metadata: 16 | name: hello-nodejs 17 | labels: 18 | app: hello-nodejs 19 | spec: 20 | replicas: 3 21 | selector: 22 | matchLabels: 23 | app: hello-nodejs 24 | template: 25 | metadata: 26 | labels: 27 | app: hello-nodejs 28 | spec: 29 | containers: 30 | - name: hello-nodejs 31 | image: ${ecr_uri} 32 | ports: 33 | - 
containerPort: 80 34 | -------------------------------------------------------------------------------- /examples/arm64/tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "us-west-2" 2 | azs = ["us-west-2a", "us-west-2b", "us-west-2c"] 3 | name = "eks-arm64-tc1" 4 | tags = { 5 | env = "dev" 6 | arch = "arm64" 7 | test = "tc1" 8 | } 9 | kubernetes_version = "1.21" 10 | managed_node_groups = [ 11 | { 12 | name = "arm64" 13 | min_size = 1 14 | max_size = 3 15 | desired_size = 1 16 | instance_type = "m6g.medium" 17 | ami_type = "AL2_ARM_64" 18 | } 19 | ] 20 | node_groups = [ 21 | { 22 | name = "arm64" 23 | min_size = 1 24 | max_size = 3 25 | desired_size = 1 26 | instance_type = "m6g.medium" 27 | ami_type = "AL2_ARM_64" 28 | } 29 | ] 30 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/README.md: -------------------------------------------------------------------------------- 1 | This is the application server. It's a Ruby/Sinatra application that exposes a number of APIs that are consumed by the UI (or via curl if you fancy that). 2 | 3 | Originally this application was included in a single file (`yelb-appserver.rb`). This has been since refactored by extracting the single API definition in their separate adapters and modules. This made the transition to Lambda/Serverless easier (one lambda per API definition). This hasn't changed the other deployment models (containers and instances) because those models still launch the `yelb-appserver.rb` main application which imports the modules instead of having everything in a single file (as it was conceived originally). 
4 | -------------------------------------------------------------------------------- /modules/container-insights/labels.tf: -------------------------------------------------------------------------------- 1 | data "aws_partition" "current" { 2 | count = local.metrics_enabled || local.logs_enabled ? 1 : 0 3 | } 4 | 5 | data "aws_region" "current" { 6 | count = local.metrics_enabled || local.logs_enabled ? 1 : 0 7 | } 8 | 9 | resource "random_string" "containerinsights-suffix" { 10 | count = local.metrics_enabled || local.logs_enabled ? 1 : 0 11 | length = 5 12 | upper = false 13 | lower = true 14 | number = false 15 | special = false 16 | } 17 | 18 | locals { 19 | suffix = var.petname && (local.metrics_enabled || local.logs_enabled) ? random_string.containerinsights-suffix.0.result : "" 20 | default-tags = merge( 21 | { "terraform.io" = "managed" }, 22 | ) 23 | } 24 | -------------------------------------------------------------------------------- /modules/iam-role-for-serviceaccount/outputs.tf: -------------------------------------------------------------------------------- 1 | output "name" { 2 | description = "The name of generated IAM role" 3 | value = aws_iam_role.irsa.name 4 | } 5 | 6 | output "arn" { 7 | description = "The ARN of generated IAM role" 8 | value = aws_iam_role.irsa.arn 9 | } 10 | 11 | output "kubecli" { 12 | description = "The kubernetes configuration file for creating IAM role with service account" 13 | value = join(" ", [ 14 | format("kubectl -n %s create sa %s", var.namespace, var.serviceaccount), 15 | "&&", 16 | format("kubectl -n %s annotate sa %s %s", 17 | var.namespace, 18 | var.serviceaccount, 19 | join("=", ["eks.amazonaws.com/role-arn", aws_iam_role.irsa.arn]) 20 | ), 21 | ]) 22 | } 23 | -------------------------------------------------------------------------------- /examples/cw/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | description = "The attributes of Amazon 
VPC" 3 | value = module.vpc.vpc 4 | } 5 | 6 | output "eks" { 7 | description = "The generated AWS EKS cluster" 8 | value = module.eks.cluster 9 | } 10 | 11 | output "role" { 12 | description = "The generated role of the EKS node group" 13 | value = module.eks.role 14 | } 15 | 16 | output "kubeconfig" { 17 | description = "Bash script to update the kubeconfig file for the EKS cluster" 18 | value = module.eks.kubeconfig 19 | } 20 | 21 | output "features" { 22 | description = "Features configuration of the AWS EKS and CloudWatch" 23 | value = zipmap( 24 | ["eks", "cw"], 25 | [module.eks.features, module.cw.features] 26 | ) 27 | } 28 | -------------------------------------------------------------------------------- /examples/autoscaling/fixture.tc4.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | name = "eks-autoscaling-tc4" 4 | tags = { 5 | env = "dev" 6 | test = "tc4" 7 | ssm_managed = "enabled" 8 | } 9 | kubernetes_version = "1.21" 10 | enable_ssm = true 11 | managed_node_groups = [ 12 | { 13 | name = "default" 14 | min_size = 1 15 | max_size = 3 16 | desired_size = 1 17 | instance_type = "t3.small" 18 | } 19 | ] 20 | node_groups = [ 21 | { 22 | name = "default" 23 | min_size = 1 24 | max_size = 3 25 | desired_size = 1 26 | instance_type = "t3.small" 27 | } 28 | ] 29 | fargate_profiles = [] 30 | -------------------------------------------------------------------------------- /modules/ecr/outputs.tf: -------------------------------------------------------------------------------- 1 | output "name" { 2 | description = "A name of generated ECR repository" 3 | value = aws_ecr_repository.repo.id 4 | } 5 | 6 | output "arn" { 7 | description = "An ARN of generated ECR repository" 8 | value = aws_ecr_repository.repo.arn 9 | } 10 | 11 | output "url" { 12 | description = "A URL of generated ECR repository" 13 | value = 
aws_ecr_repository.repo.repository_url 14 | } 15 | 16 | output "policy_arns" { 17 | description = "A map of IAM polices to allow access this ECR repository. If you want to make an IAM role or instance-profile has permissions to manage this repository, please attach the `poliy_arn` of this output on your side." 18 | value = zipmap(["read", "write"], [aws_iam_policy.read.arn, aws_iam_policy.write.arn]) 19 | } 20 | -------------------------------------------------------------------------------- /examples/kubeflow/templates/kfinst.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export WORK_DIR=$${PWD} 4 | 5 | export CONFIG_URI="https://raw.githubusercontent.com/kubeflow/manifests/v1.0-branch/kfdef/kfctl_aws.v1.0.1.yaml" 6 | export CONFIG_FILE=$WORK_DIR/kfctl_aws.yaml 7 | 8 | curl -o $CONFIG_FILE $CONFIG_URI 9 | 10 | # currently, disabled iam role for service account in this install configuration 11 | # yq '.spec.plugins[0].spec.enablePodIamPolicy = true' -i $CONFIG_FILE 12 | yq '.spec.plugins[0].spec.region = "${aws_region}"' -i $CONFIG_FILE 13 | yq '.spec.plugins[0].spec.roles[0] = "${eks_role}"' -i $CONFIG_FILE 14 | sed -i -e 's/kubeflow-aws/'"${eks_name}"'/' $CONFIG_FILE 15 | 16 | ${kubeconfig} 17 | export KUBECONFIG=kubeconfig 18 | 19 | kfctl apply -V -f $CONFIG_FILE 20 | 21 | unset KUBECONFIG 22 | unset CONFIG_FILE 23 | unset CONFIG_URI 24 | unset WORK_DIR 25 | -------------------------------------------------------------------------------- /examples/bottlerocket/fixture.tc2.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | name = "eks-bottlerocket-tc2" 3 | tags = { 4 | env = "dev" 5 | test = "tc2" 6 | } 7 | kubernetes_version = "1.21" 8 | enable_ssm = true 9 | managed_node_groups = [ 10 | { 11 | name = "bottlerocket-x86" 12 | instance_type = "t3.small" 13 | ami_type = "BOTTLEROCKET_x86_64" 14 | }, 15 | { 16 | name = 
"bottlerocket-arm" 17 | instance_type = "m6g.medium" 18 | ami_type = "BOTTLEROCKET_ARM_64" 19 | }, 20 | ] 21 | node_groups = [ 22 | { 23 | name = "bottlerocket-x86" 24 | instance_type = "t3.small" 25 | ami_type = "BOTTLEROCKET_x86_64" 26 | }, 27 | { 28 | name = "bottlerocket-arm" 29 | instance_type = "m6g.medium" 30 | ami_type = "BOTTLEROCKET_ARM_64" 31 | }, 32 | ] 33 | -------------------------------------------------------------------------------- /modules/prometheus/main.tf: -------------------------------------------------------------------------------- 1 | ## kubernetes prometheus 2 | 3 | locals { 4 | namespace = lookup(var.helm, "namespace", "prometheus") 5 | serviceaccount = lookup(var.helm, "serviceaccount", "prometheus") 6 | } 7 | 8 | resource "helm_release" "prometheus" { 9 | name = lookup(var.helm, "name", "prometheus") 10 | chart = lookup(var.helm, "chart", "prometheus") 11 | version = lookup(var.helm, "version", null) 12 | repository = lookup(var.helm, "repository", "https://prometheus-community.github.io/helm-charts") 13 | namespace = local.namespace 14 | create_namespace = true 15 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 16 | 17 | dynamic "set" { 18 | for_each = merge({}, lookup(var.helm, "vars", {})) 19 | content { 20 | name = set.key 21 | value = set.value 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /examples/irsa/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig" { 2 | description = "Bash script to update the kubeconfig file for the EKS cluster" 3 | value = module.eks.kubeconfig 4 | } 5 | 6 | output "kubecli" { 7 | description = "The kubectl command to attach annotations of IAM role for service account" 8 | value = module.irsa.kubecli 9 | } 10 | 11 | resource "local_file" "kubejob" { 12 | content = <<-EOT 13 | apiVersion: batch/v1 14 | kind: Job 15 | metadata: 16 | name: aws-cli 17 | spec: 18 | 
template: 19 | metadata: 20 | labels: 21 | app: aws-cli 22 | spec: 23 | serviceAccountName: s3-readonly 24 | containers: 25 | - name: aws-cli 26 | image: amazon/aws-cli:latest 27 | args: ["s3", "ls"] 28 | restartPolicy: Never 29 | EOT 30 | filename = "${path.cwd}/irsa.yaml" 31 | file_permission = "0600" 32 | } 33 | -------------------------------------------------------------------------------- /modules/metrics-server/main.tf: -------------------------------------------------------------------------------- 1 | ## kubernetes metrics-server 2 | 3 | locals { 4 | namespace = lookup(var.helm, "namespace", "kube-system") 5 | serviceaccount = lookup(var.helm, "serviceaccount", "metrics-server") 6 | } 7 | 8 | resource "helm_release" "metrics" { 9 | name = lookup(var.helm, "name", "metrics-server") 10 | chart = lookup(var.helm, "chart", "metrics-server") 11 | version = lookup(var.helm, "version", null) 12 | repository = lookup(var.helm, "repository", "https://charts.helm.sh/stable") 13 | namespace = local.namespace 14 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 15 | 16 | dynamic "set" { 17 | for_each = merge({ 18 | "args[0]" = "--kubelet-preferred-address-types=InternalIP" 19 | }, lookup(var.helm, "vars", {})) 20 | content { 21 | name = set.key 22 | value = set.value 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /modules/node-termination-handler/main.tf: -------------------------------------------------------------------------------- 1 | ## kubernetes node termination handler 2 | 3 | locals { 4 | namespace = lookup(var.helm, "namespace", "kube-system") 5 | serviceaccount = lookup(var.helm, "serviceaccount", "aws-node-termination-handler") 6 | } 7 | 8 | resource "helm_release" "node-termination-handler" { 9 | name = lookup(var.helm, "name", "aws-node-termination-handler") 10 | chart = lookup(var.helm, "chart", "aws-node-termination-handler") 11 | version = lookup(var.helm, "version", null) 12 | 
repository = lookup(var.helm, "repository", "https://aws.github.io/eks-charts") 13 | namespace = local.namespace 14 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 15 | 16 | dynamic "set" { 17 | for_each = merge({}, lookup(var.helm, "vars", {})) 18 | content { 19 | name = set.key 20 | value = set.value 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /examples/arm64/buildspec-manifest.yaml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | # Based on the Docker documentation, must include the DOCKER_CLI_EXPERIMENTAL environment variable 3 | # https://docs.docker.com/engine/reference/commandline/manifest/ 4 | 5 | phases: 6 | install: 7 | commands: 8 | - yum update -y 9 | pre_build: 10 | commands: 11 | - cd examples/arm64/app/ 12 | - $(aws ecr get-login --no-include-email) 13 | - echo $TAG 14 | build: 15 | commands: 16 | - export DOCKER_CLI_EXPERIMENTAL=enabled 17 | - docker manifest create $REPOSITORY_URI $REPOSITORY_URI:arm64 $REPOSITORY_URI:amd64 18 | - docker manifest annotate --arch arm64 $REPOSITORY_URI $REPOSITORY_URI:arm64 19 | - docker manifest annotate --arch amd64 $REPOSITORY_URI $REPOSITORY_URI:amd64 20 | post_build: 21 | commands: 22 | - docker manifest push $REPOSITORY_URI 23 | - docker manifest inspect $REPOSITORY_URI 24 | -------------------------------------------------------------------------------- /modules/iam-role-for-serviceaccount/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | oidc_fully_qualified_subjects = format("system:serviceaccount:%s:%s", var.namespace, var.serviceaccount) 3 | } 4 | 5 | # security/policy 6 | resource "aws_iam_role" "irsa" { 7 | name = local.name 8 | path = var.path 9 | tags = merge(local.default-tags, var.tags) 10 | assume_role_policy = jsonencode({ 11 | Statement = [{ 12 | Action = "sts:AssumeRoleWithWebIdentity" 13 | Effect = "Allow" 14 | 
Principal = { 15 | Federated = var.oidc_arn 16 | } 17 | Condition = { 18 | StringEquals = { 19 | format("%s:sub", var.oidc_url) = local.oidc_fully_qualified_subjects 20 | } 21 | } 22 | }] 23 | Version = "2012-10-17" 24 | }) 25 | } 26 | 27 | resource "aws_iam_role_policy_attachment" "irsa" { 28 | for_each = { for k, v in var.policy_arns : k => v } 29 | policy_arn = each.value 30 | role = aws_iam_role.irsa.name 31 | } 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | **/.terraform.lock* 4 | 5 | 6 | # .tfstate files 7 | *.tfstate 8 | *.tfstate.* 9 | 10 | # Crash log files 11 | crash.log 12 | 13 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most 14 | # .tfvars files are managed as part of configuration and so should be included in 15 | # version control. 16 | # 17 | # example.tfvars 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | # 28 | # !example_override.tf 29 | 30 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 31 | # example: *tfplan* 32 | 33 | # User kubernetes cluster configs 34 | kubeconfig 35 | 36 | # Local .DS_Store 37 | **/.DS_Store 38 | -------------------------------------------------------------------------------- /examples/lb/fixture.tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | cidr = "10.1.0.0/16" 4 | enable_igw = true 5 | enable_ngw = true 6 | single_ngw = true 7 | name = 
"eks-lbc-tc1-spot" 8 | tags = { 9 | env = "dev" 10 | test = "tc1" 11 | } 12 | kubernetes_version = "1.21" 13 | managed_node_groups = [] 14 | node_groups = [ 15 | { 16 | name = "mixed" 17 | min_size = 1 18 | max_size = 3 19 | desired_size = 3 20 | instance_type = "t3.medium" 21 | instances_distribution = { 22 | on_demand_percentage_above_base_capacity = 50 23 | spot_allocation_strategy = "capacity-optimized" 24 | } 25 | instances_override = [ 26 | { 27 | instance_type = "t3.small" 28 | weighted_capacity = 2 29 | }, 30 | { 31 | instance_type = "t3.large" 32 | weighted_capacity = 1 33 | } 34 | ] 35 | } 36 | ] 37 | fargate_profiles = [] 38 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/variables.tf: -------------------------------------------------------------------------------- 1 | ### helm 2 | variable "helm" { 3 | description = "The helm release configuration" 4 | type = any 5 | default = { 6 | name = "cluster-autoscaler" 7 | chart = "cluster-autoscaler" 8 | namespace = "kube-system" 9 | serviceaccount = "cluster-autoscaler" 10 | cleanup_on_fail = true 11 | vars = {} 12 | } 13 | } 14 | 15 | ### security/policy 16 | variable "oidc" { 17 | description = "The Open ID Connect properties" 18 | type = map(any) 19 | } 20 | 21 | ### description 22 | variable "cluster_name" { 23 | description = "The kubernetes cluster name" 24 | type = string 25 | } 26 | 27 | variable "petname" { 28 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 29 | type = bool 30 | default = true 31 | } 32 | 33 | ### tags 34 | variable "tags" { 35 | description = "The key-value maps for tagging" 36 | type = map(string) 37 | default = {} 38 | } 39 | -------------------------------------------------------------------------------- /modules/metrics-server/variables.tf: -------------------------------------------------------------------------------- 1 | ### helm 2 | variable "helm" { 
3 | description = "The helm release configuration" 4 | type = any 5 | default = { 6 | name = "metrics-server" 7 | repository = "https://charts.helm.sh/stable" 8 | chart = "metrics-server" 9 | namespace = "kube-system" 10 | cleanup_on_fail = true 11 | vars = {} 12 | } 13 | } 14 | 15 | ### security/policy 16 | variable "oidc" { 17 | description = "The Open ID Connect properties" 18 | type = map(any) 19 | } 20 | 21 | ### description 22 | variable "cluster_name" { 23 | description = "The kubernetes cluster name" 24 | type = string 25 | } 26 | 27 | variable "petname" { 28 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 29 | type = bool 30 | default = true 31 | } 32 | 33 | ### tags 34 | variable "tags" { 35 | description = "The key-value maps for tagging" 36 | type = map(string) 37 | default = {} 38 | } 39 | -------------------------------------------------------------------------------- /examples/autoscaling/manifests/php-apache.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: php-apache 5 | spec: 6 | selector: 7 | matchLabels: 8 | run: php-apache 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | run: php-apache 14 | spec: 15 | containers: 16 | - name: php-apache 17 | image: k8s.gcr.io/hpa-example 18 | ports: 19 | - containerPort: 80 20 | resources: 21 | limits: 22 | cpu: 500m 23 | requests: 24 | cpu: 200m 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: php-apache 30 | labels: 31 | run: php-apache 32 | spec: 33 | ports: 34 | - port: 80 35 | selector: 36 | run: php-apache 37 | --- 38 | apiVersion: autoscaling/v1 39 | kind: HorizontalPodAutoscaler 40 | metadata: 41 | name: php-apache 42 | spec: 43 | scaleTargetRef: 44 | apiVersion: apps/v1 45 | kind: Deployment 46 | name: php-apache 47 | minReplicas: 1 48 | maxReplicas: 10 49 | targetCPUUtilizationPercentage: 50 
-------------------------------------------------------------------------------- /examples/spot/fixture.tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-1" 2 | azs = ["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"] 3 | name = "eks-mixed-tc1" 4 | tags = { 5 | env = "dev" 6 | test = "tc1" 7 | } 8 | kubernetes_version = "1.21" 9 | managed_node_groups = [] 10 | node_groups = [ 11 | { 12 | name = "mixed" 13 | min_size = 1 14 | max_size = 3 15 | desired_size = 2 16 | instance_type = "t3.medium" 17 | instances_distribution = { 18 | on_demand_percentage_above_base_capacity = 50 19 | spot_allocation_strategy = "capacity-optimized" 20 | } 21 | instances_override = [ 22 | { 23 | instance_type = "t3.small" 24 | weighted_capacity = 2 25 | }, 26 | { 27 | instance_type = "t3.large" 28 | weighted_capacity = 1 29 | } 30 | ] 31 | }, 32 | { 33 | name = "on-demand" 34 | min_size = 1 35 | max_size = 3 36 | desired_size = 1 37 | instance_type = "t3.large" 38 | } 39 | ] 40 | fargate_profiles = [] 41 | -------------------------------------------------------------------------------- /examples/fargate/main.tf: -------------------------------------------------------------------------------- 1 | # Fargate node groups example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "terraform-aws-modules/vpc/aws" 14 | version = "2.63.0" 15 | name = var.name 16 | azs = var.azs 17 | cidr = "10.0.0.0/16" 18 | private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] 19 | public_subnets = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"] 20 | enable_nat_gateway = true 21 | single_nat_gateway = true 22 | tags = module.eks.tags.shared 23 | } 24 | 25 | # eks 26 | module "eks" { 27 | source = "Young-ook/eks/aws" 28 | name = var.name 29 | tags = var.tags 30 | subnets = module.vpc.private_subnets 31 | 
kubernetes_version = var.kubernetes_version 32 | managed_node_groups = var.managed_node_groups 33 | node_groups = var.node_groups 34 | fargate_profiles = var.fargate_profiles 35 | } 36 | -------------------------------------------------------------------------------- /modules/alb-ingress/variables.tf: -------------------------------------------------------------------------------- 1 | ### deprecated 2 | ### helm 3 | variable "helm" { 4 | description = "The helm release configuration" 5 | type = any 6 | default = { 7 | name = "aws-alb-ingress-controller" 8 | repository = "https://charts.helm.sh/incubator" 9 | chart = "aws-alb-ingress-controller" 10 | namespace = "kube-system" 11 | cleanup_on_fail = true 12 | vars = {} 13 | } 14 | } 15 | 16 | ### security/policy 17 | variable "oidc" { 18 | description = "The Open ID Connect properties" 19 | type = map(any) 20 | } 21 | 22 | ### description 23 | variable "cluster_name" { 24 | description = "The kubernetes cluster name" 25 | type = string 26 | } 27 | 28 | variable "petname" { 29 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 30 | type = bool 31 | default = true 32 | } 33 | 34 | ### tags 35 | variable "tags" { 36 | description = "The key-value maps for tagging" 37 | type = map(string) 38 | default = {} 39 | } 40 | -------------------------------------------------------------------------------- /modules/app-mesh/variables.tf: -------------------------------------------------------------------------------- 1 | ### helm 2 | variable "helm" { 3 | description = "The helm release configuration" 4 | type = any 5 | default = { 6 | name = "appmesh-controller" 7 | repository = "https://aws.github.io/eks-charts" 8 | chart = "appmesh-controller" 9 | namespace = "appmesh-system" 10 | serviceaccount = "aws-appmesh-controller" 11 | cleanup_on_fail = true 12 | vars = {} 13 | } 14 | } 15 | 16 | ### security/policy 17 | variable "oidc" { 18 | description = "The Open ID 
Connect properties" 19 | type = map(any) 20 | } 21 | 22 | ### description 23 | variable "cluster_name" { 24 | description = "The kubernetes cluster name" 25 | type = string 26 | } 27 | 28 | variable "petname" { 29 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 30 | type = bool 31 | default = true 32 | } 33 | 34 | ### tags 35 | variable "tags" { 36 | description = "The key-value maps for tagging" 37 | type = map(string) 38 | default = {} 39 | } 40 | -------------------------------------------------------------------------------- /examples/bottlerocket/main.tf: -------------------------------------------------------------------------------- 1 | # Bottle Rocket OS example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 3.64" 9 | } 10 | } 11 | } 12 | 13 | provider "aws" { 14 | region = var.aws_region 15 | } 16 | 17 | # vpc 18 | module "vpc" { 19 | source = "Young-ook/vpc/aws" 20 | name = var.name 21 | tags = var.tags 22 | vpc_config = var.use_default_vpc ? null : { 23 | azs = var.azs 24 | cidr = "10.10.0.0/16" 25 | subnet_type = "private" 26 | single_ngw = true 27 | } 28 | } 29 | 30 | # eks 31 | module "eks" { 32 | source = "../../" 33 | name = var.name 34 | tags = var.tags 35 | subnets = slice(values(module.vpc.subnets[var.use_default_vpc ? 
"public" : "private"]), 0, 3) 36 | kubernetes_version = var.kubernetes_version 37 | managed_node_groups = var.managed_node_groups 38 | node_groups = var.node_groups 39 | fargate_profiles = var.fargate_profiles 40 | enable_ssm = var.enable_ssm 41 | } 42 | -------------------------------------------------------------------------------- /modules/karpenter/variables.tf: -------------------------------------------------------------------------------- 1 | variable "enabled" { 2 | description = "A conditional indicator to enable cluster-autoscale" 3 | type = bool 4 | default = true 5 | } 6 | 7 | ### helm 8 | variable "helm" { 9 | description = "The helm release configuration" 10 | type = any 11 | default = { 12 | name = "karpenter" 13 | repository = "https://charts.karpenter.sh" 14 | chart = "karpenter" 15 | namespace = "karpenter" 16 | serviceaccount = "karpenter" 17 | cleanup_on_fail = true 18 | vars = {} 19 | } 20 | } 21 | 22 | ### security/policy 23 | variable "oidc" { 24 | description = "The Open ID Connect properties" 25 | type = map(any) 26 | } 27 | 28 | ### description 29 | variable "petname" { 30 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 31 | type = bool 32 | default = true 33 | } 34 | 35 | ### tags 36 | variable "tags" { 37 | description = "The key-value maps for tagging" 38 | type = map(string) 39 | default = {} 40 | } 41 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM bitnami/ruby:2.4.2-r1 2 | MAINTAINER massimo@it20.info 3 | 4 | ################## BEGIN INSTALLATION ###################### 5 | 6 | # Set the working directory to /app 7 | WORKDIR /app 8 | 9 | COPY yelb-appserver.rb yelb-appserver.rb 10 | COPY Gemfile Gemfile 11 | COPY modules modules 12 | 13 | ENV LANG=en_us.UTF-8 14 | ENV LC_ALL=C.UTF-8 15 | ENV 
RACK_ENV=production 16 | 17 | RUN gem install sinatra --no-ri --no-rdoc 18 | RUN gem install redis --no-ri --no-rdoc 19 | ### hack to allow the setup of the pg gem (which would fail otherwise) 20 | RUN apt-get update 21 | RUN apt-get install libpq-dev -y 22 | ### end of hack (this would require additional research and optimization) 23 | RUN gem install pg --no-ri --no-rdoc 24 | ### this installs the AWS SDK for DynamoDB (so that appserver can talk to DDB Vs the default Postgres/Redis) 25 | RUN gem install aws-sdk-dynamodb pg --no-ri --no-rdoc 26 | # Set the working directory to / 27 | WORKDIR / 28 | ADD startup.sh startup.sh 29 | 30 | ##################### INSTALLATION END ##################### 31 | 32 | CMD ["./startup.sh"] 33 | 34 | 35 | -------------------------------------------------------------------------------- /examples/bottlerocket/fixture.tc1.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | use_default_vpc = false 3 | name = "eks-bottlerocket-tc1" 4 | tags = { 5 | env = "dev" 6 | test = "tc1" 7 | } 8 | kubernetes_version = "1.21" 9 | enable_ssm = true 10 | managed_node_groups = [ 11 | { 12 | name = "default" 13 | min_size = 1 14 | max_size = 3 15 | desired_size = 1 16 | instance_type = "t3.small" 17 | ami_type = "AL2_x86_64" 18 | }, 19 | ] 20 | node_groups = [ 21 | { 22 | name = "default" 23 | instance_type = "t3.small" 24 | }, 25 | { 26 | name = "al2" 27 | instance_type = "t3.small" 28 | ami_type = "AL2_x86_64" 29 | }, 30 | { 31 | name = "bottlerocket" 32 | instance_type = "t3.small" 33 | ami_type = "BOTTLEROCKET_x86_64" 34 | }, 35 | { 36 | name = "al2-gpu" 37 | instance_type = "g4dn.xlarge" 38 | ami_type = "AL2_x86_64_GPU" 39 | }, 40 | { 41 | name = "al2-arm" 42 | instance_type = "m6g.medium" 43 | ami_type = "AL2_ARM_64" 44 | }, 45 | ] 46 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Young-ook 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /examples/arm64/main.tf: -------------------------------------------------------------------------------- 1 | # ARM64 node groups example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | ### network/vpc 12 | module "vpc" { 13 | source = "Young-ook/vpc/aws" 14 | name = var.name 15 | tags = var.tags 16 | vpc_config = var.use_default_vpc ? 
null : { 17 | azs = var.azs 18 | cidr = "10.10.0.0/16" 19 | subnet_type = "private" 20 | single_ngw = true 21 | } 22 | } 23 | 24 | ### cluster/eks 25 | module "eks" { 26 | source = "Young-ook/eks/aws" 27 | name = var.name 28 | tags = var.tags 29 | subnets = slice(values(module.vpc.subnets[var.use_default_vpc ? "public" : "private"]), 0, 3) 30 | kubernetes_version = var.kubernetes_version 31 | managed_node_groups = var.managed_node_groups 32 | node_groups = var.node_groups 33 | } 34 | 35 | ### artifact/ecr 36 | module "ecr" { 37 | providers = { aws = aws.codebuild } 38 | source = "Young-ook/eks/aws//modules/ecr" 39 | name = "hello-nodejs" 40 | scan_on_push = false 41 | } 42 | -------------------------------------------------------------------------------- /modules/lb-controller/variables.tf: -------------------------------------------------------------------------------- 1 | ### helm 2 | variable "helm" { 3 | description = "The helm release configuration" 4 | type = any 5 | default = { 6 | repository = "https://aws.github.io/eks-charts" 7 | name = "aws-load-balancer-controller" 8 | chart = "aws-load-balancer-controller" 9 | namespace = "kube-system" 10 | serviceaccount = "aws-load-balancer-controller" 11 | cleanup_on_fail = true 12 | vars = {} 13 | } 14 | } 15 | 16 | ### security/policy 17 | variable "oidc" { 18 | description = "The Open ID Connect properties" 19 | type = map(any) 20 | } 21 | 22 | ### description 23 | variable "cluster_name" { 24 | description = "The kubernetes cluster name" 25 | type = string 26 | } 27 | 28 | variable "petname" { 29 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 30 | type = bool 31 | default = true 32 | } 33 | 34 | ### tags 35 | variable "tags" { 36 | description = "The key-value maps for tagging" 37 | type = map(string) 38 | default = {} 39 | } 40 | -------------------------------------------------------------------------------- /labels.tf: 
-------------------------------------------------------------------------------- 1 | resource "random_string" "uid" { 2 | length = 12 3 | upper = false 4 | lower = true 5 | number = false 6 | special = false 7 | } 8 | 9 | locals { 10 | service = "eks" 11 | uid = join("-", [local.service, random_string.uid.result]) 12 | name = var.name == null || var.name == "" ? local.uid : var.name 13 | default-tags = merge( 14 | { "terraform.io" = "managed" }, 15 | ) 16 | } 17 | 18 | ## kubernetes tags 19 | locals { 20 | eks-shared-tag = { 21 | format("kubernetes.io/cluster/%s", local.name) = "shared" 22 | } 23 | eks-owned-tag = { 24 | format("kubernetes.io/cluster/%s", local.name) = "owned" 25 | } 26 | eks-elb-tag = { 27 | "kubernetes.io/role/elb" = "1" 28 | } 29 | eks-internal-elb-tag = { 30 | "kubernetes.io/role/internal-elb" = "1" 31 | } 32 | eks-autoscaler-tag = { 33 | "k8s.io/cluster-autoscaler/enabled" = "true" 34 | format("k8s.io/cluster-autoscaler/%s", local.name) = "owned" 35 | } 36 | eks-tag = merge( 37 | { 38 | "eks:cluster-name" = local.name 39 | }, 40 | local.eks-owned-tag, 41 | local.eks-autoscaler-tag, 42 | ) 43 | } 44 | -------------------------------------------------------------------------------- /modules/node-termination-handler/variables.tf: -------------------------------------------------------------------------------- 1 | ### helm 2 | variable "helm" { 3 | description = "The helm release configuration" 4 | type = any 5 | default = { 6 | name = "aws-node-termination-handler" 7 | repository = "https://aws.github.io/eks-charts" 8 | chart = "aws-node-termination-handler" 9 | namespace = "kube-system" 10 | serviceaccount = "aws-node-termination-handler" 11 | cleanup_on_fail = true 12 | vars = {} 13 | } 14 | } 15 | 16 | ### security/policy 17 | variable "oidc" { 18 | description = "The Open ID Connect properties" 19 | type = map(any) 20 | } 21 | 22 | ### description 23 | variable "cluster_name" { 24 | description = "The kubernetes cluster name" 25 | type = 
string 26 | } 27 | 28 | variable "petname" { 29 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 30 | type = bool 31 | default = true 32 | } 33 | 34 | ### tags 35 | variable "tags" { 36 | description = "The key-value maps for tagging" 37 | type = map(string) 38 | default = {} 39 | } 40 | -------------------------------------------------------------------------------- /modules/container-insights/variables.tf: -------------------------------------------------------------------------------- 1 | variable "features" { 2 | description = "Toggle flags to enable cloudwatch features" 3 | type = map(any) 4 | default = { 5 | enable_metrics = false 6 | enable_logs = false 7 | } 8 | } 9 | 10 | ### helm 11 | variable "helm" { 12 | description = "The helm release configuration" 13 | type = any 14 | default = { 15 | repository = "https://aws.github.io/eks-charts" 16 | cleanup_on_fail = true 17 | vars = {} 18 | } 19 | } 20 | 21 | ### security/policy 22 | variable "oidc" { 23 | description = "The Open ID Connect properties" 24 | type = map(any) 25 | } 26 | 27 | ### description 28 | variable "cluster_name" { 29 | description = "The kubernetes cluster name" 30 | type = string 31 | } 32 | 33 | variable "petname" { 34 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 35 | type = bool 36 | default = true 37 | } 38 | 39 | ### tags 40 | variable "tags" { 41 | description = "The key-value maps for tagging" 42 | type = map(string) 43 | default = {} 44 | } 45 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/modules/restaurantsdbread.rb: -------------------------------------------------------------------------------- 1 | require 'pg' 2 | require 'pg_ext' 3 | require 'aws-sdk-dynamodb' 4 | 5 | def restaurantsdbread(restaurant) 6 | if ($yelbddbrestaurants != nil && $yelbddbrestaurants != "") then 
7 | dynamodb = Aws::DynamoDB::Client.new(region: $awsregion) 8 | params = { 9 | table_name: $yelbddbrestaurants, 10 | key: { 11 | name: restaurant 12 | } 13 | } 14 | restaurantrecord = dynamodb.get_item(params) 15 | restaurantcount = restaurantrecord.item['restaurantcount'] 16 | else 17 | con = PG.connect :host => $yelbdbhost, 18 | :port => $yelbdbport, 19 | :dbname => 'yelbdatabase', 20 | :user => 'postgres', 21 | :password => 'postgres_password' 22 | con.prepare('statement1', 'SELECT count FROM restaurants WHERE name = $1') 23 | res = con.exec_prepared('statement1', [ restaurant ]) 24 | restaurantcount = res.getvalue(0,0) 25 | con.close 26 | end 27 | return restaurantcount.to_s 28 | end 29 | -------------------------------------------------------------------------------- /modules/iam-role-for-serviceaccount/variables.tf: -------------------------------------------------------------------------------- 1 | variable "namespace" { 2 | description = "The namespace where kubernetes service account is" 3 | type = string 4 | } 5 | 6 | variable "serviceaccount" { 7 | description = "The name of kubernetes service account" 8 | type = string 9 | } 10 | 11 | ### security 12 | variable "policy_arns" { 13 | description = "A list of policy ARNs to attach the role" 14 | type = list(string) 15 | default = [] 16 | } 17 | 18 | variable "oidc_url" { 19 | description = "A URL of the OIDC Provider" 20 | type = string 21 | } 22 | 23 | variable "oidc_arn" { 24 | description = "An ARN of the OIDC Provider" 25 | type = string 26 | } 27 | 28 | ### description 29 | variable "name" { 30 | description = "The logical name of the module instance" 31 | type = string 32 | default = null 33 | } 34 | 35 | variable "path" { 36 | description = "The path for role" 37 | type = string 38 | default = "/" 39 | } 40 | 41 | ### tags 42 | variable "tags" { 43 | description = "The key-value maps for tagging" 44 | type = map(string) 45 | default = {} 46 | } 47 | 
-------------------------------------------------------------------------------- /modules/prometheus/variables.tf: -------------------------------------------------------------------------------- 1 | ### helm 2 | variable "helm" { 3 | description = "The helm release configuration" 4 | type = any 5 | default = { 6 | name = "prometheus" 7 | repository = "https://prometheus-community.github.io/helm-charts" 8 | chart = "prometheus" 9 | namespace = "prometheus" 10 | values = { 11 | "alertmanager.persistentVolume.storageClass" = "gp2" 12 | "server.persistentVolume.storageClass" = "gp2" 13 | } 14 | cleanup_on_fail = true 15 | vars = {} 16 | } 17 | } 18 | 19 | ### security/policy 20 | variable "oidc" { 21 | description = "The Open ID Connect properties" 22 | type = map(any) 23 | } 24 | 25 | ### description 26 | variable "cluster_name" { 27 | description = "The kubernetes cluster name" 28 | type = string 29 | } 30 | 31 | variable "petname" { 32 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 33 | type = bool 34 | default = true 35 | } 36 | 37 | ### tags 38 | variable "tags" { 39 | description = "The key-value maps for tagging" 40 | type = map(string) 41 | default = {} 42 | } 43 | -------------------------------------------------------------------------------- /examples/arm64/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "use_default_vpc" { 10 | description = "A feature flag for whether to use default vpc" 11 | type = bool 12 | } 13 | 14 | variable "azs" { 15 | description = "A list of availability zones for the vpc to deploy resources" 16 | type = list(string) 17 | } 18 | 19 | ### kubernetes cluster 20 | variable "kubernetes_version" { 21 | description = "The target version of 
kubernetes" 22 | type = string 23 | } 24 | 25 | variable "node_groups" { 26 | description = "Node groups definition" 27 | default = [] 28 | } 29 | 30 | variable "managed_node_groups" { 31 | description = "Amazon managed node groups definition" 32 | default = [] 33 | } 34 | 35 | ### description 36 | variable "name" { 37 | description = "The logical name of the module instance" 38 | type = string 39 | default = "eks" 40 | } 41 | 42 | ### tags 43 | variable "tags" { 44 | description = "The key-value maps for tagging" 45 | type = map(string) 46 | default = {} 47 | } 48 | -------------------------------------------------------------------------------- /modules/addon/README.md: -------------------------------------------------------------------------------- 1 | # Amazon EKS Add-on 2 | 3 | An [add-on](https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html) is software that provides supporting operational capabilities to Kubernetes applications, but is not specific to the application. This includes software like observability agents or Kubernetes drivers that allow the cluster to interact with underlying AWS resources for networking, compute, and storage. Add-on software is typically built and maintained by the Kubernetes community, cloud providers like AWS, or third-party vendors. Amazon EKS automatically installs self-managed add-ons such as the Amazon VPC CNI, kube-proxy, and CoreDNS for every cluster. You can change the default configuration of the add-ons and update them when desired. 
4 | 5 | For detailed steps when using the AWS Management Console, AWS CLI, and eksctl, see the topics for the following add-ons: 6 | * [Amazon VPC CNI](https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html) 7 | * [CoreDNS](https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html) 8 | * [kube-proxy](https://docs.aws.amazon.com/eks/latest/userguide/managing-kube-proxy.html) 9 | * [Amazon EBS CSI](https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html) 10 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/charts/cluster-autoscaler/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cluster-autoscaler 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 
23 | appVersion: "v1.17.3" 24 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/main.tf: -------------------------------------------------------------------------------- 1 | ### ecr 2 | module "ecr" { 3 | source = "Young-ook/eks/aws//modules/ecr" 4 | name = "yelb" 5 | scan_on_push = false 6 | } 7 | 8 | ### codebuild 9 | locals { 10 | stages = ["yelb"] 11 | image_al2_amd64 = "aws/codebuild/amazonlinux2-x86_64-standard:3.0" 12 | buildspec = "examples/app-mesh/modules/codebuild/buildspec.yaml" 13 | } 14 | 15 | module "cb" { 16 | for_each = toset(local.stages) 17 | source = "Young-ook/spinnaker/aws//modules/codebuild" 18 | version = "~> 2.0" 19 | name = each.key 20 | tags = var.tags 21 | environment_config = { 22 | type = "LINUX_CONTAINER" 23 | compute_type = "BUILD_GENERAL1_LARGE" 24 | image = local.image_al2_amd64 25 | privileged_mode = true 26 | environment_variables = { 27 | REPOSITORY_URI = module.ecr.url 28 | TAG = "v2" 29 | } 30 | } 31 | source_config = { 32 | type = "GITHUB" 33 | location = "https://github.com/Young-ook/terraform-aws-eks.git" 34 | buildspec = local.buildspec 35 | version = "main" 36 | } 37 | policy_arns = [ 38 | module.ecr.policy_arns["read"], 39 | module.ecr.policy_arns["write"], 40 | ] 41 | } 42 | -------------------------------------------------------------------------------- /examples/cw/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2c", "ap-northeast-2d"] 3 | use_default_vpc = true 4 | name = "eks-cw" 5 | tags = { 6 | env = "dev" 7 | metrics = "false" 8 | logs = "false" 9 | } 10 | kubernetes_version = "1.22" 11 | managed_node_groups = [ 12 | { 13 | name = "mng" 14 | desired_size = 1 15 | min_size = 1 16 | max_size = 3 17 | instance_type = "t3.large" 18 | }, 19 | { 20 | name = "mng-eachtag" 21 | desired_size = 1 22 | min_size = 1 23 | 
max_size = 3 24 | instance_type = "t3.large" 25 | tags = { 26 | test = "each-node-tag" 27 | } 28 | } 29 | ] 30 | node_groups = [ 31 | { 32 | name = "ng" 33 | desired_size = 1 34 | min_size = 1 35 | max_size = 3 36 | instance_type = "t3.large" 37 | }, 38 | { 39 | name = "ng-eachtag" 40 | desired_size = 1 41 | min_size = 1 42 | max_size = 3 43 | instance_type = "t3.large" 44 | tags = { 45 | test = "each-node-tag" 46 | } 47 | } 48 | ] 49 | enable_cw = { 50 | enable_metrics = false 51 | enable_logs = true 52 | } 53 | -------------------------------------------------------------------------------- /examples/emr/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "use_default_vpc" { 10 | description = "A feature flag for whether to use default vpc" 11 | type = bool 12 | } 13 | 14 | variable "azs" { 15 | description = "A list of availability zones for the vpc to deploy resources" 16 | type = list(string) 17 | } 18 | 19 | ### kubernetes cluster 20 | variable "kubernetes_version" { 21 | description = "The target version of kubernetes" 22 | type = string 23 | } 24 | 25 | variable "managed_node_groups" { 26 | description = "Amazon managed node groups definition" 27 | default = [] 28 | } 29 | 30 | ### feature 31 | variable "enable_ssm" { 32 | description = "Allow ssh access using session manager" 33 | type = bool 34 | default = false 35 | } 36 | 37 | ### description 38 | variable "name" { 39 | description = "The logical name of the module instance" 40 | type = string 41 | default = null 42 | } 43 | 44 | ### tags 45 | variable "tags" { 46 | description = "The key-value maps for tagging" 47 | type = map(string) 48 | default = {} 49 | } 50 | -------------------------------------------------------------------------------- /examples/kubeflow/variables.tf: 
-------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | default = "us-east-1" 8 | } 9 | 10 | variable "azs" { 11 | description = "A list of availability zones for the vpc to deploy resources" 12 | type = list(string) 13 | default = ["us-east-1a", "us-east-1b", "us-east-1c"] 14 | } 15 | 16 | ### kubernetes cluster 17 | variable "kubernetes_version" { 18 | description = "The target version of kubernetes" 19 | type = string 20 | } 21 | 22 | variable "node_groups" { 23 | description = "Node groups definition" 24 | default = [] 25 | } 26 | 27 | variable "managed_node_groups" { 28 | description = "Amazon managed node groups definition" 29 | default = [] 30 | } 31 | 32 | variable "fargate_profiles" { 33 | description = "Amazon Fargate for EKS profiles" 34 | default = [] 35 | } 36 | 37 | ### description 38 | variable "name" { 39 | description = "The logical name of the module instance" 40 | type = string 41 | } 42 | 43 | ### tags 44 | variable "tags" { 45 | description = "The key-value maps for tagging" 46 | type = map(string) 47 | default = {} 48 | } 49 | -------------------------------------------------------------------------------- /examples/addon/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "use_default_vpc" { 10 | description = "A feature flag for whether to use default vpc" 11 | type = bool 12 | } 13 | 14 | variable "azs" { 15 | description = "A list of availability zones for the vpc to deploy resources" 16 | type = list(string) 17 | } 18 | 19 | ### kubernetes cluster 20 | variable "kubernetes_version" { 21 | description = "The target version 
of kubernetes" 22 | type = string 23 | } 24 | 25 | variable "managed_node_groups" { 26 | description = "Amazon managed node groups definition" 27 | default = [] 28 | } 29 | 30 | ### feature 31 | variable "enable_ssm" { 32 | description = "Allow ssh access using session manager" 33 | type = bool 34 | default = false 35 | } 36 | 37 | ### description 38 | variable "name" { 39 | description = "The logical name of the module instance" 40 | type = string 41 | default = null 42 | } 43 | 44 | ### tags 45 | variable "tags" { 46 | description = "The key-value maps for tagging" 47 | type = map(string) 48 | default = {} 49 | } 50 | -------------------------------------------------------------------------------- /modules/ecr/variables.tf: -------------------------------------------------------------------------------- 1 | ### security 2 | variable "trusted_accounts" { 3 | description = "A list of AWS Account IDs you want to allow them access to the ECR repository" 4 | type = list(string) 5 | default = [] 6 | } 7 | 8 | variable "image_tag_mutability" { 9 | description = "The tag mutability setting for the repository. Must be one of: MUTABLE or IMMUTABLE. Defaults to MUTABLE." 10 | type = string 11 | default = "MUTABLE" 12 | } 13 | 14 | variable "scan_on_push" { 15 | description = "Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false)." 
16 | type = bool 17 | default = false 18 | } 19 | 20 | variable "lifecycle_policy" { 21 | description = "Lifecycle policy JSON document" 22 | type = string 23 | default = null 24 | } 25 | 26 | variable "namespace" { 27 | description = "Namespace of container image repository" 28 | type = string 29 | default = "" 30 | } 31 | 32 | ### description 33 | variable "name" { 34 | description = "Name of container image repository" 35 | type = string 36 | default = "" 37 | } 38 | 39 | ### tags 40 | variable "tags" { 41 | description = "The key-value maps for tagging" 42 | type = map(string) 43 | default = {} 44 | } 45 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/charts/cluster-autoscaler/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "cluster-autoscaler.fullname" . -}} 3 | {{- $svcPort := .Values.service.port -}} 4 | {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} 5 | apiVersion: networking.k8s.io/v1beta1 6 | {{- else -}} 7 | apiVersion: extensions/v1beta1 8 | {{- end }} 9 | kind: Ingress 10 | metadata: 11 | name: {{ $fullName }} 12 | labels: 13 | {{- include "cluster-autoscaler.labels" . | nindent 4 }} 14 | {{- with .Values.ingress.annotations }} 15 | annotations: 16 | {{- toYaml . | nindent 4 }} 17 | {{- end }} 18 | spec: 19 | {{- if .Values.ingress.tls }} 20 | tls: 21 | {{- range .Values.ingress.tls }} 22 | - hosts: 23 | {{- range .hosts }} 24 | - {{ . | quote }} 25 | {{- end }} 26 | secretName: {{ .secretName }} 27 | {{- end }} 28 | {{- end }} 29 | rules: 30 | {{- range .Values.ingress.hosts }} 31 | - host: {{ .host | quote }} 32 | http: 33 | paths: 34 | {{- range .paths }} 35 | - path: {{ . 
}} 36 | backend: 37 | serviceName: {{ $fullName }} 38 | servicePort: {{ $svcPort }} 39 | {{- end }} 40 | {{- end }} 41 | {{- end }} 42 | -------------------------------------------------------------------------------- /examples/spot/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "use_default_vpc" { 10 | description = "A feature flag for whether to use default vpc" 11 | type = bool 12 | } 13 | 14 | variable "azs" { 15 | description = "A list of availability zones for the vpc to deploy resources" 16 | type = list(string) 17 | } 18 | 19 | ### kubernetes cluster 20 | variable "kubernetes_version" { 21 | description = "The target version of kubernetes" 22 | type = string 23 | } 24 | 25 | variable "node_groups" { 26 | description = "Node groups definition" 27 | default = [] 28 | } 29 | 30 | variable "managed_node_groups" { 31 | description = "Amazon managed node groups definition" 32 | default = [] 33 | } 34 | 35 | variable "fargate_profiles" { 36 | description = "Amazon Fargate for EKS profiles" 37 | default = [] 38 | } 39 | 40 | ### description 41 | variable "name" { 42 | description = "The logical name of the module instance" 43 | type = string 44 | default = "eks" 45 | } 46 | 47 | ### tags 48 | variable "tags" { 49 | description = "The key-value maps for tagging" 50 | type = map(string) 51 | default = {} 52 | } 53 | -------------------------------------------------------------------------------- /modules/karpenter/README.md: -------------------------------------------------------------------------------- 1 | # Karpenter 2 | [Karpenter](https://github.com/aws/karpenter) is an open-source node provisioning project built for Kubernetes. Its goal is to improve the efficiency and cost of running workloads on Kubernetes clusters. 
Check out the [docs](https://karpenter.sh/) to learn more. 3 | 4 | ## Examples 5 | - [Introducing Karpenter](https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler/) 6 | - [Implement autoscaling with Karpenter](https://www.eksworkshop.com/beginner/085_scaling_karpenter/) 7 | 8 | ## Quickstart 9 | ### Setup 10 | This is a terraform module to deploy Helm chart for Karpenter on your EKS cluster. 11 | ```hcl 12 | module "eks" { 13 | source = "Young-ook/eks/aws" 14 | name = "eks" 15 | } 16 | 17 | provider "helm" { 18 | kubernetes { 19 | host = module.eks.helmconfig.host 20 | token = module.eks.helmconfig.token 21 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 22 | } 23 | } 24 | 25 | module "karpenter" { 26 | source = "Young-ook/eks/aws//modules/kerpenter" 27 | oidc = module.eks.oidc 28 | } 29 | ``` 30 | Run the terraform code to make a change on your environment. 31 | ``` 32 | terraform init 33 | terraform apply 34 | ``` 35 | 36 | ### Verify 37 | #### Check container status 38 | -------------------------------------------------------------------------------- /examples/irsa/main.tf: -------------------------------------------------------------------------------- 1 | # IAM Role for Service Accounts example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/spinnaker/aws//modules/spinnaker-aware-aws-vpc" 14 | name = var.name 15 | tags = merge(var.tags, module.eks.tags.shared) 16 | azs = var.azs 17 | cidr = var.cidr 18 | enable_igw = var.enable_igw 19 | enable_ngw = var.enable_ngw 20 | single_ngw = var.single_ngw 21 | vpc_endpoint_config = [] 22 | } 23 | 24 | # eks 25 | module "eks" { 26 | source = "Young-ook/eks/aws" 27 | name = var.name 28 | tags = var.tags 29 | subnets = values(module.vpc.subnets["private"]) 30 | kubernetes_version = var.kubernetes_version 31 | 
fargate_profiles = var.fargate_profiles 32 | } 33 | 34 | module "irsa" { 35 | source = "Young-ook/eks/aws//modules/iam-role-for-serviceaccount" 36 | name = join("-", ["irsa", var.name, "s3-readonly"]) 37 | namespace = "default" 38 | serviceaccount = "s3-readonly" 39 | oidc_url = module.eks.oidc.url 40 | oidc_arn = module.eks.oidc.arn 41 | policy_arns = ["arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"] 42 | tags = var.tags 43 | } 44 | -------------------------------------------------------------------------------- /examples/fargate/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | default = "us-east-1" 8 | } 9 | 10 | variable "azs" { 11 | description = "A list of availability zones for the vpc to deploy resources" 12 | type = list(string) 13 | default = ["us-east-1a", "us-east-1b", "us-east-1c"] 14 | } 15 | 16 | variable "subnets" { 17 | description = "The list of subnets to deploy an eks cluster" 18 | type = list(string) 19 | default = null 20 | } 21 | 22 | ### kubernetes cluster 23 | variable "kubernetes_version" { 24 | description = "The target version of kubernetes" 25 | type = string 26 | } 27 | 28 | variable "node_groups" { 29 | description = "Node groups definition" 30 | default = [] 31 | } 32 | 33 | variable "managed_node_groups" { 34 | description = "Amazon managed node groups definition" 35 | default = [] 36 | } 37 | 38 | variable "fargate_profiles" { 39 | description = "Amazon Fargate for EKS profiles" 40 | default = [] 41 | } 42 | 43 | ### description 44 | variable "name" { 45 | description = "The logical name of the module instance" 46 | type = string 47 | default = "eks" 48 | } 49 | 50 | ### tags 51 | variable "tags" { 52 | description = "The key-value maps for tagging" 53 | type = map(string) 54 | default = {} 55 | } 56 | 
-------------------------------------------------------------------------------- /examples/cw/main.tf: -------------------------------------------------------------------------------- 1 | # CloudWatch ContainerInsights example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/vpc/aws" 14 | name = var.name 15 | tags = var.tags 16 | vpc_config = var.use_default_vpc ? null : { 17 | azs = var.azs 18 | cidr = "10.10.0.0/16" 19 | subnet_type = "private" 20 | single_ngw = true 21 | } 22 | } 23 | 24 | # eks 25 | module "eks" { 26 | source = "Young-ook/eks/aws" 27 | name = var.name 28 | tags = var.tags 29 | subnets = slice(values(module.vpc.subnets[var.use_default_vpc ? "public" : "private"]), 0, 3) 30 | kubernetes_version = var.kubernetes_version 31 | managed_node_groups = var.managed_node_groups 32 | node_groups = var.node_groups 33 | enable_ssm = var.enable_ssm 34 | } 35 | 36 | # utilities 37 | provider "helm" { 38 | kubernetes { 39 | host = module.eks.helmconfig.host 40 | token = module.eks.helmconfig.token 41 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 42 | } 43 | } 44 | 45 | module "cw" { 46 | source = "../../modules/container-insights" 47 | features = var.enable_cw 48 | cluster_name = module.eks.cluster.name 49 | oidc = module.eks.oidc 50 | tags = { env = "test" } 51 | } 52 | -------------------------------------------------------------------------------- /examples/app-mesh/modules/codebuild/app/modules/pageviews.rb: -------------------------------------------------------------------------------- 1 | require 'redis' 2 | require 'aws-sdk-dynamodb' 3 | 4 | def pageviews() 5 | if ($yelbddbcache != nil && $yelbddbcache != "") then 6 | dynamodb = Aws::DynamoDB::Client.new(region: $awsregion) 7 | params = { 8 | table_name: $yelbddbcache, 9 | key: { 10 | counter: 'pageviews' 11 | } 12 | } 13 | pageviewsrecord = 
dynamodb.get_item(params) 14 | pageviewscount = pageviewsrecord.item['pageviewscount'] 15 | pageviewscount += 1 16 | params = { 17 | table_name: $yelbddbcache, 18 | key: { 19 | counter: 'pageviews' 20 | }, 21 | update_expression: 'set pageviewscount = :c', 22 | expression_attribute_values: {':c' => pageviewscount}, 23 | return_values: 'UPDATED_NEW' 24 | } 25 | pageviewrecord = dynamodb.update_item(params) 26 | else 27 | redis = Redis.new 28 | redis = Redis.new(:host => $redishost, :port => 6379) 29 | redis.incr("pageviews") 30 | pageviewscount = redis.get("pageviews") 31 | redis.quit() 32 | end 33 | return pageviewscount.to_s 34 | end 35 | -------------------------------------------------------------------------------- /examples/app-mesh/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "use_default_vpc" { 10 | description = "A feature flag for whether to use default vpc" 11 | type = bool 12 | } 13 | 14 | variable "azs" { 15 | description = "A list of availability zones for the vpc to deploy resources" 16 | type = list(string) 17 | } 18 | 19 | ### kubernetes cluster 20 | variable "kubernetes_version" { 21 | description = "The target version of kubernetes" 22 | type = string 23 | } 24 | 25 | variable "node_groups" { 26 | description = "Node groups definition" 27 | default = [] 28 | } 29 | 30 | variable "managed_node_groups" { 31 | description = "Amazon managed node groups definition" 32 | default = [] 33 | } 34 | 35 | variable "fargate_profiles" { 36 | description = "Amazon Fargate for EKS profiles" 37 | default = [] 38 | } 39 | 40 | ### feature 41 | variable "enable_ssm" { 42 | description = "Allow ssh access using session manager" 43 | type = bool 44 | default = false 45 | } 46 | 47 | ### description 48 | variable "name" { 49 | 
description = "The logical name of the module instance" 50 | type = string 51 | default = "eks" 52 | } 53 | 54 | ### tags 55 | variable "tags" { 56 | description = "The key-value maps for tagging" 57 | type = map(string) 58 | default = {} 59 | } 60 | -------------------------------------------------------------------------------- /examples/autoscaling/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "use_default_vpc" { 10 | description = "A feature flag for whether to use default vpc" 11 | type = bool 12 | } 13 | 14 | variable "azs" { 15 | description = "A list of availability zones for the vpc to deploy resources" 16 | type = list(string) 17 | } 18 | 19 | ### kubernetes cluster 20 | variable "kubernetes_version" { 21 | description = "The target version of kubernetes" 22 | type = string 23 | } 24 | 25 | variable "node_groups" { 26 | description = "Node groups definition" 27 | default = [] 28 | } 29 | 30 | variable "managed_node_groups" { 31 | description = "Amazon managed node groups definition" 32 | default = [] 33 | } 34 | 35 | variable "fargate_profiles" { 36 | description = "Amazon Fargate for EKS profiles" 37 | default = [] 38 | } 39 | 40 | ### feature 41 | variable "enable_ssm" { 42 | description = "Allow ssh access using session manager" 43 | type = bool 44 | default = false 45 | } 46 | 47 | ### description 48 | variable "name" { 49 | description = "The logical name of the module instance" 50 | type = string 51 | default = "eks" 52 | } 53 | 54 | ### tags 55 | variable "tags" { 56 | description = "The key-value maps for tagging" 57 | type = map(string) 58 | default = {} 59 | } 60 | -------------------------------------------------------------------------------- /examples/bottlerocket/variables.tf: 
-------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "use_default_vpc" { 10 | description = "A feature flag for whether to use default vpc" 11 | type = bool 12 | } 13 | 14 | variable "azs" { 15 | description = "A list of availability zones for the vpc to deploy resources" 16 | type = list(string) 17 | } 18 | 19 | ### kubernetes cluster 20 | variable "kubernetes_version" { 21 | description = "The target version of kubernetes" 22 | type = string 23 | } 24 | 25 | variable "node_groups" { 26 | description = "Node groups definition" 27 | default = [] 28 | } 29 | 30 | variable "managed_node_groups" { 31 | description = "Amazon managed node groups definition" 32 | default = [] 33 | } 34 | 35 | variable "fargate_profiles" { 36 | description = "Amazon Fargate for EKS profiles" 37 | default = [] 38 | } 39 | 40 | ### feature 41 | variable "enable_ssm" { 42 | description = "Allow ssh access using session manager" 43 | type = bool 44 | default = false 45 | } 46 | 47 | ### description 48 | variable "name" { 49 | description = "The logical name of the module instance" 50 | type = string 51 | default = "eks" 52 | } 53 | 54 | ### tags 55 | variable "tags" { 56 | description = "The key-value maps for tagging" 57 | type = map(string) 58 | default = {} 59 | } 60 | -------------------------------------------------------------------------------- /examples/kubeflow/main.tf: -------------------------------------------------------------------------------- 1 | # Machine Learning with Kubeflow 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/vpc/aws" 14 | name = var.name 15 | tags = var.tags 16 | vpc_config = { 17 | azs = var.azs 18 | cidr = 
"10.10.0.0/16" 19 | subnet_type = "private" 20 | single_ngw = true 21 | } 22 | } 23 | 24 | # eks 25 | module "eks" { 26 | source = "Young-ook/eks/aws" 27 | name = var.name 28 | tags = var.tags 29 | subnets = slice(values(module.vpc.subnets["private"]), 0, 3) 30 | kubernetes_version = var.kubernetes_version 31 | managed_node_groups = var.managed_node_groups 32 | node_groups = var.node_groups 33 | fargate_profiles = var.fargate_profiles 34 | enable_ssm = true 35 | } 36 | 37 | resource "local_file" "kfinst" { 38 | content = templatefile("${path.module}/templates/kfinst.tpl", { 39 | aws_region = var.aws_region 40 | eks_name = module.eks.cluster.name 41 | eks_role = module.eks.role.arn 42 | kubeconfig = module.eks.kubeconfig 43 | }) 44 | filename = "${path.module}/kfinst.sh" 45 | file_permission = "0700" 46 | } 47 | 48 | resource "local_file" "kfuninst" { 49 | content = templatefile("${path.module}/templates/kfuninst.tpl", {}) 50 | filename = "${path.module}/kfuninst.sh" 51 | file_permission = "0700" 52 | } 53 | -------------------------------------------------------------------------------- /examples/ecr/outputs.tf: -------------------------------------------------------------------------------- 1 | output "eks" { 2 | description = "The generated AWS EKS cluster" 3 | value = module.eks.cluster 4 | } 5 | 6 | output "role" { 7 | description = "The generated role of the EKS node group" 8 | value = module.eks.role 9 | } 10 | 11 | output "kubeconfig" { 12 | description = "Bash script to update the kubeconfig file for the EKS cluster" 13 | value = module.eks.kubeconfig 14 | } 15 | 16 | output "features" { 17 | description = "Features configurations of the AWS EKS cluster" 18 | value = module.eks.features 19 | } 20 | 21 | output "url" { 22 | description = "A URL of generated ECR repository" 23 | value = module.ecr.url 24 | } 25 | 26 | output "policy_arns" { 27 | description = "A map of IAM polices to allow access this ECR repository. 
If you want an IAM role or instance profile to have permissions to manage this repository, please attach the `policy_arn` from this output on your side.
res = con.exec_prepared('statement1', [ restaurant ]) 35 | con.close 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /examples/addon/main.tf: -------------------------------------------------------------------------------- 1 | # Amazon EKS with Add-ons 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/vpc/aws" 14 | name = var.name 15 | tags = var.tags 16 | vpc_config = var.use_default_vpc ? null : { 17 | azs = var.azs 18 | cidr = "10.10.0.0/16" 19 | subnet_type = "private" 20 | single_ngw = true 21 | } 22 | } 23 | 24 | # eks 25 | module "eks" { 26 | source = "../../" 27 | name = var.name 28 | tags = var.tags 29 | subnets = slice(values(module.vpc.subnets[var.use_default_vpc ? "public" : "private"]), 0, 3) 30 | kubernetes_version = var.kubernetes_version 31 | managed_node_groups = var.managed_node_groups 32 | enable_ssm = var.enable_ssm 33 | } 34 | 35 | module "addons" { 36 | for_each = { 37 | for addon in [ 38 | { 39 | name = "vpc-cni" 40 | eks_name = module.eks.cluster.name 41 | }, 42 | { 43 | name = "coredns" 44 | eks_name = module.eks.cluster.name 45 | }, 46 | { 47 | name = "kube-proxy" 48 | eks_name = module.eks.cluster.name 49 | }, 50 | { 51 | name = "aws-ebs-csi-driver" 52 | eks_name = module.eks.cluster.name 53 | }, 54 | ] : addon.name => addon 55 | } 56 | source = "../../modules/addon" 57 | tags = var.tags 58 | addon_config = each.value 59 | } 60 | -------------------------------------------------------------------------------- /examples/emr/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | description = "The attributes of Amazon VPC" 3 | value = module.vpc.vpc 4 | } 5 | 6 | output "kubeconfig" { 7 | description = "Bash script to update kubeconfig file" 8 | value = module.eks.kubeconfig 9 | } 10 | 11 | # Need to 
update aws-auth configmap with, 12 | # 13 | # - rolearn: arn:aws:iam::{AWS_ACCOUNT_ID}:role/AWSServiceRoleForAmazonEMRContainers 14 | # username: emr-containers 15 | # 16 | # and also, create role and role mapping on the target namespace 17 | # for more details, https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-cluster-access.html 18 | # 19 | # `eksctl` provides a command that creates the required RBAC resources for EMR, 20 | # and updates the aws-auth ConfigMap to bind the role with the SLR for EMR. 21 | # 22 | 23 | output "enable_emr_access" { 24 | description = "Bash script to enable emr to the eks cluster" 25 | value = join(" ", [ 26 | format("eksctl create iamidentitymapping --cluster %s --service-name emr-containers --namespace default", module.eks.cluster.name), 27 | ]) 28 | } 29 | 30 | output "create_emr_containers" { 31 | description = "Bash script to create emr containers virtual cluster" 32 | value = join(" ", [ 33 | "bash -e", 34 | format("%s/create-emr-virtual-cluster.sh", path.module), 35 | ]) 36 | } 37 | 38 | output "delete_emr_containers" { 39 | description = "Bash script to delete emr containers virtual cluster" 40 | value = join(" ", [ 41 | "bash -e", 42 | format("%s/delete-emr-virtual-cluster.sh", path.module), 43 | ]) 44 | } 45 | -------------------------------------------------------------------------------- /modules/metrics-server/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Metrics Server 2 | [Metrics Server](https://github.com/kubernetes-sigs/metrics-server) is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines. Metrics Server collects resource metrics from Kubelets and exposes them in Kubernetes apiserver through Metrics API for use by Horizontal Pod Autoscaler and Vertical Pod Autoscaler. Metrics API can also be accessed by kubectl top, making it easier to debug autoscaling pipelines. 
3 | 4 | ## Examples 5 | - [Quickstart Example](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/metrics-server/README.md#quickstart) 6 | 7 | ## Quickstart 8 | ### Setup 9 | This is a terraform module to deploy Helm chart for Kubernetes Metrics Server. 10 | ```hcl 11 | module "eks" { 12 | source = "Young-ook/eks/aws" 13 | name = "eks" 14 | } 15 | 16 | provider "helm" { 17 | kubernetes { 18 | host = module.eks.helmconfig.host 19 | token = module.eks.helmconfig.token 20 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 21 | load_config_file = false 22 | } 23 | } 24 | 25 | module "metrics-server" { 26 | source = "Young-ook/eks/aws//modules/metrics-server" 27 | cluster_name = module.eks.cluster.name 28 | oidc = module.eks.oidc 29 | tags = { env = "test" } 30 | } 31 | ``` 32 | Modify the terraform configuration file to deploy metrics server. Run the terraform code to make a change on your environment. 33 | ``` 34 | terraform init 35 | terraform apply 36 | ``` 37 | -------------------------------------------------------------------------------- /examples/cw/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "use_default_vpc" { 10 | description = "A feature flag for whether to use default vpc" 11 | type = bool 12 | } 13 | 14 | variable "azs" { 15 | description = "A list of availability zones for the vpc to deploy resources" 16 | type = list(string) 17 | } 18 | 19 | ### kubernetes cluster 20 | variable "kubernetes_version" { 21 | description = "The target version of kubernetes" 22 | type = string 23 | } 24 | 25 | variable "node_groups" { 26 | description = "Node groups definition" 27 | default = [] 28 | } 29 | 30 | variable "managed_node_groups" { 31 | description = "Amazon managed node groups definition" 32 | 
default = [] 33 | } 34 | 35 | variable "fargate_profiles" { 36 | description = "Amazon Fargate for EKS profiles" 37 | default = [] 38 | } 39 | 40 | ### feature 41 | variable "enable_ssm" { 42 | description = "Allow ssh access using session manager" 43 | type = bool 44 | default = false 45 | } 46 | 47 | variable "enable_cw" { 48 | description = "Enable cloudwatch container insights" 49 | type = map(any) 50 | default = { 51 | enable_metrics = false 52 | enable_logs = false 53 | } 54 | } 55 | 56 | ### description 57 | variable "name" { 58 | description = "The logical name of the module instance" 59 | type = string 60 | default = "eks" 61 | } 62 | 63 | ### tags 64 | variable "tags" { 65 | description = "The key-value maps for tagging" 66 | type = map(string) 67 | default = {} 68 | } 69 | -------------------------------------------------------------------------------- /examples/lb/main.tf: -------------------------------------------------------------------------------- 1 | # Amazon EKS with AWS LoadBalancers 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/spinnaker/aws//modules/spinnaker-aware-aws-vpc" 14 | version = "2.3.1" 15 | name = var.name 16 | tags = merge(var.tags, module.eks.tags.shared) 17 | azs = var.azs 18 | cidr = var.cidr 19 | enable_igw = var.enable_igw 20 | enable_ngw = var.enable_ngw 21 | single_ngw = var.single_ngw 22 | vpc_endpoint_config = [] 23 | } 24 | 25 | # eks 26 | module "eks" { 27 | source = "Young-ook/eks/aws" 28 | version = "1.7.5" 29 | name = var.name 30 | tags = var.tags 31 | subnets = values(module.vpc.subnets["private"]) 32 | kubernetes_version = var.kubernetes_version 33 | managed_node_groups = var.managed_node_groups 34 | node_groups = var.node_groups 35 | fargate_profiles = var.fargate_profiles 36 | } 37 | 38 | provider "helm" { 39 | kubernetes { 40 | host = module.eks.helmconfig.host 41 | token = 
module.eks.helmconfig.token 42 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 43 | } 44 | } 45 | 46 | module "lb-controller" { 47 | source = "../../modules/lb-controller" 48 | cluster_name = module.eks.cluster.name 49 | oidc = module.eks.oidc 50 | tags = var.tags 51 | helm = { 52 | vars = module.eks.features.fargate_enabled ? { 53 | vpcId = module.vpc.vpc.id 54 | } : {} 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /modules/prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Prometheus 2 | [Prometheus](https://prometheus.io/) is an open-source systems monitoring and alerting toolkit originally built at SoundCloud. Since its inception in 2012, many companies and organizations have adopted Prometheus, and the project has a very active developer and user community. It is now a standalone open source project and maintained independently of any company. Prometheus joined the Cloud Native Computing Foundation in 2016 as the second hosted project, after Kubernetes. 3 | 4 | ## Examples 5 | - [Deploy Promethus using Helm](https://www.eksworkshop.com/intermediate/240_monitoring/deploy-prometheus/) 6 | 7 | ## Quickstart 8 | ### Setup 9 | This is a terraform module to deploy Helm chart for Prometheus. 
10 | ```hcl 11 | module "eks" { 12 | source = "Young-ook/eks/aws" 13 | name = "eks" 14 | } 15 | 16 | provider "helm" { 17 | kubernetes { 18 | host = module.eks.helmconfig.host 19 | token = module.eks.helmconfig.token 20 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 21 | } 22 | } 23 | 24 | module "prometheus" { 25 | source = "Young-ook/eks/aws//modules/prometheus" 26 | enabled = true 27 | cluster_name = module.eks.cluster.name 28 | oidc = module.eks.oidc 29 | tags = { env = "test" } 30 | helm = { 31 | values = { 32 | "alertmanager.persistentVolume.storageClass" = "gp2" 33 | "server.persistentVolume.storageClass" = "gp2" 34 | } 35 | } 36 | } 37 | ``` 38 | Modify the terraform configuration file to deploy prometheus. Run the terraform code to make a change on your environment. 39 | ``` 40 | terraform init 41 | terraform apply 42 | ``` 43 | -------------------------------------------------------------------------------- /examples/app-mesh/main.tf: -------------------------------------------------------------------------------- 1 | # App Mesh example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # build container image 12 | module "codebuild" { 13 | source = "./modules/codebuild" 14 | name = var.name 15 | tags = var.tags 16 | } 17 | 18 | # vpc 19 | module "vpc" { 20 | source = "Young-ook/vpc/aws" 21 | name = var.name 22 | tags = var.tags 23 | vpc_config = var.use_default_vpc ? null : { 24 | azs = var.azs 25 | cidr = "10.10.0.0/16" 26 | subnet_type = "private" 27 | single_ngw = true 28 | } 29 | } 30 | 31 | # eks 32 | module "eks" { 33 | source = "Young-ook/eks/aws" 34 | name = var.name 35 | tags = var.tags 36 | subnets = slice(values(module.vpc.subnets[var.use_default_vpc ? 
"public" : "private"]), 0, 3) 37 | kubernetes_version = var.kubernetes_version 38 | managed_node_groups = var.managed_node_groups 39 | node_groups = var.node_groups 40 | enable_ssm = var.enable_ssm 41 | policy_arns = [ 42 | "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess", 43 | "arn:aws:iam::aws:policy/AWSAppMeshEnvoyAccess", 44 | ] 45 | } 46 | 47 | # utilities 48 | provider "helm" { 49 | kubernetes { 50 | host = module.eks.helmconfig.host 51 | token = module.eks.helmconfig.token 52 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 53 | } 54 | } 55 | 56 | module "app-mesh" { 57 | source = "Young-ook/eks/aws//modules/app-mesh" 58 | cluster_name = module.eks.cluster.name 59 | oidc = module.eks.oidc 60 | tags = { env = "test" } 61 | helm = { 62 | version = "1.2.0" 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | ### network 2 | variable "subnets" { 3 | description = "The list of subnet IDs to deploy your EKS cluster" 4 | type = list(string) 5 | } 6 | 7 | ### kubernetes cluster 8 | variable "kubernetes_version" { 9 | description = "The target version of kubernetes" 10 | type = string 11 | default = "1.19" 12 | } 13 | 14 | variable "node_groups" { 15 | description = "Node groups definition" 16 | default = [] 17 | } 18 | 19 | variable "managed_node_groups" { 20 | description = "Amazon managed node groups definition" 21 | default = [] 22 | } 23 | 24 | variable "fargate_profiles" { 25 | description = "Amazon Fargate for EKS profiles" 26 | default = [] 27 | } 28 | 29 | variable "wait" { 30 | description = "Wait duration after control plane creation" 31 | default = "90s" 32 | } 33 | 34 | ### feature 35 | variable "enabled_cluster_log_types" { 36 | description = "A list of the desired control plane logging to enable" 37 | type = list(string) 38 | default = [] 39 | } 40 | 41 | variable "enable_ssm" { 42 | 
description = "Allow ssh access using session manager" 43 | type = bool 44 | default = false 45 | } 46 | 47 | variable "bottlerocket_config" { 48 | description = "Bottlerocket OS configuration" 49 | default = {} 50 | } 51 | 52 | ### security 53 | variable "policy_arns" { 54 | description = "A list of policy ARNs to attach the node groups role" 55 | type = list(string) 56 | default = [] 57 | } 58 | 59 | ### description 60 | variable "name" { 61 | description = "The logical name of the module instance" 62 | type = string 63 | default = null 64 | } 65 | 66 | ### tags 67 | variable "tags" { 68 | description = "The key-value maps for tagging" 69 | type = map(string) 70 | default = {} 71 | } 72 | -------------------------------------------------------------------------------- /modules/lb-controller/main.tf: -------------------------------------------------------------------------------- 1 | ## kubernetes aws-load-balancer-controller 2 | 3 | locals { 4 | namespace = lookup(var.helm, "namespace", "kube-system") 5 | serviceaccount = lookup(var.helm, "serviceaccount", "aws-load-balancer-controller") 6 | } 7 | 8 | module "irsa" { 9 | source = "../iam-role-for-serviceaccount" 10 | name = join("-", ["irsa", local.name]) 11 | namespace = local.namespace 12 | serviceaccount = local.serviceaccount 13 | oidc_url = var.oidc.url 14 | oidc_arn = var.oidc.arn 15 | policy_arns = [aws_iam_policy.lbc.arn] 16 | tags = var.tags 17 | } 18 | 19 | resource "aws_iam_policy" "lbc" { 20 | name = local.name 21 | tags = merge(local.default-tags, var.tags) 22 | description = format("Allow aws-load-balancer-controller to manage AWS resources") 23 | path = "/" 24 | policy = file("${path.module}/policy.json") 25 | } 26 | 27 | resource "helm_release" "lbc" { 28 | name = lookup(var.helm, "name", "aws-load-balancer-controller") 29 | chart = lookup(var.helm, "chart", "aws-load-balancer-controller") 30 | version = lookup(var.helm, "version", null) 31 | repository = lookup(var.helm, "repository", 
"https://aws.github.io/eks-charts") 32 | namespace = local.namespace 33 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 34 | 35 | dynamic "set" { 36 | for_each = merge({ 37 | "clusterName" = var.cluster_name 38 | "serviceAccount.name" = local.serviceaccount 39 | "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" = module.irsa.arn 40 | }, lookup(var.helm, "vars", {})) 41 | content { 42 | name = set.key 43 | value = set.value 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /examples/bottlerocket/README.md: -------------------------------------------------------------------------------- 1 | # Bottle Rocket OS 2 | 3 | ![bottlerocket-security-first-container-host-os](../../images/bottlerocket-security-first-container-host-os.png) 4 | ![bottlerocket-features](../../images/bottlerocket-features.png) 5 | 6 | ## Download example 7 | Download this example on your workspace 8 | ```sh 9 | git clone https://github.com/Young-ook/terraform-aws-eks 10 | cd terraform-aws-eks/examples/bottlerocket 11 | ``` 12 | 13 | ## Setup 14 | [This](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/bottlerocket/main.tf) is the example of terraform configuration file to create a managed EKS on your AWS account. Check out and apply it using terraform command. 15 | 16 | Run terraform: 17 | ``` 18 | terraform init 19 | terraform apply 20 | ``` 21 | Also you can use the `-var-file` option for customized paramters when you run the terraform plan/apply command. 22 | ``` 23 | terraform plan -var-file tc1.tfvars 24 | terraform apply -var-file tc1.tfvars 25 | ``` 26 | 27 | ## Launch Bottlerocket managed node group 28 | You can configure an AMI type for your (aws managed or self managed) node groups. For GPU instance types, you can set the `ami_type` parameter in the node group definition. GPU instance types should use the AL2_x86_64_GPU for its ami type or Non-GPU instances should use the AL2_x86_64. 
And ARM architecture based instance should use AL2_ARM_64. 29 | 30 | Possible values: 31 | - AL2_x86_64 32 | - AL2_x86_64_GPU 33 | - AL2_ARM_64 34 | - CUSTOM 35 | - BOTTLEROCKET_ARM_64 36 | - BOTTLEROCKET_x86_64 37 | 38 | ## Clean up 39 | To remove all infrastrcuture, run terraform: 40 | ```sh 41 | terraform destroy 42 | ``` 43 | Don't forget you have to use the `-var-file` option when you run terraform destroy command to delete the aws resources created with extra variable files. 44 | ```sh 45 | terraform destroy -var-file tc1.tfvars 46 | ``` 47 | -------------------------------------------------------------------------------- /examples/spot/main.tf: -------------------------------------------------------------------------------- 1 | # Spot Instances for node groups example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/vpc/aws" 14 | name = var.name 15 | tags = var.tags 16 | vpc_config = var.use_default_vpc ? null : { 17 | azs = var.azs 18 | cidr = "10.10.0.0/16" 19 | subnet_type = "private" 20 | single_ngw = true 21 | } 22 | } 23 | 24 | # eks 25 | module "eks" { 26 | source = "Young-ook/eks/aws" 27 | name = var.name 28 | tags = var.tags 29 | subnets = slice(values(module.vpc.subnets[var.use_default_vpc ? 
"public" : "private"]), 0, 3) 30 | kubernetes_version = var.kubernetes_version 31 | managed_node_groups = var.managed_node_groups 32 | node_groups = var.node_groups 33 | fargate_profiles = var.fargate_profiles 34 | } 35 | 36 | provider "helm" { 37 | kubernetes { 38 | host = module.eks.helmconfig.host 39 | token = module.eks.helmconfig.token 40 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 41 | } 42 | } 43 | 44 | module "metrics-server" { 45 | source = "Young-ook/eks/aws//modules/metrics-server" 46 | cluster_name = module.eks.cluster.name 47 | oidc = module.eks.oidc 48 | tags = { env = "test" } 49 | } 50 | 51 | module "cluster-autoscaler" { 52 | source = "Young-ook/eks/aws//modules/cluster-autoscaler" 53 | cluster_name = module.eks.cluster.name 54 | oidc = module.eks.oidc 55 | tags = { env = "test" } 56 | } 57 | 58 | module "node-termination-handler" { 59 | source = "../../modules/node-termination-handler" 60 | cluster_name = module.eks.cluster.name 61 | oidc = module.eks.oidc 62 | tags = { env = "test" } 63 | } 64 | -------------------------------------------------------------------------------- /examples/emr/main.tf: -------------------------------------------------------------------------------- 1 | # Amazon EMR on Amazon EKS 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/vpc/aws" 14 | name = var.name 15 | tags = var.tags 16 | vpc_config = var.use_default_vpc ? null : { 17 | azs = var.azs 18 | cidr = "10.10.0.0/16" 19 | subnet_type = "private" 20 | single_ngw = true 21 | } 22 | } 23 | 24 | # eks 25 | module "eks" { 26 | source = "Young-ook/eks/aws" 27 | name = var.name 28 | tags = var.tags 29 | subnets = slice(values(module.vpc.subnets[var.use_default_vpc ? 
"public" : "private"]), 0, 3) 30 | kubernetes_version = var.kubernetes_version 31 | managed_node_groups = var.managed_node_groups 32 | enable_ssm = var.enable_ssm 33 | } 34 | 35 | resource "local_file" "create-emr-virtual-cluster-request-json" { 36 | content = templatefile("${path.module}/templates/create-emr-virtual-cluster-request.tpl", { 37 | emr_name = var.name 38 | eks_name = module.eks.cluster.name 39 | }) 40 | filename = "${path.module}/create-emr-virtual-cluster-request.json" 41 | file_permission = "0600" 42 | } 43 | 44 | resource "local_file" "create-emr-virtual-cluster-cli" { 45 | depends_on = [local_file.create-emr-virtual-cluster-request-json, ] 46 | content = templatefile("${path.module}/templates/create-emr-virtual-cluster.tpl", { 47 | aws_region = var.aws_region 48 | }) 49 | filename = "${path.module}/create-emr-virtual-cluster.sh" 50 | file_permission = "0600" 51 | } 52 | 53 | resource "local_file" "delete-emr-virtual-cluster-cli" { 54 | content = templatefile("${path.module}/templates/delete-emr-virtual-cluster.tpl", { 55 | aws_region = var.aws_region 56 | }) 57 | filename = "${path.module}/delete-emr-virtual-cluster.sh" 58 | file_permission = "0600" 59 | } 60 | -------------------------------------------------------------------------------- /modules/app-mesh/main.tf: -------------------------------------------------------------------------------- 1 | ## kubernetes aws-app-mesh-controller 2 | 3 | # aws partitions 4 | module "aws" { 5 | source = "Young-ook/spinnaker/aws//modules/aws-partitions" 6 | } 7 | 8 | locals { 9 | namespace = lookup(var.helm, "namespace", "appmesh-system") 10 | serviceaccount = lookup(var.helm, "serviceaccount", "aws-appmesh-controller") 11 | } 12 | 13 | module "irsa" { 14 | source = "../iam-role-for-serviceaccount" 15 | name = join("-", ["irsa", local.name]) 16 | namespace = local.namespace 17 | serviceaccount = local.serviceaccount 18 | oidc_url = var.oidc.url 19 | oidc_arn = var.oidc.arn 20 | policy_arns = [ 21 | 
format("arn:%s:iam::aws:policy/AWSCloudMapFullAccess", module.aws.partition.partition), 22 | format("arn:%s:iam::aws:policy/AWSAppMeshFullAccess", module.aws.partition.partition), 23 | ] 24 | tags = var.tags 25 | } 26 | 27 | resource "helm_release" "appmesh" { 28 | name = lookup(var.helm, "name", "appmesh-controller") 29 | chart = lookup(var.helm, "chart", "appmesh-controller") 30 | version = lookup(var.helm, "version", null) 31 | repository = lookup(var.helm, "repository", "https://aws.github.io/eks-charts") 32 | namespace = local.namespace 33 | create_namespace = true 34 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 35 | 36 | dynamic "set" { 37 | for_each = merge({ 38 | "region" = module.aws.region.name 39 | "serviceAccount.name" = local.serviceaccount 40 | "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" = module.irsa.arn 41 | "tracing.enabled" = true 42 | "tracing.provider" = "x-ray" 43 | }, lookup(var.helm, "vars", {})) 44 | content { 45 | name = set.key 46 | value = set.value 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /examples/addon/README.md: -------------------------------------------------------------------------------- 1 | # Amazon EKS Add-ons 2 | This is an example on how to create and update add-ons on your EKS cluster. If you want know more details about Add-on terraform module, please check out [this](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/addon). 3 | 4 | ## Download example 5 | Download this example on your workspace 6 | ```sh 7 | git clone https://github.com/Young-ook/terraform-aws-eks 8 | cd terraform-aws-eks/examples/addon 9 | ``` 10 | 11 | ## Setup 12 | [This](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/addon/main.tf) is the example of terraform configuration file to create multiple add-ons on your EKS cluster. Check out and apply it using terraform command. 
13 | 14 | If you don't have the terraform and kubernetes tools in your environment, go to the main [page](https://github.com/Young-ook/terraform-aws-eks) of this repository and follow the installation instructions. 15 | 16 | Run terraform: 17 | ``` 18 | terraform init 19 | terraform apply 20 | ``` 21 | Also you can use the `-var-file` option for customized paramters when you run the terraform plan/apply command. 22 | ``` 23 | terraform plan -var-file tc1.tfvars 24 | terraform apply -var-file tc1.tfvars 25 | ``` 26 | 27 | ### Update kubeconfig 28 | Update and download kubernetes config file to local. You can see the bash command like below after terraform apply is complete. The output looks like below. Copy and run it to save the kubernetes configuration file to your local workspace. And export it as an environment variable to apply to the terminal. 29 | 30 | ``` 31 | bash -e .terraform/modules/eks/script/update-kubeconfig.sh -r ap-northeast-2 -n eks-addon -k kubeconfig 32 | export KUBECONFIG=kubeconfig 33 | ``` 34 | 35 | ## Clean up 36 | Run terraform: 37 | ``` 38 | terraform destroy 39 | ``` 40 | Don't forget you have to use the `-var-file` option when you run terraform destroy command to delete the aws resources created with extra variable files. 41 | ``` 42 | terraform destroy -var-file tc1.tfvars 43 | ``` 44 | -------------------------------------------------------------------------------- /examples/spot/README.md: -------------------------------------------------------------------------------- 1 | # Amazon EKS with Spot Instances 2 | Amazon EC2 Spot Instances let you take advantage of unused EC2 capacity in the AWS cloud. Spot Instances are available at up to a 90% discount compared to On-Demand prices; however, can be interrupted via Spot Instance interruptions, a two-minute warning before Amazon EC2 stops or terminates the instance. 
The AWS Node Termination Handler makes it easy for users to take advantage of the cost savings and performance boost offered by EC2 Spot Instances in their Kubernetes clusters while gracefully handling EC2 Spot Instance terminations. The AWS Node Termination Handler provides a connection between termination requests from AWS to Kubernetes nodes, allowing graceful draining and termination of nodes that receive interruption notifications. The termination handler uses the Kubernetes API to initiate drain and cordon actions on a node that is targeted for termination. 3 | For more details, please visit [this](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/node-termination-handler/) 4 | 5 | ## Download example 6 | Download this example on your workspace 7 | ```sh 8 | git clone https://github.com/Young-ook/terraform-aws-eks 9 | cd terraform-aws-eks/examples/spot 10 | ``` 11 | 12 | ## Setup 13 | [This](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/spot/main.tf) is the example of terraform configuration file to create a managed EKS with Spot Instances on your AWS account. Check out and apply it using terraform command. 14 | 15 | Run terraform: 16 | ``` 17 | terraform init 18 | terraform apply 19 | ``` 20 | Also you can use the `-var-file` option for customized parameters when you run the terraform plan/apply command. 21 | ``` 22 | terraform plan -var-file default.tfvars 23 | terraform apply -var-file default.tfvars 24 | ``` 25 | 26 | ## Clean up 27 | Run terraform: 28 | ``` 29 | terraform destroy 30 | ``` 31 | Don't forget you have to use the `-var-file` option when you run terraform destroy command to delete the aws resources created with extra variable files.
32 | ``` 33 | terraform destroy -var-file default.tfvars 34 | ``` 35 | -------------------------------------------------------------------------------- /examples/ecr/main.tf: -------------------------------------------------------------------------------- 1 | # Fargate node groups example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/spinnaker/aws//modules/spinnaker-aware-aws-vpc" 14 | name = var.name 15 | tags = merge(var.tags, module.eks.tags.shared) 16 | azs = var.azs 17 | cidr = var.cidr 18 | enable_igw = var.enable_igw 19 | enable_ngw = var.enable_ngw 20 | single_ngw = var.single_ngw 21 | 22 | # Amazon ECS tasks using the Fargate launch type and platform version 1.3.0 or earlier only require 23 | # the com.amazonaws.region.ecr.dkr Amazon ECR VPC endpoint and the Amazon S3 gateway endpoints. 24 | # 25 | # Amazon ECS tasks using the Fargate launch type and platform version 1.4.0 or later require both 26 | # the com.amazonaws.region.ecr.dkr and com.amazonaws.region.ecr.api Amazon ECR VPC endpoints and 27 | # the Amazon S3 gateway endpoints. 
28 | # 29 | # For more details, please visit the https://docs.aws.amazon.com/AmazonECR/latest/userguide/vpc-endpoints.html 30 | 31 | vpc_endpoint_config = [ 32 | { 33 | service = "ecr.dkr" 34 | type = "Interface" 35 | private_dns_enabled = false 36 | }, 37 | { 38 | service = "ecr.api" 39 | type = "Interface" 40 | private_dns_enabled = true 41 | }, 42 | { 43 | service = "s3" 44 | type = "Gateway" 45 | }, 46 | ] 47 | } 48 | 49 | # ecr 50 | module "ecr" { 51 | source = "Young-ook/eks/aws//modules/ecr" 52 | name = "app" 53 | tags = var.tags 54 | } 55 | 56 | # eks 57 | module "eks" { 58 | source = "Young-ook/eks/aws" 59 | name = var.name 60 | tags = var.tags 61 | subnets = values(module.vpc.subnets["private"]) 62 | kubernetes_version = var.kubernetes_version 63 | enable_ssm = true 64 | managed_node_groups = var.managed_node_groups 65 | node_groups = var.node_groups 66 | fargate_profiles = var.fargate_profiles 67 | } 68 | -------------------------------------------------------------------------------- /examples/ecr/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | } 8 | 9 | variable "cidr" { 10 | description = "The vpc CIDR (e.g. 
10.0.0.0/16)" 11 | type = string 12 | default = "10.0.0.0/16" 13 | } 14 | 15 | variable "azs" { 16 | description = "A list of availability zones for the vpc" 17 | type = list(string) 18 | default = ["us-east-1a", "us-east-1b", "us-east-1c"] 19 | } 20 | 21 | variable "vpc_endpoint_config" { 22 | description = "A list of vpc endpoint configurations" 23 | type = list(any) 24 | default = null 25 | } 26 | 27 | ### features 28 | variable "enable_igw" { 29 | description = "Should be true if you want to provision Internet Gateway for internet facing communication" 30 | type = bool 31 | default = true 32 | } 33 | 34 | variable "enable_ngw" { 35 | description = "Should be true if you want to provision NAT Gateway(s) across all of private networks" 36 | type = bool 37 | default = false 38 | } 39 | 40 | variable "single_ngw" { 41 | description = "Should be true if you want to provision a single shared NAT Gateway across all of private networks" 42 | type = bool 43 | default = false 44 | } 45 | 46 | ### kubernetes cluster 47 | variable "kubernetes_version" { 48 | description = "The target version of kubernetes" 49 | type = string 50 | } 51 | 52 | variable "node_groups" { 53 | description = "Node groups definition" 54 | default = [] 55 | } 56 | 57 | variable "managed_node_groups" { 58 | description = "Amazon managed node groups definition" 59 | default = [] 60 | } 61 | 62 | variable "fargate_profiles" { 63 | description = "Amazon Fargate for EKS profiles" 64 | default = [] 65 | } 66 | 67 | ### description 68 | variable "name" { 69 | description = "The logical name of the module instance" 70 | type = string 71 | default = "eks" 72 | } 73 | 74 | ### tags 75 | variable "tags" { 76 | description = "The key-value maps for tagging" 77 | type = map(string) 78 | default = {} 79 | } 80 | -------------------------------------------------------------------------------- /examples/arm64/awscb.tf: -------------------------------------------------------------------------------- 1 | # build 
container image 2 | 3 | # This separate provider is no longer required in this example, 4 | # because this example is tested on ap-northeast-2 (seoul) and it supports arm64 architecture 5 | # with ECR, EKS, CodeBuild now. If you want to run this example on other regions, 6 | # please check below before you begin. 7 | # (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) 8 | 9 | provider "aws" { 10 | alias = "codebuild" 11 | region = "ap-northeast-1" 12 | } 13 | 14 | ### pipeline/cb 15 | locals { 16 | stages = ["amd64", "arm64", "manifest"] 17 | image_al2_aarch64 = "aws/codebuild/amazonlinux2-aarch64-standard:2.0" 18 | image_al2_amd64 = "aws/codebuild/amazonlinux2-x86_64-standard:3.0" 19 | buildspec_image = "examples/arm64/buildspec-docker.yaml" 20 | buildspec_manifest = "examples/arm64/buildspec-manifest.yaml" 21 | } 22 | 23 | module "cb" { 24 | for_each = toset(local.stages) 25 | source = "Young-ook/spinnaker/aws//modules/codebuild" 26 | version = "2.2.6" 27 | name = join("-", ["hello-nodejs", each.key]) 28 | tags = var.tags 29 | environment_config = { 30 | type = each.key == "arm64" ? "ARM_CONTAINER" : "LINUX_CONTAINER" 31 | compute_type = "BUILD_GENERAL1_LARGE" 32 | image = each.key == "arm64" ? local.image_al2_aarch64 : local.image_al2_amd64 33 | privileged_mode = true 34 | environment_variables = { 35 | REPOSITORY_URI = module.ecr.url 36 | TAG = each.key == "manifest" ? "" : each.key 37 | } 38 | } 39 | source_config = { 40 | type = "GITHUB" 41 | location = "https://github.com/Young-ook/terraform-aws-eks.git" 42 | buildspec = each.key == "manifest" ?
local.buildspec_manifest : local.buildspec_image 43 | version = "main" 44 | } 45 | policy_arns = [ 46 | module.ecr.policy_arns["read"], 47 | module.ecr.policy_arns["write"], 48 | ] 49 | } 50 | 51 | resource "local_file" "manifest" { 52 | content = templatefile("${path.module}/templates/hello-nodejs.tpl", { 53 | ecr_uri = module.ecr.url 54 | }) 55 | filename = "${path.cwd}/hello-nodejs.yaml" 56 | file_permission = "0400" 57 | } 58 | -------------------------------------------------------------------------------- /examples/lb/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | default = "us-east-1" 8 | } 9 | 10 | variable "cidr" { 11 | description = "The vpc CIDR (e.g. 10.0.0.0/16)" 12 | type = string 13 | default = "10.0.0.0/16" 14 | } 15 | 16 | variable "azs" { 17 | description = "A list of availability zones for the vpc to deploy resources" 18 | type = list(string) 19 | default = ["us-east-1a", "us-east-1b", "us-east-1c"] 20 | } 21 | 22 | variable "subnets" { 23 | description = "The list of subnets to deploy an eks cluster" 24 | type = list(string) 25 | default = null 26 | } 27 | 28 | variable "enable_igw" { 29 | description = "Should be true if you want to provision Internet Gateway for internet facing communication" 30 | type = bool 31 | default = true 32 | } 33 | 34 | variable "enable_ngw" { 35 | description = "Should be true if you want to provision NAT Gateway(s) across all of private networks" 36 | type = bool 37 | default = false 38 | } 39 | 40 | variable "single_ngw" { 41 | description = "Should be true if you want to provision a single shared NAT Gateway across all of private networks" 42 | type = bool 43 | default = false 44 | } 45 | 46 | ### kubernetes cluster 47 | variable "kubernetes_version" { 48 | description = "The target version of 
kubernetes" 49 | type = string 50 | } 51 | 52 | variable "node_groups" { 53 | description = "Node groups definition" 54 | default = [] 55 | } 56 | 57 | variable "managed_node_groups" { 58 | description = "Amazon managed node groups definition" 59 | default = [] 60 | } 61 | 62 | variable "fargate_profiles" { 63 | description = "Amazon Fargate for EKS profiles" 64 | default = [] 65 | } 66 | 67 | ### description 68 | variable "name" { 69 | description = "The logical name of the module instance" 70 | type = string 71 | default = "eks" 72 | } 73 | 74 | ### tags 75 | variable "tags" { 76 | description = "The key-value maps for tagging" 77 | type = map(string) 78 | default = {} 79 | } 80 | -------------------------------------------------------------------------------- /examples/irsa/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | default = "us-east-1" 8 | } 9 | 10 | variable "cidr" { 11 | description = "The vpc CIDR (e.g. 
10.0.0.0/16)" 12 | type = string 13 | default = "10.0.0.0/16" 14 | } 15 | 16 | variable "azs" { 17 | description = "A list of availability zones for the vpc to deploy resources" 18 | type = list(string) 19 | default = ["us-east-1a", "us-east-1b", "us-east-1c"] 20 | } 21 | 22 | variable "subnets" { 23 | description = "The list of subnets to deploy an eks cluster" 24 | type = list(string) 25 | default = null 26 | } 27 | 28 | variable "enable_igw" { 29 | description = "Should be true if you want to provision Internet Gateway for internet facing communication" 30 | type = bool 31 | default = true 32 | } 33 | 34 | variable "enable_ngw" { 35 | description = "Should be true if you want to provision NAT Gateway(s) across all of private networks" 36 | type = bool 37 | default = false 38 | } 39 | 40 | variable "single_ngw" { 41 | description = "Should be true if you want to provision a single shared NAT Gateway across all of private networks" 42 | type = bool 43 | default = false 44 | } 45 | 46 | ### kubernetes cluster 47 | variable "kubernetes_version" { 48 | description = "The target version of kubernetes" 49 | type = string 50 | default = "1.17" 51 | } 52 | 53 | variable "node_groups" { 54 | description = "Node groups definition" 55 | default = [] 56 | } 57 | 58 | variable "managed_node_groups" { 59 | description = "Amazon managed node groups definition" 60 | default = [] 61 | } 62 | 63 | variable "fargate_profiles" { 64 | description = "Amazon Fargate for EKS profiles" 65 | default = [] 66 | } 67 | 68 | ### description 69 | variable "name" { 70 | description = "The logical name of the module instance" 71 | type = string 72 | default = "eks" 73 | } 74 | 75 | ### tags 76 | variable "tags" { 77 | description = "The key-value maps for tagging" 78 | type = map(string) 79 | default = {} 80 | } 81 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/charts/cluster-autoscaler/templates/_helpers.tpl: 
-------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "cluster-autoscaler.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 7 | {{- end }} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "cluster-autoscaler.fullname" -}} 15 | {{- if .Values.fullnameOverride }} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 17 | {{- else }} 18 | {{- $name := default .Chart.Name .Values.nameOverride }} 19 | {{- if contains $name .Release.Name }} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 21 | {{- else }} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "cluster-autoscaler.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 32 | {{- end }} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "cluster-autoscaler.labels" -}} 38 | helm.sh/chart: {{ include "cluster-autoscaler.chart" . }} 39 | {{ include "cluster-autoscaler.selectorLabels" . }} 40 | {{- if .Chart.AppVersion }} 41 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 42 | {{- end }} 43 | app.kubernetes.io/managed-by: {{ .Release.Service }} 44 | {{- end }} 45 | 46 | {{/* 47 | Selector labels 48 | */}} 49 | {{- define "cluster-autoscaler.selectorLabels" -}} 50 | app.kubernetes.io/name: {{ include "cluster-autoscaler.name" . 
}} 51 | app.kubernetes.io/instance: {{ .Release.Name }} 52 | {{- end }} 53 | 54 | {{/* 55 | Create the name of the service account to use 56 | */}} 57 | {{- define "cluster-autoscaler.serviceAccountName" -}} 58 | {{- if .Values.serviceAccount.create }} 59 | {{- default (include "cluster-autoscaler.fullname" .) .Values.serviceAccount.name }} 60 | {{- else }} 61 | {{- default "default" .Values.serviceAccount.name }} 62 | {{- end }} 63 | {{- end }} 64 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/main.tf: -------------------------------------------------------------------------------- 1 | ## kubernetes cluster autoscaler 2 | 3 | locals { 4 | namespace = lookup(var.helm, "namespace", "kube-system") 5 | serviceaccount = lookup(var.helm, "serviceaccount", "cluster-autoscaler") 6 | } 7 | 8 | module "irsa" { 9 | source = "../iam-role-for-serviceaccount" 10 | name = join("-", ["irsa", local.name]) 11 | namespace = local.namespace 12 | serviceaccount = local.serviceaccount 13 | oidc_url = var.oidc.url 14 | oidc_arn = var.oidc.arn 15 | policy_arns = [aws_iam_policy.autoscaler.arn] 16 | tags = var.tags 17 | } 18 | 19 | resource "aws_iam_policy" "autoscaler" { 20 | name = local.name 21 | description = format("Allow cluster-autoscaler to manage AWS resources") 22 | path = "/" 23 | policy = jsonencode({ 24 | Statement = [{ 25 | Action = [ 26 | "autoscaling:DescribeAutoScalingGroups", 27 | "autoscaling:DescribeAutoScalingInstances", 28 | "autoscaling:DescribeLaunchConfigurations", 29 | "autoscaling:DescribeTags", 30 | "autoscaling:SetDesiredCapacity", 31 | "autoscaling:TerminateInstanceInAutoScalingGroup", 32 | "ec2:DescribeLaunchTemplateVersions", 33 | ] 34 | Effect = "Allow" 35 | Resource = ["*"] 36 | }] 37 | Version = "2012-10-17" 38 | }) 39 | } 40 | 41 | resource "helm_release" "autoscaler" { 42 | name = lookup(var.helm, "name", "cluster-autoscaler") 43 | chart = lookup(var.helm, "chart", "cluster-autoscaler") 44 
| version = lookup(var.helm, "version", null) 45 | repository = lookup(var.helm, "repository", join("/", [path.module, "charts"])) 46 | namespace = local.namespace 47 | create_namespace = true 48 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 49 | 50 | dynamic "set" { 51 | for_each = merge({ 52 | "autoDiscovery.clusterName" = var.cluster_name 53 | "serviceAccount.name" = local.serviceaccount 54 | "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" = module.irsa.arn 55 | }, lookup(var.helm, "vars", {})) 56 | content { 57 | name = set.key 58 | value = set.value 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /examples/cw/README.md: -------------------------------------------------------------------------------- 1 | # Amazon CloudWatch Container Insights 2 | Use [CloudWatch Container Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights.html) to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. It automatically collects metrics for many resources, such as CPU, memory, disk, and network. It also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects. 3 | 4 | ![aws-cw-container-insights](../../images/aws-cw-container-insights.png) 5 | 6 | ## Download example 7 | Download this example on your workspace 8 | ```sh 9 | git clone https://github.com/Young-ook/terraform-aws-eks 10 | cd terraform-aws-eks/examples/cw 11 | ``` 12 | 13 | ## Setup 14 | [This](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/cw/main.tf) is the example of terraform configuration file to create a managed EKS on your AWS account and install Amazon CloudWatch Container Insights agents using Helm chart to the EKS cluster. Check out and apply it using terraform command. 
15 | 16 | Run terraform: 17 | ```sh 18 | terraform init 19 | terraform apply 20 | ``` 21 | Also you can use the `-var-file` option for customized parameters when you run the terraform plan/apply command. 22 | ```sh 23 | terraform plan -var-file tc1.tfvars 24 | terraform apply -var-file tc1.tfvars 25 | ``` 26 | 27 | ### Update kubeconfig 28 | Update and download kubernetes config file to local. You can see the bash command like below after terraform apply is complete. The output looks like below. Copy and run it to save the kubernetes configuration file to your local workspace. And export it as an environment variable to apply to the terminal. 29 | ```sh 30 | bash -e .terraform/modules/eks/script/update-kubeconfig.sh -r ap-northeast-2 -n eks-cw -k kubeconfig 31 | export KUBECONFIG=kubeconfig 32 | ``` 33 | 34 | ## Clean up 35 | Run terraform: 36 | ``` 37 | terraform destroy 38 | ``` 39 | Or if you only want to remove all resources of CloudWatch Container Insights from the EKS cluster, you can run terraform destroy command with `-target` option: 40 | ``` 41 | terraform destroy -target module.cw 42 | ``` 43 | Don't forget you have to use the `-var-file` option when you run terraform destroy command to delete the aws resources created with extra variable files. 44 | ``` 45 | terraform destroy -var-file tc1.tfvars 46 | ``` 47 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | 3 | output "cluster" { 4 | description = "The EKS cluster attributes" 5 | value = { 6 | name = aws_eks_cluster.cp.name 7 | control_plane = aws_eks_cluster.cp 8 | data_plane = { 9 | node_groups = local.node_groups_enabled ? aws_autoscaling_group.ng : null 10 | managed_node_groups = local.managed_node_groups_enabled ? aws_eks_node_group.ng : null 11 | fargate = local.fargate_enabled ?
aws_eks_fargate_profile.fargate : null 12 | } 13 | } 14 | } 15 | 16 | output "role" { 17 | description = "The generated role of the EKS node group" 18 | value = (local.node_groups_enabled || local.managed_node_groups_enabled ? zipmap( 19 | ["name", "arn"], 20 | [aws_iam_role.ng.0.name, aws_iam_role.ng.0.arn] 21 | ) : null) 22 | } 23 | 24 | output "oidc" { 25 | description = "The OIDC provider attributes for IAM Role for ServiceAccount" 26 | value = zipmap( 27 | ["url", "arn"], 28 | [local.oidc["url"], local.oidc["arn"]] 29 | ) 30 | } 31 | 32 | output "tags" { 33 | description = "The generated tags for EKS integration" 34 | value = { 35 | "shared" = local.eks-shared-tag 36 | "owned" = local.eks-owned-tag 37 | "elb" = local.eks-elb-tag 38 | "internal-elb" = local.eks-internal-elb-tag 39 | } 40 | } 41 | 42 | data "aws_region" "current" {} 43 | 44 | output "kubeconfig" { 45 | description = "Bash script to update kubeconfig file" 46 | value = join(" ", [ 47 | "bash -e", 48 | format("%s/script/update-kubeconfig.sh", path.module), 49 | format("-r %s", data.aws_region.current.name), 50 | format("-n %s", aws_eks_cluster.cp.name), 51 | "-k kubeconfig", 52 | ]) 53 | } 54 | 55 | data "aws_eks_cluster_auth" "cp" { 56 | name = aws_eks_cluster.cp.name 57 | } 58 | 59 | output "helmconfig" { 60 | description = "The configurations map for Helm provider" 61 | sensitive = true 62 | value = { 63 | host = aws_eks_cluster.cp.endpoint 64 | token = data.aws_eks_cluster_auth.cp.token 65 | ca = aws_eks_cluster.cp.certificate_authority.0.data 66 | } 67 | } 68 | 69 | output "features" { 70 | description = "Features configurations for the EKS" 71 | value = { 72 | "managed_node_groups_enabled" = local.managed_node_groups_enabled 73 | "node_groups_enabled" = local.node_groups_enabled 74 | "fargate_enabled" = local.fargate_enabled 75 | "ssm_enabled" = var.enable_ssm 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /modules/karpenter/main.tf: 
-------------------------------------------------------------------------------- 1 | ## kubernetes cluster autoscaling 2 | 3 | locals { 4 | namespace = lookup(var.helm, "namespace", "karpenter") 5 | serviceaccount = lookup(var.helm, "serviceaccount", "karpenter") 6 | } 7 | 8 | module "irsa" { 9 | source = "../iam-role-for-serviceaccount" 10 | name = join("-", ["irsa", local.name]) 11 | namespace = local.namespace 12 | serviceaccount = local.serviceaccount 13 | oidc_url = var.oidc.url 14 | oidc_arn = var.oidc.arn 15 | policy_arns = [aws_iam_policy.karpenter.arn] 16 | tags = var.tags 17 | } 18 | 19 | resource "aws_iam_policy" "karpenter" { 20 | name = local.name 21 | description = format("Allow karpenter to manage AWS resources") 22 | path = "/" 23 | policy = jsonencode({ 24 | Version = "2012-10-17" 25 | Statement = [ 26 | { 27 | Action = [ 28 | "ec2:CreateLaunchTemplate", 29 | "ec2:CreateFleet", 30 | "ec2:RunInstances", 31 | "ec2:CreateTags", 32 | "iam:PassRole", 33 | "ec2:TerminateInstances", 34 | "ec2:DescribeLaunchTemplates", 35 | "ec2:DeleteLaunchTemplate", 36 | "ec2:DescribeInstances", 37 | "ec2:DescribeSecurityGroups", 38 | "ec2:DescribeSubnets", 39 | "ec2:DescribeInstanceTypes", 40 | "ec2:DescribeInstanceTypeOfferings", 41 | "ec2:DescribeAvailabilityZones", 42 | "ssm:GetParameter" 43 | ] 44 | Effect = "Allow" 45 | Resource = "*" 46 | }, 47 | ] 48 | }) 49 | } 50 | 51 | resource "helm_release" "karpenter" { 52 | name = lookup(var.helm, "name", "karpenter") 53 | chart = lookup(var.helm, "chart", "karpenter") 54 | version = lookup(var.helm, "version", null) 55 | repository = lookup(var.helm, "repository", join("/", [path.module, "charts"])) 56 | namespace = local.namespace 57 | create_namespace = true 58 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 59 | 60 | dynamic "set" { 61 | for_each = merge({ 62 | "clusterName" = lookup(var.helm.vars, "cluster_name") 63 | "clusterEndpoint" = lookup(var.helm.vars, "cluster_endpoint") 64 | 
"aws.defaultInstanceProfile" = lookup(var.helm.vars, "default_instance_profile") 65 | "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" = module.irsa.arn 66 | }, lookup(var.helm, "vars", {})) 67 | content { 68 | name = set.key 69 | value = set.value 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /modules/ecr/main.tf: -------------------------------------------------------------------------------- 1 | ## managed container image registry service 2 | 3 | # security/policy 4 | resource "aws_iam_policy" "read" { 5 | name = format("%s-ecr-read", local.name) 6 | description = format("Allow to read images from the ECR") 7 | path = "/" 8 | policy = jsonencode({ 9 | Statement = [{ 10 | Action = [ 11 | "ecr:BatchCheckLayerAvailability", 12 | "ecr:BatchGetImage", 13 | "ecr:DescribeImages", 14 | "ecr:DescribeRepositories", 15 | "ecr:GetAuthorizationToken", 16 | "ecr:GetDownloadUrlForLayer", 17 | "ecr:ListImages", 18 | ] 19 | Effect = "Allow" 20 | Resource = [aws_ecr_repository.repo.arn] 21 | }] 22 | Version = "2012-10-17" 23 | }) 24 | } 25 | 26 | resource "aws_iam_policy" "write" { 27 | name = format("%s-ecr-write", local.name) 28 | description = format("Allow to push and write images to the ECR") 29 | path = "/" 30 | policy = jsonencode({ 31 | Statement = [{ 32 | Action = [ 33 | "ecr:PutImage", 34 | "ecr:UploadLayerPart", 35 | "ecr:InitiateLayerUpload", 36 | "ecr:CompleteLayerUpload", 37 | ] 38 | Effect = "Allow" 39 | Resource = [aws_ecr_repository.repo.arn] 40 | }] 41 | Version = "2012-10-17" 42 | }) 43 | } 44 | 45 | data "aws_caller_identity" "current" {} 46 | 47 | resource "aws_ecr_repository_policy" "repo" { 48 | repository = aws_ecr_repository.repo.name 49 | policy = jsonencode({ 50 | Statement = [{ 51 | Sid = "AllowCrossAccountAccess" 52 | Action = [ 53 | "ecr:BatchCheckLayerAvailability", 54 | "ecr:BatchGetImage", 55 | "ecr:DescribeImages", 56 | "ecr:DescribeRepositories", 57 | 
"ecr:GetAuthorizationToken", 58 | "ecr:GetDownloadUrlForLayer", 59 | "ecr:GetRepositoryPolicy", 60 | "ecr:ListImages", 61 | ] 62 | Effect = "Allow" 63 | Principal = { 64 | AWS = flatten([ 65 | data.aws_caller_identity.current.account_id, 66 | var.trusted_accounts, 67 | ]) 68 | } 69 | }] 70 | Version = "2012-10-17" 71 | }) 72 | } 73 | 74 | resource "aws_ecr_lifecycle_policy" "repo" { 75 | count = var.lifecycle_policy != null ? 1 : 0 76 | repository = aws_ecr_repository.repo.name 77 | policy = var.lifecycle_policy == null ? jsonencode(local.default_lifecycle_policy) : var.lifecycle_policy 78 | } 79 | 80 | resource "aws_ecr_repository" "repo" { 81 | name = local.repo-name 82 | tags = merge(local.default-tags, var.tags) 83 | 84 | image_tag_mutability = var.image_tag_mutability 85 | image_scanning_configuration { 86 | scan_on_push = var.scan_on_push 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /modules/app-mesh/README.md: -------------------------------------------------------------------------------- 1 | # App Mesh for Amazon EKS 2 | [AWS App Mesh](https://aws.amazon.com/app-mesh) is a service mesh that provides application-level networking to make it easy for your services to communicate with each other across multiple types of compute infrastructure. App Mesh standardizes how your services communicate, giving you end-to-end visibility and ensuring high-availability for your applications. 3 | 4 | ## Examples 5 | - [Learning AWS App Mesh](https://aws.amazon.com/blogs/compute/learning-aws-app-mesh/) 6 | - [AWS App Mesh Examples](https://github.com/aws/aws-app-mesh-examples) 7 | - [Getting started with AWS App Mesh and Amazon EKS](https://aws.amazon.com/blogs/containers/getting-started-with-app-mesh-and-eks/) 8 | 9 | ## Quickstart 10 | ### Setup 11 | This is a terraform module to deploy Helm chart for App Mesh controller. 
12 | ```hcl 13 | module "eks" { 14 | source = "Young-ook/eks/aws" 15 | name = "eks" 16 | } 17 | 18 | provider "helm" { 19 | kubernetes { 20 | host = module.eks.helmconfig.host 21 | token = module.eks.helmconfig.token 22 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 23 | } 24 | } 25 | 26 | module "app-mesh" { 27 | source = "Young-ook/eks/aws//modules/app-mesh" 28 | cluster_name = module.eks.cluster.name 29 | oidc = module.eks.oidc 30 | tags = { env = "test" } 31 | } 32 | ``` 33 | Modify the terraform configuration file to deploy App Mesh controller. Run the terraform code to make a change on your environment. 34 | ``` 35 | terraform init 36 | terraform apply 37 | ``` 38 | 39 | ### Verify 40 | All steps are finished, check that there are pods that are `Ready` in `appmesh-system` namespace. Ensure the `appmesh-controller` pod is generated and running: 41 | ``` 42 | $ kubectl -n appmesh-system get all 43 | NAME READY STATUS RESTARTS AGE 44 | pod/appmesh-controller-xxxxxxxxx-xxxxx 1/1 Running 0 10h 45 | 46 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 47 | service/appmesh-controller-webhook-service ClusterIP 10.100.9.216 443/TCP 10h 48 | 49 | NAME READY UP-TO-DATE AVAILABLE AGE 50 | deployment.apps/appmesh-controller 1/1 1 1 10h 51 | 52 | NAME DESIRED CURRENT READY AGE 53 | replicaset.apps/appmesh-controller-xxxxxxxxx 1 1 1 10h 54 | ``` 55 | And you can list the all CRD(Custom Resource Definition)s for App Mesh integration. 
56 | ``` 57 | $ kubectl get crds | grep appmesh 58 | gatewayroutes.appmesh.k8s.aws 59 | meshes.appmesh.k8s.aws 60 | virtualgateways.appmesh.k8s.aws 61 | virtualnodes.appmesh.k8s.aws 62 | virtualrouters.appmesh.k8s.aws 63 | virtualservices.appmesh.k8s.aws 64 | ``` 65 | -------------------------------------------------------------------------------- /modules/container-insights/README.md: -------------------------------------------------------------------------------- 1 | # Amazon CloudWatch Container Insights 2 | Use [CloudWatch Container Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights.html) to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. Container Insights is available for Amazon Elastic Container Service (Amazon ECS), Amazon Elastic Kubernetes Service (Amazon EKS), and Kubernetes platforms on Amazon EC2. Amazon ECS support includes support for Fargate. 3 | 4 | CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects. 5 | 6 | ![aws-cw-container-insights](../../images/aws-cw-container-insights.png) 7 | 8 | ## Examples 9 | - [Quickstart Example](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/container-insights/README.md#quickstart) 10 | - [Amazon CloudWatch Container Insights for Amazon ECS](https://aws.amazon.com/blogs/mt/introducing-container-insights-for-amazon-ecs) 11 | 12 | ## Quickstart 13 | ### Setup 14 | This is a terraform module to deploy Helm chart for Container Insights agents. 
15 | ```hcl 16 | module "eks" { 17 | source = "Young-ook/eks/aws" 18 | name = "eks" 19 | } 20 | 21 | provider "helm" { 22 | kubernetes { 23 | host = module.eks.helmconfig.host 24 | token = module.eks.helmconfig.token 25 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 26 | } 27 | } 28 | 29 | module "container-insights" { 30 | source = "Young-ook/eks/aws//modules/container-insights" 31 | cluster_name = module.eks.cluster.name 32 | oidc = module.eks.oidc 33 | tags = { env = "test" } 34 | } 35 | ``` 36 | Modify the terraform configuration file to deploy Container Insights agents. Run the terraform code to make a change on your environment. 37 | ``` 38 | terraform init 39 | terraform apply 40 | ``` 41 | 42 | ### Verify 43 | All steps are finished, check that there are pods that are `Ready` in `amazon-cloudwatch` namespace: 44 | Ensure the `containerinsights` pods are generated and they are running: 45 | 46 | ``` 47 | $ kubectl -n amazon-cloudwatch get all 48 | NAME READY STATUS RESTARTS AGE 49 | pod/containerinsights-logs-757m9 1/1 Running 0 75s 50 | pod/containerinsights-metrics-6d779 1/1 Running 1 75s 51 | 52 | NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE 53 | daemonset.apps/containerinsights-logs 1 1 1 1 1 75s 54 | daemonset.apps/containerinsights-metrics 1 1 1 1 1 75s 55 | ``` 56 | -------------------------------------------------------------------------------- /examples/autoscaling/main.tf: -------------------------------------------------------------------------------- 1 | # Autoscaling example 2 | 3 | terraform { 4 | required_version = "~> 1.0" 5 | } 6 | 7 | provider "aws" { 8 | region = var.aws_region 9 | } 10 | 11 | # vpc 12 | module "vpc" { 13 | source = "Young-ook/vpc/aws" 14 | name = var.name 15 | tags = var.tags 16 | vpc_config = var.use_default_vpc ? 
null : { 17 | azs = var.azs 18 | cidr = "10.10.0.0/16" 19 | subnet_type = "private" 20 | single_ngw = true 21 | } 22 | } 23 | 24 | # eks 25 | module "eks" { 26 | source = "Young-ook/eks/aws" 27 | name = var.name 28 | tags = var.tags 29 | subnets = slice(values(module.vpc.subnets[var.use_default_vpc ? "public" : "private"]), 0, 3) 30 | kubernetes_version = var.kubernetes_version 31 | managed_node_groups = var.managed_node_groups 32 | node_groups = var.node_groups 33 | fargate_profiles = var.fargate_profiles 34 | enable_ssm = var.enable_ssm 35 | } 36 | 37 | provider "helm" { 38 | kubernetes { 39 | host = module.eks.helmconfig.host 40 | token = module.eks.helmconfig.token 41 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 42 | } 43 | } 44 | 45 | module "alb-ingress" { 46 | source = "Young-ook/eks/aws//modules/alb-ingress" 47 | cluster_name = module.eks.cluster.name 48 | oidc = module.eks.oidc 49 | tags = { env = "test" } 50 | } 51 | 52 | module "cluster-autoscaler" { 53 | for_each = toset(module.eks.features.managed_node_groups_enabled || module.eks.features.node_groups_enabled ? ["enabled"] : []) 54 | source = "Young-ook/eks/aws//modules/cluster-autoscaler" 55 | cluster_name = module.eks.cluster.name 56 | oidc = module.eks.oidc 57 | tags = { env = "test" } 58 | } 59 | 60 | module "container-insights" { 61 | source = "Young-ook/eks/aws//modules/container-insights" 62 | cluster_name = module.eks.cluster.name 63 | oidc = module.eks.oidc 64 | tags = { env = "test" } 65 | features = { 66 | enable_metrics = true 67 | enable_logs = true 68 | } 69 | } 70 | 71 | module "metrics-server" { 72 | for_each = toset(module.eks.features.managed_node_groups_enabled || module.eks.features.node_groups_enabled ? 
["enabled"] : []) 73 | source = "Young-ook/eks/aws//modules/metrics-server" 74 | cluster_name = module.eks.cluster.name 75 | oidc = module.eks.oidc 76 | tags = { env = "test" } 77 | } 78 | 79 | module "prometheus" { 80 | for_each = toset(module.eks.features.managed_node_groups_enabled || module.eks.features.node_groups_enabled ? ["enabled"] : []) 81 | source = "../../modules/prometheus" 82 | cluster_name = module.eks.cluster.name 83 | oidc = module.eks.oidc 84 | tags = { env = "test" } 85 | helm = { 86 | vars = { 87 | "alertmanager.persistentVolume.storageClass" = "gp2" 88 | "server.persistentVolume.storageClass" = "gp2" 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /script/update-kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # update/generate kubernetes config file to access eks cluster 3 | set -e 4 | 5 | CURDIR=`dirname $0` 6 | EKS_NAME=eks 7 | SPINNAKER_MANAGED=false 8 | 9 | export AWS_REGION=us-east-1 10 | export KUBECONFIG=$CURDIR/kubeconfig 11 | 12 | function print_usage() { 13 | echo "Usage: $0 -k -n(name) -r(region) -s(spinnaker-managed) " 14 | } 15 | 16 | function process_args() { 17 | if [[ $# < 1 ]]; then 18 | print_usage 19 | exit -1 20 | fi 21 | 22 | while getopts ":n:a:r:k:s:" opt; do 23 | case $opt in 24 | n) EKS_NAME="$OPTARG" 25 | ;; 26 | r) AWS_REGION="$OPTARG" 27 | ;; 28 | k) KUBECONFIG="$OPTARG" 29 | ;; 30 | s) SPINNAKER_MANAGED="$OPTARG" 31 | ;; 32 | \?) 
33 | >&2 echo "Unrecognized argument '$OPTARG'" 34 | ;; 35 | esac 36 | done 37 | } 38 | 39 | function init() { 40 | if [ -e $KUBECONFIG ]; then 41 | rm $KUBECONFIG 42 | fi 43 | 44 | # update kubeconfig 45 | aws eks update-kubeconfig --name $EKS_NAME --region $AWS_REGION 46 | 47 | if [ $SPINNAKER_MANAGED = "true" ]; then 48 | local namespace=$EKS_NAME 49 | local serviceaccount=spinnaker-managed 50 | 51 | rbac $namespace $serviceaccount 52 | minify $namespace 53 | fi 54 | 55 | # restrict access 56 | chmod 600 $KUBECONFIG 57 | } 58 | 59 | function rbac() { 60 | local namespace=$1 61 | local serviceaccount=$2 62 | 63 | cat << EOF | kubectl apply -f - 64 | apiVersion: v1 65 | kind: Namespace 66 | metadata: 67 | name: $namespace 68 | --- 69 | apiVersion: v1 70 | kind: ServiceAccount 71 | metadata: 72 | name: $serviceaccount 73 | namespace: $namespace 74 | --- 75 | apiVersion: rbac.authorization.k8s.io/v1 76 | kind: ClusterRoleBinding 77 | metadata: 78 | name: $serviceaccount 79 | roleRef: 80 | apiGroup: rbac.authorization.k8s.io 81 | kind: ClusterRole 82 | name: cluster-admin 83 | subjects: 84 | - kind: ServiceAccount 85 | name: $serviceaccount 86 | namespace: $namespace 87 | EOF 88 | 89 | token=$(kubectl get secret \ 90 | $(kubectl get serviceaccount $serviceaccount \ 91 | -n $namespace \ 92 | -o jsonpath='{.secrets[0].name}') \ 93 | -n $namespace \ 94 | -o jsonpath='{.data.token}' | base64 --decode) 95 | kubectl config set-credentials $serviceaccount --token=$token 96 | kubectl config set-context $namespace \ 97 | --cluster=$(kubectl config current-context) \ 98 | --user=$serviceaccount \ 99 | --namespace=$namespace 100 | } 101 | 102 | function minify () { 103 | local context=$1 104 | 105 | kubectl config view --raw > $KUBECONFIG.full.tmp 106 | kubectl --kubeconfig $KUBECONFIG.full.tmp config use-context $context 107 | kubectl --kubeconfig $KUBECONFIG.full.tmp \ 108 | config view --flatten --minify > $KUBECONFIG 109 | 110 | rm $KUBECONFIG.full.tmp 111 | } 112 | 113 | # 
main 114 | process_args "$@" 115 | init 116 | 117 | unset AWS_REGION 118 | unset KUBECONFIG 119 | -------------------------------------------------------------------------------- /examples/ecr/README.md: -------------------------------------------------------------------------------- 1 | # Amazon ECR 2 | [Amazon Elastic Container Registry](https://aws.amazon.com/ecr/) is a fully managed container registry that makes it easy to store, manage, share, and deploy your container images and artifacts anywhere. 3 | This is an example on how to create ECR on the AWS. If you want know more details about ECR terraform module, please check out [this](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/ecr). 4 | 5 | ## Download example 6 | Download this example on your workspace 7 | ```sh 8 | git clone https://github.com/Young-ook/terraform-aws-eks 9 | cd terraform-aws-eks/examples/ecr 10 | ``` 11 | 12 | ## Setup 13 | [This](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/ecr/main.tf) is the example of terraform configuration file to create an ECR (Elastic Container Registry) on your AWS account. Check out and apply it using terraform command. 14 | 15 | Run terraform: 16 | ``` 17 | terraform init 18 | terraform apply 19 | ``` 20 | Also you can use the `-var-file` option for customized paramters when you run the terraform plan/apply command. 21 | ``` 22 | terraform plan -var-file default.tfvars 23 | terraform apply -var-file default.tfvars 24 | ``` 25 | 26 | ### VPC Endpoints 27 | To improve the security of the VPC, a user has to configure Amazon ECR to use an interface VPC endpoint. For more details, please refer to [this](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/ecr). 28 | 29 | * Amazon ECS tasks using the Fargate launch type and platform version 1.3.0 or earlier only require the com.amazonaws.region.ecr.dkr Amazon ECR VPC endpoint and the Amazon S3 gateway endpoints. 
30 | * Amazon ECS tasks using the Fargate launch type and platform version 1.4.0 or later require both the com.amazonaws.region.ecr.dkr and com.amazonaws.region.ecr.api Amazon ECR VPC endpoints and the Amazon S3 gateway endpoints. 31 | 32 | ### Update kubeconfig 33 | Update and download kubernetes config file to local. You can see the bash command like below after terraform apply is complete. The output looks like below. Copy and run it to save the kubernetes configuration file to your local workspace. And export it as an environment variable to apply to the terminal. 34 | 35 | ``` 36 | bash -e .terraform/modules/eks/script/update-kubeconfig.sh -r ap-northeast-2 -n eks-ecr -k kubeconfig 37 | export KUBECONFIG=kubeconfig 38 | ``` 39 | 40 | ### Register Artifacts 41 | In this example, after terraform apply, you will see generated shell script that it will help you build and register container images. Find `build.sh` on your local workspace and run it. 42 | 43 | ``` 44 | bash docker-build.sh 45 | ``` 46 | 47 | This terraform example also creates kubernetes manifest file to deploy simple application that it was built in the `build` script. 48 | ``` 49 | kubectl apply -f hello-nodejs.yaml 50 | ``` 51 | 52 | ## Clean up 53 | Run terraform: 54 | ``` 55 | terraform destroy 56 | ``` 57 | Don't forget you have to use the `-var-file` option when you run terraform destroy command to delete the aws resources created with extra variable files. 58 | ``` 59 | terraform destroy -var-file default.tfvars 60 | ``` 61 | -------------------------------------------------------------------------------- /modules/iam-role-for-serviceaccount/README.md: -------------------------------------------------------------------------------- 1 | # IAM Role for Service Account with OIDC 2 | With [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. 
This module creates a single IAM role which can be assumed by trusted resources using OpenID Connect federated users. For more information about creating OpenID Connect identity provider, please visit [this](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html) 3 | 4 | ## Examples 5 | - [Quickstart](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/iam-role-for-serviceaccount/README.md#quickstart) 6 | - [IAM Role for Service Accounts](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/irsa/) 7 | 8 | ## Quickstart 9 | ### Setup 10 | The IAM roles for Service Accounts (IRSA) feature is available on new Amazon EKS Kubernetes version 1.14 clusters. Please make sure your EKS cluster version is 1.14 or higher to enable IAM roles for (Kubernetes) service accounts. 11 | This is AWS documentation for [IRSA]( https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) 12 | ```hcl 13 | module "irsa" { 14 | source = "Young-ook/eks/aws//modules/iam-role-for-serviceaccount" 15 | 16 | namespace = "default" 17 | serviceaccount = "iam-test" 18 | oidc_url = module.eks.oidc.url 19 | oidc_arn = module.eks.oidc.arn 20 | policy_arns = ["arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"] 21 | tags = { "env" = "test" } 22 | } 23 | ``` 24 | Modify the terraform configuration file to add the IRSA resources and Run the terraform code to make a change on your environment. 
25 | ``` 26 | terraform init 27 | terraform apply 28 | ``` 29 | All steps are finished, check that there is a node that is `Ready`: 30 | ``` 31 | kubectl get no 32 | NAME STATUS ROLES AGE VERSION 33 | ip-172-31-21-243.ap-northeast-2.compute.internal Ready 15m v1.16.13-eks-2ba888 34 | ``` 35 | 36 | ### Verify 37 | #### Create service account 38 | Ensure the `iam-test` is created and `eks-iam-test` pod is running: 39 | ``` 40 | cat << EOF | kubectl apply -f - 41 | apiVersion: v1 42 | kind: ServiceAccount 43 | metadata: 44 | name: iam-test 45 | namespace: default 46 | annotations: 47 | eks.amazonaws.com/role-arn: arn:aws:iam::{replace with your aws account}:role/irsa-test-s3-readonly 48 | EOF 49 | ``` 50 | 51 | #### Run test application 52 | Successfully created service account, you can deploy test application. This will run a pod to try to describe s3 bucket on your AWS account using aws-cli. 53 | ``` 54 | cat << EOF | kubectl apply -f - 55 | apiVersion: batch/v1 56 | kind: Job 57 | metadata: 58 | name: aws-cli 59 | spec: 60 | template: 61 | metadata: 62 | labels: 63 | app: aws-cli 64 | spec: 65 | serviceAccountName: iam-test 66 | containers: 67 | - name: aws-cli 68 | image: amazon/aws-cli:latest 69 | args: ["s3", "ls"] 70 | restartPolicy: Never 71 | EOF 72 | ``` 73 | -------------------------------------------------------------------------------- /modules/cluster-autoscaler/charts/cluster-autoscaler/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for cluster-autoscaler. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: k8s.gcr.io/autoscaling/cluster-autoscaler 9 | pullPolicy: IfNotPresent 10 | # Overrides the image tag whose default is the chart appVersion. 
11 | tag: "" 12 | 13 | imagePullSecrets: [] 14 | nameOverride: "" 15 | fullnameOverride: "" 16 | 17 | extraVolumes: 18 | - name: ssl-certs 19 | hostPath: 20 | path: /etc/ssl/certs/ca-bundle.crt 21 | 22 | extraVolumeMounts: 23 | - name: ssl-certs 24 | mountPath: /etc/ssl/certs/ca-certificates.crt 25 | readOnly: true 26 | 27 | serviceAccount: 28 | # Specifies whether a service account should be created 29 | create: true 30 | # Annotations to add to the service account 31 | annotations: {} 32 | # The name of the service account to use. 33 | # If not set and create is true, a name is generated using the fullname template 34 | name: "" 35 | 36 | lables: 37 | k8s-addon: cluster-autoscaler.addons.k8s.io 38 | k8s-app: cluster-autoscaler 39 | 40 | podAnnotations: 41 | prometheus.io/scrape: 'true' 42 | prometheus.io/port: '8085' 43 | 44 | podSecurityContext: {} 45 | # fsGroup: 2000 46 | 47 | securityContext: {} 48 | # capabilities: 49 | # drop: 50 | # - ALL 51 | # readOnlyRootFilesystem: true 52 | # runAsNonRoot: true 53 | # runAsUser: 1000 54 | 55 | service: 56 | type: ClusterIP 57 | port: 80 58 | 59 | ingress: 60 | enabled: false 61 | annotations: {} 62 | # kubernetes.io/ingress.class: nginx 63 | # kubernetes.io/tls-acme: "true" 64 | hosts: 65 | - host: chart-example.local 66 | paths: [] 67 | tls: [] 68 | # - secretName: chart-example-tls 69 | # hosts: 70 | # - chart-example.local 71 | 72 | resources: 73 | limits: 74 | cpu: 100m 75 | memory: 500Mi 76 | requests: 77 | cpu: 100m 78 | memory: 500Mi 79 | 80 | autoscaling: 81 | enabled: false 82 | minReplicas: 1 83 | maxReplicas: 100 84 | targetCPUUtilizationPercentage: 80 85 | # targetMemoryUtilizationPercentage: 80 86 | 87 | nodeSelector: {} 88 | 89 | tolerations: [] 90 | 91 | affinity: {} 92 | 93 | # cloudProvider -- The cloud provider where the autoscaler runs. 
94 | cloudProvider: aws 95 | 96 | autoDiscovery: 97 | # autoDiscovery.clusterName -- Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. 98 | clusterName: # cluster.local 99 | 100 | # AWS: Set tags as described in https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup 101 | # autoDiscovery.tags -- ASG tags to match, run through `tpl`. 102 | tags: 103 | - k8s.io/cluster-autoscaler/enabled 104 | - k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }} 105 | # - kubernetes.io/cluster/{{ .Values.autoDiscovery.clusterName }} 106 | 107 | autoscalingGroups: [] 108 | # For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example: 109 | # - name: asg1 110 | # maxSize: 2 111 | # minSize: 1 112 | # - name: asg2 113 | # maxSize: 2 114 | # minSize: 1 115 | 116 | # Environment variables for AWS provider 117 | awsRegion: "" 118 | awsAccessKeyID: "" 119 | awsSecretAccessKey: "" 120 | -------------------------------------------------------------------------------- /examples/app-mesh/README.md: -------------------------------------------------------------------------------- 1 | # AWS App Mesh 2 | 3 | ## Download example 4 | Download this example on your workspace 5 | ```sh 6 | git clone https://github.com/Young-ook/terraform-aws-eks 7 | cd terraform-aws-eks/examples/app-mesh 8 | ``` 9 | 10 | ## Setup 11 | [This](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/app-mesh/main.tf) is the example of terraform configuration file to create a managed EKS on your AWS account and install AWS App Mesh controller using Helm chart to the EKS cluster. Check out and apply it using terraform command. 12 | 13 | Run terraform: 14 | ```sh 15 | terraform init 16 | terraform apply 17 | ``` 18 | Also you can use the `-var-file` option for customized paramters when you run the terraform plan/apply command. 
19 | ```sh 20 | terraform plan -var-file tc1.tfvars 21 | terraform apply -var-file tc1.tfvars 22 | ``` 23 | 24 | ### Update kubeconfig 25 | Update and download kubernetes config file to local. You can see the bash command like below after terraform apply is complete. The output looks like below. Copy and run it to save the kubernetes configuration file to your local workspace. And export it as an environment variable to apply to the terminal. 26 | ```sh 27 | bash -e .terraform/modules/eks/script/update-kubeconfig.sh -r ap-northeast-2 -n eks-appmesh -k kubeconfig 28 | export KUBECONFIG=kubeconfig 29 | ``` 30 | 31 | ## AWS App Mesh 32 | [AWS App Mesh](https://aws.amazon.com/app-mesh/) is a service mesh that provides application-level networking to make it easy for your services to communicate with each other across multiple types of compute infrastructure. App Mesh gives end-to-end visibility and high-availability for your applications. 33 | 34 | Run terraform. 35 | After provisioning, you can check the app mesh controller on your EKS cluster. If the output message contains a resource like 'appmesh-controller', then the app mesh controller is running properly. For more details, please refer to [this](https://github.com/Young-ook/terraform-aws-eks/tree/main/modules/app-mesh). 36 | ```sh 37 | kubectl -n appmesh-system get all 38 | ``` 39 | 40 | ### App Mesh example 41 | ![aws-am-yelb-architecture](../../images/aws-am-yelb-architecture.png) 42 | 43 | #### Deploy 44 | ```sh 45 | kubectl apply -f yelb.yaml 46 | ``` 47 | 48 | #### Access the example 49 | ##### Local Workspace 50 | In your local workspace, connect through a proxy to access your application's endpoint. 51 | ```sh 52 | kubectl -n yelb port-forward svc/yelb-ui 8080:80 53 | ``` 54 | Open `http://localhost:8080` on your web browser. This shows the application main page. 55 | 56 | ##### Cloud9 57 | In your Cloud9 IDE, run the application. 
58 | ```sh 59 | kubectl -n yelb port-forward svc/yelb-ui 8080:80 60 | ``` 61 | Click `Preview` and `Preview Running Application`. This opens up a preview tab and shows the application main page. 62 | 63 | ![aws-am-yelb-screenshot](../../images/aws-am-yelb-screenshot.png) 64 | 65 | ### Delete 66 | ```sh 67 | kubectl delete -f yelb.yaml 68 | ``` 69 | 70 | ## Clean up 71 | Run terraform: 72 | ``` 73 | terraform destroy 74 | ``` 75 | Or if you only want to remove all resources of App Mesh Controller from the EKS clsuter, you can run terraform destroy command with `-target` option: 76 | ``` 77 | terraform destroy -target module.app-mesh 78 | ``` 79 | Don't forget you have to use the `-var-file` option when you run terraform destroy command to delete the aws resources created with extra variable files. 80 | ``` 81 | terraform destroy -var-file tc1.tfvars 82 | ``` 83 | -------------------------------------------------------------------------------- /examples/fargate/README.md: -------------------------------------------------------------------------------- 1 | # Amazon EKS on AWS Fargate 2 | 3 | ## Download example 4 | Download this example on your workspace 5 | ```sh 6 | git clone https://github.com/Young-ook/terraform-aws-eks 7 | cd terraform-aws-eks/examples/fargate 8 | ``` 9 | 10 | ## Setup 11 | [This](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/fargate/main.tf) is the example of terraform configuration file to create a managed EKS on your AWS account. Check out and apply it using terraform command. 12 | 13 | Run terraform: 14 | ``` 15 | terraform init 16 | terraform apply 17 | ``` 18 | Also you can use the `-var-file` option for customized paramters when you run the terraform plan/apply command. 19 | ``` 20 | terraform plan -var-file tc1.tfvars 21 | terraform apply -var-file tc1.tfvars 22 | ``` 23 | 24 | After then you will see the created EKS cluster and node groups. 
For more information about configuration of service account mapping for IAM role in Kubernetes, please check out the [IRSA](https://github.com/Young-ook/terraform-aws-eks/tree/main/modules/iam-role-for-serviceaccount/) 25 | 26 | ## AWS Fargate 27 | AWS Fargate is a technology that provides on-demand, right-sized compute capacity for containers. With AWS Fargate, you no longer have to provision, configure, or scale groups of virtual machines to run containers. This removes the need to choose server types, decide when to scale your node groups, or optimize cluster packing. You can control which pods start on Fargate and how they run with Fargate profiles. Each pod running on Fargate has its own isolation boundary and does not share the underlying kernel, CPU resources, memory resources, or elastic network interface with another pod. For more information, please refer [this](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html). 28 | 29 | Here is a [hello-kubernetes](https://github.com/Young-ook/terraform-aws-eks/blob/main/examples/fargate/manifests/hello-kubernetes.yaml) application to enable AWS Fargate as node groups for EKS cluster. 30 | ```hcl 31 | module "eks-fargate" { 32 | source = "Young-ook/eks/aws" 33 | name = "eks" 34 | tags = { env = "test" } 35 | fargate_profiles = { 36 | default = { 37 | name = "default" 38 | namespace = "default" 39 | } 40 | } 41 | } 42 | ``` 43 | Run terraform. After provisioning of EKS cluster, you can deploy the example using kubectl. 44 | ``` 45 | kubectl apply -f manifests/hello-kubernetes.yaml 46 | ``` 47 | A few minutes later you can see the fargate nodes are up. And you can try to access the service via port forwarding when all pods are ready and runnig. If everything looks fine, open the `localhost:8080` url on your web browser. 
48 | ``` 49 | kubectl get node 50 | ``` 51 | ``` 52 | NAME STATUS ROLES AGE VERSION 53 | fargate-10.0.2.59 Ready 109s v1.17.9-eks-a84824 54 | fargate-10.0.3.171 Ready 2m31s v1.17.9-eks-a84824 55 | fargate-10.0.3.80 Ready 2m49s v1.17.9-eks-a84824 56 | ``` 57 | ``` 58 | kubectl port-forward svc/hello-kubernetes 8080:80 59 | ``` 60 | To clean up all resources or hello-kubernetes application from cluster, run kubectl: 61 | ``` 62 | kubectl delete -f manifests/hello-kubernetes.yaml 63 | ``` 64 | 65 | ## Clean up 66 | Run terraform: 67 | ``` 68 | terraform destroy 69 | ``` 70 | Don't forget you have to use the `-var-file` option when you run terraform destroy command to delete the aws resources created with extra variable files. 71 | ``` 72 | terraform destroy -var-file tc1.tfvars 73 | ``` 74 | -------------------------------------------------------------------------------- /modules/container-insights/main.tf: -------------------------------------------------------------------------------- 1 | ## kubernetes container-insights 2 | 3 | locals { 4 | metrics_enabled = lookup(var.features, "enable_metrics", false) 5 | logs_enabled = lookup(var.features, "enable_logs", false) 6 | } 7 | 8 | 9 | module "irsa-metrics" { 10 | source = "../iam-role-for-serviceaccount" 11 | count = local.metrics_enabled ? 1 : 0 12 | name = join("-", compact(["irsa", var.cluster_name, "amazon-cloudwatch", local.suffix])) 13 | namespace = "amazon-cloudwatch" 14 | serviceaccount = "amazon-cloudwatch" 15 | oidc_url = var.oidc.url 16 | oidc_arn = var.oidc.arn 17 | policy_arns = ["arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"] 18 | tags = var.tags 19 | } 20 | 21 | resource "helm_release" "metrics" { 22 | count = local.metrics_enabled ? 
1 : 0 23 | name = "aws-cloudwatch-metrics" 24 | chart = "aws-cloudwatch-metrics" 25 | version = lookup(var.helm, "version", null) 26 | repository = lookup(var.helm, "repository", "https://aws.github.io/eks-charts") 27 | namespace = "amazon-cloudwatch" 28 | create_namespace = true 29 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 30 | 31 | dynamic "set" { 32 | for_each = { 33 | "clusterName" = var.cluster_name 34 | "serviceAccount.name" = "amazon-cloudwatch" 35 | "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" = module.irsa-metrics[0].arn 36 | } 37 | content { 38 | name = set.key 39 | value = set.value 40 | } 41 | } 42 | } 43 | 44 | module "irsa-logs" { 45 | source = "../iam-role-for-serviceaccount" 46 | count = local.logs_enabled ? 1 : 0 47 | name = join("-", compact(["irsa", var.cluster_name, "aws-for-fluent-bit", local.suffix])) 48 | namespace = "kube-system" 49 | serviceaccount = "aws-for-fluent-bit" 50 | oidc_url = var.oidc.url 51 | oidc_arn = var.oidc.arn 52 | policy_arns = ["arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"] 53 | tags = var.tags 54 | } 55 | 56 | resource "helm_release" "logs" { 57 | count = local.logs_enabled ? 
1 : 0 58 | name = "aws-for-fluent-bit" 59 | chart = "aws-for-fluent-bit" 60 | version = lookup(var.helm, "version", null) 61 | repository = lookup(var.helm, "repository", "https://aws.github.io/eks-charts") 62 | namespace = "kube-system" 63 | cleanup_on_fail = lookup(var.helm, "cleanup_on_fail", true) 64 | 65 | dynamic "set" { 66 | for_each = merge({ 67 | "cloudWatch.enabled" = true 68 | "cloudWatch.region" = data.aws_region.current.0.name 69 | "cloudWatch.logGroupName" = format("/aws/containerinsights/%s/application", var.cluster_name) 70 | "firehose.enabled" = false 71 | "kinesis.enabled" = false 72 | "elasticsearch.enabled" = false 73 | "serviceAccount.name" = "aws-for-fluent-bit" 74 | "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" = module.irsa-logs[0].arn 75 | }, lookup(var.helm, "vars", {})) 76 | content { 77 | name = set.key 78 | value = set.value 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /modules/lb-controller/README.md: -------------------------------------------------------------------------------- 1 | # AWS Load Balancer Controller 2 | AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. 3 | - It satisfies Kubernetes Ingress resources by provisioning Application Load Balancers. 4 | - It satisfies Kubernetes Service resources by provisioning Network Load Balancers. 5 | The AWS Load Balancer Controller makes it easy for users to take advantage of the loadbalancer management. 6 | 7 | You can load balance application traffic across pods using the AWS Application Load Balancer (ALB). To learn more, see [What is an Application Load Balancer?](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html) in the Application Load Balancers User Guide. You can share an ALB across multiple applications in your Kubernetes cluster using Ingress groups. 
In the past, you needed to use a separate ALB for each application. The controller automatically provisions AWS ALBs in response to Kubernetes Ingress objects. ALBs can be used with pods deployed to nodes or to AWS Fargate. You can deploy an ALB to public or private subnets. 8 | 9 | The [AWS load balancer controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) (formerly named AWS ALB Ingress Controller) creates ALBs and the necessary supporting AWS resources whenever a Kubernetes Ingress resource is created on the cluster with the kubernetes.io/ingress.class: alb annotation. The Ingress resource configures the ALB to route HTTP or HTTPS traffic to different pods within the cluster. To ensure that your Ingress objects use the AWS load balancer controller, add the following annotation to your Kubernetes Ingress specification. For more information, see [Ingress specification](https://kubernetes-sigs.github.io/aws-load-balancer-controller/guide/ingress/spec/) on GitHub. 10 | 11 | ## Examples 12 | - [Quickstart Example](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/alb-ingress/README.md#quickstart) 13 | - [Kubernetes Ingress with AWS ALB Ingress Controller](https://aws.amazon.com/blogs/opensource/kubernetes-ingress-aws-alb-ingress-controller/a) 14 | 15 | ## Quickstart 16 | ### Setup 17 | This is a terraform module to deploy Helm chart for AWS LoadBalancer Controller. 
18 | ```hcl 19 | module "eks" { 20 | source = "Young-ook/eks/aws" 21 | name = "eks" 22 | } 23 | 24 | provider "helm" { 25 | kubernetes { 26 | host = module.eks.helmconfig.host 27 | token = module.eks.helmconfig.token 28 | cluster_ca_certificate = base64decode(module.eks.helmconfig.ca) 29 | } 30 | } 31 | 32 | module "lb-controller" { 33 | source = "Young-ook/eks/aws//modules/lb-controller" 34 | cluster_name = module.eks.cluster.name 35 | oidc = module.eks.oidc 36 | tags = { env = "test" } 37 | } 38 | ``` 39 | Modify the terraform configuration file to deploy AWS Load Balancer Controller. Run the terraform code to make a change on your environment. 40 | ``` 41 | terraform init 42 | terraform apply 43 | ``` 44 | 45 | ### Verify 46 | All steps are finished, check that there are pods that are `Ready` in `kube-system` namespace: 47 | Ensure the `aws-load-balancer-controller` pod is generated and running: 48 | 49 | ```sh 50 | kubectl get deployment -n kube-system aws-load-balancer-controller 51 | ``` 52 | Output: 53 | ``` 54 | NAME READY UP-TO-DATE AVAILABLE AGE 55 | aws-load-balancer-controller 2/2 2 2 84s 56 | ``` 57 | If the pod is not healthy, please try to check the log: 58 | ```sh 59 | kubectl -n kube-system logs aws-load-balancer-controller-7dd4ff8cb-wqq58 60 | ``` -------------------------------------------------------------------------------- /modules/cluster-autoscaler/charts/cluster-autoscaler/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: {{ include "cluster-autoscaler.fullname" . }} 7 | labels: {{- include "cluster-autoscaler.labels" . 
| nindent 4 }} 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["events", "endpoints"] 11 | verbs: ["create", "patch"] 12 | - apiGroups: [""] 13 | resources: ["pods/eviction"] 14 | verbs: ["create"] 15 | - apiGroups: [""] 16 | resources: ["pods/status"] 17 | verbs: ["update"] 18 | - apiGroups: [""] 19 | resources: ["endpoints"] 20 | resourceNames: ["cluster-autoscaler"] 21 | verbs: ["get", "update"] 22 | - apiGroups: [""] 23 | resources: ["nodes"] 24 | verbs: ["watch", "list", "get", "update"] 25 | - apiGroups: [""] 26 | resources: 27 | - "pods" 28 | - "services" 29 | - "replicationcontrollers" 30 | - "persistentvolumeclaims" 31 | - "persistentvolumes" 32 | verbs: ["watch", "list", "get"] 33 | - apiGroups: ["extensions"] 34 | resources: ["replicasets", "daemonsets"] 35 | verbs: ["watch", "list", "get"] 36 | - apiGroups: ["policy"] 37 | resources: ["poddisruptionbudgets"] 38 | verbs: ["watch", "list"] 39 | - apiGroups: ["apps"] 40 | resources: ["statefulsets", "replicasets", "daemonsets"] 41 | verbs: ["watch", "list", "get"] 42 | - apiGroups: ["storage.k8s.io"] 43 | resources: ["storageclasses", "csinodes"] 44 | verbs: ["watch", "list", "get"] 45 | - apiGroups: ["batch", "extensions"] 46 | resources: ["jobs"] 47 | verbs: ["get", "list", "watch", "patch"] 48 | - apiGroups: ["coordination.k8s.io"] 49 | resources: ["leases"] 50 | verbs: ["create"] 51 | - apiGroups: ["coordination.k8s.io"] 52 | resourceNames: ["cluster-autoscaler"] 53 | resources: ["leases"] 54 | verbs: ["get", "update"] 55 | 56 | --- 57 | apiVersion: rbac.authorization.k8s.io/v1 58 | kind: Role 59 | metadata: 60 | namespace: {{ .Release.Namespace }} 61 | name: {{ include "cluster-autoscaler.fullname" . }} 62 | labels: {{- include "cluster-autoscaler.labels" . 
| nindent 4 }} 63 | rules: 64 | - apiGroups: [""] 65 | resources: ["configmaps"] 66 | verbs: ["create","list","watch"] 67 | - apiGroups: [""] 68 | resources: ["configmaps"] 69 | resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] 70 | verbs: ["delete", "get", "update", "watch"] 71 | 72 | --- 73 | apiVersion: rbac.authorization.k8s.io/v1 74 | kind: ClusterRoleBinding 75 | metadata: 76 | namespace: {{ .Release.Namespace }} 77 | name: {{ include "cluster-autoscaler.fullname" . }} 78 | labels: {{- include "cluster-autoscaler.labels" . | nindent 4 }} 79 | subjects: 80 | - kind: ServiceAccount 81 | namespace: {{ .Release.Namespace }} 82 | name: {{ include "cluster-autoscaler.serviceAccountName" . }} 83 | roleRef: 84 | apiGroup: rbac.authorization.k8s.io 85 | kind: ClusterRole 86 | name: {{ include "cluster-autoscaler.fullname" . }} 87 | 88 | --- 89 | apiVersion: rbac.authorization.k8s.io/v1 90 | kind: RoleBinding 91 | metadata: 92 | namespace: {{ .Release.Namespace }} 93 | name: {{ include "cluster-autoscaler.fullname" . }} 94 | labels: {{- include "cluster-autoscaler.labels" . | nindent 4 }} 95 | subjects: 96 | - kind: ServiceAccount 97 | namespace: {{ .Release.Namespace }} 98 | name: {{ include "cluster-autoscaler.serviceAccountName" . }} 99 | roleRef: 100 | apiGroup: rbac.authorization.k8s.io 101 | kind: Role 102 | name: {{ include "cluster-autoscaler.fullname" . }} 103 | {{- end }} 104 | --------------------------------------------------------------------------------