├── test └── eks │ ├── Makefile │ ├── main.tf │ ├── variables.tf │ └── eks.tf ├── vpc ├── provider.tf ├── variables.tf ├── main.tf ├── vpc-peering │ └── vpc-peering.tf ├── README.md ├── route-tables │ ├── create-routetables.tf │ └── update-routetables.tf └── subnets │ └── create-subnets.tf ├── remote-state ├── variables.tf ├── main.tf └── dynamodb.tf ├── hostedzone └── create-hostedzone.tf ├── eks ├── provider.tf ├── oidc-thumbprint.sh ├── wokernode_role.tf ├── appmesh_role.tf ├── external_dns_role.tf ├── cluster_autoscaler_role.tf ├── eks.tf ├── alb_ingress_controller_policy.tf ├── nginx_ingress_controller_policy.tf ├── eks-ng.tf ├── eks-custom-ng.tf └── README.md ├── dynamodb └── dynamodb.tf ├── s3 └── s3.tf ├── LICENSE └── README.md /test/eks/Makefile: -------------------------------------------------------------------------------- 1 | include ../../buildfile 2 | ENVS := ops -------------------------------------------------------------------------------- /vpc/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "${var.region}" 3 | } 4 | -------------------------------------------------------------------------------- /remote-state/variables.tf: -------------------------------------------------------------------------------- 1 | variable dynamodb_table_name { default = "" } 2 | variable s3bucket-name {} 3 | variable region {} 4 | variable tags { type = "map" } -------------------------------------------------------------------------------- /remote-state/main.tf: -------------------------------------------------------------------------------- 1 | module "s3" { 2 | source = "../s3" 3 | s3bucket-name = "${var.s3bucket-name}" 4 | region = "${var.region}" 5 | tags = "${var.tags}" 6 | } -------------------------------------------------------------------------------- /hostedzone/create-hostedzone.tf: -------------------------------------------------------------------------------- 1 | variable vpc_id {} 2 | variable zone_name {} 3 | 4 | 5 | resource "aws_route53_zone" "private" { 6 | name = "${var.zone_name}" 7 | 8 | vpc { 9 | vpc_id = "${var.vpc_id}" 10 | } 11 | } -------------------------------------------------------------------------------- /eks/provider.tf: -------------------------------------------------------------------------------- 1 | variable workspace_iam_roles {} 2 | variable env {} 3 | provider "aws" { 4 | region = var.region 5 | assume_role { 6 | role_arn = var.workspace_iam_roles[var.env] 7 | } 8 | } 9 | 10 | terraform { 11 | required_version = "~> 0.12.12" 12 | } -------------------------------------------------------------------------------- /vpc/variables.tf: -------------------------------------------------------------------------------- 1 | variable region {} 2 | variable subnet_list { type = "list" } 3 | variable availability_zone { 4 | type = "list" 5 | default = ["a","b","c"] 6 | } 7 | variable cidr_block {} 8 | 9 | variable tags { 10 | type = "map" 11 | default { 12 | Name = "test-vpc" 13 | Owner = "SamsonGudise" 14 | Purpose = "Test" 15 | } 16 | } 17 | variable peer_vpc_id { default = "" } 18 | variable environment {} -------------------------------------------------------------------------------- /eks/oidc-thumbprint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | THUMBPRINT=$(echo | openssl s_client -servername oidc.eks.${1}.amazonaws.com -showcerts -connect oidc.eks.${1}.amazonaws.com:443 2>&- | tac | sed -n '/-----END 
CERTIFICATE-----/,/-----BEGIN CERTIFICATE-----/p; /-----BEGIN CERTIFICATE-----/q' | tac | openssl x509 -fingerprint -noout | sed 's/://g' | awk -F= '{print $2}' | tr '[:upper:]' '[:lower:]') 3 | THUMBPRINT_JSON="{\"thumbprint\": \"${THUMBPRINT}\"}" 4 | echo ${THUMBPRINT_JSON} -------------------------------------------------------------------------------- /remote-state/dynamodb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_dynamodb_table" "terraform-state" { 2 | count = "${var.dynamodb_table_name == "" ? 0 : 1 }" 3 | name = "${var.dynamodb_table_name}" 4 | read_capacity = 20 5 | write_capacity = 20 6 | hash_key = "LockID" 7 | 8 | attribute { 9 | name = "LockID" 10 | type = "S" 11 | } 12 | tags { 13 | Name = "${lookup(var.tags,"Name","Unknown")}" 14 | Owner = "${lookup(var.tags,"Owner","Unknown")}" 15 | } 16 | } -------------------------------------------------------------------------------- /dynamodb/dynamodb.tf: -------------------------------------------------------------------------------- 1 | variable dynamodb_table_name {} 2 | variable tags { type = map } 3 | 4 | resource "aws_dynamodb_table" "terraform-state" { 5 | count = "${var.dynamodb_table_name == "" ? 0 : 1 }" 6 | name = "${var.dynamodb_table_name}" 7 | read_capacity = 20 8 | write_capacity = 20 9 | hash_key = "LockID" 10 | 11 | attribute { 12 | name = "LockID" 13 | type = "S" 14 | } 15 | tags { 16 | Name = "${lookup("Name",var.tags)}" 17 | Owner = "${lookup("Owner",var.tags)}" 18 | } 19 | } -------------------------------------------------------------------------------- /test/eks/main.tf: -------------------------------------------------------------------------------- 1 | variable "workspace_iam_roles" { 2 | type = map(string) 3 | default = { 4 | dev = "arn:aws:iam::1234567890:role/admin-role" 5 | } 6 | } 7 | 8 | 9 | provider "aws" { 10 | region = "us-west-2" 11 | assume_role { 12 | role_arn = var.workspace_iam_roles[terraform.workspace] 13 | } 14 | } 15 | 16 | terraform { 17 | required_version = "~> 0.12.12" 18 | 19 | backend "s3" { 20 | bucket = "s3bucket" # Change bucket name for your account 21 | 22 | # be careful here 23 | # this key needs to be unique for each of our accounts 24 | key = "test_modules/eks_cluster/terraform05282020.tfstate" 25 | region = "us-west-2" 26 | encrypt = true 27 | dynamodb_table = "dynamodb-state-lock" 28 | } 29 | } -------------------------------------------------------------------------------- /test/eks/variables.tf: -------------------------------------------------------------------------------- 1 | variable vpc_id { 2 | type = map(string) 3 | default = { 4 | ops = "vpc-08097bee14c504087" 5 | } 6 | } 7 | variable key_pair { 8 | type = map(string) 9 | default = { 10 | ops = "ops-keypair" 11 | } 12 | } 13 | variable cluster_name { 14 | type = map(string) 15 | default = { 16 | ops = "ops-eks1" 17 | } 18 | } 19 | 20 | variable "eks_release_version" { 21 | type = map(string) 22 | default = { 23 | ops = "1.15.11-20200507" 24 | } 25 | } 26 | 27 | variable "create_iamrole" { 28 | type = map(bool) 29 | default = { 30 | ops = false 31 | } 32 | } 33 | 34 | variable "nodegroup_config" { 35 | type = map(string) 36 | default = { 37 | custom_activity_nodegroup = true 38 | managed_activity_nodegroup = false 39 | managed_access_nodegroup = true 40 | custom_access_nodegroup = false 41 | analytics_nodegroup = false 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /s3/s3.tf: 
-------------------------------------------------------------------------------- 1 | variable s3bucket-name {} 2 | variable region {} 3 | variable tags { type = "map" } 4 | variable sse_algorithm { default = "AES256" } 5 | 6 | resource "aws_kms_key" "terraform-s3-kmskey" { 7 | count = "${var.sse_algorithm == "AES256" ? 0 : 1}" 8 | description = "This key is used to encrypt bucket objects" 9 | deletion_window_in_days = 10 10 | } 11 | 12 | resource "aws_s3_bucket" "terraform-s3-bucket" { 13 | bucket = "${var.s3bucket-name}" 14 | acl = "private" 15 | region = "${var.region}" 16 | versioning { 17 | enabled = true 18 | } 19 | 20 | server_side_encryption_configuration { 21 | rule { 22 | apply_server_side_encryption_by_default { 23 | #kms_master_key_id = "${aws_kms_key.terraform-s3-kmskey.arn}" 24 | sse_algorithm = "${var.sse_algorithm}" 25 | } 26 | } 27 | } 28 | 29 | tags { 30 | Name = "${lookup(var.tags,"Name","Unknown")}" 31 | Owner = "${lookup(var.tags,"Owner","Unknown")}" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 AWS Devops 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /eks/wokernode_role.tf: -------------------------------------------------------------------------------- 1 | ## Attach policy AmazonEKS_CNI_Policy 2 | resource "aws_iam_role_policy_attachment" "eksnode_AmazonEKS_CNI_Policy" { 3 | role = aws_iam_role.eksnode.name 4 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" 5 | } 6 | 7 | ## Attach policy AmazonEKSWorkerNodePolicy 8 | resource "aws_iam_role_policy_attachment" "eksnode_AmazonEKSWorkerNodePolicy" { 9 | role = aws_iam_role.eksnode.name 10 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" 11 | } 12 | 13 | ## Attach policy AmazonEC2ContainerRegistryReadOnly 14 | resource "aws_iam_role_policy_attachment" "eksnode_AmazonEC2ContainerRegistryReadOnly" { 15 | role = aws_iam_role.eksnode.name 16 | policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" 17 | } 18 | 19 | ## Create eksnode role 20 | resource "aws_iam_role" "eksnode" { 21 | name = "${var.cluster_name}-eksnode" 22 | assume_role_policy = < v0.0.2 26 | ``` 27 | module "build-vpc" { 28 | region = "${var.region}" 29 | subnet_list = "${var.subnet_list}" 30 | cidr_block = "${var.cidr_block}" 31 | peer_vpc_id = "${var.peer_vpc_id}" 32 | tags = "${var.tags}" 33 | # source = "git::git@github.com:AWS-Devops-Projects/aws-infrastructure.git//vpc?ref=v0.0.1" 34 | source = "git::git@github.com:AWS-Devops-Projects/aws-infrastructure.git//vpc?ref=v0.0.2" 35 | } 36 | -------------------------------------------------------------------------------- /test/eks/eks.tf: -------------------------------------------------------------------------------- 1 | module "create-eks-cluster" { 2 | 3 | source = "../../../terraform/modules/eks_cluster" 4 | #source = "https://github.com/SamsonGudise/aws-infrastructure.git/eks_cluster?ref=v0.1.001" 5 | vpc_id = var.vpc_id[terraform.workspace] 6 | key_name = var.key_pair[terraform.workspace] 7 | region = "us-west-2" 8 | cluster_name = var.cluster_name[terraform.workspace] 9 | public_access = true 10 | private_access = true 11 | create_iamrole = var.create_iamrole[terraform.workspace] 12 | access_instance_types = ["m5.large"] 13 | env = terraform.workspace 14 | eks_version = 1.15 15 | eks_release_version = "1.15.11-20200609" 16 | workspace_iam_roles = var.workspace_iam_roles 17 | ssh_sg = data.aws_security_groups.ssh_security_group.ids 18 | nodegroup_config = var.nodegroup_config 19 | ngname_prefix="v1" 20 | delay_in_seconds=30 21 | } 22 | 23 | data aws_security_groups "ssh_security_group" { 24 | filter { 25 | name = "vpc-id" 26 | values = [ var.vpc_id[terraform.workspace] ] 27 | } 28 | filter { 29 | name = "tag:Name" 30 | values = [ "ssh_admin_access", "access_nodeport"] 31 | } 32 | } 33 | 34 | output "eks-kubeconfig" { 35 | value = "${module.create-eks-cluster.kubeconfig}" 36 | } 37 | -------------------------------------------------------------------------------- /eks/appmesh_role.tf: -------------------------------------------------------------------------------- 1 | ## Assume role policy for EKS Cluster Appmesh 2 | data "aws_iam_policy_document" "eksappmesh_assume_role_policy" { 3 | statement { 4 | actions = ["sts:AssumeRoleWithWebIdentity"] 5 | effect = "Allow" 6 | 7 | condition { 8 | test = "StringEquals" 9 | variable = "${replace(aws_iam_openid_connect_provider.ekscluster.url, "https://", "")}:sub" 10 | values = ["system:serviceaccount:appmesh-system:appmesh-controller"] 11 | } 12 | 13 | principals { 14 | identifiers = 
["${aws_iam_openid_connect_provider.ekscluster.arn}"] 15 | type = "Federated" 16 | } 17 | } 18 | } 19 | 20 | ## IAM Role for EKS Cluster autoscaler 21 | resource "aws_iam_role" "eksappmesh" { 22 | assume_role_policy = data.aws_iam_policy_document.eksappmesh_assume_role_policy.json 23 | name = "${var.cluster_name}-appmesh" 24 | } 25 | 26 | ## Attach policy AWSCloudMapFullAccess 27 | resource "aws_iam_role_policy_attachment" "eksappmesh_AWSCloudMapFullAccess" { 28 | role = aws_iam_role.eksappmesh.name 29 | policy_arn = "arn:aws:iam::aws:policy/AWSCloudMapFullAccess" 30 | } 31 | 32 | ## Attach policy AWSAppMeshFullAccess 33 | resource "aws_iam_role_policy_attachment" "eksappmesh_AWSAppMeshFullAccess" { 34 | role = aws_iam_role.eksappmesh.name 35 | policy_arn = "arn:aws:iam::aws:policy/AWSAppMeshFullAccess" 36 | } 37 | -------------------------------------------------------------------------------- /eks/external_dns_role.tf: -------------------------------------------------------------------------------- 1 | # Policy for EKS Cluster external dns 2 | # External DNS Policy 3 | resource "aws_iam_policy" "external_dns_policy" { 4 | name = "${var.cluster_name}-external-dns-policy" 5 | description = "External DNS Policy" 6 | 7 | policy = <---- 40 | 41 | resource "aws_subnet" "services-subnets" { 42 | count = "${(length(var.subnet_list)/2)*(length(var.availability_zone))}" 43 | vpc_id = "${var.vpc_id}" 44 | availability_zone = "${var.region}${var.availability_zone[count.index%3]}" 45 | cidr_block = "${cidrsubnet(data.aws_vpc.selected.cidr_block, var.noof-subnet-bits, count.index)}" 46 | map_public_ip_on_launch = "${(var.subnet_list[1+(2*(count.index/(length(var.availability_zone))))] == "public" || 47 | var.subnet_list[1+(2*(count.index/(length(var.availability_zone))))] == "dmz") ? 48 | true : false}" 49 | tags { 50 | Name = "sn-${var.environment}-${local.sregion}-${var.subnet_list[1+(2*(count.index/(length(var.availability_zone))))]}-${var.availability_zone[count.index%3]}-${var.subnet_list[2*(count.index/(length(var.availability_zone)))]}" 51 | type = "${var.subnet_list[1+(2*(count.index/(length(var.availability_zone))))]}" 52 | Owner = "${lookup(var.tags, "Owner")}" 53 | Purpose = "${lookup(var.tags, "Purpose")}" 54 | } 55 | } 56 | 57 | resource "aws_route_table_association" "assign" { 58 | count = "${(length(var.subnet_list)/2)*(length(var.availability_zone))}" 59 | subnet_id = "${element(aws_subnet.services-subnets.*.id,count.index)}" 60 | route_table_id = "${(var.subnet_list[1+(2*(count.index/(length(var.availability_zone))))] == "public" || 61 | var.subnet_list[1+(2*(count.index/(length(var.availability_zone))))] == "dmz") ? 62 | aws_route_table.crt.0.id : aws_route_table.crt.1.id}" 63 | } 64 | 65 | output "public_subnet_ids" { 66 | value = "${slice(aws_subnet.services-subnets.*.id,0,3)}" 67 | } 68 | output "private_subnet_ids" { 69 | value = "${slice(aws_subnet.services-subnets.*.id,3,6)}" 70 | } 71 | output "route_table_ids" { 72 | value = "${aws_route_table.crt.*.id}" 73 | } 74 | 75 | 76 | // Name = "${lookup(var.tags, "Purpose")}-${(count.index == 0 ? "public" : "private")}" 77 | // route_table_id = "${var.subnet_list[1+(2*(count.index/(length(var.availability_zone))))] == "public" ? 78 | // var.route_tables[0] : var.route_tables[1]}" 79 | // map_public_ip_on_launch = "${var.subnet_list[1+(2*(count.index/(length(var.availability_zone))))] == "public" ? 
true : false }" 80 | // Name = "${lookup(var.tags, "Purpose")}-${var.subnet_list[2*(count.index/(length(var.availability_zone)))]}-${var.availability_zone[count.index%3]}" 81 | 82 | -------------------------------------------------------------------------------- /eks/eks.tf: -------------------------------------------------------------------------------- 1 | variable cluster_name {} 2 | variable "tags" { 3 | type = map 4 | default = {} 5 | } 6 | variable public_access { 7 | default = false 8 | } 9 | 10 | variable private_access { 11 | default = true 12 | } 13 | 14 | variable create_iamrole { 15 | default = false 16 | } 17 | 18 | variable region { 19 | default = "us-west-2" 20 | } 21 | variable vpc_id {} 22 | 23 | variable key_name { 24 | default = "" 25 | } 26 | 27 | data "aws_region" "current" {} 28 | 29 | data "aws_vpc" "selected" { 30 | id = "${var.vpc_id}" 31 | } 32 | variable "eks_version" { 33 | default = "1.15" 34 | } 35 | variable "admin_access_cidrs" { 36 | type = list(string) 37 | default = ["0.0.0.0/0"] 38 | } 39 | 40 | data "aws_subnet_ids" "private" { 41 | vpc_id = var.vpc_id 42 | 43 | tags = { 44 | SubnetTier = "private" 45 | } 46 | } 47 | 48 | data "aws_caller_identity" "current" {} 49 | 50 | resource "aws_iam_role" "eksclusterRole" { 51 | count = var.create_iamrole == true ? 1: 0 52 | name = "eksclusterRole" 53 | assume_role_policy = <&2 "Error installing kubectl." 57 | exit 5 58 | fi 59 | 60 | # check that aws and ec2-metadata commands are installed 61 | command -v aws >/dev/null 2>&1 || { echo >&2 'aws command not installed.'; exit 2; } 62 | command -v ec2-metadata >/dev/null 2>&1 || { echo >&2 'ec2-metadata command not installed.'; exit 3; } 63 | 64 | # command alias 65 | shopt -s expand_aliases 66 | alias kubectlcfg="/usr/local/bin/kubectl --kubeconfig=/var/lib/kubelet/kubeconfig" 67 | 68 | # set filter parameters 69 | instanceId=$(ec2-metadata -i | cut -d ' ' -f2) 70 | hostId=$(ec2-metadata -h | cut -d ' ' -f2) 71 | 72 | # get region 73 | region=$(ec2-metadata --availability-zone | cut -d ' ' -f2|sed -e 's/.$//') 74 | 75 | # retrieve tags 76 | tagValues=$(aws ec2 describe-tags --output text --region "$region" --filters "Name=key,Values=Name" "Name=resource-type,Values=instance" "Name=resource-id,Values=$instanceId") 77 | if [ $? -ne 0 ]; then 78 | echo >&2 "Error retrieving tag value." 79 | exit 4 80 | fi 81 | 82 | # extract required tag value 83 | tagValue=$(echo "$tagValues" | cut -f5|sed -e "s/${var.cluster_name}-//") 84 | echo "$tagValue" 85 | 86 | # Assign NodeLabel 87 | kubelet_args="--node-labels=eks.amazonaws.com/nodegroup=$tagValue" 88 | 89 | if [[ $tagValue =~ .*prometheus.* ]]; then 90 | kubelet_args="--node-labels=eks.amazonaws.com/nodegroup=$tagValue,app=prometheus --register-with-taints=app=prometheus:NoSchedule" 91 | fi 92 | 93 | if [[ $tagValue =~ .*autoscale.* ]]; then 94 | kubelet_args="--node-labels=eks.amazonaws.com/nodegroup=$tagValue,type=autoscale --register-with-taints=type=autoscale:NoSchedule" 95 | fi 96 | echo "/etc/eks/bootstrap.sh ${var.cluster_name} --kubelet-extra-args '$kubelet_args'" | bash -xe 97 | USERDATA 98 | } 99 | 100 | resource "aws_iam_instance_profile" "eks-nodegroup" { 101 | name = "${var.cluster_name}-eksnode" 102 | role = "${var.cluster_name}-eksnode" 103 | } 104 | 105 | resource "aws_autoscaling_group" "access" { 106 | count = (var.nodegroup_config["custom_access_nodegroup"] ? 
1 : 0) 107 | desired_capacity = 1 108 | max_size = 1 109 | min_size = 1 110 | name_prefix = "${var.cluster_name}-access" 111 | 112 | health_check_type = "EC2" 113 | ## Incase of instance failure or restart, ensurece new node comes up in the same region 114 | vpc_zone_identifier = [ element(tolist(data.aws_subnet_ids.private.ids),0) ] 115 | enabled_metrics = ["GroupTerminatingCapacity", "GroupInServiceCapacity", "GroupPendingCapacity", "GroupStandbyCapacity", "GroupTotalCapacity", "GroupDesiredCapacity", "GroupTerminatingInstances", "GroupTotalInstances", "GroupMaxSize", "GroupStandbyInstances", "GroupInServiceInstances", "GroupMinSize", "GroupPendingInstances" ] 116 | health_check_grace_period = 10 117 | max_instance_lifetime = var.max_instance_lifetime 118 | mixed_instances_policy { 119 | launch_template { 120 | launch_template_specification { 121 | launch_template_id = aws_launch_template.activity.id 122 | version = "$Latest" 123 | } 124 | override { 125 | instance_type = var.activity_instance_types[0%length(var.activity_instance_types)] 126 | } 127 | } 128 | } 129 | tag { 130 | key = "kubernetes.io/cluster/${var.cluster_name}" 131 | value = "owned" 132 | propagate_at_launch = true 133 | } 134 | 135 | tag { 136 | key = "app" 137 | value = "prometheus" 138 | propagate_at_launch = true 139 | } 140 | 141 | tag { 142 | key = "Name" 143 | value = "${var.cluster_name}-prometheus" 144 | propagate_at_launch = true 145 | } 146 | 147 | depends_on = [ 148 | aws_eks_node_group.access-nodeport, 149 | aws_iam_role_policy_attachment.eksnode_AmazonEKS_CNI_Policy, 150 | aws_iam_role_policy_attachment.eksnode_AmazonEKSWorkerNodePolicy, 151 | aws_iam_role_policy_attachment.eksnode_AmazonEC2ContainerRegistryReadOnly, 152 | aws_iam_role.eksnode, 153 | aws_eks_cluster.ekscluster 154 | ] 155 | lifecycle { 156 | create_before_destroy = true 157 | } 158 | } 159 | 160 | resource "aws_autoscaling_group" "activity" { 161 | count = (var.nodegroup_config["custom_activity_nodegroup"] ? (2 * local.sncount) : 0) 162 | min_size = count.index < local.sncount ? var.activity_custom_min_size : var.autoscale_min_size 163 | max_size = count.index < local.sncount ? var.activity_custom_max_size : var.autoscale_max_size 164 | desired_capacity = count.index < local.sncount ? var.activity_custom_desired_size : var.autoscale_desired_size 165 | name_prefix = (count.index < local.sncount? 
"${var.cluster_name}-activity-${count.index}-" : "${var.cluster_name}-autoscale-${count.index}-" ) 166 | 167 | enabled_metrics = ["GroupTerminatingCapacity", "GroupInServiceCapacity", "GroupPendingCapacity", "GroupStandbyCapacity", "GroupTotalCapacity", "GroupDesiredCapacity", "GroupTerminatingInstances", "GroupTotalInstances", "GroupMaxSize", "GroupStandbyInstances", "GroupInServiceInstances", "GroupMinSize", "GroupPendingInstances" ] 168 | health_check_grace_period = 10 169 | max_instance_lifetime = var.max_instance_lifetime 170 | 171 | health_check_type = "EC2" 172 | 173 | mixed_instances_policy { 174 | 175 | instances_distribution { 176 | spot_max_price = var.spot_max_price 177 | spot_instance_pools = 4 178 | on_demand_percentage_above_base_capacity = var.on_demand_instance_percentage 179 | } 180 | launch_template { 181 | launch_template_specification { 182 | launch_template_id = aws_launch_template.activity.id 183 | version = "$Latest" 184 | } 185 | override { 186 | instance_type = var.activity_instance_types[0%length(var.activity_instance_types)] 187 | } 188 | 189 | override { 190 | instance_type = var.activity_instance_types[1%length(var.activity_instance_types)] 191 | } 192 | 193 | override { 194 | instance_type = var.activity_instance_types[2%length(var.activity_instance_types)] 195 | } 196 | 197 | override { 198 | instance_type = var.activity_instance_types[3%length(var.activity_instance_types)] 199 | } 200 | } 201 | } 202 | vpc_zone_identifier = [ element(tolist(data.aws_subnet_ids.private.ids),(count.index % local.sncount)) ] 203 | 204 | tag { 205 | key = "k8s.io/cluster-autoscaler/node-template/label/type" 206 | value = (count.index < local.sncount ? "static" : "autoscale" ) 207 | propagate_at_launch = true 208 | } 209 | 210 | tag { 211 | key = "k8s.io/cluster-autoscaler/node-template/taint/type" 212 | value = (count.index < local.sncount ? "None" : "autoscale:NoSchedule" ) 213 | propagate_at_launch = true 214 | } 215 | 216 | tag { 217 | key = "kubernetes.io/cluster/${var.cluster_name}" 218 | value = "owned" 219 | propagate_at_launch = true 220 | } 221 | 222 | tag { 223 | key = "Name" 224 | value = (count.index < local.sncount? 
"${var.cluster_name}-activity-${count.index}" : "${var.cluster_name}-autoscale-${count.index}" ) 225 | propagate_at_launch = true 226 | } 227 | 228 | tag { 229 | key = "k8s.io/cluster-autoscaler/enabled" 230 | value = true 231 | propagate_at_launch = true 232 | } 233 | 234 | tag { 235 | key = "k8s.io/cluster-autoscaler/${var.cluster_name}" 236 | value = "owned" 237 | propagate_at_launch = true 238 | } 239 | 240 | depends_on = [ 241 | aws_iam_role_policy_attachment.eksnode_AmazonEKS_CNI_Policy, 242 | aws_iam_role_policy_attachment.eksnode_AmazonEKSWorkerNodePolicy, 243 | aws_iam_role_policy_attachment.eksnode_AmazonEC2ContainerRegistryReadOnly, 244 | aws_iam_role.eksnode, 245 | aws_eks_cluster.ekscluster 246 | ] 247 | 248 | lifecycle { 249 | create_before_destroy = true 250 | ignore_changes = [ desired_capacity ] 251 | } 252 | } 253 | 254 | resource "aws_launch_template" "activity" { 255 | name_prefix = "activity" 256 | description = "custom eks activity nodegroup" 257 | 258 | block_device_mappings { 259 | device_name = "/dev/xvda" 260 | ebs { 261 | volume_size = var.activity_disk_size 262 | } 263 | } 264 | 265 | capacity_reservation_specification { 266 | capacity_reservation_preference = "open" 267 | } 268 | 269 | ebs_optimized = true 270 | 271 | image_id = data.aws_ami.eks-worker.id 272 | iam_instance_profile { 273 | name = aws_iam_instance_profile.eks-nodegroup.name 274 | } 275 | 276 | instance_initiated_shutdown_behavior = "terminate" 277 | 278 | instance_type = var.activity_instance_types[0%length(var.activity_instance_types)] 279 | key_name = var.key_name 280 | 281 | vpc_security_group_ids = concat(var.ssh_sg, [aws_eks_cluster.ekscluster.vpc_config.0.cluster_security_group_id]) 282 | 283 | tag_specifications { 284 | resource_type = "instance" 285 | 286 | tags = { 287 | Name = "lt-nodegroup-${var.cluster_name}" 288 | } 289 | } 290 | 291 | lifecycle { 292 | create_before_destroy = true 293 | } 294 | user_data = base64encode(local.eks-node-userdata) 295 | } -------------------------------------------------------------------------------- /eks/README.md: -------------------------------------------------------------------------------- 1 | # EKS Cluster 2 | 3 | ### Pre-requisites 4 | * terraform 0.12 or later 5 | * `openssl`, `sed`, `tac` 6 | 7 | 8 | ### Assumptions 9 | * This module will create `eks cluster` in private subnets. So, VPC must have at least one subnet with `tag:SubnetTier` value `private` 10 | 11 | 12 | ### Required parameters 13 | 14 | * `cluster_name` to be created 15 | * `vpc_id` to host eks cluster 16 | * `env`: aws account or profile 17 | * `workspace_iam_roles` IAM role to assume 18 | 19 | ### Optional paramters 20 | 21 | * `public_access` default access to internet is `false` 22 | * `private_access` default internal access is `true` 23 | * `create_iamrole` default `false` don't create iamrole and policies 24 | * `region` default `us-west-2` 25 | * `key_name` set valid key_pair to access worker-nodes 26 | * `eks_version` default 1.14 27 | * `admin_access_cidrs` default `[0.0.0.0/0]` 28 | * `rt_tag` default `tag:Name` 29 | * `ssh_sg` source security_group for ssh access to worker-nodes 30 | * `access_instance_types` list of instances types for access node_group, default `t3.medium` 31 | * `access_labels` map of labels for access_nodegroup. check code for default values. 
32 | * `activity_instance_types` list of instance types for activity node_group, default `t3.medium` 33 | * `analytics_instance_types` list of instance types for analytics node_group, default `t3.medium` 34 | * `analytics_min_size` min number of nodes per region for analytics node_group 35 | * `analytics_max_size` max number of nodes per region for analytics node_group 36 | * `analytics_desired_size` desired number of nodes per region for analytics node_group 37 | * `activity_min_size` min number of nodes per region for activity node_group 38 | * `activity_max_size` max number of nodes per region for activity node_group 39 | * `activity_desired_size` desired number of nodes per region for activity node_group 40 | * `activity_custom_min_size` min number of nodes per region for activity custom_node_group, default `0` 41 | * `activity_custom_max_size` max number of nodes per region for activity custom_node_group, default `100` 42 | * `activity_custom_desired_size` desired number of nodes per region for activity custom_node_group, default `0` 43 | * `autoscale_max_size` max number of nodes per region for autoscale nodegroup, default `100` 44 | * `autoscale_min_size` min number of nodes per region for autoscale nodegroup, default `0` 45 | * `autoscale_desired_size` desired number of nodes per region for autoscale nodegroup, default `0` 46 | * `nodegroup_config` defines the map of nodegroup(s) to be created. See `eks-ng.tf` for the default configuration. 47 | * `spot_max_price` applicable to `activity_custom_nodegroups` 48 | * `max_instance_lifetime` applicable to `activity_custom_nodegroups`, disabled by default 49 | * `on_demand_instance_percentage` on-demand instance distribution percentage, applicable to custom activity and autoscale nodegroups 50 | 51 | 52 | ### Default Cluster configuration 53 | * prometheus nodegroup with a single worker node (m5.large) dedicated to prometheus-server, serving as the single point of access for all NodePort services 54 | * 3 nodegroups, one for each availability zone `a`, `b`, `c`, with min and desired set to `0` nodes and max set to `100`. 55 | * The initial cluster comes up with one m5.large node and scales based on load. 56 | 57 | ### Example 58 | 59 | 60 | module "create-eks-cluster" { 61 | source = "https://github.com/SamsonGudise/aws-infrastructure.git/eks_cluster?ref=v0.0.010" 62 | # vpc_id = var.vpc_id[terraform.workspace] 63 | vpc_id = data.terraform_remote_state.network.outputs.vpc_id 64 | key_name = var.key_pair[terraform.workspace] 65 | region = "us-west-2" 66 | cluster_name = var.cluster_name[terraform.workspace] 67 | env = terraform.workspace 68 | eks_version = 1.15 69 | eks_release_version = var.eks_release_version[terraform.workspace] 70 | workspace_iam_roles = var.workspace_iam_roles 71 | ssh_sg = data.aws_security_groups.ssh_security_group.ids 72 | } 73 | * Note: Check `../test_modules/eks_cluster` for more details on how to use this module. 74 | 75 | ### Install-addons 76 | 77 | ### Create Route53 `A` record 78 | * Create an `A` record for the `prometheus` nodegroup node (see the example below the node listing). 79 | 80 | % kubectl get nodes -l app=prometheus 81 | NAME STATUS ROLES AGE VERSION 82 | ip-10-96-86-110.us-west-2.compute.internal Ready <none> 5d3h v1.15.11-eks-af3caf 83 | % 84 |
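* The record can be created with the AWS CLI once you have the node's private IP (visible with `kubectl get nodes -l app=prometheus -o wide`). A minimal sketch, assuming a hypothetical private hosted zone ID `Z0123456789EXAMPLE` and record name `prometheus.internal.example.com`; the IP below is taken from the node listing above, so adjust all three for your environment:

      aws route53 change-resource-record-sets \
        --hosted-zone-id Z0123456789EXAMPLE \
        --change-batch '{
          "Changes": [{
            "Action": "UPSERT",
            "ResourceRecordSet": {
              "Name": "prometheus.internal.example.com",
              "Type": "A",
              "TTL": 300,
              "ResourceRecords": [{"Value": "10.96.86.110"}]
            }
          }]
        }'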
85 | ### Edit Security Group to allow Jenkins slaves and user access 86 | * The EKS cluster comes up with a security group named `eks-cluster-sg-${cluster-name}-[0-9]+`, which will be assigned to the worker nodes. 87 | * Change the security group to allow TCP ports `22`, `443 (https)`, `80 (http)` and port range `30000-32767` from `172.0.0.0/10` and `10.0.0.0/9` 88 | 89 | ### Edit IAM Role `alb-ingress-controller` Trust Relationships 90 | * Change the Trust Relationships to allow the alb-ingress-controller pod to manage ALBs for Kubernetes ingress 91 | 92 | { 93 | "Version": "2012-10-17", 94 | "Statement": [ 95 | { 96 | "Effect": "Allow", 97 | "Principal": { 98 | "Federated": "arn:aws:iam::122524323692:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/28E5E6E998145B4EA7DCC2DC5DEBB0C9" 99 | }, 100 | "Action": "sts:AssumeRoleWithWebIdentity", 101 | "Condition": { 102 | "StringEquals": { 103 | "oidc.eks.us-west-2.amazonaws.com/id/28E5E6E998145B4EA7DCC2DC5DEBB0C9:sub": "system:serviceaccount:kube-system:alb-ingress-controller" 104 | } 105 | } 106 | }, 107 | { 108 | "Effect": "Allow", 109 | "Principal": { 110 | "Federated": "arn:aws:iam::122524323692:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/188B8A9BFDCAC6964D3DA55C3D15928B" 111 | }, 112 | "Action": "sts:AssumeRoleWithWebIdentity", 113 | "Condition": { 114 | "StringEquals": { 115 | "oidc.eks.us-west-2.amazonaws.com/id/188B8A9BFDCAC6964D3DA55C3D15928B:sub": "system:serviceaccount:kube-system:alb-ingress-controller" 116 | } 117 | } 118 | } 119 | ] 120 | } 121 | 122 | ### Role Based Access Controls (RBAC) 123 | 124 | #### Cluster access 125 | 126 | 1. Create kubeconfig - `aws eks update-kubeconfig --name <cluster-name> --profile <profile-name>` 127 | 128 | 129 | 130 | $ aws eks update-kubeconfig --name dev-eks1 --profile=dev 131 | Added new context arn:aws:eks:us-west-2:122524323692:cluster/dev-eks1 to /Users/testuser/.kube/config 132 | 133 | 1. Validate your access to the cluster. 134 | You might get output like the one below. 135 | 136 | 137 | 138 | $ kubectl get pods 139 | error: You must be logged in to the server (Unauthorized) 140 | 141 | 142 | Check with the Cluster Administrator; they need your `RoleARN`, `CanonicalARN` and Kubernetes `namespace` to provide the desired access. 143 | 144 | ### Get RoleARN & CanonicalARN 145 | 146 | 147 | * token 148 | 149 | 150 | 151 | % aws eks get-token --cluster-name eks-1-uswest2-dev --region us-west-2 --profile=dev 152 | {"kind": "ExecCredential", "apiVersion": "client.authentication.k8s.io/v1alpha1", "spec": {}, "status": {"expirationTimestamp": "2020-03-08T09:10:13Z", "token": "<token>"}} 153 | 154 | * CanonicalARN 155 | 156 | 157 | 158 | % aws-iam-authenticator verify -i eks-1-uswest2-dev -t <token> 159 | 160 | * RoleARN : AWS Console 161 | 162 | ### To be executed by Cluster administrator 163 | 164 | * Requires cluster administrator or `system:masters` permissions. For this exercise, I will be providing full access to the `demo` namespace for the user's CanonicalARN: `arn:aws:iam::122524323692:role/AWSReservedSSO_AdministratorAccess_317bbdb6c7f7422b` 165 | 166 | 1. Update `roles.yaml` for your namespace. You need to create an exact copy, update `namespace` and `name` under `metadata` to match the naming convention, and apply the changes (see the example after the manifest). 167 | 168 | 169 | 170 | kind: Role 171 | apiVersion: rbac.authorization.k8s.io/v1 172 | metadata: 173 | namespace: demo # must match namespace 174 | name: demo-role # name of the role 175 | rules: # Admins permissions 176 | - apiGroups: [""] 177 | resources: ["*"] 178 | verbs: ["*"] 179 | - apiGroups: ["extensions"] 180 | resources: ["*"] 181 | verbs: ["*"] 182 | 183 |
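Apply the updated manifest and confirm the Role was created (a minimal sketch; the file name `roles.yaml`, the `demo` namespace, and the `demo-role` name follow the example above, so substitute your own):

      $ kubectl apply -f roles.yaml
      $ kubectl get role demo-role -n demo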
184 | 1. Update `rolebindings.yaml` for your namespace. You need to create an exact copy and update `metadata:namespace`, `metadata:name`, `subjects:name` and `roleRef:name` to match the naming convention 185 | 186 | 187 | 188 | kind: RoleBinding 189 | apiVersion: rbac.authorization.k8s.io/v1 190 | metadata: 191 | name: demo-role-binding # specify the name of the binding. Prepend with CI 192 | namespace: demo # specify namespace for binding to exist in. 193 | subjects: 194 | - kind: Group # In this step, we are creating a Group called <namespace>-admins. This will be referenced later. 195 | name: demo-admins 196 | apiGroup: rbac.authorization.k8s.io 197 | roleRef: 198 | kind: Role 199 | name: demo-role 200 | apiGroup: rbac.authorization.k8s.io 201 | 202 | 203 | 204 | 1. Update the `aws-auth` configmap to add the `roleARN` and `groups` mapping to complete the access request. 205 | 206 | 207 | 208 | $ kubectl get configmap aws-auth -n kube-system -o yaml > /tmp/aws-auth-configmap.yaml 209 | 210 | $ kubectl edit configmap aws-auth -n kube-system 211 | 212 | * Insert `rolearn`, `username` and `groups` under `mapRoles: |` 213 | 214 | 215 | - rolearn: arn:aws:iam::122524323692:role/AWSReservedSSO_AdministratorAccess_317bbdb6c7f7422b <- CanonicalARN 216 | username: arn:aws:iam::122524323692:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_AdministratorAccess_317bbdb6c7f7422b <- RoleARN 217 | groups: 218 | - demo-admins 219 | 220 | * RoleARN: Role to be assumed to access AWS EKS 221 | 222 | #### Validate access 223 | 224 | Once the DevOps team or CD job has executed the above steps, you are ready to validate access. 225 | 226 | 227 | $ kubectl get pods -n demo 228 | 229 | #### Query worker nodes specific to `node-group` 230 | 231 | 232 | 233 | kubectl get nodes -l eks.amazonaws.com/nodegroup=access-nodeport 234 | kubectl get nodes -l eks.amazonaws.com/nodegroup=activity-[0,1,2] 235 | kubectl get nodes -l eks.amazonaws.com/nodegroup=analytics-[0,1,2] 236 | 237 | ### Ref: 238 | * https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html --------------------------------------------------------------------------------