├── .gitignore ├── README.md ├── backend.tf ├── eks ├── eks_cluster │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── eks_iam_roles │ ├── main.tf │ └── outputs.tf ├── eks_node │ ├── main.tf │ ├── outputs.tf │ ├── userdata.tpl │ └── variables.tf └── eks_sec_group │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── main.tf ├── network ├── route │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── sec_group │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── subnets │ ├── main.tf │ ├── outputs.tf │ └── variables.tf └── vpc │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── outputs.tf ├── rds ├── main.tf ├── outputs.tf └── variables.tf ├── variables.tf └── yaml ├── eks-admin-cluster-role-binding.yaml └── eks-admin-service-account.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *.tfstate 3 | *.tfstate.backup 4 | *.tfvars 5 | .terraform 6 | tfplan 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Amazon AWS EKS and RDS PostgreSQL with terraform 2 | 3 | Assuming you already have Amazon AWS account we will need additional binaries for AWS CLI, terraform, kubectland aws-iam-authenticator. 4 | 5 | **Article is structured in 5 parts** 6 | 7 | * Initial tooling setup aws cli , kubectl and terraform 8 | * Creating terraform IAM account with access keys and access policy 9 | * Creating back-end storage for tfstate file in AWS S3 10 | * Creating Kubernetes cluster on AWS EKS and RDS on PostgreSQL 11 | * Working with kubernetes "kubectl" in EKS 12 | 13 | ## Initial tooling setup aws-cli, kubectl, terraform and aws-iam-authenticator 14 | 15 | Assuming you already have AWS account and [AWS CLI installed](https://docs.aws.amazon.com/cli/latest/userguide/awscli-install-linux.html) and [AWS CLI configured](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) for your user account we will need additional binaries for, terraform and kubectl. 16 | 17 | ### Deploying terraform 18 | 19 | #### terraform for OS X 20 | 21 | ```sh 22 | curl -o terraform_0.11.7_darwin_amd64.zip \ 23 | https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_darwin_amd64.zip 24 | 25 | unzip terraform_0.11.7_linux_amd64.zip -d /usr/local/bin/ 26 | ``` 27 | 28 | #### terraform for Linux 29 | 30 | ```sh 31 | curl https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_linux_amd64.zip > \ 32 | terraform_0.11.7_linux_amd64.zip 33 | 34 | unzip terraform_0.11.7_linux_amd64.zip -d /usr/local/bin/ 35 | ``` 36 | 37 | #### terraform installation verification 38 | 39 | Verify terraform version 0.11.7 or higher is installed: 40 | 41 | ```sh 42 | terraform version 43 | ``` 44 | 45 | ### Deploying kubectl 46 | 47 | #### kubectl for OS X 48 | 49 | ```sh 50 | curl -o kubectl https://storage.googleapis.com/kubernetes-release/release/v1.11.0/bin/darwin/amd64/kubectl 51 | 52 | chmod +x kubectl 53 | 54 | sudo mv kubectl /usr/local/bin/ 55 | ``` 56 | 57 | #### kubectl for Linux 58 | 59 | ```sh 60 | wget https://storage.googleapis.com/kubernetes-release/release/v1.11.0/bin/linux/amd64/kubectl 61 | 62 | chmod +x kubectl 63 | 64 | sudo mv kubectl /usr/local/bin/ 65 | ``` 66 | 67 | #### kubectl installation verification 68 | 69 | ```sh 70 | kubectl version --client 71 | ``` 72 | 73 | ### Deploying aws-iam-authenticator 74 | 75 | [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator) is a tool developed by [Heptio](https://heptio.com/) Team and this tool will allow us to manage EKS by using kubectl 76 | 77 | #### aws-iam-authenticator for OS X 78 | 79 | ```sh 80 | curl -o aws-iam-authenticator \ 81 | https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-07-26/bin/darwin/amd64/aws-iam-authenticator 82 | 83 | chmod +x ./aws-iam-authenticator 84 | 85 | cp ./aws-iam-authenticator $HOME/bin/aws-iam-authenticator && export PATH=$HOME/bin:$PATH 86 | ``` 87 | 88 | #### aws-iam-authenticator for Linux 89 | 90 | ```sh 91 | curl -o aws-iam-authenticator \ 92 | https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-07-26/bin/linux/amd64/aws-iam-authenticator 93 | 94 | chmod +x ./aws-iam-authenticator 95 | 96 | cp ./aws-iam-authenticator $HOME/.local/bin/aws-iam-authenticator && export PATH=$HOME/bin:$PATH 97 | ``` 98 | 99 | #### aws-iam-authenticator installation verification 100 | 101 | ```sh 102 | aws-iam-authenticator help 103 | ``` 104 | 105 | ### Authenticate to AWS 106 | 107 | Before configuring AWS CLI as EKS at this time is only available in US East (N. Virginia) and US West (Oregon) 108 | In below example we will be using US West (Oregon) "us-west-2" 109 | 110 | ```sh 111 | aws configure 112 | ``` 113 | 114 | ## Creating terraform IAM account with access keys and access policy 115 | 116 | 1st step is to setup terraform admin account in AWS IAM 117 | 118 | ### Create IAM terraform User 119 | 120 | ```sh 121 | aws iam create-user --user-name terraform 122 | ``` 123 | 124 | ### Add to newly created terraform user IAM admin policy 125 | 126 | > NOTE: For production or event proper testing account you may need tighten up and restrict access for terraform IAM user 127 | 128 | 129 | ```sh 130 | aws iam attach-user-policy --user-name terraform --policy-arn arn:aws:iam::aws:policy/AdministratorAccess 131 | ``` 132 | 133 | ### Create access keys for the user 134 | 135 | > NOTE: This Access Key and Secret Access Key will be used by terraform to manage infrastructure deployment 136 | 137 | ```sh 138 | aws iam create-access-key --user-name terraform 139 | ``` 140 | 141 | ### update terraform.tfvars file with access and security keys for newly created terraform IAM account 142 | 143 | [![asciicast](https://asciinema.org/a/195785.png)](https://asciinema.org/a/195785) 144 | 145 | ## Creating back-end storage for tfstate file in AWS S3 146 | 147 | Once we have terraform IAM account created we can proceed to next step creating dedicated bucket to keep terraform state files 148 | 149 | ### Create terraform state bucket 150 | 151 | > NOTE: Change name of the bucker, name should be unique across all AWS S3 buckets 152 | 153 | ```sh 154 | aws s3 mb s3://terra-state-bucket --region us-west-2 155 | ``` 156 | 157 | ### Enable versioning on the newly created bucket 158 | 159 | ```sh 160 | aws s3api put-bucket-versioning --bucket terra-state-bucket --versioning-configuration Status=Enabled 161 | ``` 162 | 163 | [![asciicast](https://asciinema.org/a/195792.png)](https://asciinema.org/a/195792) 164 | 165 | ## Creating Kubernetes cluster on AWS EKS and RDS on PostgreSQL 166 | 167 | Now we can move into creating new infrastructure, eks and rds with terraform 168 | 169 | ```sh 170 | . 171 | ├── backend.tf 172 | ├── eks 173 | │   ├── eks_cluster 174 | │   │   ├── main.tf 175 | │   │   ├── outputs.tf 176 | │   │   └── variables.tf 177 | │   ├── eks_iam_roles 178 | │   │   ├── main.tf 179 | │   │   └── outputs.tf 180 | │   ├── eks_node 181 | │   │   ├── main.tf 182 | │   │   ├── outputs.tf 183 | │   │   ├── userdata.tpl 184 | │   │   └── variables.tf 185 | │   └── eks_sec_group 186 | │   ├── main.tf 187 | │   ├── outputs.tf 188 | │   └── variables.tf 189 | ├── main.tf 190 | ├── network 191 | │   ├── route 192 | │   │   ├── main.tf 193 | │   │   ├── outputs.tf 194 | │   │   └── variables.tf 195 | │   ├── sec_group 196 | │   │   ├── main.tf 197 | │   │   ├── outputs.tf 198 | │   │   └── variables.tf 199 | │   ├── subnets 200 | │   │   ├── main.tf 201 | │   │   ├── outputs.tf 202 | │   │   └── variables.tf 203 | │   └── vpc 204 | │   ├── main.tf 205 | │   ├── outputs.tf 206 | │   └── variables.tf 207 | ├── outputs.tf 208 | ├── rds 209 | │   ├── main.tf 210 | │   ├── outputs.tf 211 | │   └── variables.tf 212 | ├── README.md 213 | ├── terraform.tfvars 214 | ├── variables.tf 215 | └── yaml 216 | ├── eks-admin-cluster-role-binding.yaml 217 | └── eks-admin-service-account.yaml 218 | ``` 219 | 220 | We will use terraform modules to keep our code clean and organized 221 | Terraform will run 2 separate environment dev and prod using same sources only difference in this case is number of worker nodes for kubernetes. 222 | 223 | ```go 224 | # Specify the provider and access details 225 | provider "aws" { 226 | access_key = "${var.access_key}" 227 | secret_key = "${var.secret_key}" 228 | region = "${var.aws_region}" 229 | } 230 | 231 | ## Network 232 | # Create VPC 233 | module "vpc" { 234 | source = "./network/vpc" 235 | eks_cluster_name = "${var.eks_cluster_name}" 236 | cidr_block = "${var.cidr_block}" 237 | } 238 | 239 | # Create Subnets 240 | module "subnets" { 241 | source = "./network/subnets" 242 | eks_cluster_name = "${var.eks_cluster_name}" 243 | vpc_id = "${module.vpc.vpc_id}" 244 | vpc_cidr_block = "${module.vpc.vpc_cidr_block}" 245 | } 246 | 247 | # Configure Routes 248 | module "route" { 249 | source = "./network/route" 250 | main_route_table_id = "${module.vpc.main_route_table_id}" 251 | gw_id = "${module.vpc.gw_id}" 252 | 253 | subnets = [ 254 | "${module.subnets.subnets}", 255 | ] 256 | } 257 | 258 | module "eks_iam_roles" { 259 | source = "./eks/eks_iam_roles" 260 | } 261 | 262 | module "eks_sec_group" { 263 | source = "./eks/eks_sec_group" 264 | eks_cluster_name = "${var.eks_cluster_name}" 265 | vpc_id = "${module.vpc.vpc_id}" 266 | } 267 | 268 | module "eks_cluster" { 269 | source = "./eks/eks_cluster" 270 | eks_cluster_name = "${var.eks_cluster_name}" 271 | iam_cluster_arn = "${module.eks_iam_roles.iam_cluster_arn}" 272 | iam_node_arn = "${module.eks_iam_roles.iam_node_arn}" 273 | 274 | subnets = [ 275 | "${module.subnets.subnets}", 276 | ] 277 | 278 | security_group_cluster = "${module.eks_sec_group.security_group_cluster}" 279 | } 280 | 281 | module "eks_node" { 282 | source = "./eks/eks_node" 283 | eks_cluster_name = "${var.eks_cluster_name}" 284 | eks_certificate_authority = "${module.eks_cluster.eks_certificate_authority}" 285 | eks_endpoint = "${module.eks_cluster.eks_endpoint}" 286 | iam_instance_profile = "${module.eks_iam_roles.iam_instance_profile}" 287 | security_group_node = "${module.eks_sec_group.security_group_node}" 288 | 289 | subnets = [ 290 | "${module.subnets.subnets}", 291 | ] 292 | } 293 | 294 | module "sec_group_rds" { 295 | source = "./network/sec_group" 296 | vpc_id = "${module.vpc.vpc_id}" 297 | vpc_cidr_block = "${module.vpc.vpc_cidr_block}" 298 | } 299 | 300 | 301 | module "rds" { 302 | source = "./rds" 303 | 304 | subnets = [ 305 | "${module.subnets.subnets}", 306 | ] 307 | 308 | sec_grp_rds = "${module.sec_group_rds.sec_grp_rds}" 309 | identifier = "${var.identifier}" 310 | storage_type = "${var.storage_type}" 311 | allocated_storage = "${var.allocated_storage}" 312 | db_engine = "${var.db_engine}" 313 | engine_version = "${var.engine_version}" 314 | instance_class = "${var.instance_class}" 315 | db_username = "${var.db_username}" 316 | db_password = "${var.db_password}" 317 | sec_grp_rds = "${module.sec_group_rds.sec_grp_rds}" 318 | } 319 | ``` 320 | 321 | Terraform modules will create 322 | 323 | * VPC 324 | * Subnets 325 | * Routes 326 | * IAM Roles for master and nodes 327 | * Security Groups "Firewall" to allow master and nodes to communicate 328 | * EKS cluster 329 | * Autoscaling Group will create nodes to be added to the cluster 330 | * Security group for RDS 331 | * RDS with PostgreSQL 332 | 333 | > NOTE: very important to keep tags as if tags is not specify nodes will not be able to join cluster 334 | 335 | ### Initial setup create and create new workspace for terraform 336 | 337 | cd into project folder and create workspace for dev and prod 338 | 339 | #### Initialize and pull terraform cloud specific dependencies 340 | 341 | ```sh 342 | terraform init 343 | ``` 344 | 345 | #### Create dev workspace 346 | 347 | ```sh 348 | terraform workspace new dev 349 | ``` 350 | 351 | #### List available workspace 352 | 353 | ```sh 354 | terraform workspace list 355 | ``` 356 | #### Select dev workspace 357 | 358 | ```sh 359 | terraform workspace select dev 360 | ``` 361 | 362 | Before we can start will need to update variables and add db password to terraform.tfvars 363 | 364 | ```sh 365 | echo 'db_password = "Your_DB_Passwd."' >> terraform.tfvars 366 | ``` 367 | 368 | #### It's a good idea to sync terraform modules 369 | 370 | ```sh 371 | terraform get -update 372 | ``` 373 | 374 | [![asciicast](https://asciinema.org/a/195796.png)](https://asciinema.org/a/195796) 375 | 376 | ### View terraform plan 377 | 378 | ```sh 379 | terraform plan 380 | ``` 381 | 382 | ### Apply terraform plan 383 | 384 | > NOTE: building complete infrastructure may take more than 10 minutes. 385 | 386 | ```sh 387 | terraform apply 388 | ``` 389 | 390 | [![asciicast](https://asciinema.org/a/195802.png)](https://asciinema.org/a/195802) 391 | 392 | ### Verify instance creation 393 | 394 | ```sh 395 | aws ec2 describe-instances --output table 396 | ``` 397 | 398 | ### We are not done yet 399 | 400 | #### Create new AWS CLI profile 401 | 402 | In order to use kubectl with EKS we need to set new AWS CLI profile 403 | 404 | > NOTE: will need to use secret and access keys from terraform.tfvars 405 | 406 | ```sh 407 | cat terraform.tfvars 408 | 409 | aws configure --profile terraform 410 | 411 | export AWS_PROFILE=terraform 412 | ``` 413 | 414 | #### Configure kubectl to allow us to connect to EKS cluster 415 | 416 | In terraform configuration we output configuration file for kubectl 417 | 418 | ```sh 419 | terraform output kubeconfig 420 | ``` 421 | 422 | #### Add output of "terraform output kubeconfig" to ~/.kube/config-devel 423 | 424 | ```sh 425 | terraform output kubeconfig > ~/.kube/config-devel 426 | 427 | export KUBECONFIG=$KUBECONFIG:~/.kube/config-devel 428 | ``` 429 | 430 | #### Verify kubectl connectivity 431 | 432 | ```sh 433 | kubectl get namespaces 434 | 435 | kubectl get services 436 | ``` 437 | 438 | #### Second part we need to allow EKS to add nodes by running configmap 439 | 440 | ```sh 441 | terraform output config_map_aws_auth > yaml/config_map_aws_auth.yaml 442 | 443 | kubectl apply -f yaml/config_map_aws_auth.yaml 444 | ``` 445 | 446 | #### Now you should be able to see nodes 447 | 448 | ```sh 449 | kubectl get nodes 450 | ``` 451 | 452 | [![asciicast](https://asciinema.org/a/195818.png)](https://asciinema.org/a/195818) 453 | 454 | ## Working with terraform on EKS 455 | 456 | ### Deploy the [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) 457 | 458 | #### Deploy the Kubernetes dashboard 459 | 460 | ```sh 461 | kubectl apply -f \ 462 | https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml 463 | ``` 464 | 465 | #### Deploy heapster to enable container cluster monitoring and performance analysis on your cluster 466 | 467 | ```sh 468 | kubectl apply -f \ 469 | https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml 470 | ``` 471 | 472 | #### Deploy the influxdb backend for heapster to your cluster 473 | 474 | ```sh 475 | kubectl apply -f \ 476 | https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml 477 | ``` 478 | 479 | #### Create the heapster cluster role binding for the dashboard 480 | 481 | ```sh 482 | kubectl apply -f \ 483 | https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml 484 | ``` 485 | 486 | ### Create an eks-admin Service Account and Cluster Role Binding 487 | 488 | #### Apply the service account to your cluster 489 | 490 | ```sh 491 | kubectl apply -f yaml/eks-admin-service-account.yaml 492 | ``` 493 | 494 | #### Apply the cluster role binding to your cluster 495 | 496 | ```sh 497 | kubectl apply -f yaml/eks-admin-cluster-role-binding.yaml 498 | ``` 499 | 500 | ### Connect to the Dashboard 501 | 502 | ```sh 503 | kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}') 504 | 505 | kubectl proxy 506 | ``` 507 | 508 | > NOTE: Open the link with a web browser to access the dashboard endpoint: http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/ 509 | 510 | > NOTE: Choose Token and paste output from the previous command into the Token field 511 | 512 | [![asciicast](https://asciinema.org/a/195823.png)](https://asciinema.org/a/195823) 513 | 514 | ## Rolling back all changes 515 | 516 | ### Destroy all terraform created infrastructure 517 | 518 | ```sh 519 | terraform destroy -auto-approve 520 | ``` 521 | 522 | [![asciicast](https://asciinema.org/a/195827.png)](https://asciinema.org/a/195827) 523 | 524 | ### Removing S3 bucket, IAM roles and terraform account 525 | 526 | ```sh 527 | export AWS_PROFILE=default 528 | 529 | aws s3 rm s3://terra-state-bucket --recursive 530 | 531 | aws s3api put-bucket-versioning --bucket terra-state-bucket --versioning-configuration Status=Suspended 532 | 533 | aws s3api delete-objects --bucket terra-state-bucket --delete \ 534 | "$(aws s3api list-object-versions --bucket terra-state-bucket | \ 535 | jq '{Objects: [.Versions[] | {Key:.Key, VersionId : .VersionId}], Quiet: false}')" 536 | 537 | aws s3 rb s3://terra-state-bucket --force 538 | 539 | aws iam detach-user-policy --user-name terraform --policy-arn arn:aws:iam::aws:policy/AdministratorAccess 540 | 541 | aws iam list-access-keys --user-name terraform --query 'AccessKeyMetadata[*].{ID:AccessKeyId}' --output text 542 | 543 | aws iam delete-access-key --user-name terraform --access-key-id OUT_KEY 544 | 545 | aws iam delete-user --user-name terraform 546 | ``` -------------------------------------------------------------------------------- /backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "terra-state-bucket" 4 | key = "tfstate" 5 | region = "us-west-2" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /eks/eks_cluster/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_eks_cluster" "terra" { 2 | name = "${var.eks_cluster_name}-${terraform.workspace}" 3 | role_arn = "${var.iam_cluster_arn}" 4 | 5 | vpc_config { 6 | security_group_ids = ["${var.security_group_cluster}"] 7 | subnet_ids = ["${var.subnets}"] 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /eks/eks_cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | config_map_aws_auth = < $CA_CERTIFICATE_FILE_PATH 7 | INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) 8 | sed -i s,MASTER_ENDPOINT,${eks_endpoint},g /var/lib/kubelet/kubeconfig 9 | sed -i s,CLUSTER_NAME,${eks_cluster_name}-${workspace},g /var/lib/kubelet/kubeconfig 10 | sed -i s,REGION,${aws_region_current_name},g /etc/systemd/system/kubelet.service 11 | sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service 12 | sed -i s,MASTER_ENDPOINT,${eks_endpoint},g /etc/systemd/system/kubelet.service 13 | sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service 14 | DNS_CLUSTER_IP=10.100.0.10 15 | if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi 16 | sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service 17 | sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig 18 | sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service 19 | systemctl daemon-reload 20 | systemctl restart kubelet 21 | -------------------------------------------------------------------------------- /eks/eks_node/variables.tf: -------------------------------------------------------------------------------- 1 | variable "eks_cluster_name" { 2 | description = "cluster name" 3 | } 4 | 5 | variable "eks_certificate_authority" { 6 | description = "eks certificate authority" 7 | } 8 | 9 | variable "eks_endpoint" { 10 | description = "eks cluster endpoint" 11 | } 12 | 13 | variable "iam_instance_profile" { 14 | description = "eks instance profile name" 15 | } 16 | 17 | variable "security_group_node" { 18 | description = "eks security group name" 19 | } 20 | 21 | variable "subnets" { 22 | type = "list" 23 | } 24 | 25 | -------------------------------------------------------------------------------- /eks/eks_sec_group/main.tf: -------------------------------------------------------------------------------- 1 | # EKS Cluster Resources 2 | # EC2 Security Group to allow networking traffic with EKS cluster 3 | 4 | resource "aws_security_group" "cluster" { 5 | name = "terraform-eks-cluster" 6 | description = "Cluster communication with worker nodes" 7 | vpc_id = "${var.vpc_id}" 8 | 9 | egress { 10 | from_port = 0 11 | to_port = 0 12 | protocol = "-1" 13 | cidr_blocks = ["0.0.0.0/0"] 14 | } 15 | 16 | tags { 17 | Name = "${terraform.workspace}" 18 | } 19 | } 20 | 21 | resource "aws_security_group_rule" "cluster-ingress-node-https" { 22 | description = "Allow pods to communicate with the cluster API Server" 23 | from_port = 443 24 | protocol = "tcp" 25 | security_group_id = "${aws_security_group.cluster.id}" 26 | source_security_group_id = "${aws_security_group.node.id}" 27 | to_port = 443 28 | type = "ingress" 29 | } 30 | 31 | resource "aws_security_group_rule" "cluster-ingress-workstation-https" { 32 | cidr_blocks = ["0.0.0.0/0"] 33 | description = "Allow workstation to communicate with the cluster API Server" 34 | from_port = 443 35 | protocol = "tcp" 36 | security_group_id = "${aws_security_group.cluster.id}" 37 | to_port = 443 38 | type = "ingress" 39 | } 40 | 41 | # EKS Worker Nodes Resources 42 | # EC2 Security Group to allow networking traffic 43 | 44 | resource "aws_security_group" "node" { 45 | name = "terraform-eks-node" 46 | description = "Security group for all nodes in the cluster" 47 | vpc_id = "${var.vpc_id}" 48 | 49 | egress { 50 | from_port = 0 51 | to_port = 0 52 | protocol = "-1" 53 | cidr_blocks = ["0.0.0.0/0"] 54 | } 55 | 56 | tags = "${ 57 | map( 58 | "Name", "terraform-eks-${terraform.workspace}", 59 | "kubernetes.io/cluster/${var.eks_cluster_name}-${terraform.workspace}", "owned", 60 | ) 61 | }" 62 | } 63 | 64 | resource "aws_security_group_rule" "node-ingress-self" { 65 | description = "Allow node to communicate with each other" 66 | from_port = 0 67 | protocol = "-1" 68 | security_group_id = "${aws_security_group.node.id}" 69 | source_security_group_id = "${aws_security_group.node.id}" 70 | to_port = 65535 71 | type = "ingress" 72 | } 73 | 74 | resource "aws_security_group_rule" "node-ingress-cluster" { 75 | description = "Allow worker Kubelets and pods to receive communication from the cluster control plane" 76 | from_port = 1025 77 | protocol = "tcp" 78 | security_group_id = "${aws_security_group.node.id}" 79 | source_security_group_id = "${aws_security_group.cluster.id}" 80 | to_port = 65535 81 | type = "ingress" 82 | } 83 | -------------------------------------------------------------------------------- /eks/eks_sec_group/outputs.tf: -------------------------------------------------------------------------------- 1 | output "security_group_cluster" { 2 | value = "${aws_security_group.cluster.id}" 3 | } 4 | 5 | output "security_group_node" { 6 | value = "${aws_security_group.node.id}" 7 | } 8 | -------------------------------------------------------------------------------- /eks/eks_sec_group/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vpc_id" {} 2 | 3 | variable "eks_cluster_name" {} 4 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # sPECIFy the provider and access details 2 | provider "aws" { 3 | access_key = "${var.access_key}" 4 | secret_key = "${var.secret_key}" 5 | region = "${var.aws_region}" 6 | } 7 | 8 | ## Network 9 | # Create VPC 10 | module "vpc" { 11 | source = "./network/vpc" 12 | eks_cluster_name = "${var.eks_cluster_name}" 13 | cidr_block = "${var.cidr_block}" 14 | } 15 | 16 | # Create Subnets 17 | module "subnets" { 18 | source = "./network/subnets" 19 | eks_cluster_name = "${var.eks_cluster_name}" 20 | vpc_id = "${module.vpc.vpc_id}" 21 | vpc_cidr_block = "${module.vpc.vpc_cidr_block}" 22 | } 23 | 24 | # Configure Routes 25 | module "route" { 26 | source = "./network/route" 27 | main_route_table_id = "${module.vpc.main_route_table_id}" 28 | gw_id = "${module.vpc.gw_id}" 29 | 30 | subnets = [ 31 | "${module.subnets.subnets}", 32 | ] 33 | } 34 | 35 | module "eks_iam_roles" { 36 | source = "./eks/eks_iam_roles" 37 | } 38 | 39 | module "eks_sec_group" { 40 | source = "./eks/eks_sec_group" 41 | eks_cluster_name = "${var.eks_cluster_name}" 42 | vpc_id = "${module.vpc.vpc_id}" 43 | } 44 | 45 | module "eks_cluster" { 46 | source = "./eks/eks_cluster" 47 | eks_cluster_name = "${var.eks_cluster_name}" 48 | iam_cluster_arn = "${module.eks_iam_roles.iam_cluster_arn}" 49 | iam_node_arn = "${module.eks_iam_roles.iam_node_arn}" 50 | 51 | subnets = [ 52 | "${module.subnets.subnets}", 53 | ] 54 | 55 | security_group_cluster = "${module.eks_sec_group.security_group_cluster}" 56 | } 57 | 58 | module "eks_node" { 59 | source = "./eks/eks_node" 60 | eks_cluster_name = "${var.eks_cluster_name}" 61 | eks_certificate_authority = "${module.eks_cluster.eks_certificate_authority}" 62 | eks_endpoint = "${module.eks_cluster.eks_endpoint}" 63 | iam_instance_profile = "${module.eks_iam_roles.iam_instance_profile}" 64 | security_group_node = "${module.eks_sec_group.security_group_node}" 65 | 66 | subnets = [ 67 | "${module.subnets.subnets}", 68 | ] 69 | } 70 | 71 | module "sec_group_rds" { 72 | source = "./network/sec_group" 73 | vpc_id = "${module.vpc.vpc_id}" 74 | vpc_cidr_block = "${module.vpc.vpc_cidr_block}" 75 | } 76 | 77 | 78 | module "rds" { 79 | source = "./rds" 80 | 81 | subnets = [ 82 | "${module.subnets.subnets}", 83 | ] 84 | 85 | sec_grp_rds = "${module.sec_group_rds.sec_grp_rds}" 86 | identifier = "${var.identifier}" 87 | storage_type = "${var.storage_type}" 88 | allocated_storage = "${var.allocated_storage}" 89 | db_engine = "${var.db_engine}" 90 | engine_version = "${var.engine_version}" 91 | instance_class = "${var.instance_class}" 92 | db_username = "${var.db_username}" 93 | db_password = "${var.db_password}" 94 | sec_grp_rds = "${module.sec_group_rds.sec_grp_rds}" 95 | } 96 | 97 | -------------------------------------------------------------------------------- /network/route/main.tf: -------------------------------------------------------------------------------- 1 | # Create new routing table without internet access for minin instances 2 | 3 | data "aws_availability_zones" "available" {} 4 | 5 | locals { 6 | network_count = "${length(data.aws_availability_zones.available.names)}" 7 | } 8 | 9 | resource "aws_default_route_table" "route" { 10 | default_route_table_id = "${var.main_route_table_id}" 11 | 12 | tags { 13 | Name = "${terraform.workspace}" 14 | } 15 | } 16 | 17 | resource "aws_route_table_association" "route_ass" { 18 | count = "${local.network_count}" 19 | subnet_id = "${element(var.subnets, count.index)}" 20 | route_table_id = "${var.main_route_table_id}" 21 | } 22 | 23 | # Internet access 24 | resource "aws_route" "route_internet" { 25 | route_table_id = "${var.main_route_table_id}" 26 | destination_cidr_block = "0.0.0.0/0" 27 | gateway_id = "${var.gw_id}" 28 | depends_on = ["aws_route_table_association.route_ass"] 29 | } 30 | -------------------------------------------------------------------------------- /network/route/outputs.tf: -------------------------------------------------------------------------------- 1 | output "route_id" { 2 | value = "${aws_default_route_table.route.id}" 3 | } 4 | -------------------------------------------------------------------------------- /network/route/variables.tf: -------------------------------------------------------------------------------- 1 | variable "main_route_table_id" {} 2 | 3 | variable "gw_id" {} 4 | 5 | variable "subnets" { 6 | type = "list" 7 | } 8 | -------------------------------------------------------------------------------- /network/sec_group/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "sec_grp_rds" { 2 | name = "rds" 3 | description = "rds securety group" 4 | vpc_id = "${var.vpc_id}" 5 | 6 | ingress { 7 | from_port = 5432 8 | to_port = 5432 9 | protocol = "tcp" 10 | cidr_blocks = ["0.0.0.0/0"] 11 | } 12 | 13 | /* ingress { 14 | from_port = 5432 15 | to_port = 5432 16 | protocol = "tcp" 17 | cidr_blocks = [ 18 | "${var.vpc_cidr_block}"] 19 | } */ 20 | 21 | egress { 22 | from_port = 5432 23 | to_port = 5432 24 | protocol = "tcp" 25 | cidr_blocks = ["0.0.0.0/0"] 26 | } 27 | 28 | /* egress { 29 | from_port = 5432 30 | to_port = 5432 31 | protocol = "tcp" 32 | cidr_blocks = [ 33 | "${var.vpc_cidr_block}"] 34 | } */ 35 | 36 | tags { 37 | Name = "${terraform.workspace}" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /network/sec_group/outputs.tf: -------------------------------------------------------------------------------- 1 | output "sec_grp_rds" { 2 | value = "${aws_security_group.sec_grp_rds.id}" 3 | } 4 | -------------------------------------------------------------------------------- /network/sec_group/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vpc_cidr_block" { 2 | # type = "map" 3 | } 4 | 5 | variable "vpc_id" {} 6 | -------------------------------------------------------------------------------- /network/subnets/main.tf: -------------------------------------------------------------------------------- 1 | # Create a external facing subnets in us-west-2a AZ 2 | data "aws_availability_zones" "available" {} 3 | 4 | resource "aws_subnet" "subnet" { 5 | count = "${length(data.aws_availability_zones.available.names)}" 6 | cidr_block = "${cidrsubnet(var.vpc_cidr_block, 8, count.index)}" 7 | availability_zone = "${data.aws_availability_zones.available.names[count.index]}" 8 | vpc_id = "${var.vpc_id}" 9 | map_public_ip_on_launch = true 10 | 11 | tags = "${ 12 | map( 13 | "Name", "terraform-eks-${terraform.workspace}", 14 | "kubernetes.io/cluster/${var.eks_cluster_name}-${terraform.workspace}", "shared", 15 | ) 16 | }" 17 | } 18 | -------------------------------------------------------------------------------- /network/subnets/outputs.tf: -------------------------------------------------------------------------------- 1 | output "subnets" { 2 | value = ["${aws_subnet.subnet.*.id}"] 3 | } 4 | -------------------------------------------------------------------------------- /network/subnets/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vpc_id" {} 2 | 3 | variable "vpc_cidr_block" {} 4 | 5 | variable "eks_cluster_name" {} 6 | -------------------------------------------------------------------------------- /network/vpc/main.tf: -------------------------------------------------------------------------------- 1 | # Create a VPC to launch build instances into 2 | resource "aws_vpc" "vpc_id" { 3 | cidr_block = "${var.cidr_block[terraform.workspace]}" 4 | enable_dns_hostnames = true 5 | enable_dns_support = true 6 | instance_tenancy = "default" 7 | 8 | lifecycle { 9 | create_before_destroy = true 10 | } 11 | 12 | tags = "${ 13 | map( 14 | "Name", "terraform-eks-${terraform.workspace}", 15 | "kubernetes.io/cluster/${var.eks_cluster_name}-${terraform.workspace}", "shared", 16 | ) 17 | }" 18 | } 19 | 20 | # Create an internet gateway to give our subnet access to the outside world 21 | resource "aws_internet_gateway" "gw_id" { 22 | vpc_id = "${aws_vpc.vpc_id.id}" 23 | 24 | tags { 25 | Name = "${terraform.workspace}" 26 | } 27 | } 28 | 29 | # Create dhcp option setup 30 | resource "aws_vpc_dhcp_options" "vpc_dhcp_id" { 31 | domain_name = "us-west-2.compute.internal" 32 | domain_name_servers = ["AmazonProvidedDNS"] 33 | 34 | tags { 35 | Name = "${terraform.workspace}" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /network/vpc/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | value = "${aws_vpc.vpc_id.id}" 3 | } 4 | 5 | output "vpc_cidr_block" { 6 | value = "${aws_vpc.vpc_id.cidr_block}" 7 | } 8 | 9 | output "gw_id" { 10 | value = "${aws_internet_gateway.gw_id.id}" 11 | } 12 | 13 | output "main_route_table_id" { 14 | value = "${aws_vpc.vpc_id.main_route_table_id}" 15 | } 16 | 17 | output "vpc_dhcp_id" { 18 | value = "${aws_vpc_dhcp_options.vpc_dhcp_id.id}" 19 | } 20 | -------------------------------------------------------------------------------- /network/vpc/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cidr_block" { 2 | type = "map" 3 | } 4 | 5 | variable "eks_cluster_name" {} 6 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | value = "${module.vpc.vpc_id}" 3 | } 4 | 5 | output "vpc_cidr_block" { 6 | value = "${module.vpc.vpc_cidr_block}" 7 | } 8 | 9 | output "gw_id" { 10 | value = "${module.vpc.gw_id}" 11 | } 12 | 13 | output "main_route_table_id" { 14 | value = "${module.vpc.main_route_table_id}" 15 | } 16 | 17 | output "vpc_dhcp_id" { 18 | value = "${module.vpc.vpc_dhcp_id}" 19 | } 20 | 21 | output "subnets" { 22 | value = ["${module.subnets.subnets}"] 23 | } 24 | 25 | output "route_id" { 26 | value = "${module.route.route_id}" 27 | } 28 | 29 | output "security_group_cluster" { 30 | value = "${module.eks_sec_group.security_group_cluster}" 31 | } 32 | 33 | output "security_group_node" { 34 | value = "${module.eks_sec_group.security_group_node}" 35 | } 36 | 37 | output "iam_instance_profile" { 38 | value = "${module.eks_iam_roles.iam_instance_profile}" 39 | } 40 | 41 | output "iam_cluster_arn" { 42 | value = "${module.eks_iam_roles.iam_cluster_arn}" 43 | } 44 | 45 | output "iam_node_arn" { 46 | value = "${module.eks_iam_roles.iam_node_arn}" 47 | } 48 | 49 | output "config_map_aws_auth" { 50 | value = "${module.eks_cluster.config_map_aws_auth}" 51 | } 52 | 53 | output "kubeconfig" { 54 | value = "${module.eks_cluster.kubeconfig}" 55 | } 56 | 57 | output "eks_certificate_authority" { 58 | value = "${module.eks_cluster.eks_certificate_authority}" 59 | } 60 | 61 | output "eks_endpoint" { 62 | value = "${module.eks_cluster.eks_endpoint}" 63 | } 64 | 65 | output "eks_cluster_name" { 66 | value = "${module.eks_cluster.eks_cluster_name}" 67 | } 68 | 69 | output "sec_grp_rds" { 70 | value = "${module.sec_group_rds.sec_grp_rds}" 71 | } 72 | 73 | output "db_subnet_group_id" { 74 | value = "${module.rds.db_subnet_group_id}" 75 | } 76 | 77 | output "db_subnet_group_arn" { 78 | value = "${module.rds.db_subnet_group_arn}" 79 | } 80 | 81 | output "db_instance_address" { 82 | value = "${module.rds.db_instance_address}" 83 | } 84 | 85 | output "db_instance_arn" { 86 | value = "${module.rds.db_instance_arn}" 87 | } 88 | 89 | output "db_instance_availability_zone" { 90 | value = "${module.rds.db_instance_availability_zone}" 91 | } 92 | 93 | output "db_instance_endpoint" { 94 | value = "${module.rds.db_instance_endpoint}" 95 | } 96 | 97 | output "db_instance_id" { 98 | value = "${module.rds.db_instance_id}" 99 | } 100 | 101 | output "db_instance_resource_id" { 102 | value = "${module.rds.db_instance_resource_id}" 103 | } 104 | 105 | output "db_instance_status" { 106 | value = "${module.rds.db_instance_status}" 107 | } 108 | 109 | output "db_instance_name" { 110 | value = "${module.rds.db_instance_name}" 111 | } 112 | 113 | output "db_instance_username" { 114 | value = "${module.rds.db_instance_username}" 115 | } 116 | 117 | output "db_instance_password" { 118 | value = "${module.rds.db_instance_password}" 119 | } 120 | 121 | output "db_instance_port" { 122 | value = "${module.rds.db_instance_port}" 123 | } 124 | 125 | -------------------------------------------------------------------------------- /rds/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | 3 | resource "aws_db_subnet_group" "db_sub_gr" { 4 | description = "terrafom db subnet group" 5 | name = "main_subnet_group" 6 | subnet_ids = ["${var.subnets}"] 7 | 8 | # subnet_ids = [ 9 | # "${var.api_dev_int_subnet_ids}"] 10 | tags { 11 | Name = "${terraform.workspace}" 12 | } 13 | } 14 | 15 | resource "aws_db_instance" "db" { 16 | identifier = "${var.identifier}" 17 | storage_type = "${var.storage_type}" 18 | allocated_storage = "${var.allocated_storage[terraform.workspace]}" 19 | engine = "${var.db_engine}" 20 | engine_version = "${var.engine_version}" 21 | instance_class = "${var.instance_class[terraform.workspace]}" 22 | name = "${terraform.workspace}" 23 | username = "${var.db_username}" 24 | password = "${var.db_password}" 25 | 26 | vpc_security_group_ids = [ 27 | "${var.sec_grp_rds}", 28 | ] 29 | 30 | db_subnet_group_name = "${aws_db_subnet_group.db_sub_gr.id}" 31 | storage_encrypted = false 32 | skip_final_snapshot = true 33 | publicly_accessible = false 34 | multi_az = false 35 | 36 | tags { 37 | Name = "${terraform.workspace}" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /rds/outputs.tf: -------------------------------------------------------------------------------- 1 | output "db_subnet_group_id" { 2 | description = "The db subnet group name" 3 | value = "${element(concat(aws_db_subnet_group.db_sub_gr.*.id, list("")), 0)}" 4 | } 5 | 6 | output "db_subnet_group_arn" { 7 | description = "The ARN of the db subnet group" 8 | value = "${element(concat(aws_db_subnet_group.db_sub_gr.*.arn, list("")), 0)}" 9 | } 10 | 11 | /* 12 | output "db_instance_id" { 13 | value = "${aws_db_instance.db.id}" 14 | } 15 | 16 | output "db_instance_address" { 17 | value = "${aws_db_instance.db.address}" 18 | } 19 | */ 20 | locals { 21 | db_instance_address = "${element(concat(coalescelist(aws_db_instance.db.*.address, aws_db_instance.this.*.address), list("")), 0)}" 22 | db_instance_arn = "${element(concat(coalescelist(aws_db_instance.db.*.arn, aws_db_instance.this.*.arn), list("")), 0)}" 23 | db_instance_availability_zone = "${element(concat(coalescelist(aws_db_instance.db.*.availability_zone, aws_db_instance.this.*.availability_zone), list("")), 0)}" 24 | db_instance_endpoint = "${element(concat(coalescelist(aws_db_instance.db.*.endpoint, aws_db_instance.this.*.endpoint), list("")), 0)}" 25 | db_instance_hosted_zone_id = "${element(concat(coalescelist(aws_db_instance.db.*.hosted_zone_id, aws_db_instance.this.*.hosted_zone_id), list("")), 0)}" 26 | db_instance_id = "${element(concat(coalescelist(aws_db_instance.db.*.id, aws_db_instance.this.*.id), list("")), 0)}" 27 | db_instance_resource_id = "${element(concat(coalescelist(aws_db_instance.db.*.resource_id, aws_db_instance.this.*.resource_id), list("")), 0)}" 28 | db_instance_status = "${element(concat(coalescelist(aws_db_instance.db.*.status, aws_db_instance.this.*.status), list("")), 0)}" 29 | db_instance_name = "${element(concat(coalescelist(aws_db_instance.db.*.name, aws_db_instance.this.*.name), list("")), 0)}" 30 | db_instance_username = "${element(concat(coalescelist(aws_db_instance.db.*.username, aws_db_instance.this.*.username), list("")), 0)}" 31 | db_instance_password = "${element(concat(coalescelist(aws_db_instance.db.*.password, aws_db_instance.this.*.password), list("")), 0)}" 32 | db_instance_port = "${element(concat(coalescelist(aws_db_instance.db.*.port, aws_db_instance.this.*.port), list("")), 0)}" 33 | } 34 | 35 | output "db_instance_address" { 36 | description = "The address of the RDS instance" 37 | value = "${local.db_instance_address}" 38 | } 39 | 40 | output "db_instance_arn" { 41 | description = "The ARN of the RDS instance" 42 | value = "${local.db_instance_arn}" 43 | } 44 | 45 | output "db_instance_availability_zone" { 46 | description = "The availability zone of the RDS instance" 47 | value = "${local.db_instance_availability_zone}" 48 | } 49 | 50 | output "db_instance_endpoint" { 51 | description = "The connection endpoint" 52 | value = "${local.db_instance_endpoint}" 53 | } 54 | 55 | output "db_instance_hosted_zone_id" { 56 | description = "The canonical hosted zone ID of the DB instance (to be used in a Route 53 Alias record)" 57 | value = "${local.db_instance_hosted_zone_id}" 58 | } 59 | 60 | output "db_instance_id" { 61 | description = "The RDS instance ID" 62 | value = "${local.db_instance_id}" 63 | } 64 | 65 | output "db_instance_resource_id" { 66 | description = "The RDS Resource ID of this instance" 67 | value = "${local.db_instance_resource_id}" 68 | } 69 | 70 | output "db_instance_status" { 71 | description = "The RDS instance status" 72 | value = "${local.db_instance_status}" 73 | } 74 | 75 | output "db_instance_name" { 76 | description = "The database name" 77 | value = "${local.db_instance_name}" 78 | } 79 | 80 | output "db_instance_username" { 81 | description = "The master username for the database" 82 | value = "${local.db_instance_username}" 83 | } 84 | 85 | output "db_instance_password" { 86 | description = "The database password (this password may be old, because Terraform doesn't track it after initial creation)" 87 | value = "${local.db_instance_password}" 88 | } 89 | 90 | output "db_instance_port" { 91 | description = "The database port" 92 | value = "${local.db_instance_port}" 93 | } 94 | -------------------------------------------------------------------------------- /rds/variables.tf: -------------------------------------------------------------------------------- 1 | variable "subnets" { 2 | type = "list" 3 | } 4 | 5 | variable "identifier" {} 6 | 7 | variable "storage_type" {} 8 | 9 | variable "allocated_storage" { 10 | type = "map" 11 | } 12 | 13 | variable "db_engine" {} 14 | 15 | variable "engine_version" {} 16 | 17 | variable "instance_class" { 18 | type = "map" 19 | } 20 | 21 | variable "db_username" {} 22 | 23 | variable "db_password" {} 24 | 25 | variable "sec_grp_rds" {} 26 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | variable "access_key" { 2 | description = "AWS ACCEE_KEY" 3 | } 4 | 5 | variable "secret_key" { 6 | description = "AWS SECRETE_KEY" 7 | } 8 | 9 | variable "aws_region" { 10 | description = "AWS region to launch servers." 11 | default = "us-west-2" 12 | } 13 | 14 | variable "cidr_block" { 15 | description = "CIDR for the whole VPC" 16 | 17 | default = { 18 | prod = "10.10.0.0/16" 19 | dev = "10.20.0.0/16" 20 | } 21 | } 22 | 23 | variable "eks_cluster_name" { 24 | description = "cluster name" 25 | default = "terra" 26 | } 27 | 28 | variable "identifier" { 29 | description = "Identifier for DB" 30 | default = "terra-db" 31 | } 32 | 33 | variable "storage_type" { 34 | description = "Type of the storage ssd or magnetic" 35 | default = "gp2" 36 | } 37 | 38 | variable "allocated_storage" { 39 | description = "ammount of storage allocated in GB" 40 | 41 | default = { 42 | prod = "100" 43 | dev = "10" 44 | } 45 | } 46 | 47 | variable "db_engine" { 48 | description = " DB engine" 49 | default = "postgres" 50 | } 51 | 52 | variable "engine_version" { 53 | description = "DB engine version" 54 | default = "9.6.6" 55 | } 56 | 57 | variable "instance_class" { 58 | description = "mashine type to be used" 59 | 60 | default = { 61 | prod = "db.t2.micro" 62 | dev = "db.t2.large" 63 | } 64 | } 65 | 66 | variable "db_username" { 67 | description = "db admin user" 68 | default = "root" 69 | } 70 | 71 | variable "db_password" { 72 | description = "password, provide through your tfvars file" 73 | } 74 | -------------------------------------------------------------------------------- /yaml/eks-admin-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: eks-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: eks-admin 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /yaml/eks-admin-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: eks-admin 5 | namespace: kube-system 6 | --------------------------------------------------------------------------------