├── .github ├── CODEOWNERS ├── auto-merge.yml └── stale.yml ├── .gitignore ├── README.md ├── acm ├── 00-variable.tf ├── 03-locals.tf ├── 05-provider.tf ├── 52-acm.tf ├── 53-route53.tf └── 99-output.tf ├── bastion ├── 00-variable.tf ├── 02-data.tf ├── 04-backend.tf ├── 05-provider.tf ├── 20-main.tf ├── 99-output.tf └── template │ └── setup.sh ├── eks-charts ├── 00-variable.tf ├── 00-variable.tf.json ├── 02-data.tf ├── 03-locals.tf ├── 04-backend.tf ├── 05-provider.tf ├── 30-kube-system.tf ├── 33-kube-ingress.tf ├── 37-monitor.tf ├── 38-logging.tf ├── 39-keycloak.tf ├── 42-istio.tf ├── 53-argo-cd.tf ├── 55-devops.tf ├── 58-jenkins.tf ├── 92-default.tf ├── 92-role-binding.tf ├── 95-demo-dev.tf ├── 95-demo-prod.tf ├── bump.py ├── template │ ├── jenkins-env.groovy │ ├── keycloak-realm.json │ └── kube-config.yaml └── values │ ├── argo │ ├── argo-cd.yaml │ ├── argo-events.yaml │ ├── argo-gatekeeper.yaml │ ├── argo-rollouts.yaml │ ├── argo.yaml │ └── http-benchmark.yaml │ ├── consul │ ├── consul-gatekeeper.yaml │ └── consul.yaml │ ├── default │ └── cluster-overprovisioner.yaml │ ├── devops │ ├── archiva.yaml │ ├── chartmuseum.yaml │ ├── docker-registry.yaml │ ├── harbor.yaml │ ├── sonarqube.yaml │ └── sonatype-nexus.yaml │ ├── istio │ ├── kiali-gatekeeper.yaml │ └── tracing-gatekeeper.yaml │ ├── jenkins │ ├── jenkins.yaml │ └── secret │ │ ├── jenkins.txt │ │ └── secret.txt │ ├── keycloak │ └── keycloak.yaml │ ├── kube-ingress │ ├── cert-manager-issuers.yaml │ ├── cert-manager.yaml │ ├── external-dns.yaml │ └── nginx-ingress.yaml │ ├── kube-system │ ├── cluster-autoscaler.yaml │ ├── efs-provisioner.yaml │ ├── k8s-spot-termination-handler.yaml │ ├── kube-state-metrics.yaml │ ├── kube2iam.yaml │ └── metrics-server.yaml │ ├── logging │ └── fluentd-elasticsearch.yaml │ ├── monitor │ ├── datadog.yaml │ ├── grafana.yaml │ ├── prometheus-adapter.yaml │ ├── prometheus-alert-rules.yaml │ ├── prometheus-operator.yaml │ └── prometheus.yaml │ └── weave │ ├── weave-scope-gatekeeper.yaml 
│ └── weave-scope.yaml ├── eks ├── 00-variable.tf ├── 02-data.tf ├── 03-locals.tf ├── 04-backend.tf ├── 05-provider.tf ├── 30-eks.tf ├── 50-worker.tf ├── 70-acm.tf ├── 70-buckets.tf ├── 70-efs.tf └── 99-output.tf ├── instance └── main.tf ├── lambda ├── 00-variable.tf ├── 02-data.tf ├── 04-backend.tf ├── 05-provider.tf ├── 20-api-gateway.tf ├── 33-iam-role.tf ├── 35-lambda.tf ├── 52-acm.tf ├── 53-route53.tf ├── 99-output.tf ├── package │ └── lambda.zip └── src │ ├── index.js │ └── package.json ├── replace.sh └── vpc ├── 00-variable.tf ├── 04-backend.tf ├── 05-provider.tf ├── 20-main.tf ├── 99-output.tf └── graph.svg /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @nalbam 2 | -------------------------------------------------------------------------------- /.github/auto-merge.yml: -------------------------------------------------------------------------------- 1 | # Configuration for probot-auto-merge - https://github.com/bobvanderlinden/probot-auto-merge 2 | 3 | updateBranch: true 4 | deleteBranchAfterMerge: true 5 | reportStatus: true 6 | 7 | minApprovals: 8 | COLLABORATOR: 0 9 | maxRequestedChanges: 10 | NONE: 0 11 | blockingLabels: 12 | - blocked 13 | 14 | # Will merge whenever the above conditions are met, but also 15 | # the owner has approved or merge label was added. 
16 | rules: 17 | - minApprovals: 18 | OWNER: 1 19 | - requiredLabels: 20 | - merge 21 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 60 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 7 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - pinned 8 | - security 9 | # Label to use when marking an issue as stale 10 | staleLabel: wontfix 11 | # Comment to post when marking an issue as stale. Set to `false` to disable 12 | markComment: > 13 | This issue has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions. 16 | # Comment to post when closing a stale issue. Set to `false` to disable 17 | closeComment: false 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Windows image file caches 2 | Thumbs.db 3 | ehthumbs.db 4 | 5 | # Folder config file 6 | Desktop.ini 7 | 8 | # Recycle Bin used on file shares 9 | $RECYCLE.BIN/ 10 | 11 | # Windows shortcuts 12 | *.lnk 13 | 14 | # Mac 15 | .DS_Store 16 | 17 | # Terraform 18 | .terraform/ 19 | .terraform.tfstate.lock.info 20 | terraform.tfstate.backup 21 | terraform.tfstate 22 | terraform.tfplan 23 | 24 | # Maven 25 | target/ 26 | build/ 27 | 28 | # JetBrains 29 | .idea/ 30 | *.iml 31 | 32 | # Temp 33 | .output/ 34 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # terraform-env-workshop 2 | 3 | ## clone 4 | 5 | ```bash 6 | git clone 
https://github.com/mzcdev/terraform-env-workshop 7 | 8 | cd terraform-env-workshop 9 | ``` 10 | 11 | ## aws credentials 12 | 13 | ```bash 14 | aws configure 15 | ``` 16 | 17 | ## setup 18 | 19 | ```bash 20 | # for ingress 21 | export ROOT_DOMAIN="" # nalbam.com 22 | export BASE_DOMAIN="" # demo.nalbam.com 23 | 24 | # for keycloak, jenkins, grafana, argo-cd 25 | export ADMIN_USERNAME="me@nalbam.com" 26 | export ADMIN_PASSWORD="CHANGE_ME" # set your own strong password; never commit real credentials 27 | 28 | # for keycloak 29 | # https://console.cloud.google.com/ : API 및 인증정보 > 사용자 인증 정보 > OAuth 2.0 클라이언트 ID 30 | # 승인된 리디렉션 URI : https://keycloak.${BASE_DOMAIN}/auth/realms/demo/broker/google/endpoint 31 | export GOOGLE_CLIENT_ID="GOOGLE_CLIENT_ID" 32 | export GOOGLE_CLIENT_SECRET="GOOGLE_CLIENT_SECRET" 33 | 34 | # for jenkins, alertmanager 35 | export SLACK_TOKEN="SLACK_TOKEN" 36 | 37 | # replace 38 | # create s3 bucket 39 | # create dynamodb table 40 | ./replace.sh 41 | ``` 42 | 43 | ## usage 44 | 45 | ### vpc 46 | 47 | ```bash 48 | cd ./vpc 49 | 50 | terraform init 51 | terraform plan 52 | terraform apply 53 | ``` 54 | 55 | ### bastion 56 | 57 | ```bash 58 | cd ./bastion 59 | 60 | terraform init 61 | terraform plan 62 | terraform apply 63 | ``` 64 | 65 | ### eks 66 | 67 | ```bash 68 | cd ./eks 69 | 70 | ../replace.sh # run once from the repo root if not already done in setup 71 | 72 | terraform init 73 | terraform plan 74 | terraform apply 75 | 76 | kubectl get no 77 | kubectl get ns 78 | kubectl get pod --all-namespaces 79 | kubectl get ing --all-namespaces 80 | ``` 81 | 82 | ### eks-charts 83 | 84 | ```bash 85 | cd ./eks-charts 86 | 87 | terraform init 88 | terraform plan 89 | terraform apply 90 | 91 | kubectl get no 92 | kubectl get ns 93 | kubectl get pod --all-namespaces 94 | kubectl get ing --all-namespaces 95 | ``` 96 | 97 | ### lambda api 98 | 99 | ```bash 100 | cd ./lambda 101 | 102 | terraform init 103 | terraform plan 104 | terraform apply 105 | 106 | curl -sL -X POST -d "{\"data\":\"ok\"}" ${invoke_url}/demo | jq . 
107 | ``` 108 | -------------------------------------------------------------------------------- /acm/00-variable.tf: -------------------------------------------------------------------------------- 1 | 2 | variable "region" { 3 | description = "생성될 리전." 4 | type = string 5 | default = "us-east-1" 6 | } 7 | 8 | variable "root_domain" { 9 | description = "Route53 에 등록된 루트 도메인 명" 10 | type = string 11 | default = "mzdev.be" 12 | } 13 | 14 | variable "domain_name" { 15 | description = "ACM 인증서를 생성할 도메인 명" 16 | type = string 17 | default = "demo-api-workshop.mzdev.be" 18 | } 19 | 20 | variable "acm_certificate" { 21 | description = "ACM 인증서 생성 여부" 22 | type = bool 23 | default = false 24 | } 25 | -------------------------------------------------------------------------------- /acm/03-locals.tf: -------------------------------------------------------------------------------- 1 | 2 | locals { 3 | domain_name = var.domain_name == "" ? var.root_domain : var.domain_name 4 | } 5 | 6 | locals { 7 | certificate_id = var.acm_certificate ? element(concat(aws_acm_certificate.cert.*.id, [""]), 0) : element(concat(data.aws_acm_certificate.this.*.id, [""]), 0) 8 | certificate_arn = var.acm_certificate ? element(concat(aws_acm_certificate.cert.*.arn, [""]), 0) : element(concat(data.aws_acm_certificate.this.*.arn, [""]), 0) 9 | } 10 | -------------------------------------------------------------------------------- /acm/05-provider.tf: -------------------------------------------------------------------------------- 1 | 2 | provider "aws" { 3 | region = var.region 4 | } 5 | -------------------------------------------------------------------------------- /acm/52-acm.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_acm_certificate" "cert" { 3 | count = var.acm_certificate ? 
1 : 0 4 | 5 | domain_name = local.domain_name 6 | 7 | subject_alternative_names = [ 8 | "*.${local.domain_name}" 9 | ] 10 | 11 | validation_method = "DNS" 12 | } 13 | 14 | resource "aws_route53_record" "cert" { 15 | count = var.acm_certificate ? 1 : 0 16 | 17 | zone_id = data.aws_route53_zone.this.id 18 | 19 | name = aws_acm_certificate.cert[0].domain_validation_options[0].resource_record_name 20 | type = aws_acm_certificate.cert[0].domain_validation_options[0].resource_record_type 21 | ttl = 60 22 | 23 | records = [ 24 | aws_acm_certificate.cert[0].domain_validation_options[0].resource_record_value, 25 | ] 26 | } 27 | 28 | resource "aws_acm_certificate_validation" "cert" { 29 | count = var.acm_certificate ? 1 : 0 30 | 31 | certificate_arn = aws_acm_certificate.cert[0].arn 32 | 33 | validation_record_fqdns = [ 34 | aws_route53_record.cert[0].fqdn, 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /acm/53-route53.tf: -------------------------------------------------------------------------------- 1 | 2 | data "aws_route53_zone" "this" { 3 | name = var.root_domain 4 | } 5 | 6 | data "aws_acm_certificate" "this" { 7 | count = var.acm_certificate ? 
0 : 1 8 | 9 | domain = local.domain_name 10 | statuses = [ 11 | "ISSUED", 12 | ] 13 | 14 | most_recent = true 15 | } 16 | -------------------------------------------------------------------------------- /acm/99-output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "zone_id" { 3 | value = data.aws_route53_zone.this.id 4 | } 5 | 6 | output "name" { 7 | value = data.aws_route53_zone.this.name 8 | } 9 | 10 | output "certificate_id" { 11 | value = local.certificate_id 12 | } 13 | 14 | output "certificate_arn" { 15 | value = local.certificate_arn 16 | } 17 | -------------------------------------------------------------------------------- /bastion/00-variable.tf: -------------------------------------------------------------------------------- 1 | # variable 2 | 3 | variable "region" { 4 | default = "ap-northeast-2" 5 | } 6 | 7 | variable "name" { 8 | default = "dev-bastion" 9 | } 10 | 11 | variable "administrator" { 12 | default = true 13 | } 14 | 15 | variable "allow_ip_address" { 16 | type = list(string) 17 | default = [ 18 | "0.0.0.0/0", 19 | # "106.244.127.8/32", # echo "$(curl -sL icanhazip.com)/32" 20 | ] 21 | } 22 | 23 | variable "key_name" { 24 | default = "nalbam-seoul" 25 | } 26 | -------------------------------------------------------------------------------- /bastion/02-data.tf: -------------------------------------------------------------------------------- 1 | # data 2 | 3 | data "aws_caller_identity" "current" { 4 | } 5 | 6 | data "template_file" "setup" { 7 | template = file("template/setup.sh") 8 | 9 | vars = { 10 | HOSTNAME = var.name 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /bastion/04-backend.tf: -------------------------------------------------------------------------------- 1 | # backend 2 | 3 | terraform { 4 | backend "s3" { 5 | region = "ap-northeast-2" 6 | bucket = "terraform-workshop-seoul" 7 | key = "bastion.tfstate" 8 | dynamodb_table 
= "terraform-workshop-seoul" 9 | encrypt = true 10 | } 11 | required_version = ">= 0.12" 12 | } 13 | 14 | # terraform { 15 | # backend "remote" { 16 | # organization = "workshop" 17 | # workspaces { 18 | # name = "dev-bastion" 19 | # } 20 | # } 21 | # } 22 | 23 | data "terraform_remote_state" "vpc" { 24 | backend = "s3" 25 | config = { 26 | region = "ap-northeast-2" 27 | bucket = "terraform-workshop-seoul" 28 | key = "vpc-demo.tfstate" 29 | } 30 | } 31 | 32 | # data "terraform_remote_state" "vpc" { 33 | # backend = "remote" 34 | # config = { 35 | # organization = "workshop" 36 | # workspaces = { 37 | # name = "dev-vpc-demo" 38 | # } 39 | # } 40 | # } 41 | -------------------------------------------------------------------------------- /bastion/05-provider.tf: -------------------------------------------------------------------------------- 1 | # provider 2 | 3 | provider "aws" { 4 | region = var.region 5 | } 6 | -------------------------------------------------------------------------------- /bastion/20-main.tf: -------------------------------------------------------------------------------- 1 | # bastion 2 | 3 | module "bastion" { 4 | source = "github.com/mzcdev/terraform-aws-bastion?ref=v0.12.16" 5 | 6 | name = var.name 7 | 8 | vpc_id = data.terraform_remote_state.vpc.outputs.vpc_id 9 | 10 | subnet_id = data.terraform_remote_state.vpc.outputs.public_subnet_ids[0] 11 | 12 | administrator = var.administrator 13 | 14 | allow_ip_address = var.allow_ip_address 15 | 16 | user_data = data.template_file.setup.rendered 17 | 18 | key_name = var.key_name 19 | } 20 | -------------------------------------------------------------------------------- /bastion/99-output.tf: -------------------------------------------------------------------------------- 1 | # output 2 | 3 | output "id" { 4 | value = module.bastion.id 5 | } 6 | 7 | output "key_name" { 8 | value = module.bastion.key_name 9 | } 10 | 11 | output "private_ip" { 12 | value = module.bastion.private_ip 13 | } 14 | 15 | 
output "public_ip" { 16 | value = module.bastion.public_ip 17 | } 18 | 19 | output "security_group_id" { 20 | value = module.bastion.security_group_id 21 | } 22 | -------------------------------------------------------------------------------- /bastion/template/setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Log everything we do. 4 | set -x 5 | exec > /var/log/user-data.log 2>&1 6 | 7 | hostname "${HOSTNAME}" 8 | 9 | rm -rf /etc/motd 10 | cat < /etc/motd 11 | 12 | ######################################################### 13 | # # 14 | # 모든 로그는 원격지 로그 서버에 저장되고 있습니다. # 15 | # 비인가자의 경우 접속을 해지하여 주시기 바랍니다. # 16 | # # 17 | ######################################################### 18 | 19 | >> ${HOSTNAME} << 20 | 21 | EOF 22 | 23 | runuser -l ec2-user -c "curl -sL https://raw.githubusercontent.com/opspresso/toaster/master/tools.sh | bash" 24 | runuser -l ec2-user -c "curl -sL https://raw.githubusercontent.com/opspresso/toaster/master/install.sh | bash" 25 | -------------------------------------------------------------------------------- /eks-charts/00-variable.tf: -------------------------------------------------------------------------------- 1 | # variable 2 | 3 | variable "region" { 4 | description = "생성될 리전을 입력 합니다. e.g: ap-northeast-2" 5 | default = "ap-northeast-2" 6 | } 7 | 8 | variable "cluster_name" { 9 | description = "EKS Cluster 이름을 입력합니다. e.g: eks-demo" 10 | default = "eks-demo" 11 | } 12 | 13 | variable "cluster_role" { 14 | description = "EKS Cluster 롤을 입력합니다. 
e.g: dev, stg, prod, devops" 15 | default = "devops" 16 | } 17 | 18 | variable "admin_username" { 19 | default = "ADMIN_USERNAME" 20 | } 21 | 22 | variable "admin_password" { 23 | default = "ADMIN_PASSWORD" 24 | } 25 | 26 | variable "root_domain" { 27 | default = "mzdev.be" 28 | } 29 | 30 | variable "base_domain" { 31 | default = "demo.mzdev.be" 32 | } 33 | 34 | variable "slack_token" { 35 | default = "SLACK_TOKEN" 36 | } 37 | 38 | # variable "google_client_id" { 39 | # default = "REPLACEME.apps.googleusercontent.com" 40 | # } 41 | 42 | # variable "google_client_secret" { 43 | # default = "REPLACEME" 44 | # } 45 | 46 | # variable "datadog_api_key" { 47 | # default = "REPLACEME" 48 | # } 49 | 50 | # variable "datadog_app_key" { 51 | # default = "REPLACEME" 52 | # } 53 | 54 | variable "jenkins_enabled" { 55 | default = true 56 | } 57 | 58 | variable "chartmuseum_enabled" { 59 | default = true 60 | } 61 | 62 | variable "registry_enabled" { 63 | default = true 64 | } 65 | 66 | variable "harbor_enabled" { 67 | default = false 68 | } 69 | 70 | variable "archiva_enabled" { 71 | default = false 72 | } 73 | 74 | variable "nexus_enabled" { 75 | default = false 76 | } 77 | 78 | variable "sonarqube_enabled" { 79 | default = false 80 | } 81 | -------------------------------------------------------------------------------- /eks-charts/00-variable.tf.json: -------------------------------------------------------------------------------- 1 | { 2 | "variable": { 3 | "argo_argo": { 4 | "default": "0.9.8", 5 | "description": "argo/argo" 6 | }, 7 | "argo_argo_cd": { 8 | "default": "2.5.0", 9 | "description": "argo/argo-cd" 10 | }, 11 | "argo_argo_events": { 12 | "default": "0.14.0", 13 | "description": "argo/argo-events" 14 | }, 15 | "argo_argo_rollouts": { 16 | "default": "0.3.0", 17 | "description": "argo/argo-rollouts" 18 | }, 19 | "bitnami_external_dns": { 20 | "default": "3.2.3", 21 | "description": "bitnami/external-dns" 22 | }, 23 | "codecentric_keycloak": { 24 | "default": 
"8.2.2", 25 | "description": "codecentric/keycloak" 26 | }, 27 | "gabibbo97_keycloak_gatekeeper": { 28 | "default": "3.3.1", 29 | "description": "gabibbo97/keycloak-gatekeeper" 30 | }, 31 | "harbor_harbor": { 32 | "default": "1.4.0", 33 | "description": "harbor/harbor" 34 | }, 35 | "jetstack_cert_manager": { 36 | "default": "v0.15.1", 37 | "description": "jetstack/cert-manager" 38 | }, 39 | "kiwigrid_fluentd_elasticsearch": { 40 | "default": "9.4.0", 41 | "description": "kiwigrid/fluentd-elasticsearch" 42 | }, 43 | "oteemo_sonarqube": { 44 | "default": "6.4.2", 45 | "description": "oteemo/sonarqube" 46 | }, 47 | "oteemo_sonatype_nexus": { 48 | "default": "2.3.0", 49 | "description": "oteemo/sonatype-nexus" 50 | }, 51 | "stable_chartmuseum": { 52 | "default": "2.13.0", 53 | "description": "stable/chartmuseum" 54 | }, 55 | "stable_cluster_autoscaler": { 56 | "default": "7.3.2", 57 | "description": "stable/cluster-autoscaler" 58 | }, 59 | "stable_cluster_overprovisioner": { 60 | "default": "0.4.0", 61 | "description": "stable/cluster-overprovisioner" 62 | }, 63 | "stable_datadog": { 64 | "default": "2.3.18", 65 | "description": "stable/datadog" 66 | }, 67 | "stable_docker_registry": { 68 | "default": "1.9.3", 69 | "description": "stable/docker-registry" 70 | }, 71 | "stable_efs_provisioner": { 72 | "default": "0.12.0", 73 | "description": "stable/efs-provisioner" 74 | }, 75 | "stable_grafana": { 76 | "default": "5.3.0", 77 | "description": "stable/grafana" 78 | }, 79 | "stable_jenkins": { 80 | "default": "2.1.0", 81 | "description": "stable/jenkins" 82 | }, 83 | "stable_k8s_spot_termination_handler": { 84 | "default": "1.4.9", 85 | "description": "stable/k8s-spot-termination-handler" 86 | }, 87 | "stable_kube2iam": { 88 | "default": "2.5.0", 89 | "description": "stable/kube2iam" 90 | }, 91 | "stable_kube_state_metrics": { 92 | "default": "2.8.11", 93 | "description": "stable/kube-state-metrics" 94 | }, 95 | "stable_metrics_server": { 96 | "default": "2.11.1", 97 | 
"description": "stable/metrics-server" 98 | }, 99 | "stable_nginx_ingress": { 100 | "default": "1.40.2", 101 | "description": "stable/nginx-ingress" 102 | }, 103 | "stable_prometheus_adapter": { 104 | "default": "2.4.0", 105 | "description": "stable/prometheus-adapter" 106 | }, 107 | "stable_prometheus_operator": { 108 | "default": "8.15.6", 109 | "description": "stable/prometheus-operator" 110 | }, 111 | "stable_weave_scope": { 112 | "default": "1.1.10", 113 | "description": "stable/weave-scope" 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /eks-charts/02-data.tf: -------------------------------------------------------------------------------- 1 | # data 2 | 3 | data "aws_caller_identity" "current" { 4 | } 5 | 6 | data "aws_eks_cluster" "cluster" { 7 | name = var.cluster_name 8 | } 9 | 10 | data "aws_eks_cluster_auth" "cluster" { 11 | name = data.aws_eks_cluster.cluster.name 12 | } 13 | 14 | data "template_file" "jenkins-env" { 15 | template = file("${path.module}/template/jenkins-env.groovy") 16 | vars = { 17 | cluster = var.cluster_name 18 | role = var.cluster_role 19 | base_domain = var.base_domain 20 | slack_token = var.slack_token 21 | jenkins = local.domain.jenkins 22 | harbor = local.domain.harbor 23 | archiva = local.domain.archiva 24 | chartmuseum = local.domain.chartmuseum 25 | registry = local.domain.registry 26 | nexus = local.domain.nexus 27 | sonarqube = local.domain.sonarqube 28 | } 29 | } 30 | 31 | # data "template_file" "keycloak-realm" { 32 | # template = file("${path.module}/template/keycloak-realm.json") 33 | # vars = { 34 | # google_client_id = var.google_client_id 35 | # google_client_secret = var.google_client_secret 36 | # } 37 | # } 38 | 39 | # data "template_file" "kube-config" { 40 | # template = file("${path.module}/template/kube-config.yaml") 41 | # vars = { 42 | # cluster_url = data.aws_eks_cluster.cluster.endpoint 43 | # cluster_auth_data = 
data.aws_eks_cluster.cluster.certificate_authority.0.data 44 | # cluster_token = data.aws_eks_cluster_auth.cluster.token 45 | # } 46 | # } 47 | -------------------------------------------------------------------------------- /eks-charts/03-locals.tf: -------------------------------------------------------------------------------- 1 | # locals 2 | 3 | locals { 4 | account_id = data.aws_caller_identity.current.account_id 5 | } 6 | 7 | locals { 8 | acm_arn = element(concat(data.terraform_remote_state.eks.outputs.acm_arn, [""]), 0) 9 | acm_root = data.terraform_remote_state.eks.outputs.acm_root 10 | acm_base = data.terraform_remote_state.eks.outputs.acm_base 11 | 12 | host_name = local.acm_arn != "" ? "*.${local.acm_base}" : "*.${var.base_domain}" 13 | 14 | efs_id = element(concat(data.terraform_remote_state.eks.outputs.efs_ids, [""]), 0) 15 | 16 | storage_class = local.efs_id == "" ? "default" : "efs" 17 | 18 | slack_url = format("%s/%s", "https://hooks.slack.com/services", var.slack_token) 19 | } 20 | 21 | locals { 22 | domain = { 23 | jenkins = var.jenkins_enabled ? "jenkins.${var.base_domain}" : "" 24 | chartmuseum = var.chartmuseum_enabled ? "chartmuseum.${var.base_domain}" : "" 25 | registry = var.registry_enabled ? "registry.${var.base_domain}" : "" 26 | harbor = var.harbor_enabled ? "harbor-core.${var.base_domain}" : "" 27 | archiva = var.archiva_enabled ? "archiva.${var.base_domain}" : "" 28 | nexus = var.nexus_enabled ? "nexus.${var.base_domain}" : "" 29 | sonarqube = var.sonarqube_enabled ? 
"sonarqube.${var.base_domain}" : "" 30 | } 31 | } 32 | 33 | # resource "local_file" "kube-config" { 34 | # content = data.template_file.kube-config.rendered 35 | # filename = "${path.module}/.kube/config" 36 | # } 37 | -------------------------------------------------------------------------------- /eks-charts/04-backend.tf: -------------------------------------------------------------------------------- 1 | # backend 2 | 3 | terraform { 4 | backend "s3" { 5 | region = "ap-northeast-2" 6 | bucket = "terraform-workshop-mzcdev" 7 | key = "eks-demo-charts.tfstate" 8 | dynamodb_table = "terraform-workshop-mzcdev" 9 | encrypt = true 10 | } 11 | required_version = ">= 0.12" 12 | } 13 | 14 | # terraform { 15 | # backend "remote" { 16 | # organization = "mzcdev" 17 | # workspaces { 18 | # name = "dev-eks-demo-charts" 19 | # } 20 | # } 21 | # } 22 | 23 | data "terraform_remote_state" "eks" { 24 | backend = "s3" 25 | config = { 26 | region = "ap-northeast-2" 27 | bucket = "terraform-workshop-mzcdev" 28 | key = "eks-demo.tfstate" 29 | } 30 | } 31 | 32 | # data "terraform_remote_state" "eks" { 33 | # backend = "remote" 34 | # config = { 35 | # organization = "mzcdev" 36 | # workspaces = { 37 | # name = "dev-eks-demo" 38 | # } 39 | # } 40 | # } 41 | -------------------------------------------------------------------------------- /eks-charts/05-provider.tf: -------------------------------------------------------------------------------- 1 | # provider 2 | 3 | provider "aws" { 4 | region = var.region 5 | } 6 | 7 | provider "kubernetes" { 8 | load_config_file = false 9 | 10 | host = data.aws_eks_cluster.cluster.endpoint 11 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) 12 | token = data.aws_eks_cluster_auth.cluster.token 13 | } 14 | 15 | provider "helm" { 16 | kubernetes { 17 | load_config_file = false 18 | 19 | host = data.aws_eks_cluster.cluster.endpoint 20 | cluster_ca_certificate = 
base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) 21 | token = data.aws_eks_cluster_auth.cluster.token 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /eks-charts/30-kube-system.tf: -------------------------------------------------------------------------------- 1 | # kube-system 2 | 3 | resource "helm_release" "cluster-autoscaler" { 4 | repository = "https://kubernetes-charts.storage.googleapis.com" 5 | chart = "cluster-autoscaler" 6 | version = var.stable_cluster_autoscaler 7 | 8 | namespace = "kube-system" 9 | name = "cluster-autoscaler" 10 | 11 | values = [ 12 | file("./values/kube-system/cluster-autoscaler.yaml") 13 | ] 14 | 15 | set { 16 | name = "awsRegion" 17 | value = var.region 18 | } 19 | 20 | set { 21 | name = "autoDiscovery.clusterName" 22 | value = var.cluster_name 23 | } 24 | 25 | wait = false 26 | } 27 | 28 | resource "helm_release" "efs-provisioner" { 29 | repository = "https://kubernetes-charts.storage.googleapis.com" 30 | chart = "efs-provisioner" 31 | version = var.stable_efs_provisioner 32 | 33 | namespace = "kube-system" 34 | name = "efs-provisioner" 35 | 36 | values = [ 37 | file("./values/kube-system/efs-provisioner.yaml") 38 | ] 39 | 40 | set { 41 | name = "efsProvisioner.awsRegion" 42 | value = var.region 43 | } 44 | 45 | set { 46 | name = "efsProvisioner.efsFileSystemId" 47 | value = local.efs_id 48 | } 49 | } 50 | 51 | resource "helm_release" "k8s-spot-termination-handler" { 52 | repository = "https://kubernetes-charts.storage.googleapis.com" 53 | chart = "k8s-spot-termination-handler" 54 | version = var.stable_k8s_spot_termination_handler 55 | 56 | namespace = "kube-system" 57 | name = "k8s-spot-termination-handler" 58 | 59 | values = [ 60 | file("./values/kube-system/k8s-spot-termination-handler.yaml") 61 | ] 62 | 63 | set { 64 | name = "clusterName" 65 | value = var.cluster_name 66 | } 67 | 68 | set { 69 | name = "slackUrl" 70 | value = local.slack_url 71 | } 72 
| 73 | wait = false 74 | } 75 | 76 | resource "helm_release" "kube2iam" { 77 | repository = "https://kubernetes-charts.storage.googleapis.com" 78 | chart = "kube2iam" 79 | version = var.stable_kube2iam 80 | 81 | namespace = "kube-system" 82 | name = "kube2iam" 83 | 84 | values = [ 85 | file("./values/kube-system/kube2iam.yaml") 86 | ] 87 | 88 | set { 89 | name = "awsRegion" 90 | value = var.region 91 | } 92 | 93 | wait = false 94 | } 95 | 96 | resource "helm_release" "metrics-server" { 97 | repository = "https://kubernetes-charts.storage.googleapis.com" 98 | chart = "metrics-server" 99 | version = var.stable_metrics_server 100 | 101 | namespace = "kube-system" 102 | name = "metrics-server" 103 | 104 | values = [ 105 | file("./values/kube-system/metrics-server.yaml") 106 | ] 107 | 108 | wait = false 109 | } 110 | -------------------------------------------------------------------------------- /eks-charts/33-kube-ingress.tf: -------------------------------------------------------------------------------- 1 | # kube-ingress 2 | 3 | resource "helm_release" "nginx-ingress" { 4 | repository = "https://kubernetes-charts.storage.googleapis.com" 5 | chart = "nginx-ingress" 6 | version = var.stable_nginx_ingress 7 | 8 | namespace = "kube-ingress" 9 | name = "nginx-ingress" 10 | 11 | values = [ 12 | file("./values/kube-ingress/nginx-ingress.yaml") 13 | ] 14 | 15 | set { 16 | name = "controller.service.annotations.external-dns\\.alpha\\.kubernetes\\.io/hostname" 17 | value = local.host_name 18 | } 19 | 20 | set { 21 | name = "controller.service.annotations.service\\.beta\\.kubernetes\\.io/aws-load-balancer-ssl-cert" 22 | value = local.acm_arn 23 | } 24 | 25 | wait = false 26 | 27 | create_namespace = true 28 | 29 | depends_on = [ 30 | helm_release.prometheus-operator, 31 | ] 32 | } 33 | 34 | resource "helm_release" "external-dns" { 35 | repository = "https://charts.bitnami.com/bitnami" 36 | chart = "external-dns" 37 | version = var.bitnami_external_dns 38 | 39 | namespace = 
"kube-ingress" 40 | name = "external-dns" 41 | 42 | values = [ 43 | file("./values/kube-ingress/external-dns.yaml") 44 | ] 45 | 46 | wait = false 47 | 48 | create_namespace = true 49 | } 50 | 51 | resource "helm_release" "cert-manager" { 52 | count = local.acm_arn == "" ? 1 : 0 53 | 54 | repository = "https://charts.jetstack.io" 55 | chart = "cert-manager" 56 | version = var.jetstack_cert_manager 57 | 58 | namespace = "kube-ingress" 59 | name = "cert-manager" 60 | 61 | values = [ 62 | file("./values/kube-ingress/cert-manager.yaml") 63 | ] 64 | 65 | create_namespace = true 66 | 67 | depends_on = [ 68 | helm_release.prometheus-operator, 69 | ] 70 | } 71 | 72 | resource "helm_release" "cert-manager-issuers" { 73 | count = local.acm_arn == "" ? 1 : 0 74 | 75 | repository = "https://kubernetes-charts-incubator.storage.googleapis.com" 76 | chart = "raw" 77 | 78 | namespace = "kube-ingress" 79 | name = "cert-manager-issuers" 80 | 81 | values = [ 82 | file("./values/kube-ingress/cert-manager-issuers.yaml") 83 | ] 84 | 85 | wait = false 86 | 87 | create_namespace = true 88 | 89 | depends_on = [ 90 | helm_release.cert-manager, 91 | ] 92 | } 93 | -------------------------------------------------------------------------------- /eks-charts/37-monitor.tf: -------------------------------------------------------------------------------- 1 | # monitor 2 | 3 | resource "helm_release" "grafana" { 4 | repository = "https://kubernetes-charts.storage.googleapis.com" 5 | chart = "grafana" 6 | version = var.stable_grafana 7 | 8 | namespace = "monitor" 9 | name = "grafana" 10 | 11 | values = [ 12 | file("./values/monitor/grafana.yaml") 13 | ] 14 | 15 | set { 16 | name = "adminUser" 17 | value = var.admin_username 18 | } 19 | 20 | set { 21 | name = "adminPassword" 22 | value = var.admin_password 23 | } 24 | 25 | set { 26 | name = "persistence.enabled" 27 | value = true 28 | } 29 | 30 | set { 31 | name = "persistence.storageClassName" 32 | value = local.storage_class 33 | } 34 | 35 | wait = 
false 36 | 37 | create_namespace = true 38 | 39 | depends_on = [ 40 | helm_release.efs-provisioner, 41 | ] 42 | } 43 | 44 | resource "helm_release" "prometheus-adapter" { 45 | repository = "https://kubernetes-charts.storage.googleapis.com" 46 | chart = "prometheus-adapter" 47 | version = var.stable_prometheus_adapter 48 | 49 | namespace = "monitor" 50 | name = "prometheus-adapter" 51 | 52 | values = [ 53 | file("./values/monitor/prometheus-adapter.yaml") 54 | ] 55 | 56 | wait = false 57 | 58 | create_namespace = true 59 | } 60 | 61 | resource "helm_release" "prometheus-operator" { 62 | repository = "https://kubernetes-charts.storage.googleapis.com" 63 | chart = "prometheus-operator" 64 | version = var.stable_prometheus_operator 65 | 66 | namespace = "monitor" 67 | name = "prometheus-operator" 68 | 69 | values = [ 70 | file("./values/monitor/prometheus-operator.yaml") 71 | ] 72 | 73 | set { 74 | name = "prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName" 75 | value = local.storage_class 76 | } 77 | 78 | set { 79 | name = "alertmanager.config.global.slack_api_url" 80 | value = local.slack_url 81 | } 82 | 83 | create_namespace = true 84 | 85 | depends_on = [ 86 | helm_release.efs-provisioner, 87 | ] 88 | } 89 | 90 | resource "helm_release" "prometheus-alert-rules" { 91 | repository = "https://kubernetes-charts-incubator.storage.googleapis.com" 92 | chart = "raw" 93 | 94 | namespace = "monitor" 95 | name = "prometheus-alert-rules" 96 | 97 | values = [ 98 | file("./values/monitor/prometheus-alert-rules.yaml") 99 | ] 100 | 101 | wait = false 102 | 103 | create_namespace = true 104 | 105 | depends_on = [ 106 | helm_release.prometheus-operator, 107 | ] 108 | } 109 | -------------------------------------------------------------------------------- /eks-charts/38-logging.tf: -------------------------------------------------------------------------------- 1 | # logging 2 | 3 | # resource "helm_release" "fluentd-elasticsearch" { 4 | # repository = 
"https://kiwigrid.github.io" 5 | # chart = "fluentd-elasticsearch" 6 | # version = var.kiwigrid_fluentd_elasticsearch 7 | 8 | # namespace = "logging" 9 | # name = "fluentd-elasticsearch" 10 | 11 | # values = [ 12 | # file("./values/logging/fluentd-elasticsearch.yaml") 13 | # ] 14 | 15 | # wait = false 16 | 17 | # create_namespace = true 18 | # } 19 | -------------------------------------------------------------------------------- /eks-charts/39-keycloak.tf: -------------------------------------------------------------------------------- 1 | # keycloak 2 | 3 | resource "kubernetes_namespace" "keycloak" { 4 | metadata { 5 | name = "keycloak" 6 | } 7 | } 8 | 9 | resource "kubernetes_secret" "keycloak-realm" { 10 | metadata { 11 | namespace = "keycloak" 12 | name = "realm-demo-secret" 13 | } 14 | 15 | type = "Opaque" 16 | 17 | data = { 18 | "demo.json" = file("${path.module}/template/keycloak-realm.json") 19 | # "demo.json" = data.template_file.keycloak-realm.rendered 20 | } 21 | 22 | depends_on = [ 23 | kubernetes_namespace.keycloak, 24 | ] 25 | } 26 | 27 | resource "helm_release" "keycloak" { 28 | repository = "https://codecentric.github.io/helm-charts" 29 | chart = "keycloak" 30 | version = var.codecentric_keycloak 31 | 32 | namespace = "keycloak" 33 | name = "keycloak" 34 | 35 | values = [ 36 | file("./values/keycloak/keycloak.yaml") 37 | ] 38 | 39 | set { 40 | name = "keycloak.replicas" 41 | value = 2 42 | } 43 | 44 | set { 45 | name = "keycloak.username" 46 | value = var.admin_username 47 | } 48 | 49 | set { 50 | name = "keycloak.password" 51 | value = var.admin_password 52 | } 53 | 54 | set { 55 | name = "postgresql.persistence.storageClass" 56 | value = local.storage_class 57 | } 58 | 59 | depends_on = [ 60 | kubernetes_secret.keycloak-realm, 61 | helm_release.efs-provisioner, 62 | helm_release.prometheus-operator, 63 | ] 64 | } 65 | -------------------------------------------------------------------------------- /eks-charts/42-istio.tf: 
-------------------------------------------------------------------------------- 1 | # istio 2 | 3 | # istioctl manifest apply --set profile=demo --set values.kiali.dashboard.auth.strategy=anonymous 4 | 5 | variable "kiali_gatekeeper" { 6 | default = false 7 | } 8 | 9 | variable "tracing_gatekeeper" { 10 | default = false 11 | } 12 | 13 | resource "helm_release" "kiali-gatekeeper" { 14 | count = var.kiali_gatekeeper ? 1 : 0 15 | 16 | repository = "https://gabibbo97.github.io/charts/" 17 | chart = "keycloak-gatekeeper" 18 | version = var.gabibbo97_keycloak_gatekeeper 19 | 20 | namespace = "istio-system" 21 | name = "kiali-gatekeeper" 22 | 23 | values = [ 24 | file("./values/istio/kiali-gatekeeper.yaml") 25 | ] 26 | 27 | wait = false 28 | 29 | create_namespace = true 30 | 31 | depends_on = [ 32 | helm_release.keycloak, 33 | ] 34 | } 35 | 36 | resource "helm_release" "tracing-gatekeeper" { 37 | count = var.tracing_gatekeeper ? 1 : 0 38 | 39 | repository = "https://gabibbo97.github.io/charts/" 40 | chart = "keycloak-gatekeeper" 41 | version = var.gabibbo97_keycloak_gatekeeper 42 | 43 | namespace = "istio-system" 44 | name = "tracing-gatekeeper" 45 | 46 | values = [ 47 | file("./values/istio/tracing-gatekeeper.yaml") 48 | ] 49 | 50 | wait = false 51 | 52 | create_namespace = true 53 | 54 | depends_on = [ 55 | helm_release.keycloak, 56 | ] 57 | } 58 | -------------------------------------------------------------------------------- /eks-charts/53-argo-cd.tf: -------------------------------------------------------------------------------- 1 | # argo-cd & argo-rollouts 2 | 3 | resource "helm_release" "argo-rollouts" { 4 | repository = "https://argoproj.github.io/argo-helm" 5 | chart = "argo-rollouts" 6 | version = var.argo_argo_rollouts 7 | 8 | namespace = "argo-rollouts" 9 | name = "argo-rollouts" 10 | 11 | values = [ 12 | file("./values/argo/argo-rollouts.yaml") 13 | ] 14 | 15 | create_namespace = true 16 | } 17 | 18 | resource "helm_release" "argo-cd" { 19 | repository = 
"https://argoproj.github.io/argo-helm" 20 | chart = "argo-cd" 21 | version = var.argo_argo_cd 22 | 23 | namespace = "argo-cd" 24 | name = "argocd" 25 | 26 | values = [ 27 | file("./values/argo/argo-cd.yaml") 28 | ] 29 | 30 | wait = false 31 | 32 | create_namespace = true 33 | 34 | depends_on = [ 35 | helm_release.prometheus-operator, 36 | helm_release.argo-rollouts, 37 | ] 38 | } 39 | -------------------------------------------------------------------------------- /eks-charts/55-devops.tf: -------------------------------------------------------------------------------- 1 | # devops 2 | 3 | resource "kubernetes_namespace" "devops" { 4 | count = var.cluster_role == "devops" ? 1 : 0 5 | 6 | metadata { 7 | name = "devops" 8 | } 9 | } 10 | 11 | resource "helm_release" "chartmuseum" { 12 | count = var.cluster_role == "devops" ? var.chartmuseum_enabled ? 1 : 0 : 0 13 | 14 | repository = "https://kubernetes-charts.storage.googleapis.com" 15 | chart = "chartmuseum" 16 | version = var.stable_chartmuseum 17 | 18 | namespace = "devops" 19 | name = "chartmuseum" 20 | 21 | values = [ 22 | file("./values/devops/chartmuseum.yaml") 23 | ] 24 | 25 | set { 26 | name = "env.open.STORAGE_AMAZON_BUCKET" 27 | value = "${var.cluster_name}-chartmuseum-${local.account_id}" 28 | } 29 | 30 | set { 31 | name = "env.open.STORAGE_AMAZON_REGION" 32 | value = var.region 33 | } 34 | 35 | wait = false 36 | 37 | depends_on = [ 38 | kubernetes_namespace.devops, 39 | helm_release.efs-provisioner, 40 | ] 41 | } 42 | 43 | resource "helm_release" "docker-registry" { 44 | count = var.cluster_role == "devops" ? var.registry_enabled ? 
1 : 0 : 0 45 | 46 | repository = "https://kubernetes-charts.storage.googleapis.com" 47 | chart = "docker-registry" 48 | version = var.stable_docker_registry 49 | 50 | namespace = "devops" 51 | name = "docker-registry" 52 | 53 | values = [ 54 | file("./values/devops/docker-registry.yaml") 55 | ] 56 | 57 | set { 58 | name = "s3.bucket" 59 | value = "${var.cluster_name}-chartmuseum-${local.account_id}" 60 | } 61 | 62 | set { 63 | name = "s3.region" 64 | value = var.region 65 | } 66 | 67 | wait = false 68 | 69 | depends_on = [ 70 | kubernetes_namespace.devops, 71 | helm_release.efs-provisioner, 72 | ] 73 | } 74 | 75 | resource "helm_release" "archiva" { 76 | count = var.cluster_role == "devops" ? var.archiva_enabled ? 1 : 0 : 0 77 | 78 | repository = "https://xetus-oss.github.io/helm-charts/" 79 | chart = "xetusoss-archiva" 80 | version = "0.1.8" 81 | 82 | namespace = "devops" 83 | name = "archiva" 84 | 85 | values = [ 86 | file("./values/devops/archiva.yaml") 87 | ] 88 | 89 | set { 90 | name = "persistence.storageClass" 91 | value = local.storage_class 92 | } 93 | 94 | wait = false 95 | 96 | depends_on = [ 97 | kubernetes_namespace.devops, 98 | helm_release.efs-provisioner, 99 | ] 100 | } 101 | 102 | resource "helm_release" "sonarqube" { 103 | count = var.cluster_role == "devops" ? var.sonarqube_enabled ? 
1 : 0 : 0 104 | 105 | repository = "https://oteemo.github.io/charts" 106 | chart = "sonarqube" 107 | version = var.oteemo_sonarqube 108 | 109 | namespace = "devops" 110 | name = "sonarqube" 111 | 112 | values = [ 113 | file("./values/devops/sonarqube.yaml") 114 | ] 115 | 116 | set { 117 | name = "persistence.storageClass" 118 | value = local.storage_class 119 | } 120 | 121 | set { 122 | name = "postgresql.persistence.storageClass" 123 | value = local.storage_class 124 | } 125 | 126 | wait = false 127 | 128 | depends_on = [ 129 | kubernetes_namespace.devops, 130 | helm_release.efs-provisioner, 131 | ] 132 | } 133 | 134 | resource "helm_release" "sonatype-nexus" { 135 | count = var.cluster_role == "devops" ? var.nexus_enabled ? 1 : 0 : 0 136 | 137 | repository = "https://oteemo.github.io/charts" 138 | chart = "sonatype-nexus" 139 | version = var.oteemo_sonatype_nexus 140 | 141 | namespace = "devops" 142 | name = "sonatype-nexus" 143 | 144 | values = [ 145 | file("./values/devops/sonatype-nexus.yaml") 146 | ] 147 | 148 | set { 149 | name = "persistence.storageClass" 150 | value = local.storage_class 151 | } 152 | 153 | wait = false 154 | 155 | depends_on = [ 156 | kubernetes_namespace.devops, 157 | helm_release.efs-provisioner, 158 | ] 159 | } 160 | -------------------------------------------------------------------------------- /eks-charts/58-jenkins.tf: -------------------------------------------------------------------------------- 1 | # jenkins 2 | 3 | resource "kubernetes_namespace" "jenkins" { 4 | count = var.cluster_role == "devops" ? var.jenkins_enabled ? 1 : 0 : 0 5 | 6 | metadata { 7 | name = "jenkins" 8 | } 9 | } 10 | 11 | resource "helm_release" "jenkins" { 12 | count = var.cluster_role == "devops" ? var.jenkins_enabled ? 
1 : 0 : 0 13 | 14 | repository = "https://kubernetes-charts.storage.googleapis.com" 15 | chart = "jenkins" 16 | version = var.stable_jenkins 17 | 18 | namespace = "jenkins" 19 | name = "jenkins" 20 | 21 | values = [ 22 | file("./values/jenkins/jenkins.yaml") 23 | ] 24 | 25 | set { 26 | name = "master.adminUser" 27 | value = var.admin_username 28 | } 29 | 30 | set { 31 | name = "master.adminPassword" 32 | value = var.admin_password 33 | } 34 | 35 | set { 36 | name = "persistence.storageClass" 37 | value = local.storage_class 38 | } 39 | 40 | wait = false 41 | 42 | depends_on = [ 43 | kubernetes_namespace.jenkins, 44 | helm_release.efs-provisioner, 45 | helm_release.prometheus-operator, 46 | helm_release.chartmuseum, 47 | helm_release.docker-registry, 48 | ] 49 | } 50 | 51 | resource "kubernetes_cluster_role_binding" "cluster-admin-jenkins-default" { 52 | count = var.cluster_role == "devops" ? var.jenkins_enabled ? 1 : 0 : 0 53 | 54 | metadata { 55 | name = "cluster-admin:jenkins:default" 56 | } 57 | 58 | role_ref { 59 | api_group = "rbac.authorization.k8s.io" 60 | kind = "ClusterRole" 61 | name = "cluster-admin" 62 | } 63 | 64 | subject { 65 | kind = "ServiceAccount" 66 | namespace = "jenkins" 67 | name = "default" 68 | } 69 | 70 | depends_on = [ 71 | helm_release.jenkins, 72 | ] 73 | } 74 | 75 | # https://github.com/jenkinsci/kubernetes-credentials-provider-plugin/tree/master/docs/examples 76 | 77 | resource "kubernetes_secret" "jenkins-secret-username" { 78 | count = var.cluster_role == "devops" ? var.jenkins_enabled ? 
1 : 0 : 0 79 | 80 | metadata { 81 | namespace = "jenkins" 82 | name = "jenkins-secret-username" 83 | 84 | labels = { 85 | "jenkins.io/credentials-type" : "usernamePassword" 86 | } 87 | 88 | annotations = { 89 | "jenkins.io/credentials-description" : "credentials from Kubernetes" 90 | } 91 | } 92 | 93 | type = "Opaque" 94 | 95 | data = { 96 | "username" = "username" 97 | "password" = "password" 98 | } 99 | 100 | depends_on = [ 101 | helm_release.jenkins, 102 | ] 103 | } 104 | 105 | resource "kubernetes_secret" "jenkins-secret-text" { 106 | count = var.cluster_role == "devops" ? var.jenkins_enabled ? 1 : 0 : 0 107 | 108 | metadata { 109 | namespace = "jenkins" 110 | name = "jenkins-secret-text" 111 | 112 | labels = { 113 | "jenkins.io/credentials-type" : "secretText" 114 | } 115 | 116 | annotations = { 117 | "jenkins.io/credentials-description" : "secret text credential from Kubernetes" 118 | } 119 | } 120 | 121 | type = "Opaque" 122 | 123 | data = { 124 | "text" = "Hello World!" 125 | } 126 | 127 | depends_on = [ 128 | helm_release.jenkins, 129 | ] 130 | } 131 | 132 | resource "kubernetes_secret" "jenkins-secret-file" { 133 | count = var.cluster_role == "devops" ? var.jenkins_enabled ? 1 : 0 : 0 134 | 135 | metadata { 136 | namespace = "jenkins" 137 | name = "jenkins-secret-file" 138 | 139 | labels = { 140 | "jenkins.io/credentials-type" : "secretFile" 141 | } 142 | 143 | annotations = { 144 | "jenkins.io/credentials-description" : "secret file credential from Kubernetes" 145 | } 146 | } 147 | 148 | type = "Opaque" 149 | 150 | data = { 151 | "filename" = "secret.txt" 152 | "data" = file("./values/jenkins/secret/secret.txt") 153 | } 154 | 155 | depends_on = [ 156 | helm_release.jenkins, 157 | ] 158 | } 159 | 160 | resource "kubernetes_secret" "jenkins-secret-private-key" { 161 | count = var.cluster_role == "devops" ? var.jenkins_enabled ? 
1 : 0 : 0 162 | 163 | metadata { 164 | namespace = "jenkins" 165 | name = "jenkins-secret-private-key" 166 | 167 | labels = { 168 | "jenkins.io/credentials-type" : "basicSSHUserPrivateKey" 169 | } 170 | 171 | annotations = { 172 | "jenkins.io/credentials-description" : "basic user private key credential from Kubernetes" 173 | } 174 | } 175 | 176 | type = "Opaque" 177 | 178 | data = { 179 | "username" = "jenkins" 180 | "privateKey" = file("./values/jenkins/secret/jenkins.txt") 181 | } 182 | 183 | depends_on = [ 184 | helm_release.jenkins, 185 | ] 186 | } 187 | -------------------------------------------------------------------------------- /eks-charts/92-default.tf: -------------------------------------------------------------------------------- 1 | # default 2 | 3 | resource "helm_release" "cluster-overprovisioner" { 4 | repository = "https://kubernetes-charts.storage.googleapis.com" 5 | chart = "cluster-overprovisioner" 6 | version = var.stable_cluster_overprovisioner 7 | 8 | namespace = "default" 9 | name = "cluster-overprovisioner" 10 | 11 | values = [ 12 | file("./values/default/cluster-overprovisioner.yaml"), 13 | yamlencode( 14 | { 15 | deployments = [ 16 | { 17 | name = "default" 18 | replicaCount = 0 19 | resources = { 20 | requests = { 21 | cpu = "1000m" 22 | memory = "1Gi" 23 | } 24 | } 25 | } 26 | ] 27 | } 28 | ) 29 | ] 30 | 31 | # set { 32 | # name = "deployments.0.replicaCount" 33 | # value = 1 34 | # } 35 | 36 | wait = false 37 | } 38 | 39 | # for jenkins 40 | resource "kubernetes_config_map" "jenkins-env" { 41 | metadata { 42 | namespace = "default" 43 | name = "jenkins-env" 44 | } 45 | 46 | data = { 47 | # "groovy" = file("${path.module}/template/jenkins-env.groovy") 48 | "groovy" = data.template_file.jenkins-env.rendered 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /eks-charts/92-role-binding.tf: -------------------------------------------------------------------------------- 1 | # role_binding 
2 | 3 | resource "kubernetes_cluster_role_binding" "iam-developer-edit" { 4 | metadata { 5 | name = "iam:developer:edit" 6 | } 7 | 8 | role_ref { 9 | api_group = "rbac.authorization.k8s.io" 10 | kind = "ClusterRole" 11 | name = "edit" 12 | } 13 | 14 | subject { 15 | kind = "User" 16 | name = "developer" 17 | } 18 | } 19 | 20 | resource "kubernetes_cluster_role_binding" "iam-readonly-view" { 21 | metadata { 22 | name = "iam:readonly:view" 23 | } 24 | 25 | role_ref { 26 | api_group = "rbac.authorization.k8s.io" 27 | kind = "ClusterRole" 28 | name = "view" 29 | } 30 | 31 | subject { 32 | kind = "User" 33 | name = "readonly" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /eks-charts/95-demo-dev.tf: -------------------------------------------------------------------------------- 1 | # demo-dev 2 | 3 | resource "kubernetes_namespace" "demo-dev" { 4 | metadata { 5 | labels = { 6 | istio-injection = "enabled" 7 | } 8 | 9 | name = "demo-dev" 10 | } 11 | } 12 | 13 | # for argo-rollouts 14 | resource "helm_release" "demo-dev-http-benchmark" { 15 | repository = "https://kubernetes-charts-incubator.storage.googleapis.com" 16 | chart = "raw" 17 | 18 | namespace = "demo-dev" 19 | name = "http-benchmark" 20 | 21 | values = [ 22 | file("./values/argo/http-benchmark.yaml") 23 | ] 24 | 25 | depends_on = [ 26 | kubernetes_namespace.demo-dev, 27 | helm_release.argo-rollouts, 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /eks-charts/95-demo-prod.tf: -------------------------------------------------------------------------------- 1 | # demo-prod 2 | 3 | resource "kubernetes_namespace" "demo-prod" { 4 | metadata { 5 | labels = { 6 | istio-injection = "enabled" 7 | } 8 | 9 | name = "demo-prod" 10 | } 11 | } 12 | 13 | # for argo-rollouts 14 | resource "helm_release" "demo-prod-http-benchmark" { 15 | repository = "https://kubernetes-charts-incubator.storage.googleapis.com" 16 | chart = 
"raw" 17 | 18 | namespace = "demo-prod" 19 | name = "http-benchmark" 20 | 21 | values = [ 22 | file("./values/argo/http-benchmark.yaml") 23 | ] 24 | 25 | depends_on = [ 26 | kubernetes_namespace.demo-prod, 27 | helm_release.argo-rollouts, 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /eks-charts/bump.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | import sys 6 | import json 7 | 8 | 9 | def get_charts(chart): 10 | # print("load_charts : {}".format(chart)) 11 | 12 | txt = os.popen("helm search hub '{}' -o json".format(chart)).read() 13 | 14 | return json.loads(txt) 15 | 16 | 17 | def replace(): 18 | filepath = "00-variable.tf.json" 19 | 20 | if os.path.exists(filepath): 21 | # print("filepath : {}".format(filepath)) 22 | 23 | doc = None 24 | 25 | with open(filepath, "r") as file: 26 | doc = json.load(file) 27 | 28 | for k in doc["variable"]: 29 | chart = doc["variable"][k]["description"].split("/") 30 | 31 | old_ver = doc["variable"][k]["default"] 32 | new_ver = "" 33 | 34 | charts = get_charts(chart[1]) 35 | 36 | for o in charts: 37 | # print(o["url"], o["version"]) 38 | 39 | url = "https://hub.helm.sh/charts/{}/{}".format(chart[0], chart[1]) 40 | 41 | if o["url"] == url: 42 | new_ver = o["version"] 43 | 44 | # replace 45 | if new_ver != "": 46 | if new_ver != old_ver: 47 | print( 48 | "{}/{} : {} -> {}".format( 49 | chart[0], chart[1], old_ver, new_ver 50 | ) 51 | ) 52 | else: 53 | print("{}/{} : {}".format(chart[0], chart[1], old_ver)) 54 | 55 | doc["variable"][k]["default"] = new_ver 56 | 57 | if doc != None: 58 | with open(filepath, "w") as file: 59 | json.dump(doc, file, sort_keys=True, indent=2) 60 | 61 | 62 | def main(): 63 | replace() 64 | 65 | 66 | if __name__ == "__main__": 67 | main() 68 | -------------------------------------------------------------------------------- 
/eks-charts/template/jenkins-env.groovy: -------------------------------------------------------------------------------- 1 | #!/usr/bin/groovy 2 | import groovy.transform.Field 3 | @Field 4 | def role = "${role}" 5 | @Field 6 | def cluster = "${cluster}" 7 | @Field 8 | def base_domain = "${base_domain}" 9 | @Field 10 | def slack_token = "${slack_token}" 11 | @Field 12 | def jenkins = "${jenkins}" 13 | @Field 14 | def chartmuseum = "${chartmuseum}" 15 | @Field 16 | def registry = "${registry}" 17 | @Field 18 | def harbor = "${harbor}" 19 | @Field 20 | def archiva = "${archiva}" 21 | @Field 22 | def nexus = "${nexus}" 23 | @Field 24 | def sonarqube = "${sonarqube}" 25 | return this 26 | -------------------------------------------------------------------------------- /eks-charts/template/kube-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: "cluster" 5 | cluster: 6 | server: "${cluster_url}" 7 | certificate-authority-data: "${cluster_auth_data}" 8 | api-version: v1 9 | users: 10 | - name: "user" 11 | user: 12 | token: "${cluster_token}" 13 | contexts: 14 | - name: "eks" 15 | context: 16 | user: "user" 17 | cluster: "cluster" 18 | current-context: "eks" 19 | -------------------------------------------------------------------------------- /eks-charts/values/argo/argo-cd.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: argocd 2 | 3 | installCRDs: true 4 | 5 | controller: 6 | metrics: 7 | enabled: true 8 | serviceMonitor: 9 | enabled: true 10 | additionalLabels: 11 | release: prometheus-operator 12 | podAnnotations: 13 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 14 | 15 | server: 16 | extraArgs: 17 | - --insecure 18 | 19 | metrics: 20 | enabled: true 21 | serviceMonitor: 22 | enabled: true 23 | additionalLabels: 24 | release: prometheus-operator 25 | podAnnotations: 26 | 
cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 27 | 28 | ingress: 29 | enabled: true 30 | annotations: 31 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 32 | kubernetes.io/ingress.class: nginx 33 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 34 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 35 | hosts: 36 | - "argocd.demo.mzdev.be" 37 | tls: 38 | - secretName: argocd-server-tls 39 | hosts: 40 | - "argocd.demo.mzdev.be" 41 | 42 | config: 43 | url: "https://argocd.demo.mzdev.be" 44 | 45 | repositories: | 46 | - name: env-demo 47 | type: git 48 | url: https://github.com/opspresso/argocd-env-demo 49 | - name: stable 50 | type: helm 51 | url: https://kubernetes-charts.storage.googleapis.com 52 | - name: argo 53 | type: helm 54 | url: https://argoproj.github.io/argo-helm 55 | 56 | oidc.config: | 57 | name: SSO 58 | clientID: 'argo-cd' 59 | clientSecret: 'd91fdbbc-5dbb-43ab-b388-ce4170ff79c6' 60 | issuer: 'https://keycloak.demo.mzdev.be/auth/realms/demo' 61 | requestedScopes: 62 | - openid 63 | - email 64 | - profile 65 | - groups 66 | 67 | # https://argoproj.github.io/argo-cd/operator-manual/user-management/keycloak/ 68 | 69 | rbacConfig: 70 | policy.default: role:readonly 71 | policy.csv: | 72 | g, "/admin", role:admin 73 | 74 | additionalApplications: 75 | - name: docs 76 | project: default 77 | source: 78 | repoURL: https://github.com/opspresso/argocd-env-demo 79 | targetRevision: HEAD 80 | path: nalbam-docs/demo-prod 81 | destination: 82 | server: https://kubernetes.default.svc 83 | namespace: demo-prod 84 | syncPolicy: 85 | automated: 86 | prune: true 87 | selfHeal: true 88 | - name: sample-node-dev 89 | project: default 90 | source: 91 | repoURL: https://github.com/opspresso/argocd-env-demo 92 | targetRevision: HEAD 93 | path: sample-node/demo-dev 94 | destination: 95 | server: https://kubernetes.default.svc 96 | namespace: demo-dev 97 | syncPolicy: 98 | automated: 99 | prune: true 100 | selfHeal: true 101 | - name: 
sample-node-prod 102 | project: default 103 | source: 104 | repoURL: https://github.com/opspresso/argocd-env-demo 105 | targetRevision: HEAD 106 | path: sample-node/demo-prod 107 | destination: 108 | server: https://kubernetes.default.svc 109 | namespace: demo-prod 110 | syncPolicy: 111 | automated: 112 | prune: true 113 | selfHeal: true 114 | - name: sample-spring-dev 115 | project: default 116 | source: 117 | repoURL: https://github.com/opspresso/argocd-env-demo 118 | targetRevision: HEAD 119 | path: sample-spring/demo-dev 120 | destination: 121 | server: https://kubernetes.default.svc 122 | namespace: demo-dev 123 | syncPolicy: 124 | automated: 125 | prune: true 126 | selfHeal: true 127 | - name: sample-tomcat-dev 128 | project: default 129 | source: 130 | repoURL: https://github.com/opspresso/argocd-env-demo 131 | targetRevision: HEAD 132 | path: sample-tomcat/demo-dev 133 | destination: 134 | server: https://kubernetes.default.svc 135 | namespace: demo-dev 136 | syncPolicy: 137 | automated: 138 | prune: true 139 | selfHeal: true 140 | 141 | additionalProjects: [] 142 | -------------------------------------------------------------------------------- /eks-charts/values/argo/argo-events.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: argo-events 2 | 3 | installCRD: true 4 | -------------------------------------------------------------------------------- /eks-charts/values/argo/argo-gatekeeper.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: argo-gatekeeper 2 | 3 | discoveryURL: https://keycloak.demo.mzdev.be/auth/realms/demo 4 | 5 | upstreamURL: http://argo-server.argo.svc.cluster.local:2746 6 | 7 | ClientID: argo 8 | ClientSecret: 60820e7d-80a1-4e63-9ae1-b83972eaa020 9 | 10 | ingress: 11 | enabled: true 12 | annotations: 13 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 14 | kubernetes.io/ingress.class: nginx 15 | 
nginx.ingress.kubernetes.io/ssl-redirect: "true" 16 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 17 | hosts: 18 | - "argo.demo.mzdev.be" 19 | tls: 20 | - secretName: argo-gatekeeper-tls 21 | hosts: 22 | - "argo.demo.mzdev.be" 23 | -------------------------------------------------------------------------------- /eks-charts/values/argo/argo-rollouts.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: argo-rollouts 2 | 3 | installCRDs: true 4 | 5 | controller: 6 | image: 7 | tag: v0.8.3 8 | -------------------------------------------------------------------------------- /eks-charts/values/argo/argo.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: argo 2 | 3 | installCRD: true 4 | 5 | init: 6 | serviceAccount: "" 7 | 8 | controller: 9 | podAnnotations: 10 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 11 | iam.amazonaws.com/role: "eks-demo-worker-bucket" 12 | serviceMonitor: 13 | enabled: true 14 | additionalLabels: 15 | release: prometheus-operator 16 | workflowNamespaces: 17 | - default 18 | 19 | server: 20 | podAnnotations: 21 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 22 | ingress: 23 | enabled: false 24 | annotations: 25 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 26 | kubernetes.io/ingress.class: nginx 27 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 28 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 29 | hosts: 30 | - "argo.demo.mzdev.be" 31 | tls: 32 | - secretName: argo-tls 33 | hosts: 34 | - "argo.demo.mzdev.be" 35 | 36 | useDefaultArtifactRepo: true 37 | useStaticCredentials: false 38 | 39 | artifactRepository: 40 | s3: 41 | bucket: "eks-demo-argo-demo" 42 | endpoint: s3.amazonaws.com 43 | -------------------------------------------------------------------------------- /eks-charts/values/argo/http-benchmark.yaml: 
-------------------------------------------------------------------------------- 1 | resources: 2 | - apiVersion: argoproj.io/v1alpha1 3 | kind: AnalysisTemplate 4 | metadata: 5 | name: http-benchmark 6 | spec: 7 | args: 8 | - name: url 9 | metrics: 10 | - name: http-benchmark 11 | count: 10 12 | failureLimit: 5 13 | interval: 5s 14 | provider: 15 | job: 16 | spec: 17 | template: 18 | metadata: 19 | annotations: 20 | sidecar.istio.io/inject: "false" 21 | spec: 22 | containers: 23 | - name: load-tester 24 | image: argoproj/load-tester:latest 25 | command: [sh, -xec] 26 | args: 27 | - | 28 | wrk -t1 -c1 -d5 -s report.lua {{args.url}} 29 | jq -e '.errors_ratio <= 0.1' report.json 30 | restartPolicy: Never 31 | backoffLimit: 0 32 | -------------------------------------------------------------------------------- /eks-charts/values/consul/consul-gatekeeper.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: consul-gatekeeper 2 | 3 | discoveryURL: https://keycloak.demo.spic.me/auth/realms/demo 4 | 5 | upstreamURL: http://consul-ui.consul.svc.cluster.local:8500 6 | 7 | ClientID: consul 8 | ClientSecret: 85a70f6c-9577-4a44-b7b1-5231cb03b8f7 9 | 10 | ingress: 11 | enabled: true 12 | annotations: 13 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 14 | kubernetes.io/ingress.class: nginx 15 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 16 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 17 | hosts: 18 | - "consul.demo.spic.me" 19 | tls: 20 | - secretName: consul-tls 21 | hosts: 22 | - "consul.demo.spic.me" 23 | -------------------------------------------------------------------------------- /eks-charts/values/consul/consul.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: consul 2 | 3 | uiService: 4 | enabled: true 5 | type: ClusterIP 6 | 7 | uiIngress: 8 | enabled: false 9 | annotations: 10 | cert-manager.io/cluster-issuer: 
"letsencrypt-issuer" 11 | kubernetes.io/ingress.class: nginx 12 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 13 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 14 | hosts: 15 | - "consul.demo.spic.me" 16 | tls: 17 | - secretName: consul-tls 18 | hosts: 19 | - "consul.demo.spic.me" 20 | 21 | StorageClass: default 22 | -------------------------------------------------------------------------------- /eks-charts/values/default/cluster-overprovisioner.yaml: -------------------------------------------------------------------------------- 1 | # https://github.com/helm/charts/blob/master/stable/cluster-overprovisioner/values.yaml 2 | # https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler 3 | # https://medium.com/scout24-engineering/cluster-overprovisiong-in-kubernetes-79433cb3ed0e 4 | 5 | nameOverride: cluster-overprovisioner 6 | -------------------------------------------------------------------------------- /eks-charts/values/devops/archiva.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: archiva 2 | 3 | proxy: 4 | enabled: true 5 | hostname: "archiva.demo.mzdev.be" 6 | proto: https 7 | pathPrefix: / 8 | 9 | ingress: 10 | enabled: true 11 | annotations: 12 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 13 | ingress.kubernetes.io/proxy-body-size: "0" 14 | kubernetes.io/ingress.class: nginx 15 | nginx.ingress.kubernetes.io/proxy-body-size: "0" 16 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 17 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 18 | tls: 19 | enabled: true 20 | secret: archiva-tls 21 | 22 | persistence: 23 | enabled: true 24 | storageClass: default 25 | requestSize: 50Gi 26 | -------------------------------------------------------------------------------- /eks-charts/values/devops/chartmuseum.yaml: 
-------------------------------------------------------------------------------- 1 | fullnameOverride: chartmuseum 2 | 3 | replica: 4 | annotations: 5 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 6 | iam.amazonaws.com/role: "eks-demo-worker-bucket" 7 | 8 | env: 9 | open: 10 | DEBUG: false 11 | DISABLE_API: false 12 | DISABLE_METRICS: false 13 | ALLOW_OVERWRITE: true 14 | 15 | STORAGE: "amazon" 16 | STORAGE_AMAZON_BUCKET: "eks-demo-chartmuseum-demo" 17 | STORAGE_AMAZON_PREFIX: "/" 18 | STORAGE_AMAZON_REGION: "ap-northeast-2" 19 | # secret: 20 | # BASIC_AUTH_USER: "server" 21 | # BASIC_AUTH_PASS: "924426A5-DA22-4A10-9FB1-418761684372" 22 | 23 | ingress: 24 | enabled: true 25 | annotations: 26 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 27 | ingress.kubernetes.io/proxy-body-size: "0" 28 | kubernetes.io/ingress.class: nginx 29 | nginx.ingress.kubernetes.io/proxy-body-size: "0" 30 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 31 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 32 | hosts: 33 | - name: "chartmuseum.demo.mzdev.be" 34 | path: / 35 | tls: true 36 | tlsSecret: chartmuseum-tls 37 | -------------------------------------------------------------------------------- /eks-charts/values/devops/docker-registry.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: docker-registry 2 | 3 | podAnnotations: 4 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 5 | iam.amazonaws.com/role: "eks-demo-worker-bucket" 6 | 7 | storage: s3 8 | 9 | s3: 10 | region: "ap-northeast-2" 11 | bucket: "eks-demo-registry-demo" 12 | encrypt: "false" 13 | secure: "false" 14 | 15 | secrets: 16 | haSharedSecret: "secret-string-287463" 17 | s3: 18 | accessKey: "" 19 | secretKey: "" 20 | 21 | ingress: 22 | enabled: true 23 | annotations: 24 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 25 | ingress.kubernetes.io/proxy-body-size: "0" 26 | kubernetes.io/ingress.class: nginx 27 | 
nginx.ingress.kubernetes.io/proxy-body-size: "0" 28 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 29 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 30 | hosts: 31 | - "registry.demo.mzdev.be" 32 | path: / 33 | tls: 34 | - secretName: docker-registry-tls 35 | hosts: 36 | - "registry.demo.mzdev.be" 37 | -------------------------------------------------------------------------------- /eks-charts/values/devops/harbor.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: harbor 2 | 3 | expose: 4 | type: ingress 5 | tls: 6 | enabled: true 7 | secretName: "harbor-core-tls" 8 | notarySecretName: "harbor-notary-tls" 9 | ingress: 10 | hosts: 11 | core: "harbor-core.demo.mzdev.be" 12 | notary: "harbor-notary.demo.mzdev.be" 13 | annotations: 14 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 15 | ingress.kubernetes.io/proxy-body-size: "0" 16 | ingress.kubernetes.io/ssl-redirect: "true" 17 | kubernetes.io/ingress.class: nginx 18 | nginx.ingress.kubernetes.io/proxy-body-size: "0" 19 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 20 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 21 | 22 | externalURL: "https://harbor-core.demo.mzdev.be" 23 | 24 | harborAdminPassword: "password" 25 | 26 | core: 27 | podAnnotations: 28 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 29 | iam.amazonaws.com/role: "eks-demo-worker-bucket" 30 | 31 | registry: 32 | podAnnotations: 33 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 34 | iam.amazonaws.com/role: "eks-demo-worker-bucket" 35 | 36 | chartmuseum: 37 | podAnnotations: 38 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 39 | iam.amazonaws.com/role: "eks-demo-worker-bucket" 40 | 41 | persistence: 42 | enabled: true 43 | resourcePolicy: "keep" 44 | persistentVolumeClaim: 45 | registry: 46 | storageClass: default 47 | chartmuseum: 48 | storageClass: default 49 | jobservice: 50 | storageClass: default 51 | database: 
52 | storageClass: default 53 | redis: 54 | storageClass: default 55 | trivy: 56 | storageClass: default 57 | imageChartStorage: 58 | s3: 59 | region: "ap-northeast-2" 60 | bucket: "eks-demo-harbor-demo" 61 | -------------------------------------------------------------------------------- /eks-charts/values/devops/sonarqube.yaml: -------------------------------------------------------------------------------- 1 | fullnameOverride: sonarqube 2 | 3 | ingress: 4 | enabled: true 5 | annotations: 6 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 7 | ingress.kubernetes.io/proxy-body-size: "0" 8 | kubernetes.io/ingress.class: nginx 9 | nginx.ingress.kubernetes.io/proxy-body-size: "0" 10 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 11 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 12 | hosts: 13 | - name: "sonarqube.demo.mzdev.be" 14 | path: / 15 | tls: 16 | - secretName: sonarqube-tls 17 | hosts: 18 | - "sonarqube.demo.mzdev.be" 19 | 20 | annotations: 21 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 22 | 23 | plugins: 24 | install: 25 | - "https://github.com/vaulttec/sonar-auth-oidc/releases/download/v2.0.0/sonar-auth-oidc-plugin-2.0.0.jar" 26 | 27 | sonarProperties: 28 | sonar.core.serverBaseURL: "https://sonarqube.demo.mzdev.be" 29 | # sonar.forceAuthentication: true 30 | sonar.auth.oidc.enabled: true 31 | sonar.auth.oidc.issuerUri: "https://keycloak.demo.mzdev.be/auth/realms/demo" 32 | sonar.auth.oidc.clientId.secured: "sonarqube" 33 | sonar.auth.oidc.clientSecret.secured: "5ad5e8a7-85f2-44cf-979c-dd8faf53e84c" 34 | # sonar.auth.oidc.scopes: "openid email profile" 35 | # sonar.auth.oidc.groupsSync.claimName: "groups" 36 | sonar.lf.enableGravatar: true 37 | 38 | # sonarSecretKey: "settings-encryption-secret" 39 | 40 | resources: 41 | requests: 42 | cpu: 1000m 43 | memory: 2Gi 44 | limits: 45 | cpu: 1200m 46 | memory: 3Gi 47 | 48 | persistence: 49 | enabled: true 50 | storageClass: default 51 | size: 20Gi 52 | 53 | postgresql: 
54 | enabled: true 55 | persistence: 56 | enabled: true 57 | storageClass: default 58 | size: 8Gi 59 | -------------------------------------------------------------------------------- /eks-charts/values/devops/sonatype-nexus.yaml: -------------------------------------------------------------------------------- 1 | fullnameOverride: sonatype-nexus 2 | 3 | statefulset: 4 | enabled: false 5 | 6 | nexus: 7 | service: 8 | type: ClusterIP 9 | 10 | resources: 11 | requests: 12 | cpu: 1000m 13 | memory: 2Gi 14 | limits: 15 | cpu: 1200m 16 | memory: 3Gi 17 | 18 | podAnnotations: 19 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 20 | 21 | livenessProbe: 22 | initialDelaySeconds: 60 23 | periodSeconds: 30 24 | failureThreshold: 12 25 | path: / 26 | readinessProbe: 27 | initialDelaySeconds: 60 28 | periodSeconds: 30 29 | failureThreshold: 12 30 | path: / 31 | 32 | ingress: 33 | enabled: true 34 | annotations: 35 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 36 | ingress.kubernetes.io/proxy-body-size: "0" 37 | kubernetes.io/ingress.class: nginx 38 | nginx.ingress.kubernetes.io/proxy-body-size: "0" 39 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 40 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 41 | tls: 42 | enabled: true 43 | secretName: sonatype-nexus-tls 44 | 45 | nexusProxy: 46 | env: 47 | nexusHttpHost: "nexus.demo.mzdev.be" 48 | 49 | nexusBackup: 50 | enabled: false 51 | persistence: 52 | enabled: false 53 | storageClass: default 54 | storageSize: 20Gi 55 | 56 | persistence: 57 | enabled: true 58 | storageClass: default 59 | storageSize: 50Gi 60 | -------------------------------------------------------------------------------- /eks-charts/values/istio/kiali-gatekeeper.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: kiali-gatekeeper 2 | 3 | discoveryURL: https://keycloak.demo.mzdev.be/auth/realms/demo 4 | 5 | upstreamURL: http://kiali.istio-system.svc.cluster.local:20001 6 
| 7 | ClientID: kiali 8 | ClientSecret: 746b5179-2b86-4c5c-8b1f-440e893f650b 9 | 10 | ingress: 11 | enabled: true 12 | annotations: 13 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 14 | kubernetes.io/ingress.class: nginx 15 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 16 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 17 | hosts: 18 | - "kiali-istio.demo.mzdev.be" 19 | tls: 20 | - secretName: kiali-gatekeeper-tls 21 | hosts: 22 | - "kiali-istio.demo.mzdev.be" 23 | -------------------------------------------------------------------------------- /eks-charts/values/istio/tracing-gatekeeper.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: tracing-gatekeeper 2 | 3 | discoveryURL: https://keycloak.demo.mzdev.be/auth/realms/demo 4 | 5 | upstreamURL: http://tracing.istio-system.svc.cluster.local:80 6 | 7 | ClientID: tracing 8 | ClientSecret: 0e94208b-1ea1-4e9b-b6e4-3e0b15c9fb9d 9 | 10 | ingress: 11 | enabled: true 12 | annotations: 13 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 14 | kubernetes.io/ingress.class: nginx 15 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 16 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 17 | hosts: 18 | - "tracing-istio.demo.mzdev.be" 19 | tls: 20 | - secretName: tracing-gatekeeper-tls 21 | hosts: 22 | - "tracing-istio.demo.mzdev.be" 23 | -------------------------------------------------------------------------------- /eks-charts/values/jenkins/jenkins.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: jenkins 2 | 3 | master: 4 | adminUser: "admin" 5 | adminPassword: "password" 6 | 7 | resources: 8 | requests: 9 | cpu: 1000m 10 | memory: 1Gi 11 | limits: 12 | cpu: 1000m 13 | memory: 3Gi 14 | 15 | podAnnotations: 16 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 17 | 18 | # hostNetworking: true 19 | 20 | javaOpts: 
"-Dorg.apache.commons.jelly.tags.fmt.timeZone=Asia/Seoul" 21 | 22 | jenkinsUrl: "https://jenkins.demo.mzdev.be" 23 | 24 | # customConfigMap: true 25 | # overwriteConfig: true 26 | # overwriteJobs: true 27 | 28 | ingress: 29 | enabled: true 30 | annotations: 31 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 32 | kubernetes.io/ingress.class: nginx 33 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 34 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 35 | hostName: "jenkins.demo.mzdev.be" 36 | tls: 37 | - secretName: jenkins-tls 38 | hosts: 39 | - "jenkins.demo.mzdev.be" 40 | 41 | # https://github.com/helm/charts/blob/master/stable/jenkins/values.yaml 42 | installPlugins: 43 | - configuration-as-code:latest 44 | - credentials-binding:latest 45 | - git:latest 46 | - kubernetes:latest 47 | - workflow-aggregator:latest 48 | - workflow-job:latest 49 | 50 | # https://plugins.jenkins.io/ 51 | additionalPlugins: 52 | - authorize-project:latest 53 | - blueocean:latest 54 | - github-pullrequest:latest 55 | - job-dsl:latest 56 | - keycloak:latest 57 | - kubernetes-credentials-provider:latest 58 | - pipeline-github-lib:latest 59 | - prometheus:latest 60 | - role-strategy:latest 61 | # - generic-webhook-trigger:latest 62 | 63 | overwritePlugins: true 64 | 65 | prometheus: 66 | enabled: true 67 | serviceMonitorAdditionalLabels: 68 | release: prometheus-operator 69 | 70 | # https://plugins.jenkins.io/configuration-as-code/ 71 | JCasC: 72 | # enabled: true 73 | # defaultConfig: true 74 | 75 | configScripts: 76 | welcome-message: |- 77 | jenkins: 78 | systemMessage: | 79 | Welcome to our CI\CD server. 80 | This Jenkins is configured and managed 'as code'. 
81 | 82 | # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/demos/keycloak/README.md 83 | keycloak: |- 84 | unclassified: 85 | keycloakSecurityRealm: 86 | keycloakJson: |- 87 | { 88 | "realm": "demo", 89 | "auth-server-url": "https://keycloak.demo.mzdev.be/auth/", 90 | "ssl-required": "external", 91 | "resource": "jenkins", 92 | "credentials": { 93 | "secret": "f76f3359-4be2-45dd-aba9-6f5204e62438" 94 | }, 95 | "confidential-port": 0 96 | } 97 | 98 | # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/docs/seed-jobs.md 99 | # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/demos/jobs/multibranch-github.yaml 100 | jobs: |- 101 | jobs: 102 | - script: > 103 | multibranchPipelineJob('sample-node') { 104 | branchSources { 105 | git { 106 | id = 'sample-node' 107 | remote('https://github.com/nalbam/sample-node.git') 108 | } 109 | } 110 | orphanedItemStrategy { 111 | discardOldItems { 112 | numToKeep(15) 113 | } 114 | } 115 | } 116 | - script: > 117 | multibranchPipelineJob('sample-spring') { 118 | branchSources { 119 | git { 120 | id = 'sample-spring' 121 | remote('https://github.com/nalbam/sample-spring.git') 122 | } 123 | } 124 | orphanedItemStrategy { 125 | discardOldItems { 126 | numToKeep(15) 127 | } 128 | } 129 | } 130 | - script: > 131 | multibranchPipelineJob('sample-tomcat') { 132 | branchSources { 133 | git { 134 | id = 'sample-tomcat' 135 | remote('https://github.com/nalbam/sample-tomcat.git') 136 | } 137 | } 138 | orphanedItemStrategy { 139 | discardOldItems { 140 | numToKeep(15) 141 | } 142 | } 143 | } 144 | 145 | # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/demos/keycloak/README.md 146 | securityRealm: |- 147 | keycloak 148 | 149 | # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/demos/role-strategy-auth/README.md 150 | authorizationStrategy: |- 151 | roleBased: 152 | roles: 153 | global: 154 | - name: "admin" 155 | description: 
"Administrators" 156 | permissions: 157 | - "Overall/Administer" 158 | assignments: 159 | - "admin" 160 | - "me@nalbam.com" 161 | - name: "readonly" 162 | description: "Read-only users" 163 | permissions: 164 | - "Overall/Read" 165 | - "Job/Read" 166 | assignments: 167 | - "authenticated" 168 | items: 169 | - name: "sample" 170 | description: "Sample projects" 171 | pattern: "sample-.*" 172 | permissions: 173 | - "Job/Configure" 174 | - "Job/Build" 175 | - "Job/Delete" 176 | assignments: 177 | - "user1" 178 | - "user2" 179 | 180 | persistence: 181 | enabled: true 182 | storageClass: default 183 | size: 20Gi 184 | 185 | rbac: 186 | create: true 187 | readSecrets: true 188 | 189 | serviceAccount: 190 | create: true 191 | 192 | serviceAccountAgent: 193 | create: true 194 | -------------------------------------------------------------------------------- /eks-charts/values/jenkins/secret/jenkins.txt: -------------------------------------------------------------------------------- 1 | hello jenkins world 2 | -------------------------------------------------------------------------------- /eks-charts/values/jenkins/secret/secret.txt: -------------------------------------------------------------------------------- 1 | hello secret file 2 | -------------------------------------------------------------------------------- /eks-charts/values/keycloak/keycloak.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: keycloak 2 | 3 | keycloak: 4 | replicas: 1 5 | 6 | username: "admin" 7 | password: "password" 8 | 9 | cli: 10 | enabled: false 11 | 12 | ingress: 13 | enabled: true 14 | annotations: 15 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 16 | kubernetes.io/ingress.class: nginx 17 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 18 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 19 | hosts: 20 | - "keycloak.demo.mzdev.be" 21 | tls: 22 | - secretName: keycloak-tls 23 | hosts: 24 | - 
"keycloak.demo.mzdev.be" 25 | 26 | # podAnnotations: 27 | # cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 28 | 29 | extraEnv: | 30 | - name: PROXY_ADDRESS_FORWARDING 31 | value: "true" 32 | 33 | extraVolumes: | 34 | - name: realm-demo-secret 35 | secret: 36 | secretName: realm-demo-secret 37 | 38 | extraVolumeMounts: | 39 | - name: realm-demo-secret 40 | mountPath: "/realm/" 41 | readOnly: true 42 | 43 | extraArgs: -Dkeycloak.import=/realm/demo.json 44 | 45 | persistence: 46 | deployPostgres: true 47 | dbVendor: postgres 48 | dbHost: keycloak-postgresql 49 | dbPort: 5432 50 | dbUser: keycloak 51 | dbPassword: password 52 | 53 | # https://hub.helm.sh/charts/bitnami/postgresql 54 | postgresql: 55 | enabled: true 56 | postgresqlUsername: keycloak 57 | postgresqlPassword: password 58 | replication: 59 | enabled: false 60 | persistence: 61 | enabled: true 62 | storageClass: default 63 | size: 10Gi 64 | 65 | prometheus: 66 | operator: 67 | enabled: true 68 | 69 | serviceMonitor: 70 | selector: 71 | release: prometheus-operator 72 | 73 | prometheusRules: 74 | ## Add Prometheus Rules? 75 | enabled: false 76 | 77 | ## Additional labels to add to the PrometheusRule so it is picked up by the operator. 78 | ## If using the [Helm Chart](https://github.com/helm/charts/tree/master/stable/prometheus-operator) this is the name of the Helm release and 'app: prometheus-operator' 79 | selector: 80 | app: prometheus-operator 81 | release: prometheus 82 | 83 | ## Some example rules. 84 | rules: {} 85 | # - alert: keycloak-IngressHigh5xxRate 86 | # annotations: 87 | # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 1%. 
88 | # expr: (sum(rate(nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m]))/sum(rate(nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m])))*100 > 1 89 | # for: 5m 90 | # labels: 91 | # severity: warning 92 | # - alert: keycloak-IngressHigh5xxRate 93 | # annotations: 94 | # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 5%. 95 | # expr: (sum(rate(nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m]))/sum(rate(nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m])))*100 > 5 96 | # for: 5m 97 | # labels: 98 | # severity: critical 99 | -------------------------------------------------------------------------------- /eks-charts/values/kube-ingress/cert-manager-issuers.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - apiVersion: cert-manager.io/v1alpha2 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-issuer 6 | spec: 7 | acme: 8 | # Email address used for ACME registration 9 | email: me@nalbam.com 10 | # Name of a secret used to store the ACME account private key 11 | privateKeySecretRef: 12 | name: letsencrypt-issuer 13 | # The ACME server URL 14 | # server: https://acme-v02.api.letsencrypt.org/directory 15 | server: https://acme-staging-v02.api.letsencrypt.org/directory 16 | solvers: 17 | # An empty 'selector' means that this solver matches all domains 18 | - selector: {} 19 | http01: 20 | ingress: 21 | class: nginx 22 | -------------------------------------------------------------------------------- /eks-charts/values/kube-ingress/cert-manager.yaml: -------------------------------------------------------------------------------- 1 | 
nameOverride: cert-manager 2 | 3 | installCRDs: true 4 | 5 | ingressShim: 6 | defaultIssuerName: letsencrypt-issuer 7 | defaultIssuerKind: ClusterIssuer 8 | 9 | prometheus: 10 | enabled: true 11 | servicemonitor: 12 | enabled: true 13 | prometheusInstance: default 14 | labels: 15 | release: prometheus-operator 16 | -------------------------------------------------------------------------------- /eks-charts/values/kube-ingress/external-dns.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: external-dns 2 | 3 | sources: 4 | - service 5 | # - ingress 6 | 7 | # affinity: 8 | # nodeAffinity: 9 | # requiredDuringSchedulingIgnoredDuringExecution: 10 | # nodeSelectorTerms: 11 | # - matchExpressions: 12 | # - key: node.kubernetes.io/role 13 | # operator: In 14 | # values: 15 | # - ops 16 | 17 | podAnnotations: 18 | iam.amazonaws.com/role: "eks-demo-worker-route53" 19 | 20 | rbac: 21 | create: true 22 | -------------------------------------------------------------------------------- /eks-charts/values/kube-ingress/nginx-ingress.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: nginx-ingress 2 | 3 | controller: 4 | kind: DaemonSet 5 | config: 6 | proxy-protocol: "true" 7 | real-ip-header: "proxy_protocol" 8 | set-real-ip-from: "0.0.0.0/0" 9 | use-forwarded-headers: "true" 10 | service: 11 | annotations: 12 | external-dns.alpha.kubernetes.io/hostname: "" 13 | external-dns.alpha.kubernetes.io/ttl: "300" 14 | service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" 15 | service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" 16 | service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" 17 | service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "" 18 | service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" 19 | # service.beta.kubernetes.io/aws-load-balancer-type: "nlb" 20 | targetPorts: 21 | http: http 
22 | https: http 23 | stats: 24 | enabled: true 25 | metrics: 26 | enabled: true 27 | serviceMonitor: 28 | enabled: true 29 | additionalLabels: 30 | release: prometheus-operator 31 | -------------------------------------------------------------------------------- /eks-charts/values/kube-system/cluster-autoscaler.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: cluster-autoscaler 2 | 3 | autoDiscovery: 4 | enabled: true 5 | clusterName: "eks-demo" 6 | 7 | awsRegion: "ap-northeast-2" 8 | 9 | extraArgs: 10 | v: 4 11 | logtostderr: true 12 | scale-down-enabled: true 13 | scale-down-utilization-threshold: 0.6 14 | skip-nodes-with-local-storage: false 15 | skip-nodes-with-system-pods: false 16 | 17 | # affinity: 18 | # nodeAffinity: 19 | # requiredDuringSchedulingIgnoredDuringExecution: 20 | # nodeSelectorTerms: 21 | # - matchExpressions: 22 | # - key: node.kubernetes.io/role 23 | # operator: In 24 | # values: 25 | # - ops 26 | 27 | podAnnotations: 28 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 29 | iam.amazonaws.com/role: "eks-demo-worker-asg" 30 | 31 | sslCertPath: /etc/ssl/certs/ca-bundle.crt 32 | 33 | rbac: 34 | create: true 35 | pspEnabled: true 36 | -------------------------------------------------------------------------------- /eks-charts/values/kube-system/efs-provisioner.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: efs-provisioner 2 | 3 | podAnnotations: 4 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 5 | iam.amazonaws.com/role: "eks-demo-worker-efs" 6 | 7 | efsProvisioner: 8 | efsFileSystemId: "" 9 | awsRegion: ap-northeast-2 10 | path: /shared 11 | provisionerName: aws-efs 12 | storageClass: 13 | name: efs 14 | isDefault: false 15 | gidAllocate: 16 | enabled: true 17 | gidMin: 40000 18 | gidMax: 50000 19 | reclaimPolicy: Delete 20 | -------------------------------------------------------------------------------- 
/eks-charts/values/kube-system/k8s-spot-termination-handler.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: k8s-spot-termination-handler 2 | 3 | clusterName: "eks-demo" 4 | 5 | podAnnotations: 6 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 7 | iam.amazonaws.com/role: "eks-demo-worker-asg" 8 | 9 | detachAsg: false 10 | 11 | slackUrl: "" 12 | -------------------------------------------------------------------------------- /eks-charts/values/kube-system/kube-state-metrics.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: kube-state-metrics 2 | -------------------------------------------------------------------------------- /eks-charts/values/kube-system/kube2iam.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: kube2iam 2 | 3 | awsRegion: "ap-northeast-2" 4 | 5 | extraArgs: 6 | auto-discover-base-arn: true 7 | auto-discover-default-role: true 8 | # base-role-arn: BASE_ROLE_ARN 9 | # default-role: DEFAULT_ROLE 10 | 11 | host: 12 | iptables: true 13 | interface: eni+ 14 | 15 | # prometheus: 16 | # service: 17 | # enabled: true 18 | # serviceMonitor: 19 | # enabled: true 20 | # labels: 21 | # release: prometheus-operator 22 | 23 | rbac: 24 | create: true 25 | -------------------------------------------------------------------------------- /eks-charts/values/kube-system/metrics-server.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: metrics-server 2 | 3 | args: 4 | - --kubelet-insecure-tls 5 | - --kubelet-preferred-address-types=InternalIP 6 | 7 | # affinity: 8 | # nodeAffinity: 9 | # requiredDuringSchedulingIgnoredDuringExecution: 10 | # nodeSelectorTerms: 11 | # - matchExpressions: 12 | # - key: node.kubernetes.io/role 13 | # operator: In 14 | # values: 15 | # - ops 16 | 17 | podAnnotations: 18 | 
cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 19 | -------------------------------------------------------------------------------- /eks-charts/values/logging/fluentd-elasticsearch.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: fluentd-elasticsearch 2 | 3 | elasticsearch: 4 | auth: 5 | enabled: false 6 | user: "username" 7 | password: "password" 8 | includeTagKey: true 9 | setOutputHostEnvVar: true 10 | # If setOutputHostEnvVar is false this value is ignored 11 | hosts: ["elasticsearch-client:9200"] 12 | -------------------------------------------------------------------------------- /eks-charts/values/monitor/datadog.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: datadog 2 | 3 | datadog: 4 | clusterName: # CLUSTER_NAME 5 | 6 | apiKey: DATADOG_API_KEY 7 | apiKeyExistingSecret: # DATADOG_API_KEY_SECRET 8 | 9 | appKey: DATADOG_APP_KEY 10 | appKeyExistingSecret: # DATADOG_APP_KEY_SECRET 11 | 12 | kubeStateMetricsEnabled: false 13 | 14 | apmEnabled: true 15 | nonLocalTraffic: true 16 | 17 | daemonset: 18 | useHostPort: true 19 | 20 | env: 21 | - name: DD_AGENT_HOST 22 | valueFrom: 23 | fieldRef: 24 | fieldPath: status.hostIP 25 | -------------------------------------------------------------------------------- /eks-charts/values/monitor/grafana.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: grafana 2 | 3 | adminUser: "admin" 4 | adminPassword: "password" 5 | 6 | # podAnnotations: 7 | # cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 8 | 9 | ingress: 10 | enabled: true 11 | annotations: 12 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 13 | kubernetes.io/ingress.class: nginx 14 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 15 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 16 | hosts: 17 | - "grafana.demo.mzdev.be" 18 | tls: 19 | - secretName: 
grafana-tls 20 | hosts: 21 | - "grafana.demo.mzdev.be" 22 | 23 | grafana.ini: 24 | server: 25 | root_url: "https://grafana.demo.mzdev.be/" 26 | auth: 27 | disable_login_form: true 28 | auth.generic_oauth: 29 | enabled: true 30 | client_id: "grafana" 31 | client_secret: "df7d395f-e833-49b6-b19c-eea8a54fb06a" 32 | auth_url: "https://keycloak.demo.mzdev.be/auth/realms/demo/protocol/openid-connect/auth" 33 | token_url: "https://keycloak.demo.mzdev.be/auth/realms/demo/protocol/openid-connect/token" 34 | api_url: "https://keycloak.demo.mzdev.be/auth/realms/demo/protocol/openid-connect/userinfo" 35 | scopes: "openid email profile roles" 36 | allow_sign_up: "true" 37 | 38 | sidecar: 39 | dashboards: 40 | enabled: true 41 | 42 | persistence: 43 | enabled: true 44 | storageClassName: default 45 | size: 10Gi 46 | 47 | datasources: 48 | datasources.yaml: 49 | apiVersion: 1 50 | datasources: 51 | - name: Prometheus 52 | type: prometheus 53 | url: "http://prometheus-operator-prometheus:9090" 54 | access: proxy 55 | isDefault: true 56 | 57 | dashboardProviders: 58 | dashboardproviders.yaml: 59 | apiVersion: 1 60 | providers: 61 | - name: default 62 | orgId: 1 63 | folder: "" 64 | type: file 65 | disableDeletion: false 66 | editable: true 67 | options: 68 | path: /var/lib/grafana/dashboards/default 69 | 70 | dashboards: 71 | default: 72 | # https://grafana.com/dashboards/ 73 | kube-cluster: 74 | gnetId: 9797 75 | revision: 13 76 | datasource: Prometheus 77 | kube-deployment: 78 | gnetId: 9679 79 | revision: 8 80 | datasource: Prometheus 81 | jenkins-overview: 82 | gnetId: 12444 83 | revision: 1 84 | datasource: Prometheus 85 | jvm-overview: 86 | gnetId: 11526 87 | revision: 8 88 | datasource: Prometheus 89 | 90 | node-exporter: 91 | gnetId: 11074 92 | revision: 2 93 | datasource: Prometheus 94 | nodejs-application: 95 | gnetId: 11159 96 | revision: 1 97 | datasource: Prometheus 98 | 99 | nginx-ingress: 100 | url: 
https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json 101 | datasource: Prometheus 102 | argo-cd: 103 | url: https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json 104 | datasource: Prometheus 105 | argo-rollouts: 106 | url: https://raw.githubusercontent.com/argoproj/argo-rollouts/master/examples/dashboard.json 107 | datasource: Prometheus 108 | -------------------------------------------------------------------------------- /eks-charts/values/monitor/prometheus-adapter.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: prometheus-adapter 2 | 3 | prometheus: 4 | url: "http://prometheus-operator-prometheus.monitor.svc" 5 | port: 9090 6 | 7 | rules: 8 | default: true 9 | custom: 10 | - seriesQuery: 'container_network_receive_bytes_total{namespace!="",pod!=""}' 11 | resources: 12 | overrides: 13 | namespace: { resource: "namespace" } 14 | pod: { resource: "pod" } 15 | name: 16 | matches: "^(.*)_total" 17 | as: "${1}" 18 | metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>}[2m])) by (<<.GroupBy>>)" 19 | -------------------------------------------------------------------------------- /eks-charts/values/monitor/prometheus-alert-rules.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | labels: 6 | role: alert-rules 7 | name: prometheus-alert-rules 8 | spec: 9 | groups: 10 | - name: InstanceCountChanged 11 | rules: 12 | - alert: InstanceCountChanged 13 | expr: count(kube_node_labels{node=~"^.*$"}) - count(kube_node_labels{node=~"^.*$"} offset 2m) != 0 14 | labels: 15 | severity: Warning 16 | cluster: eks-demo 17 | annotations: 18 | summary: "Instance Count Changed" 19 | description: "The number of instances has changed. 
(delta: {{ $value }})" 20 | 21 | - name: InstanceDown 22 | rules: 23 | - alert: InstanceDown 24 | expr: up{job="kubernetes-nodes"} == 0 25 | labels: 26 | severity: Warning 27 | cluster: eks-demo 28 | annotations: 29 | summary: "Instance Down" 30 | description: "The instance({{ $labels.instance }}) is down." 31 | 32 | - name: HighCpuUsage 33 | rules: 34 | - alert: HighCpuUsage 35 | expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="kubernetes-service-endpoints",mode="idle"}[5m])) * 100) > 70 36 | for: 5m 37 | labels: 38 | severity: Warning 39 | cluster: eks-demo 40 | annotations: 41 | summary: "High CPU Usage(> 70%)" 42 | description: "The CPU usage of the instance({{ $labels.instance }}) has exceeded 70 percent for more than 5 minutes." 43 | 44 | - name: HighMemoryUsage 45 | rules: 46 | - alert: HighMemoryUsage 47 | expr: (node_memory_MemTotal_bytes - node_memory_MemFree_bytes - node_memory_Buffers_bytes - node_memory_Cached_bytes) / node_memory_MemTotal_bytes * 100 > 90 48 | for: 5m 49 | labels: 50 | severity: Warning 51 | cluster: eks-demo 52 | annotations: 53 | summary: "High Memory Usage(> 90%)" 54 | description: "The memory usage of the instance({{ $labels.instance }}) has exceeded 90 percent for more than 5 minutes."
55 | 56 | - name: PodCrashingLooping 57 | rules: 58 | - alert: PodCrashingLooping 59 | expr: round(increase(kube_pod_container_status_restarts_total[30m])) > 0 60 | for: 5m 61 | labels: 62 | severity: Critical 63 | cluster: eks-demo 64 | annotations: 65 | summary: "Pod Crash Looping(> 30m)" 66 | description: "Namespace : {{ $labels.namespace }} Pod : {{ $labels.pod }} -- crash {{ $value }} times" 67 | 68 | - name: KubeNodeNotReady 69 | rules: 70 | - alert: KubeNodeNotReady 71 | expr: kube_node_status_condition{job="kubernetes-service-endpoints",condition="Ready",status="true"} == 0 72 | for: 5m 73 | labels: 74 | severity: Critical 75 | cluster: eks-demo 76 | annotations: 77 | summary: "Kube Node Fail : {{ $labels.condition }}" 78 | description: "Node {{ $labels.node }} has failed. Check node!!" 79 | 80 | - name: AvgResponseTime 81 | rules: 82 | - alert: AvgResponseTime 83 | expr: (sum(rate(nginx_ingress_controller_response_duration_seconds_sum[5m])) by (host) != 0) / (sum(rate(nginx_ingress_controller_response_duration_seconds_count[5m])) by (host) != 0) > 5 84 | for: 5m 85 | labels: 86 | severity: Warning 87 | cluster: eks-demo 88 | annotations: 89 | summary: "Average Response Time(> 5s)" 90 | description: "{{ $labels.host }}'s Average Response Time is over 5sec." 91 | 92 | - name: HPAMaxUsage 93 | rules: 94 | - alert: HPAMaxUsage 95 | expr: (kube_hpa_status_current_replicas) / (kube_hpa_spec_max_replicas != 1) == 1 96 | for: 5m 97 | labels: 98 | severity: Warning 99 | cluster: eks-demo 100 | annotations: 101 | summary: "HPA Max Usage" 102 | description: "{{ $labels.hpa }} is using HPA Max."
103 | -------------------------------------------------------------------------------- /eks-charts/values/monitor/prometheus-operator.yaml: -------------------------------------------------------------------------------- 1 | fullnameOverride: prometheus-operator 2 | 3 | prometheusOperator: 4 | createCustomResource: false 5 | 6 | prometheus: 7 | prometheusSpec: 8 | scrapeInterval: 30s 9 | 10 | storageSpec: 11 | volumeClaimTemplate: 12 | spec: 13 | storageClassName: default 14 | accessModes: ["ReadWriteOnce"] 15 | resources: 16 | requests: 17 | storage: 50Gi 18 | 19 | ruleSelector: 20 | matchLabels: 21 | role: alert-rules 22 | 23 | grafana: 24 | enabled: false 25 | 26 | kubeStateMetrics: 27 | enabled: true 28 | 29 | alertmanager: 30 | enabled: true 31 | 32 | config: 33 | global: 34 | resolve_timeout: 5m 35 | slack_api_url: "" 36 | route: 37 | # group_by: ["job"] 38 | group_wait: 30s 39 | group_interval: 5m 40 | repeat_interval: 12h 41 | # receiver: "slack" 42 | routes: 43 | - match: 44 | alertname: Watchdog 45 | receiver: "null" 46 | - match: 47 | receiver: "slack" 48 | continue: false 49 | receivers: 50 | - name: "null" 51 | - name: "slack" 52 | slack_configs: 53 | - channel: "#kube-alerts" 54 | send_resolved: false 55 | color: '{{ if eq .Status "firing" }}danger{{ else }}good{{ end }}' 56 | title: '[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] Monitoring Event Notification' 57 | text: >- 58 | {{ range .Alerts }} 59 | *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` 60 | *Description:* `{{ .Annotations.description }}` 61 | *Details:* 62 | {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }} 63 | {{ end }} 64 | {{ end }} 65 | -------------------------------------------------------------------------------- /eks-charts/values/monitor/prometheus.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: prometheus 2 | 3 | server: 4 | enabled: true 5 | global: 
6 | scrape_interval: 30s 7 | persistentVolume: 8 | enabled: true 9 | storageClass: default 10 | size: 8Gi 11 | podAnnotations: 12 | cluster-autoscaler.kubernetes.io/safe-to-evict: "false" 13 | 14 | alertmanager: 15 | enabled: false 16 | persistentVolume: 17 | enabled: true 18 | storageClass: default 19 | size: 2Gi 20 | 21 | podSecurityPolicy: 22 | enabled: true 23 | 24 | kubeStateMetrics: 25 | enabled: false 26 | 27 | serverFiles: 28 | ## Alerts configuration 29 | ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ 30 | alerts: 31 | groups: 32 | - name: InstanceCountChanged 33 | rules: 34 | - alert: InstanceCountChanged 35 | expr: count(kube_node_labels{node=~"^.*$"}) - count(kube_node_labels{node=~"^.*$"} offset 2m) != 0 36 | labels: 37 | severity: Warning 38 | cluster: eks-demo 39 | annotations: 40 | summary: "Instance Count Changed" 41 | description: "The number of instances has changed. (delta: {{ $value }})" 42 | 43 | - name: InstanceDown 44 | rules: 45 | - alert: InstanceDown 46 | expr: up{job="kubernetes-nodes"} == 0 47 | labels: 48 | severity: Warning 49 | cluster: eks-demo 50 | annotations: 51 | summary: "Instance Down" 52 | description: "The instance({{ $labels.instance }}) is down." 53 | 54 | - name: HighCpuUsage 55 | rules: 56 | - alert: HighCpuUsage 57 | expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="kubernetes-service-endpoints",mode="idle"}[5m])) * 100) > 70 58 | for: 5m 59 | labels: 60 | severity: Warning 61 | cluster: eks-demo 62 | annotations: 63 | summary: "High CPU Usage(> 70%)" 64 | description: "The CPU usage of the instance({{ $labels.instance }}) has exceeded 70 percent for more than 5 minutes." 
65 | 66 | - name: HighMemoryUsage 67 | rules: 68 | - alert: HighMemoryUsage 69 | expr: (node_memory_MemTotal_bytes - node_memory_MemFree_bytes - node_memory_Buffers_bytes - node_memory_Cached_bytes) / node_memory_MemTotal_bytes * 100 > 90 70 | for: 5m 71 | labels: 72 | severity: Warning 73 | cluster: eks-demo 74 | annotations: 75 | summary: "High Memory Usage(> 90%)" 76 | description: "The memory usage of the instance({{ $labels.instance }}) has exceeded 90 percent for more than 5 minutes." 77 | 78 | - name: PodCrashingLooping 79 | rules: 80 | - alert: PodCrashingLooping 81 | expr: round(increase(kube_pod_container_status_restarts_total[30m])) > 0 82 | for: 5m 83 | labels: 84 | severity: Critical 85 | cluster: eks-demo 86 | annotations: 87 | summary: "Pod Crash Looping(> 30m)" 88 | description: "Namespace : {{ $labels.namespace }} Pod : {{ $labels.pod }} -- crash {{ $value }} times" 89 | 90 | - name: KubeNodeNotReady 91 | rules: 92 | - alert: KubeNodeNotReady 93 | expr: kube_node_status_condition{job="kubernetes-service-endpoints",condition="Ready",status="true"} == 0 94 | for: 5m 95 | labels: 96 | severity: Critical 97 | cluster: eks-demo 98 | annotations: 99 | summary: "Kube Node Fail : {{ $labels.condition }}" 100 | description: "Node {{ $labels.node }} is failed. Check node!!" 101 | 102 | - name: AvgResponseTime 103 | rules: 104 | - alert: AvgResponseTime 105 | expr: (sum(rate(nginx_ingress_controller_response_duration_seconds_sum[5m])) by (host) !=0) / (sum(rate(nginx_ingress_controller_response_duration_seconds_count[5m])) by (host) !=0) > 5 106 | for: 5m 107 | labels: 108 | severity: Warning 109 | cluster: eks-demo 110 | annotations: 111 | summary: "Average Response Time(> 5s)" 112 | description: "{{ $labels.host }}'s Average Response Time is over 5sec."
113 | 114 | - name: HPAMaxUsage 115 | rules: 116 | - alert: HPAMaxUsage 117 | expr: (kube_hpa_status_current_replicas) / (kube_hpa_spec_max_replicas != 1) == 1 118 | for: 5m 119 | labels: 120 | severity: Warning 121 | cluster: eks-demo 122 | annotations: 123 | summary: "HPA Max Usage" 124 | description: "{{ $labels.hpa }} is using HPA Max." 125 | 126 | alertmanagerFiles: 127 | alertmanager.yml: 128 | global: 129 | slack_api_url: "" 130 | 131 | route: 132 | group_wait: 30s 133 | group_interval: 5m 134 | repeat_interval: 12h 135 | receiver: slack 136 | 137 | receivers: 138 | - name: slack 139 | slack_configs: 140 | - channel: "#kube-alerts" 141 | send_resolved: true 142 | username: '{{ template "slack.default.username" . }}' 143 | color: '{{ if eq .Status "firing" }}danger{{ else }}good{{ end }}' 144 | title: '{{ template "slack.default.title" . }}' 145 | title_link: '{{ template "slack.default.titlelink" . }}' 146 | pretext: "{{ .CommonAnnotations.summary }}" 147 | text: |- 148 | {{ range .Alerts }} 149 | *Cluster:* {{ .Labels.cluster }} 150 | *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` 151 | *Description:* `{{ .Annotations.description }}` 152 | *Details:* 153 | {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }} 154 | {{ end }} 155 | {{ end }} 156 | fallback: '{{ template "slack.default.fallback" . }}' 157 | icon_emoji: '{{ template "slack.default.iconemoji" . 
}}' 158 | icon_url: '{{ template "slack.default.iconurl" }}' 159 | -------------------------------------------------------------------------------- /eks-charts/values/weave/weave-scope-gatekeeper.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: weave-scope-gatekeeper 2 | 3 | discoveryURL: https://keycloak.demo.mzdev.be/auth/realms/demo 4 | 5 | upstreamURL: http://weave-scope-weave-scope.weave.svc.cluster.local:80 6 | 7 | ClientID: weave-scope 8 | ClientSecret: 5b93b5c3-2337-4002-962a-c7770c770024 9 | 10 | ingress: 11 | enabled: true 12 | annotations: 13 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 14 | kubernetes.io/ingress.class: nginx 15 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 16 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 17 | hosts: 18 | - "weave-scope.demo.mzdev.be" 19 | tls: 20 | - secretName: weave-scope-gatekeeper-tls 21 | hosts: 22 | - "weave-scope.demo.mzdev.be" 23 | -------------------------------------------------------------------------------- /eks-charts/values/weave/weave-scope.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: weave-scope 2 | 3 | weave-scope-frontend: 4 | ingress: 5 | enabled: false 6 | annotations: 7 | cert-manager.io/cluster-issuer: "letsencrypt-issuer" 8 | kubernetes.io/ingress.class: nginx 9 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 10 | nginx.ingress.kubernetes.io/whitelist-source-range: "0.0.0.0/0" 11 | paths: 12 | - / 13 | hosts: 14 | - weave-scope.demo.mzdev.be 15 | tls: 16 | - secretName: weave-scope-tls 17 | hosts: 18 | - weave-scope.demo.mzdev.be 19 | -------------------------------------------------------------------------------- /eks/00-variable.tf: -------------------------------------------------------------------------------- 1 | # variable 2 | 3 | variable "region" { 4 | description = "생성될 리전을 입력 합니다. 
e.g: ap-northeast-2" 5 | default = "ap-northeast-2" 6 | } 7 | 8 | variable "name" { 9 | description = "EKS Cluster 이름을 입력합니다." 10 | default = "eks-demo" 11 | } 12 | 13 | variable "kubernetes_version" { 14 | description = "쿠버네티스 버전을 입력합니다." 15 | default = "1.16" 16 | } 17 | 18 | variable "cluster_log_types" { 19 | description = "CloudWatch 로그를 설정 합니다." 20 | default = ["api", "audit", "authenticator", "controllerManager", "scheduler"] 21 | # api, audit, authenticator, controllerManager, scheduler 22 | } 23 | 24 | variable "cluster_log_retention_in_days" { 25 | description = "로그 저장 기간을 입력 합니다." 26 | default = 14 27 | } 28 | 29 | variable "allow_ip_address" { 30 | description = "접속 허용 IP 목록을 입력 합니다." 31 | default = [ 32 | "10.10.1.0/24", # bastion 33 | # "211.60.50.190/32", # echo "$(curl -sL icanhazip.com)/32" 34 | ] 35 | } 36 | 37 | variable "launch_configuration_enable" { 38 | description = "Launch Configuration 을 생성 할지 선택 합니다." 39 | default = false 40 | } 41 | 42 | variable "launch_template_enable" { 43 | description = "Launch Template 을 생성 할지 선택 합니다." 44 | default = true 45 | } 46 | 47 | variable "launch_each_subnet" { 48 | description = "모든 Subnet 에 생성 할지 선택 합니다." 49 | default = false 50 | } 51 | 52 | variable "associate_public_ip_address" { 53 | description = "공개 IP 를 생성 할지 선택 합니다." 
54 | default = false 55 | } 56 | 57 | variable "instance_type" { 58 | description = "워커 노드 인스턴스 타입" 59 | default = "m5.large" 60 | } 61 | 62 | variable "mixed_instances" { 63 | description = "워커 노드 추가 인스턴스 타입 목록" 64 | default = ["c5.large", "r5.large"] 65 | } 66 | 67 | variable "volume_type" { 68 | description = "워커 노드 볼륨 타입" 69 | default = "gp2" 70 | } 71 | 72 | variable "volume_size" { 73 | description = "워커 노드 볼륨 사이즈" 74 | default = "32" 75 | } 76 | 77 | variable "min" { 78 | description = "워커 노드 오토스케일링그룹 최소값" 79 | default = "2" 80 | } 81 | 82 | variable "max" { 83 | description = "워커 노드 오토스케일링그룹 최대값" 84 | default = "6" 85 | } 86 | 87 | variable "on_demand_base" { 88 | default = "0" 89 | } 90 | 91 | variable "on_demand_rate" { 92 | default = "0" 93 | } 94 | 95 | variable "key_name" { 96 | description = "키페어 이름을 입력 합니다." 97 | default = "nalbam-seoul" 98 | } 99 | 100 | variable "key_path" { 101 | description = "Local 의 키를 사용한다면 경로를 입력 합니다." 102 | default = "" 103 | } 104 | 105 | variable "buckets" { 106 | description = "S3 Bucket 을 생성 한다면 목록으로 입력 합니다." 107 | default = [ 108 | "argo", 109 | "chartmuseum", 110 | "maven-repo", 111 | "registry", 112 | ] 113 | } 114 | 115 | variable "launch_efs_enable" { 116 | description = "EFS 스토리지를 생성 여부를 선택 합니다." 
117 | default = true 118 | } 119 | 120 | variable "root_domain" { 121 | default = "mzdev.be" 122 | } 123 | 124 | variable "base_domain" { 125 | default = "demo.mzdev.be" 126 | } 127 | -------------------------------------------------------------------------------- /eks/02-data.tf: -------------------------------------------------------------------------------- 1 | # data 2 | 3 | data "aws_caller_identity" "current" { 4 | } 5 | -------------------------------------------------------------------------------- /eks/03-locals.tf: -------------------------------------------------------------------------------- 1 | # locals 2 | 3 | locals { 4 | account_id = data.aws_caller_identity.current.account_id 5 | } 6 | 7 | locals { 8 | name = "${var.name}" 9 | 10 | worker = "${local.name}-worker" 11 | 12 | workers = [ 13 | "arn:aws:iam::${local.account_id}:role/${local.worker}", 14 | ] 15 | 16 | map_roles = [ 17 | { 18 | rolearn = "arn:aws:iam::${local.account_id}:role/dev-bastion" 19 | username = "iam-role-eks-bastion" 20 | groups = ["system:masters"] 21 | }, 22 | ] 23 | 24 | map_users = [ 25 | { 26 | userarn = "arn:aws:iam::${local.account_id}:user/jungyoul.yu" 27 | username = "jungyoul.yu" 28 | groups = ["system:masters"] 29 | }, 30 | { 31 | userarn = "arn:aws:iam::${local.account_id}:user/developer" 32 | username = "developer" 33 | groups = [] 34 | }, 35 | { 36 | userarn = "arn:aws:iam::${local.account_id}:user/readonly" 37 | username = "readonly" 38 | groups = [] 39 | }, 40 | ] 41 | 42 | buckets = flatten([ 43 | for bucket in var.buckets : [ 44 | "${var.name}-${bucket}-${local.account_id}" 45 | ] 46 | ]) 47 | } 48 | -------------------------------------------------------------------------------- /eks/04-backend.tf: -------------------------------------------------------------------------------- 1 | # backend 2 | 3 | terraform { 4 | backend "s3" { 5 | region = "ap-northeast-2" 6 | bucket = "terraform-workshop-mzcdev" 7 | key = "eks-demo.tfstate" 8 | dynamodb_table = 
"terraform-workshop-mzcdev" 9 | encrypt = true 10 | } 11 | required_version = ">= 0.12" 12 | } 13 | 14 | # terraform { 15 | # backend "remote" { 16 | # organization = "mzcdev" 17 | # workspaces { 18 | # name = "dev-eks-demo" 19 | # } 20 | # } 21 | # } 22 | 23 | data "terraform_remote_state" "vpc" { 24 | backend = "s3" 25 | config = { 26 | region = "ap-northeast-2" 27 | bucket = "terraform-workshop-mzcdev" 28 | key = "vpc-demo.tfstate" 29 | } 30 | } 31 | 32 | # data "terraform_remote_state" "vpc" { 33 | # backend = "remote" 34 | # config = { 35 | # organization = "mzcdev" 36 | # workspaces = { 37 | # name = "dev-vpc-demo" 38 | # } 39 | # } 40 | # } 41 | -------------------------------------------------------------------------------- /eks/05-provider.tf: -------------------------------------------------------------------------------- 1 | # provider 2 | 3 | provider "aws" { 4 | region = var.region 5 | } 6 | -------------------------------------------------------------------------------- /eks/30-eks.tf: -------------------------------------------------------------------------------- 1 | # eks 2 | 3 | module "eks" { 4 | source = "github.com/mzcdev/terraform-aws-eks?ref=v0.12.50" 5 | # source = "../../../terraform-aws-eks" 6 | 7 | region = var.region 8 | name = local.name 9 | 10 | kubernetes_version = var.kubernetes_version 11 | 12 | vpc_id = data.terraform_remote_state.vpc.outputs.vpc_id 13 | subnet_ids = data.terraform_remote_state.vpc.outputs.private_subnet_ids 14 | 15 | cluster_log_types = var.cluster_log_types 16 | cluster_log_retention_in_days = var.cluster_log_retention_in_days 17 | 18 | allow_ip_address = var.allow_ip_address 19 | 20 | workers = local.workers 21 | 22 | map_roles = local.map_roles 23 | map_users = local.map_users 24 | 25 | tags = {} 26 | } 27 | -------------------------------------------------------------------------------- /eks/50-worker.tf: -------------------------------------------------------------------------------- 1 | # worker 2 | 3 | 
module "worker" { 4 | source = "github.com/mzcdev/terraform-aws-eks-worker?ref=v0.12.33" 5 | # source = "../../terraform-aws-eks-worker" 6 | 7 | region = var.region 8 | name = local.worker 9 | 10 | cluster_name = module.eks.name 11 | cluster_endpoint = module.eks.endpoint 12 | cluster_certificate_authority = module.eks.certificate_authority 13 | cluster_security_group_id = module.eks.security_group_id 14 | kubernetes_version = module.eks.version 15 | 16 | vpc_id = data.terraform_remote_state.vpc.outputs.vpc_id 17 | 18 | subnet_ids = data.terraform_remote_state.vpc.outputs.private_subnet_ids 19 | subnet_azs = data.terraform_remote_state.vpc.outputs.private_subnet_azs 20 | 21 | allow_ip_address = var.allow_ip_address 22 | 23 | launch_configuration_enable = var.launch_configuration_enable 24 | launch_template_enable = var.launch_template_enable 25 | 26 | launch_each_subnet = var.launch_each_subnet 27 | associate_public_ip_address = var.associate_public_ip_address 28 | 29 | instance_type = var.instance_type 30 | mixed_instances = var.mixed_instances 31 | 32 | volume_type = var.volume_type 33 | volume_size = var.volume_size 34 | 35 | min = var.min 36 | max = var.max 37 | 38 | on_demand_base = var.on_demand_base 39 | on_demand_rate = var.on_demand_rate 40 | 41 | key_name = var.key_name 42 | key_path = var.key_path 43 | 44 | tags = {} 45 | } 46 | -------------------------------------------------------------------------------- /eks/70-acm.tf: -------------------------------------------------------------------------------- 1 | # acm 2 | 3 | data "aws_route53_zone" "this" { 4 | count = var.root_domain != "" ? 1 : 0 5 | 6 | name = var.root_domain 7 | } 8 | 9 | resource "aws_acm_certificate" "this" { 10 | count = var.root_domain != "" ? var.base_domain != "" ? 1 : 0 : 0 11 | 12 | domain_name = "*.${var.base_domain}" 13 | 14 | validation_method = "DNS" 15 | } 16 | 17 | resource "aws_route53_record" "this" { 18 | count = var.root_domain != "" ? var.base_domain != "" ? 
1 : 0 : 0 19 | 20 | zone_id = data.aws_route53_zone.this[0].id 21 | name = aws_acm_certificate.this[0].domain_validation_options[0].resource_record_name 22 | type = aws_acm_certificate.this[0].domain_validation_options[0].resource_record_type 23 | ttl = 60 24 | 25 | records = [ 26 | aws_acm_certificate.this[0].domain_validation_options[0].resource_record_value, 27 | ] 28 | } 29 | 30 | resource "aws_acm_certificate_validation" "this" { 31 | count = var.root_domain != "" ? var.base_domain != "" ? 1 : 0 : 0 32 | 33 | certificate_arn = aws_acm_certificate.this[0].arn 34 | 35 | validation_record_fqdns = [ 36 | aws_route53_record.this[0].fqdn, 37 | ] 38 | } 39 | 40 | output "acm_root" { 41 | value = var.root_domain 42 | } 43 | 44 | output "acm_base" { 45 | value = var.base_domain 46 | } 47 | 48 | output "acm_arn" { 49 | value = aws_acm_certificate.this.*.arn 50 | } 51 | -------------------------------------------------------------------------------- /eks/70-buckets.tf: -------------------------------------------------------------------------------- 1 | # buckets 2 | 3 | resource "aws_s3_bucket" "this" { 4 | count = length(local.buckets) 5 | 6 | bucket = local.buckets[count.index] 7 | 8 | acl = "private" 9 | 10 | force_destroy = true 11 | 12 | tags = { 13 | "Name" = local.buckets[count.index] 14 | "KubernetesCluster" = local.name 15 | "kubernetes.io/cluster/${local.name}" = "owned" 16 | } 17 | } 18 | 19 | output "bucket_names" { 20 | value = aws_s3_bucket.this.*.bucket 21 | } 22 | -------------------------------------------------------------------------------- /eks/70-efs.tf: -------------------------------------------------------------------------------- 1 | # efs 2 | 3 | resource "aws_efs_file_system" "this" { 4 | count = var.launch_efs_enable ? 
1 : 0 5 | 6 | creation_token = local.worker 7 | 8 | tags = { 9 | "Name" = local.worker 10 | "KubernetesCluster" = local.name 11 | "kubernetes.io/cluster/${local.name}" = "owned" 12 | } 13 | } 14 | 15 | resource "aws_efs_mount_target" "this" { 16 | count = var.launch_efs_enable ? length(data.terraform_remote_state.vpc.outputs.private_subnet_ids) : 0 17 | 18 | file_system_id = aws_efs_file_system.this[0].id 19 | 20 | subnet_id = data.terraform_remote_state.vpc.outputs.private_subnet_ids[count.index] 21 | 22 | security_groups = [aws_security_group.worker-efs[0].id] 23 | } 24 | 25 | resource "aws_security_group" "worker-efs" { 26 | count = var.launch_efs_enable ? 1 : 0 27 | 28 | name = "${local.worker}-efs" 29 | description = "Security group for efs in the cluster" 30 | 31 | vpc_id = data.terraform_remote_state.vpc.outputs.vpc_id 32 | 33 | egress { 34 | from_port = 0 35 | to_port = 0 36 | protocol = "-1" 37 | cidr_blocks = ["0.0.0.0/0"] 38 | } 39 | 40 | tags = { 41 | "Name" = "${local.worker}-efs" 42 | "KubernetesCluster" = local.name 43 | "kubernetes.io/cluster/${local.name}" = "owned" 44 | } 45 | } 46 | 47 | resource "aws_security_group_rule" "worker-efs" { 48 | count = var.launch_efs_enable ? 
1 : 0 49 | 50 | description = "Allow worker to communicate with efs" 51 | security_group_id = aws_security_group.worker-efs[0].id 52 | source_security_group_id = module.worker.security_group_id 53 | from_port = 2049 54 | to_port = 2049 55 | protocol = "tcp" 56 | type = "ingress" 57 | } 58 | 59 | output "efs_ids" { 60 | value = aws_efs_file_system.this.*.id 61 | } 62 | -------------------------------------------------------------------------------- /eks/99-output.tf: -------------------------------------------------------------------------------- 1 | # output 2 | 3 | output "region" { 4 | value = var.region 5 | } 6 | 7 | output "eks_name" { 8 | value = module.eks.name 9 | } 10 | 11 | output "eks_version" { 12 | value = module.eks.version 13 | } 14 | 15 | output "eks_endpoint" { 16 | value = module.eks.endpoint 17 | } 18 | 19 | # output "eks_certificate_authority" { 20 | # value = module.eks.certificate_authority 21 | # } 22 | 23 | # output "eks_token" { 24 | # value = module.eks.token 25 | # } 26 | 27 | output "eks_oidc_issuer" { 28 | value = module.eks.oidc_issuer 29 | } 30 | 31 | # output "eks_oidc_issuer_arn" { 32 | # value = module.eks.oidc_issuer_arn 33 | # } 34 | 35 | output "eks_iam_role_arn" { 36 | value = module.eks.iam_role_arn 37 | } 38 | 39 | # output "eks_iam_role_name" { 40 | # value = module.eks.iam_role_name 41 | # } 42 | 43 | output "eks_security_group_id" { 44 | value = module.eks.security_group_id 45 | } 46 | 47 | output "worker_iam_role_arn" { 48 | value = module.worker.iam_role_arn 49 | } 50 | 51 | # output "worker_iam_role_name" { 52 | # value = module.worker.iam_role_name 53 | # } 54 | 55 | output "worker_security_group_id" { 56 | value = module.worker.security_group_id 57 | } 58 | 59 | # output "update_kubeconfig" { 60 | # value = "aws eks update-kubeconfig --name ${module.eks.name} --alias ${module.eks.name}" 61 | # } 62 | -------------------------------------------------------------------------------- /instance/main.tf:
-------------------------------------------------------------------------------- 1 | 2 | resource "aws_instance" "this" { 3 | ami = "ami-0bea7fd38fabe821a" 4 | instance_type = "t2.micro" 5 | 6 | tags = { 7 | Name = var.name 8 | } 9 | } 10 | 11 | output "id" { 12 | value = aws_instance.this.id 13 | } 14 | -------------------------------------------------------------------------------- /lambda/00-variable.tf: -------------------------------------------------------------------------------- 1 | # variable 2 | 3 | variable "region" { 4 | description = "생성될 리전." 5 | type = string 6 | default = "ap-northeast-2" 7 | } 8 | 9 | variable "name" { 10 | description = "서비스 이름." 11 | type = string 12 | default = "lambda-demo" 13 | } 14 | 15 | variable "stage" { 16 | description = "서비스 영역." 17 | type = string 18 | default = "dev" 19 | } 20 | 21 | variable "runtime" { 22 | description = "람다 펑션이 실행될 런타임." 23 | type = string 24 | default = "nodejs10.x" 25 | } 26 | 27 | variable "handler" { 28 | description = "람다 펑션이 실행될 핸들러 이름." 29 | type = string 30 | default = "index.handler" 31 | } 32 | 33 | variable "memory_size" { 34 | description = "람다 펑션이 실행될 메모리 사이즈." 35 | type = string 36 | default = "1024" 37 | } 38 | 39 | variable "timeout" { 40 | description = "람다 펑션의 타임아웃 값." 41 | type = string 42 | default = "5" 43 | } 44 | 45 | variable "s3_bucket" { 46 | description = "배포 패키지가 저장될 버켓 이름." 47 | type = string 48 | default = "terraform-workshop-seoul" 49 | } 50 | 51 | variable "s3_source" { 52 | description = "복사될 배포 패키지가 있는 경로." 53 | type = string 54 | default = "package/lambda.zip" 55 | } 56 | 57 | variable "s3_key" { 58 | description = "배포 패키지가 배포될 경로." 59 | type = string 60 | default = "package/lambda.zip" 61 | } 62 | 63 | variable "env_vars" { 64 | description = "람다 펑션에서 사용될 환경 변수 맵." 65 | type = map(string) 66 | default = { 67 | "PROFILE" = "dev", 68 | } 69 | } 70 | 71 | variable "path_part" { 72 | description = "The last path segment of this API resource." 
73 | type = string 74 | default = "{proxy+}" 75 | } 76 | 77 | variable "http_methods" { 78 | description = "The HTTP Methods (HEAD, DELETE, POST, GET, OPTIONS, PUT, PATCH)" 79 | type = list(string) 80 | default = [ 81 | "ANY", 82 | ] 83 | } 84 | 85 | variable "root_domain" { 86 | description = "Route53 에 등록된 도메인 명" 87 | type = string 88 | default = "mzdev.be" 89 | } 90 | 91 | variable "domain_name" { 92 | description = "람다 펑션이 서비스 될 도메인 명" 93 | type = string 94 | default = "demo-api-workshop.mzdev.be" 95 | } 96 | -------------------------------------------------------------------------------- /lambda/02-data.tf: -------------------------------------------------------------------------------- 1 | # data 2 | 3 | data "aws_caller_identity" "current" { 4 | } 5 | -------------------------------------------------------------------------------- /lambda/04-backend.tf: -------------------------------------------------------------------------------- 1 | # backend 2 | 3 | terraform { 4 | backend "s3" { 5 | region = "ap-northeast-2" 6 | bucket = "terraform-workshop-seoul" 7 | key = "lambda-demo.tfstate" 8 | dynamodb_table = "terraform-workshop-seoul" 9 | encrypt = true 10 | } 11 | } 12 | 13 | # terraform { 14 | # backend "remote" { 15 | # organization = "mzcdev" 16 | # workspaces { 17 | # name = "dev-lambda-demo" 18 | # } 19 | # } 20 | # } 21 | -------------------------------------------------------------------------------- /lambda/05-provider.tf: -------------------------------------------------------------------------------- 1 | # provider 2 | 3 | provider "aws" { 4 | region = var.region 5 | } 6 | -------------------------------------------------------------------------------- /lambda/20-api-gateway.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_api_gateway_rest_api" "default" { 3 | name = "${var.stage}-${var.name}" 4 | } 5 | 6 | resource "aws_api_gateway_resource" "default" { 7 | rest_api_id = 
aws_api_gateway_rest_api.default.id 8 | parent_id = aws_api_gateway_rest_api.default.root_resource_id 9 | path_part = var.path_part # {proxy+} 10 | } 11 | 12 | resource "aws_api_gateway_method" "default" { 13 | count = length(var.http_methods) 14 | 15 | rest_api_id = aws_api_gateway_rest_api.default.id 16 | resource_id = aws_api_gateway_resource.default.id 17 | http_method = element(var.http_methods, count.index) 18 | authorization = "NONE" 19 | 20 | depends_on = [aws_api_gateway_resource.default] 21 | } 22 | 23 | resource "aws_api_gateway_integration" "default" { 24 | count = length(var.http_methods) 25 | 26 | type = "AWS_PROXY" 27 | rest_api_id = aws_api_gateway_rest_api.default.id 28 | resource_id = aws_api_gateway_resource.default.id 29 | http_method = element(var.http_methods, count.index) 30 | uri = aws_lambda_function.default.invoke_arn 31 | 32 | # AWS lambdas can only be invoked with the POST method 33 | integration_http_method = "POST" 34 | 35 | depends_on = [aws_api_gateway_method.default] 36 | } 37 | 38 | resource "aws_api_gateway_deployment" "default" { 39 | rest_api_id = aws_api_gateway_rest_api.default.id 40 | stage_name = var.stage 41 | 42 | depends_on = [aws_api_gateway_integration.default] 43 | } 44 | -------------------------------------------------------------------------------- /lambda/33-iam-role.tf: -------------------------------------------------------------------------------- 1 | 2 | # https://docs.aws.amazon.com/ko_kr/lambda/latest/dg/policy-templates.html 3 | 4 | data "aws_iam_policy_document" "lambda-role" { 5 | statement { 6 | sid = "" 7 | actions = [ 8 | "sts:AssumeRole", 9 | ] 10 | principals { 11 | type = "Service" 12 | identifiers = [ 13 | "lambda.amazonaws.com", 14 | ] 15 | } 16 | effect = "Allow" 17 | } 18 | } 19 | 20 | data "aws_iam_policy_document" "lambda-policy" { 21 | statement { 22 | sid = "" 23 | actions = [ 24 | "lambda:InvokeFunction", 25 | ] 26 | resources = [ 27 | "arn:aws:lambda:*", 28 | ] 29 | effect = "Allow" 30 | } 
31 | statement { 32 | sid = "" 33 | actions = [ 34 | "logs:CreateLogGroup", 35 | "logs:CreateLogStream", 36 | "logs:PutLogEvents", 37 | ] 38 | resources = [ 39 | "arn:aws:logs:*", 40 | ] 41 | effect = "Allow" 42 | } 43 | statement { 44 | sid = "" 45 | actions = [ 46 | "s3:GetObject", 47 | "s3:PutObject", 48 | "s3:PutObjectAcl", 49 | "s3:PutObjectTagging", 50 | "s3:PutObjectVersionAcl", 51 | "s3:PutObjectVersionTagging", 52 | "s3:DeleteObject", 53 | ] 54 | resources = [ 55 | "arn:aws:s3:::*", 56 | ] 57 | effect = "Allow" 58 | } 59 | statement { 60 | sid = "" 61 | actions = [ 62 | "dynamodb:DeleteItem", 63 | "dynamodb:GetItem", 64 | "dynamodb:PutItem", 65 | "dynamodb:Scan", 66 | "dynamodb:UpdateItem", 67 | ] 68 | resources = [ 69 | "arn:aws:dynamodb:*", 70 | ] 71 | effect = "Allow" 72 | } 73 | } 74 | 75 | resource "aws_iam_role" "default" { 76 | name = "terraform-${var.stage}-${var.name}-lambda-role-${var.region}" 77 | assume_role_policy = data.aws_iam_policy_document.lambda-role.json 78 | } 79 | 80 | resource "aws_iam_role_policy" "default" { 81 | name = "terraform-${var.stage}-${var.name}-lambda-policy-${var.region}" 82 | role = aws_iam_role.default.id 83 | policy = data.aws_iam_policy_document.lambda-policy.json 84 | } 85 | -------------------------------------------------------------------------------- /lambda/35-lambda.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_s3_bucket_object" "default" { 3 | bucket = var.s3_bucket 4 | source = var.s3_source 5 | key = var.s3_key 6 | } 7 | 8 | resource "aws_lambda_function" "default" { 9 | function_name = "${var.stage}-${var.name}" 10 | 11 | s3_bucket = var.s3_bucket 12 | s3_key = var.s3_key 13 | 14 | runtime = var.runtime 15 | handler = var.handler 16 | 17 | memory_size = var.memory_size 18 | timeout = var.timeout 19 | 20 | role = aws_iam_role.default.arn 21 | 22 | depends_on = [ 23 | aws_iam_role.default, 24 | aws_iam_role_policy.default, 25 | 
aws_s3_bucket_object.default, 26 | ] 27 | 28 | environment { 29 | variables = var.env_vars 30 | } 31 | } 32 | 33 | resource "aws_lambda_permission" "default" { 34 | action = "lambda:InvokeFunction" 35 | function_name = aws_lambda_function.default.arn 36 | principal = "apigateway.amazonaws.com" 37 | statement_id = "AllowExecutionFromAPIGateway" 38 | 39 | # More: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-control-access-using-iam-policies-to-invoke-api.html 40 | //source_arn = "${aws_api_gateway_deployment.default.execution_arn}/${aws_api_gateway_method.default_get_req.http_method}${aws_api_gateway_resource.default.path}" 41 | //source_arn = "arn:aws:execute-api:${var.region}:${data.aws_caller_identity.current.account_id}:${aws_api_gateway_rest_api.default.id}/*/${aws_api_gateway_method.default.http_method}${aws_api_gateway_resource.default.path}" 42 | source_arn = "arn:aws:execute-api:${var.region}:${data.aws_caller_identity.current.account_id}:${aws_api_gateway_rest_api.default.id}/*/*" 43 | } 44 | -------------------------------------------------------------------------------- /lambda/52-acm.tf: -------------------------------------------------------------------------------- 1 | 2 | module "domain" { 3 | source = "../acm" 4 | 5 | root_domain = var.root_domain 6 | domain_name = var.domain_name 7 | 8 | acm_certificate = true 9 | } 10 | -------------------------------------------------------------------------------- /lambda/53-route53.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_api_gateway_domain_name" "default" { 3 | domain_name = var.domain_name 4 | certificate_arn = module.domain.certificate_arn 5 | } 6 | 7 | resource "aws_api_gateway_base_path_mapping" "default" { 8 | api_id = aws_api_gateway_rest_api.default.id 9 | stage_name = aws_api_gateway_deployment.default.stage_name 10 | domain_name = aws_api_gateway_domain_name.default.domain_name 11 | } 12 | 13 | resource 
"aws_route53_record" "default" { 14 | zone_id = module.domain.zone_id 15 | 16 | name = var.domain_name 17 | type = "A" 18 | 19 | alias { 20 | name = aws_api_gateway_domain_name.default.cloudfront_domain_name 21 | zone_id = aws_api_gateway_domain_name.default.cloudfront_zone_id 22 | evaluate_target_health = "false" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /lambda/99-output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "url" { 3 | value = "https://${var.domain_name}/demos" 4 | } 5 | 6 | output "invoke_url" { 7 | value = aws_api_gateway_deployment.default.invoke_url 8 | } 9 | -------------------------------------------------------------------------------- /lambda/package/lambda.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mzcdev/terraform-env-workshop/7a0648d32376025fcfa3739ebaef7048c1733b70/lambda/package/lambda.zip -------------------------------------------------------------------------------- /lambda/src/index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports.handler = (event, context, callback) => { 4 | console.log(event); 5 | const res = { 6 | statusCode: 200, 7 | headers: { 8 | 'Access-Control-Allow-Origin': '*', // Required for CORS support to work 9 | }, 10 | body: event.body 11 | }; 12 | callback(null, res); 13 | }; 14 | -------------------------------------------------------------------------------- /lambda/src/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "lambda-workshop", 3 | "version": "1.0.0", 4 | "private": true, 5 | "scripts": { 6 | "build": "npm install -s" 7 | }, 8 | "dependencies": {} 9 | } 10 | -------------------------------------------------------------------------------- /replace.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | OS_NAME="$(uname | awk '{print tolower($0)}')" 4 | 5 | # variable 6 | export ACCOUNT_ID=$(aws sts get-caller-identity | jq .Account -r) 7 | 8 | export REGION="ap-northeast-2" 9 | export BUCKET="terraform-workshop-${1:-${ACCOUNT_ID}}" 10 | 11 | command -v tput > /dev/null && TPUT=true 12 | 13 | _echo() { 14 | if [ "${TPUT}" != "" ] && [ "$2" != "" ]; then 15 | echo -e "$(tput setaf $2)$1$(tput sgr0)" 16 | else 17 | echo -e "$1" 18 | fi 19 | } 20 | 21 | _result() { 22 | echo 23 | _echo "# $@" 4 24 | } 25 | 26 | _command() { 27 | echo 28 | _echo "$ $@" 3 29 | } 30 | 31 | _success() { 32 | echo 33 | _echo "+ $@" 2 34 | exit 0 35 | } 36 | 37 | _error() { 38 | echo 39 | _echo "- $@" 1 40 | exit 1 41 | } 42 | 43 | _replace() { 44 | if [ "${OS_NAME}" == "darwin" ]; then 45 | sed -i "" -e "$1" "$2" 46 | else 47 | sed -i -e "$1" "$2" 48 | fi 49 | } 50 | 51 | _find_replace() { 52 | if [ "${OS_NAME}" == "darwin" ]; then 53 | find . -name "$2" -exec sed -i "" -e "$1" {} \; 54 | else 55 | find . -name "$2" -exec sed -i -e "$1" {} \; 56 | fi 57 | } 58 | 59 | _main() { 60 | _result "ACCOUNT_ID = ${ACCOUNT_ID}" 61 | 62 | _result "REGION = ${REGION}" 63 | _result "BUCKET = ${BUCKET}" 64 | 65 | _result "ROOT_DOMAIN = ${ROOT_DOMAIN}" 66 | _result "BASE_DOMAIN = ${BASE_DOMAIN}" 67 | 68 | if [ "${BASE_DOMAIN}" == "" ]; then 69 | _error "BASE_DOMAIN is empty." 
70 | fi 71 | if [ "${ROOT_DOMAIN}" == "" ]; then _error "ROOT_DOMAIN is empty."; fi # an empty ROOT_DOMAIN would silently strip "mzdev.be" from every *.tf below 72 | # replace 73 | _find_replace "s/terraform-workshop-[[:alnum:]]*/${BUCKET}/g" "*.tf" 74 | # literal dots are escaped so sed matches the exact domain, not arbitrary characters 75 | _find_replace "s/demo\.mzdev\.be/${BASE_DOMAIN}/g" "*.tf" 76 | _find_replace "s/demo\.mzdev\.be/${BASE_DOMAIN}/g" "*.yaml" 77 | _find_replace "s/demo\.mzdev\.be/${BASE_DOMAIN}/g" "*.json" 78 | # the subdomain pass above must run before this bare-domain pass, or it would be corrupted 79 | _find_replace "s/mzdev\.be/${ROOT_DOMAIN}/g" "*.tf" 80 | 81 | _find_replace "s/ADMIN_USERNAME/${ADMIN_USERNAME}/g" "*.tf" 82 | _find_replace "s/ADMIN_PASSWORD/${ADMIN_PASSWORD}/g" "*.tf" 83 | 84 | _find_replace "s/GOOGLE_CLIENT_ID/${GOOGLE_CLIENT_ID}/g" "*.json" 85 | _find_replace "s/GOOGLE_CLIENT_SECRET/${GOOGLE_CLIENT_SECRET}/g" "*.json" 86 | 87 | _find_replace "s|SLACK_TOKEN|${SLACK_TOKEN}|g" "*.tf" 88 | 89 | # create s3 bucket (idempotent: skipped when a bucket matching the name already exists) 90 | COUNT=$(aws s3 ls | grep "${BUCKET}" | wc -l | xargs) 91 | if [ "x${COUNT}" == "x0" ]; then 92 | _command "aws s3 mb s3://${BUCKET}" 93 | aws s3 mb s3://${BUCKET} --region ${REGION} 94 | fi 95 | 96 | # create dynamodb table used by the S3 backend for state locking (LockID hash key) 97 | COUNT=$(aws dynamodb list-tables | jq -r .TableNames | grep "${BUCKET}" | wc -l | xargs) 98 | if [ "x${COUNT}" == "x0" ]; then 99 | _command "aws dynamodb create-table --table-name ${BUCKET}" 100 | aws dynamodb create-table \ 101 | --table-name ${BUCKET} \ 102 | --attribute-definitions AttributeName=LockID,AttributeType=S \ 103 | --key-schema AttributeName=LockID,KeyType=HASH \ 104 | --provisioned-throughput ReadCapacityUnits=1,WriteCapacityUnits=1 \ 105 | --region ${REGION} | jq . 106 | fi 107 | } 108 | 109 | _main 110 | 111 | _success 112 | -------------------------------------------------------------------------------- /vpc/00-variable.tf: -------------------------------------------------------------------------------- 1 | # variable 2 | 3 | variable "region" { 4 | description = "생성될 리전을 입력 합니다. e.g: ap-northeast-2" 5 | default = "ap-northeast-2" 6 | } 7 | 8 | variable "name" { 9 | description = "VPC 이름을 입력합니다."
10 | default = "vpc-demo" 11 | } 12 | # vpc_id: set to reuse an existing VPC; leave empty to create a new one. 13 | variable "vpc_id" { 14 | description = "이미 만들어진 VPC 가 있으면 ID 를 입력 합니다.." 15 | default = "" 16 | } 17 | 18 | variable "vpc_cidr" { 19 | description = "VPC 사이더를 입력합니다." 20 | default = "10.10.0.0/16" 21 | } 22 | 23 | variable "single_route_table" { 24 | description = "Subnet 마다 Route Table 을 설정 할지 선택 합니다." 25 | default = false 26 | } 27 | 28 | variable "enable_nat_gateway" { 29 | description = "Private Subnet 을 NAT Gateway 로 연결 할지 선택 합니다." 30 | default = true 31 | } 32 | 33 | variable "single_nat_gateway" { 34 | description = "Private Subnet 마다 NAT Gateway 을 연결 할지 선택 합니다." 35 | default = true 36 | } 37 | # Defaults: one public and one private /24 per AZ (a/b/c) in ap-northeast-2, carved from vpc_cidr. 38 | variable "public_subnets" { 39 | description = "생성될 공개 서브넷 목록을 입력합니다." 40 | # type = list(object({ 41 | # zone = string 42 | # cidr = string 43 | # tags = map 44 | # })) 45 | default = [ 46 | { 47 | name = "public-a" 48 | zone = "ap-northeast-2a" 49 | cidr = "10.10.1.0/24" 50 | tags = {} 51 | }, 52 | { 53 | name = "public-b" 54 | zone = "ap-northeast-2b" 55 | cidr = "10.10.2.0/24" 56 | tags = {} 57 | }, 58 | { 59 | name = "public-c" 60 | zone = "ap-northeast-2c" 61 | cidr = "10.10.3.0/24" 62 | tags = {} 63 | }, 64 | ] 65 | } 66 | 67 | variable "private_subnets" { 68 | description = "생성될 비공개 서브넷 목록을 입력합니다." 69 | # type = list(object({ 70 | # zone = string 71 | # cidr = string 72 | # tags = map 73 | # })) 74 | default = [ 75 | { 76 | name = "private-a" 77 | zone = "ap-northeast-2a" 78 | cidr = "10.10.4.0/24" 79 | tags = {} 80 | }, 81 | { 82 | name = "private-b" 83 | zone = "ap-northeast-2b" 84 | cidr = "10.10.5.0/24" 85 | tags = {} 86 | }, 87 | { 88 | name = "private-c" 89 | zone = "ap-northeast-2c" 90 | cidr = "10.10.6.0/24" 91 | tags = {} 92 | }, 93 | ] 94 | } 95 | 96 | variable "tags" { 97 | description = "생성되는 모든 리소스에 Tag 가 입력 됩니다."
98 | default = { 99 | "kubernetes.io/cluster/eks-demo" = "shared" # NOTE(review): should match the EKS cluster name — confirm against the eks stack 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /vpc/04-backend.tf: -------------------------------------------------------------------------------- 1 | # backend — "terraform-workshop-mzcdev" is a placeholder; replace.sh rewrites terraform-workshop-* to the account-specific name 2 | 3 | terraform { 4 | backend "s3" { 5 | region = "ap-northeast-2" 6 | bucket = "terraform-workshop-mzcdev" 7 | key = "vpc-demo.tfstate" 8 | dynamodb_table = "terraform-workshop-mzcdev" 9 | encrypt = true 10 | } 11 | required_version = ">= 0.12" 12 | } 13 | 14 | # terraform { 15 | # backend "remote" { 16 | # organization = "mzcdev" 17 | # workspaces { 18 | # name = "dev-vpc-demo" 19 | # } 20 | # } 21 | # } 22 | -------------------------------------------------------------------------------- /vpc/05-provider.tf: -------------------------------------------------------------------------------- 1 | # provider 2 | 3 | provider "aws" { 4 | region = var.region 5 | } 6 | -------------------------------------------------------------------------------- /vpc/20-main.tf: -------------------------------------------------------------------------------- 1 | # vpc 2 | # Module pinned to tag v0.12.41; the commented source points at a local checkout for development. 3 | module "vpc" { 4 | source = "github.com/mzcdev/terraform-aws-vpc?ref=v0.12.41" 5 | # source = "../../../terraform-aws-vpc" 6 | 7 | region = var.region 8 | name = var.name 9 | 10 | vpc_id = var.vpc_id 11 | vpc_cidr = var.vpc_cidr 12 | 13 | single_route_table = var.single_route_table 14 | 15 | enable_nat_gateway = var.enable_nat_gateway 16 | single_nat_gateway = var.single_nat_gateway 17 | 18 | public_subnets = var.public_subnets 19 | private_subnets = var.private_subnets 20 | 21 | tags = var.tags 22 | } 23 | -------------------------------------------------------------------------------- /vpc/99-output.tf: -------------------------------------------------------------------------------- 1 | # output 2 | 3 | output "region" { 4 | value = var.region 5 | } 6 | 7 | output "vpc_id" { 8 | value = module.vpc.vpc_id 9 | } 10 | 11 | output 
"public_subnet_ids" { 12 | value = module.vpc.public_subnet_ids 13 | } 14 | 15 | output "public_subnet_azs" { 16 | value = module.vpc.public_subnet_azs 17 | } 18 | 19 | output "public_subnet_cidr" { 20 | value = module.vpc.public_subnet_cidr 21 | } 22 | 23 | output "public_route_table_ids" { 24 | value = module.vpc.public_route_table_ids 25 | } 26 | 27 | output "private_subnet_ids" { 28 | value = module.vpc.private_subnet_ids 29 | } 30 | 31 | output "private_subnet_azs" { 32 | value = module.vpc.private_subnet_azs 33 | } 34 | 35 | output "private_subnet_cidr" { 36 | value = module.vpc.private_subnet_cidr 37 | } 38 | 39 | output "private_route_table_ids" { 40 | value = module.vpc.private_route_table_ids 41 | } 42 | 43 | output "nat_gateway_ids" { 44 | value = module.vpc.nat_gateway_ids 45 | } 46 | 47 | output "nat_gateway_ips" { 48 | value = module.vpc.nat_gateway_ips 49 | } 50 | --------------------------------------------------------------------------------