├── modules
├── kubernetes-dashboard
│ ├── output.tf
│ ├── variables.tf
│ └── main.tf
├── prometheus
│ ├── variables.tf
│ ├── output.tf
│ └── main.tf
├── registry
│ ├── variables.tf
│ ├── output.tf
│ └── main.tf
├── nginx-ingress
│ ├── variables.tf
│ ├── output.tf
│ └── main.tf
├── cert-manager
│ ├── output.tf
│ ├── modules
│ │ ├── crds
│ │ │ ├── dep.tf
│ │ │ └── main.tf
│ │ └── issuers
│ │ │ ├── dep.tf
│ │ │ ├── variables.tf
│ │ │ ├── output.tf
│ │ │ └── main.tf
│ ├── variables.tf
│ └── main.tf
├── cluster
│ ├── dep.tf
│ ├── modules
│ │ └── node_groups
│ │ │ ├── output.tf
│ │ │ ├── variables.tf
│ │ │ └── main.tf
│ ├── output.tf
│ ├── main.tf
│ └── variables.tf
├── secrets
│ ├── variables.tf
│ └── main.tf
├── admins
│ ├── output.tf
│ ├── variables.tf
│ └── main.tf
├── nfs-server-provisioner
│ ├── output.tf
│ ├── variables.tf
│ └── main.tf
├── elasticsearch
│ ├── modules
│ │ ├── logstash
│ │ │ ├── output.tf
│ │ │ ├── variables.tf
│ │ │ └── main.tf
│ │ ├── filebeat
│ │ │ ├── variables.tf
│ │ │ └── main.tf
│ │ ├── cluster
│ │ │ ├── output.tf
│ │ │ ├── variables.tf
│ │ │ └── main.tf
│ │ ├── kibana
│ │ │ ├── variables.tf
│ │ │ └── main.tf
│ │ └── operator
│ │ │ └── main.tf
│ ├── output.tf
│ ├── variables.tf
│ └── main.tf
├── iam
│ ├── output.tf
│ ├── variables.tf
│ ├── dep.tf
│ └── main.tf
└── vpc
│ ├── output.tf
│ ├── variables.tf
│ └── main.tf
├── common
├── basic-auth
│ ├── output.tf
│ ├── variables.tf
│ ├── main.tf
│ └── scripts
│ │ └── htpasswd.sh
└── external-name-service
│ ├── variables.tf
│ └── main.tf
├── .gitignore
├── get-yc-cli.sh
├── files.tf
├── terraform.tfvars.example
├── output.tf
├── variables.tf
├── get-kubectl-provider.sh
├── LICENSE
├── docs
└── variables.md
├── main.tf
└── README.md

--------------------------------------------------------------------------------
/modules/kubernetes-dashboard/output.tf:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modules/prometheus/variables.tf:
--------------------------------------------------------------------------------
1 | variable "configs" {}
2 |
--------------------------------------------------------------------------------
/modules/registry/variables.tf:
--------------------------------------------------------------------------------
1 | variable "registry_name" {
2 | type = string
3 | }
4 |
--------------------------------------------------------------------------------
/modules/nginx-ingress/variables.tf:
--------------------------------------------------------------------------------
1 | variable "node_selector" {
2 | type = map(string)
3 | }
4 |
--------------------------------------------------------------------------------
/common/basic-auth/output.tf:
--------------------------------------------------------------------------------
1 | output "auth" {
2 | value = data.external.basic-auth.result.auth
3 | }
4 |
--------------------------------------------------------------------------------
/modules/cert-manager/output.tf:
--------------------------------------------------------------------------------
1 | output "cluster_issuers" {
2 | value = module.issuers.cluster_issuers
3 | }
4 |
--------------------------------------------------------------------------------
/modules/cluster/dep.tf:
--------------------------------------------------------------------------------
1 | variable "dep" {
2 | default = []
3 | }
4 | output "req" {
5 | value = []
6 | }
7 |
--------------------------------------------------------------------------------
/modules/registry/output.tf:
--------------------------------------------------------------------------------
1 | output "registry_id" {
2 | value = yandex_container_registry.registry.id
3 | }
4 |
--------------------------------------------------------------------------------
/modules/registry/main.tf:
--------------------------------------------------------------------------------
1 | resource "yandex_container_registry" "registry" {
2 | name = var.registry_name
3 | }
4 |
--------------------------------------------------------------------------------
/modules/secrets/variables.tf:
--------------------------------------------------------------------------------
1 | variable "opaque_secrets" {}
2 | variable "namespace" {
3 | type = string
4 | }
5 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | .terraform/
3 | terraform.d/
4 | *.tfstate*
5 | *.tfvars
6 | output/
7 | secrets/
8 | yc-cli/
--------------------------------------------------------------------------------
/common/basic-auth/variables.tf:
--------------------------------------------------------------------------------
1 | variable "username" {
2 | type = string
3 | }
4 | variable "password" {
5 | type = string
6 | }
7 |
--------------------------------------------------------------------------------
/get-yc-cli.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -x
3 | curl -o- https://storage.yandexcloud.net/yandexcloud-yc/install.sh | bash -s -- -i ./yc-cli -n
4 |
--------------------------------------------------------------------------------
/modules/cert-manager/modules/crds/dep.tf:
--------------------------------------------------------------------------------
1 | variable "dep" {
2 | default = []
3 | }
4 | output "req" {
5 | value = [kubectl_manifest.crds]
6 | }
7 |
--------------------------------------------------------------------------------
/modules/cert-manager/variables.tf:
--------------------------------------------------------------------------------
1 | variable "node_selector" {
2 | type = map(string)
3 | }
4 | variable "issuers_email" {
5 | type = string
6 | }
7 |
--------------------------------------------------------------------------------
/modules/nginx-ingress/output.tf:
--------------------------------------------------------------------------------
1 | output "load_balancer_ip" {
2 | value = data.kubernetes_service.nginx-ingress.load_balancer_ingress[0].ip
3 | }
4 |
--------------------------------------------------------------------------------
/modules/cert-manager/modules/issuers/dep.tf:
--------------------------------------------------------------------------------
1 | variable "dep" {
2 | default = []
3 | }
4 | output "req" {
5 | value = [kubectl_manifest.issuers]
6 | }
7 |
--------------------------------------------------------------------------------
/modules/admins/output.tf:
--------------------------------------------------------------------------------
1 | output "kubeconfigs" {
2 | value = local.kubeconfigs
3 | }
4 | output "ssh_keys" {
5 | value = join("\n", local.ssh_keys)
6 | }
7 |
--------------------------------------------------------------------------------
/modules/cert-manager/modules/issuers/variables.tf:
--------------------------------------------------------------------------------
1 | variable "staging_email" {
2 | type = string
3 | }
4 | variable "production_email" {
5 | type = string
6 | }
7 |
--------------------------------------------------------------------------------
/modules/nfs-server-provisioner/output.tf:
--------------------------------------------------------------------------------
1 | output "storage_class" {
2 | value = jsondecode(helm_release.nfs-server-provisioner.metadata[0].values)["storageClass"]["name"]
3 | }
4 |
--------------------------------------------------------------------------------
/modules/cert-manager/modules/issuers/output.tf:
--------------------------------------------------------------------------------
1 | output "cluster_issuers" {
2 | value = {
3 | for key, issuer in local.issuers:
4 | key => issuer.metadata.name
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/logstash/output.tf:
--------------------------------------------------------------------------------
1 | output "logstash_host" {
2 | value = "logstash.${var.namespace}"
3 | }
4 | output "input_ports" {
5 | value = local.input_ports
6 | }
7 |
--------------------------------------------------------------------------------
/common/external-name-service/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | type = string
3 | }
4 | variable "namespace" {
5 | type = string
6 | }
7 | variable "external_name" {
8 | type = string
9 | }
10 |
--------------------------------------------------------------------------------
/modules/cluster/modules/node_groups/output.tf:
--------------------------------------------------------------------------------
1 | output "node_group_ids" {
2 | value = {
3 | for group in yandex_kubernetes_node_group.cluster_node_groups:
4 | group.name => group.id
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/common/basic-auth/main.tf:
--------------------------------------------------------------------------------
1 | data "external" "basic-auth" {
2 | program = ["bash", "${path.module}/scripts/htpasswd.sh"]
3 | query = {
4 | username = var.username
5 | password = var.password
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/filebeat/variables.tf:
--------------------------------------------------------------------------------
1 | variable "namespace" {
2 | type = string
3 | }
4 | variable "logstash_host" {
5 | type = string
6 | }
7 | variable "logstash_port" {
8 | type = string
9 | }
10 |
--------------------------------------------------------------------------------
/modules/nfs-server-provisioner/variables.tf:
--------------------------------------------------------------------------------
1 | variable "node_selector" {
2 | type = map(string)
3 | }
4 | variable "storage_class" {
5 | type = string
6 | }
7 | variable "storage_size" {
8 | type = string
9 | }
10 |
--------------------------------------------------------------------------------
/modules/prometheus/output.tf:
--------------------------------------------------------------------------------
1 | output "grafana_admin_password" {
2 | value = random_string.grafana-password.result
3 | }
4 | output "prometheus_admin_password" {
5 | value = random_string.prometheus-password.result
6 | }
7 |
--------------------------------------------------------------------------------
/common/basic-auth/scripts/htpasswd.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | eval "$(jq -r '@sh "USERNAME=\(.username) PASSWORD=\(.password)"')"
3 |
4 | AUTH=$(htpasswd -nb ${USERNAME} ${PASSWORD})
5 |
6 | jq -n --arg auth "$AUTH" '{"auth":$auth}'
--------------------------------------------------------------------------------
/modules/iam/output.tf:
--------------------------------------------------------------------------------
1 | output "cluster_service_account_id" {
2 | value = yandex_iam_service_account.cluster.id
3 | }
4 | output "cluster_node_service_account_id" {
5 | value = yandex_iam_service_account.cluster_node.id
6 | }
7 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/cluster/output.tf:
--------------------------------------------------------------------------------
1 | output "elasticsearch_host" {
2 | value = "${var.cluster_name}-es-http.${var.namespace}"
3 | }
4 |
5 | output "elasticsearch_user" {
6 | value = data.kubernetes_secret.elastic-user.data
7 | }
8 |
--------------------------------------------------------------------------------
/modules/iam/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_folder_id" {
2 | type = string
3 | }
4 | variable "cluster_service_account_name" {
5 | type = string
6 | }
7 | variable "cluster_node_service_account_name" {
8 | type = string
9 | }
10 |
--------------------------------------------------------------------------------
/modules/kubernetes-dashboard/variables.tf:
--------------------------------------------------------------------------------
1 | variable "node_selector" {
2 | type = map(string)
3 | }
4 | variable "ingress" {
5 | type = object({
6 | name = string
7 | issuer = string
8 | domain = string
9 | })
10 | }
11 |
--------------------------------------------------------------------------------
/modules/secrets/main.tf:
--------------------------------------------------------------------------------
1 | resource "kubernetes_secret" "opaque" {
2 | for_each = var.opaque_secrets
3 | metadata {
4 | name = each.key
5 | namespace = var.namespace
6 | }
7 | data = each.value
8 | type = "Opaque"
9 | }
10 |
--------------------------------------------------------------------------------
/modules/admins/variables.tf:
--------------------------------------------------------------------------------
1 | variable "admins" {
2 | type = map(object({
3 | public_keys = list(string)
4 | }))
5 | }
6 | variable "cluster_name" {
7 | type = string
8 | }
9 | variable "cluster_endpoint" {
10 | type = string
11 | }
12 |
--------------------------------------------------------------------------------
/common/external-name-service/main.tf:
--------------------------------------------------------------------------------
1 | resource "kubernetes_service" "service" {
2 | metadata {
3 | name = var.name
4 | namespace = var.namespace
5 | }
6 | spec {
7 | type = "ExternalName"
8 | external_name = var.external_name
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/files.tf:
--------------------------------------------------------------------------------
1 | resource "local_file" "kubeconfigs" {
2 | for_each = module.admins.kubeconfigs
3 | filename = "${var.output_dir}/kubeconfigs/${each.key}.yaml"
4 | file_permission = "0600"
5 | directory_permission = "0700"
6 | sensitive_content = yamlencode(each.value)
7 | }
8 |
--------------------------------------------------------------------------------
/modules/vpc/output.tf:
--------------------------------------------------------------------------------
1 | output "vpc_id" {
2 | value = yandex_vpc_network.cluster.id
3 | }
4 |
5 | output "location_subnets" {
6 | value = [
7 | for s in yandex_vpc_subnet.cluster_subnets: {
8 | id = s.id
9 | zone = s.zone
10 | }
11 | ]
12 | }
13 |
14 |
--------------------------------------------------------------------------------
/modules/elasticsearch/output.tf:
--------------------------------------------------------------------------------
1 | output "elasticsearch_host" {
2 | value = module.cluster.elasticsearch_host
3 | }
4 | output "elasticsearch_user" {
5 | value = module.cluster.elasticsearch_user
6 | }
7 | output "logstash_host" {
8 | value = module.logstash.logstash_host
9 | }
10 |
--------------------------------------------------------------------------------
/modules/vpc/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | type = string
3 | }
4 | variable "subnet" {
5 | type = string
6 | default = "10.0.0.0/12"
7 | }
8 | variable "zones" {
9 | type = list(string)
10 | default = [
11 | "ru-central1-a",
12 | "ru-central1-b",
13 | "ru-central1-c"]
14 | }
15 |
--------------------------------------------------------------------------------
/modules/iam/dep.tf:
--------------------------------------------------------------------------------
1 | variable "dep" {
2 | default = []
3 | }
4 | output "req" {
5 | value = [
6 | yandex_iam_service_account.cluster,
7 | yandex_iam_service_account.cluster_node,
8 | yandex_resourcemanager_folder_iam_member.cluster-admin,
9 | yandex_resourcemanager_folder_iam_member.cluster_node-admin,
10 | ]
11 | }
12 |
--------------------------------------------------------------------------------
/modules/cluster/output.tf:
--------------------------------------------------------------------------------
1 | output "node_group_ids" {
2 | value = module.node_groups.node_group_ids
3 | }
4 | output "external_v4_endpoint" {
5 | value = yandex_kubernetes_cluster.cluster.master[0].external_v4_endpoint
6 | }
7 | output "ca_certificate" {
8 | value = yandex_kubernetes_cluster.cluster.master[0].cluster_ca_certificate
9 | }
10 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/kibana/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_name" {
2 | type = string
3 | }
4 | variable "node_selector" {
5 | type = map(string)
6 | }
7 | variable "namespace" {
8 | type = string
9 | }
10 | variable "ingress" {
11 | type = object({
12 | name = string
13 | issuer = string
14 | domain = string
15 | })
16 | }
17 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/cluster/variables.tf:
--------------------------------------------------------------------------------
1 | variable "node_selector" {
2 | type = map(string)
3 | }
4 | variable "scale" {
5 | type = number
6 | }
7 | variable "cluster_name" {
8 | type = string
9 | }
10 | variable "storage_class" {
11 | type = string
12 | }
13 | variable "storage_size" {
14 | type = string
15 | }
16 | variable "namespace" {
17 | type = string
18 | }
19 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/logstash/variables.tf:
--------------------------------------------------------------------------------
1 | variable "namespace" {
2 | type = string
3 | }
4 | variable "node_selector" {
5 | type = map(string)
6 | }
7 | variable "scale" {
8 | type = number
9 | }
10 | variable "elasticsearch_host" {
11 | type = string
12 | }
13 | variable "elasticsearch_username" {
14 | type = string
15 | }
16 | variable "elasticsearch_password" {
17 | type = string
18 | }
19 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/operator/main.tf:
--------------------------------------------------------------------------------
1 | data "local_file" "all_in_one_manifest" {
2 | filename = "${path.module}/sources/all-in-one.yaml"
3 | }
4 | locals {
5 | all_in_one = split("\n---\n", trimsuffix(data.local_file.all_in_one_manifest.content, "\n"))
6 | }
7 | resource "kubectl_manifest" "all_in_one" {
8 | count = length(local.all_in_one)
9 | yaml_body = local.all_in_one[count.index]
10 | }
11 |
--------------------------------------------------------------------------------
/modules/vpc/main.tf:
--------------------------------------------------------------------------------
1 | resource "yandex_vpc_network" "cluster" {
2 | name = var.name
3 | }
4 |
5 | resource "yandex_vpc_subnet" "cluster_subnets" {
6 | count = length(var.zones)
7 |
8 | name = "${var.name}-${var.zones[count.index]}"
9 | v4_cidr_blocks = [cidrsubnet(var.subnet, length(var.zones)+1, count.index)]
10 | zone = var.zones[count.index]
11 | network_id = yandex_vpc_network.cluster.id
12 | }
13 |
14 |
--------------------------------------------------------------------------------
/modules/cert-manager/modules/crds/main.tf:
--------------------------------------------------------------------------------
1 | data "http" "crds_manifest" {
2 | url = "https://raw.githubusercontent.com/jetstack/cert-manager/release-0.13/deploy/manifests/00-crds.yaml"
3 | }
4 |
5 | locals {
6 | crds = split("\n---\n", trimsuffix(data.http.crds_manifest.body, "\n---\n"))
7 | }
8 |
9 | resource "kubectl_manifest" "crds" {
10 | count = length(local.crds)
11 | yaml_body = local.crds[count.index]
12 | }
13 |
--------------------------------------------------------------------------------
/modules/elasticsearch/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_name" {
2 | type = string
3 | }
4 | variable "node_selector" {
5 | type = map(string)
6 | }
7 | variable "scale" {
8 | type = number
9 | }
10 | variable "storage_class" {
11 | type = string
12 | }
13 | variable "storage_size" {
14 | type = string
15 | }
16 | variable "kibana_ingress" {
17 | type = object({
18 | name = string
19 | issuer = string
20 | domain = string
21 | })
22 | }
23 |
--------------------------------------------------------------------------------
/terraform.tfvars.example:
--------------------------------------------------------------------------------
1 | yandex_token = ""
2 | yandex_cloud_id = ""
3 | yandex_folder_id = ""
4 | cluster_name = "example"
5 | cluster_domain = "example.com"
6 | node_groups_scale = {
7 | service = {
8 | fixed_scale = 3
9 | }
10 | nfs = {
11 | fixed_scale = 1
12 | }
13 | web = {
14 | fixed_scale = 3
15 | }
16 | }
17 | admin_email = "admin@example.com"
18 | admins = {
19 | adminname = {
20 | public_keys = ["ssh-rsa XXXXXXX"]
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/modules/cluster/modules/node_groups/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_id" {
2 | type = string
3 | }
4 | variable "kube_version" {
5 | type = string
6 | default = "1.15"
7 | }
8 | variable "location_subnets" {
9 | type = list(object({
10 | id = string
11 | zone = string
12 | }))
13 | }
14 | variable "cluster_node_groups" {
15 | type = map(object({
16 | name = string
17 | cpu = number
18 | memory = number
19 | disk = object({
20 | size = number
21 | type = string
22 | })
23 | fixed_scale = list(number)
24 | auto_scale = list(object({
25 | max = number
26 | min = number
27 | initial = number
28 | }))
29 | }))
30 | }
31 | variable "ssh_keys" {
32 | type = string
33 | }
34 |
--------------------------------------------------------------------------------
/modules/iam/main.tf:
--------------------------------------------------------------------------------
1 | data "yandex_resourcemanager_folder" "cluster_folder" {
2 | folder_id = var.cluster_folder_id
3 | }
4 |
5 | resource "yandex_iam_service_account" "cluster" {
6 | name = var.cluster_service_account_name
7 | }
8 |
9 | resource "yandex_resourcemanager_folder_iam_member" "cluster-admin" {
10 | folder_id = data.yandex_resourcemanager_folder.cluster_folder.id
11 | role = "editor"
12 | member = "serviceAccount:${yandex_iam_service_account.cluster.id}"
13 | }
14 |
15 | resource "yandex_iam_service_account" "cluster_node" {
16 | name = var.cluster_node_service_account_name
17 | }
18 |
19 | resource "yandex_resourcemanager_folder_iam_member" "cluster_node-admin" {
20 | folder_id = data.yandex_resourcemanager_folder.cluster_folder.id
21 | role = "container-registry.images.puller"
22 | member = "serviceAccount:${yandex_iam_service_account.cluster_node.id}"
23 | }
24 |
--------------------------------------------------------------------------------
/output.tf:
--------------------------------------------------------------------------------
1 | output "load_balancer_ip" {
2 | value = module.nginx-ingress.load_balancer_ip
3 | description = "Nginx ingress load balancer ip"
4 | }
5 | output "elasticsearch_host" {
6 | value = module.elasticsearch.elasticsearch_host
7 | description = "Elasticsearch cluster ingress host"
8 | }
9 | output "elasticsearch_user" {
10 | value = module.elasticsearch.elasticsearch_user
11 | description = "Elasticsearch cluster user"
12 | }
13 | output "grafana_admin_password" {
14 | value = module.prometheus.grafana_admin_password
15 | description = "Grafana admin user password"
16 | }
17 | output "container_registry_id" {
18 | value = module.registry.registry_id
19 | description = "Created container registry ID"
20 | }
21 | output "prometheus_admin_password" {
22 | value = module.prometheus.prometheus_admin_password
23 | description = "Prometheus basic-auth user password (username - prometheus)"
24 | }
25 |
--------------------------------------------------------------------------------
/modules/nfs-server-provisioner/main.tf:
--------------------------------------------------------------------------------
1 | data "helm_repository" "stable" {
2 | name = "stable"
3 | url = "https://kubernetes-charts.storage.googleapis.com/"
4 | }
5 |
6 | locals {
7 | values = {
8 | persistence = {
9 | enabled = true
10 | storageClass = var.storage_class
11 | size = var.storage_size
12 | }
13 | storageClass = {
14 | name = "nfs-client"
15 | reclaimPolicy = "Retain"
16 | }
17 | nodeSelector = var.node_selector
18 | }
19 | }
20 |
21 | resource "kubernetes_namespace" "nfs-server-provisioner" {
22 | metadata {
23 | name = "nfs-server-provisioner"
24 | }
25 | }
26 |
27 | resource "helm_release" "nfs-server-provisioner" {
28 | name = "nfs-server-provisioner"
29 | repository = data.helm_repository.stable.metadata[0].name
30 | chart = "nfs-server-provisioner"
31 | namespace = kubernetes_namespace.nfs-server-provisioner.metadata[0].name
32 |
33 | values = [yamlencode(local.values)]
34 | }
35 |
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
1 | variable "yandex_token" {
2 | type = string
3 | }
4 | variable "yandex_cloud_id" {
5 | type = string
6 | }
7 | variable "yandex_folder_id" {
8 | type = string
9 | }
10 | variable "cluster_name" {
11 | type = string
12 | }
13 | variable "cluster_version" {
14 | type = string
15 | default = "1.15"
16 | }
17 | variable "cluster_release_channel" {
18 | type = string
19 | default = "STABLE"
20 | }
21 | variable "node_groups_scale" {
22 | default = {
23 | service = {
24 | fixed_scale = 3
25 | }
26 | nfs = {
27 | fixed_scale = 1
28 | }
29 | web = {
30 | auto_scale = {
31 | max = 3
32 | min = 3
33 | initial = 3
34 | }
35 | }
36 | }
37 | }
38 | variable "admin_email" {
39 | type = string
40 | }
41 | variable "cluster_domain" {
42 | type = string
43 | }
44 | variable "admins" {
45 | type = map(object({
46 | public_keys = list(string)
47 | }))
48 | }
49 | variable "output_dir" {
50 | type = string
51 | default = "output"
52 | }
53 |
--------------------------------------------------------------------------------
/modules/cluster/main.tf:
--------------------------------------------------------------------------------
1 | resource "yandex_kubernetes_cluster" "cluster" {
2 | name = var.name
3 |
4 | network_id = var.vpc_id
5 |
6 | master {
7 | regional {
8 | region = var.region
9 |
10 | dynamic "location" {
11 | for_each = var.location_subnets
12 |
13 | content {
14 | zone = location.value.zone
15 | subnet_id = location.value.id
16 | }
17 | }
18 | }
19 |
20 | version = var.kube_version
21 | public_ip = var.public
22 | }
23 |
24 | service_account_id = var.cluster_service_account_id
25 | node_service_account_id = var.node_service_account_id
26 |
27 | release_channel = var.release_channel
28 |
29 | depends_on = [
30 | var.dep
31 | ]
32 | }
33 |
34 | module "node_groups" {
35 | source = "./modules/node_groups"
36 |
37 | cluster_id = yandex_kubernetes_cluster.cluster.id
38 | kube_version = var.kube_version
39 | location_subnets = var.location_subnets
40 | cluster_node_groups = var.cluster_node_groups
41 | ssh_keys = var.ssh_keys
42 | }
43 |
--------------------------------------------------------------------------------
/get-kubectl-provider.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | download_links=`curl -sL https://api.github.com/repos/gavinbunney/terraform-provider-kubectl/releases/latest | jq -r '.assets[].browser_download_url'`
4 |
5 | case "$OSTYPE" in
6 | darwin*)
7 | download_link=`echo ${download_links} | tr ' ' '\n' | grep 'darwin'`
8 | OS="darwin"
9 | ARCH="amd64"
10 | ;;
11 | linux*)
12 | case "`uname -m`" in
13 | x86_64)
14 | download_link=`echo ${download_links} | tr ' ' '\n' | grep 'linux-amd64'`
15 | OS="linux"
16 | ARCH="amd64"
17 | ;;
18 | i?86)
19 | download_link=`echo ${download_links} | tr ' ' '\n' | grep 'linux-386'`
20 | OS="linux"
21 | ARCH="386"
22 | ;;
23 | armv*)
24 | download_link=`echo ${download_links} | tr ' ' '\n' | grep 'linux-arm'`
25 | OS="linux"
26 | ARCH="arm"
27 | ;;
28 | esac
29 | ;;
30 | esac
31 |
32 | plugins_path="terraform.d/plugins/${OS}_${ARCH}"
33 |
34 | mkdir -p ${plugins_path}
35 |
36 | curl -L# ${download_link} > ${plugins_path}/terraform-provider-kubectl
37 |
38 | chmod +x ${plugins_path}/terraform-provider-kubectl
39 |
--------------------------------------------------------------------------------
/modules/nginx-ingress/main.tf:
--------------------------------------------------------------------------------
1 | data "helm_repository" "stable" {
2 | name = "stable"
3 | url = "https://kubernetes-charts.storage.googleapis.com/"
4 | }
5 |
6 | resource "kubernetes_namespace" "nginx-ingress" {
7 | metadata {
8 | name = "nginx-ingress"
9 | }
10 | }
11 |
12 | locals {
13 | values = {
14 | controller = {
15 | kind = "DaemonSet"
16 | nodeSelector = var.node_selector
17 | }
18 | defaultBackend = {
19 | nodeSelector = var.node_selector
20 | }
21 | }
22 | }
23 |
24 | resource "helm_release" "nginx-ingress" {
25 | name = "nginx-ingress"
26 | repository = data.helm_repository.stable.metadata[0].name
27 | chart = "nginx-ingress"
28 | version = "1.26.1"
29 | namespace = kubernetes_namespace.nginx-ingress.metadata[0].name
30 |
31 | values = [yamlencode(local.values)]
32 | }
33 |
34 | data "kubernetes_service" "nginx-ingress" {
35 | depends_on = [helm_release.nginx-ingress]
36 | metadata {
37 | name = "${helm_release.nginx-ingress.name}-controller"
38 | namespace = kubernetes_namespace.nginx-ingress.metadata[0].name
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/modules/cert-manager/main.tf:
--------------------------------------------------------------------------------
1 | data "helm_repository" "jetstack" {
2 | name = "jetstack"
3 | url = "https://charts.jetstack.io"
4 | }
5 |
6 | resource "kubernetes_namespace" "cert-manager" {
7 | metadata {
8 | name = "cert-manager"
9 | }
10 | }
11 |
12 | locals {
13 | values = {
14 | nodeSelector = var.node_selector
15 | webhook = {
16 | nodeSelector = var.node_selector
17 | }
18 | cainjector = {
19 | nodeSelector = var.node_selector
20 | }
21 | }
22 | }
23 |
24 | module "crds" {
25 | source = "./modules/crds"
26 | }
27 |
28 | resource "helm_release" "cert-manager" {
29 | name = "cert-manager"
30 | repository = data.helm_repository.jetstack.metadata[0].name
31 | chart = "cert-manager"
32 | namespace = kubernetes_namespace.cert-manager.metadata[0].name
33 |
34 | values = [yamlencode(local.values)]
35 |
36 | depends_on = [module.crds.req]
37 | }
38 |
39 | module "issuers" {
40 | source = "./modules/issuers"
41 |
42 | production_email = var.issuers_email
43 | staging_email = var.issuers_email
44 |
45 | dep = [module.crds.req, helm_release.cert-manager]
46 | }
47 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Daniel Chaplin
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/modules/cluster/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | type = string
3 | }
4 | variable "public" {
5 | type = bool
6 | default = true
7 | }
8 | variable "region" {
9 | type = string
10 | default = "ru-central1"
11 | }
12 | variable "kube_version" {
13 | type = string
14 | default = "1.15"
15 | }
16 | variable "release_channel" {
17 | type = string
18 | default = "STABLE"
19 | }
20 | variable "vpc_id" {
21 | type = string
22 | }
23 | variable "location_subnets" {
24 | type = list(object({
25 | id = string
26 | zone = string
27 | }))
28 | }
29 | variable "cluster_service_account_id" {
30 | type = string
31 | }
32 | variable "node_service_account_id" {
33 | type = string
34 | }
35 | variable "cluster_node_groups" {
36 | type = map(object({
37 | name = string
38 | cpu = number
39 | memory = number
40 | disk = object({
41 | size = number
42 | type = string
43 | })
44 | fixed_scale = list(number)
45 | auto_scale = list(object({
46 | max = number
47 | min = number
48 | initial = number
49 | }))
50 | }))
51 | }
52 | variable "ssh_keys" {
53 | type = string
54 | }
55 |
--------------------------------------------------------------------------------
/modules/kubernetes-dashboard/main.tf:
--------------------------------------------------------------------------------
1 | data "helm_repository" "stable" {
2 | name = "stable"
3 | url = "https://kubernetes-charts.storage.googleapis.com/"
4 | }
5 |
6 | locals {
7 | values = {
8 | extraArgs = ["--token-ttl", "0"]
9 | nodeSelector = var.node_selector
10 | ingress = {
11 | enabled = true
12 | annotations = {
13 | "kubernetes.io/ingress.class" = "nginx"
14 | "kubernetes.io/tls-acme" = "true"
15 | "cert-manager.io/cluster-issuer" = var.ingress.issuer
16 | "ingress.kubernetes.io/ssl-redirect" = "true"
17 | "nginx.ingress.kubernetes.io/rewrite-target" = "/"
18 | "nginx.ingress.kubernetes.io/backend-protocol" = "HTTPS"
19 | }
20 | hosts = [var.ingress.domain]
21 | tls = [
22 | {
23 | secretName = var.ingress.name
24 | hosts = [var.ingress.domain]
25 | }
26 | ]
27 | }
28 | }
29 | }
30 |
31 | resource "helm_release" "kubernetes-dashboard" {
32 | name = "kubernetes-dashboard"
33 | repository = data.helm_repository.stable.metadata[0].name
34 | chart = "kubernetes-dashboard"
35 | namespace = "kube-system"
36 |
37 | values = [yamlencode(local.values)]
38 | }
39 |
--------------------------------------------------------------------------------
/docs/variables.md:
--------------------------------------------------------------------------------
1 | ## Inputs
2 |
3 | | Name | Description | Type | Default | Required |
4 | |------|-------------|------|---------|:-----:|
5 | | admin\_email | n/a | `string` | n/a | yes |
6 | | admins | n/a | `map(object({ public_keys = list(string) }))` | n/a | yes |
7 | | cluster\_domain | n/a | `string` | n/a | yes |
8 | | cluster\_name | n/a | `string` | n/a | yes |
9 | | cluster\_release\_channel | n/a | `string` | `"STABLE"` | no |
10 | | cluster\_version | n/a | `string` | `"1.15"` | no |
11 | | node\_groups\_scale | n/a | `map` | `{ "nfs": { "fixed_scale": 1 }, "service": { "fixed_scale": 3 }, "web": { "auto_scale": { "initial": 3, "max": 3, "min": 3 } } }` | no |
12 | | output\_dir | n/a | `string` | `"output"` | no |
13 | | yandex\_cloud\_id | n/a | `string` | n/a | yes |
14 | | yandex\_folder\_id | n/a | `string` | n/a | yes |
15 | | yandex\_token | n/a | `string` | n/a | yes |
16 |
17 | ## Outputs
18 |
19 | | Name | Description |
20 | |------|-------------|
21 | | container\_registry\_id | Created container registry ID |
22 | | elasticsearch\_host | Elasticsearch cluster ingress host |
23 | | elasticsearch\_user | Elasticsearch cluster user |
24 | | grafana\_admin\_password | Grafana admin user password |
25 | | load\_balancer\_ip | Nginx ingress load balancer ip |
26 | | prometheus\_admin\_password | Prometheus basic-auth user password (username - prometheus) |
27 |
28 |
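29 | ## Example
30 | 
31 | A minimal `terraform.tfvars` sketch, mirroring `terraform.tfvars.example` at the repository root (all values are placeholders to replace with your own):
32 | 
33 | ```hcl
34 | yandex_token = ""
35 | yandex_cloud_id = ""
36 | yandex_folder_id = ""
37 | cluster_name = "example"
38 | cluster_domain = "example.com"
39 | admin_email = "admin@example.com"
40 | node_groups_scale = {
41 | service = { fixed_scale = 3 }
42 | nfs = { fixed_scale = 1 }
43 | web = { fixed_scale = 3 }
44 | }
45 | admins = {
46 | adminname = {
47 | public_keys = ["ssh-rsa XXXXXXX"]
48 | }
49 | }
50 | ```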
--------------------------------------------------------------------------------
/modules/cluster/modules/node_groups/main.tf:
--------------------------------------------------------------------------------
1 | resource "yandex_kubernetes_node_group" "cluster_node_groups" {
2 | for_each = var.cluster_node_groups
3 |
4 | name = each.value["name"]
5 | description = each.value["name"]
6 |
7 | version = var.kube_version
8 |
9 | cluster_id = var.cluster_id
10 |
11 | labels = {
12 | "group_name" = each.value["name"]
13 | }
14 |
15 | instance_template {
16 | platform_id = "standard-v2"
17 | nat = true
18 |
19 | metadata = {
20 | ssh-keys = var.ssh_keys
21 | }
22 |
23 | resources {
24 | cores = each.value["cpu"]
25 | memory = each.value["memory"]
26 | }
27 |
28 | boot_disk {
29 | type = each.value["disk"]["type"]
30 | size = each.value["disk"]["size"]
31 | }
32 |
33 | scheduling_policy {
34 | preemptible = false
35 | }
36 | }
37 |
38 | scale_policy {
39 | dynamic "auto_scale" {
40 | for_each = each.value["auto_scale"]
41 | content {
42 | min = auto_scale.value["min"]
43 | max = auto_scale.value["max"]
44 | initial = auto_scale.value["initial"]
45 | }
46 | }
47 | dynamic "fixed_scale" {
48 | for_each = each.value["fixed_scale"]
49 | content {
50 | size = fixed_scale.value
51 | }
52 | }
53 | }
54 |
55 | allocation_policy {
56 | dynamic "location" {
57 | for_each = var.location_subnets
58 |
59 | content {
60 | zone = location.value.zone
61 | subnet_id = location.value.id
62 | }
63 | }
64 | }
65 | }
66 |
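67 | # Sketch of one entry of var.cluster_node_groups as assembled by the root module's
68 | # locals.cluster_node_groups in /main.tf (values here are hypothetical). fixed_scale
69 | # and auto_scale are zero- or one-element lists, which the dynamic blocks above use
70 | # as presence toggles:
71 | #
72 | # web = {
73 | # name = "web"
74 | # cpu = 6
75 | # memory = 12
76 | # disk = { size = 64, type = "network-ssd" }
77 | # fixed_scale = []
78 | # auto_scale = [{ max = 3, min = 3, initial = 3 }]
79 | # }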
--------------------------------------------------------------------------------
/modules/elasticsearch/main.tf:
--------------------------------------------------------------------------------
1 | module "operator" {
2 | source = "./modules/operator"
3 | }
4 | resource "kubernetes_namespace" "elasticsearch" {
5 | metadata {
6 | name = "elasticsearch"
7 | }
8 | }
9 | module "cluster" {
10 | source = "./modules/cluster"
11 |
12 | cluster_name = var.cluster_name
13 | node_selector = var.node_selector
14 | scale = var.scale
15 | storage_class = var.storage_class
16 | storage_size = var.storage_size
17 | namespace = kubernetes_namespace.elasticsearch.metadata[0].name
18 | }
19 | locals {
20 | elasticsearch_username = keys(module.cluster.elasticsearch_user)[0]
21 | elasticsearch_password = values(module.cluster.elasticsearch_user)[0]
22 | }
23 | module "kibana" {
24 | source = "./modules/kibana"
25 |
26 | cluster_name = var.cluster_name
27 | node_selector = var.node_selector
28 | namespace = kubernetes_namespace.elasticsearch.metadata[0].name
29 | ingress = var.kibana_ingress
30 | }
31 | module "logstash" {
32 | source = "./modules/logstash"
33 |
34 | namespace = kubernetes_namespace.elasticsearch.metadata[0].name
35 | elasticsearch_host = module.cluster.elasticsearch_host
36 | elasticsearch_username = local.elasticsearch_username
37 | elasticsearch_password = local.elasticsearch_password
38 | node_selector = var.node_selector
39 | scale = var.scale
40 | }
41 | module "filebeat" {
42 | source = "./modules/filebeat"
43 |
44 | namespace = kubernetes_namespace.elasticsearch.metadata[0].name
45 | logstash_host = module.logstash.logstash_host
46 | logstash_port = module.logstash.input_ports["beats"]
47 | }
48 |
--------------------------------------------------------------------------------
/modules/cert-manager/modules/issuers/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | issuers = {
3 | staging = {
4 | kind = "ClusterIssuer"
5 | apiVersion = "cert-manager.io/v1alpha2"
6 | metadata = {
7 | name = "letsencrypt-staging"
8 | }
9 | spec = {
10 | acme = {
11 | server = "https://acme-staging-v02.api.letsencrypt.org/directory"
12 | email = var.staging_email
13 | privateKeySecretRef = {
14 | name = "letsencrypt-staging"
15 | }
16 | solvers = [
17 | {
18 | http01 = {
19 | ingress = {
20 | class = "nginx"
21 | }
22 | }
23 | }
24 | ]
25 | }
26 | }
27 | }
28 | production = {
29 | kind = "ClusterIssuer"
30 | apiVersion = "cert-manager.io/v1alpha2"
31 | metadata = {
32 | name = "letsencrypt-production"
33 | }
34 | spec = {
35 | acme = {
36 | server = "https://acme-v02.api.letsencrypt.org/directory"
37 | email = var.production_email
38 | privateKeySecretRef = {
39 | name = "letsencrypt-production"
40 | }
41 | solvers = [
42 | {
43 | http01 = {
44 | ingress = {
45 | class = "nginx"
46 | }
47 | }
48 | }
49 | ]
50 | }
51 | }
52 | }
53 | }
54 | }
55 |
56 | resource "kubectl_manifest" "issuers" {
57 | for_each = local.issuers
58 | yaml_body = yamlencode(each.value)
59 | }
60 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/kibana/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | kibana = {
3 | apiVersion = "kibana.k8s.elastic.co/v1beta1"
4 | kind = "Kibana"
5 | metadata = {
6 | name = var.cluster_name
7 | namespace = var.namespace
8 | }
9 | spec = {
10 | version = "7.5.0"
11 | count = 1
12 | elasticsearchRef = {
13 | name = var.cluster_name
14 | }
15 | podTemplate = {
16 | spec = {
17 | nodeSelector = var.node_selector
18 | }
19 | }
20 | http = {
21 | tls = {
22 | selfSignedCertificate = {
23 | disabled = true
24 | }
25 | }
26 | }
27 | }
28 | }
29 | }
30 | resource "kubectl_manifest" "kibana" {
31 | yaml_body = yamlencode(local.kibana)
32 | }
33 |
34 | resource "kubernetes_ingress" "kibana" {
35 | metadata {
36 | name = "${var.cluster_name}-kb"
37 | namespace = var.namespace
38 | annotations = {
39 | "kubernetes.io/ingress.class" = "nginx"
40 | "kubernetes.io/tls-acme" = "true"
41 | "cert-manager.io/cluster-issuer" = var.ingress.issuer
42 | "ingress.kubernetes.io/ssl-redirect" = "true"
43 | }
44 | }
45 | spec {
46 | rule {
47 | host = var.ingress.domain
48 | http {
49 | path {
50 | backend {
51 | service_name = "${var.cluster_name}-kb-http"
52 | service_port = 5601
53 | }
54 | path = "/"
55 | }
56 | }
57 | }
58 | tls {
59 | hosts = [var.ingress.domain]
60 | secret_name = "${var.cluster_name}-kb"
61 | }
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/modules/admins/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | usernames = keys(var.admins)
3 | ssh_keys = flatten([
4 | for admin, config in var.admins: [
5 | for key in config["public_keys"]: [
6 | format("%s:%s %s", admin, key, admin)
7 | ]
8 | ]
9 | ])
10 |
11 | }
12 | resource "kubernetes_service_account" "admin" {
13 | for_each = toset(local.usernames)
14 | metadata {
15 | namespace = "default"
16 | name = each.key
17 | }
18 | }
19 | resource "kubernetes_cluster_role_binding" "admin" {
20 | for_each = kubernetes_service_account.admin
21 | metadata {
22 | name = each.key
23 | }
24 | role_ref {
25 | api_group = "rbac.authorization.k8s.io"
26 | kind = "ClusterRole"
27 | name = "cluster-admin"
28 | }
29 | subject {
30 | api_group = ""
31 | kind = "ServiceAccount"
32 | name = each.key
33 | namespace = "default"
34 | }
35 | }
36 | data "kubernetes_secret" "admin" {
37 | for_each = kubernetes_service_account.admin
38 | metadata {
39 | name = each.value.default_secret_name
40 | }
41 | depends_on = [
42 | kubernetes_cluster_role_binding.admin
43 | ]
44 | }
45 | locals {
46 | kubeconfigs = {
47 | for username, secret in data.kubernetes_secret.admin:
48 | username => {
49 | apiVersion = "v1"
50 | kind = "Config"
51 | clusters = [
52 | {
53 | name = var.cluster_name
54 | cluster = {
55 | certificate-authority-data = base64encode(secret.data["ca.crt"])
56 | server = var.cluster_endpoint
57 | }
58 | }
59 | ]
60 | users = [
61 | {
62 | name = username
63 | user = {
64 | token = secret.data["token"]
65 | }
66 | }
67 | ]
68 | contexts = [
69 | {
70 | name = var.cluster_name
71 | context = {
72 | cluster = var.cluster_name
73 | namespace = secret.data["namespace"]
74 | user = username
75 | }
76 | }
77 | ]
78 | current-context = var.cluster_name
79 | preferences = {}
80 | }
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/filebeat/main.tf:
--------------------------------------------------------------------------------
1 | data "helm_repository" "elastic" {
2 | name = "elastic"
3 | url = "https://helm.elastic.co/"
4 | }
5 |
6 | locals {
7 | values = {
8 | filebeatConfig = {
9 | "input-kubernetes.yml" = <<-EOF
10 | - type: container
11 | paths:
12 | - "/var/lib/docker/containers/*/*.log"
13 | processors:
14 | - add_kubernetes_metadata:
15 | in_cluster: true
16 | - drop_event:
17 | when:
18 | equals:
19 | kubernetes.labels.app: nginx-ingress
20 | exclude_lines: ['kube-probe']
21 | EOF
22 | "filebeat.yml" = <<-EOF
23 | filebeat.modules:
24 | - module: nginx
25 | filebeat.config:
26 | inputs:
27 | path: $${path.config}/input-*.yml
28 | reload.enabled: false
29 | modules:
30 | path: $${path.config}/modules.d/*.yml
31 | reload.enabled: false
32 | filebeat.autodiscover:
33 | providers:
34 | - type: kubernetes
35 | templates:
36 | - condition:
37 | equals:
38 | kubernetes.labels.app: nginx-ingress
39 | config:
40 | - module: nginx
41 | access:
42 | input:
43 | type: container
44 | stream: stdout
45 | paths:
46 | - "/var/lib/docker/containers/$${data.kubernetes.container.id}/*.log"
47 | error:
48 | input:
49 | type: container
50 | stream: stderr
51 | paths:
52 | - "/var/lib/docker/containers/$${data.kubernetes.container.id}/*.log"
53 | processors:
54 | - add_cloud_metadata:
55 | fields:
56 | logtype: kubernetes
57 | fields_under_root: true
58 | output.logstash:
59 | hosts: ["${var.logstash_host}:${var.logstash_port}"]
60 | EOF
61 | }
62 | }
63 | }
64 |
65 | resource "helm_release" "filebeat" {
66 | name = "filebeat"
67 | repository = data.helm_repository.elastic.metadata[0].name
68 | chart = "filebeat"
69 | version = "7.5.0"
70 | namespace = var.namespace
71 |
72 | values = [yamlencode(local.values)]
73 | }
74 |
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/cluster/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | nodeSelectorPodTemplateSpec = {
3 | nodeSelector = var.node_selector
4 | }
5 | emptyDirPodTemplateSpec = {
6 | volumes = [
7 | {
8 | name = "elasticsearch-data"
9 | emptyDir = {}
10 | }
11 | ]
12 | }
13 | cluster = {
14 | apiVersion = "elasticsearch.k8s.elastic.co/v1beta1"
15 | kind = "Elasticsearch"
16 | metadata = {
17 | name = var.cluster_name
18 | namespace = var.namespace
19 | }
20 | spec = {
21 | version = "7.5.0"
22 | nodeSets = [
23 | {
24 | name = "master"
25 | count = var.scale
26 | podTemplate = {
27 | spec = merge(local.nodeSelectorPodTemplateSpec, local.emptyDirPodTemplateSpec)
28 | }
29 | config = {
30 | "node.master" = true
31 | "node.data" = false
32 | "node.ingest" = false
33 | "node.store.allow_mmap" = true
34 | }
35 | },
36 | {
37 | name = "data"
38 | count = var.scale
39 | podTemplate = {
40 | spec = local.nodeSelectorPodTemplateSpec
41 | }
42 | volumeClaimTemplates = [
43 | {
44 | metadata = {
45 | name = "elasticsearch-data"
46 | }
47 | spec = {
48 | accessModes = [
49 | "ReadWriteOnce"
50 | ]
51 | resources = {
52 | requests = {
53 | storage = var.storage_size
54 | }
55 | }
56 | storageClassName = var.storage_class
57 | }
58 | }
59 | ]
60 | config = {
61 | "node.master" = false
62 | "node.data" = true
63 | "node.ingest" = false
64 | "node.store.allow_mmap" = true
65 | }
66 | },
67 | {
68 | name = "ingest"
69 | count = var.scale
70 | podTemplate = {
71 | spec = merge(local.nodeSelectorPodTemplateSpec, local.emptyDirPodTemplateSpec)
72 | }
73 | config = {
74 | "node.master" = false
75 | "node.data" = false
76 | "node.ingest" = true
77 | "node.store.allow_mmap" = true
78 | }
79 | }
80 | ]
81 | http = {
82 | tls = {
83 | selfSignedCertificate = {
84 | disabled = true
85 | }
86 | }
87 | }
88 | }
89 | }
90 |
91 | }
92 |
93 | resource "kubectl_manifest" "cluster" {
94 | yaml_body = yamlencode(local.cluster)
95 | }
96 |
97 | data "kubernetes_secret" "elastic-user" {
98 | metadata {
99 | name = "${var.cluster_name}-es-elastic-user"
100 | namespace = var.namespace
101 | }
102 | depends_on = [kubectl_manifest.cluster]
103 | }
104 |
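105 | # Note: the "<cluster_name>-es-elastic-user" secret read above is created by the
106 | # ECK operator, not by this module. Its data holds a single key, the "elastic"
107 | # username, mapped to the generated password, which is why the parent module takes
108 | # keys(...)[0] / values(...)[0] of the elasticsearch_user output.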
--------------------------------------------------------------------------------
/modules/elasticsearch/modules/logstash/main.tf:
--------------------------------------------------------------------------------
1 | data "helm_repository" "stable" {
2 | name = "stable"
3 | url = "https://kubernetes-charts.storage.googleapis.com/"
4 | }
5 |
6 | locals {
7 | input_ports = {
8 | beats = 5044
9 | tcp = 5045
10 | udp = 5046
11 | }
12 | input_configs = {
13 | for name, port in local.input_ports:
14 | name => {
15 | port = port
16 | protocol = contains(["beats", "tcp"], name) ? "TCP" : "UDP"
17 | codec = contains(["tcp", "udp"], name) ? "json_lines" : false # the conditional unifies both branches to string, so false becomes "false"; the template below checks for that
18 | }
19 | }
20 | ports = [
21 | for name, config in local.input_configs:
22 | {
23 | name = name
24 | containerPort = config["port"]
25 | protocol = config["protocol"]
26 | }
27 | ]
28 | service_ports = {
29 | for name, config in local.input_configs:
30 | name => {
31 | targetPort = name
32 | port = config["port"]
33 | protocol = config["protocol"]
34 | }
35 | }
36 | inputs = {
37 | for name, config in local.input_configs:
38 | name => <<-EOF
39 | input {
40 | ${name} {
41 | port => ${config["port"]}%{ if config["codec"] != "false" }
42 | codec => ${config["codec"]}%{ endif }
43 | }
44 | }
45 | EOF
46 | }
47 | filters = {
48 | type = <<-EOF
49 | filter {
50 | mutate {
51 | add_field => {
52 | "type" => "%%{[agent][type]}"
53 | }
54 | }
55 | }
56 | EOF
57 | nginx = <<-EOF
58 | filter {
59 | if [event][module] == "nginx" {
60 | if [fileset][name] == "access" {
61 | grok {
62 | match => { "message" => ["%%{IPORHOST:[nginx][access][remote_ip]} - %%{DATA:[nginx][access][user_name]} \[%%{HTTPDATE:[nginx][access][time]}\] \"%%{WORD:[nginx][access][method]} %%{DATA:[nginx][access][url]} HTTP/%%{NUMBER:[nginx][access][http_version]:float}\" %%{NUMBER:[nginx][access][response_code]:int} %%{NUMBER:[nginx][access][body_sent][bytes]:int} \"%%{DATA:[nginx][access][referrer]}\" \"%%{DATA:[nginx][access][agent]}\" %%{NUMBER:[nginx][access][request_length]:int} %%{NUMBER:[nginx][access][request_time]:float} \[%%{DATA:[nginx][access][proxy_upstream_name]}\] \[%%{DATA:[nginx][access][proxy_alternative_upstream_name]}\] %%{DATA:[nginx][access][upstream_addr]} %%{NUMBER:[nginx][access][upstream_response_length]:int} %%{NUMBER:[nginx][access][upstream_response_time]:float} %%{NUMBER:[nginx][access][upstream_status]:int} %%{DATA:[nginx][access][req_id]}"] }
63 | remove_field => "message"
64 | }
65 | mutate {
66 | add_field => { "read_timestamp" => "%%{@timestamp}" }
67 | }
68 | date {
69 | match => [ "[nginx][access][time]", "dd/MMM/YYYY:H:m:s Z" ]
70 | remove_field => "[nginx][access][time]"
71 | }
72 | useragent {
73 | source => "[nginx][access][agent]"
74 | target => "[nginx][access][user_agent]"
75 | remove_field => "[nginx][access][agent]"
76 | }
77 | geoip {
78 | source => "[nginx][access][remote_ip]"
79 | target => "[nginx][access][geoip]"
80 | }
81 | }
82 | else if [fileset][name] == "error" {
83 | grok {
84 | match => { "message" => ["%%{DATA:[nginx][error][time]} \[%%{DATA:[nginx][error][level]}\] %%{NUMBER:[nginx][error][pid]}#%%{NUMBER:[nginx][error][tid]}: (\*%%{NUMBER:[nginx][error][connection_id]} )?%%{GREEDYDATA:[nginx][error][message]}"] }
85 | remove_field => "message"
86 | }
87 | mutate {
88 | rename => { "@timestamp" => "read_timestamp" }
89 | }
90 | date {
91 | match => [ "[nginx][error][time]", "YYYY/MM/dd H:m:s" ]
92 | remove_field => "[nginx][error][time]"
93 | }
94 | }
95 | }
96 | }
97 | EOF
98 | }
99 | values = {
100 | nodeSelector = var.node_selector
101 | replicaCount = var.scale
102 | elasticsearch = {
103 | host = var.elasticsearch_host
104 | }
105 | service = {
106 | ports = local.service_ports
107 | }
108 | ports = local.ports
109 | inputs = merge(local.inputs, { main = "" })
110 | filters = local.filters
111 | outputs = {
112 | main = <<-EOF
113 | output {
114 | elasticsearch {
115 | hosts => ["$${ELASTICSEARCH_HOST}:$${ELASTICSEARCH_PORT}"]
116 | user => "${var.elasticsearch_username}"
117 | password => "${var.elasticsearch_password}"
118 |
119 | manage_template => false
120 | index => "%%{type}-%%{+YYYY.MM.dd}"
121 | }
122 | }
123 | EOF
124 | }
125 | }
126 | }
127 |
128 | resource "helm_release" "logstash" {
129 | name = "logstash"
130 | repository = data.helm_repository.stable.metadata[0].name
131 | chart = "logstash"
132 | version = ""
133 | namespace = var.namespace
134 | timeout = 600
135 |
136 | values = [yamlencode(local.values)]
137 | }
138 |
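139 | # For reference, local.inputs["beats"] effectively renders to the following logstash
140 | # config (the codec line is dropped because codec unifies to the string "false" for
141 | # beats, failing the %{ if } check):
142 | #
143 | # input {
144 | #   beats {
145 | #     port => 5044
146 | #   }
147 | # }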
--------------------------------------------------------------------------------
/modules/prometheus/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | crds = {
3 | alertmanager = "https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/crds/crd-alertmanager.yaml"
4 | prometheus = "https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/crds/crd-prometheus.yaml"
5 | prometheusrules = "https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/crds/crd-prometheusrules.yaml"
6 | servicemonitor = "https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/crds/crd-servicemonitor.yaml"
7 | podmonitor = "https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/crds/crd-podmonitor.yaml"
8 | }
9 | }
10 |
11 | data "http" "crd_manifests" {
12 | for_each = local.crds
13 | url = each.value
14 | }
15 |
16 | resource "kubectl_manifest" "crds" {
17 | for_each = data.http.crd_manifests
18 | yaml_body = each.value["body"]
19 | }
20 |
21 | data "helm_repository" "stable" {
22 | name = "stable"
23 | url = "https://kubernetes-charts.storage.googleapis.com/"
24 | }
25 |
26 | resource "kubernetes_namespace" "prometheus" {
27 | metadata {
28 | name = "prometheus"
29 | }
30 | }
31 |
32 | resource "random_string" "prometheus-password" {
33 | length = 16
34 | special = false
35 | }
36 |
37 | locals {
38 | username = "admin"
39 | password = random_string.prometheus-password.result
40 | }
41 |
42 | module "basic-auth" {
43 | source = "./../../common/basic-auth"
44 |
45 | password = local.password
46 | username = local.username
47 | }
48 |
49 | resource "kubernetes_secret" "prometheus-basic-auth" {
50 | metadata {
51 | name = "prometheus-basic-auth"
52 | namespace = kubernetes_namespace.prometheus.metadata[0].name
53 | }
54 | data = {
55 | auth = module.basic-auth.auth
56 | }
57 | type = "Opaque"
58 | }
59 |
60 | resource "random_string" "grafana-password" {
61 | length = 16
62 | special = false
63 | }
64 |
65 | locals {
66 | # workaround for https://github.com/hashicorp/terraform/issues/22405: both branches
67 | # of a conditional must unify to one type, so the heterogeneous ingress configs are
68 | # round-tripped through jsonencode/jsondecode
67 | ingress_json = {
68 | for name, config in var.configs:
69 | name => lookup(config, "ingress", false) != false ? jsonencode({
70 | enabled = true
71 | hosts = [config["ingress"]["domain"]]
72 | tls = [
73 | {
74 | secretName = config["ingress"]["domain"]
75 | hosts = [config["ingress"]["domain"]]
76 | }
77 | ]
78 | annotations = merge({
79 | "kubernetes.io/ingress.class" = "nginx"
80 | "kubernetes.io/tls-acme" = "true"
81 | "cert-manager.io/cluster-issuer" = config["ingress"]["issuer"]
82 | }, jsondecode(lookup(config, "http_auth", true) != false ? jsonencode({
83 | "nginx.ingress.kubernetes.io/auth-type" = "basic"
84 | "nginx.ingress.kubernetes.io/auth-secret" = kubernetes_secret.prometheus-basic-auth.metadata[0].name
85 | "nginx.ingress.kubernetes.io/auth-realm" = "Authentication Required"
86 | }) : jsonencode({})))
87 | }) : jsonencode({})
88 | }
89 | ingress = {
90 | for name, json in local.ingress_json:
91 | name => jsondecode(json)
92 | }
93 | # end of workaround
94 | disabled_component = {
95 | enabled = false
96 | }
97 | values = {
98 | alertmanager = {
99 | ingress = local.ingress["alertmanager"]
100 | alertmanagerSpec = {
101 | nodeSelector = var.configs["alertmanager"].node_selector
102 | storage = {
103 | volumeClaimTemplate = {
104 | spec = {
105 | storageClassName = var.configs["alertmanager"].storage_class
106 | accessModes = [var.configs["alertmanager"].storage_mode]
107 | resources = {
108 | requests = {
109 | storage = var.configs["alertmanager"].storage_size
110 | }
111 | }
112 | }
113 | }
114 | }
115 | }
116 | }
117 | grafana = {
118 | ingress = local.ingress["grafana"]
119 | adminPassword = random_string.grafana-password.result
120 | }
121 | kubeScheduler = local.disabled_component
122 | kubeControllerManager = local.disabled_component
123 | kubeEtcd = local.disabled_component
124 | prometheusOperator = {
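# The CRDs are pre-applied via kubectl_manifest above, so the chart must not create them.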
125 | createCustomResource = false
126 | nodeSelector = var.configs["operator"].node_selector
127 | }
128 | prometheus = {
129 | ingress = local.ingress["prometheus"]
130 | prometheusSpec = {
131 | nodeSelector = var.configs["prometheus"].node_selector
132 | storageSpec = {
133 | volumeClaimTemplate = {
134 | spec = {
135 | storageClassName = var.configs["prometheus"].storage_class
136 | accessModes = [var.configs["prometheus"].storage_mode]
137 | resources = {
138 | requests = {
139 | storage = var.configs["prometheus"].storage_size
140 | }
141 | }
142 | }
143 | }
144 | }
145 | }
146 | }
147 | }
148 | }
149 |
150 | resource "helm_release" "prometheus-operator" {
151 | name = "prometheus-operator"
152 | repository = data.helm_repository.stable.metadata[0].name
153 | chart = "prometheus-operator"
154 | namespace = kubernetes_namespace.prometheus.metadata[0].name
155 |
156 | values = [yamlencode(local.values)]
157 |
158 | depends_on = [kubectl_manifest.crds]
159 | }
160 |
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
1 | provider "yandex" {
2 | token = var.yandex_token
3 | cloud_id = var.yandex_cloud_id
4 | folder_id = var.yandex_folder_id
5 | }
6 |
7 | locals {
8 | cluster_service_account_name = "${var.cluster_name}-cluster"
9 | cluster_node_service_account_name = "${var.cluster_name}-node"
10 |
11 | cluster_node_group_configs = {
12 | service = {
13 | name = "service"
14 | cpu = 6
15 | memory = 18
16 | disk = {
17 | size = 64
18 | type = "network-ssd"
19 | }
20 | }
21 | nfs = {
22 | name = "nfs"
23 | cpu = 2
24 | memory = 2
25 | disk = {
26 | size = 64
27 | type = "network-ssd"
28 | }
29 | }
30 | web = {
31 | name = "web"
32 | cpu = 6
33 | memory = 12
34 | disk = {
35 | size = 64
36 | type = "network-ssd"
37 | }
38 | }
39 | }
40 | cluster_node_groups = {
41 | for key, config in local.cluster_node_group_configs:
42 | key => merge(config, {
43 | fixed_scale = lookup(var.node_groups_scale[key], "fixed_scale", false) != false ? [var.node_groups_scale[key].fixed_scale] : []
44 | auto_scale = lookup(var.node_groups_scale[key], "auto_scale", false) != false ? [var.node_groups_scale[key].auto_scale] : []
45 | })
46 | }
47 | node_selectors = {
48 | for key, id in module.cluster.node_group_ids:
49 | key => {
50 | "yandex.cloud/node-group-id" = id
51 | }
52 | }
53 | hosts = {
54 | dashboard = {
55 | name = "k8s"
56 | issuer = module.cert-manager.cluster_issuers["production"]
57 | }
58 | kibana = {
59 | name = "kb"
60 | issuer = module.cert-manager.cluster_issuers["production"]
61 | }
62 | alertmanager = {
63 | name = "alerts"
64 | issuer = module.cert-manager.cluster_issuers["production"]
65 | }
66 | prometheus = {
67 | name = "metrics"
68 | issuer = module.cert-manager.cluster_issuers["production"]
69 | }
70 | grafana = {
71 | name = "stats"
72 | issuer = module.cert-manager.cluster_issuers["production"]
73 | }
74 | }
75 | ingress = {
76 | for key, host in local.hosts:
77 | key => merge(host, { domain = "${host.name}.${var.cluster_domain}" })
78 | }
79 | elasticsearch_username = keys(module.elasticsearch.elasticsearch_user)[0]
80 | elasticsearch_password = module.elasticsearch.elasticsearch_user[local.elasticsearch_username]
81 | elasticsearch_url = "http://${local.elasticsearch_username}:${local.elasticsearch_password}@${module.elasticsearch.elasticsearch_host}:9200"
82 | }
83 |
84 | module "vpc" {
85 | source = "./modules/vpc"
86 |
87 | name = var.cluster_name
88 | }
89 |
90 | module "iam" {
91 | source = "./modules/iam"
92 |
93 | cluster_folder_id = var.yandex_folder_id
94 | cluster_service_account_name = local.cluster_service_account_name
95 | cluster_node_service_account_name = local.cluster_node_service_account_name
96 | }
97 |
98 | module "cluster" {
99 | source = "./modules/cluster"
100 |
101 | name = var.cluster_name
102 | public = true
103 | kube_version = var.cluster_version
104 | release_channel = var.cluster_release_channel
105 | vpc_id = module.vpc.vpc_id
106 | location_subnets = module.vpc.location_subnets
107 | cluster_service_account_id = module.iam.cluster_service_account_id
108 | node_service_account_id = module.iam.cluster_node_service_account_id
109 | cluster_node_groups = local.cluster_node_groups
110 | ssh_keys = module.admins.ssh_keys
111 | dep = [
112 | module.iam.req
113 | ]
114 | }
115 |
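# The helm, kubernetes and kubectl providers below share the same exec-based
# authentication: yc-cli issues a short-lived token for the managed cluster.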
116 | provider "helm" {
117 | kubernetes {
118 | load_config_file = false
119 |
120 | host = module.cluster.external_v4_endpoint
121 | cluster_ca_certificate = module.cluster.ca_certificate
122 | exec {
123 | api_version = "client.authentication.k8s.io/v1beta1"
124 | command = "${path.root}/yc-cli/bin/yc"
125 | args = [
126 | "managed-kubernetes",
127 | "create-token",
128 | "--cloud-id", var.yandex_cloud_id,
129 | "--folder-id", var.yandex_folder_id,
130 | "--token", var.yandex_token,
131 | ]
132 | }
133 | }
134 | }
135 |
136 | provider "kubernetes" {
137 | load_config_file = false
138 |
139 | host = module.cluster.external_v4_endpoint
140 | cluster_ca_certificate = module.cluster.ca_certificate
141 | exec {
142 | api_version = "client.authentication.k8s.io/v1beta1"
143 | command = "${path.root}/yc-cli/bin/yc"
144 | args = [
145 | "managed-kubernetes",
146 | "create-token",
147 | "--cloud-id", var.yandex_cloud_id,
148 | "--folder-id", var.yandex_folder_id,
149 | "--token", var.yandex_token,
150 | ]
151 | }
152 | }
153 |
154 | module "nginx-ingress" {
155 | source = "./modules/nginx-ingress"
156 |
157 | node_selector = local.node_selectors["web"]
158 | }
159 |
160 | provider "kubectl" {
161 | load_config_file = false
162 |
163 | host = module.cluster.external_v4_endpoint
164 | cluster_ca_certificate = module.cluster.ca_certificate
165 | exec {
166 | api_version = "client.authentication.k8s.io/v1beta1"
167 | command = "${path.root}/yc-cli/bin/yc"
168 | args = [
169 | "managed-kubernetes",
170 | "create-token",
171 | "--cloud-id", var.yandex_cloud_id,
172 | "--folder-id", var.yandex_folder_id,
173 | "--token", var.yandex_token,
174 | ]
175 | }
176 | }
177 |
178 | provider "http" {}
179 |
180 | module "cert-manager" {
181 | source = "./modules/cert-manager"
182 |
183 | issuers_email = var.admin_email
184 |
185 | node_selector = local.node_selectors["service"]
187 | }
188 |
189 | module "kubernetes-dashboard" {
190 | source = "./modules/kubernetes-dashboard"
191 |
192 | node_selector = local.node_selectors["service"]
193 |
194 | ingress = local.ingress["dashboard"]
195 | }
196 |
197 | module "admins" {
198 | source = "./modules/admins"
199 |
200 | admins = var.admins
201 | cluster_name = var.cluster_name
202 | cluster_endpoint = module.cluster.external_v4_endpoint
203 | }
204 |
205 | provider "local" {}
206 |
207 | provider "random" {}
208 |
209 | module "nfs-server-provisioner" {
210 | source = "./modules/nfs-server-provisioner"
211 |
212 | node_selector = local.node_selectors["nfs"]
213 | storage_class = "yc-network-ssd"
214 | storage_size = "200Gi"
215 | }
216 |
217 | module "registry" {
218 | source = "./modules/registry"
219 |
220 | registry_name = var.cluster_name
221 | }
222 |
223 | module "elasticsearch" {
224 | source = "./modules/elasticsearch"
225 |
226 | cluster_name = var.cluster_name
227 | node_selector = local.node_selectors["service"]
228 | scale = lookup(var.node_groups_scale["service"], "fixed_scale", 3)
229 | storage_class = "yc-network-ssd"
230 | storage_size = "50Gi"
231 | kibana_ingress = local.ingress["kibana"]
232 | }
233 |
234 | module "prometheus" {
235 | source = "./modules/prometheus"
236 |
237 | configs = {
238 | alertmanager = {
239 | ingress = local.ingress["alertmanager"]
240 | node_selector = local.node_selectors["service"]
241 | storage_class = module.nfs-server-provisioner.storage_class
242 | storage_mode = "ReadWriteMany"
243 | storage_size = "2Gi"
244 | }
245 | grafana = {
246 | ingress = local.ingress["grafana"]
247 | http_auth = false
248 | }
249 | operator = {
250 | node_selector = local.node_selectors["service"]
251 | }
252 | prometheus = {
253 | node_selector = local.node_selectors["service"]
254 | ingress = local.ingress["prometheus"]
255 | storage_class = module.nfs-server-provisioner.storage_class
256 | storage_mode = "ReadWriteMany"
257 | storage_size = "2Gi"
258 | }
259 | }
260 | }
261 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Infrastructure in Yandex Cloud
2 |
3 |
4 | ### All-in-one production-ready solution
5 |
6 | This project originally started as the claustrophobia.com project infrastructure.
7 |
8 | We decided to open the sources for education and production use.
9 |
10 |
11 | ## Contents
12 |
13 | The project is built as a modular, structured Terraform monorepository
14 | containing submodules that deploy highly available infrastructure services
15 | using Yandex Cloud Managed Kubernetes and other platform services.
16 |
17 |
18 | ### Project structure
19 |
20 | The modules are mostly split by semantics, but some wrap a single specific resource.
21 |
22 | ```
23 | ├── docs
24 | ├── common
25 | │ ├── basic-auth
26 | │ │ └── scripts
27 | │ └── external-name-service
28 | └── modules
29 | │ ├── vpc
30 | │ ├── iam
31 | │ ├── cluster
32 | │ │ └── modules
33 | │ │ └── node_groups
34 | │ ├── admins
35 | │ ├── nfs-server-provisioner
36 | │ ├── prometheus
37 | │ ├── elasticsearch
38 | │ │ └── modules
39 | │ │ ├── cluster
40 | │ │ ├── filebeat
41 | │ │ ├── kibana
42 | │ │ ├── logstash
43 | │ │ └── operator
44 | │ │ └── sources
45 | │ ├── nginx-ingress
46 | │ ├── cert-manager
47 | │ │ └── modules
48 | │ │ ├── crds
49 | │ │ └── issuers
50 | │ ├── kubernetes-dashboard
51 | │ ├── registry
52 | │ └── secrets
53 | ├── get-kubectl-provider.sh
54 | ├── get-yc-cli.sh
55 | ├── files.tf
56 | ├── output.tf
57 | ├── variables.tf
58 | └── main.tf
59 | ```
60 |
61 | #### Main module
62 |
63 | - Based on the main.tf file in the project root.
64 |
65 | - Input variables are defined in the variables.tf file of the project root.
66 |
67 | - Output variables are defined in output.tf
68 |
69 | - File outputs are defined in files.tf
70 |
71 | - Contains modules for common (abstract) and targeted usage
72 |
73 | - Defines and modifies input structures to configure the project modules
74 |
75 | - Provides the entrypoint for module configuration
76 |
77 | - Configures the providers that depend on the Kubernetes API to use yc-cli for token retrieval
78 |
79 | Input and output variables of the module are described in [docs/variables.md](docs/variables.md)
80 |
81 |
82 | ### Modules description
83 |
84 |
85 | #### vpc
86 |
87 | Module to deploy the virtual private cloud the infrastructure is deployed on.
88 |
89 | - Creates a network and subnets for the given list of zone names.
90 |
91 |
92 | #### iam
93 |
94 | Module to deploy the IAM resources the cluster uses.
95 |
96 | - Creates service accounts for the cluster and for the nodes, and assigns folder roles.
97 |
98 |
99 | #### cluster
100 |
101 | Module to deploy the Kubernetes cluster and the defined node groups.
102 |
103 | - Creates a Managed Kubernetes cluster configured to use a regional master according to the given `location_subnets` value.
104 |
105 | - Creates cluster node groups from the given `cluster_node_groups` value, dynamically allocated across the given `location_subnets`
106 |
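The root module wires it to the iam module through the repository's `dep`/`req` convention:

```
module "cluster" {
  source = "./modules/cluster"

  name                       = var.cluster_name
  public                     = true
  kube_version               = var.cluster_version
  release_channel            = var.cluster_release_channel
  vpc_id                     = module.vpc.vpc_id
  location_subnets           = module.vpc.location_subnets
  cluster_service_account_id = module.iam.cluster_service_account_id
  node_service_account_id    = module.iam.cluster_node_service_account_id
  cluster_node_groups        = local.cluster_node_groups
  ssh_keys                   = module.admins.ssh_keys
  dep = [
    module.iam.req
  ]
}
```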
107 |
108 | #### admins
109 |
110 | Module to deploy Kubernetes service accounts for each admin
111 | and prepare their SSH keys for deployment to the cluster nodes.
112 |
113 | - Creates service accounts for the given `admins` value and binds the `cluster-admin` role to them.
114 |
115 | - Prepares `kubeconfigs`, including a prefetched secret token, for output.
116 |
117 | - Prepares the `ssh-keys` string for cloud-init on the cluster nodes.
118 |
119 |
120 | #### registry
121 |
122 | Module to deploy a Container Registry with the given name.
123 |
124 |
125 | #### nginx-ingress
126 |
127 | Module to deploy the nginx-ingress controller helm chart configured as a DaemonSet.
128 |
129 | - Creates a helm release in `nginx-ingress` to deploy the nginx-ingress service pods, configured as a DaemonSet for the given `node_selector`
130 |
131 | - Creates a service of type `LoadBalancer` to expose the nginx-ingress service
132 |
133 | - Provides the ingress class `nginx` used by many other modules
134 |
135 | - Outputs the `load_balancer_ip` of the deployed ingress service.
136 |
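Instantiated from the root module as:

```
module "nginx-ingress" {
  source = "./modules/nginx-ingress"

  node_selector = local.node_selectors["web"]
}
```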
137 |
138 | #### cert-manager
139 |
140 | Module to deploy the cert-manager helm chart and cluster issuers
141 | (staging and production) to automate certificate issuance by Let's Encrypt
142 |
143 | - Downloads and applies the CRDs for cert-manager.
144 |
145 | - Creates a helm release to deploy the cert-manager service pods.
146 |
147 | - Applies cluster issuers for the given staging and production emails.
148 |
149 | - Provides cluster issuers, used by many other modules in their ingress configuration.
150 |
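For example, the prometheus module wires an issuer into its ingress annotations (abridged from modules/prometheus/main.tf):

```
annotations = {
  "kubernetes.io/ingress.class"    = "nginx"
  "kubernetes.io/tls-acme"         = "true"
  "cert-manager.io/cluster-issuer" = config["ingress"]["issuer"]
}
```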
151 |
152 | #### kubernetes-dashboard
153 |
154 | Module to deploy the kubernetes-dashboard helm chart, exposed with nginx-ingress
155 | and secured with a Let's Encrypt certificate automatically issued by cert-manager.
156 |
157 | Can be useful for debugging both of the required modules.
158 |
159 | - Prepares the `ingress` value for the nginx-ingress helm chart based on the given `ingress` value.
160 |
161 | - Creates a helm release in the `kube-system` namespace to deploy the kubernetes-dashboard service pod, configured as a Deployment for the given `node_selector`
162 |
163 |
164 | #### nfs-server-provisioner
165 |
166 | Module to deploy the nfs-server-provisioner controller helm chart.
167 |
168 | - Creates a helm release in the `nfs-server-provisioner` namespace to deploy the nfs-server-provisioner service pod for the given `node_selector`, configured to use a PV on the given `storage_class`.
169 |
170 | - Creates a `network-ssd` volume attached to the node for the given `storage_class`
171 |
172 | - Provides the `nfs-client` storage class used by many other modules
173 |
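Consumers reference the provisioned class through the module output, as the root module does for prometheus storage:

```
storage_class = module.nfs-server-provisioner.storage_class
storage_mode  = "ReadWriteMany"
```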
174 |
175 | #### elasticsearch
176 |
177 | Module to deploy the ELK stack services that provide logging
178 |
179 | - creates resources in the given `namespace`
180 |
181 | - operator submodule to deploy elasticsearch-operator
182 |
183 | - cluster submodule to deploy a highly available elasticsearch cluster for the given `cluster_name`, `scale` and `node_selector`, configured to use PVs of the given `storage_size` and `storage_class`
184 |
185 | - kibana submodule to deploy the kibana service pods and web service, exposed by the given `kibana_ingress`
186 |
187 | - logstash submodule to deploy the logstash helm chart
188 |
189 | - filebeat submodule to deploy the filebeat helm chart
190 |
191 | - Provides streaming of instance logs to elasticsearch
192 |
193 | - Provides an exposed kibana service to watch the logs
194 |
195 | - Provides the elasticsearch and logstash host names as outputs
196 |
197 | - Provides the generated password of the elastic user as an output
198 |
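The root module instantiates it as:

```
module "elasticsearch" {
  source = "./modules/elasticsearch"

  cluster_name   = var.cluster_name
  node_selector  = local.node_selectors["service"]
  scale          = lookup(var.node_groups_scale["service"], "fixed_scale", 3)
  storage_class  = "yc-network-ssd"
  storage_size   = "50Gi"
  kibana_ingress = local.ingress["kibana"]
}
```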
199 |
200 | #### prometheus
201 |
202 | Module to deploy the Prometheus-Alertmanager-Grafana stack services that provide monitoring
203 |
204 | - creates resources in the given `namespace`
205 |
206 | - downloads and applies the CRDs
207 |
208 | - Creates a helm release in the given namespace to deploy the prometheus-operator helm chart services, configured by the given `configs`.
209 |
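Each key in `configs` describes one component; abridged from the root module:

```
configs = {
  prometheus = {
    ingress       = local.ingress["prometheus"]
    node_selector = local.node_selectors["service"]
    storage_class = module.nfs-server-provisioner.storage_class
    storage_mode  = "ReadWriteMany"
    storage_size  = "2Gi"
  }
  grafana = {
    ingress   = local.ingress["grafana"]
    http_auth = false
  }
  # the alertmanager and operator entries follow the same pattern
}
```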
210 |
211 |
212 | #### secrets
213 |
214 | Common module to create an opaque Kubernetes secret for each entry in `opaque_secrets` in the given `namespace`
215 |
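A minimal sketch of an invocation; the secret names and values below are hypothetical, since `opaque_secrets` is untyped in the module's variables.tf:

```
module "secrets" {
  source = "./modules/secrets"

  namespace = "default"
  opaque_secrets = {
    database = {            # hypothetical secret name
      username = "app"      # hypothetical values
      password = "change-me"
    }
  }
}
```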
216 |
217 | #### basic-auth
218 |
219 | Common module to create a Kubernetes opaque secret for basic auth from the given `username` and `password`, using external bash script execution
220 |
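Used by the prometheus module, for example:

```
module "basic-auth" {
  source = "./../../common/basic-auth"

  password = local.password
  username = local.username
}
```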
221 |
222 | #### external-name-service
223 |
224 | Common module to create an ExternalName Kubernetes service with the given `name` and `external_name` in the given `namespace`
225 |
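A minimal sketch, assuming a hypothetical external database host:

```
module "external-name-service" {
  source = "./common/external-name-service"

  namespace     = "default"          # hypothetical values
  name          = "database"
  external_name = "db.example.com"
}
```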
226 |
227 | ### Scripts
228 |
229 |
230 | #### get-kubectl-provider.sh
231 |
232 | Silently installs a suitable release of gavinbunney/terraform-provider-kubectl into `terraform.d/plugins` in the project root
233 |
234 |
235 | #### get-yc-cli.sh
236 |
237 | Silently installs a suitable release of yc-cli into the `yc-cli` directory in the project root
238 |
239 |
240 | ### Execution
241 |
242 | The order of execution matters at this point; follow the steps in order.
243 |
244 |
245 | 1. `git clone` this project
246 |
247 | 2. `./get-kubectl-provider.sh && ./get-yc-cli.sh`
248 |
249 | 3. configure all required variables of the main module as described in [docs/variables.md](docs/variables.md)
250 |
251 | `mv terraform.tfvars.example terraform.tfvars` and fill in the required values
252 |
253 | 4. `terraform plan -target module.cluster` and check your plan
254 |
255 | 5. `terraform apply -target module.cluster` and wait until the k8s cluster becomes HEALTHY
256 |
257 | 6. Configure yc-cli and get credentials for the newly created cluster
258 |
259 | [See Doc](https://cloud.yandex.ru/docs/managed-kubernetes/quickstart#add-conf)
260 |
261 | 7. `terraform apply -target module.nfs-server-provisioner`
262 |
263 | 8. `terraform apply -target module.nginx-ingress`
264 |
265 | 9. Create a wildcard DNS A record pointing to the outputted `load_balancer_ip`
266 |
267 | `*.cluster_domain IN A load_balancer_ip`
268 |
269 | 10. `terraform apply -target module.cert-manager`
270 |
271 | check that all issuers are ready with `kubectl get clusterissuers`
272 |
273 | 11. `terraform apply -target module.admins -target local_file.kubeconfigs`
274 |
275 | check that the output/kubeconfigs directory was created and contains files named after the `admins`
276 |
277 | 12. `terraform apply -target module.kubernetes-dashboard`
278 |
279 | check that the ingress is ready with `kubectl -n kube-system get ing -l app=kubernetes-dashboard`
280 |
281 | try to access the dashboard in a web browser using a generated kubeconfig
282 |
283 | 13. `git clone https://github.com/Strangerxxx/yc-k8s-recept.git && cd yc-k8s-recept/sysctl-tuner && ./deploy.sh`
284 |
285 | and check that all sysctl-tuner pods are running normally with `kubectl -n kube-system get po -l module=sysctl-tuner`
286 |
287 | 14. `terraform apply -target module.elasticsearch`
288 |
289 | and check that the elasticsearch cluster HEALTH becomes green with `kubectl -n elasticsearch get elasticsearch`
290 |
291 | try to access kibana in a web browser using the outputted `elasticsearch_user` value
292 |
293 | 15. `terraform apply -target module.prometheus`
294 |
295 | and check that all prometheus pods are running normally with `kubectl -n prometheus get pod`
296 |
297 | try to access Grafana in a web browser as user *admin* with the outputted `grafana_admin_password`
298 |
299 |
300 | ## TODO
301 |
302 | - [ ] refactor
303 |
304 | - [ ] deploy sysctl-tuner using helm and terraform
305 |
306 | - [ ] refactor to separate the modules' configuration
307 |
--------------------------------------------------------------------------------