├── _config.yml ├── k8s ├── master │ ├── token.csv │ ├── encryption.yaml │ └── config.yaml ├── client │ └── kubeconfig ├── workers │ ├── kubeconfig │ ├── kube-proxy-config │ └── config.yaml └── network │ ├── flannel.yaml │ └── coredns.yaml ├── apps ├── ticker │ ├── cleanup.sh │ └── deployment.yml ├── guestbook │ ├── cleanup.sh │ ├── frontend.yml │ └── redis.yml ├── guestbook-go │ ├── guestbook.yml │ ├── redis-master.yml │ └── redis-slave.yml ├── heapster │ ├── influxdb.yml │ ├── heapster.yml │ └── grafana.yml ├── dashboard │ └── deployment.yml └── kubedns │ └── deployment.yml ├── .gitignore ├── provider.tf ├── etcd ├── resolved.conf └── config.yaml ├── dns ├── Corefile ├── zone └── config.yaml ├── terraform.tfvars.sample ├── ca.tf ├── LICENSE ├── client.tf ├── dns.tf ├── etcd.tf ├── vars.tf ├── README.md ├── k8s-master.tf └── k8s-workers.tf /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-slate -------------------------------------------------------------------------------- /k8s/master/token.csv: -------------------------------------------------------------------------------- 1 | ${client_token},client,000 2 | -------------------------------------------------------------------------------- /apps/ticker/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl delete deployment ticker 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.tfstate* 2 | *.tfvars 3 | .tls 4 | *.swp 5 | .terraform 6 | .kubeconfig 7 | .cacert.pem 8 | -------------------------------------------------------------------------------- /provider.tf: -------------------------------------------------------------------------------- 1 | provider "digitalocean" { 2 | version = "~> 0.1" 3 | token = "${var.do_api_token}" 4 | } 5 | 
-------------------------------------------------------------------------------- /apps/guestbook/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl delete deployments,services -l "app in (redis, guestbook)" 4 | -------------------------------------------------------------------------------- /etcd/resolved.conf: -------------------------------------------------------------------------------- 1 | [Resolve] 2 | DNS=${dns_server} 3 | Domains=${domain} 4 | LLMNR=yes 5 | DNSSEC=allow-downgrade 6 | Cache=yes 7 | -------------------------------------------------------------------------------- /dns/Corefile: -------------------------------------------------------------------------------- 1 | ${domain} { 2 | file /opt/coredns/zones/${domain} 3 | cache 4 | health 5 | prometheus 6 | errors stdout 7 | log stdout 8 | } 9 | -------------------------------------------------------------------------------- /k8s/master/encryption.yaml: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfig 2 | apiVersion: v1 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - aescbc: 8 | keys: 9 | - name: key1 10 | secret: ${encryption_key} 11 | - identity: {} 12 | -------------------------------------------------------------------------------- /dns/zone: -------------------------------------------------------------------------------- 1 | $$ORIGIN ${domain}. 2 | $$TTL 1h 3 | 4 | @ IN SOA ns.${domain}. dev.${domain}. 
( 5 | 2017092200 ; serial number of this zone file 6 | 1d ; slave refresh 7 | 2h ; slave retry time in case of a problem 8 | 4w ; slave expiration time 9 | 1h ; minimum caching time in case of failed lookups 10 | ) 11 | -------------------------------------------------------------------------------- /apps/ticker/deployment.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: ticker 5 | spec: 6 | replicas: 2 7 | template: 8 | metadata: 9 | labels: 10 | run: ticker 11 | spec: 12 | containers: 13 | - name: ticker 14 | image: "ubuntu:wily" 15 | command: ["/bin/bash", "-c"] 16 | args: ['for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done'] 17 | -------------------------------------------------------------------------------- /k8s/client/kubeconfig: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: ${cluster_name} 5 | cluster: 6 | server: ${apiserver_endpoint} 7 | certificate-authority-data: ${cacert} 8 | contexts: 9 | - context: 10 | cluster: ${cluster_name} 11 | user: ${username} 12 | name: default 13 | users: 14 | - name: ${username} 15 | user: 16 | client-certificate-data: ${client_cert} 17 | client-key-data: ${client_key} 18 | current-context: default 19 | -------------------------------------------------------------------------------- /k8s/workers/kubeconfig: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - cluster: 5 | server: ${apiserver_endpoint} 6 | certificate-authority: ${cacert_file} 7 | name: ${cluster_name} 8 | contexts: 9 | - context: 10 | cluster: ${cluster_name} 11 | user: ${username} 12 | name: default 13 | users: 14 | - name: ${username} 15 | user: 16 | client-certificate: ${client_cert_file} 17 | client-key: ${client_key_file} 18 | current-context: default 19 | 
-------------------------------------------------------------------------------- /terraform.tfvars.sample: -------------------------------------------------------------------------------- 1 | do_api_token = "" 2 | 3 | droplet_ssh_user = "" 4 | droplet_private_key_file = "" 5 | droplet_private_key_id = 6 | 7 | k8s_apiserver_encryption_key = "" 8 | 9 | tls_cacert_subject_common_name = "" 10 | tls_cacert_subject_organization = "" 11 | 12 | tls_cert_subject_organizational_unit = "" 13 | tls_cert_subject_street_address = "" 14 | tls_cert_subject_locality = "" 15 | tls_cert_subject_province = "" 16 | tls_cert_subject_postal_code = "" 17 | tls_cert_subject_country = "" 18 | tls_cert_validity_period_hours = 19 | tls_cert_early_renewal_hours = 20 | -------------------------------------------------------------------------------- /apps/guestbook-go/guestbook.yml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1beta1 3 | metadata: 4 | name: guestbook 5 | labels: 6 | app: guestbook 7 | spec: 8 | replicas: 3 9 | template: 10 | metadata: 11 | labels: 12 | app: guestbook 13 | spec: 14 | containers: 15 | - name: guestbook 16 | image: gcr.io/google_containers/guestbook:v3 17 | ports: 18 | - name: http-server 19 | containerPort: 3000 20 | 21 | --- 22 | kind: Service 23 | apiVersion: v1 24 | metadata: 25 | name: guestbook 26 | labels: 27 | app: guestbook 28 | spec: 29 | ports: 30 | - port: 3000 31 | targetPort: http-server 32 | nodePort: 32100 33 | selector: 34 | app: guestbook 35 | type: NodePort 36 | -------------------------------------------------------------------------------- /apps/guestbook-go/redis-master.yml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1beta1 3 | metadata: 4 | name: redis-master 5 | labels: 6 | app: redis 7 | role: master 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | app: redis 
14 | role: master 15 | spec: 16 | containers: 17 | - name: redis-master 18 | image: redis:2.8.23 19 | ports: 20 | - name: redis-server 21 | containerPort: 6379 22 | 23 | --- 24 | kind: Service 25 | apiVersion: v1 26 | metadata: 27 | name: redis-master 28 | labels: 29 | app: redis 30 | role: master 31 | spec: 32 | ports: 33 | - port: 6379 34 | targetPort: redis-server 35 | selector: 36 | app: redis 37 | role: master 38 | -------------------------------------------------------------------------------- /apps/guestbook-go/redis-slave.yml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1beta1 3 | metadata: 4 | name: redis-slave 5 | labels: 6 | app: redis 7 | role: slave 8 | spec: 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | app: redis 14 | role: slave 15 | spec: 16 | containers: 17 | - name: redis-slave 18 | image: kubernetes/redis-slave:v2 19 | ports: 20 | - name: redis-server 21 | containerPort: 6379 22 | 23 | --- 24 | kind: Service 25 | apiVersion: v1 26 | metadata: 27 | name: redis-slave 28 | labels: 29 | app: redis 30 | role: slave 31 | spec: 32 | ports: 33 | - port: 6379 34 | targetPort: redis-server 35 | selector: 36 | app: redis 37 | role: slave 38 | -------------------------------------------------------------------------------- /apps/guestbook/frontend.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: frontend 5 | labels: 6 | app: guestbook 7 | tier: frontend 8 | spec: 9 | type: NodePort 10 | ports: 11 | - port: 80 12 | nodePort: 32100 13 | selector: 14 | app: guestbook 15 | tier: frontend 16 | 17 | --- 18 | apiVersion: apps/v1beta1 19 | kind: Deployment 20 | metadata: 21 | name: frontend 22 | spec: 23 | replicas: 3 24 | template: 25 | metadata: 26 | labels: 27 | app: guestbook 28 | tier: frontend 29 | spec: 30 | containers: 31 | - name: php-redis 32 | image: 
gcr.io/google_samples/gb-frontend:v3 33 | resources: 34 | requests: 35 | cpu: 100m 36 | memory: 100M 37 | env: 38 | - name: GET_HOSTS_FROM 39 | value: dns 40 | ports: 41 | - containerPort: 80 42 | -------------------------------------------------------------------------------- /k8s/workers/kube-proxy-config: -------------------------------------------------------------------------------- 1 | apiVersion: componentconfig/v1alpha1 2 | bindAddress: 0.0.0.0 3 | clientConnection: 4 | acceptContentTypes: "" 5 | burst: 10 6 | contentType: application/vnd.kubernetes.protobuf 7 | kubeconfig: ${kube_proxy_kubeconfig} 8 | qps: 5 9 | clusterCIDR: ${cluster_cidr} 10 | configSyncPeriod: 15m0s 11 | conntrack: 12 | max: 0 13 | maxPerCore: 32768 14 | min: 131072 15 | tcpCloseWaitTimeout: 1h0m0s 16 | tcpEstablishedTimeout: 24h0m0s 17 | enableProfiling: false 18 | featureGates: "" 19 | healthzBindAddress: 0.0.0.0:10256 20 | hostnameOverride: "" 21 | iptables: 22 | masqueradeAll: false 23 | masqueradeBit: 14 24 | minSyncPeriod: 0s 25 | syncPeriod: 30s 26 | kind: KubeProxyConfiguration 27 | metricsBindAddress: 127.0.0.1:10249 28 | mode: "iptables" 29 | oomScoreAdj: -999 30 | portRange: "" 31 | resourceContainer: /kube-proxy 32 | udpTimeoutMilliseconds: 250ms 33 | -------------------------------------------------------------------------------- /apps/heapster/influxdb.yml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1beta1 3 | metadata: 4 | name: influxdb 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | app: influxdb 12 | spec: 13 | containers: 14 | - name: influxdb 15 | image: kubernetes/heapster_influxdb:v0.6 16 | ports: 17 | - name: influxdb-api 18 | containerPort: 8086 19 | volumeMounts: 20 | - mountPath: /data 21 | name: influxdb-storage 22 | volumes: 23 | - name: influxdb-storage 24 | emptyDir: {} 25 | 26 | --- 27 | kind: Service 28 | apiVersion: v1 29 | 
metadata: 30 | name: influxdb 31 | labels: 32 | app: influxdb 33 | namespace: kube-system 34 | spec: 35 | type: NodePort 36 | ports: 37 | - port: 8086 38 | targetPort: influxdb-api 39 | selector: 40 | app: influxdb 41 | -------------------------------------------------------------------------------- /apps/heapster/heapster.yml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1beta1 3 | metadata: 4 | name: heapster 5 | labels: 6 | app: heapster 7 | namespace: kube-system 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | app: heapster 14 | spec: 15 | containers: 16 | - name: heapster 17 | image: kubernetes/heapster:canary 18 | ports: 19 | - name: http-server 20 | containerPort: 8082 21 | command: 22 | - /heapster 23 | - --source=kubernetes:${apiserver_endpoint} 24 | - --sink=influxdb:http://influxdb:8086 25 | volumeMounts: 26 | - name: tls 27 | mountPath: /opt/k8s/tls 28 | readOnly: true 29 | volumes: 30 | - name: tls 31 | hostPath: 32 | path: /opt/k8s/tls 33 | --- 34 | kind: Service 35 | apiVersion: v1 36 | metadata: 37 | name: heapster 38 | labels: 39 | app: heapster 40 | namespace: kube-system 41 | spec: 42 | ports: 43 | - port: 80 44 | targetPort: http-server 45 | selector: 46 | app: heapster 47 | -------------------------------------------------------------------------------- /ca.tf: -------------------------------------------------------------------------------- 1 | resource "tls_private_key" "cakey" { 2 | algorithm = "RSA" 3 | rsa_bits = 4096 4 | } 5 | 6 | resource "tls_self_signed_cert" "cacert" { 7 | key_algorithm = "${tls_private_key.cakey.algorithm}" 8 | private_key_pem = "${tls_private_key.cakey.private_key_pem}" 9 | 10 | subject { 11 | common_name = "${var.tls_cacert_subject_common_name}" 12 | organization = "${var.tls_cacert_subject_organization}" 13 | organizational_unit = "${var.tls_cert_subject_organizational_unit}" 14 | street_address = 
["${var.tls_cert_subject_street_address}"] 15 | locality = "${var.tls_cert_subject_locality}" 16 | province = "${var.tls_cert_subject_province}" 17 | country = "${var.tls_cert_subject_country}" 18 | postal_code = "${var.tls_cert_subject_postal_code}" 19 | } 20 | 21 | validity_period_hours = "${var.tls_cert_validity_period_hours}" 22 | early_renewal_hours = "${var.tls_cert_early_renewal_hours}" 23 | 24 | allowed_uses = [ 25 | "key_encipherment", 26 | "server_auth", 27 | "client_auth", 28 | "cert_signing" 29 | ] 30 | 31 | is_ca_certificate = true 32 | } 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Ivan Sim 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /dns/config.yaml: -------------------------------------------------------------------------------- 1 | systemd: 2 | units: 3 | - name: coredns.service 4 | enable: true 5 | contents: | 6 | [Unit] 7 | Description=CoreDNS 8 | Documentation=https://coredns.io/tags/documentation/ 9 | Requires=docker.service 10 | After=docker.service 11 | 12 | [Service] 13 | ExecStart=/usr/bin/docker run --rm \ 14 | --name coredns \ 15 | -p 53:53/udp \ 16 | -v /opt/coredns/Corefile:/opt/coredns/Corefile \ 17 | -v /opt/coredns/zones/${domain}:/opt/coredns/zones/${domain} \ 18 | coredns/coredns:${tag} \ 19 | -conf /opt/coredns/Corefile 20 | ExecStop=/usr/bin/docker stop coredns 21 | Restart=on-failure 22 | RestartSec=5 23 | 24 | [Install] 25 | WantedBy=multi-user.target 26 | 27 | storage: 28 | files: 29 | - path: /etc/systemd/resolved.conf.d/00-droplet.conf 30 | filesystem: root 31 | mode: 0644 32 | user: 33 | id: 0 34 | group: 35 | id: 0 36 | contents: 37 | inline: | 38 | [Resolve] 39 | DNS=${dns_server} 40 | Domains=${domain} 41 | LLMNR=yes 42 | DNSSEC=allow-downgrade 43 | Cache=yes 44 | DNSStubListener=udp 45 | -------------------------------------------------------------------------------- /apps/guestbook/redis.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis 5 | labels: 6 | app: redis 7 | role: master 8 | tier: backend 9 | spec: 10 | ports: 11 | - port: 6379 12 | selector: 13 | app: redis 14 | role: master 15 | tier: backend 16 | 17 | --- 18 | apiVersion: apps/v1beta1 19 | kind: Deployment 20 | metadata: 21 | name: redis-master 22 | spec: 23 | replicas: 1 24 | template: 25 | metadata: 26 | labels: 27 | app: redis 28 | role: master 29 | tier: backend 30 | spec: 31 | containers: 32 | - name: master 33 | image: redis 34 | resources: 35 | requests: 36 | cpu: 100m 37 | memory: 100Mi 38 | ports: 39 | - 
containerPort: 6379 40 | 41 | --- 42 | apiVersion: v1 43 | kind: Service 44 | metadata: 45 | name: redis-slave 46 | labels: 47 | app: redis 48 | role: slave 49 | tier: backend 50 | spec: 51 | ports: 52 | - port: 6379 53 | selector: 54 | app: redis 55 | role: slave 56 | tier: backend 57 | 58 | --- 59 | apiVersion: apps/v1beta1 60 | kind: Deployment 61 | metadata: 62 | name: redis-slave 63 | spec: 64 | replicas: 2 65 | template: 66 | metadata: 67 | labels: 68 | app: redis 69 | role: slave 70 | tier: backend 71 | spec: 72 | containers: 73 | - name: slave 74 | image: redis 75 | resources: 76 | requests: 77 | cpu: 100m 78 | memory: 100Mi 79 | env: 80 | - name: GET_HOSTS_FROM 81 | value: dns 82 | ports: 83 | - containerPort: 6379 84 | -------------------------------------------------------------------------------- /apps/heapster/grafana.yml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1beta1 3 | metadata: 4 | name: grafana 5 | labels: 6 | app: grafana 7 | namespace: kube-system 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | app: grafana 14 | spec: 15 | containers: 16 | - name: grafana 17 | image: gcr.io/google_containers/heapster_grafana:v3.1.1 18 | ports: 19 | - name: http-server 20 | containerPort: 3000 21 | protocol: TCP 22 | volumeMounts: 23 | - mountPath: /var 24 | name: grafana-storage 25 | env: 26 | - name: INFLUXDB_HOST 27 | value: influxdb 28 | - name: GRAFANA_PORT 29 | value: "3000" 30 | # The following env variables are required to make Grafana accessible via 31 | # the kubernetes api-server proxy. On production clusters, we recommend 32 | # removing these env variables, setup auth for grafana, and expose the grafana 33 | # service using a LoadBalancer or a public IP. 
34 | - name: GF_AUTH_BASIC_ENABLED 35 | value: "false" 36 | - name: GF_AUTH_ANONYMOUS_ENABLED 37 | value: "true" 38 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 39 | value: Admin 40 | - name: GF_SERVER_ROOT_URL 41 | # value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ 42 | value: / 43 | volumes: 44 | - name: grafana-storage 45 | emptyDir: {} 46 | 47 | --- 48 | kind: Service 49 | apiVersion: v1 50 | metadata: 51 | name: grafana 52 | labels: 53 | app: grafana 54 | namespace: kube-system 55 | spec: 56 | type: NodePort 57 | ports: 58 | - port: 80 59 | targetPort: http-server 60 | selector: 61 | app: grafana 62 | -------------------------------------------------------------------------------- /client.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "client_kubeconfig" { 2 | triggers { 3 | master = "${digitalocean_droplet.k8s_masters.0.id}" 4 | } 5 | 6 | depends_on = ["null_resource.k8s_masters_tls"] 7 | 8 | provisioner "local-exec" { 9 | command = <> ${path.module}/.kubeconfig 12 | sleep 120 13 | kubectl --kubeconfig=${path.module}/.kubeconfig cluster-info 14 | kubectl --kubeconfig=${path.module}/.kubeconfig get componentstatus 15 | EOT 16 | } 17 | } 18 | 19 | 20 | data "template_file" "client_kubeconfig" { 21 | template = "${file("${path.module}/k8s/client/kubeconfig")}" 22 | 23 | vars { 24 | apiserver_endpoint = "${format("https://%s:%s", digitalocean_droplet.k8s_masters.0.ipv4_address, var.k8s_apiserver_secure_port)}" 25 | 26 | cacert = "${base64encode(tls_self_signed_cert.cacert.cert_pem)}" 27 | client_cert = "${base64encode(tls_locally_signed_cert.k8s_admin_client.cert_pem)}" 28 | client_key = "${base64encode(tls_private_key.k8s_admin_client.private_key_pem)}" 29 | 30 | cluster_name = "${var.k8s_cluster_name}" 31 | username = "${var.tls_client_cert_subject_common_name}" 32 | } 33 | } 34 | 35 | resource "tls_private_key" "k8s_admin_client" { 36 | algorithm = "RSA" 37 | rsa_bits = 2048 38 
| } 39 | 40 | resource "tls_cert_request" "k8s_admin_client" { 41 | key_algorithm = "${tls_private_key.k8s_admin_client.algorithm}" 42 | private_key_pem = "${tls_private_key.k8s_admin_client.private_key_pem}" 43 | 44 | subject { 45 | common_name = "${var.tls_client_cert_subject_common_name}" 46 | organization = "${var.tls_client_cert_subject_organization}" 47 | organizational_unit = "${var.tls_cert_subject_organizational_unit}" 48 | street_address = ["${var.tls_cert_subject_street_address}"] 49 | locality = "${var.tls_cert_subject_locality}" 50 | province = "${var.tls_cert_subject_province}" 51 | country = "${var.tls_cert_subject_country}" 52 | postal_code = "${var.tls_cert_subject_postal_code}" 53 | } 54 | } 55 | 56 | resource "tls_locally_signed_cert" "k8s_admin_client" { 57 | cert_request_pem = "${tls_cert_request.k8s_admin_client.cert_request_pem}" 58 | ca_key_algorithm = "${tls_private_key.cakey.algorithm}" 59 | ca_private_key_pem = "${tls_private_key.cakey.private_key_pem}" 60 | ca_cert_pem = "${tls_self_signed_cert.cacert.cert_pem}" 61 | validity_period_hours = "${var.tls_cert_validity_period_hours}" 62 | early_renewal_hours = "${var.tls_cert_early_renewal_hours}" 63 | 64 | allowed_uses = [ 65 | "server_auth", 66 | "client_auth" 67 | ] 68 | } 69 | -------------------------------------------------------------------------------- /k8s/workers/config.yaml: -------------------------------------------------------------------------------- 1 | systemd: 2 | units: 3 | - name: kubelet.service 4 | enable: true 5 | contents: | 6 | [Unit] 7 | Description=Kubernetes Kubelet 8 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 9 | Requires=coreos-metadata.service 10 | After=coreos-metadata.service 11 | Requires=docker.service 12 | After=docker.service 13 | 14 | [Service] 15 | EnvironmentFile=/run/metadata/coreos 16 | ExecStart=/opt/k8s/bin/kubelet \ 17 | --allow-privileged=true \ 18 | --address=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \ 19 | 
--anonymous-auth=false \ 20 | --authentication-token-webhook \ 21 | --authorization-mode=Webhook \ 22 | --client-ca-file=${cacert_file} \ 23 | --cluster-dns=${cluster_dns_ip} \ 24 | --cluster-domain=${cluster_domain} \ 25 | --container-runtime=docker \ 26 | --docker-endpoint=unix://var/run/docker.sock \ 27 | --image-pull-progress-deadline=2m \ 28 | --kubeconfig=${kubelet_kubeconfig} \ 29 | --network-plugin=cni \ 30 | --register-node=true \ 31 | --root-dir=${lib_home} \ 32 | --runtime-request-timeout=10m \ 33 | --tls-cert-file=${cert_file} \ 34 | --tls-private-key-file=${key_file} \ 35 | --v=2 36 | Restart=on-failure 37 | RestartSec=5 38 | 39 | [Install] 40 | WantedBy=multi-user.target 41 | 42 | - name: kube-proxy.service 43 | enable: true 44 | contents: | 45 | [Unit] 46 | Description=Kubernetes Kube Proxy 47 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 48 | 49 | [Service] 50 | ExecStart=/opt/k8s/bin/kube-proxy \ 51 | --config=${kube_proxy_config_file} 52 | Restart=on-failure 53 | RestartSec=5 54 | 55 | [Install] 56 | WantedBy=multi-user.target 57 | 58 | storage: 59 | files: 60 | - path: /etc/systemd/resolved.conf.d/00-droplet.conf 61 | filesystem: root 62 | mode: 0644 63 | user: 64 | id: 0 65 | group: 66 | id: 0 67 | contents: 68 | inline: | 69 | [Resolve] 70 | DNS=${dns_server} 71 | Domains=${domain} 72 | LLMNR=yes 73 | DNSSEC=allow-downgrade 74 | Cache=yes 75 | DNSStubListener=udp 76 | 77 | update: 78 | group: ${update_channel} 79 | 80 | locksmith: 81 | reboot_strategy: etcd-lock 82 | window_start: ${maintenance_window_start} 83 | window_length: ${maintenance_window_length} 84 | etcd_endpoints: ${etcd_endpoints} 85 | etcd_cafile: ${cacert_file} 86 | etcd_certfile: ${cert_file} 87 | etcd_keyfile: ${key_file} 88 | -------------------------------------------------------------------------------- /etcd/config.yaml: -------------------------------------------------------------------------------- 1 | etcd: 2 | version: ${etcd_version} 3 | name: 
"{HOSTNAME}" 4 | initial_cluster: ${etcd_initial_cluster} 5 | initial_cluster_state: new 6 | 7 | data_dir: ${data_dir} 8 | 9 | advertise_client_urls: https://{PRIVATE_IPV4}:${etcd_client_port} 10 | initial_advertise_peer_urls: https://{PRIVATE_IPV4}:${etcd_peer_port} 11 | listen_client_urls: https://0.0.0.0:${etcd_client_port} 12 | listen_peer_urls: https://{PRIVATE_IPV4}:${etcd_peer_port} 13 | 14 | peer_client_cert_auth: true 15 | peer_trusted_ca_file: ${cacert_file} 16 | peer_cert_file: ${cert_file} 17 | peer_key_file: ${key_file} 18 | 19 | client_cert_auth: true 20 | trusted_ca_file: ${cacert_file} 21 | cert_file: ${cert_file} 22 | key_file: ${key_file} 23 | 24 | heartbeat_interval: ${etcd_heartbeat_interval} 25 | election_timeout: ${etcd_election_timeout} 26 | 27 | systemd: 28 | units: 29 | - name: var-lib-etcd.mount 30 | enable: true 31 | contents: | 32 | [Unit] 33 | Description=etcd data folder 34 | Before=etcd-member.service 35 | 36 | [Mount] 37 | What=${device_path} 38 | Where=${data_dir} 39 | Type=ext4 40 | 41 | [Install] 42 | RequiredBy=etcd-member.service 43 | WantedBy=multi-user.target 44 | 45 | storage: 46 | filesystems: 47 | - mount: 48 | device: ${device_path} 49 | format: ext4 50 | create: 51 | force: true 52 | 53 | files: 54 | - path: ${cacert_file} 55 | filesystem: root 56 | mode: 0644 57 | contents: 58 | inline: ${cacert} 59 | user: 60 | id: 500 61 | group: 62 | id: 500 63 | - path: /etc/profile.env 64 | filesystem: root 65 | mode: 0644 66 | contents: 67 | inline: | 68 | source /run/metadata/coreos 69 | export ETCDCTL_API=3 70 | export ETCDCTL_CACERT=${cacert_file} 71 | export ETCDCTL_CERT=${cert_file} 72 | export ETCDCTL_KEY=${key_file} 73 | export ETCDCTL_ENDPOINTS=https://$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0}:${etcd_client_port} 74 | - path: /etc/systemd/resolved.conf.d/00-droplet.conf 75 | filesystem: root 76 | mode: 0644 77 | user: 78 | id: 0 79 | group: 80 | id: 0 81 | contents: 82 | inline: | 83 | [Resolve] 84 | DNS=${dns_server} 85 | 
Domains=${domain} 86 | LLMNR=yes 87 | DNSSEC=allow-downgrade 88 | Cache=yes 89 | DNSStubListener=udp 90 | 91 | update: 92 | group: ${update_channel} 93 | 94 | locksmith: 95 | reboot_strategy: etcd-lock 96 | window_start: ${maintenance_window_start} 97 | window_length: ${maintenance_window_length} 98 | etcd_endpoints: https://{PRIVATE_IPV4}:${etcd_client_port} 99 | etcd_cafile: ${cacert_file} 100 | etcd_certfile: ${cert_file} 101 | etcd_keyfile: ${key_file} 102 | -------------------------------------------------------------------------------- /apps/dashboard/deployment.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Configuration to deploy release version of the Dashboard UI. 
16 | # 17 | # Example usage: kubectl create -f 18 | kind: Deployment 19 | apiVersion: apps/v1beta1 20 | metadata: 21 | labels: 22 | app: kubernetes-dashboard 23 | name: kubernetes-dashboard 24 | namespace: kube-system 25 | spec: 26 | replicas: 1 27 | selector: 28 | matchLabels: 29 | app: kubernetes-dashboard 30 | template: 31 | metadata: 32 | labels: 33 | app: kubernetes-dashboard 34 | # Comment the following annotaion if Dashboard must not be deployed on master 35 | annotations: 36 | scheduler.alpha.kubernetes.io/tolerations: | 37 | [ 38 | { 39 | "key": "dedicated", 40 | "operator": "Equal", 41 | "value": "master", 42 | "effect": "NoSchedule" 43 | } 44 | ] 45 | spec: 46 | containers: 47 | - name: kubernetes-dashboard 48 | image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0 49 | imagePullPolicy: Always 50 | ports: 51 | - containerPort: 9090 52 | protocol: TCP 53 | volumeMounts: 54 | - name: "kubeconfig" 55 | mountPath: "${lib_home}" 56 | readOnly: true 57 | - name: "tls" 58 | mountPath: "${tls_home}" 59 | readOnly: true 60 | args: 61 | # Uncomment the following line to manually specify Kubernetes API server Host 62 | # If not specified, Dashboard will attempt to auto discover the API server and connect 63 | # to it. Uncomment only if the default does not work. 
64 | # - --apiserver-host= 65 | - --kubeconfig=${kubeconfig_file} 66 | livenessProbe: 67 | httpGet: 68 | path: / 69 | port: 9090 70 | initialDelaySeconds: 30 71 | timeoutSeconds: 30 72 | volumes: 73 | - name: "kubeconfig" 74 | hostPath: 75 | path: "${lib_home}" 76 | - name: "tls" 77 | hostPath: 78 | path: "${tls_home}" 79 | --- 80 | kind: Service 81 | apiVersion: v1 82 | metadata: 83 | labels: 84 | app: kubernetes-dashboard 85 | name: kubernetes-dashboard 86 | namespace: kube-system 87 | spec: 88 | type: NodePort 89 | ports: 90 | - port: 80 91 | targetPort: 9090 92 | selector: 93 | app: kubernetes-dashboard 94 | -------------------------------------------------------------------------------- /k8s/network/flannel.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: flannel 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - pods 10 | verbs: 11 | - get 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - nodes 16 | verbs: 17 | - list 18 | - watch 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - nodes/status 23 | verbs: 24 | - patch 25 | 26 | --- 27 | kind: ClusterRoleBinding 28 | apiVersion: rbac.authorization.k8s.io/v1 29 | metadata: 30 | name: flannel 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: ClusterRole 34 | name: flannel 35 | subjects: 36 | - kind: ServiceAccount 37 | name: flannel 38 | namespace: kube-system 39 | --- 40 | apiVersion: v1 41 | kind: ServiceAccount 42 | metadata: 43 | name: flannel 44 | namespace: kube-system 45 | --- 46 | kind: ConfigMap 47 | apiVersion: v1 48 | metadata: 49 | name: kube-flannel-cfg 50 | namespace: kube-system 51 | labels: 52 | tier: node 53 | app: flannel 54 | data: 55 | cni-conf.json: | 56 | { 57 | \"name\": \"cbr0\", 58 | \"type\": \"flannel\", 59 | \"delegate\": { 60 | \"isDefaultGateway\": true 61 | } 62 | } 63 | net-conf.json: | 64 | { 65 | \"Network\": \"${pod_cidr}\", 66 
| \"Backend\": { 67 | \"Type\": \"vxlan\" 68 | } 69 | } 70 | --- 71 | apiVersion: extensions/v1beta1 72 | kind: DaemonSet 73 | metadata: 74 | name: kube-flannel-ds 75 | namespace: kube-system 76 | labels: 77 | tier: node 78 | app: flannel 79 | spec: 80 | template: 81 | metadata: 82 | labels: 83 | tier: node 84 | app: flannel 85 | spec: 86 | hostNetwork: true 87 | nodeSelector: 88 | beta.kubernetes.io/arch: amd64 89 | tolerations: 90 | - key: node-role.kubernetes.io/master 91 | operator: Exists 92 | effect: NoSchedule 93 | serviceAccountName: flannel 94 | initContainers: 95 | - name: install-cni 96 | image: quay.io/coreos/flannel:${flannel_version}-amd64 97 | command: 98 | - cp 99 | args: 100 | - -f 101 | - /etc/kube-flannel/cni-conf.json 102 | - /etc/cni/net.d/10-flannel.conf 103 | volumeMounts: 104 | - name: cni 105 | mountPath: /etc/cni/net.d 106 | - name: flannel-cfg 107 | mountPath: /etc/kube-flannel/ 108 | containers: 109 | - name: kube-flannel 110 | image: quay.io/coreos/flannel:${flannel_version}-amd64 111 | command: 112 | - /opt/bin/flanneld 113 | - --ip-masq 114 | - --kube-subnet-mgr 115 | - --kube-api-url 116 | - ${apiserver_endpoint} 117 | - --kubeconfig-file 118 | - ${kubeconfig_file} 119 | securityContext: 120 | privileged: true 121 | env: 122 | - name: POD_NAME 123 | valueFrom: 124 | fieldRef: 125 | fieldPath: metadata.name 126 | - name: POD_NAMESPACE 127 | valueFrom: 128 | fieldRef: 129 | fieldPath: metadata.namespace 130 | volumeMounts: 131 | - name: run 132 | mountPath: /run 133 | - name: flannel-cfg 134 | mountPath: /etc/kube-flannel/ 135 | volumes: 136 | - name: run 137 | hostPath: 138 | path: /run 139 | - name: cni 140 | hostPath: 141 | path: /etc/cni/net.d 142 | - name: flannel-cfg 143 | configMap: 144 | name: kube-flannel-cfg 145 | -------------------------------------------------------------------------------- /k8s/network/coredns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | 
kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRole 10 | metadata: 11 | labels: 12 | kubernetes.io/bootstrapping: rbac-defaults 13 | name: system:coredns 14 | rules: 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - endpoints 19 | - services 20 | - pods 21 | - namespaces 22 | verbs: 23 | - list 24 | - watch 25 | 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: ClusterRoleBinding 29 | metadata: 30 | labels: 31 | kubernetes.io/bootstrapping: rbac-defaults 32 | name: system:coredns 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: ClusterRole 36 | name: system:coredns 37 | subjects: 38 | - kind: ServiceAccount 39 | name: coredns 40 | namespace: kube-system 41 | 42 | --- 43 | apiVersion: v1 44 | kind: ConfigMap 45 | metadata: 46 | name: coredns 47 | namespace: kube-system 48 | data: 49 | Corefile: | 50 | .:53 { 51 | errors 52 | log stdout 53 | health 54 | kubernetes ${cluster_domain} { 55 | endpoint ${apiserver_endpoint} 56 | tls ${cert_file} ${key_file} ${cacert_file} 57 | } 58 | proxy . 
/etc/resolv.conf 59 | cache 30 60 | } 61 | 62 | --- 63 | apiVersion: extensions/v1beta1 64 | kind: Deployment 65 | metadata: 66 | name: coredns 67 | namespace: kube-system 68 | labels: 69 | k8s-app: coredns 70 | spec: 71 | replicas: 1 72 | selector: 73 | matchLabels: 74 | k8s-app: coredns 75 | template: 76 | metadata: 77 | labels: 78 | k8s-app: coredns 79 | addonmanager.kubernetes.io/mode: Reconcile 80 | annotations: 81 | scheduler.alpha.kubernetes.io/critical-pod: '' 82 | spec: 83 | serviceAccountName: coredns 84 | containers: 85 | - name: coredns 86 | image: coredns/coredns:${coredns_version} 87 | imagePullPolicy: Always 88 | args: [ "-conf", "/etc/coredns/Corefile" ] 89 | volumeMounts: 90 | - name: config-volume 91 | mountPath: /etc/coredns 92 | - name: tls-home 93 | mountPath: ${tls_home} 94 | ports: 95 | - containerPort: 53 96 | name: dns 97 | protocol: UDP 98 | - containerPort: 53 99 | name: dns-tcp 100 | protocol: TCP 101 | - containerPort: 9153 102 | name: metrics 103 | protocol: TCP 104 | livenessProbe: 105 | httpGet: 106 | path: /health 107 | port: 8080 108 | scheme: HTTP 109 | initialDelaySeconds: 60 110 | timeoutSeconds: 5 111 | successThreshold: 1 112 | failureThreshold: 5 113 | tolerations: 114 | - key: CriticalAddonsOnly 115 | operator: Exists 116 | dnsPolicy: Default 117 | volumes: 118 | - name: config-volume 119 | configMap: 120 | name: coredns 121 | items: 122 | - key: Corefile 123 | path: Corefile 124 | - name: tls-home 125 | hostPath: 126 | path: ${tls_home} 127 | 128 | --- 129 | apiVersion: v1 130 | kind: Service 131 | metadata: 132 | name: core-dns 133 | namespace: kube-system 134 | labels: 135 | k8s-app: coredns 136 | addonmanager.kubernetes.io/mode: Reconcile 137 | spec: 138 | selector: 139 | k8s-app: coredns 140 | clusterIP: ${cluster_dns_ip} 141 | ports: 142 | - name: dns 143 | port: 53 144 | protocol: UDP 145 | - name: dns-tcp 146 | port: 53 147 | protocol: TCP 148 | - name: metrics 149 | port: 9153 150 | protocol: TCP 151 | 
-------------------------------------------------------------------------------- /dns.tf: -------------------------------------------------------------------------------- 1 | resource "digitalocean_droplet" "coredns" { 2 | name = "coredns" 3 | image = "${var.coreos_image}" 4 | region = "${var.droplet_region}" 5 | size = "1GB" 6 | private_networking = "true" 7 | ssh_keys = ["${var.droplet_private_key_id}"] 8 | user_data = "${data.ct_config.coredns.rendered}" 9 | 10 | lifecycle { 11 | create_before_destroy = true 12 | } 13 | 14 | connection { 15 | user = "${var.droplet_ssh_user}" 16 | private_key = "${file(var.droplet_private_key_file)}" 17 | } 18 | 19 | provisioner "remote-exec" { 20 | inline = [ 21 | "sudo mkdir -p /opt/coredns/zones", 22 | "sudo chown -R ${var.droplet_ssh_user} /opt/coredns" 23 | ] 24 | } 25 | 26 | provisioner "file" { 27 | content = "${data.template_file.coredns_corefile.rendered}" 28 | destination = "/opt/coredns/Corefile" 29 | } 30 | 31 | provisioner "file" { 32 | content = "${data.template_file.coredns_zonefile.rendered}" 33 | destination = "/opt/coredns/zones/${var.droplet_domain}" 34 | } 35 | } 36 | 37 | resource "null_resource" "coredns_zonefile_records" { 38 | triggers { 39 | etcd = "${join(",", digitalocean_droplet.etcd.*.ipv4_address_private)}" 40 | k8s_masters = "${join(",", digitalocean_droplet.k8s_masters.*.ipv4_address_private)}" 41 | k8s_workers = "${join(",", digitalocean_droplet.k8s_workers.*.ipv4_address_private)}" 42 | } 43 | 44 | connection { 45 | user = "${var.droplet_ssh_user}" 46 | private_key = "${file(var.droplet_private_key_file)}" 47 | host = "${digitalocean_droplet.coredns.ipv4_address}" 48 | } 49 | 50 | provisioner "remote-exec" { 51 | inline = [<<CMD 52 | cat <<EOF >> /opt/coredns/zones/${var.droplet_domain} 53 | ${format("%s IN A %s", digitalocean_droplet.coredns.name, digitalocean_droplet.coredns.ipv4_address_private)} 54 | 55 | ${join("\n", formatlist("%s IN A %s", digitalocean_droplet.etcd.*.name, 
digitalocean_droplet.etcd.*.ipv4_address_private))} 56 | 57 | ${join("\n", formatlist("%s IN A %s", digitalocean_droplet.k8s_masters.*.name, digitalocean_droplet.k8s_masters.*.ipv4_address_private))} 58 | 59 | ${join("\n", formatlist("%s IN A %s", digitalocean_droplet.k8s_workers.*.name, digitalocean_droplet.k8s_workers.*.ipv4_address_private))} 60 | EOF 61 | 62 | sudo systemctl restart coredns 63 | CMD 64 | ] 65 | } 66 | } 67 | 68 | data "ct_config" "coredns" { 69 | platform = "digitalocean" 70 | content = "${data.template_file.coredns_config.rendered}" 71 | } 72 | 73 | data "template_file" "coredns_config" { 74 | template = "${file("${path.module}/dns/config.yaml")}" 75 | 76 | vars { 77 | dns_server = "127.0.0.1" 78 | domain = "${var.droplet_domain}" 79 | tag = "${var.coredns_version}" 80 | } 81 | } 82 | 83 | data "template_file" "coredns_corefile" { 84 | template = "${file("${path.module}/dns/Corefile")}" 85 | 86 | vars { 87 | domain = "${var.droplet_domain}" 88 | } 89 | } 90 | 91 | data "template_file" "coredns_zonefile" { 92 | template = "${file("${path.module}/dns/zone")}" 93 | 94 | vars { 95 | domain = "${var.droplet_domain}" 96 | } 97 | } 98 | 99 | resource "tls_private_key" "coredns" { 100 | algorithm = "RSA" 101 | rsa_bits = 4096 102 | } 103 | 104 | resource "tls_cert_request" "coredns" { 105 | key_algorithm = "${tls_private_key.coredns.algorithm}" 106 | private_key_pem = "${tls_private_key.coredns.private_key_pem}" 107 | 108 | ip_addresses = [ 109 | "${digitalocean_droplet.coredns.ipv4_address_private}", 110 | "${digitalocean_droplet.coredns.ipv4_address}" 111 | ] 112 | 113 | dns_names = [ 114 | "${digitalocean_droplet.coredns.name}", 115 | "${digitalocean_droplet.coredns.name}.${var.droplet_domain}" 116 | ] 117 | 118 | subject { 119 | common_name = "${var.tls_coredns_cert_subject_common_name}" 120 | organization = "${var.tls_coredns_cert_subject_organization}" 121 | organizational_unit = "${var.tls_cert_subject_organizational_unit}" 122 | street_address 
= ["${var.tls_cert_subject_street_address}"] 123 | locality = "${var.tls_cert_subject_locality}" 124 | province = "${var.tls_cert_subject_province}" 125 | country = "${var.tls_cert_subject_country}" 126 | postal_code = "${var.tls_cert_subject_postal_code}" 127 | } 128 | } 129 | 130 | resource "tls_locally_signed_cert" "coredns" { 131 | cert_request_pem = "${tls_cert_request.coredns.cert_request_pem}" 132 | ca_key_algorithm = "${tls_private_key.cakey.algorithm}" 133 | ca_private_key_pem = "${tls_private_key.cakey.private_key_pem}" 134 | ca_cert_pem = "${tls_self_signed_cert.cacert.cert_pem}" 135 | validity_period_hours = "${var.tls_cert_validity_period_hours}" 136 | early_renewal_hours = "${var.tls_cert_early_renewal_hours}" 137 | 138 | allowed_uses = [ 139 | "server_auth", 140 | "client_auth" 141 | ] 142 | } 143 | -------------------------------------------------------------------------------- /k8s/master/config.yaml: -------------------------------------------------------------------------------- 1 | systemd: 2 | units: 3 | - name: kube-apiserver.service 4 | enable: true 5 | contents: | 6 | [Unit] 7 | Description=Kubernetes API Server 8 | Documentation=http://kubernetes.io/docs/admin/kube-apiserver/ 9 | Requires=coreos-metadata.service 10 | After=coreos-metadata.service 11 | After=docker.service 12 | Wants=docker.service 13 | 14 | [Service] 15 | EnvironmentFile=/run/metadata/coreos 16 | ExecStart=/opt/k8s/bin/kube-apiserver \ 17 | --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction \ 18 | --advertise-address=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \ 19 | --allow-privileged=true \ 20 | --anonymous-auth=false \ 21 | --apiserver-count=${apiserver_count} \ 22 | --audit-log-maxage=30 \ 23 | --audit-log-maxbackup=3 \ 24 | --audit-log-maxsize=100 \ 25 | --audit-log-path=/var/log/audit.log \ 26 | --authorization-mode=Node,RBAC \ 27 | --bind-address=0.0.0.0 \ 28 | 
--client-ca-file=${cacert_file} \ 29 | --enable-swagger-ui=true \ 30 | --etcd-cafile=${cacert_file} \ 31 | --etcd-certfile=${cert_file} \ 32 | --etcd-keyfile=${key_file} \ 33 | --etcd-servers=${etcd_endpoints} \ 34 | --event-ttl=1h \ 35 | --experimental-encryption-provider-config=${apiserver_encryption_config_file} \ 36 | --insecure-bind-address=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \ 37 | --insecure-port=${apiserver_insecure_port} \ 38 | --kubelet-certificate-authority=${cacert_file} \ 39 | --kubelet-client-certificate=${cert_file} \ 40 | --kubelet-client-key=${key_file} \ 41 | --kubelet-https=true \ 42 | --runtime-config=api/all \ 43 | --secure-port=${apiserver_secure_port} \ 44 | --service-account-key-file=${cakey_file} \ 45 | --service-cluster-ip-range=${service_cluster_ip_range} \ 46 | --service-node-port-range=${service_node_port_range} \ 47 | --tls-ca-file=${cacert_file} \ 48 | --tls-cert-file=${cert_file} \ 49 | --tls-private-key-file=${key_file} \ 50 | --token-auth-file=/opt/k8s/token.csv \ 51 | --v=2 52 | Restart=on-failure 53 | RestartSec=5 54 | 55 | [Install] 56 | WantedBy=multi-user.target 57 | 58 | - name: kube-controller-manager.service 59 | enable: true 60 | contents: | 61 | [Unit] 62 | Description=Kubernetes Controller Manager 63 | Requires=coreos-metadata.service 64 | After=coreos-metadata.service 65 | After=kube-apiserver.service 66 | Wants=kube-apiserver.service 67 | 68 | [Service] 69 | EnvironmentFile=/run/metadata/coreos 70 | ExecStart=/opt/k8s/bin/kube-controller-manager \ 71 | --address=0.0.0.0 \ 72 | --allocate-node-cidrs=true \ 73 | --cluster-cidr=${cluster_cidr} \ 74 | --cluster-name=${cluster_name} \ 75 | --cluster-signing-cert-file=${cacert_file} \ 76 | --cluster-signing-key-file=${cakey_file} \ 77 | --leader-elect=true \ 78 | --master=http://$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0}:${apiserver_insecure_port} \ 79 | --root-ca-file=${cacert_file} \ 80 | --service-account-private-key-file=${cakey_file} \ 81 | 
--service-cluster-ip-range=${service_cluster_ip_range} \ 82 | --v=2 83 | Restart=on-failure 84 | RestartSec=5 85 | 86 | [Install] 87 | WantedBy=multi-user.target 88 | 89 | - name: kube-scheduler.service 90 | enable: true 91 | contents: | 92 | [Unit] 93 | Description=Kubernetes Scheduler 94 | Requires=coreos-metadata.service 95 | After=coreos-metadata.service 96 | After=kube-apiserver.service 97 | Wants=kube-apiserver.service 98 | 99 | [Service] 100 | EnvironmentFile=/run/metadata/coreos 101 | ExecStart=/opt/k8s/bin/kube-scheduler \ 102 | --leader-elect=true \ 103 | --master=http://$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0}:${apiserver_insecure_port} \ 104 | --v=2 105 | Restart=on-failure 106 | RestartSec=5 107 | 108 | [Install] 109 | WantedBy=multi-user.target 110 | 111 | storage: 112 | files: 113 | - path: /etc/systemd/resolved.conf.d/00-droplet.conf 114 | filesystem: root 115 | mode: 0644 116 | user: 117 | id: 0 118 | group: 119 | id: 0 120 | contents: 121 | inline: | 122 | [Resolve] 123 | DNS=${dns_server} 124 | Domains=${domain} 125 | LLMNR=yes 126 | DNSSEC=allow-downgrade 127 | Cache=yes 128 | DNSStubListener=udp 129 | 130 | update: 131 | group: ${update_channel} 132 | 133 | locksmith: 134 | reboot_strategy: etcd-lock 135 | window_start: ${maintenance_window_start} 136 | window_length: ${maintenance_window_length} 137 | etcd_endpoints: ${etcd_endpoints} 138 | etcd_cafile: ${cacert_file} 139 | etcd_certfile: ${cert_file} 140 | etcd_keyfile: ${key_file} 141 | -------------------------------------------------------------------------------- /apps/kubedns/deployment.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | apiVersion: apps/v1beta1 16 | kind: Deployment 17 | metadata: 18 | name: kube-dns-v20 19 | namespace: kube-system 20 | labels: 21 | k8s-app: kube-dns 22 | version: v20 23 | kubernetes.io/cluster-service: "true" 24 | spec: 25 | replicas: 2 26 | selector: 27 | matchLabels: 28 | k8s-app: kube-dns 29 | version: v20 30 | template: 31 | metadata: 32 | labels: 33 | k8s-app: kube-dns 34 | version: v20 35 | kubernetes.io/cluster-service: "true" 36 | annotations: 37 | scheduler.alpha.kubernetes.io/critical-pod: '' 38 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' 39 | spec: 40 | containers: 41 | - name: kubedns 42 | image: gcr.io/google_containers/kubedns-amd64:1.8 43 | resources: 44 | # TODO: Set memory limits when we've profiled the container for large 45 | # clusters, then set request = limit to keep this container in 46 | # guaranteed class. Currently, this container falls into the 47 | # "burstable" category so the kubelet doesn't backoff from restarting it. 48 | limits: 49 | memory: 170Mi 50 | requests: 51 | cpu: 100m 52 | memory: 70Mi 53 | livenessProbe: 54 | httpGet: 55 | path: /healthz-kubedns 56 | port: 8080 57 | scheme: HTTP 58 | initialDelaySeconds: 60 59 | timeoutSeconds: 5 60 | successThreshold: 1 61 | failureThreshold: 5 62 | readinessProbe: 63 | httpGet: 64 | path: /readiness 65 | port: 8081 66 | scheme: HTTP 67 | # we poll on pod startup for the Kubernetes master service and 68 | # only setup the /readiness HTTP server once that's available. 
69 | initialDelaySeconds: 3 70 | timeoutSeconds: 5 71 | args: 72 | # command = "/kube-dns" 73 | - --kubecfg-file=${kubeconfig_file} 74 | - --domain=${cluster_domain} 75 | - --dns-port=10053 76 | ports: 77 | - containerPort: 10053 78 | name: dns-local 79 | protocol: UDP 80 | - containerPort: 10053 81 | name: dns-tcp-local 82 | protocol: TCP 83 | volumeMounts: 84 | - name: "kubeconfig" 85 | mountPath: "${lib_home}" 86 | readOnly: true 87 | - name: "tls" 88 | mountPath: "${tls_home}" 89 | readOnly: true 90 | - name: dnsmasq 91 | image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4 92 | livenessProbe: 93 | httpGet: 94 | path: /healthz-dnsmasq 95 | port: 8080 96 | scheme: HTTP 97 | initialDelaySeconds: 60 98 | timeoutSeconds: 5 99 | successThreshold: 1 100 | failureThreshold: 5 101 | args: 102 | - --cache-size=1000 103 | - --no-resolv 104 | - --server=127.0.0.1#10053 105 | - --log-facility=- 106 | ports: 107 | - containerPort: 53 108 | name: dns 109 | protocol: UDP 110 | - containerPort: 53 111 | name: dns-tcp 112 | protocol: TCP 113 | - name: healthz 114 | image: gcr.io/google_containers/exechealthz-amd64:1.2 115 | resources: 116 | limits: 117 | memory: 50Mi 118 | requests: 119 | cpu: 10m 120 | memory: 50Mi 121 | args: 122 | - --cmd=nslookup kubernetes.default.svc.${cluster_domain} 127.0.0.1 >/dev/null 123 | - --url=/healthz-dnsmasq 124 | - --cmd=nslookup kubernetes.default.svc.${cluster_domain} 127.0.0.1:10053 >/dev/null 125 | - --url=/healthz-kubedns 126 | - --port=8080 127 | - --quiet 128 | ports: 129 | - containerPort: 8080 130 | protocol: TCP 131 | dnsPolicy: Default # Don't use cluster DNS. 
132 | volumes: 133 | - name: "kubeconfig" 134 | hostPath: 135 | path: "${lib_home}" 136 | - name: "tls" 137 | hostPath: 138 | path: "${tls_home}" 139 | 140 | --- 141 | apiVersion: v1 142 | kind: Service 143 | metadata: 144 | name: kube-dns 145 | namespace: kube-system 146 | labels: 147 | k8s-app: kube-dns 148 | kubernetes.io/cluster-service: "true" 149 | kubernetes.io/name: "KubeDNS" 150 | spec: 151 | selector: 152 | k8s-app: kube-dns 153 | clusterIP: ${cluster_dns_ip} 154 | ports: 155 | - name: dns 156 | port: 53 157 | protocol: UDP 158 | - name: dns-tcp 159 | port: 53 160 | protocol: TCP 161 | 162 | -------------------------------------------------------------------------------- /etcd.tf: -------------------------------------------------------------------------------- 1 | resource "digitalocean_droplet" "etcd" { 2 | count = "${var.etcd_count}" 3 | name = "${format("etcd-%02d", count.index)}" 4 | image = "${var.coreos_image}" 5 | region = "${var.droplet_region}" 6 | size = "1GB" 7 | private_networking = "true" 8 | ssh_keys = ["${var.droplet_private_key_id}"] 9 | user_data = "${element(data.ct_config.etcd.*.rendered, count.index)}" 10 | volume_ids = ["${element(digitalocean_volume.etcd_data.*.id, count.index)}"] 11 | 12 | lifecycle { 13 | create_before_destroy = true 14 | } 15 | } 16 | 17 | resource "null_resource" "etcd_tls" { 18 | count = "${var.etcd_count}" 19 | 20 | triggers { 21 | droplets = "${join(",", digitalocean_droplet.etcd.*.id)}" 22 | } 23 | 24 | connection { 25 | user = "${var.droplet_ssh_user}" 26 | private_key = "${file(var.droplet_private_key_file)}" 27 | host = "${element(digitalocean_droplet.etcd.*.ipv4_address, count.index)}" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo mkdir -p ${var.droplet_tls_certs_home}/${var.droplet_domain}", 33 | "sudo chown -R ${var.droplet_ssh_user} ${var.droplet_tls_certs_home}/${var.droplet_domain}" 34 | ] 35 | } 36 | 37 | provisioner "file" { 38 | content = 
"${element(tls_locally_signed_cert.etcd_cert.*.cert_pem, count.index)}" 39 | destination = "${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_cert_file}" 40 | } 41 | 42 | provisioner "file" { 43 | content = "${element(tls_private_key.etcd_key.*.private_key_pem, count.index)}" 44 | destination = "${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_key_file}" 45 | } 46 | } 47 | 48 | resource "digitalocean_volume" "etcd_data" { 49 | count = "${var.etcd_count}" 50 | 51 | name = "${format("etcd-%02d-data", count.index)}" 52 | region = "${var.droplet_region}" 53 | size = 10 54 | } 55 | 56 | data "ct_config" "etcd" { 57 | count = "${var.etcd_count}" 58 | 59 | platform = "digitalocean" 60 | content = "${element(data.template_file.etcd_config.*.rendered, count.index)}" 61 | } 62 | 63 | data "template_file" "etcd_config" { 64 | count = "${var.etcd_count}" 65 | template = "${file("${path.module}/etcd/config.yaml")}" 66 | 67 | vars { 68 | etcd_version = "${var.etcd_version}" 69 | data_dir = "${var.etcd_data_dir}" 70 | 71 | etcd_client_port = "${var.etcd_client_port}" 72 | etcd_peer_port = "${var.etcd_peer_port}" 73 | etcd_heartbeat_interval = "${var.etcd_heartbeat_interval}" 74 | etcd_election_timeout = "${var.etcd_election_timeout}" 75 | etcd_initial_cluster = "${join(",", formatlist("%s=https://%s.${var.droplet_domain}:%s", list("etcd-00", "etcd-01", "etcd-02"), list("etcd-00", "etcd-01", "etcd-02"), var.etcd_peer_port))}" 76 | 77 | domain = "${var.droplet_domain}" 78 | dns_server = "${digitalocean_droplet.coredns.ipv4_address_private}" 79 | 80 | cacert = "${jsonencode(tls_self_signed_cert.cacert.cert_pem)}" 81 | cacert_file = "${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_cacert_file}" 82 | cert_file = "${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_cert_file}" 83 | key_file = "${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_key_file}" 84 | 85 | device_path = 
"/dev/disk/by-id/scsi-0DO_Volume_${format("etcd-%02d-data", count.index)}" 86 | 87 | maintenance_window_start = "${var.droplet_maintenance_window_start}" 88 | maintenance_window_length = "${var.droplet_maintenance_window_length}" 89 | update_channel = "${var.droplet_update_channel}" 90 | } 91 | } 92 | 93 | resource "tls_private_key" "etcd_key" { 94 | count = "${var.etcd_count}" 95 | 96 | algorithm = "RSA" 97 | rsa_bits = 4096 98 | } 99 | 100 | resource "tls_cert_request" "etcd_csr" { 101 | count = "${var.etcd_count}" 102 | 103 | key_algorithm = "${element(tls_private_key.etcd_key.*.algorithm, count.index)}" 104 | private_key_pem = "${element(tls_private_key.etcd_key.*.private_key_pem, count.index)}" 105 | 106 | ip_addresses = [ 107 | "${element(digitalocean_droplet.etcd.*.ipv4_address_private, count.index)}" 108 | ] 109 | 110 | dns_names = [ 111 | "${element(digitalocean_droplet.etcd.*.name, count.index)}", 112 | "${element(digitalocean_droplet.etcd.*.name, count.index)}.${var.droplet_domain}", 113 | ] 114 | 115 | subject = { 116 | common_name = "${var.tls_etcd_cert_subject_common_name}" 117 | organization = "${var.tls_etcd_cert_subject_organization}" 118 | organizational_unit = "${var.tls_cert_subject_organizational_unit}" 119 | street_address = ["${var.tls_cert_subject_street_address}"] 120 | locality = "${var.tls_cert_subject_locality}" 121 | province = "${var.tls_cert_subject_province}" 122 | country = "${var.tls_cert_subject_country}" 123 | postal_code = "${var.tls_cert_subject_postal_code}" 124 | } 125 | } 126 | 127 | resource "tls_locally_signed_cert" "etcd_cert" { 128 | count = "${var.etcd_count}" 129 | 130 | cert_request_pem = "${element(tls_cert_request.etcd_csr.*.cert_request_pem, count.index)}" 131 | ca_private_key_pem = "${tls_private_key.cakey.private_key_pem}" 132 | ca_key_algorithm = "${tls_private_key.cakey.algorithm}" 133 | ca_cert_pem = "${tls_self_signed_cert.cacert.cert_pem}" 134 | 135 | validity_period_hours = 
"${var.tls_cert_validity_period_hours}" 136 | early_renewal_hours = "${var.tls_cert_early_renewal_hours}" 137 | 138 | allowed_uses = [ 139 | "key_encipherment", 140 | "server_auth", 141 | "client_auth" 142 | ] 143 | } 144 | -------------------------------------------------------------------------------- /vars.tf: -------------------------------------------------------------------------------- 1 | variable "do_api_token" { 2 | description = "DigitalOcean API access token. This can be generated from the DigitalOcean web console." 3 | } 4 | 5 | variable "droplet_ssh_user" { 6 | description = "SSH user used by Terraform's 'connection' provisioner to access the droplets." 7 | } 8 | 9 | variable "droplet_private_key_file" { 10 | description = "Path to the private key used by Terraform's 'connecton' provisioner to access the droplets." 11 | } 12 | 13 | variable "droplet_private_key_id" { 14 | description = "ID of the SSH key used by Terraform to create the droplets. This can be obtained from the DigitalOcean web console or CLI." 15 | } 16 | 17 | variable "droplet_region" { 18 | default = "sfo2" 19 | } 20 | 21 | variable "droplet_domain" { 22 | default = "droplet.cluster" 23 | description = "All droplets will be assigned FQDN in the form of ..." 
24 | } 25 | 26 | variable "droplet_tls_certs_home" { 27 | default = "/etc/ssl/certs" 28 | } 29 | 30 | variable "droplet_maintenance_window_start" { 31 | default = "Sun 1:00" 32 | } 33 | 34 | variable "droplet_maintenance_window_length" { 35 | default = "2h" 36 | } 37 | 38 | variable "droplet_update_channel" { 39 | default = "stable" 40 | } 41 | 42 | variable "coreos_image" { 43 | default = "coreos-stable" 44 | description = "Slug of CoreOS image" 45 | } 46 | 47 | variable "coredns_version" { 48 | default = "011" 49 | } 50 | 51 | variable "etcd_version" { 52 | default = "3.2.0" 53 | } 54 | 55 | variable "etcd_count" { 56 | default = 3 57 | } 58 | 59 | variable "etcd_data_dir" { 60 | default = "/var/lib/etcd" 61 | } 62 | 63 | variable "etcd_client_port" { 64 | default = 2379 65 | } 66 | 67 | variable "etcd_peer_port" { 68 | default = 2380 69 | } 70 | 71 | variable "etcd_heartbeat_interval" { 72 | default = 5000 73 | } 74 | 75 | variable "etcd_election_timeout" { 76 | default = 25000 77 | } 78 | 79 | variable "k8s_version" { 80 | default = "v1.8.0" 81 | } 82 | 83 | variable "k8s_cluster_name" { 84 | default = "do-k8s" 85 | } 86 | 87 | variable "k8s_cluster_dns_ip" { 88 | default = "10.32.0.10" 89 | } 90 | 91 | variable "k8s_cluster_domain" { 92 | default = "kubernetes.internal" 93 | } 94 | 95 | variable "k8s_cluster_cidr" { 96 | default = "10.244.0.0/22" 97 | } 98 | 99 | variable "k8s_service_cluster_ip_range" { 100 | default = "10.32.0.0/24" 101 | } 102 | 103 | variable "k8s_service_node_port_range" { 104 | default = "30000-32767" 105 | } 106 | 107 | variable "k8s_apiserver_count" { 108 | default = 1 109 | } 110 | 111 | variable "k8s_apiserver_insecure_port" { 112 | default = 8080 113 | } 114 | 115 | variable "k8s_apiserver_secure_port" { 116 | default = 6443 117 | } 118 | 119 | variable "k8s_apiserver_encryption_key" { 120 | description = "Encryption key used in the API server's encryption config" 121 | } 122 | 123 | variable "k8s_apiserver_encryption_config_file" { 
124 | default = "/opt/k8s/encryption-config.yaml" 125 | } 126 | 127 | variable "tls_cacert_file" { 128 | default = "cacert.pem" 129 | } 130 | 131 | variable "tls_cakey_file" { 132 | default = "cakey.pem" 133 | } 134 | 135 | variable "tls_key_file" { 136 | default = "key.pem" 137 | } 138 | 139 | variable "tls_cert_file" { 140 | default = "cert.pem" 141 | } 142 | 143 | variable "tls_cacert_subject_common_name" { 144 | description = "The self-generated CA cert subject common name used to sign all cluster certs. The cluster certs are used to secure and validate inter-cluster requests. The subject common name of this CA cert must be different from the subject common name for the Kubernetes' certificates. Otherwise, Kubernetes will fail, complaining that it's been assigned a self-signed certificate." 145 | } 146 | 147 | variable "tls_cacert_subject_organization" { 148 | description = "The self-generated CA cert subject organization name." 149 | } 150 | 151 | variable "tls_etcd_cert_subject_common_name" { 152 | description = "The etcd TLS cert subject organization name." 153 | default = "system:etcd" 154 | } 155 | 156 | variable "tls_etcd_cert_subject_organization" { 157 | description = "The etcd TLS cert subject organization name." 158 | default = "system:etcd" 159 | } 160 | 161 | variable "tls_coredns_cert_subject_common_name" { 162 | description = "The CoreDNS TLS cert subject organization name." 163 | default = "system:serviceaccount:kube-system:coredns" 164 | } 165 | 166 | variable "tls_coredns_cert_subject_organization" { 167 | description = "The CoreDNS TLS cert subject organization name." 168 | default = "system:serviceaccounts:kube-system" 169 | } 170 | 171 | variable "tls_kube_apiserver_cert_subject_common_name" { 172 | description = "The kubernetes API Server TLS cert subject organization name." 
173 | default = "kubernetes" 174 | } 175 | 176 | variable "tls_kube_apiserver_cert_subject_organization" { 177 | description = "The kubernetes API Server TLS cert subject organization name." 178 | default = "system:masters" 179 | } 180 | 181 | variable "tls_kube_proxy_cert_subject_common_name" { 182 | description = "The kube-proxy TLS cert subject organization name." 183 | default = "system:kube-proxy" 184 | } 185 | 186 | variable "tls_kube_proxy_cert_subject_organization" { 187 | description = "The kube-proxy TLS cert subject organization name." 188 | default = "system:node-proxier" 189 | } 190 | 191 | variable "tls_workers_cert_subject_common_name" { 192 | description = "The workers' TLS cert subject organization name." 193 | default = "system:node" 194 | } 195 | 196 | variable "tls_workers_cert_subject_organization" { 197 | description = "The workers' TLS cert subject organization name." 198 | default = "system:nodes" 199 | } 200 | 201 | variable "tls_client_cert_subject_common_name" { 202 | description = "The client's TLS cert subject common name. Kubernetes uses this as the user name for the request. Refer http://kubernetes.io/docs/admin/authentication/#x509-client-certs" 203 | default = "admin" 204 | } 205 | 206 | variable "tls_client_cert_subject_organization" { 207 | description = "The client's TLS cert subject organization name. As of Kubernetes 1.4, Kubernetes uses this as the user's group. Refer http://kubernetes.io/docs/admin/authentication/#x509-client-certs" 208 | default = "system:masters" 209 | } 210 | 211 | variable "tls_cert_subject_organizational_unit" { 212 | description = "The Kubernetes and etcd clusters' TLS cert subject organizational unit." 213 | } 214 | 215 | variable "tls_cert_subject_street_address" { 216 | description = "The Kubernetes and etcd clusters' TLS cert subject street address." 217 | } 218 | 219 | variable "tls_cert_subject_locality" { 220 | description = "The Kubernetes and etcd clusters' TLS cert subject locality." 
221 | } 222 | 223 | variable "tls_cert_subject_province" { 224 | description = "The Kubernetes and etcd clusters' TLS cert subject province." 225 | } 226 | 227 | variable "tls_cert_subject_postal_code" { 228 | description = "The Kubernetes and etcd clusters' TLS cert subject postal code." 229 | } 230 | 231 | variable "tls_cert_subject_country" { 232 | description = "The Kubernetes and etcd clusters' TLS cert subject 2-letter country code." 233 | } 234 | 235 | variable "tls_cert_validity_period_hours" { 236 | description = "The validity period in hours of the Kubernetes and etcd clusters' TLS cert." 237 | } 238 | 239 | variable "tls_cert_early_renewal_hours" { 240 | description = "The early renewal period in hours of the Kubernetes and etcd clusters' TLS cert. Set this variable to a time period that is earlier than the cert validity to force Terraform to generate a new cert before the existing one expires. " 241 | } 242 | 243 | variable "k8s_home" { 244 | default = "/opt/k8s" 245 | } 246 | 247 | variable "k8s_bin_home" { 248 | default = "/opt/k8s/bin" 249 | } 250 | 251 | variable "k8s_lib_home" { 252 | default = "/opt/k8s/lib" 253 | } 254 | 255 | variable "k8s_lib_kubelet_home" { 256 | default = "/opt/k8s/lib/kubelet" 257 | } 258 | 259 | variable "k8s_lib_kube_proxy_home" { 260 | default = "/opt/k8s/lib/kube-proxy" 261 | } 262 | 263 | variable "k8s_workers_count" { 264 | default = 3 265 | } 266 | 267 | variable "k8s_cni_home" { 268 | default = "/opt/cni/bin" 269 | } 270 | 271 | variable "k8s_cni_version" { 272 | default = "v0.6.0" 273 | } 274 | 275 | variable "flannel_version" { 276 | default = "v0.9.0" 277 | } 278 | 279 | variable "flannel_run" { 280 | default = "/run/kube-flannel" 281 | } 282 | 283 | variable "flannel_kubeconfig_file" { 284 | default = "/run/kube-flannel/kubeconfig" 285 | } 286 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # kubernetes-terraform-secured 2 | 3 | This project uses [Terraform](https://www.terraform.io/) to provision [Kubernetes](https://kubernetes.io/) on [DigitalOcean](https://www.digitalocean.com/), with [Container Linux](https://coreos.com/os/docs/latest). 4 | 5 | **Note that the droplets and volumes created as part of this project aren't free.** 6 | 7 | ## Table of Contents 8 | 9 | * [Prerequisites](#prerequisites) 10 | * [Getting Started](#getting-started) 11 | * [Cluster Layout](#cluster-layout) 12 | * [etcd3](#etcd3) 13 | * [Kubernetes](#kubernetes) 14 | * [Add-ons](#add-ons) 15 | * [Droplet Updates](#droplet-updates) 16 | * [License](#license) 17 | * [References](#references) 18 | 19 | ## Prerequisites 20 | 21 | * [Terraform v0.10.0](https://www.terraform.io/downloads.html) 22 | * [Container Linux Config Transpiler Provider](https://github.com/coreos/terraform-provider-ct) 23 | * [Go 1.8.3](https://golang.org/dl/) 24 | * [doctl 1.7.0](https://github.com/digitalocean/doctl) 25 | * [kubectl v1.8.0](https://storage.googleapis.com/kubernetes-release/release/v1.8.0/bin/linux/amd64/kubectl) 26 | 27 | ## Getting Started 28 | To get started, clone this repository: 29 | ```sh 30 | $ git clone git@github.com:ihcsim/kubernetes-terraform-secured.git 31 | ``` 32 | 33 | Initialize the project: 34 | ```sh 35 | $ cd kubernetes-terraform-secured 36 | $ terraform init 37 | ``` 38 | 39 | The above command will fail with errors complaining about the missing [Config Transpiler provider](https://github.com/coreos/terraform-provider-ct). Since at the time of this writing, the Container Linux Config Transpiler provider isn't included in the official [Terraform Providers repository](https://github.com/terraform-providers), you must manually copy it into your local `kubernetes-terraform-secured/.terraform` folder. 
40 | 41 | Use the following commands to install the Config Transpiler provider: 42 | ```sh 43 | $ go get -u github.com/coreos/terraform-provider-ct 44 | $ cp $GOPATH/bin/terraform-provider-ct .terraform/plugins/<os>_<arch>/ 45 | ``` 46 | 47 | Re-initialize the project: 48 | ```sh 49 | $ terraform init 50 | ``` 51 | 52 | Create a copy of the `terraform.tfvars` file from the provided `terraform.tfvars.sample` file. Provide all the required values. The description of all these variables can be found in the `vars.tf` file. 53 | 54 | Provision the Kubernetes cluster: 55 | ```sh 56 | $ terraform apply 57 | ``` 58 | 59 | Once Terraform completes provisioning the cluster, the `kubeconfig` data of the new Kubernetes cluster will be output to a local git-ignored `.kubeconfig` file. Use it to interact with the cluster: 60 | ```sh 61 | $ kubectl --kubeconfig=.kubeconfig get componentstatuses 62 | NAME STATUS MESSAGE ERROR 63 | scheduler Healthy ok 64 | controller-manager Healthy ok 65 | etcd-0 Healthy {"health": "true"} 66 | etcd-2 Healthy {"health": "true"} 67 | etcd-1 Healthy {"health": "true"} 68 | 69 | $ kubectl --kubeconfig=.kubeconfig get no 70 | NAME STATUS AGE VERSION 71 | k8s-worker-00 Ready 44s v1.7.0 72 | k8s-worker-01 Ready 46s v1.7.0 73 | k8s-worker-02 Ready 48s v1.7.0 74 | ``` 75 | 76 | ## Cluster Layout 77 | By default, this project provisions a cluster that is comprised of: 78 | 79 | * 3 etcd3 droplets 80 | * 1 Kubernetes Master droplet and 81 | * 3 Kubernetes Workers droplets 82 | 83 | All droplets are initialized using CoreOS' [Container Linux Config](https://coreos.com/os/docs/latest/provisioning.html). These configurations are defined in the `config.yaml` files found in the `etcd/` and `k8s/` folders. They are interpolated using Terraform's Config Transpiler provider. 84 | 85 | [CoreDNS](https://coredns.io/tags/documentation/) is used to provide droplet-level hostname resolution.
86 | 87 | **Note that this setup uses the Terraform [TLS Provider](https://www.terraform.io/docs/providers/tls/index.html) to generate RSA private keys, CSR and certificates for development purposes only. The resources generated will be saved in the Terraform state file as plain text. Make sure the Terraform state file is stored securely.** 88 | 89 | ### etcd3 90 | By default, a 3-node [static cluster](https://coreos.com/etcd/docs/latest/v2/clustering.html#static) of etcd instances is provisioned. The number of instances in the cluster can be altered using the `etcd_count` Terraform variable. The `etcd_initial_cluster` variable in the `etcd.tf` file must also be updated to reflect the initial cluster membership. 91 | 92 | All peer-to-peer and client-to-server communications are encrypted and authenticated using the self-signed CA, private key and TLS certificate. 93 | 94 | Every etcd instance's data directory (defaulted to `/var/lib/etcd`) is mounted as a volume to a [DigitalOcean block storage](https://www.digitalocean.com/products/storage/). 95 | 96 | For testing purposes, the `etcdctl` v3 client on every etcd droplet is configured to target the etcd cluster.
For example, 97 | ```sh 98 | $ doctl compute ssh etcd-00 99 | Last login: 100 | Container Linux by CoreOS stable (1465.7.0) 101 | 102 | $ etcdctl cluster-health 103 | member 1ac8697e1ee7cb22 is healthy: got healthy result from https://xx.xxx.xxx.xxx:xxxx 104 | member 8e62221f3a6bf84d is healthy: got healthy result from https://xx.xxx.xxx.xxx:xxxx 105 | member 9350aa2a45b92d34 is healthy: got healthy result from https://xx.xxx.xxx.xxx:xxxx 106 | cluster is healthy 107 | ``` 108 | 109 | ### Kubernetes 110 | The following components are deployed in the Kubernetes cluster: 111 | 112 | * Kubernetes Master: kube-apiserver, kube-controller-manager, kube-scheduler 113 | * Kubernetes Workers: kubelet, kube-proxy 114 | 115 | The number of Kubernetes workers can be altered using the `k8s_workers_count` Terraform variable. 116 | 117 | The API Server is started with the following admission controllers: 118 | 119 | 1. NamespaceLifecycle 120 | 1. LimitRanger 121 | 1. ServiceAccount 122 | 1. PersistentVolumeLabel 123 | 1. DefaultStorageClass 124 | 1. ResourceQuota 125 | 1. DefaultTolerationSeconds 126 | 1. NodeRestriction 127 | 128 | All API requests to the API Server are authenticated using X.509 TLS certificates. The API Server is started with the `--anonymous-auth=false` flag in order to disable [anonymous requests](https://kubernetes.io/docs/admin/authentication/#anonymous-requests). Refer to the Kubernetes [_Authentication_](https://kubernetes.io/docs/admin/authentication/) documentation for more information on how the authentication scheme works. All the TLS artifacts are declared in the `ca.tf`, `k8s-master.tf` and `k8s-workers.tf` files. The Controller Manager and Scheduler communicate with the API Server via its insecure network interface, since they reside on the same host as the API Server. The Controller Manager uses the CA cert and key declared in `ca.tf` to serve cluster-scoped certificate-issuing requests.
129 | 130 | The Kubelets are also started with the `--anonymous-auth=false` option. They use the `--authentication-token-webhook` and `--authorization-mode=Webhook` options to enable authentication and authorization. For more information on Kubelet authentication and authorization, refer to this [documentation](https://kubernetes.io/docs/admin/kubelet-authentication-authorization/). 131 | 132 | Inter-pod communication is supported by an overlay network using [flannel](https://coreos.com/flannel/docs/latest/). The `k8s/network/flannel.yaml` manifest defines the relevant DaemonSet, ConfigMap and RBAC resources. These resources are deployed to the `kube-system` namespace. CoreDNS is used to provide in-cluster DNS resolution. The manifest can be found in `k8s/network/coredns.yaml`. 133 | 134 | ## Add-ons 135 | All add-ons are deployed using [Helm charts](https://helm.sh/). 136 | 137 | ## Droplet Updates 138 | CoreOS [locksmith](https://github.com/coreos/locksmith) is enabled to perform updates on Container Linux. By default, `locksmithd` is configured to use the `etcd-lock` reboot strategy during updates. The reboot window is set to a 2 hour window starting at 1 AM on Sundays. 139 | 140 | The following Terraform variables can be used to configure the reboot strategy and maintenance window: 141 | 142 | * `droplet_maintenance_window_start` 143 | * `droplet_maintenance_window_length` 144 | * `droplet_update_channel` 145 | 146 | The default update group is `stable`. 147 | 148 | For more information, refer to the Container Linux documentation on [Update Strategies](https://coreos.com/os/docs/1506.0.0/update-strategies.html). 149 | 150 | ## License 151 | See the [LICENSE](LICENSE) file for the full license text.
152 | 153 | ## References 154 | 155 | * [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) 156 | * [Running etcd on Container Linux](https://coreos.com/etcd/docs/latest/getting-started-with-etcd.html) 157 | * [Container Linux Config Spec](https://coreos.com/os/docs/1506.0.0/configuration.html) 158 | * [CoreDNS](https://coredns.io/tags/documentation/) 159 | * [How To Install And Configure Kubernetes On Top Of A CoreOS Cluster](https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-kubernetes-on-top-of-a-coreos-cluster) 160 | * [CoreOS + Kubernetes Step By Step](https://coreos.com/kubernetes/docs/latest/getting-started.html) 161 | * [Kubernetes API Authentication](https://kubernetes.io/docs/admin/authentication/) 162 | * [Kubernetes API Authorization](https://kubernetes.io/docs/admin/authorization/) 163 | * [Kubelet Authentication & Authorization](https://kubernetes.io/docs/admin/kubelet-authentication-authorization/) 164 | * [Kubernetes Master Node Communication](http://kubernetes.io/docs/admin/master-node-communication/#controller-manager-configuration)
165 | * [Using Flannel with Kubernetes](https://coreos.com/flannel/docs/latest/kubernetes.html) 166 | * [Etcd Clustering](https://coreos.com/etcd/docs/latest/v2/clustering.html) 167 | -------------------------------------------------------------------------------- /k8s-master.tf: -------------------------------------------------------------------------------- 1 | resource "digitalocean_droplet" "k8s_masters" { 2 | count = "${var.k8s_apiserver_count}" 3 | 4 | name = "${format("k8s-master-%02d", count.index)}" 5 | image = "${var.coreos_image}" 6 | region = "${var.droplet_region}" 7 | size = "2GB" 8 | private_networking = "true" 9 | ssh_keys = ["${var.droplet_private_key_id}"] 10 | user_data = "${data.ct_config.k8s_master.rendered}" 11 | 12 | connection { 13 | user = "${var.droplet_ssh_user}" 14 | private_key = "${file(var.droplet_private_key_file)}" 15 | } 16 | 17 | provisioner "remote-exec" { 18 | inline = [ 19 | "sudo mkdir -p ${var.k8s_bin_home}", 20 | "sudo chown -R core ${var.k8s_home}", 21 | "wget -P ${var.k8s_bin_home} https://storage.googleapis.com/kubernetes-release/release/${var.k8s_version}/bin/linux/amd64/kube-aggregator", 22 | "wget -P ${var.k8s_bin_home} https://storage.googleapis.com/kubernetes-release/release/${var.k8s_version}/bin/linux/amd64/kube-apiserver", 23 | "wget -P ${var.k8s_bin_home} https://storage.googleapis.com/kubernetes-release/release/${var.k8s_version}/bin/linux/amd64/kube-controller-manager", 24 | "wget -P ${var.k8s_bin_home} https://storage.googleapis.com/kubernetes-release/release/${var.k8s_version}/bin/linux/amd64/kube-scheduler", 25 | "wget -P ${var.k8s_bin_home} https://storage.googleapis.com/kubernetes-release/release/${var.k8s_version}/bin/linux/amd64/kubefed", 26 | "wget -P ${var.k8s_bin_home} https://storage.googleapis.com/kubernetes-release/release/${var.k8s_version}/bin/linux/amd64/kubectl", 27 | "wget -P ${var.k8s_bin_home}
https://storage.googleapis.com/kubernetes-release/release/${var.k8s_version}/bin/linux/amd64/cloud-controller-manager", 28 | "wget -P ${var.k8s_bin_home} https://storage.googleapis.com/kubernetes-release/release/${var.k8s_version}/bin/linux/amd64/apiextensions-apiserver", 29 | "chmod +x ${var.k8s_bin_home}/*" 30 | ] 31 | } 32 | 33 | connection { 34 | user = "${var.droplet_ssh_user}" 35 | private_key = "${file(var.droplet_private_key_file)}" 36 | } 37 | 38 | provisioner "file" { 39 | content = "${data.template_file.k8s_apiserver_encryption_config.rendered}" 40 | destination = "${var.k8s_home}/encryption.yaml" 41 | } 42 | 43 | provisioner "file" { 44 | content = "${data.template_file.k8s_apiserver_token_file.rendered}" 45 | destination = "/opt/k8s/token.csv" 46 | } 47 | } 48 | 49 | resource "null_resource" "k8s_masters_tls" { 50 | count = "${var.k8s_apiserver_count}" 51 | 52 | triggers { 53 | droplet = "${join(",", digitalocean_droplet.k8s_masters.*.id)}" 54 | } 55 | 56 | connection { 57 | user = "${var.droplet_ssh_user}" 58 | private_key = "${file(var.droplet_private_key_file)}" 59 | host = "${element(digitalocean_droplet.k8s_masters.*.ipv4_address, count.index)}" 60 | } 61 | 62 | provisioner "remote-exec" { 63 | inline = [ 64 | "sudo mkdir -p ${var.droplet_tls_certs_home}/${var.droplet_domain}", 65 | "sudo chown -R ${var.droplet_ssh_user} ${var.droplet_tls_certs_home}/${var.droplet_domain}" 66 | ] 67 | } 68 | 69 | provisioner "file" { 70 | content = "${tls_self_signed_cert.cacert.cert_pem}" 71 | destination = "${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_cacert_file}" 72 | } 73 | 74 | provisioner "file" { 75 | content = "${tls_private_key.cakey.private_key_pem}" 76 | destination = "${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_cakey_file}" 77 | } 78 | 79 | provisioner "file" { 80 | content = "${element(tls_locally_signed_cert.kube_apiserver.*.cert_pem, count.index)}" 81 | destination = 
"${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_cert_file}" 82 | } 83 | 84 | provisioner "file" { 85 | content = "${element(tls_private_key.kube_apiserver.*.private_key_pem, count.index)}" 86 | destination = "${var.droplet_tls_certs_home}/${var.droplet_domain}/${var.tls_key_file}" 87 | } 88 | } 89 | 90 | resource "null_resource" "network_manifest" { 91 | triggers { 92 | master = "${digitalocean_droplet.k8s_masters.0.id}" 93 | } 94 | 95 | depends_on = ["null_resource.client_kubeconfig"] 96 | 97 | provisioner "local-exec" { 98 | command = <