├── files ├── csv_templatez │ └── token.csv ├── json_templatez │ ├── auth_policy.jsonl │ ├── pki_ca_config.json │ ├── pki_ca_csr.json │ └── pki_crt_csr.json ├── pod_definitionz │ ├── calico.yaml │ ├── kubedns-svc.yaml │ └── kubedns.yaml ├── systemd_templatez │ ├── docker.service │ ├── etcd.service │ ├── kube-apiserver.service │ ├── kube-controller-manager.service │ ├── kube-proxy.service │ ├── kube-scheduler.service │ └── kubelet.service └── yaml_templatez │ ├── calicoctl_cfg │ └── kubeconfig ├── handlers └── main.yaml ├── tasks ├── main.yaml ├── master_build.yaml ├── minion_build.yaml └── ssl_cert_gen.yaml └── vars └── main.yaml
/files/csv_templatez/token.csv:
--------------------------------------------------------------------------------
1 | {%- for token in auth_tokens -%}
2 | {%- for key, value in token.iteritems() -%}
3 | {{ value.password }},{{ value.username }},{{ value.uid }}{{"\n"}}
4 | {%- endfor -%}
5 | {%- endfor -%}
--------------------------------------------------------------------------------
/files/json_templatez/auth_policy.jsonl:
--------------------------------------------------------------------------------
1 | {%- for policy in auth_policy -%}
2 | {%- for key, value in policy.iteritems() -%}
3 | {%- if value.username -%}
4 | {"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user": "{{ value.username }}", "namespace": "{{ value.namespace }}", "resource": "{{ value.resource }}","apiGroup": "{{ value.apigroup }}", "nonResourcePath": "{{ value.nonresourcepath }}", "readonly": {{ value.readonly | lower }}}}{{"\n"}}
5 | {%- elif value.group -%}
6 | {"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group": "{{ value.group }}", "namespace": "{{ value.namespace }}", "resource": "{{ value.resource }}","apiGroup": "{{ value.apigroup }}", "nonResourcePath": "{{ value.nonresourcepath }}", "readonly": {{ value.readonly | lower }}}}{{"\n"}}
7 | {%- endif -%}
8 | {%- endfor -%}
9 | {%- endfor -%}
10 |
11 |
12 |
--------------------------------------------------------------------------------
/files/json_templatez/pki_ca_config.json:
--------------------------------------------------------------------------------
1 | {
2 |   "signing": {
3 |     "default": {
4 |       "expiry": "{{ pki_info.ca_expire }}"
5 |     },
6 |     "profiles": {
7 |       "{{ pki_info.cert_name }}": {
8 |         "usages": ["signing", "key encipherment", "server auth", "client auth"],
9 |         "expiry": "{{ pki_info.ca_expire }}"
10 |       }
11 |     }
12 |   }
13 | }
14 |
--------------------------------------------------------------------------------
/files/json_templatez/pki_ca_csr.json:
--------------------------------------------------------------------------------
1 | {
2 |   "CN": "{{ pki_info.cert_name }}",
3 |   "key": {
4 |     "algo": "rsa",
5 |     "size": {{ pki_info.key_size }}
6 |   },
7 |   "names": [
8 |     {
9 |       "C": "{{ pki_info.cert_country }}",
10 |       "L": "{{ pki_info.cert_city }}",
11 |       "O": "{{ pki_info.cert_org }}",
12 |       "OU": "{{ pki_info.cert_ou }}",
13 |       "ST": "{{ pki_info.cert_province }}"
14 |     }
15 |   ]
16 | }
--------------------------------------------------------------------------------
/files/json_templatez/pki_crt_csr.json:
--------------------------------------------------------------------------------
1 | {%- set hosts = [cluster_info.service_network_cidr|ipaddr(1)|ipaddr('address'),"127.0.0.1","kubernetes.default"] -%}
2 | {%- for node in host_roles -%}
3 | {%- for key, value in node.iteritems() -%}
4 | {{- hosts.append(value['ipaddress']) -}}
5 | {{- hosts.append(value['fqdn'])
-}} 6 | {{- hosts.append(value['fqdn'].split('.')[0]) -}} 7 | {%- endfor -%} 8 | {%- endfor -%} 9 | { 10 | "CN": "{{ pki_info.cert_name }}", 11 | "hosts": {{ hosts | to_json }}, 12 | "key": { 13 | "algo": "rsa", 14 | "size": {{ pki_info.key_size }} 15 | }, 16 | "names": [ 17 | { 18 | "C": "{{ pki_info.cert_country }}", 19 | "L": "{{ pki_info.cert_city }}", 20 | "O": "{{ pki_info.cert_org }}", 21 | "OU": "{{ pki_info.cert_ou }}", 22 | "ST": "{{ pki_info.cert_province }}" 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /files/pod_definitionz/calico.yaml: -------------------------------------------------------------------------------- 1 | # Calico Version v2.1.5 2 | # http://docs.projectcalico.org/v2.1/releases#v2.1.5 3 | # This manifest includes the following component versions: 4 | # calico/node:v1.1.3 5 | # calico/cni:v1.8.0 6 | # calico/kube-policy-controller:v0.5.4 7 | 8 | # This ConfigMap is used to configure a self-hosted Calico installation. 9 | kind: ConfigMap 10 | apiVersion: v1 11 | metadata: 12 | name: calico-config 13 | namespace: kube-system 14 | data: 15 | # Configure this with the location of your etcd cluster. 16 | etcd_endpoints: "https://ubuntu-1:2379" 17 | 18 | # Configure the Calico backend to use. 19 | calico_backend: "bird" 20 | 21 | # The CNI network configuration to install on each node. 22 | cni_network_config: |- 23 | { 24 | "name": "k8s-pod-network", 25 | "type": "calico", 26 | "etcd_endpoints": "__ETCD_ENDPOINTS__", 27 | "etcd_key_file": "__ETCD_KEY_FILE__", 28 | "etcd_cert_file": "__ETCD_CERT_FILE__", 29 | "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", 30 | "log_level": "info", 31 | "ipam": { 32 | "type": "calico-ipam" 33 | }, 34 | "policy": { 35 | "type": "k8s", 36 | "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", 37 | "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" 38 | }, 39 | "kubernetes": { 40 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 41 | } 42 | } 43 | 44 | # If you're using TLS enabled etcd uncomment the following. 45 | # You must also populate the Secret below with these files. 46 | etcd_ca: "/calico-secrets/etcd-ca" 47 | etcd_cert: "/calico-secrets/etcd-cert" 48 | etcd_key: "/calico-secrets/etcd-key" 49 | 50 | --- 51 | 52 | # The following contains k8s Secrets for use with a TLS enabled etcd cluster. 53 | # For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ 54 | apiVersion: v1 55 | kind: Secret 56 | type: Opaque 57 | metadata: 58 | name: calico-etcd-secrets 59 | namespace: kube-system 60 | data: 61 | # Populate the following files with etcd TLS configuration if desired, but leave blank if 62 | # not using TLS for etcd. 63 | # This self-hosted install expects three files with the following names. The values 64 | # should be base64 encoded strings of the entire contents of each file. 65 | etcd-key: {{ lookup('file', '/var/lib/kube_certs/kubernetes-key.pem') | b64encode }} 66 | etcd-cert: {{ lookup('file', '/var/lib/kube_certs/kubernetes.pem') | b64encode }} 67 | etcd-ca: {{ lookup('file', '/var/lib/kube_certs/ca.pem') | b64encode }} 68 | 69 | --- 70 | 71 | # This manifest installs the calico/node container, as well 72 | # as the Calico CNI plugins and network config on 73 | # each master and worker node in a Kubernetes cluster. 
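# One thing to watch when reusing this manifest: the etcd_endpoints value in the
# calico-config ConfigMap above is hard-coded to https://ubuntu-1:2379, whereas the
# systemd and kubeconfig templates in this role derive the master address from
# host_roles. It presumably needs to be edited by hand to match your actual master.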
74 | kind: DaemonSet 75 | apiVersion: extensions/v1beta1 76 | metadata: 77 | name: calico-node 78 | namespace: kube-system 79 | labels: 80 | k8s-app: calico-node 81 | spec: 82 | selector: 83 | matchLabels: 84 | k8s-app: calico-node 85 | template: 86 | metadata: 87 | labels: 88 | k8s-app: calico-node 89 | annotations: 90 | scheduler.alpha.kubernetes.io/critical-pod: '' 91 | scheduler.alpha.kubernetes.io/tolerations: | 92 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 93 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 94 | spec: 95 | hostNetwork: true 96 | containers: 97 | # Runs calico/node container on each Kubernetes node. This 98 | # container programs network policy and routes on each 99 | # host. 100 | - name: calico-node 101 | image: quay.io/calico/node:v1.1.3 102 | env: 103 | # The location of the Calico etcd cluster. 104 | - name: ETCD_ENDPOINTS 105 | valueFrom: 106 | configMapKeyRef: 107 | name: calico-config 108 | key: etcd_endpoints 109 | # Choose the backend to use. 110 | - name: CALICO_NETWORKING_BACKEND 111 | valueFrom: 112 | configMapKeyRef: 113 | name: calico-config 114 | key: calico_backend 115 | # Disable file logging so `kubectl logs` works. 116 | - name: CALICO_DISABLE_FILE_LOGGING 117 | value: "true" 118 | # Set Felix endpoint to host default action to ACCEPT. 119 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 120 | value: "ACCEPT" 121 | # Configure the IP Pool from which Pod IPs will be chosen. 122 | - name: CALICO_IPV4POOL_CIDR 123 | value: "10.100.0.0/16" 124 | - name: CALICO_IPV4POOL_IPIP 125 | value: "always" 126 | # Disable IPv6 on Kubernetes. 127 | - name: FELIX_IPV6SUPPORT 128 | value: "false" 129 | # Set Felix logging to "info" 130 | - name: FELIX_LOGSEVERITYSCREEN 131 | value: "info" 132 | # Location of the CA certificate for etcd. 133 | - name: ETCD_CA_CERT_FILE 134 | valueFrom: 135 | configMapKeyRef: 136 | name: calico-config 137 | key: etcd_ca 138 | # Location of the client key for etcd. 139 | - name: ETCD_KEY_FILE 140 | valueFrom: 141 | configMapKeyRef: 142 | name: calico-config 143 | key: etcd_key 144 | # Location of the client certificate for etcd. 145 | - name: ETCD_CERT_FILE 146 | valueFrom: 147 | configMapKeyRef: 148 | name: calico-config 149 | key: etcd_cert 150 | # Auto-detect the BGP IP address. 151 | - name: IP 152 | value: "" 153 | securityContext: 154 | privileged: true 155 | resources: 156 | requests: 157 | cpu: 250m 158 | volumeMounts: 159 | - mountPath: /lib/modules 160 | name: lib-modules 161 | readOnly: true 162 | - mountPath: /var/run/calico 163 | name: var-run-calico 164 | readOnly: false 165 | - mountPath: /calico-secrets 166 | name: etcd-certs 167 | # This container installs the Calico CNI binaries 168 | # and CNI network config file on each node. 169 | - name: install-cni 170 | image: quay.io/calico/cni:v1.8.0 171 | command: ["/install-cni.sh"] 172 | env: 173 | # The location of the Calico etcd cluster. 174 | - name: ETCD_ENDPOINTS 175 | valueFrom: 176 | configMapKeyRef: 177 | name: calico-config 178 | key: etcd_endpoints 179 | # The CNI network config to install on each node. 180 | - name: CNI_NETWORK_CONFIG 181 | valueFrom: 182 | configMapKeyRef: 183 | name: calico-config 184 | key: cni_network_config 185 | volumeMounts: 186 | - mountPath: /host/opt/cni/bin 187 | name: cni-bin-dir 188 | - mountPath: /host/etc/cni/net.d 189 | name: cni-net-dir 190 | - mountPath: /calico-secrets 191 | name: etcd-certs 192 | volumes: 193 | # Used by calico/node. 
194 | - name: lib-modules 195 | hostPath: 196 | path: /lib/modules 197 | - name: var-run-calico 198 | hostPath: 199 | path: /var/run/calico 200 | # Used to install CNI. 201 | - name: cni-bin-dir 202 | hostPath: 203 | path: /opt/cni/bin 204 | - name: cni-net-dir 205 | hostPath: 206 | path: /etc/cni/net.d 207 | # Mount in the etcd TLS secrets. 208 | - name: etcd-certs 209 | secret: 210 | secretName: calico-etcd-secrets 211 | 212 | --- 213 | 214 | # This manifest deploys the Calico policy controller on Kubernetes. 215 | # See https://github.com/projectcalico/k8s-policy 216 | apiVersion: extensions/v1beta1 217 | kind: Deployment 218 | metadata: 219 | name: calico-policy-controller 220 | namespace: kube-system 221 | labels: 222 | k8s-app: calico-policy 223 | annotations: 224 | scheduler.alpha.kubernetes.io/critical-pod: '' 225 | scheduler.alpha.kubernetes.io/tolerations: | 226 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 227 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 228 | spec: 229 | # The policy controller can only have a single active instance. 230 | replicas: 1 231 | strategy: 232 | type: Recreate 233 | template: 234 | metadata: 235 | name: calico-policy-controller 236 | namespace: kube-system 237 | labels: 238 | k8s-app: calico-policy 239 | spec: 240 | # The policy controller must run in the host network namespace so that 241 | # it isn't governed by policy that would prevent it from working. 242 | hostNetwork: true 243 | containers: 244 | - name: calico-policy-controller 245 | image: quay.io/calico/kube-policy-controller:v0.5.4 246 | env: 247 | # The location of the Calico etcd cluster. 248 | - name: ETCD_ENDPOINTS 249 | valueFrom: 250 | configMapKeyRef: 251 | name: calico-config 252 | key: etcd_endpoints 253 | # Location of the CA certificate for etcd. 254 | - name: ETCD_CA_CERT_FILE 255 | valueFrom: 256 | configMapKeyRef: 257 | name: calico-config 258 | key: etcd_ca 259 | # Location of the client key for etcd. 260 | - name: ETCD_KEY_FILE 261 | valueFrom: 262 | configMapKeyRef: 263 | name: calico-config 264 | key: etcd_key 265 | # Location of the client certificate for etcd. 266 | - name: ETCD_CERT_FILE 267 | valueFrom: 268 | configMapKeyRef: 269 | name: calico-config 270 | key: etcd_cert 271 | # The location of the Kubernetes API. Use the default Kubernetes 272 | # service for API access. 273 | - name: K8S_API 274 | value: "https://kubernetes.default:443" 275 | # Since we're running in the host namespace and might not have KubeDNS 276 | # access, configure the container's /etc/hosts to resolve 277 | # kubernetes.default to the correct service clusterIP. 278 | - name: CONFIGURE_ETC_HOSTS 279 | value: "true" 280 | volumeMounts: 281 | # Mount in the etcd TLS secrets. 282 | - mountPath: /calico-secrets 283 | name: etcd-certs 284 | volumes: 285 | # Mount in the etcd TLS secrets. 
286 | - name: etcd-certs 287 | secret: 288 | secretName: calico-etcd-secrets 289 | -------------------------------------------------------------------------------- /files/pod_definitionz/kubedns-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "KubeDNS" 10 | spec: 11 | selector: 12 | k8s-app: kube-dns 13 | clusterIP: {{ cluster_info.dns_service_ip }} 14 | ports: 15 | - name: dns 16 | port: 53 17 | protocol: UDP 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /files/pod_definitionz/kubedns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: kube-dns-v20 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | version: v20 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 2 12 | selector: 13 | matchLabels: 14 | k8s-app: kube-dns 15 | version: v20 16 | template: 17 | metadata: 18 | labels: 19 | k8s-app: kube-dns 20 | version: v20 21 | kubernetes.io/cluster-service: "true" 22 | annotations: 23 | scheduler.alpha.kubernetes.io/critical-pod: '' 24 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' 25 | spec: 26 | containers: 27 | - name: kubedns 28 | image: gcr.io/google_containers/kubedns-amd64:1.8 29 | resources: 30 | # TODO: Set memory limits when we've profiled the container for large 31 | # clusters, then set request = limit to keep this container in 32 | # guaranteed class. Currently, this container falls into the 33 | # "burstable" category so the kubelet doesn't backoff from restarting it. 34 | limits: 35 | memory: 170Mi 36 | requests: 37 | cpu: 100m 38 | memory: 70Mi 39 | livenessProbe: 40 | httpGet: 41 | path: /healthz-kubedns 42 | port: 8080 43 | scheme: HTTP 44 | initialDelaySeconds: 60 45 | timeoutSeconds: 5 46 | successThreshold: 1 47 | failureThreshold: 5 48 | readinessProbe: 49 | httpGet: 50 | path: /readiness 51 | port: 8081 52 | scheme: HTTP 53 | # we poll on pod startup for the Kubernetes master service and 54 | # only setup the /readiness HTTP server once that's available. 
55 | initialDelaySeconds: 3 56 | timeoutSeconds: 5 57 | args: 58 | # command = "/kube-dns" 59 | - --domain={{ cluster_info.cluster_domain }} 60 | - --dns-port=10053 61 | ports: 62 | - containerPort: 10053 63 | name: dns-local 64 | protocol: UDP 65 | - containerPort: 10053 66 | name: dns-tcp-local 67 | protocol: TCP 68 | - name: dnsmasq 69 | image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4 70 | livenessProbe: 71 | httpGet: 72 | path: /healthz-dnsmasq 73 | port: 8080 74 | scheme: HTTP 75 | initialDelaySeconds: 60 76 | timeoutSeconds: 5 77 | successThreshold: 1 78 | failureThreshold: 5 79 | args: 80 | - --cache-size=1000 81 | - --no-resolv 82 | - --server=127.0.0.1#10053 83 | - --log-facility=- 84 | ports: 85 | - containerPort: 53 86 | name: dns 87 | protocol: UDP 88 | - containerPort: 53 89 | name: dns-tcp 90 | protocol: TCP 91 | - name: healthz 92 | image: gcr.io/google_containers/exechealthz-amd64:1.2 93 | resources: 94 | limits: 95 | memory: 50Mi 96 | requests: 97 | cpu: 10m 98 | memory: 50Mi 99 | args: 100 | - --cmd=nslookup kubernetes.default.svc.{{ cluster_info.cluster_domain }} 127.0.0.1 >/dev/null 101 | - --url=/healthz-dnsmasq 102 | - --cmd=nslookup kubernetes.default.svc.{{ cluster_info.cluster_domain }} 127.0.0.1:10053 >/dev/null 103 | - --url=/healthz-kubedns 104 | - --port=8080 105 | - --quiet 106 | ports: 107 | - containerPort: 8080 108 | protocol: TCP 109 | dnsPolicy: Default # Don't use cluster DNS 110 | -------------------------------------------------------------------------------- /files/systemd_templatez/docker.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Application Container Engine 3 | Documentation=http://docs.docker.io 4 | 5 | [Service] 6 | ExecStart=/usr/bin/docker daemon \ 7 | --iptables=false \ 8 | --ip-masq=false \ 9 | --host=unix:///var/run/docker.sock \ 10 | --log-level=error \ 11 | --storage-driver=overlay 12 | Restart=on-failure 13 | RestartSec=5 14 | 15 | [Install] 16 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /files/systemd_templatez/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | Documentation=https://github.com/coreos 4 | 5 | [Service] 6 | ExecStart=/usr/bin/etcd --name etcdclusterz \ 7 | --cert-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}.pem \ 8 | --key-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}-key.pem \ 9 | --peer-cert-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}.pem \ 10 | --peer-key-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}-key.pem \ 11 | --trusted-ca-file={{ pki_info.cert_path }}/ca.pem \ 12 | --peer-trusted-ca-file={{ pki_info.cert_path }}/ca.pem \ 13 | --initial-advertise-peer-urls https://{{ansible_default_ipv4.address}}:2380 \ 14 | --listen-peer-urls https://{{ansible_default_ipv4.address}}:2380 \ 15 | --listen-client-urls https://{{ansible_default_ipv4.address}}:2379,http://127.0.0.1:2379 \ 16 | --advertise-client-urls https://{{ansible_default_ipv4.address}}:2379 \ 17 | --initial-cluster-token etcd-cluster-0 \ 18 | --initial-cluster etcdclusterz=https://{{ansible_default_ipv4.address}}:2380 \ 19 | --initial-cluster-state new \ 20 | --data-dir=/var/lib/etcd 21 | Restart=on-failure 22 | RestartSec=5 23 | 24 | [Install] 25 | WantedBy=multi-user.target 26 | -------------------------------------------------------------------------------- 
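The unit above brings up a single-member, TLS-enabled etcd on the master, using the certificates produced by ssl_cert_gen.yaml. A quick post-install sanity check, sketched here with the default pki_info paths from vars/main.yaml and the sample master address 10.20.30.71 (neither is anything the role runs itself): the unit also keeps a plain-HTTP listener on 127.0.0.1, so

    etcdctl --endpoints http://127.0.0.1:2379 member list

should show the single "etcdclusterz" member, and the TLS listener can be exercised with the generated certificates:

    etcdctl --endpoints https://10.20.30.71:2379 \
            --ca-file /var/lib/kube_certs/ca.pem \
            --cert-file /var/lib/kube_certs/kubernetes.pem \
            --key-file /var/lib/kube_certs/kubernetes-key.pem \
            cluster-health

(etcdctl 3.0.10 speaks the v2 API by default, which is where these flags live.)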
/files/systemd_templatez/kube-apiserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes API Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/bin/kube-apiserver \ 7 | --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \ 8 | --advertise-address={{ansible_default_ipv4.address}} \ 9 | --allow-privileged=true \ 10 | --apiserver-count=3 \ 11 | --authorization-mode=ABAC \ 12 | --authorization-policy-file=/var/lib/kubernetes/authorization-policy.jsonl \ 13 | --bind-address=0.0.0.0 \ 14 | --enable-swagger-ui=true \ 15 | --etcd-cafile={{ pki_info.cert_path }}/ca.pem \ 16 | --insecure-bind-address=0.0.0.0 \ 17 | --kubelet-certificate-authority={{ pki_info.cert_path }}/ca.pem \ 18 | --etcd-servers=https://{{ansible_default_ipv4.address}}:2379 \ 19 | --service-account-key-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}-key.pem \ 20 | --service-cluster-ip-range={{ cluster_info.service_network_cidr }} \ 21 | --service-node-port-range=30000-32767 \ 22 | --tls-cert-file={{ pki_info.cert_path }}/kubernetes.pem \ 23 | --tls-private-key-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}-key.pem \ 24 | --token-auth-file=/var/lib/kubernetes/token.csv \ 25 | --v=2 26 | Restart=on-failure 27 | RestartSec=5 28 | 29 | [Install] 30 | WantedBy=multi-user.target 31 | -------------------------------------------------------------------------------- /files/systemd_templatez/kube-controller-manager.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Controller Manager 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/bin/kube-controller-manager \ 7 | --allocate-node-cidrs=false \ 8 | --cluster-name={{ cluster_info.cluster_name }} \ 9 | --leader-elect=true \ 10 | --master=http://{{ansible_default_ipv4.address}}:8080 \ 11 | --root-ca-file={{ pki_info.cert_path }}/ca.pem \ 12 | --service-account-private-key-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}-key.pem \ 13 | --service-cluster-ip-range={{ cluster_info.service_network_cidr }} \ 14 | --v=2 15 | Restart=on-failure 16 | RestartSec=5 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /files/systemd_templatez/kube-proxy.service: -------------------------------------------------------------------------------- 1 | {%- set master = [] -%} 2 | {%- for node in host_roles -%} 3 | {%- for key, value in node.iteritems() -%} 4 | {%- if "master" in value['type'] -%} 5 | {{- master.append(value['ipaddress']) -}} 6 | {%- endif -%} 7 | {%- endfor -%} 8 | {%- endfor -%} 9 | [Unit] 10 | Description=Kubernetes Kube Proxy 11 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 12 | 13 | [Service] 14 | ExecStart=/usr/bin/kube-proxy \ 15 | --master=https://{{ master | join(",") }}:6443 \ 16 | --kubeconfig=/var/lib/kubelet/kubeconfig \ 17 | --proxy-mode=iptables \ 18 | --v=2 19 | 20 | Restart=on-failure 21 | RestartSec=5 22 | 23 | [Install] 24 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /files/systemd_templatez/kube-scheduler.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Scheduler 3 | 
Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/bin/kube-scheduler \ 7 | --leader-elect=true \ 8 | --master=http://{{ansible_default_ipv4.address}}:8080 \ 9 | --v=2 10 | Restart=on-failure 11 | RestartSec=5 12 | 13 | [Install] 14 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /files/systemd_templatez/kubelet.service: -------------------------------------------------------------------------------- 1 | {%- set master = [] -%} 2 | {%- for node in host_roles -%} 3 | {%- for key, value in node.iteritems() -%} 4 | {%- if "master" in value['type'] -%} 5 | {{- master.append(value['ipaddress']) -}} 6 | {%- endif -%} 7 | {%- endfor -%} 8 | {%- endfor -%} 9 | [Unit] 10 | Description=Kubernetes Kubelet 11 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 12 | After=docker.service 13 | Requires=docker.service 14 | 15 | [Service] 16 | ExecStart=/usr/bin/kubelet \ 17 | --allow-privileged=true \ 18 | --api-servers=https://{{ master | join(",") }}:6443 \ 19 | --cloud-provider= \ 20 | --cluster-dns={{ cluster_info.dns_service_ip }} \ 21 | --cluster-domain={{ cluster_info.cluster_domain }} \ 22 | --container-runtime=docker \ 23 | --docker=unix:///var/run/docker.sock \ 24 | --network-plugin=cni \ 25 | --kubeconfig=/var/lib/kubelet/kubeconfig \ 26 | --serialize-image-pulls=false \ 27 | --tls-cert-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}.pem \ 28 | --tls-private-key-file={{ pki_info.cert_path }}/{{ pki_info.cert_name }}-key.pem \ 29 | --v=2 30 | 31 | Restart=on-failure 32 | RestartSec=5 33 | 34 | [Install] 35 | WantedBy=multi-user.target 36 | -------------------------------------------------------------------------------- /files/yaml_templatez/calicoctl_cfg: -------------------------------------------------------------------------------- 1 | {%- set master = [] -%} 2 | {%- for node in host_roles -%} 3 | {%- for key, value in node.iteritems() -%} 4 | {%- if "master" in value['type'] -%} 5 | {{- master.append(value['ipaddress']) -}} 6 | {%- endif -%} 7 | {%- endfor -%} 8 | {%- endfor -%} 9 | apiVersion: v1 10 | kind: calicoApiConfig 11 | metadata: 12 | spec: 13 | etcdEndpoints: https://{{ master | join(",") }}:2379 14 | etcdKeyFile: /etc/cni/net.d/calico-tls/etcd-key 15 | etcdCertFile: /etc/cni/net.d/calico-tls/etcd-cert 16 | etcdCACertFile: /etc/cni/net.d/calico-tls/etcd-ca 17 | -------------------------------------------------------------------------------- /files/yaml_templatez/kubeconfig: -------------------------------------------------------------------------------- 1 | {%- set master = [] -%} 2 | {%- for node in host_roles -%} 3 | {%- for key, value in node.iteritems() -%} 4 | {%- if "master" in value['type'] -%} 5 | {{- master.append(value['ipaddress']) -}} 6 | {%- endif -%} 7 | {%- endfor -%} 8 | {%- endfor -%} 9 | 10 | apiVersion: v1 11 | kind: Config 12 | clusters: 13 | - cluster: 14 | certificate-authority: {{ pki_info.cert_path }}/ca.pem 15 | server: https://{{ master | join(",") }}:6443 16 | name: {{ cluster_info.cluster_name }} 17 | contexts: 18 | - context: 19 | cluster: {{ cluster_info.cluster_name }} 20 | user: kubelet 21 | name: kubelet 22 | current-context: kubelet 23 | users: 24 | - name: kubelet 25 | user: 26 | {% for token in auth_tokens %} 27 | {% for key, value in token.iteritems() %} 28 | {% if "kubelet" in value.username %} 29 | token: {{ value.password }} 30 | {% endif %} 31 | {% endfor %} 32 | {% endfor %} 33 | 
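The kubelet, kube-proxy, calicoctl and kubeconfig templates all use the same Jinja2 pattern: walk host_roles, collect the address of every node whose type contains "master", and join the result into a URL. With the sample vars/main.yaml values that list has exactly one entry, so the kubeconfig above renders with server https://10.20.30.71:6443 and the kubelet token (password "changeme") as the bearer credential; with more than one master the join would produce an invalid URL, so the templates effectively assume a single master. A quick way to confirm the token file and certificates line up, sketched with those sample values rather than anything the role runs itself:

    curl --cacert /var/lib/kube_certs/ca.pem \
         -H "Authorization: Bearer changeme" \
         https://10.20.30.71:6443/version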
-------------------------------------------------------------------------------- /handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart kubelet 3 | become: true 4 | systemd: 5 | name: kubelet 6 | daemon_reload: yes 7 | state: restarted 8 | 9 | - name: restart kube-proxy 10 | become: true 11 | systemd: 12 | name: kube-proxy 13 | daemon_reload: yes 14 | state: restarted 15 | 16 | - name: restart kube-api-server 17 | become: true 18 | systemd: 19 | name: kube-apiserver 20 | daemon_reload: yes 21 | state: restarted 22 | 23 | - name: restart etcd 24 | become: true 25 | systemd: 26 | name: etcd 27 | daemon_reload: yes 28 | state: restarted 29 | 30 | - name: restart kube-controller-manager 31 | become: true 32 | systemd: 33 | name: kube-controller-manager 34 | daemon_reload: yes 35 | state: restarted 36 | 37 | - name: restart kube-scheduler 38 | become: true 39 | systemd: 40 | name: kube-scheduler 41 | daemon_reload: yes 42 | state: restarted 43 | 44 | 45 | -------------------------------------------------------------------------------- /tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: ssl_cert_gen.yaml 3 | tags: ssl_gen 4 | when: 5 | "'masters' in group_names" 6 | 7 | - include: master_build.yaml 8 | tags: master 9 | when: 10 | "'masters' in group_names" 11 | 12 | - include: minion_build.yaml 13 | tags: minion 14 | when: 15 | "'minions' in group_names" 16 | -------------------------------------------------------------------------------- /tasks/master_build.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create base directories 3 | become: true 4 | file: 5 | path: "{{item.path}}" 6 | mode: 0755 7 | state: directory 8 | with_items: 9 | - {path: "/etc/etcd"} 10 | - {path: "/var/lib/etcd"} 11 | - {path: "/var/lib/kubernetes"} 12 | - {path: "/var/lib/kubernetes/pod_defs"} 13 | 14 | - name: download etcd binary 15 | become: true 16 | unarchive: 17 | src: https://github.com/coreos/etcd/releases/download/v3.0.10/etcd-v3.0.10-linux-amd64.tar.gz 18 | dest: /tmp/ 19 | remote_src: True 20 | creates: /tmp/etcd-v3.0.10-linux-amd64/etcd 21 | 22 | - name: copy etcd to bin 23 | become: true 24 | copy: 25 | mode: 0755 26 | src: /tmp/etcd-v3.0.10-linux-amd64/etcd 27 | dest: /usr/bin/etcd 28 | 29 | - name: copy etcdctl to bin 30 | become: true 31 | copy: 32 | mode: 0755 33 | src: /tmp/etcd-v3.0.10-linux-amd64/etcdctl 34 | dest: /usr/bin/etcdctl 35 | 36 | - name: download apiserver binary 37 | become: true 38 | get_url: 39 | url: https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-apiserver 40 | dest: /usr/bin/kube-apiserver 41 | mode: 0755 42 | force: true 43 | 44 | - name: download controller binary 45 | become: true 46 | get_url: 47 | url: https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-controller-manager 48 | dest: /usr/bin/kube-controller-manager 49 | mode: 0755 50 | force: true 51 | 52 | - name: download scheduler binary 53 | become: true 54 | get_url: 55 | url: https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-scheduler 56 | dest: /usr/bin/kube-scheduler 57 | mode: 0755 58 | force: true 59 | 60 | - name: download kubectl binary 61 | become: true 62 | get_url: 63 | url: https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kubectl 64 | dest: /usr/bin/kubectl 65 | mode: 0755 
66 | force: true 67 | 68 | - name: generate token file 69 | become: true 70 | template: 71 | src: /etc/ansible/roles/kubernetes/files/csv_templatez/token.csv 72 | dest: /var/lib/kubernetes/token.csv 73 | mode: 0755 74 | 75 | - name: generate auth file 76 | become: true 77 | template: 78 | src: /etc/ansible/roles/kubernetes/files/json_templatez/auth_policy.jsonl 79 | dest: /var/lib/kubernetes/authorization-policy.jsonl 80 | mode: 0755 81 | 82 | - name: etcd_service_config 83 | become: true 84 | template: 85 | src: /etc/ansible/roles/kubernetes/files/systemd_templatez/etcd.service 86 | dest: /etc/systemd/system/etcd.service 87 | mode: 0755 88 | 89 | - name: apiserver_service_config 90 | become: true 91 | template: 92 | src: /etc/ansible/roles/kubernetes/files/systemd_templatez/kube-apiserver.service 93 | dest: /etc/systemd/system/kube-apiserver.service 94 | mode: 0755 95 | notify: 96 | - restart kube-api-server 97 | 98 | - name: controller_manager_service_config 99 | become: true 100 | template: 101 | src: /etc/ansible/roles/kubernetes/files/systemd_templatez/kube-controller-manager.service 102 | dest: /etc/systemd/system/kube-controller-manager.service 103 | mode: 0755 104 | notify: 105 | - restart kube-controller-manager 106 | 107 | - name: scheduler_service_config 108 | become: true 109 | template: 110 | src: /etc/ansible/roles/kubernetes/files/systemd_templatez/kube-scheduler.service 111 | dest: /etc/systemd/system/kube-scheduler.service 112 | mode: 0755 113 | notify: 114 | - restart kube-scheduler 115 | 116 | - name: start k8s services 117 | become: true 118 | systemd: 119 | state: started 120 | enabled: yes 121 | name: "{{item}}" 122 | with_items: 123 | - etcd 124 | - kube-apiserver 125 | - kube-controller-manager 126 | - kube-scheduler 127 | 128 | - name: copy pod definitions 129 | become: true 130 | template: 131 | mode: 0755 132 | src: /etc/ansible/roles/kubernetes/files/pod_definitionz/{{ item }} 133 | dest: /var/lib/kubernetes/pod_defs/{{ item }} 134 | with_items: 135 | - kubedns.yaml 136 | - kubedns-svc.yaml 137 | - calico.yaml 138 | 139 | - name: download calicoctl binary 140 | become: true 141 | get_url: 142 | url: https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl 143 | dest: /usr/bin/calicoctl 144 | mode: 0755 145 | -------------------------------------------------------------------------------- /tasks/minion_build.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create base directories 3 | become: true 4 | file: 5 | path: "{{item.path}}" 6 | mode: 0755 7 | state: directory 8 | with_items: 9 | - {path: "/var/lib/kubelet"} 10 | - {path: "/opt/cni"} 11 | - {path: "/var/lib/kube_certs"} 12 | - {path: "/etc/calico"} 13 | 14 | - name: copy ca pem 15 | become: true 16 | copy: 17 | mode: 0755 18 | src: /var/lib/kube_certs/ca.pem 19 | dest: /var/lib/kube_certs/ca.pem 20 | notify: 21 | - restart kubelet 22 | - restart kube-proxy 23 | 24 | - name: copy cert 25 | become: true 26 | copy: 27 | mode: 0755 28 | src: /var/lib/kube_certs/kubernetes.pem 29 | dest: /var/lib/kube_certs/kubernetes.pem 30 | notify: 31 | - restart kubelet 32 | - restart kube-proxy 33 | 34 | - name: copy cert key 35 | become: true 36 | copy: 37 | mode: 0755 38 | src: /var/lib/kube_certs/kubernetes-key.pem 39 | dest: /var/lib/kube_certs/kubernetes-key.pem 40 | notify: 41 | - restart kubelet 42 | - restart kube-proxy 43 | 44 | - name: copy kubeconfig 45 | become: true 46 | template: 47 | mode: 0755 48 | src: 
/etc/ansible/roles/kubernetes/files/yaml_templatez/kubeconfig 49 | dest: /var/lib/kubelet/kubeconfig 50 | notify: 51 | - restart kubelet 52 | - restart kube-proxy 53 | 54 | - name: download docker binary 55 | become: true 56 | unarchive: 57 | src: https://get.docker.com/builds/Linux/x86_64/docker-1.12.1.tgz 58 | dest: /tmp 59 | remote_src: True 60 | creates: /tmp/docker/docker 61 | 62 | - name: copy docker binaries to bin 63 | become: true 64 | copy: 65 | mode: 0755 66 | src: /tmp/docker/{{ item }} 67 | dest: /usr/bin/{{ item }} 68 | remote_src: yes 69 | with_items: 70 | - docker 71 | - docker-containerd 72 | - docker-containerd-ctr 73 | - docker-containerd-shim 74 | - dockerd 75 | - docker-proxy 76 | - docker-runc 77 | 78 | - name: download kubelet binary 79 | become: true 80 | get_url: 81 | url: https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kubelet 82 | dest: /usr/bin/kubelet 83 | mode: 0755 84 | force: yes 85 | 86 | - name: download kube-proxy binary 87 | become: true 88 | get_url: 89 | url: https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-proxy 90 | dest: /usr/bin/kube-proxy 91 | mode: 0755 92 | force: yes 93 | 94 | - name: download cni files 95 | become: true 96 | unarchive: 97 | src: https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz 98 | dest: /opt/cni 99 | remote_src: True 100 | creates: /opt/cni/bin/bridge 101 | 102 | - name: docker_service_config 103 | become: true 104 | template: 105 | src: /etc/ansible/roles/kubernetes/files/systemd_templatez/docker.service 106 | dest: /etc/systemd/system/docker.service 107 | mode: 0755 108 | 109 | - name: kubelet_service_config 110 | become: true 111 | template: 112 | src: /etc/ansible/roles/kubernetes/files/systemd_templatez/kubelet.service 113 | dest: /etc/systemd/system/kubelet.service 114 | mode: 0755 115 | notify: 116 | - restart kubelet 117 | 118 | - name: kube-proxy_service_config 119 | become: true 120 | template: 121 | src: /etc/ansible/roles/kubernetes/files/systemd_templatez/kube-proxy.service 122 | dest: /etc/systemd/system/kube-proxy.service 123 | mode: 0755 124 | notify: 125 | - restart kube-proxy 126 | 127 | - name: start k8s services 128 | become: true 129 | systemd: 130 | state: started 131 | enabled: yes 132 | name: "{{item}}" 133 | with_items: 134 | - docker 135 | - kubelet 136 | - kube-proxy 137 | 138 | - name: download calicoctl binary 139 | become: true 140 | get_url: 141 | url: https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl 142 | dest: /usr/bin/calicoctl 143 | mode: 0755 144 | 145 | - name: place calicoctl_cfg 146 | become: true 147 | template: 148 | mode: 0755 149 | src: /etc/ansible/roles/kubernetes/files/yaml_templatez/calicoctl_cfg 150 | dest: /etc/calico/calicoctl.cfg 151 | -------------------------------------------------------------------------------- /tasks/ssl_cert_gen.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create base directories 3 | become: true 4 | file: 5 | path: "{{item.path}}" 6 | mode: 0755 7 | state: directory 8 | with_items: 9 | - {path: "{{ pki_info.cert_path }}"} 10 | 11 | - name: ca_config 12 | become: true 13 | template: 14 | src: /etc/ansible/roles/kubernetes/files/json_templatez/pki_ca_config.json 15 | dest: "{{ pki_info.cert_path }}/pki_ca_config.json" 16 | mode: 0755 17 | 18 | - name: ca_csr_config 19 | become: true 20 | template: 21 | src: 
/etc/ansible/roles/kubernetes/files/json_templatez/pki_ca_csr.json
22 |     dest: "{{ pki_info.cert_path }}/pki_ca_csr.json"
23 |     mode: 0755
24 |
25 | - name: crt_config
26 |   become: true
27 |   template:
28 |     src: /etc/ansible/roles/kubernetes/files/json_templatez/pki_crt_csr.json
29 |     dest: "{{ pki_info.cert_path }}/pki_crt_csr.json"
30 |     mode: 0755
31 |
32 | - name: download cfssl binary
33 |   become: true
34 |   get_url:
35 |     url: https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
36 |     dest: /usr/local/bin/cfssl
37 |     mode: 0755
38 |
39 | - name: download cfssl json binary
40 |   become: true
41 |   get_url:
42 |     url: https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
43 |     dest: /usr/local/bin/cfssljson
44 |     mode: 0755
45 |
46 | - name: generate ca
47 |   become: true
48 |   shell: cfssl gencert -initca pki_ca_csr.json | cfssljson -bare ca
49 |   args:
50 |     chdir: "{{ pki_info.cert_path }}"
51 |     creates: "{{ pki_info.cert_path }}/ca.pem"
52 |   notify:
53 |     - restart etcd
54 |     - restart kube-api-server
55 |     - restart kube-controller-manager
56 |
57 | - name: generate cert
58 |   become: true
59 |   shell: cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=pki_ca_config.json -profile={{ pki_info.cert_name }} pki_crt_csr.json | cfssljson -bare {{ pki_info.cert_name }}
60 |   args:
61 |     chdir: "{{ pki_info.cert_path }}"
62 |     creates: "{{ pki_info.cert_path }}/{{ pki_info.cert_name }}.pem"
63 |   notify:
64 |     - restart etcd
65 |     - restart kube-api-server
66 |     - restart kube-controller-manager
67 |
68 | - name: set permissions on key file
69 |   become: true
70 |   file:
71 |     path: "{{item}}"
72 |     owner: "{{setup_user}}"
73 |   with_items:
74 |     - "{{ pki_info.cert_path }}/{{ pki_info.cert_name }}-key.pem"
75 |     - "{{ pki_info.cert_path }}/ca.pem"
76 |     - "{{ pki_info.cert_path }}/{{ pki_info.cert_name }}.pem"
77 |
--------------------------------------------------------------------------------
/vars/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | #Define the user you wish to use for provisioning on the hosts. It is
3 | #implied that this user will exist on all nodes
4 | setup_user: user
5 |
6 | #Define all of your hosts here. The FQDN and IP address are important to
7 | #get right since they will be defined in the certificates Kubernetes uses
8 | #for communication
9 | host_roles:
10 |   - ubuntu-1:
11 |       type: master
12 |       fqdn: ubuntu-1.interubernet.local
13 |       ipaddress: 10.20.30.71
14 |   - ubuntu-2:
15 |       type: minion
16 |       fqdn: ubuntu-2.interubernet.local
17 |       ipaddress: 10.20.30.72
18 |   - ubuntu-3:
19 |       type: minion
20 |       fqdn: ubuntu-3.interubernet.local
21 |       ipaddress: 10.20.30.73
22 |   - ubuntu-4:
23 |       type: minion
24 |       fqdn: ubuntu-4.interubernet.local
25 |       ipaddress: 192.168.50.74
26 |   - ubuntu-5:
27 |       type: minion
28 |       fqdn: ubuntu-5.interubernet.local
29 |       ipaddress: 192.168.50.75
30 |
31 | #Most of these settings are not incredibly important for your first configuration;
32 | #however, make sure that the two CIDRs defined below are unique to your network, as
33 | #the cluster_node_cidr will need to be routed on your L3 gateway. Also make sure
34 | #that the cluster_node_cidr is big enough, as each host gets assigned a /24 out of it.
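#For example, with the values below the API server's own service address will be 10.11.12.1,
#the first IP of service_network_cidr (this is the address pki_crt_csr.json adds to the
#certificate), and dns_service_ip must fall inside the same range (10.11.12.254 here).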
35 | cluster_info: 36 | cluster_name: my_cluster 37 | cluster_domain: k8s.cluster.local 38 | service_network_cidr: 10.11.12.0/24 39 | dns_service_ip: 10.11.12.254 40 | 41 | #Again - not super critical to change this stuff but you can 42 | pki_info: 43 | cert_path: /var/lib/kube_certs 44 | key_size: 2048 45 | ca_expire: 87600h 46 | key_expire: 87600h 47 | cert_country: US 48 | cert_province: MN 49 | cert_city: Minneapolis 50 | cert_org: Test Org 51 | cert_email: test@test.com 52 | cert_ou: Test 53 | cert_name: kubernetes 54 | 55 | #You can change these if you want but for sure leave the 'kubelet' token. You can 56 | #change the password for it if you want but the username needs to be there. 57 | auth_tokens: 58 | - token1: 59 | uid: jontoken 60 | username: jontoken 61 | password: jontoken 62 | - token2: 63 | uid: kubelet 64 | username: kubelet 65 | password: changeme 66 | 67 | #I recommend leaving this alone until you get a better understanding of Kubernetes 68 | auth_policy: 69 | - policy1: 70 | username: "*" 71 | group: "" 72 | namespace: "" 73 | resource: "" 74 | apigroup: "" 75 | nonresourcepath: "*" 76 | readonly: true 77 | - policy2: 78 | username: admin 79 | group: "" 80 | namespace: "*" 81 | resource: "*" 82 | apigroup: "*" 83 | nonresourcepath: "" 84 | readonly: false 85 | - policy3: 86 | username: scheduler 87 | group: "" 88 | namespace: "*" 89 | resource: "*" 90 | apigroup: "*" 91 | nonresourcepath: "" 92 | readonly: false 93 | - policy4: 94 | username: kubelet 95 | group: "" 96 | namespace: "*" 97 | resource: "*" 98 | apigroup: "*" 99 | nonresourcepath: "" 100 | readonly: false 101 | - policy5: 102 | username: "" 103 | group: system:serviceaccounts 104 | namespace: "*" 105 | resource: "*" 106 | apigroup: "*" 107 | nonresourcepath: "*" 108 | readonly: false 109 | --------------------------------------------------------------------------------
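Taken together, tasks/main.yaml expects the inventory to provide "masters" and "minions" groups, and every template path assumes the role is installed at /etc/ansible/roles/kubernetes. A minimal end-to-end run might look like the sketch below; the inventory and playbook names are placeholders, and the final kubectl steps are assumed to be manual, since the master tasks copy the add-on manifests to /var/lib/kubernetes/pod_defs but nothing shown here applies them:

    # run the role against an inventory that defines the masters and minions groups
    ansible-playbook -i inventory site.yaml

    # then, on the master, apply the staged add-ons and watch them come up
    kubectl create -f /var/lib/kubernetes/pod_defs/calico.yaml
    kubectl create -f /var/lib/kubernetes/pod_defs/kubedns.yaml
    kubectl create -f /var/lib/kubernetes/pod_defs/kubedns-svc.yaml
    kubectl get pods -n kube-system -o wide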