├── .gitignore ├── .gitlab-ci.yml ├── .travis.yml ├── LICENSE ├── README.md ├── Vagrantfile ├── addons.yml ├── ansible.cfg ├── cluster.yml ├── contrib └── heat │ ├── README.md │ ├── elements │ ├── ansible │ │ └── install.d │ │ │ └── 56-ansible │ └── kubernetes │ │ └── install.d │ │ └── 57-kubernetes │ ├── env.yml │ ├── hack │ ├── build-images.sh │ └── upload-images.sh │ ├── requirements.txt │ ├── snapshots │ ├── snapshot1.png │ ├── snapshot2.png │ └── snapshot3.png │ └── stack.yml ├── extra-playbooks ├── add-node.yml └── delete-node.yml ├── hack ├── .config.rb ├── .func-vars ├── clear-vms ├── offline-tools │ └── pkg-downloader.py └── setup-vms ├── inventory ├── group_vars │ └── all.yml └── hosts.ini.example ├── reset-cluster.yml └── roles ├── cert ├── defaults │ └── main.yml ├── meta │ └── main.yml ├── tasks │ ├── create-etcd-certs.yml │ ├── create-k8s-certs.yml │ ├── create-k8s-kubelet-certs.yml │ ├── main.yml │ └── purge-files.yml └── templates │ ├── ca-config.json.j2 │ ├── ca-csr.json.j2 │ └── kubelet-csr.json.j2 ├── cluster-default ├── defaults │ ├── cert-path.yml │ ├── etcd-path.yml │ ├── k8s-path.yml │ ├── main.yml │ └── system-path.yml └── tasks │ └── main.yml ├── cluster-reset └── tasks │ ├── delete-k8s.yml │ ├── main.yml │ ├── reset-etcd.yml │ └── reset-k8s.yml ├── common ├── copy-files │ └── tasks │ │ └── main.yml └── os-check │ └── tasks │ └── main.yml ├── container-runtime ├── defaults │ └── main.yml ├── meta │ └── main.yml ├── tasks │ ├── containerd │ │ └── main.yml │ ├── docker │ │ ├── config-opts.yml │ │ ├── config-systemd.yml │ │ └── main.yml │ ├── main.yml │ └── nvidia-docker │ │ └── main.yml └── templates │ ├── containerd │ └── containerd.service.j2 │ ├── docker │ ├── docker.env.j2 │ ├── docker.service.j2 │ └── docker.socket.j2 │ └── nvidia-docker │ ├── config.toml.j2 │ ├── daemon.json.j2 │ └── libnvidia.conf.j2 ├── download ├── image │ └── tasks │ │ └── main.yml └── package │ ├── defaults │ └── main.yml │ └── tasks │ ├── archive.yml │ ├── binary.yml │ └── main.yml ├── etcd ├── defaults │ └── main.yml ├── meta │ └── main.yml ├── tasks │ ├── config-etcd.yml │ ├── main.yml │ └── systemd-etcd.yml └── templates │ ├── etcd-config.yml.j2 │ └── etcd.service.j2 ├── k8s-addon ├── defaults │ └── main.yml ├── files │ └── monitoring │ │ ├── grafana │ │ └── grafana-definitions.yml │ │ └── prometheus │ │ └── prometheus-rules.yml ├── tasks │ └── main.yml └── templates │ ├── dashboard │ ├── dashboard-anonymous-rbac.yml.j2 │ ├── dashboard-dp.yml.j2 │ ├── dashboard-rbac.yml.j2 │ ├── dashboard-sa.yml.j2 │ ├── dashboard-secret.yml.j2 │ └── dashboard-svc.yml.j2 │ ├── ingress-nginx │ ├── ingress-controller-cm.yml.j2 │ ├── ingress-controller-dp.yml.j2 │ ├── ingress-controller-ns.yml.j2 │ ├── ingress-controller-rbac.yml.j2 │ ├── ingress-controller-sa.yml.j2 │ └── ingress-controller-svc.yml.j2 │ ├── kubedns │ ├── kubedns-cm.yml.j2 │ ├── kubedns-dp.yml.j2 │ ├── kubedns-rbac.yml.j2 │ ├── kubedns-sa.yml.j2 │ └── kubedns-svc.yml.j2 │ ├── kubeproxy │ ├── kubeproxy-cm.yml.j2 │ ├── kubeproxy-ds.yml.j2 │ ├── kubeproxy-rbac.yml.j2 │ └── kubeproxy-sa.yml.j2 │ ├── logging │ ├── es │ │ ├── elasticsearch-rbac.yml.j2 │ │ ├── elasticsearch-sa.yml.j2 │ │ ├── elasticsearch-sts.yml.j2 │ │ └── elasticsearch-svc.yml.j2 │ ├── fluentd │ │ ├── fluentd-es-cm.yml.j2 │ │ ├── fluentd-es-ds.yml.j2 │ │ ├── fluentd-rbac.yml.j2 │ │ └── fluentd-sa.yml.j2 │ └── kibana │ │ ├── kibana-dp.yml.j2 │ │ └── kibana-svc.yml.j2 │ ├── metric-server │ ├── metric-server-sa.yml.j2 │ ├── metrics-apiservice.yml.j2 │ ├── metrics-server-dp.yml.j2 
│ ├── metrics-server-rbac.yml.j2 │ └── metrics-server-svc.yml.j2 │ └── monitoring │ ├── alertmanater │ ├── alertmanager-main-sa.yml.j2 │ ├── alertmanager-main-secret.yml.j2 │ ├── alertmanager-main-svc.yml.j2 │ └── alertmanager-main.yml.j2 │ ├── grafana │ ├── grafana-admin-secret.yml.j2 │ ├── grafana-datasources.yml.j2 │ ├── grafana-dp.yml.j2 │ ├── grafana-sa.yml.j2 │ ├── grafana-source.yml.j2 │ └── grafana-svc.yml.j2 │ ├── kube-state-metrics │ ├── kube-state-metrics-dp.yml.j2 │ ├── kube-state-metrics-rbac.yml.j2 │ ├── kube-state-metrics-sa.yml.j2 │ └── kube-state-metrics-svc.yml.j2 │ ├── monitoring-ns.yml.j2 │ ├── node-exporter │ ├── node-exporter-ds.yml.j2 │ ├── node-exporter-rbac.yml.j2 │ ├── node-exporter-sa.yml.j2 │ └── node-exporter-svc.yml.j2 │ ├── operator │ ├── operator-dp.yml.j2 │ ├── operator-rbac.yml.j2 │ ├── operator-sa.yml.j2 │ └── operator-svc.yml.j2 │ ├── prometheus │ ├── prometheus-main.yml.j2 │ ├── prometheus-rbac.yml.j2 │ ├── prometheus-sa.yml.j2 │ └── prometheus-svc.yml.j2 │ ├── service-discovery │ ├── kube-controller-manager-svc.yml.j2 │ └── kube-scheduler-svc.yml.j2 │ └── servicemonitor │ ├── alertmanager-sm.yml.j2 │ ├── coredns-sm.yml.j2 │ ├── kube-apiserver-sm.yml.j2 │ ├── kube-controller-manager-sm.yml.j2 │ ├── kube-scheduler-sm.yml.j2 │ ├── kubelet-sm.yml.j2 │ ├── kubestate-metrics-sm.yml.j2 │ ├── node-exporter-sm.yml.j2 │ ├── prometheus-operator-sm.yml.j2 │ └── prometheus-sm.yml.j2 ├── k8s-cni ├── tasks │ └── main.yml └── templates │ ├── calico.yml.j2 │ └── flannel.yml.j2 ├── k8s-kubeconfig ├── defaults │ └── main.yml ├── meta │ └── main.yml └── tasks │ ├── create-configs.yml │ └── main.yml └── k8s-setup ├── defaults └── main.yml ├── files ├── apiserver-to-kubelet-rbac.yml ├── kubelet-bootstrap-rbac.yml └── kubelet-config-rbac.yml ├── meta └── main.yml ├── tasks ├── config-systemd.yml ├── main.yml ├── setup-masters.yml ├── setup-nodes.yml └── setup-resources.yml └── templates ├── 10-kubelet.conf.j2 ├── audit └── policy.yml.j2 ├── encryption └── config.yml.j2 ├── etc └── haproxy.cfg.j2 ├── kubelet-bootstrap-secret.yml.j2 ├── kubelet-config-cm.yml.j2 ├── kubelet-config.yml.j2 ├── kubelet.service.j2 └── manifests ├── haproxy.yml.j2 ├── keepalived.yml.j2 ├── kube-apiserver.yml.j2 ├── kube-controller-manager.yml.j2 └── kube-scheduler.yml.j2 /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | .DS_Store 3 | *.retry 4 | tmp 5 | tmp/* 6 | hosts 7 | *.ovpn 8 | .keys/ 9 | .venv/ 10 | logs/ 11 | roles-bk/ 12 | extra-playbooks-bk/ 13 | *.ini 14 | openrc -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | stages: 2 | - Virtualbox Deploy 3 | - Libvirt Deploy 4 | - OpenStack Deploy 5 | 6 | before_script: 7 | - 'sudo rm -rf ~/VirtualBox\ VMs/*' 8 | - 'sudo rm -rf /tmp/ssl' 9 | 10 | # virtualbox template 11 | .vbox_template: &vbox_template 12 | stage: Virtualbox Deploy 13 | script: 14 | - './tools/setup -m ${MEMORY} -i ${IFACE} -n ${CNI_PLUGIN} -w ${WORKER} -b ${MASTER} -o ${OS} -f true' 15 | - './tools/reset' 16 | only: [/^pr-.*$/] 17 | tags: 18 | - shell 19 | - vagrant 20 | 21 | # openstack template 22 | .os_template: &os_template 23 | stage: OpenStack Deploy 24 | when: manual 25 | script: 26 | - 'cp ${INVENTORY_PATH} ./' 27 | - export MASTER_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}') 28 | - 'perl -i -pe "s/172.16.35.9/${MASTER_IP}/g" ${GROUP_VARS_PATH}' 29 | - 'perl -i -pe 
"s/cni_iface:.*/cni_iface: ${CNI_IFACE}/g" ${GROUP_VARS_PATH}' 30 | - 'perl -i -pe "s/network:.*/network: ${CNI_PLUGIN}/g" ${GROUP_VARS_PATH}' 31 | - 'perl -i -pe "s/keepalived:.*/keepalived: false/g" ${GROUP_VARS_PATH}' 32 | - ansible-playbook reset.yml 33 | - ansible-playbook cluster.yml 34 | - ansible-playbook addons.yml 35 | - mkdir -p ~/.kube/ 36 | - sudo cp /etc/kubernetes/admin.conf ~/.kube/config 37 | - sudo chmod 775 ~/.kube/config 38 | - kubectl get node 39 | - kubectl -n kube-system get po 40 | - kubectl -n kube-system get svc 41 | only: [/^pr-.*$/] 42 | 43 | vagrant-ubuntu-calico: 44 | variables: 45 | OS: ubuntu16 46 | CNI_PLUGIN: calico 47 | MEMORY: 2048 48 | MASTER: 1 49 | WORKER: 2 50 | IFACE: eth1 51 | <<: *vbox_template 52 | 53 | vagrant-centos-calico: 54 | variables: 55 | OS: centos7 56 | CNI_PLUGIN: calico 57 | MEMORY: 2048 58 | MASTER: 1 59 | WORKER: 2 60 | IFACE: enp0s8 61 | <<: *vbox_template 62 | 63 | # deploy openstack 64 | openstack-ubuntu-flannel: 65 | before_script: 66 | - sudo apt-get update && sudo apt-get install -y software-properties-common git 67 | - sudo apt-add-repository -y ppa:ansible/ansible 68 | - sudo apt-get update && sudo apt-get install -y ansible 69 | variables: 70 | GROUP_VARS_PATH: "./group_vars/all.yml" 71 | INVENTORY_PATH: "/home/gitlab-runner/inventory" 72 | CNI_IFACE: "ens3" 73 | CNI_PLUGIN: "flannel" 74 | <<: *os_template 75 | tags: 76 | - shell 77 | - ubuntu 78 | - openstack 79 | 80 | openstack-centos-flannel: 81 | before_script: 82 | - sudo yum install -y epel-release git 83 | - sudo yum install -y ansible 84 | variables: 85 | GROUP_VARS_PATH: "./group_vars/all.yml" 86 | INVENTORY_PATH: "/home/gitlab-runner/inventory" 87 | CNI_IFACE: "eth0" 88 | CNI_PLUGIN: "flannel" 89 | <<: *os_template 90 | tags: 91 | - shell 92 | - centos 93 | - openstack 94 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | os: linux 3 | sudo: required 4 | services: 5 | - docker 6 | python: 7 | - "2.7" 8 | before_install: 9 | - pip install -U ansible ansible-lint 10 | script: 11 | - ansible-lint addons.yml 12 | - ansible-lint cluster.yml 13 | - ansible-lint reset-cluster.yml 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/kairen/kube-ansible.svg?branch=master)](https://travis-ci.org/kairen/kube-ansible) 2 | # Kubernetes Ansible 3 | A playbooks to building the hard way Kubernetes cluster, This playbook is a fully automated command to bring up a Kubernetes cluster on VM or Baremetal. 4 | 5 | [![asciicast](https://asciinema.org/a/fDjMx3fTZX9SZktqEdTtWwZwi.png)](https://asciinema.org/a/fDjMx3fTZX9SZktqEdTtWwZwi?speed=2) 6 | 7 | Feature list: 8 | - [x] Support Kubernetes v1.10.0+. 9 | - [x] Highly available Kubernetes cluster. 10 | - [x] Full of the binaries installation. 11 | - [x] Kubernetes addons: 12 | - [x] Promethues monitoring. 13 | - [x] Metrics Server. 14 | - [x] EFK logging. 15 | - [x] Ingress Controller. 16 | - [x] Kubernetes Dashboard. 17 | - [x] Support container network: 18 | - [x] calico. 19 | - [x] flannel. 20 | - [x] Support container runtime: 21 | - [x] docker. 22 | - [x] nvidia-docker.(Require NVIDIA driver and CUDA 9.0+) 23 | - [x] containerd. 24 | - [ ] cri-o. 
25 | 26 | ## Quick Start 27 | In this section you will deploy a cluster using Vagrant. 28 | 29 | Prerequisites: 30 | * Ansible version: *v2.5 (or newer)*. 31 | * [Vagrant](https://www.vagrantup.com/downloads.html): >= 2.0.0. 32 | * [VirtualBox](https://www.virtualbox.org/wiki/Downloads): >= 5.0.0. 33 | * Mac OS X needs the `sshpass` tool installed: 34 | 35 | ```sh 36 | $ brew install http://git.io/sshpass.rb 37 | ``` 38 | 39 | The getting started guide uses Vagrant with VirtualBox to deploy a Kubernetes cluster on virtual machines. You can deploy the cluster with a single command: 40 | ```sh 41 | $ ./hack/setup-vms 42 | Cluster Size: 1 master, 2 worker. 43 | VM Size: 1 vCPU, 2048 MB 44 | VM Info: ubuntu16, virtualbox 45 | CNI binding iface: eth1 46 | Start to deploy?(y): 47 | ``` 48 | > * You can also use the `sudo ./hack/setup-vms -p libvirt -i eth1` command to deploy the cluster on KVM. 49 | 50 | If you want to access the API as an anonymous user, you need to create an RBAC object that defines the role's permissions. For example, using the `cluster-admin` role: 51 | ```sh 52 | $ kubectl create clusterrolebinding open-api --clusterrole=cluster-admin --user=system:anonymous 53 | ``` 54 | 55 | Log in to the addon dashboards: 56 | - Dashboard: [https://API_SERVER:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](https://API_SERVER:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/) 57 | - Logging: [https://API_SERVER:8443/api/v1/namespaces/kube-system/services/kibana-logging/proxy/](https://API_SERVER:8443/api/v1/namespaces/kube-system/services/kibana-logging/proxy/) 58 | 59 | As of release 1.7, the Dashboard no longer has full admin privileges granted by default, so you need to create a token to access the resources: 60 | ```sh 61 | $ kubectl -n kube-system create sa dashboard 62 | $ kubectl create clusterrolebinding dashboard --clusterrole cluster-admin --serviceaccount=kube-system:dashboard 63 | $ kubectl -n kube-system get sa dashboard -o yaml 64 | apiVersion: v1 65 | kind: ServiceAccount 66 | metadata: 67 | creationTimestamp: 2017-11-27T17:06:41Z 68 | name: dashboard 69 | namespace: kube-system 70 | resourceVersion: "69076" 71 | selfLink: /api/v1/namespaces/kube-system/serviceaccounts/dashboard 72 | uid: 56b880bf-d395-11e7-9528-448a5ba4bd34 73 | secrets: 74 | - name: dashboard-token-vg52j 75 | 76 | $ kubectl -n kube-system describe secrets dashboard-token-vg52j 77 | ... 78 | token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtdG9rZW4tdmc1MmoiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNTZiODgwYmYtZDM5NS0xMWU3LTk1MjgtNDQ4YTViYTRiZDM0Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZCJ9.bVRECfNS4NDmWAFWxGbAi1n9SfQ-TMNafPtF70pbp9Kun9RbC3BNR5NjTEuKjwt8nqZ6k3r09UKJ4dpo2lHtr2RTNAfEsoEGtoMlW8X9lg70ccPB0M1KJiz3c7-gpDUaQRIMNwz42db7Q1dN7HLieD6I4lFsHgk9NPUIVKqJ0p6PNTp99pBwvpvnKX72NIiIvgRwC2cnFr3R6WdUEsuVfuWGdF-jXyc6lS7_kOiXp2yh6Ym_YYIr3SsjYK7XUIPHrBqWjF-KXO_AL3J8J_UebtWSGomYvuXXbbAUefbOK4qopqQ6FzRXQs00KrKa8sfqrKMm_x71Kyqq6RbFECsHPA 79 | ``` 80 | > Copy and paste the `token` into the dashboard login page. 81 | 82 | ## Manual deployment 83 | In this section you will manually deploy a cluster on your machines. 84 | 85 | Prerequisites: 86 | * Ansible version: *v2.5 (or newer)*. 
87 | * *Linux distributions*: Ubuntu 16+/Debian/CentOS 7.x. 88 | * All master/node machines should allow password-less SSH access from the `deploy` node. 89 | 90 | An example machine layout: 91 | 92 | | IP Address | Role | CPU | Memory | 93 | |-----------------|------------------|----------|------------| 94 | | 172.16.35.9 | vip | - | - | 95 | | 172.16.35.10 | k8s-m1 | 4 | 8G | 96 | | 172.16.35.11 | k8s-n1 | 4 | 8G | 97 | | 172.16.35.12 | k8s-n2 | 4 | 8G | 98 | | 172.16.35.13 | k8s-n3 | 4 | 8G | 99 | 100 | Add the machine info gathered above into a file called `inventory/hosts.ini`. An example inventory: 101 | ``` 102 | [etcds] 103 | k8s-m1 104 | k8s-n[1:2] 105 | 106 | [masters] 107 | k8s-m1 108 | k8s-n1 109 | 110 | [nodes] 111 | k8s-n[1:3] 112 | 113 | [kube-cluster:children] 114 | masters 115 | nodes 116 | ``` 117 | 118 | Set the variables in `group_vars/all.yml` to reflect the options you need. For example: 119 | ```yml 120 | # override kubernetes version (default: 1.10.6) 121 | kube_version: 1.11.2 122 | 123 | # container runtime, supported: docker, nvidia-docker, containerd. 124 | container_runtime: docker 125 | 126 | # container network, supported: calico, flannel. 127 | cni_enable: true 128 | container_network: calico 129 | cni_iface: '' 130 | 131 | # highly available variables 132 | vip_interface: '' 133 | vip_address: 172.16.35.9 134 | 135 | # etcd variables 136 | etcd_iface: '' 137 | 138 | # kubernetes extra addons variables 139 | enable_dashboard: true 140 | enable_logging: false 141 | enable_monitoring: false 142 | enable_ingress: false 143 | enable_metric_server: true 144 | 145 | # monitoring grafana user/password 146 | monitoring_grafana_user: "admin" 147 | monitoring_grafana_password: "p@ssw0rd" 148 | ``` 149 | 150 | ### Deploy a Kubernetes cluster 151 | If everything is ready, just run the `cluster.yml` playbook to deploy the cluster: 152 | ```sh 153 | $ ansible-playbook -i inventory/hosts.ini cluster.yml 154 | ``` 155 | 156 | And then run `addons.yml` to create the addons (a tag-filtered sketch appears at the end of this README): 157 | ```sh 158 | $ ansible-playbook -i inventory/hosts.ini addons.yml 159 | ``` 160 | 161 | ### Verify cluster 162 | To verify that the cluster is deployed, check it with the following commands: 163 | ```sh 164 | $ kubectl -n kube-system get po,svc 165 | 166 | NAME READY STATUS RESTARTS AGE IP NODE 167 | po/haproxy-master1 1/1 Running 0 2h 172.16.35.10 k8s-m1 168 | ... 169 | ``` 170 | 171 | ### Reset cluster 172 | Finally, if you want to clean up the cluster and redeploy, you can reset it with the `reset-cluster.yml` playbook: 173 | ```sh 174 | $ ansible-playbook -i inventory/hosts.ini reset-cluster.yml 175 | ``` 176 | 177 | ## Contributing 178 | Pull requests are always welcome! I am always thrilled to receive them. 
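Each addon role in `addons.yml` carries its own tag (`dashboard`, `logging`, `monitoring`, `ingress-nginx`, `metric-server`), so a single addon can be re-applied without running the rest, and any variable in `inventory/group_vars/all.yml` can be overridden for one run with Ansible's `-e` flag. A sketch (the tag names come from `addons.yml`; the override values are illustrative only):

```sh
# Re-apply only the monitoring addon.
$ ansible-playbook -i inventory/hosts.ini addons.yml --tags monitoring

# Deploy with a different Kubernetes version and CNI without editing all.yml.
$ ansible-playbook -i inventory/hosts.ini cluster.yml \
    -e kube_version=1.11.2 -e container_network=flannel
```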
179 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | require "yaml" 2 | require "fileutils" 3 | 4 | Vagrant.require_version ">= 1.7.0" 5 | 6 | CONFIG = File.expand_path("./hack/.config.rb") 7 | if File.exist?(CONFIG) 8 | require CONFIG 9 | end 10 | 11 | $os_image = (ENV['OS_IMAGE'] || "ubuntu16").to_sym 12 | $provider = (ENV['PROVIDER'] || "virtualbox").to_sym 13 | $auto_deploy = (ENV['DEPLOY']|| "true").to_sym 14 | 15 | def set_vbox(vb, config) 16 | vb.gui = false 17 | vb.memory = $system_memory 18 | vb.cpus = $system_vcpus 19 | 20 | case $os_image 21 | when :centos7 22 | config.vm.box = "bento/centos-7.3" 23 | when :ubuntu16 24 | config.vm.box = "bento/ubuntu-16.04" 25 | end 26 | end 27 | 28 | def set_libvirt(lv, config) 29 | lv.nested = true 30 | lv.volume_cache = 'none' 31 | lv.uri = 'qemu+unix:///system' 32 | lv.memory = $system_memory 33 | lv.cpus = $system_vcpus 34 | 35 | case $os_image 36 | when :centos7 37 | config.vm.box = "centos/7" 38 | when :ubuntu16 39 | config.vm.box = "yk0/ubuntu-xenial" 40 | end 41 | end 42 | 43 | def set_hyperv(hv, config) 44 | hv.memory = $system_memory 45 | hv.cpus = $system_vcpus 46 | 47 | case $os_image 48 | when :centos7 49 | config.vm.box = "generic/centos7" 50 | config.vm.provision "shell", inline: "sudo yum install -y python" 51 | when :ubuntu16 52 | config.vm.box = "generic/ubuntu1604" 53 | config.vm.provision "shell", inline: "sudo apt-get update && sudo apt-get install -y python" 54 | end 55 | end 56 | 57 | Vagrant.configure("2") do |config| 58 | config.vm.provider "hyperv" 59 | config.vm.provider "virtualbox" 60 | config.vm.provider "libvirt" 61 | 62 | config.vm.provision "shell", inline: "sudo swapoff -a" 63 | if $provider.to_s != 'hyperv' 64 | config.vm.provision "shell", inline: "sudo cp /vagrant/hosts /etc/" 65 | end 66 | 67 | count = $net_count 68 | (1..($master_count + $node_count)).each do |mid| 69 | name = (mid <= $master_count) ? "k8s-m" : "k8s-n" 70 | id = (mid <= $master_count) ? 
mid : (mid - $master_count) 71 | 72 | config.vm.define "#{name}#{id}" do |n| 73 | n.vm.hostname = "#{name}#{id}" 74 | ip_addr = "#{$private_subnet}.#{count}" 75 | n.vm.network :private_network, ip: "#{ip_addr}", auto_config: true 76 | if $bridge_enable && $bridge_eth.to_s != '' 77 | n.vm.network "public_network", bridge: $bridge_eth 78 | end 79 | 80 | # Configure virtualbox provider 81 | n.vm.provider :virtualbox do |vb, override| 82 | vb.name = "#{n.vm.hostname}" 83 | set_vbox(vb, override) 84 | end 85 | 86 | # Configure libvirt provider 87 | n.vm.provider :libvirt do |lv, override| 88 | lv.host = "#{n.vm.hostname}" 89 | set_libvirt(lv, override) 90 | end 91 | 92 | # Configure hyperv provider 93 | n.vm.provider :hyperv do |hv, override| 94 | hv.vmname = "#{n.vm.hostname}" 95 | set_hyperv(hv, override) 96 | end 97 | 98 | count += 1 99 | if mid == ($master_count + $node_count) && $provider.to_s != 'hyperv' && $auto_deploy.to_s == 'true' 100 | n.vm.provision "cluster", type: "ansible" do |ansible| 101 | ansible.playbook = "cluster.yml" 102 | ansible.inventory_path = "./inventory/hosts.ini" 103 | ansible.become = true 104 | ansible.limit = "all" 105 | ansible.host_key_checking = false 106 | end 107 | n.vm.provision "addon", type: "ansible" do |ansible| 108 | ansible.playbook = "addons.yml" 109 | ansible.inventory_path = "./inventory/hosts.ini" 110 | ansible.become = true 111 | ansible.limit = "all" 112 | ansible.host_key_checking = false 113 | end 114 | end 115 | end 116 | end 117 | end 118 | -------------------------------------------------------------------------------- /addons.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Requires the cluster to be fully operational and running 3 | 4 | - hosts: masters 5 | become: true 6 | gather_facts: false 7 | roles: 8 | - { role: cluster-default } 9 | - { role: k8s-addon, tags: dashboard, when: enable_dashboard, addon: "{{ addons.dashboard }}" } 10 | - { role: k8s-addon, tags: logging, when: enable_logging, addon: "{{ addons.logging }}" } 11 | - { role: k8s-addon, tags: monitoring, when: enable_monitoring, addon: "{{ addons.monitoring }}" } 12 | - { role: k8s-addon, tags: ingress-nginx, when: enable_ingress, addon: "{{ addons.ingress_nginx }}" } 13 | - { role: k8s-addon, tags: metric-server, when: enable_metric_server, addon: "{{ addons.metric_server }}" } 14 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [ssh_connection] 2 | ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null 3 | pipelining = True 4 | 5 | [defaults] 6 | roles_path = ./roles 7 | remote_tmp = $HOME/.ansible/tmp 8 | local_tmp = $HOME/.ansible/tmp 9 | gathering = smart 10 | fact_caching = jsonfile 11 | fact_caching_connection = /tmp/facts_cache 12 | host_key_checking = False 13 | stdout_callback = skippy 14 | deprecation_warnings = False 15 | inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds 16 | become = True 17 | callback_whitelist = profile_tasks 18 | -------------------------------------------------------------------------------- /cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: masters 4 | become: true 5 | roles: 6 | - { role: cluster-default } 7 | - { role: cert, tags: cert } 8 | 9 | - hosts: kube-cluster 10 | 
gather_facts: false 11 | become: true 12 | roles: 13 | - { role: cluster-default } 14 | - { role: container-runtime, tags: container-runtime } 15 | 16 | - hosts: etcds 17 | become: true 18 | roles: 19 | - { role: cluster-default } 20 | - { role: etcd, tags: etcd } 21 | 22 | - hosts: masters 23 | become: true 24 | roles: 25 | - { role: cluster-default } 26 | - { role: k8s-kubeconfig, node_role: 'master' } 27 | - { role: k8s-setup, node_role: 'master', tags: master-setup } 28 | - { role: k8s-addon, addon: "{{ addons.kubedns }}" } 29 | - { role: k8s-addon, addon: "{{ addons.kubeproxy }}" } 30 | 31 | - hosts: nodes 32 | become: true 33 | roles: 34 | - { role: cluster-default } 35 | - { role: k8s-kubeconfig, node_role: 'node' } 36 | - { role: k8s-setup, node_role: 'node', tags: node-setup } 37 | 38 | - hosts: masters 39 | become: true 40 | gather_facts: false 41 | roles: 42 | - { role: cluster-default } 43 | - { role: k8s-cni, when: cni_enable } 44 | -------------------------------------------------------------------------------- /contrib/heat/README.md: -------------------------------------------------------------------------------- 1 | # Heat Kubernetes Template 2 | A Heat template to deploy a Kubernetes cluster on the OpenStack cloud. 3 | 4 | ## Quick Start 5 | First, you must install the OpenStack CLI tools on your machine; you can run the following commands to install them: 6 | ```sh 7 | $ virtualenv .env 8 | $ source .env/bin/activate 9 | $ pip install -r requirements.txt 10 | ``` 11 | 12 | Create client environment scripts for the projects and users: 13 | ```sh 14 | $ cat <<EOF > openrc 15 | export OS_PROJECT_DOMAIN_NAME=default 16 | export OS_PROJECT_DOMAIN_ID=default 17 | export OS_USER_DOMAIN_NAME=default 18 | export OS_REGION_NAME=RegionOne 19 | export OS_PROJECT_NAME=admin 20 | export OS_TENANT_NAME=admin 21 | export OS_USERNAME=admin 22 | export OS_PASSWORD=password 23 | export OS_AUTH_URL=http://YOUR_OPENSTACK_HOST/identity 24 | export OS_IDENTITY_API_VERSION=3 25 | export OS_IMAGE_API_VERSION=2 26 | EOF 27 | 28 | $ source openrc 29 | ``` 30 | 31 | Set the variables in `env.yml` to reflect the options you need: 32 | ```yaml 33 | parameters: 34 | image: ubuntu-16.04-server 35 | flavor: m1.small 36 | private_net: 95be5d06-b6e7-4571-8998-7ea9ca21a384 37 | public_net: b164fae0-3dc3-4309-b464-a08d8aab8fef 38 | ``` 39 | 40 | Now, just execute the following command to create the stack: 41 | ```sh 42 | $ openstack stack create k8s -t stack.yml -e env.yml 43 | ``` 44 | -------------------------------------------------------------------------------- /contrib/heat/elements/ansible/install.d/56-ansible: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Install Ansible in build stage. 4 | # 5 | 6 | # install ansible 7 | sudo apt-add-repository -y ppa:ansible/ansible 8 | sudo apt-get update && sudo apt-get install -y ansible 9 | -------------------------------------------------------------------------------- /contrib/heat/elements/kubernetes/install.d/57-kubernetes: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Install Kubernetes in build stage. 
4 | # 5 | 6 | # add docker repo 7 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 8 | sudo add-apt-repository \ 9 | "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ 10 | $(lsb_release -cs) \ 11 | stable" 12 | 13 | # add kubernetes repo 14 | curl -s "https://packages.cloud.google.com/apt/doc/apt-key.gpg" | sudo apt-key add - 15 | echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list 16 | 17 | export KUBE_VERSION="1.11.2" 18 | sudo apt-get update && sudo apt-get install -y \ 19 | kubelet=${KUBE_VERSION}-00 \ 20 | kubeadm=${KUBE_VERSION}-00 \ 21 | kubectl=${KUBE_VERSION}-00 \ 22 | docker-ce 23 | -------------------------------------------------------------------------------- /contrib/heat/env.yml: -------------------------------------------------------------------------------- 1 | parameters: 2 | image: ubuntu-16.04-server 3 | flavor: m1.small 4 | private_net: 95be5d06-b6e7-4571-8998-7ea9ca21a384 5 | public_net: b164fae0-3dc3-4309-b464-a08d8aab8fef 6 | -------------------------------------------------------------------------------- /contrib/heat/hack/build-images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Build cloud images for Heat Kubernetes. 4 | # 5 | 6 | set -eu 7 | 8 | if [ ! -d ".env" ]; then 9 | virtualenv .env 10 | fi 11 | 12 | source .env/bin/activate 13 | pip install diskimage-builder 14 | 15 | export DIB_RELEASE=xenial 16 | # export ELEMENTS_PATH=./elements 17 | 18 | # build image using diskimage-builder 19 | disk-image-create -a amd64 -o ubuntu-16.04-server vm ubuntu -p python 20 | -------------------------------------------------------------------------------- /contrib/heat/hack/upload-images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Upload cloud images to glance. 
4 | # 5 | 6 | set -eu 7 | 8 | # upload ubuntu 16.04 python image 9 | openstack image create "ubuntu-16.04-server" \ 10 | --file ubuntu-16.04-server.qcow2 \ 11 | --disk-format qcow2 \ 12 | --container-format bare \ 13 | --public 14 | -------------------------------------------------------------------------------- /contrib/heat/requirements.txt: -------------------------------------------------------------------------------- 1 | python-openstackclient 2 | python-heatclient 3 | -------------------------------------------------------------------------------- /contrib/heat/snapshots/snapshot1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inwinstack/kube-ansible/eacde13eb256489a142f52ad7ab2c8d94d3731ae/contrib/heat/snapshots/snapshot1.png -------------------------------------------------------------------------------- /contrib/heat/snapshots/snapshot2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inwinstack/kube-ansible/eacde13eb256489a142f52ad7ab2c8d94d3731ae/contrib/heat/snapshots/snapshot2.png -------------------------------------------------------------------------------- /contrib/heat/snapshots/snapshot3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inwinstack/kube-ansible/eacde13eb256489a142f52ad7ab2c8d94d3731ae/contrib/heat/snapshots/snapshot3.png -------------------------------------------------------------------------------- /contrib/heat/stack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | heat_template_version: 2015-10-15 4 | description: A template to deploy the Kubernetes cluster 5 | parameters: 6 | image: 7 | type: string 8 | description: Image used for servers 9 | flavor: 10 | type: string 11 | description: Flavor used by the servers 12 | private_net: 13 | type: string 14 | description: Network used by the servers 15 | public_net: 16 | type: string 17 | description: Public IP used by the servers 18 | ansible_user: 19 | type: string 20 | description: "Ansible SSH user name" 21 | default: ubuntu 22 | kube_ansible_repo: 23 | type: string 24 | description: "Kubernetes ansible playbook repo" 25 | default: "https://github.com/kairen/kube-ansible.git" 26 | kube_version: 27 | type: string 28 | description: "Kubernetes version" 29 | default: 1.11.2 30 | 31 | resources: 32 | prefix: 33 | type: OS::Heat::TestResource 34 | properties: 35 | value: { get_param: "OS::stack_name" } 36 | 37 | base_k8s_sg: 38 | type: OS::Neutron::SecurityGroup 39 | properties: 40 | name: 41 | str_replace: 42 | template: "%prefix%-base_k8s_sg" 43 | params: 44 | "%prefix%": { get_attr: [prefix, output] } 45 | description: Base security group for Kubernetes node 46 | rules: 47 | - direction: egress 48 | ethertype: IPv4 49 | - direction: egress 50 | ethertype: IPv6 51 | - protocol: icmp 52 | - protocol: tcp 53 | port_range_min: 6443 54 | port_range_max: 6443 55 | - protocol: tcp 56 | port_range_min: 22 57 | port_range_max: 22 58 | 59 | keypair: 60 | type: OS::Nova::KeyPair 61 | properties: 62 | save_private_key: true 63 | name: 64 | str_replace: 65 | template: "%prefix%-keypair" 66 | params: 67 | "%prefix%": { get_attr: [prefix, output] } 68 | 69 | k8s_worker_nodes: 70 | type: OS::Heat::ResourceGroup 71 | properties: 72 | count: 2 73 | resource_def: 74 | type: OS::Nova::Server 75 | properties: 76 | name: 77 | str_replace: 78 | template: 
"%prefix%-worker-%index%" 79 | params: 80 | "%prefix%": { get_attr: [prefix, output] } 81 | image: { get_param: image } 82 | key_name: { get_resource: keypair } 83 | flavor: { get_param: flavor } 84 | networks: 85 | - network: { get_param: private_net } 86 | security_groups: 87 | - get_resource: base_k8s_sg 88 | 89 | swift_signal_handle: 90 | type: OS::Heat::SwiftSignalHandle 91 | 92 | swift_signal: 93 | type: OS::Heat::SwiftSignal 94 | properties: 95 | handle: { get_resource: swift_signal_handle } 96 | count: 1 97 | timeout: 14400 98 | 99 | cloud_config_ansible: 100 | type: OS::Heat::CloudConfig 101 | properties: 102 | cloud_config: 103 | write_files: 104 | - path: "/opt/k8s/id_rsa" 105 | permissions: "0600" 106 | content: { get_attr: [keypair, private_key] } 107 | - path: "/opt/k8s/hosts.j2" 108 | permissions: "0644" 109 | content: 110 | str_replace: 111 | template: | 112 | [all] 113 | master ansible_ssh_host={{ master_node_ip_address }} ansible_user="%ansible_user%" 114 | {% for worker_node_ip_address in worker_nodes_ip_address %} 115 | worker-{{ loop.index0 }} ansible_ssh_host={{ worker_node_ip_address }} ansible_user="%ansible_user%" 116 | {% endfor %} 117 | 118 | [etcds] 119 | master 120 | 121 | [masters] 122 | master 123 | 124 | [nodes] 125 | {% for worker_node_ip_address in worker_nodes_ip_address %} 126 | worker-{{ loop.index0 }} 127 | {% endfor %} 128 | [kube-cluster:children] 129 | masters 130 | nodes 131 | params: 132 | "%ansible_user%": { get_param: ansible_user } 133 | - path: "/opt/k8s/environment.yaml" 134 | permissions: "0600" 135 | content: 136 | str_replace: 137 | template: | 138 | --- 139 | master_node_floating_ip: "%master_node_floating_ip%" 140 | worker_nodes_ip_address: %worker_nodes_ip_address% 141 | params: 142 | "%master_node_floating_ip%": { get_attr: [floating_ip, floating_ip_address] } 143 | "%worker_nodes_ip_address%": { get_attr: [k8s_worker_nodes, first_address] } 144 | - path: "/opt/k8s/all.yml" 145 | permissions: "0644" 146 | content: 147 | str_replace: 148 | template: | 149 | --- 150 | kube_version: "%kube_version%" 151 | 152 | vip_address: "%master_node_floating_ip%" 153 | lb_secure_port: 6443 154 | 155 | enable_keepalived: false 156 | enable_haproxy: false 157 | enable_dashboard: true 158 | params: 159 | "%master_node_floating_ip%": { get_attr: [floating_ip, floating_ip_address] } 160 | "%kube_version%": { get_param: kube_version } 161 | - path: "/opt/k8s/ansible.cfg" 162 | permissions: "0644" 163 | content: | 164 | [ssh_connection] 165 | ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null 166 | pipelining = True 167 | 168 | [defaults] 169 | roles_path = ./roles 170 | remote_tmp = $HOME/.ansible/tmp 171 | local_tmp = $HOME/.ansible/tmp 172 | gathering = smart 173 | private_key_file=/opt/k8s/id_rsa 174 | fact_caching = jsonfile 175 | fact_caching_connection = /tmp/facts_cache 176 | host_key_checking = False 177 | stdout_callback = skippy 178 | deprecation_warnings = False 179 | inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds 180 | become = True 181 | host_key_checking = False 182 | callback_whitelist = profile_tasks 183 | - path: "/opt/k8s/runcmd-bash" 184 | permissions: "0700" 185 | content: 186 | str_replace: 187 | template: | 188 | #!/bin/bash 189 | 190 | set -ux 191 | 192 | function exit_failure { 193 | %swift_signal_notify% --data-binary '{"status": "FAILURE", "reason": "'"$@"'"}' 194 | exit 1 195 | } 196 | 197 | function exit_success { 198 | 
%swift_signal_notify% --data-binary '{"status": "SUCCESS"}' 199 | } 200 | 201 | sudo apt-add-repository -y ppa:ansible/ansible || exit_failure "Ansible key added apt" 202 | sudo apt-get update && sudo apt-get install -y ansible || exit_failure "Ansible apt install" 203 | 204 | git clone %kube_ansible_repo% /opt/k8s/kube-ansible || exit_failure "Git Clone - %kube_ansible_repo%" 205 | cd /opt/k8s/kube-ansible 206 | 207 | # copy and generate config 208 | echo "master_node_ip_address: $(ip route get 8.8.8.8 | awk '{print $NF; exit}')" | sudo tee -a /opt/k8s/environment.yaml 209 | sudo cp -r /opt/k8s/ansible.cfg ansible.cfg 210 | sudo cp -r /opt/k8s/all.yml inventory/group_vars/all.yml 211 | ansible localhost -e @/opt/k8s/environment.yaml -m template \ 212 | -a "src=/opt/k8s/hosts.j2 dest=inventory/hosts.ini" || exit_failure "Generate Ansible Inventory" 213 | 214 | # run playbook 215 | ansible-playbook -i inventory/hosts.ini cluster.yml || exit_failure "Run Ansible Playbook" 216 | 217 | exit_success 218 | params: 219 | "%swift_signal_notify%": { get_attr: [swift_signal_handle, curl_cli] } 220 | "%kube_ansible_repo%": { get_param: kube_ansible_repo } 221 | runcmd: 222 | - ./opt/k8s/runcmd-bash 223 | 224 | k8s_master_node: 225 | depends_on: 226 | - k8s_worker_nodes 227 | type: OS::Nova::Server 228 | properties: 229 | name: 230 | str_replace: 231 | template: "%prefix%-master" 232 | params: 233 | "%prefix%": { get_attr: [prefix, output] } 234 | image: { get_param: image } 235 | key_name: { get_resource: keypair } 236 | flavor: { get_param: flavor } 237 | networks: 238 | - network: { get_param: private_net } 239 | security_groups: 240 | - get_resource: base_k8s_sg 241 | user_data_format: RAW 242 | user_data: { get_resource: cloud_config_ansible } 243 | 244 | floating_ip: 245 | type: OS::Neutron::FloatingIP 246 | properties: 247 | floating_network: { get_param: public_net } 248 | 249 | floating_ip_association: 250 | type: OS::Nova::FloatingIPAssociation 251 | properties: 252 | floating_ip: { get_resource: floating_ip } 253 | server_id: { get_resource: k8s_master_node } 254 | 255 | outputs: 256 | kube_apiserver_addr: 257 | description: Kubernetes API Server Address 258 | value: 259 | str_replace: 260 | template: https://host:6443 261 | params: 262 | host: { get_attr: [floating_ip, floating_ip_address] } 263 | master_ip: 264 | description: IP address of master 265 | value: { get_attr: [k8s_master_node, first_address] } 266 | workers_ip: 267 | description: IP address of workers 268 | value: { get_attr: [k8s_worker_nodes, first_address] } 269 | floating_ip: 270 | description: Floating IP of master 271 | value: { get_attr: [floating_ip, floating_ip_address] } 272 | private_key: 273 | description: Private key of all nodes 274 | value: { get_attr: [keypair, private_key] } 275 | -------------------------------------------------------------------------------- /extra-playbooks/add-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Add nodes 3 | 4 | - hosts: add-nodes 5 | gather_facts: false 6 | become: true 7 | roles: 8 | - { role: cluster-default } 9 | - { role: container-runtime, tags: container-runtime } 10 | 11 | - hosts: add-nodes 12 | become: true 13 | roles: 14 | - { role: cluster-default } 15 | - { role: k8s-kubeconfig, node_role: 'node' } 16 | - { role: k8s-setup, node_role: 'node', tags: node-setup } -------------------------------------------------------------------------------- /extra-playbooks/delete-node.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # Tear down nodes 3 | 4 | - hosts: masters 5 | become: yes 6 | roles: 7 | - { role: cluster-default } 8 | - { role: cluster-reset, delete_k8s: true } 9 | 10 | - hosts: delete-nodes 11 | become: yes 12 | roles: 13 | - { role: cluster-reset, reset_k8s: true } 14 | -------------------------------------------------------------------------------- /hack/.config.rb: -------------------------------------------------------------------------------- 1 | # Vagrant machine variables 2 | $master_count = 1 3 | $node_count = 2 4 | $system_vcpus = 1 5 | $system_memory = 2048 6 | $bridge_enable = false 7 | $bridge_eth = "eno1" 8 | $private_subnet = "172.16.35" 9 | $net_count = 10 10 | -------------------------------------------------------------------------------- /hack/.func-vars: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Program: Vagrant func and vars 4 | # History: 2017/1/19 Kyle.b Release 5 | 6 | GROUP_VARS_PATH="./inventory/group_vars/all.yml" 7 | INVENTORY_PATH="./inventory/hosts.ini" 8 | VAGRAN_CONFIG_PATH="./hack/.config.rb" 9 | SUBNET=$(grep -i "^\$private_subnet" ${VAGRAN_CONFIG_PATH} | awk '{ print $3 }' | sed 's/\"//g') 10 | NET_COUNT=$(grep -i "^\$net_count" ${VAGRAN_CONFIG_PATH} | awk '{ print $3 }' | sed 's/\"//g') 11 | 12 | # Usage message 13 | function usage() { 14 | echo -e "Usage : setup-vms [options]\n" 15 | echo " -b|--boss Number of masters." 16 | echo " -w|--worker Number of workers." 17 | echo " -c|--cpu Number of cores per vm." 18 | echo " -m|--memory Memory size per vm." 19 | echo " -p|--provider Virtual machine provider (virtualbox, libvirt, hyperv)." 20 | echo " -o|--os-image Virtual machine operating system (ubuntu16, centos7)." 21 | echo " -i|--interface Network bind interface." 22 | echo " -f|--force Force deployment." 23 | echo " -d|--deploy Auto deploy cluster." 24 | echo " --combine-master Combine number of workers into masters." 25 | echo " --combine-etcd Combine number of workers into etcds."
26 | exit 1 27 | } 28 | 29 | # Replace vagrant config 30 | function vagrant_config() { 31 | perl -i -pe "s/${1}/${2}/g" ${VAGRAN_CONFIG_PATH} 32 | } 33 | 34 | # Replace roles defaults variable 35 | function role_config() { 36 | perl -i -pe "s/${1}/${2}/g" ${3} 37 | } 38 | 39 | # Create inventory file 40 | function set_inventory() { 41 | local TOTAL=$((${NODES}+${MASTERS})) 42 | local masters="${SUBNET}.[${NET_COUNT}:$((${NET_COUNT}+${COMBINE_MASTERS}))]" 43 | local nodes="${SUBNET}.[$((${NET_COUNT}+${NODES}-1)):$((${NET_COUNT}+${TOTAL}-1))]" 44 | local etcds="${SUBNET}.[${NET_COUNT}:$((${NET_COUNT}+${COMBINE_ETCD}))]" 45 | local host_var="ansible_user=vagrant ansible_password=vagrant" 46 | rm -f ${INVENTORY_PATH} 47 | for group in "etcds" "masters" "nodes"; do 48 | echo "[${group}]" >> ${INVENTORY_PATH} 49 | if [ ${group} == "nodes" ]; then 50 | echo -e "${nodes} ${host_var}\n" >> ${INVENTORY_PATH} 51 | elif [ ${group} == "etcds" ]; then 52 | echo -e "${etcds} ${host_var}\n" >> ${INVENTORY_PATH} 53 | else 54 | echo -e "${masters} ${host_var}\n" >> ${INVENTORY_PATH} 55 | fi 56 | done 57 | echo -e "[kube-cluster:children]\nmasters\nnodes\n" >> ${INVENTORY_PATH} 58 | } 59 | 60 | # Create hosts file 61 | function set_hosts() { 62 | local TOTAL=$((${NODES}+${MASTERS})) 63 | echo -e "127.0.0.1 localhost\n::1 localhost\n" > hosts 64 | 65 | for ((i=0; i<${TOTAL}; i++)) do 66 | PREFIX="" 67 | if [ ${i} -lt ${MASTERS} ]; then 68 | PREFIX="k8s-m$((${i}+1))" 69 | else 70 | PREFIX="k8s-n$((${i}+1-${MASTERS}))" 71 | fi 72 | echo "${SUBNET}.$((${NET_COUNT}+${i})) ${PREFIX}" >> hosts 73 | done 74 | } 75 | 76 | # Check that a value is a number 77 | function isnum() { 78 | re='^[0-9]+$' 79 | if ! [[ ${1} =~ ${re} ]] ; then 80 | echo "Error: Not a number." >&2; exit 1 81 | fi 82 | } 83 | 84 | function check_cni() { 85 | local cni=${1} 86 | local isExist=false 87 | for n in "calico" "flannel" "canal" "weave" "router"; do 88 | if [ ${cni} == ${n} ]; then 89 | isExist=true 90 | fi 91 | done 92 | if [ ${isExist} == "false" ]; then 93 | echo "ERROR: the \"${cni}\" is not supported."
94 | exit 1; 95 | fi 96 | } 97 | 98 | function check_opts() { 99 | while [ ${#} -gt 0 ]; do 100 | case "${1}" in 101 | -b|--boss) isnum ${2}; export MASTERS=${2}; shift;; 102 | -w|--worker) isnum ${2}; export NODES=${2}; shift;; 103 | -c|--cpu) isnum ${2}; export CPU=${2}; shift;; 104 | -m|--memory) isnum ${2}; export MEMORY=${2}; shift;; 105 | -i|--interface) export ETH=${2}; shift;; 106 | -o|--os-image) export OS_IMAGE=${2}; shift;; 107 | -p|--provider) export PROVIDER=${2}; shift;; 108 | -f|--force) export FORCE=${2}; shift;; 109 | -d|--deploy) export DEPLOY=${2}; shift;; 110 | --combine-master) isnum ${2}; export COMBINE_MASTERS=${2}; shift;; 111 | --combine-etcd) isnum ${2}; export COMBINE_ETCD=${2}; shift;; 112 | *) usage; break;; 113 | esac 114 | shift 115 | done 116 | } 117 | -------------------------------------------------------------------------------- /hack/clear-vms: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Program: Reset all config 4 | # History: 2017/1/25 Kyle.b Release 5 | 6 | source ./hack/.func-vars 7 | set -eu 8 | 9 | ETH=${1:-""} 10 | 11 | role_config "container_network:.*" "container_network: calico" ${GROUP_VARS_PATH} 12 | role_config "cni_iface:.*" "cni_iface: \"\"" ${GROUP_VARS_PATH} 13 | role_config "etcd_iface:.*" "etcd_iface: \"\"" ${GROUP_VARS_PATH} 14 | role_config "vip_interface:.*" "vip_interface: \"\"" ${GROUP_VARS_PATH} 15 | role_config "${SUBNET}.[0-9]*" "${SUBNET}.9" ${GROUP_VARS_PATH} 16 | 17 | # clean up 18 | rm -rf ${INVENTORY_PATH} hosts 19 | vagrant destroy -f 20 | 21 | vagrant_config "master_count.*" "master_count = 1" 22 | vagrant_config "node_count.*" "node_count = 2" 23 | vagrant_config "system_vcpus.*" "system_vcpus = 1" 24 | vagrant_config "system_memory.*" "system_memory = 2048" 25 | -------------------------------------------------------------------------------- /hack/offline-tools/pkg-downloader.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | import os 4 | import urllib2 5 | import yaml 6 | from jinja2 import Environment, FileSystemLoader 7 | 8 | TMPL_FILE = '../../roles/download/package/defaults/main.yml' 9 | YML_FILE = './pkg.yml' 10 | PKG_HOME_DIR = os.environ.get('PKG_HOME_DIR', '/usr/local/apache2/htdocs') 11 | PATH = os.path.dirname(os.path.abspath(__file__)) 12 | TMPL_ENV = Environment( 13 | autoescape=False, 14 | loader=FileSystemLoader(os.path.join(PATH, '')), 15 | trim_blocks=False 16 | ) 17 | 18 | 19 | def create_download_yml(): 20 | '''Create the download yml''' 21 | 22 | with open(TMPL_FILE, 'r') as tmpl: 23 | pkg_yml = yaml.load(tmpl) 24 | with open(YML_FILE, 'w') as output: 25 | yml = TMPL_ENV.get_template(TMPL_FILE).render(pkg_yml) 26 | output.write(yml) 27 | 28 | 29 | def download_pkg(yml_file): 30 | '''Download packages from the yaml definition''' 31 | 32 | with open(yml_file, 'r') as f: 33 | yml = yaml.load(f) 34 | downloads = yml['package'] 35 | bases = yml['bases'] 36 | for key, value in downloads.items(): 37 | for item in value['items']: 38 | print("Downloading {0} ...".format(item)) 39 | 40 | path = value['url'].replace("{0}".format(bases[key]['url']), '') 41 | download_path = "{0}{1}".format(PKG_HOME_DIR, path) 42 | 43 | if not os.path.exists(download_path): 44 | os.makedirs(download_path) 45 | 46 | item_file = urllib2.urlopen("{0}/{1}".format(value['url'], item)) 47 | data = item_file.read() 48 | with open("{0}/{1}".format(download_path, item), "wb") as code: 49 | code.write(data) 50 | 
51 | def main(): 52 | create_download_yml() 53 | download_pkg(YML_FILE) 54 | 55 | 56 | if __name__ == "__main__": 57 | main() 58 | -------------------------------------------------------------------------------- /hack/setup-vms: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Program: Setup a vagrant env 4 | # History: 2017/1/19 Kyle.b Release 5 | 6 | source ./hack/.func-vars 7 | 8 | set -eu 9 | 10 | : ${MASTERS:="1"} 11 | : ${NODES:="2"} 12 | : ${CPU:="1"} 13 | : ${MEMORY:="2048"} 14 | : ${ETH:="eth1"} 15 | : ${CNI_PLUGIN:="calico"} 16 | : ${PROVIDER:="virtualbox"} 17 | : ${OS_IMAGE:="ubuntu16"} 18 | : ${FORCE:="false"} 19 | : ${DEPLOY:="true"} 20 | : ${COMBINE_MASTERS:="0"} 21 | : ${COMBINE_ETCD:="0"} 22 | 23 | [ ${#} -gt 0 ] && [ ${1} == "-h" ] && usage 24 | 25 | check_opts $@ 26 | echo "Cluster Size: ${MASTERS} master, ${NODES} worker." 27 | echo " VM Size: ${CPU} vCPU, ${MEMORY} MB" 28 | echo " VM Info: ${OS_IMAGE}, ${PROVIDER}" 29 | echo " CNI binding iface: ${ETH}" 30 | if [ ${FORCE} == "false" ]; then 31 | read -p "Start to deploy?(y): " check && [ "${check}" != "y" ] && exit 1 32 | fi 33 | 34 | vagrant_config "master_count.*" "master_count = ${MASTERS}" 35 | vagrant_config "node_count.*" "node_count = ${NODES}" 36 | vagrant_config "system_vcpus.*" "system_vcpus = ${CPU}" 37 | vagrant_config "system_memory.*" "system_memory = ${MEMORY}" 38 | 39 | role_config "etcd_iface:.*" "etcd_iface: ${ETH}" ${GROUP_VARS_PATH} 40 | role_config "cni_iface:.*" "cni_iface: ${ETH}" ${GROUP_VARS_PATH} 41 | role_config "vip_interface:.*" "vip_interface: ${ETH}" ${GROUP_VARS_PATH} 42 | role_config "${SUBNET}.[0-9]*" "${SUBNET}.9" ${GROUP_VARS_PATH} 43 | 44 | # Create inventory and hosts 45 | set_inventory 46 | set_hosts 47 | 48 | vagrant up --provider ${PROVIDER} 49 | -------------------------------------------------------------------------------- /inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | kube_version: 1.11.2 4 | 5 | # Container runtime, 6 | # Supported: docker, nvidia-docker, containerd. 7 | container_runtime: docker 8 | 9 | # Container network, 10 | # Supported: calico, flannel. 11 | cni_enable: true 12 | container_network: calico 13 | cni_iface: "" 14 | 15 | # Kubernetes HA extra variables. 16 | vip_interface: "" 17 | vip_address: 172.16.35.9 18 | 19 | # etcd extra variables. 
20 | etcd_iface: "" 21 | 22 | # Kubernetes extra addons 23 | enable_ingress: false 24 | enable_dashboard: false 25 | enable_logging: false 26 | enable_monitoring: false 27 | enable_metric_server: false 28 | 29 | grafana_user: "admin" 30 | grafana_password: "p@ssw0rd" 31 | -------------------------------------------------------------------------------- /inventory/hosts.ini.example: -------------------------------------------------------------------------------- 1 | [etcds] 2 | k8s-m1 3 | 4 | [masters] 5 | k8s-m1 6 | 7 | [nodes] 8 | k8s-n1 9 | k8s-n2 10 | 11 | [kube-cluster:children] 12 | masters 13 | nodes 14 | -------------------------------------------------------------------------------- /reset-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: "{{ hosts | default('kube-cluster') }}" 4 | become: true 5 | tags: reset_k8s 6 | roles: 7 | - { role: cluster-default } 8 | - { role: cluster-reset, reset_k8s: true } 9 | 10 | - hosts: etcds 11 | become: true 12 | tags: reset_etcd 13 | roles: 14 | - { role: cluster-default } 15 | - { role: cluster-reset, reset_etcd: true } 16 | -------------------------------------------------------------------------------- /roles/cert/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | etcd_hosts: " 4 | {% for host in groups['etcds'] %}{% if etcd_iface != '' %}{{ hostvars[host]['ansible_' + etcd_iface].ipv4.address }}{% else %}{{ hostvars[host].ansible_default_ipv4.address }}{% endif %}{% if not loop.last %},{% endif %}{% endfor %},127.0.0.1,{{ etcd_domain_name }}" 5 | apiserver_hosts: "{{ vip_address }},{{ api_service_ip }},127.0.0.1,kubernetes.default,kubernetes" 6 | 7 | cert_key_algo: rsa 8 | cert_key_size: 2048 9 | cert_info_country: TW 10 | cert_info_state: Taipei 11 | cert_info_locality: "New Taipei city" 12 | cert_info_org_uit: Kubernetes-ansible 13 | cert_info_expiry: 87600h 14 | cert_info_profile: kubernetes 15 | -------------------------------------------------------------------------------- /roles/cert/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - { role: download/package, pkg: "{{ package.cfssl }}" } 5 | - { role: download/package, pkg: "{{ package.cfssljson }}" } 6 | -------------------------------------------------------------------------------- /roles/cert/tasks/create-etcd-certs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure etcd PKI directory already exists 4 | file: path={{ etcd_pki_dir }} state=directory 5 | 6 | - name: Check etcd SSL certificate json files 7 | stat: 8 | path: "{{ etcd_pki_dir }}/{{ item.bare }}.pem" 9 | with_items: 10 | - { file: etcd-ca-csr.json, name: etcd, org: etcd, bare: etcd-ca} 11 | - { file: etcd-csr.json, name: etcd, org: etcd, bare: etcd } 12 | delegate_to: "{{ groups['masters'][0] }}" 13 | run_once: true 14 | register: check_etcd_json_files 15 | 16 | - name: Generate etcd SSL certificate json files 17 | when: check_etcd_json_files and not item.stat.exists 18 | template: 19 | src: ca-csr.json.j2 20 | dest: "{{ etcd_pki_dir }}/{{ item.item.file }}" 21 | with_items: "{{ check_etcd_json_files['results'] }}" 22 | delegate_to: "{{ groups['masters'][0] }}" 23 | run_once: true 24 | register: gen_etcd_json_files 25 | 26 | - name: Check etcd SSL certificate authority files 27 | stat: 28 | path: "{{ etcd_ca }}" 29 | delegate_to: "{{ 
groups['masters'][0] }}" 30 | run_once: true 31 | register: check_etcd_ca_file 32 | 33 | - name: Create etcd SSL certificate authority files 34 | when: not check_etcd_ca_file.stat.exists 35 | shell: | 36 | {{ bin_dir }}/cfssl gencert -initca {{ etcd_pki_dir }}/etcd-ca-csr.json | \ 37 | {{ bin_dir }}/cfssljson -bare {{ etcd_pki_dir }}/etcd-ca 38 | delegate_to: "{{ groups['masters'][0] }}" 39 | run_once: true 40 | register: create_etcd_ca_file 41 | 42 | - name: Check etcd SSL certificate key file 43 | stat: 44 | path: "{{ etcd_cert }}" 45 | delegate_to: "{{ groups['masters'][0] }}" 46 | run_once: true 47 | register: check_etcd_ssl_file 48 | 49 | - name: Create etcd SSL certificate key files 50 | when: not check_etcd_ssl_file.stat.exists 51 | shell: | 52 | {{ bin_dir }}/cfssl gencert \ 53 | -ca={{ etcd_ca }} \ 54 | -ca-key={{ etcd_ca_key }} \ 55 | -config={{ ca_config }} \ 56 | -hostname={{ etcd_hosts | trim }} \ 57 | -profile={{ cert_info_profile }} \ 58 | {{ etcd_pki_dir }}/etcd-csr.json | \ 59 | {{ bin_dir }}/cfssljson -bare {{ etcd_pki_dir }}/etcd 60 | delegate_to: "{{ groups['masters'][0] }}" 61 | run_once: true 62 | register: create_etcd_ssl_file 63 | -------------------------------------------------------------------------------- /roles/cert/tasks/create-k8s-certs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure Kubernetes PKI directory already exists 4 | file: path={{ pki_dir }} state=directory 5 | 6 | - name: Check Kubernetes SSL certificate json files 7 | stat: 8 | path: "{{ pki_dir }}/{{ item.bare }}.pem" 9 | with_items: 10 | - { file: ca-csr.json, name: kubernetes, org: Kubernetes, bare: ca } 11 | - { file: admin-csr.json, name: admin, org: "system:masters", bare: admin } 12 | - { file: apiserver-csr.json, name: kube-apiserver, org: Kubernetes, bare: apiserver } 13 | - { file: manager-csr.json, name: "system:kube-controller-manager", org: "system:kube-controller-manager", bare: controller-manager } 14 | - { file: scheduler-csr.json, name: "system:kube-scheduler", org: "system:kube-scheduler", bare: scheduler } 15 | - { file: front-proxy-ca-csr.json, name: kubernetes-front, org: Kubernetes, bare: front-proxy-ca } 16 | - { file: front-proxy-client-csr.json, name: front-proxy-client, org: Kubernetes, bare: front-proxy-client } 17 | delegate_to: "{{ groups['masters'][0] }}" 18 | run_once: true 19 | register: check_json_files 20 | 21 | - name: Generate Kubernetes SSL certificate json files 22 | when: check_json_files and not item.stat.exists 23 | template: 24 | src: ca-csr.json.j2 25 | dest: "{{ pki_dir }}/{{ item.item.file }}" 26 | with_items: "{{ check_json_files['results'] }}" 27 | delegate_to: "{{ groups['masters'][0] }}" 28 | run_once: true 29 | register: gen_json_files 30 | 31 | - name: Check Kubernetes SSL certificate authority files 32 | stat: 33 | path: "{{ pki_dir }}/{{ item.bare }}.pem" 34 | with_items: 35 | - { file: ca-csr.json, bare: ca } 36 | - { file: front-proxy-ca-csr.json, bare: front-proxy-ca } 37 | delegate_to: "{{ groups['masters'][0] }}" 38 | run_once: true 39 | register: check_ca_files 40 | 41 | - name: Create Kubernetes SSL certificate authority files 42 | when: check_ca_files and not item.stat.exists 43 | shell: | 44 | {{ bin_dir }}/cfssl gencert -initca {{ pki_dir }}/{{ item.item.file }} | \ 45 | {{ bin_dir }}/cfssljson -bare {{ pki_dir }}/{{ item.item.bare }} 46 | with_items: "{{ check_ca_files['results'] }}" 47 | delegate_to: "{{ groups['masters'][0] }}" 48 | run_once: true 49 | register: 
create_ca_files 50 | 51 | - name: Check Kubernetes SSL certificate key files 52 | stat: 53 | path: "{{ pki_dir }}/{{ item.bare }}.pem" 54 | with_items: 55 | - { file: apiserver-csr.json, ca: ca, hosts: "{{ apiserver_hosts }}", bare: apiserver } 56 | - { file: admin-csr.json, ca: ca, bare: admin } 57 | - { file: manager-csr.json, ca: ca, bare: controller-manager } 58 | - { file: scheduler-csr.json, ca: ca, bare: scheduler } 59 | - { file: front-proxy-client-csr.json, ca: front-proxy-ca, bare: front-proxy-client } 60 | delegate_to: "{{ groups['masters'][0] }}" 61 | run_once: true 62 | register: check_ssl_files 63 | 64 | - name: Create Kubernetes SSL certificate key files 65 | when: check_ssl_files and not item.stat.exists 66 | shell: | 67 | {{ bin_dir }}/cfssl gencert \ 68 | -ca={{ pki_dir }}/{{ item.item.ca }}.pem \ 69 | -ca-key={{ pki_dir }}/{{ item.item.ca }}-key.pem \ 70 | -config={{ ca_config }} \ 71 | {% if item.item.hosts is defined -%} 72 | -hostname={{ item.item.hosts }} \ 73 | {% endif -%} 74 | -profile={{ cert_info_profile }} \ 75 | {{ pki_dir }}/{{ item.item.file }} | \ 76 | {{ bin_dir }}/cfssljson -bare {{ pki_dir }}/{{ item.item.bare }} 77 | with_items: "{{ check_ssl_files['results'] }}" 78 | delegate_to: "{{ groups['masters'][0] }}" 79 | run_once: true 80 | register: create_ssl_files 81 | 82 | - name: Check service account key already exists 83 | stat: 84 | path: "{{ pki_dir }}/sa.key" 85 | delegate_to: "{{ groups['masters'][0] }}" 86 | run_once: true 87 | register: check_sa_key 88 | 89 | - name: Create service account private and public key 90 | when: not check_sa_key.stat.exists 91 | command: "{{ item }}" 92 | with_items: 93 | - "openssl genrsa -out {{ pki_dir}}/sa.key 2048" 94 | - "openssl rsa -in {{ pki_dir }}/sa.key -pubout -out {{ pki_dir }}/sa.pub" 95 | delegate_to: "{{ groups['masters'][0] }}" 96 | run_once: true 97 | register: create_sa_key 98 | -------------------------------------------------------------------------------- /roles/cert/tasks/create-k8s-kubelet-certs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check kubelet SSL certificate key files 4 | stat: 5 | path: "{{ kubelet }}" 6 | register: check_kubelet_ssl_file 7 | 8 | - name: Generate kubelet SSL certificate json files 9 | when: not check_kubelet_ssl_file.stat.exists 10 | template: 11 | src: kubelet-csr.json.j2 12 | dest: "{{ pki_dir }}/kubelet-csr.json" 13 | register: gen_kubelet_json_file 14 | 15 | - name: Create kubelet SSL certificate key files 16 | when: not check_kubelet_ssl_file.stat.exists 17 | shell: | 18 | {{ bin_dir }}/cfssl gencert \ 19 | -ca={{ pki_dir }}/ca.pem \ 20 | -ca-key={{ pki_dir }}/ca-key.pem \ 21 | -config={{ ca_config }} \ 22 | -hostname="{{ ansible_hostname }}" \ 23 | -profile={{ cert_info_profile }} \ 24 | {{ pki_dir }}/kubelet-csr.json | \ 25 | {{ bin_dir }}/cfssljson -bare {{ pki_dir }}/kubelet 26 | register: create_kubelet_ssl_file 27 | -------------------------------------------------------------------------------- /roles/cert/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check SSL CA json config 4 | stat: path={{ ca_config }} 5 | register: check_ca_config 6 | 7 | - name: Generate SSL CA config 8 | when: not check_ca_config.stat.exists 9 | template: src=ca-config.json.j2 dest="{{ ca_config }}" 10 | register: gen_ca_config 11 | 12 | - include_tasks: create-k8s-certs.yml 13 | 14 | - name: Copy ca certificate and key 15 | vars: 16 | files: 17 
| - "{{ ca }}" 18 | - "{{ ca_key }}" 19 | import_role: 20 | name: common/copy-files 21 | 22 | - include_tasks: create-k8s-kubelet-certs.yml 23 | 24 | - include_tasks: create-etcd-certs.yml 25 | 26 | - include_tasks: purge-files.yml 27 | -------------------------------------------------------------------------------- /roles/cert/tasks/purge-files.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Find unnecessary Kubernetes CSR files 4 | find: 5 | paths: "{{ pki_dir }}" 6 | patterns: "*.csr,*.json" 7 | register: find_k8s_result 8 | 9 | - name: Delete unnecessary Kubernetes files 10 | when: find_k8s_result 11 | file: path={{ item.path }} state=absent 12 | with_items: "{{ find_k8s_result.files }}" 13 | 14 | - name: Find unnecessary etcd files 15 | find: 16 | paths: "{{ etcd_pki_dir }}" 17 | patterns: "*.csr,*.json" 18 | delegate_to: "{{ groups['masters'][0] }}" 19 | run_once: true 20 | register: find_etcd_result 21 | 22 | - name: Delete unnecessary etcd files 23 | when: find_etcd_result 24 | file: path={{ item.path }} state=absent 25 | with_items: "{{ find_etcd_result.files }}" 26 | delegate_to: "{{ groups['masters'][0] }}" 27 | run_once: true 28 | -------------------------------------------------------------------------------- /roles/cert/templates/ca-config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "{{ cert_info_expiry }}" 5 | }, 6 | "profiles": { 7 | "{{ cert_info_profile }}": { 8 | "usages": [ 9 | "signing", 10 | "key encipherment", 11 | "server auth", 12 | "client auth" 13 | ], 14 | "expiry": "{{ cert_info_expiry }}" 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /roles/cert/templates/ca-csr.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "{{ item.item.name }}", 3 | "key": { 4 | "algo": "{{ cert_key_algo }}", 5 | "size": {{ cert_key_size }} 6 | }, 7 | "names": [ 8 | { 9 | "C": "{{ cert_info_country }}", 10 | "ST": "{{ cert_info_state }}", 11 | "L": "{{ cert_info_locality }}", 12 | "O": "{{ item.item.org }}", 13 | "OU": "{{ cert_info_org_uit }}" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /roles/cert/templates/kubelet-csr.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:node:{{ ansible_hostname }}", 3 | "key": { 4 | "algo": "{{ cert_key_algo }}", 5 | "size": {{ cert_key_size }} 6 | }, 7 | "names": [ 8 | { 9 | "C": "{{ cert_info_country }}", 10 | "ST": "{{ cert_info_state}}", 11 | "L": "{{ cert_info_locality }}", 12 | "O": "system:nodes", 13 | "OU": "{{ cert_info_org_uit }}" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /roles/cluster-default/defaults/cert-path.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ca_config: "{{ cache_dir }}/ca-config.json" 4 | pki_dir: /etc/kubernetes/pki 5 | etcd_pki_dir: "{{ pki_dir }}/etcd" 6 | 7 | # cluster ca path 8 | ca: "{{ pki_dir }}/ca.pem" 9 | ca_key: "{{ pki_dir }}/ca-key.pem" 10 | 11 | # etcd certs path 12 | etcd_ca: "{{ etcd_pki_dir }}/etcd-ca.pem" 13 | etcd_ca_key: "{{ etcd_pki_dir }}/etcd-ca-key.pem" 14 | etcd_cert: "{{ etcd_pki_dir }}/etcd.pem" 15 | etcd_cert_key: "{{ etcd_pki_dir }}/etcd-key.pem" 16 | 17 | # Kubernetes 
component certs path 18 | kubelet: "{{ pki_dir }}/kubelet.pem" 19 | kubelet_key: "{{ pki_dir }}/kubelet-key.pem" 20 | admin: "{{ pki_dir }}/admin.pem" 21 | admin_key: "{{ pki_dir }}/admin-key.pem" 22 | apiserver: "{{ pki_dir }}/apiserver.pem" 23 | apiserver_key: "{{ pki_dir }}/apiserver-key.pem" 24 | scheduler: "{{ pki_dir }}/scheduler.pem" 25 | scheduler_key: "{{ pki_dir }}/scheduler-key.pem" 26 | controller_manager: "{{ pki_dir }}/controller-manager.pem" 27 | controller_manager_key: "{{ pki_dir }}/controller-manager-key.pem" 28 | 29 | # Kubernetes authenticating proxy cert path 30 | front_ca: "{{ pki_dir }}/front-proxy-ca.pem" 31 | front_ca_key: "{{ pki_dir }}/front-proxy-ca-key.pem" 32 | front_client: "{{ pki_dir }}/front-proxy-client.pem" 33 | front_client_key: "{{ pki_dir }}/front-proxy-client-key.pem" 34 | 35 | # Kubernetes sa key path 36 | sa_public_key: "{{ pki_dir }}/sa.pub" 37 | sa_private_key: "{{ pki_dir }}/sa.key" 38 | -------------------------------------------------------------------------------- /roles/cluster-default/defaults/etcd-path.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | etcd_data_dir: /var/lib/etcd 4 | etcd_wal_dir: /var/lib/etcd/wal 5 | etcd_config_dir: /etc/etcd 6 | etcd_config_file: /etc/etcd/config.yml 7 | -------------------------------------------------------------------------------- /roles/cluster-default/defaults/k8s-path.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | etc_dir: /etc/kubernetes 4 | manifest_dir: /etc/kubernetes/manifests 5 | addon_dir: /etc/kubernetes/addons 6 | kubelet_config_dir: /var/lib/kubelet 7 | cni_etc_dir: /etc/cni/net.d 8 | cni_bin_dir: /opt/cni/bin 9 | audit_policy_dir: /etc/kubernetes/audit 10 | audit_log_dir: /var/log/kubernetes 11 | encryption_config_dir: /etc/kubernetes/encryption 12 | haproxy_config_dir: /etc/haproxy 13 | 14 | admin_kubeconfig: "{{ etc_dir }}/admin.conf" 15 | scheduler_kubeconfig: "{{ etc_dir }}/scheduler.conf" 16 | controller_manager_kubeconfig: "{{ etc_dir }}/controller-manager.conf" 17 | kubelet_kubeconfig: "{{ etc_dir }}/kubelet.conf" 18 | bootstrap_kubeconfig: "{{ etc_dir }}/kubelet-bootstrap.conf" 19 | kubelet_config: "{{ kubelet_config_dir }}/config.yml" 20 | audit_log: "{{ audit_log_dir }}/audit.log" 21 | audit_policy: "{{ audit_policy_dir }}/policy.yml" 22 | encryption_config: "{{ encryption_config_dir }}/config.yml" 23 | haproxy_config: "{{ haproxy_config_dir }}/haproxy.cfg" 24 | -------------------------------------------------------------------------------- /roles/cluster-default/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ################### 4 | # Kubernetes opts # 5 | ################### 6 | 7 | kube_version: 1.10.5 8 | 9 | # Container runtime, 10 | # Supported: docker, containerd. 11 | container_runtime: docker 12 | 13 | # Container network, 14 | # Supported: calico, flannel. 15 | cni_enable: true 16 | container_network: calico 17 | cni_iface: "" 18 | 19 | # Kubernetes cluster network 20 | pod_network_cidr: 10.244.0.0/16 21 | cluster_subnet: 10.96.0 22 | service_ip_range: "{{ cluster_subnet }}.0/12" 23 | service_node_port_range: 30000-32767 24 | api_service_ip: "{{ cluster_subnet }}.1" 25 | 26 | # Kubernetes HA extra variables. 
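# With the defaults below, lb_api_url renders to https://172.16.35.9:8443;
# kubeconfigs in an HA cluster point at this keepalived VIP rather than any
# single apiserver (address and port are illustrative defaults).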
27 | enable_keepalived: true 28 | enable_haproxy: true 29 | vip_interface: "" 30 | vip_address: 172.16.35.9 31 | lb_secure_port: 8443 32 | lb_api_url: "https://{{ vip_address }}:{{ lb_secure_port }}" 33 | 34 | # etcd extra variables. 35 | etcd_iface: "" 36 | etcd_domain_name: test.etcd.com 37 | 38 | # Kubernetes bootstrap token 39 | bootstrap_token_id: "6ac849" 40 | bootstrap_token_secret: "18fac0a6405e8e15" 41 | bootstrap_token: "{{ bootstrap_token_id }}.{{ bootstrap_token_secret }}" 42 | 43 | # Kubernetes secret encryption 44 | encryption_token: iTNwwjHuxNI9+8niwh8GJKT5NQiHFqcOTrrhzYfhAvk= 45 | 46 | # Kubernetes extra addons 47 | enable_ingress: false 48 | enable_dashboard: false 49 | enable_logging: false 50 | enable_monitoring: false 51 | enable_metric_server: false 52 | 53 | ingress_lb_address: "{{ vip_address }}" 54 | 55 | monitoring_grafana_user: "admin" 56 | monitoring_grafana_password: "p@ssw0rd" 57 | 58 | # kube-proxy variables 59 | kubeproxy_mode: iptables # support: iptables or ipvs. 60 | kubeproxy_ipvs_scheduler: rr 61 | 62 | ############### 63 | # Docker opts # 64 | ############### 65 | 66 | # A list of insecure registries you might need to define 67 | add_registry: 68 | # - "gcr.io" 69 | 70 | insecure_registrys: 71 | # - "172.16.35.9:5000" 72 | 73 | docker_opts: 74 | # - "HTTP_PROXY=http://proxy.example.com:80/" 75 | # - "HTTPS_PROXY=https://proxy.example.com:443/" 76 | 77 | # Add http and https proxy 78 | proxy_env: 79 | http_proxy: "" # http://<user>:<password>@<host>:<port> 80 | https_proxy: "" # https://<user>:<password>@<host>:<port> 81 | 82 | ############# 83 | # Misc opts # 84 | ############# 85 | repos_offline: false # download packages from a custom url 86 | repos_port: 4040 87 | repos_offline_url: "http://172.16.35.9:{{ repos_port }}" 88 | -------------------------------------------------------------------------------- /roles/cluster-default/defaults/system-path.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | bin_dir: /usr/local/bin 4 | sbin_dir: /usr/local/sbin 5 | cache_dir: /tmp 6 | ld_config_dir: /etc/ld.so.conf.d 7 | 8 | systemd_service_dir: /lib/systemd/system 9 | systemd_env_dir: /etc/default 10 | systemd_dropin_dir: /etc/systemd/system 11 | 12 | docker_data_dir: /var/lib/docker 13 | docker_config_dir: /etc/docker 14 | nvidia_runtime_config_dir: /etc/nvidia-container-runtime 15 | libnvidia_header_file: /usr/local/include/nvc.h 16 | libnvidia_lib_dir: /usr/local/lib/libnvidia 17 | containerd_data_dir: /var/lib/containerd 18 | containerd_config_dir: /etc/containerd 19 | -------------------------------------------------------------------------------- /roles/cluster-default/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Configure Kubernetes default vars 4 | debug: 5 | msg: "Check roles/cluster-default/defaults/main.yml" 6 | tags: 7 | - always 8 | 9 | - name: Include vars of system-path.yml 10 | include_vars: 11 | file: defaults/system-path.yml 12 | 13 | - name: Include vars of cert-path.yml 14 | include_vars: 15 | file: defaults/cert-path.yml 16 | 17 | - name: Include vars of k8s-path.yml 18 | include_vars: 19 | file: defaults/k8s-path.yml 20 | 21 | - name: Include vars of etcd-path.yml 22 | include_vars: 23 | file: defaults/etcd-path.yml 24 | -------------------------------------------------------------------------------- /roles/cluster-reset/tasks/delete-k8s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set node to maintenance mode 4 |
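# Rendered with the cluster-default paths, the command below runs roughly:
#   /usr/local/bin/kubectl --kubeconfig=/etc/kubernetes/admin.conf \
#     drain <node-hostname> --ignore-daemonsets --force --delete-local-data
# once per host in the delete-nodes group (hostname placeholder illustrative).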
command: | 5 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 6 | drain {{ hostvars['' + item].ansible_hostname }} --ignore-daemonsets --force --delete-local-data 7 | register: set_drain 8 | delegate_to: "{{ groups['masters'][0] }}" 9 | run_once: true 10 | until: set_drain.rc == 0 11 | retries: 10 12 | delay: 2 13 | ignore_errors: true 14 | with_items: "{{ groups['delete-nodes'] }}" 15 | 16 | - name: Delete node 17 | command: | 18 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 19 | delete node {{ hostvars['' + item].ansible_hostname }} 20 | register: delete_node 21 | delegate_to: "{{ groups['masters'][0] }}" 22 | run_once: true 23 | until: delete_node.rc == 0 24 | retries: 10 25 | delay: 2 26 | ignore_errors: true 27 | with_items: "{{ groups['delete-nodes'] }}" -------------------------------------------------------------------------------- /roles/cluster-reset/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Include vars of package/defaults 4 | include_vars: 5 | file: ../download/package/defaults/main.yml 6 | 7 | - name: Delete Kubernetes nodes 8 | when: delete_k8s|default(false) 9 | include_tasks: delete-k8s.yml 10 | 11 | - name: Reset Kubernetes cluster 12 | when: reset_k8s|default(false) 13 | include_tasks: reset-k8s.yml 14 | 15 | - name: Reset etcd cluster 16 | when: reset_etcd|default(false) 17 | include_tasks: reset-etcd.yml 18 | 19 | -------------------------------------------------------------------------------- /roles/cluster-reset/tasks/reset-etcd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Stop etcd systemd service 4 | systemd: name=etcd state=stopped enabled=no 5 | ignore_errors: True 6 | 7 | - name: Delete etcd directories 8 | file: path="{{ item }}" state=absent 9 | with_items: 10 | - "{{ etcd_config_dir }}" 11 | - "{{ etcd_data_dir }}" 12 | - "{{ package.etcd.dir }}" 13 | ignore_errors: True 14 | -------------------------------------------------------------------------------- /roles/cluster-reset/tasks/reset-k8s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Stop kubelet systemd service 4 | systemd: name=kubelet.service state=stopped enabled=no 5 | ignore_errors: True 6 | register: stop_kubelet_service 7 | 8 | - name: Stop docker containers 9 | when: container_runtime == 'docker' or container_runtime == 'nvidia-docker' 10 | shell: "{{ bin_dir }}/docker stop $(docker ps -aq)" 11 | ignore_errors: True 12 | register: stop_docker_containers 13 | 14 | - name: Clean docker containers 15 | when: stop_docker_containers and (container_runtime == 'docker' or container_runtime == 'nvidia-docker') 16 | shell: "{{ bin_dir }}/docker rm $(docker ps -aq)" 17 | ignore_errors: True 18 | register: clean_docker_containers 19 | 20 | - name: Clean docker images 21 | when: clean_docker_containers and (container_runtime == 'docker' or container_runtime == 'nvidia-docker') 22 | shell: "{{ bin_dir }}/docker rmi $(docker images -aq)" 23 | ignore_errors: True 24 | register: clean_docker_images 25 | 26 | - name: Stop docker systemd service 27 | when: container_runtime == 'docker' or container_runtime == 'nvidia-docker' 28 | systemd: name={{ item }} state=stopped enabled=no 29 | with_items: 30 | - docker.service 31 | - docker.socket 32 | ignore_errors: True 33 | register: stop_docker_service 34 | 35 | - name: Stop containerd systemd service 36 | when: container_runtime ==
'containerd' 37 | systemd: name=containerd.service state=stopped enabled=no 38 | ignore_errors: True 39 | register: stop_containerd_service 40 | 41 | - name: Find kubelet mounted volumes 42 | when: stop_kubelet_service 43 | shell: "df | grep -o '/var/lib/kubelet.*'" 44 | ignore_errors: True 45 | register: find_mounts 46 | 47 | - name: Unmount kubelet volumes 48 | when: find_mounts 49 | mount: name={{ item }} state=unmounted 50 | with_items: "{{ find_mounts.stdout_lines | list }}" 51 | ignore_errors: True 52 | 53 | - name: Delete component directories 54 | when: stop_kubelet_service 55 | file: path="{{ item }}" state=absent 56 | with_items: 57 | - "{{ etc_dir }}" 58 | - "{{ cni_etc_dir }}" 59 | - "{{ kubelet_config_dir }}" 60 | - "{{ audit_log_dir }}" 61 | - "{{ haproxy_config_dir }}" 62 | - "{{ ansible_env.HOME }}/.kube" 63 | - "{{ ansible_env.HOME }}/.helm" 64 | - "{{ docker_data_dir }}" 65 | - "{{ docker_config_dir }}" 66 | - "{{ containerd_data_dir }}" 67 | - "{{ nvidia_runtime_config_dir }}" 68 | - "{{ libnvidia_header_file }}" 69 | - "{{ libnvidia_lib_dir }}" 70 | - "{{ package.docker.dir }}" 71 | - "{{ package.nvidia_docker.dir }}" 72 | - "{{ package.libnvidia_container.dir }}" 73 | - "{{ package.nvidia_container_runtime.dir }}" 74 | - "{{ package.containerd.dir }}" 75 | - "{{ package.kubelet.dir }}" 76 | - "{{ package.helm.dir }}" 77 | - "{{ package.cfssl.dir }}" 78 | - "{{ package.cni.dir }}" 79 | ignore_errors: True 80 | 81 | - name: Delete IP-in-IP tunnel 82 | when: stop_kubelet_service 83 | command: "modprobe -r ipip" 84 | ignore_errors: True 85 | 86 | - name: Find network interfaces for Kubernetes 87 | when: stop_kubelet_service 88 | shell: "ip addr | grep {{ item }}" 89 | with_items: 90 | - "docker0" 91 | - "flannel.1" 92 | - "cni0" 93 | - "tunl0" 94 | register: find_eths 95 | ignore_errors: True 96 | 97 | - name: Delete network interfaces for Kubernetes 98 | when: stop_kubelet_service and item.stdout != '' 99 | shell: "ip link delete {{ item.item }}" 100 | with_items: "{{ find_eths['results'] }}" 101 | ignore_errors: True 102 | 103 | - name: Find blackhole route rule 104 | when: stop_kubelet_service 105 | shell: "ip route | awk '/blackhole/ {print $2}'" 106 | register: find_blackhole 107 | ignore_errors: True 108 | 109 | - name: Delete blackhole route rule 110 | when: stop_kubelet_service and find_blackhole.stdout != '' 111 | shell: "ip route del {{ find_blackhole.stdout }}" 112 | ignore_errors: True 113 | -------------------------------------------------------------------------------- /roles/common/copy-files/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check the files already exist 4 | stat: 5 | path: "{{ item }}" 6 | with_items: "{{ files }}" 7 | register: check_files 8 | 9 | - name: Read the config files 10 | slurp: src={{ item.item }} 11 | with_items: "{{ check_files['results'] }}" 12 | delegate_to: "{{ groups['masters'][0] }}" 13 | run_once: true 14 | register: read_files 15 | 16 | - name: Write the content of files 17 | when: item[0].item == item[1].source and not item[0].stat.exists 18 | copy: 19 | mode: 0644 20 | content: "{{ item[1].content | b64decode }}" 21 | dest: "{{ item[1].source }}" 22 | with_nested: 23 | - "{{ check_files['results'] }}" 24 | - "{{ read_files['results'] }}" 25 | register: write_files 26 | -------------------------------------------------------------------------------- /roles/common/os-check/tasks/main.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Get os_version from /etc/os-release 4 | when: ansible_os_family is not defined 5 | raw: "grep '^VERSION_ID=' /etc/os-release | sed s'/VERSION_ID=//'" 6 | register: os_version 7 | 8 | - name: Get distro name from /etc/os-release 9 | when: ansible_os_family is not defined 10 | raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'" 11 | register: distro 12 | 13 | - name: Set fact ansible_os_family var to Debian 14 | when: 15 | - ansible_os_family is not defined 16 | - "'Ubuntu' in distro.stdout" 17 | set_fact: 18 | ansible_os_family: Debian 19 | 20 | - name: Set fact ansible_os_family var to RedHat 21 | when: 22 | - ansible_os_family is not defined 23 | - "'CentOS' in distro.stdout" 24 | set_fact: 25 | ansible_os_family: RedHat 26 | -------------------------------------------------------------------------------- /roles/container-runtime/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/container-runtime/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - { role: common/os-check } 5 | - { role: download/package, when: container_runtime == 'docker' or container_runtime == 'nvidia-docker', pkg: "{{ package.docker }}" } 6 | - { role: download/package, when: container_runtime == 'nvidia-docker', pkg: "{{ package.nvidia_docker }}" } 7 | - { role: download/package, when: container_runtime == 'nvidia-docker', pkg: "{{ package.nvidia_container_runtime }}" } 8 | - { role: download/package, when: container_runtime == 'nvidia-docker', pkg: "{{ package.libnvidia_container }}" } 9 | - { role: download/package, when: container_runtime == 'containerd', pkg: "{{ package.containerd }}" } 10 | -------------------------------------------------------------------------------- /roles/container-runtime/tasks/containerd/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure cni config directory exists 4 | file: path="{{ cni_etc_dir }}" state=directory 5 | 6 | - name: Ensure containerd config directory exists 7 | file: path="{{ containerd_config_dir }}" state=directory 8 | 9 | - name: Copy containerd service files 10 | template: 11 | src: "containerd/containerd.service.j2" 12 | dest: "{{ systemd_service_dir }}/containerd.service" 13 | owner: root 14 | group: root 15 | mode: 0644 16 | register: change_system_service 17 | 18 | - name: Enable and start containerd service 19 | when: change_system_service is succeeded 20 | systemd: 21 | name: containerd.service 22 | daemon_reload: yes 23 | state: started 24 | enabled: yes 25 | register: containerd_started 26 | -------------------------------------------------------------------------------- /roles/container-runtime/tasks/docker/config-opts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy Docker environment config file 4 | template: src=docker/docker.env.j2 dest={{ systemd_env_dir }}/docker 5 | 6 | - name: Add any insecure registries to Docker config 7 | when: insecure_registrys | default([], true) | length > 0 8 | lineinfile: 9 | dest: "{{ systemd_env_dir }}/docker" 10 | regexp: "^INSECURE_REGISTRY=" 11 | line: 'INSECURE_REGISTRY="{% for reg in insecure_registrys %}--insecure-registry={{ reg }} {% endfor %}"'
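# Example of the rendered line, assuming insecure_registrys: ["172.16.35.9:5000"]:
#   INSECURE_REGISTRY="--insecure-registry=172.16.35.9:5000 "
# (the trailing space comes from the loop; dockerd ignores it).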
12 | 13 | - name: Add registry to Docker config 14 | when: add_registry | default([], true) | length > 0 15 | lineinfile: 16 | dest: "{{ systemd_env_dir }}/docker" 17 | regexp: "^ADD_REGISTRY=" 18 | line: 'ADD_REGISTRY="{% for reg in add_registry %}--add-registry={{ reg }} {% endfor %}"' 19 | 20 | - name: Add extra options to Docker config 21 | when: docker_opts | default([], true) | length > 0 22 | lineinfile: 23 | dest: "{{ systemd_env_dir }}/docker" 24 | regexp: "^DOCKER_OPTS=" 25 | line: 'DOCKER_OPTS="{% for opt in docker_opts %}{{ opt }} {% endfor %}"' 26 | -------------------------------------------------------------------------------- /roles/container-runtime/tasks/docker/config-systemd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create Docker system user group 4 | group: 5 | name: docker 6 | state: present 7 | 8 | - name: Create Docker system user 9 | user: 10 | name: docker 11 | comment: "Docker user" 12 | shell: /sbin/nologin 13 | state: present 14 | system: yes 15 | groups: docker 16 | 17 | - name: Copy Docker engine service files from host 18 | template: 19 | src: "docker/{{ item }}.j2" 20 | dest: "{{ systemd_service_dir }}/{{ item }}" 21 | owner: root 22 | group: root 23 | mode: 0644 24 | with_items: 25 | - docker.service 26 | - docker.socket 27 | register: change_system_service 28 | -------------------------------------------------------------------------------- /roles/container-runtime/tasks/docker/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Add Docker systemd service 4 | - include_tasks: config-systemd.yml 5 | 6 | # Add Docker options 7 | - include_tasks: config-opts.yml 8 | 9 | - name: Enable and start Docker socket 10 | when: change_system_service is succeeded 11 | systemd: 12 | name: docker.socket 13 | daemon_reload: yes 14 | state: started 15 | enabled: yes 16 | register: docker_socket_started 17 | 18 | - name: Enable and restart Docker engine 19 | systemd: 20 | name: docker 21 | daemon_reload: yes 22 | state: restarted 23 | enabled: yes 24 | register: started_docker 25 | -------------------------------------------------------------------------------- /roles/container-runtime/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check NVIDIA driver with nvidia-smi 4 | when: container_runtime == 'nvidia-docker' 5 | raw: "nvidia-smi" 6 | ignore_errors: True 7 | register: check_driver 8 | 9 | # nvidia-docker runtime 10 | - when: 11 | - container_runtime == 'nvidia-docker' 12 | - "'not found' not in check_driver.stdout" 13 | include_tasks: nvidia-docker/main.yml 14 | 15 | # docker runtime 16 | - when: container_runtime == 'docker' or container_runtime == 'nvidia-docker' 17 | include_tasks: docker/main.yml 18 | 19 | # containerd runtime 20 | - when: container_runtime == 'containerd' 21 | include_tasks: containerd/main.yml 22 | 23 | - name: Check sysctl k8s file 24 | stat: path=/etc/sysctl.d/99-k8s.conf 25 | register: check_sysctl_conf 26 | 27 | - name: Write bridge-netfilter and ip-forward system variables 28 | when: not check_sysctl_conf.stat.exists 29 | copy: 30 | content: | 31 | net.ipv4.ip_forward=1 32 | net.bridge.bridge-nf-call-ip6tables=1 33 | net.bridge.bridge-nf-call-iptables=1 34 | dest: /etc/sysctl.d/99-k8s.conf 35 | register: copy_sysctl 36 | 37 | - name: Enable bridge-netfilter and ip-forward system variables 38 | when: copy_sysctl 39 | command: "sysctl -p /etc/sysctl.d/99-k8s.conf" 40 |
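# A quick manual check that the settings took effect (illustrative):
#   sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
# should print both keys with value 1 once the br_netfilter module is loaded.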
-------------------------------------------------------------------------------- /roles/container-runtime/tasks/nvidia-docker/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure docker config directory exists 4 | file: path="{{ docker_config_dir }}" state=directory 5 | 6 | - name: Ensure nvidia runtime config directory exists 7 | file: path="{{ nvidia_runtime_config_dir }}" state=directory 8 | 9 | - name: Copy nvidia docker and runtime config template 10 | template: 11 | src: "nvidia-docker/{{ item.file }}.j2" 12 | dest: "{{ item.dir }}/{{ item.file }}" 13 | owner: root 14 | group: root 15 | mode: 0644 16 | with_items: 17 | - { file: "daemon.json", dir: "{{ docker_config_dir }}" } 18 | - { file: "config.toml", dir: "{{ nvidia_runtime_config_dir }}" } 19 | - { file: "libnvidia.conf", dir: "{{ ld_config_dir }}" } 20 | register: copy_nvidia_config_files 21 | 22 | - name: Ensure libnvidia lib directory exists 23 | file: path="{{ libnvidia_lib_dir }}" state=directory 24 | 25 | - name: Copy libnvidia header file to system 26 | copy: 27 | src: "{{ package.libnvidia_container.dir }}/usr/local/include/nvc.h" 28 | dest: "{{ libnvidia_header_file }}" 29 | remote_src: yes 30 | 31 | - name: Copy libnvidia lib dir to system 32 | shell: | 33 | cp -rp {{ package.libnvidia_container.dir }}/usr/local/lib/* {{ libnvidia_lib_dir }} 34 | register: copy_libnvidia_lib 35 | 36 | - name: Configure dynamic linker for libnvidia 37 | when: copy_libnvidia_lib 38 | command: "ldconfig -v" -------------------------------------------------------------------------------- /roles/container-runtime/templates/containerd/containerd.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=containerd container runtime 3 | Documentation=https://containerd.io 4 | After=network.target 5 | 6 | [Service] 7 | ExecStartPre=/sbin/modprobe overlay 8 | ExecStartPre=/sbin/modprobe br_netfilter 9 | ExecStart={{ bin_dir }}/containerd 10 | Restart=on-failure 11 | StartLimitBurst=3 12 | StartLimitInterval=60s 13 | Delegate=yes 14 | KillMode=process 15 | OOMScoreAdjust=-999 16 | LimitNOFILE=1048576 17 | LimitNPROC=infinity 18 | LimitCORE=infinity 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /roles/container-runtime/templates/docker/docker.env.j2: -------------------------------------------------------------------------------- 1 | INSECURE_REGISTRY="" 2 | DOCKER_OPTS="" 3 | {% if ansible_os_family == "RedHat" -%} 4 | DOCKER_STORAGE_OPTIONS="--storage-driver=overlay" 5 | {% endif -%} 6 | -------------------------------------------------------------------------------- /roles/container-runtime/templates/docker/docker.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Engine 3 | After=network.target docker.socket 4 | 5 | [Service] 6 | Type=notify 7 | EnvironmentFile=-{{ systemd_env_dir }}/docker 8 | ExecStart={{ bin_dir }}/dockerd {% if ansible_os_family == 'Debian' -%} -H fd:// {% endif -%} \ 9 | $OPTIONS \ 10 | $DOCKER_STORAGE_OPTIONS \ 11 | $DOCKER_OPTS \ 12 | $DOCKER_NETWORK_OPTIONS \ 13 | $ADD_REGISTRY \ 14 | $BLOCK_REGISTRY \ 15 | $INSECURE_REGISTRY 16 | 17 | ExecReload=/bin/kill -s HUP $MAINPID 18 | Restart=on-failure 19 | StartLimitBurst=3 20 | StartLimitInterval=60s 21 | 22 | LimitNOFILE=1048576 23 | LimitNPROC=infinity 24 | LimitCORE=infinity 
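# TimeoutStartSec=0 below disables systemd's start timeout so a slow first
# start is not killed midway, while Delegate=yes and KillMode=process leave
# container processes running when only the daemon itself is restarted.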
25 | TimeoutStartSec=0 26 | Delegate=yes 27 | KillMode=process 28 | 29 | [Install] 30 | WantedBy=multi-user.target 31 | -------------------------------------------------------------------------------- /roles/container-runtime/templates/docker/docker.socket.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Socket for the API 3 | PartOf=docker.service 4 | 5 | [Socket] 6 | ListenStream=/var/run/docker.sock 7 | SocketMode=0660 8 | SocketUser=root 9 | SocketGroup=docker 10 | 11 | [Install] 12 | WantedBy=sockets.target 13 | -------------------------------------------------------------------------------- /roles/container-runtime/templates/nvidia-docker/config.toml.j2: -------------------------------------------------------------------------------- 1 | disable-require = false 2 | #swarm-resource = "DOCKER_RESOURCE_GPU" 3 | 4 | [nvidia-container-cli] 5 | #root = "/run/nvidia/driver" 6 | #path = "/usr/bin/nvidia-container-cli" 7 | environment = [] 8 | #debug = "/var/log/nvidia-container-runtime-hook.log" 9 | #ldcache = "/etc/ld.so.cache" 10 | load-kmods = true 11 | #no-cgroups = false 12 | #user = "root:video" 13 | ldconfig = "@/sbin/ldconfig.real" 14 | -------------------------------------------------------------------------------- /roles/container-runtime/templates/nvidia-docker/daemon.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "default-runtime": "nvidia", 3 | "runtimes": { 4 | "nvidia": { 5 | "path": "{{ bin_dir }}/nvidia-container-runtime", 6 | "runtimeArgs": [] 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /roles/container-runtime/templates/nvidia-docker/libnvidia.conf.j2: -------------------------------------------------------------------------------- 1 | {{ libnvidia_lib_dir }} 2 | -------------------------------------------------------------------------------- /roles/download/image/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Override registry repos 4 | when: 'img is defined' 5 | set_fact: 6 | repos: "{{ img.repos }}" 7 | 8 | - name: Override local registry repos 9 | when: 'img is defined and registry' 10 | set_fact: 11 | repos: "{{ registry_url }}/" 12 | 13 | - name: "Check {{ img.name }} image already exists" 14 | when: img is defined 15 | shell: | 16 | {{ bin_dir }}/docker images | awk '{print $1":"$2}' | grep -o '{{ repos }}{{ img.name }}:{{ img.tag }}' 17 | run_once: true 18 | ignore_errors: True 19 | register: check_image 20 | 21 | - name: "Pull {{ img.name }} image" 22 | when: img is defined and check_image.stdout == '' and pre_pull|default(true) 23 | command: | 24 | {{ bin_dir }}/docker pull {{ repos }}{{ img.name }}:{{ img.tag }} 25 | register: pull_result 26 | until: pull_result is succeeded 27 | retries: 4 28 | 29 | - name: Create archive tmp directory 30 | when: img is defined and archive|default(false) 31 | file: path="{{ dir.images }}" state=directory 32 | 33 | - name: "Archive {{ img.name }} image" 34 | when: 35 | - archive|default(false) 36 | - pull_result is succeeded or check_image.stdout != '' 37 | shell: | 38 | {{ bin_dir }}/docker save {{ repos }}{{ img.name }}:{{ img.tag }} \ 39 | > {{ cache_dir }}/{{ img.name }}-{{ img.tag }}-image.tar 40 | register: archive_image 41 | 42 | - name: "Tag {{ img.name }} image" 43 | when: 44 | - push|default(false) 45 | - pull_result is succeeded or check_image.stdout != ''
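# For each entry in insecure_registrys, the tag/push/remove tasks below render
# to roughly (registry address illustrative):
#   docker tag  <repos><name>:<tag>  172.16.35.9:5000/<name>:<tag>
#   docker push 172.16.35.9:5000/<name>:<tag>
#   docker rmi  172.16.35.9:5000/<name>:<tag>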
46 | command: | 47 | {{ bin_dir }}/docker tag {{ repos }}{{ img.name }}:{{ img.tag }} \ 48 | {{ item }}/{{ img.name }}:{{ img.tag }} 49 | with_items: "{{ insecure_registrys }}" 50 | register: tag_image 51 | 52 | - name: "Push {{ img.name }} image to registry" 53 | when: 54 | - tag_image 55 | - push|default(false) 56 | - pull_result is succeeded or check_image.stdout != '' 57 | command: | 58 | {{ bin_dir }}/docker push {{ item }}/{{ img.name }}:{{ img.tag }} 59 | with_items: "{{ insecure_registrys }}" 60 | register: push_image 61 | 62 | - name: "Remove {{ img.name }} image tag" 63 | when: 64 | - push_image 65 | - push|default(false) 66 | - pull_result is succeeded or check_image.stdout != '' 67 | command: | 68 | {{ bin_dir }}/docker rmi {{ item }}/{{ img.name }}:{{ img.tag }} 69 | with_items: "{{ insecure_registrys }}" 70 | register: remove_image 71 | -------------------------------------------------------------------------------- /roles/download/package/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | base: 4 | kubelet: 5 | url: "https://storage.googleapis.com" 6 | version: "{{ kube_version }}" 7 | kubectl: 8 | url: "https://storage.googleapis.com" 9 | version: "{{ kube_version }}" 10 | etcd: 11 | url: "https://github.com" 12 | version: 3.2.9 13 | docker: 14 | url: "https://download.docker.com" 15 | version: 18.03.0-ce 16 | nvidia_docker: 17 | url: "https://github.com" 18 | version: 2.0.3 19 | libnvidia_container: 20 | url: "https://github.com" 21 | version: 1.0.0-rc.2 22 | nvidia_container_runtime: 23 | url: "https://github.com" 24 | version: 1.4.0.1 25 | containerd: 26 | url: "https://storage.googleapis.com" 27 | version: 1.1.2 28 | cni: 29 | url: "https://github.com" 30 | version: 0.6.0 31 | helm: 32 | url: "https://kubernetes-helm.storage.googleapis.com" 33 | version: 2.9.1 34 | cfssl: 35 | url: "https://pkg.cfssl.org" 36 | version: 1.2 37 | cfssljson: 38 | url: "https://pkg.cfssl.org" 39 | version: 1.2 40 | 41 | package: 42 | kubectl: 43 | name: kubectl 44 | dir: /opt/kubernetes 45 | url: "{{ base.kubectl.url }}/kubernetes-release/release/v{{ base.kubectl.version }}/bin/linux/amd64" 46 | symlinks: 47 | - kubectl 48 | file: kubectl 49 | kubelet: 50 | name: kubelet 51 | dir: /opt/kubernetes 52 | url: "{{ base.kubelet.url }}/kubernetes-release/release/v{{ base.kubelet.version }}/bin/linux/amd64" 53 | symlinks: 54 | - kubelet 55 | file: kubelet 56 | etcd: 57 | name: etcd 58 | dir: /opt/etcd 59 | url: "{{ base.etcd.url }}/coreos/etcd/releases/download/v{{ base.etcd.version }}" 60 | unarchive: 61 | extra_opt: "['--strip-components=1']" 62 | symlinks: 63 | - etcd 64 | - etcdctl 65 | file: "etcd-v{{ base.etcd.version }}-linux-amd64.tar.gz" 66 | docker: 67 | name: docker 68 | dir: /opt/docker 69 | url: "{{ base.docker.url }}/linux/static/stable/x86_64" 70 | unarchive: 71 | extra_opt: "['--strip-components=1']" 72 | symlinks: 73 | - docker 74 | - dockerd 75 | - docker-containerd 76 | - docker-containerd-ctr 77 | - docker-containerd-shim 78 | - docker-init 79 | - docker-proxy 80 | - docker-runc 81 | file: "docker-{{ base.docker.version }}.tgz" 82 | nvidia_docker: 83 | name: nvidia-docker 84 | dir: /opt/nvidia-docker 85 | url: "{{ base.nvidia_docker.url }}/kairen/k8s-manual-files/releases/download/nvidia" 86 | unarchive: 87 | extra_opt: "['--strip-components=1']" 88 | symlinks: 89 | - usr/bin/nvidia-docker 90 | file: "nvidia-docker-v{{ base.nvidia_docker.version }}_amd64.tar.gz" 91 | libnvidia_container: 92 | name:
libnvidia-container 93 | dir: /opt/libnvidia-container 94 | url: "{{ base.libnvidia_container.url }}/kairen/k8s-manual-files/releases/download/nvidia" 95 | unarchive: 96 | extra_opt: "['--strip-components=1']" 97 | symlinks: 98 | - usr/local/bin/nvidia-container-cli 99 | file: "libnvidia-container-{{ base.libnvidia_container.version }}_amd64.tar.xz" 100 | nvidia_container_runtime: 101 | name: nvidia-container-runtime 102 | dir: /opt/nvidia-container-runtime 103 | url: "{{ base.nvidia_container_runtime.url }}/kairen/k8s-manual-files/releases/download/nvidia" 104 | unarchive: 105 | extra_opt: "['--strip-components=1']" 106 | symlinks: 107 | - usr/bin/nvidia-container-runtime 108 | - usr/bin/nvidia-container-runtime-hook 109 | file: "nvidia-container-runtime-v{{ base.nvidia_container_runtime.version }}_amd64.tar.gz" 110 | containerd: 111 | name: containerd 112 | dir: /opt/containerd 113 | url: "{{ base.containerd.url }}/cri-containerd-release" 114 | unarchive: 115 | extra_opt: "['--strip-components=0']" 116 | symlinks: 117 | - usr/local/bin/containerd 118 | - usr/local/bin/containerd-release 119 | - usr/local/bin/containerd-shim 120 | - usr/local/bin/containerd-stress 121 | - usr/local/bin/crictl 122 | - usr/local/bin/ctr 123 | - usr/local/sbin/runc 124 | file: "cri-containerd-cni-{{ base.containerd.version }}.linux-amd64.tar.gz" 125 | cni: 126 | name: cni 127 | dir: /opt/cni/bin 128 | url: "{{ base.cni.url }}/containernetworking/plugins/releases/download/v{{ base.cni.version }}" 129 | unarchive: 130 | extra_opt: "['--strip-components=0']" 131 | file: "cni-plugins-amd64-v{{ base.cni.version }}.tgz" 132 | helm: 133 | name: helm 134 | dir: /opt/helm 135 | url: "{{ base.helm.url }}" 136 | unarchive: 137 | extra_opt: "['--strip-components=1']" 138 | symlinks: 139 | - helm 140 | file: "helm-v{{ base.helm.version }}-linux-amd64.tar.gz" 141 | cfssl: 142 | name: cfssl 143 | dir: /opt/cfssl 144 | url: "{{ base.cfssl.url }}/R{{ base.cfssl.version }}" 145 | symlinks: 146 | - cfssl 147 | file: cfssl_linux-amd64 148 | cfssljson: 149 | name: cfssljson 150 | dir: /opt/cfssl 151 | url: "{{ base.cfssljson.url }}/R{{ base.cfssljson.version }}" 152 | symlinks: 153 | - cfssljson 154 | file: cfssljson_linux-amd64 155 | -------------------------------------------------------------------------------- /roles/download/package/tasks/archive.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check {{ pkg.name }} archive already exists" 4 | stat: path="{{ cache_dir }}/{{ pkg.file }}" 5 | register: pkg_check 6 | 7 | - name: "Downloading {{ pkg.name }} file" 8 | when: not pkg_check.stat.exists 9 | get_url: 10 | url: "{{ pkg.url }}/{{ pkg.file }}" 11 | dest: "{{ cache_dir }}/{{ pkg.file }}" 12 | validate_certs: False 13 | environment: "{{ proxy_env }}" 14 | register: pkg_download 15 | 16 | - name: "Extract {{ pkg.name }} file" 17 | unarchive: 18 | src: "{{ cache_dir }}/{{ pkg.file }}" 19 | dest: "{{ pkg.dir }}" 20 | copy: no 21 | extra_opts: "{{ pkg.unarchive.extra_opt }}" 22 | register: pkg_copy 23 | -------------------------------------------------------------------------------- /roles/download/package/tasks/binary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check {{ pkg.name }} binary already exists" 4 | stat: path="{{ cache_dir }}/{{ pkg.file }}-{{ base[''+ pkg.name].version }}" 5 | register: pkg_check 6 | 7 | - name: "Downloading {{ pkg.name }} file" 8 | when: not pkg_check.stat.exists 9 | 
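# The download is cached under a version-suffixed name, e.g. /tmp/kubectl-1.10.5
# (cache_dir plus the version from base), so the stat above skips re-downloads
# and version bumps never collide with stale files.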
get_url: 10 | url: "{{ pkg.url }}/{{ pkg.file }}" 11 | dest: "{{ cache_dir }}/{{ pkg.file }}-{{ base[''+ pkg.name].version }}" 12 | validate_certs: False 13 | environment: "{{ proxy_env }}" 14 | register: pkg_download 15 | 16 | - name: "Copy {{ pkg.name }} file to release directory" 17 | copy: 18 | src: "{{ cache_dir }}/{{ pkg.file }}-{{ base[''+ pkg.name].version }}" 19 | dest: "{{ pkg.dir }}/{{ item }}" 20 | owner: root 21 | group: root 22 | mode: 0755 23 | remote_src: True 24 | with_items: "{{ pkg['symlinks'] }}" 25 | register: pkg_copy 26 | -------------------------------------------------------------------------------- /roles/download/package/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Override local repository url 4 | when: 'pkg is defined and repos_offline' 5 | set_fact: 6 | base: > 7 | {{ base | 8 | combine({ pkg.name: { 9 | 'url': repos_offline_url, 10 | 'version': base[pkg.name]['version'] 11 | }}) 12 | }} 13 | 14 | - name: Create download binaries tmp directory 15 | when: pkg is defined 16 | file: path="{{ cache_dir }}" state=directory 17 | 18 | - name: "Create {{ pkg.name }} release directory" 19 | when: pkg is defined 20 | file: path="{{ pkg.dir }}" state=directory 21 | 22 | - name: Include download archive tasks 23 | when: 'pkg is defined and "unarchive" in pkg' 24 | include_tasks: archive.yml 25 | 26 | - name: Include download binary tasks 27 | when: 'pkg is defined and not "unarchive" in pkg' 28 | include_tasks: binary.yml 29 | 30 | - name: "Symlinks {{ pkg.name }} to {{ bin_dir }}" 31 | when: 'pkg is defined and "symlinks" in pkg and pkg_copy' 32 | file: 33 | src: "{{ pkg.dir }}/{{ item }}" 34 | dest: "{{ bin_dir }}/{{ item.split('/')[-1] }}" 35 | state: link 36 | force: yes 37 | with_items: "{{ pkg['symlinks'] }}" 38 | -------------------------------------------------------------------------------- /roles/etcd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | etcd_listen_addr: "{% if etcd_iface != '' %}{{ hostvars[inventory_hostname]['ansible_' + etcd_iface].ipv4.address }} 4 | {%- else %}{{ hostvars[inventory_hostname].ansible_default_ipv4.address }}{% endif %}" 5 | etcd_listen_client_urls: "https://127.0.0.1:2379,https://{{ etcd_listen_addr }}:2379" 6 | etcd_listen_peer_urls: "https://{{ etcd_listen_addr }}:2380" 7 | etcd_advertise_client_urls: "https://{{ etcd_listen_addr }}:2379" 8 | etcd_initial_advertise_peer_urls: "https://{{ etcd_listen_addr }}:2380" 9 | 10 | etcd_initial_cluster_state: new 11 | etcd_initial_token: k8s-etcd-cluster 12 | -------------------------------------------------------------------------------- /roles/etcd/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - { role: download/package, pkg: "{{ package.etcd }}" } 5 | -------------------------------------------------------------------------------- /roles/etcd/tasks/config-etcd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure etcd config directory exists 4 | file: 5 | path: "{{ etcd_config_dir }}" 6 | recurse: yes 7 | state: directory 8 | 9 | - name: Ensure etcd data directory exists 10 | file: 11 | path: "{{ etcd_data_dir }}" 12 | recurse: yes 13 | state: directory 14 | 15 | - name: Copy etcd config template file 16 | template: src=etcd-config.yml.j2 dest={{ etcd_config_file }} 17 | 
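# With etcd_iface left empty (the default), the listen/advertise URLs in
# roles/etcd/defaults/main.yml render from the host's default IPv4 address,
# e.g. etcd_listen_client_urls -> https://127.0.0.1:2379,https://172.16.35.11:2379
# (address illustrative).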
-------------------------------------------------------------------------------- /roles/etcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure etcd PKI directory already exists 4 | file: path={{ etcd_pki_dir }} state=directory 5 | register: create_etcd_pki_dir 6 | 7 | - name: Copy etcd certificates and keys 8 | vars: 9 | files: 10 | - "{{ etcd_ca }}" 11 | - "{{ etcd_ca_key }}" 12 | - "{{ etcd_cert }}" 13 | - "{{ etcd_cert_key }}" 14 | import_role: 15 | name: common/copy-files 16 | 17 | - include_tasks: config-etcd.yml 18 | 19 | - include_tasks: systemd-etcd.yml 20 | -------------------------------------------------------------------------------- /roles/etcd/tasks/systemd-etcd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy etcd systemd service file 4 | template: 5 | src: etcd.service.j2 6 | dest: "{{ systemd_service_dir }}/etcd.service" 7 | owner: root 8 | group: root 9 | mode: 0644 10 | register: change_etcd 11 | 12 | - name: Enable and restart etcd 13 | when: change_etcd 14 | systemd: 15 | name: etcd 16 | daemon_reload: yes 17 | state: restarted 18 | enabled: yes 19 | register: started_etcd 20 | -------------------------------------------------------------------------------- /roles/etcd/templates/etcd-config.yml.j2: -------------------------------------------------------------------------------- 1 | {% macro initial_cluster() -%} 2 | {% for host in groups['etcds'] -%} 3 | {{ hostvars[host]['ansible_hostname'] }}=https:// 4 | {%- if etcd_iface != "" -%} 5 | {{ hostvars[host]['ansible_' + etcd_iface].ipv4.address }} 6 | {%- else -%} 7 | {{ hostvars[host].ansible_default_ipv4.address }} 8 | {%- endif -%} 9 | :2380 10 | {%- if not loop.last -%},{%- endif -%} 11 | {%- endfor -%} 12 | {% endmacro -%} 13 | 14 | name: {{ ansible_hostname }} 15 | data-dir: {{ etcd_data_dir }} 16 | wal-dir: {{ etcd_wal_dir }} 17 | listen-peer-urls: '{{ etcd_listen_peer_urls }}' 18 | listen-client-urls: '{{ etcd_listen_client_urls }}' 19 | advertise-client-urls: '{{ etcd_advertise_client_urls }}' 20 | initial-advertise-peer-urls: '{{ etcd_initial_advertise_peer_urls }}' 21 | initial-cluster: '{{ initial_cluster() }}' 22 | max-snapshots: 3 23 | snapshot-count: 5000 24 | max-wals: 5 25 | heartbeat-interval: 100 26 | election-timeout: 1000 27 | quota-backend-bytes: 0 28 | discovery-fallback: 'proxy' 29 | initial-cluster-token: '{{ etcd_initial_token }}' 30 | initial-cluster-state: '{{ etcd_initial_cluster_state }}' 31 | strict-reconfig-check: false 32 | enable-v2: true 33 | enable-pprof: true 34 | proxy: 'off' 35 | proxy-failure-wait: 5000 36 | proxy-refresh-interval: 30000 37 | proxy-dial-timeout: 1000 38 | proxy-write-timeout: 5000 39 | proxy-read-timeout: 0 40 | client-transport-security: 41 | ca-file: {{ etcd_ca }} 42 | trusted-ca-file: {{ etcd_ca }} 43 | cert-file: {{ etcd_cert }} 44 | key-file: {{ etcd_cert_key }} 45 | client-cert-auth: true 46 | auto-tls: true 47 | peer-transport-security: 48 | ca-file: {{ etcd_ca }} 49 | trusted-ca-file: {{ etcd_ca }} 50 | cert-file: {{ etcd_cert }} 51 | key-file: {{ etcd_cert_key }} 52 | peer-client-cert-auth: true 53 | auto-tls: true 54 | debug: false 55 | log-package-levels: 56 | log-output: default 57 | force-new-cluster: false 58 | -------------------------------------------------------------------------------- /roles/etcd/templates/etcd.service.j2: -------------------------------------------------------------------------------- 
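# Once the unit below is active, membership can be spot-checked with, e.g.:
#   ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
#     --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
#     --cert=/etc/kubernetes/pki/etcd/etcd.pem \
#     --key=/etc/kubernetes/pki/etcd/etcd-key.pem member list
# (a hedged example using the cert-path.yml default paths and etcd v3.2's etcdctl).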
1 | [Unit] 2 | Description=Etcd Service 3 | After=network.target 4 | 5 | [Service] 6 | Type=notify 7 | ExecStart={{ bin_dir }}/etcd --config-file={{ etcd_config_file }} 8 | Restart=on-failure 9 | RestartSec=10 10 | LimitNOFILE=65536 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | Alias=etcd3.service 15 | -------------------------------------------------------------------------------- /roles/k8s-addon/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | addons: 4 | kubeproxy: 5 | name: kube-proxy 6 | dirs: 7 | - kubeproxy 8 | templates: 9 | - kubeproxy/kubeproxy-sa.yml 10 | - kubeproxy/kubeproxy-rbac.yml 11 | - kubeproxy/kubeproxy-cm.yml 12 | - kubeproxy/kubeproxy-ds.yml 13 | kubedns: 14 | name: kube-dns 15 | dirs: 16 | - kubedns 17 | templates: 18 | - kubedns/kubedns-sa.yml 19 | - kubedns/kubedns-rbac.yml 20 | - kubedns/kubedns-cm.yml 21 | - kubedns/kubedns-svc.yml 22 | - kubedns/kubedns-dp.yml 23 | dashboard: 24 | name: kubernetes-dashboard 25 | dirs: 26 | - dashboard 27 | templates: 28 | - dashboard/dashboard-sa.yml 29 | - dashboard/dashboard-rbac.yml 30 | - dashboard/dashboard-anonymous-rbac.yml 31 | - dashboard/dashboard-secret.yml 32 | - dashboard/dashboard-svc.yml 33 | - dashboard/dashboard-dp.yml 34 | metric_server: 35 | name: metric-server 36 | dirs: 37 | - metric-server 38 | templates: 39 | - metric-server/metric-server-sa.yml 40 | - metric-server/metrics-server-rbac.yml 41 | - metric-server/metrics-apiservice.yml 42 | - metric-server/metrics-server-svc.yml 43 | - metric-server/metrics-server-dp.yml 44 | ingress_nginx: 45 | name: ingress-nginx 46 | dirs: 47 | - ingress-nginx 48 | dependencies: 49 | - name: ingress-nginx 50 | kind: ns 51 | files: 52 | - ingress-nginx/ingress-controller-ns.yml 53 | templates: 54 | - ingress-nginx/ingress-controller-sa.yml 55 | - ingress-nginx/ingress-controller-cm.yml 56 | - ingress-nginx/ingress-controller-rbac.yml 57 | - ingress-nginx/ingress-controller-svc.yml 58 | - ingress-nginx/ingress-controller-dp.yml 59 | logging: 60 | name: kube-logging 61 | dirs: 62 | - logging/es 63 | - logging/fluentd 64 | - logging/kibana 65 | templates: 66 | - logging/es/elasticsearch-sa.yml 67 | - logging/es/elasticsearch-rbac.yml 68 | - logging/es/elasticsearch-svc.yml 69 | - logging/es/elasticsearch-sts.yml 70 | - logging/fluentd/fluentd-sa.yml 71 | - logging/fluentd/fluentd-rbac.yml 72 | - logging/fluentd/fluentd-es-cm.yml 73 | - logging/fluentd/fluentd-es-ds.yml 74 | - logging/kibana/kibana-svc.yml 75 | - logging/kibana/kibana-dp.yml 76 | monitoring: 77 | name: prometheus-monitoring 78 | dirs: 79 | - monitoring/operator 80 | - monitoring/alertmanater 81 | - monitoring/node-exporter 82 | - monitoring/kube-state-metrics 83 | - monitoring/grafana 84 | - monitoring/service-discovery 85 | - monitoring/prometheus 86 | - monitoring/servicemonitor 87 | dependencies: 88 | - name: monitoring 89 | kind: ns 90 | files: 91 | - monitoring/monitoring-ns.yml 92 | - name: prometheus-operator 93 | kind: pod 94 | namespace: monitoring 95 | files: 96 | - monitoring/operator/operator-sa.yml 97 | - monitoring/operator/operator-rbac.yml 98 | - monitoring/operator/operator-svc.yml 99 | - monitoring/operator/operator-dp.yml 100 | files: 101 | - monitoring/grafana/grafana-definitions.yml 102 | - monitoring/prometheus/prometheus-rules.yml 103 | templates: 104 | - monitoring/alertmanater/alertmanager-main-sa.yml 105 | - monitoring/alertmanater/alertmanager-main-secret.yml 106 | - 
monitoring/alertmanater/alertmanager-main-svc.yml 107 | - monitoring/alertmanater/alertmanager-main.yml 108 | - monitoring/node-exporter/node-exporter-sa.yml 109 | - monitoring/node-exporter/node-exporter-rbac.yml 110 | - monitoring/node-exporter/node-exporter-svc.yml 111 | - monitoring/node-exporter/node-exporter-ds.yml 112 | - monitoring/kube-state-metrics/kube-state-metrics-sa.yml 113 | - monitoring/kube-state-metrics/kube-state-metrics-rbac.yml 114 | - monitoring/kube-state-metrics/kube-state-metrics-svc.yml 115 | - monitoring/kube-state-metrics/kube-state-metrics-dp.yml 116 | - monitoring/grafana/grafana-sa.yml 117 | - monitoring/grafana/grafana-source.yml 118 | - monitoring/grafana/grafana-datasources.yml 119 | - monitoring/grafana/grafana-admin-secret.yml 120 | - monitoring/grafana/grafana-svc.yml 121 | - monitoring/grafana/grafana-dp.yml 122 | - monitoring/service-discovery/kube-controller-manager-svc.yml 123 | - monitoring/service-discovery/kube-scheduler-svc.yml 124 | - monitoring/prometheus/prometheus-sa.yml 125 | - monitoring/prometheus/prometheus-rbac.yml 126 | - monitoring/prometheus/prometheus-svc.yml 127 | - monitoring/prometheus/prometheus-main.yml 128 | - monitoring/servicemonitor/alertmanager-sm.yml 129 | - monitoring/servicemonitor/coredns-sm.yml 130 | - monitoring/servicemonitor/kube-apiserver-sm.yml 131 | - monitoring/servicemonitor/kube-controller-manager-sm.yml 132 | - monitoring/servicemonitor/kube-scheduler-sm.yml 133 | - monitoring/servicemonitor/kubelet-sm.yml 134 | - monitoring/servicemonitor/kubestate-metrics-sm.yml 135 | - monitoring/servicemonitor/node-exporter-sm.yml 136 | - monitoring/servicemonitor/prometheus-operator-sm.yml 137 | - monitoring/servicemonitor/prometheus-sm.yml 138 | -------------------------------------------------------------------------------- /roles/k8s-addon/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure {{ addon.name }} addons directory exists" 4 | when: addon is defined 5 | file: path="{{ addon_dir }}/{{ item }}" state=directory 6 | with_items: "{{ addon.dirs }}" 7 | 8 | - name: "Copy {{ addon.name }} addon template dependencies" 9 | when: addon is defined and addon.dependencies is defined 10 | template: 11 | src: "{{ item.1 }}.j2" 12 | dest: "{{ addon_dir }}/{{ item.1 }}" 13 | with_subelements: 14 | - "{{ addon.dependencies }}" 15 | - files 16 | delegate_to: "{{ groups['masters'][0] }}" 17 | run_once: true 18 | register: copy_addon_deps 19 | 20 | - name: "Apply {{ addon.name }} addon dependencies" 21 | when: addon is defined and addon.dependencies is defined and copy_addon_deps 22 | command: | 23 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 24 | apply -f {{ addon_dir }}/{{ item.1 }} 25 | with_subelements: 26 | - "{{ addon.dependencies }}" 27 | - files 28 | delegate_to: "{{ groups['masters'][0] }}" 29 | run_once: true 30 | register: apply_addon_deps 31 | until: apply_addon_deps.rc == 0 32 | retries: 10 33 | delay: 2 34 | 35 | - name: "Check {{ addon.name }} addon dependencies status" 36 | when: addon is defined and addon.dependencies is defined and apply_addon_deps 37 | shell: | 38 | {% if item.kind == 'ns' %} 39 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 40 | get {{ item.kind }} --no-headers | \ 41 | grep {{ item.name }} | awk '{ print $2}' | uniq 42 | {% else %} 43 | {{ bin_dir }}/kubectl -n {{ item.namespace }} --kubeconfig={{ admin_kubeconfig }} \ 44 | get {{ item.kind }} --no-headers | \ 45 | grep {{ item.name }} 
| awk '{ print $3}' | uniq 46 | {% endif %} 47 | with_items: "{{ addon.dependencies }}" 48 | delegate_to: "{{ groups['masters'][0] }}" 49 | run_once: true 50 | register: check_addon_deps 51 | until: check_addon_deps.stdout == 'Running' or check_addon_deps.stdout == 'Active' 52 | retries: 10 53 | delay: 10 54 | 55 | - name: "Copy {{ addon.name }} addon files" 56 | when: addon is defined and addon.files is defined 57 | copy: 58 | src: "{{ item }}" 59 | dest: "{{ addon_dir }}/{{ item }}" 60 | with_items: "{{ addon.files }}" 61 | delegate_to: "{{ groups['masters'][0] }}" 62 | run_once: true 63 | register: copy_addon_files 64 | 65 | - name: "Copy {{ addon.name }} addon template files" 66 | when: addon is defined 67 | template: 68 | src: "{{ item }}.j2" 69 | dest: "{{ addon_dir }}/{{ item }}" 70 | with_items: "{{ addon.templates }}" 71 | delegate_to: "{{ groups['masters'][0] }}" 72 | run_once: true 73 | register: copy_addon_templates 74 | 75 | - name: "Apply {{ addon.name }} addon files" 76 | when: addon is defined and copy_addon_templates 77 | command: | 78 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 79 | apply -f {{ addon_dir }}/{{ item }} 80 | with_items: "{{ addon.dirs }}" 81 | delegate_to: "{{ groups['masters'][0] }}" 82 | run_once: true 83 | register: apply_addon 84 | until: apply_addon.rc == 0 85 | retries: 10 86 | delay: 2 87 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/dashboard/dashboard-anonymous-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: anonymous-dashboard-proxy-role 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - "services/proxy" 10 | resourceNames: 11 | - "https:kubernetes-dashboard:" 12 | verbs: 13 | - get 14 | - create 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: ClusterRoleBinding 18 | metadata: 19 | name: anonymous-dashboard-proxy-binding 20 | namespace: "" 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: anonymous-dashboard-proxy-role 25 | subjects: 26 | - apiGroup: rbac.authorization.k8s.io 27 | kind: User 28 | name: system:anonymous 29 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/dashboard/dashboard-dp.yml.j2: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: extensions/v1beta1 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | namespace: kube-system 8 | spec: 9 | replicas: 1 10 | revisionHistoryLimit: 10 11 | selector: 12 | matchLabels: 13 | k8s-app: kubernetes-dashboard 14 | template: 15 | metadata: 16 | labels: 17 | k8s-app: kubernetes-dashboard 18 | spec: 19 | containers: 20 | - name: kubernetes-dashboard 21 | image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3 22 | ports: 23 | - containerPort: 8443 24 | protocol: TCP 25 | args: 26 | - --auto-generate-certificates 27 | volumeMounts: 28 | - name: kubernetes-dashboard-certs 29 | mountPath: /certs 30 | - mountPath: /tmp 31 | name: tmp-volume 32 | livenessProbe: 33 | httpGet: 34 | scheme: HTTPS 35 | path: / 36 | port: 8443 37 | initialDelaySeconds: 30 38 | timeoutSeconds: 30 39 | volumes: 40 | - name: kubernetes-dashboard-certs 41 | secret: 42 | secretName: kubernetes-dashboard-certs 43 | - name: tmp-volume 44 | emptyDir: {} 45 | serviceAccountName: 
kubernetes-dashboard 46 | tolerations: 47 | - key: node-role.kubernetes.io/master 48 | effect: NoSchedule 49 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/dashboard/dashboard-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: kubernetes-dashboard-minimal 5 | namespace: kube-system 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["secrets"] 9 | verbs: ["create"] 10 | - apiGroups: [""] 11 | resources: ["configmaps"] 12 | verbs: ["create"] 13 | - apiGroups: [""] 14 | resources: ["secrets"] 15 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"] 16 | verbs: ["get", "update", "delete"] 17 | - apiGroups: [""] 18 | resources: ["configmaps"] 19 | resourceNames: ["kubernetes-dashboard-settings"] 20 | verbs: ["get", "update"] 21 | - apiGroups: [""] 22 | resources: ["services"] 23 | resourceNames: ["heapster"] 24 | verbs: ["proxy"] 25 | - apiGroups: [""] 26 | resources: ["services/proxy"] 27 | resourceNames: ["heapster", "http:heapster:", "https:heapster:"] 28 | verbs: ["get"] 29 | --- 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | kind: RoleBinding 32 | metadata: 33 | name: kubernetes-dashboard-minimal 34 | namespace: kube-system 35 | roleRef: 36 | apiGroup: rbac.authorization.k8s.io 37 | kind: Role 38 | name: kubernetes-dashboard-minimal 39 | subjects: 40 | - kind: ServiceAccount 41 | name: kubernetes-dashboard 42 | namespace: kube-system 43 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/dashboard/dashboard-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | namespace: kube-system 8 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/dashboard/dashboard-secret.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard-certs 7 | namespace: kube-system 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/dashboard/dashboard-svc.yml.j2: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | namespace: kube-system 8 | spec: 9 | ports: 10 | - port: 443 11 | targetPort: 8443 12 | selector: 13 | k8s-app: kubernetes-dashboard 14 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/ingress-nginx/ingress-controller-cm.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-configuration 5 | namespace: ingress-nginx 6 | labels: 7 | app: ingress-nginx 8 | --- 9 | kind: ConfigMap 10 | apiVersion: v1 11 | metadata: 12 | name: tcp-services 13 | namespace: ingress-nginx 14 | data: 15 | 53: "external-dns/coredns-tcp:53" 16 | --- 17 | kind: ConfigMap 18 | apiVersion: v1 19 | metadata: 20 | name: udp-services 21 | namespace: ingress-nginx 22 | data: 23 | 53: 
"external-dns/coredns-udp:53" 24 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/ingress-nginx/ingress-controller-dp.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: default-http-backend 5 | labels: 6 | app: default-http-backend 7 | namespace: ingress-nginx 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: default-http-backend 13 | template: 14 | metadata: 15 | labels: 16 | app: default-http-backend 17 | spec: 18 | terminationGracePeriodSeconds: 60 19 | containers: 20 | - name: default-http-backend 21 | image: gcr.io/google_containers/defaultbackend:1.4 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | scheme: HTTP 27 | initialDelaySeconds: 30 28 | timeoutSeconds: 5 29 | ports: 30 | - containerPort: 8080 31 | resources: 32 | limits: 33 | cpu: 10m 34 | memory: 20Mi 35 | requests: 36 | cpu: 10m 37 | memory: 20Mi 38 | --- 39 | apiVersion: extensions/v1beta1 40 | kind: Deployment 41 | metadata: 42 | name: nginx-ingress-controller 43 | namespace: ingress-nginx 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: ingress-nginx 49 | template: 50 | metadata: 51 | labels: 52 | app: ingress-nginx 53 | annotations: 54 | prometheus.io/port: '10254' 55 | prometheus.io/scrape: 'true' 56 | spec: 57 | serviceAccountName: nginx-ingress-serviceaccount 58 | containers: 59 | - name: nginx-ingress-controller 60 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.17.1 61 | args: 62 | - /nginx-ingress-controller 63 | - --default-backend-service=$(POD_NAMESPACE)/default-http-backend 64 | - --configmap=$(POD_NAMESPACE)/nginx-configuration 65 | - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services 66 | - --udp-services-configmap=$(POD_NAMESPACE)/udp-services 67 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx 68 | - --annotations-prefix=nginx.ingress.kubernetes.io 69 | securityContext: 70 | capabilities: 71 | drop: 72 | - ALL 73 | add: 74 | - NET_BIND_SERVICE 75 | runAsUser: 33 76 | env: 77 | - name: POD_NAME 78 | valueFrom: 79 | fieldRef: 80 | fieldPath: metadata.name 81 | - name: POD_NAMESPACE 82 | valueFrom: 83 | fieldRef: 84 | fieldPath: metadata.namespace 85 | ports: 86 | - name: http 87 | containerPort: 80 88 | - name: https 89 | containerPort: 443 90 | livenessProbe: 91 | failureThreshold: 3 92 | httpGet: 93 | path: /healthz 94 | port: 10254 95 | scheme: HTTP 96 | initialDelaySeconds: 10 97 | periodSeconds: 10 98 | successThreshold: 1 99 | timeoutSeconds: 1 100 | readinessProbe: 101 | failureThreshold: 3 102 | httpGet: 103 | path: /healthz 104 | port: 10254 105 | scheme: HTTP 106 | periodSeconds: 10 107 | successThreshold: 1 108 | timeoutSeconds: 1 109 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/ingress-nginx/ingress-controller-ns.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress-nginx 5 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/ingress-nginx/ingress-controller-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: nginx-ingress-clusterrole 5 | rules: 6 | - apiGroups: 
7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "extensions" 33 | resources: 34 | - ingresses 35 | verbs: 36 | - get 37 | - list 38 | - watch 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - events 43 | verbs: 44 | - create 45 | - patch 46 | - apiGroups: 47 | - "extensions" 48 | resources: 49 | - ingresses/status 50 | verbs: 51 | - update 52 | --- 53 | apiVersion: rbac.authorization.k8s.io/v1beta1 54 | kind: Role 55 | metadata: 56 | name: nginx-ingress-role 57 | namespace: ingress-nginx 58 | rules: 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - configmaps 63 | - pods 64 | - secrets 65 | - namespaces 66 | verbs: 67 | - get 68 | - apiGroups: 69 | - "" 70 | resources: 71 | - configmaps 72 | resourceNames: 73 | - "ingress-controller-leader-nginx" 74 | verbs: 75 | - get 76 | - update 77 | - apiGroups: 78 | - "" 79 | resources: 80 | - configmaps 81 | verbs: 82 | - create 83 | - apiGroups: 84 | - "" 85 | resources: 86 | - endpoints 87 | verbs: 88 | - get 89 | --- 90 | apiVersion: rbac.authorization.k8s.io/v1beta1 91 | kind: RoleBinding 92 | metadata: 93 | name: nginx-ingress-role-nisa-binding 94 | namespace: ingress-nginx 95 | roleRef: 96 | apiGroup: rbac.authorization.k8s.io 97 | kind: Role 98 | name: nginx-ingress-role 99 | subjects: 100 | - kind: ServiceAccount 101 | name: nginx-ingress-serviceaccount 102 | namespace: ingress-nginx 103 | --- 104 | apiVersion: rbac.authorization.k8s.io/v1beta1 105 | kind: ClusterRoleBinding 106 | metadata: 107 | name: nginx-ingress-clusterrole-nisa-binding 108 | roleRef: 109 | apiGroup: rbac.authorization.k8s.io 110 | kind: ClusterRole 111 | name: nginx-ingress-clusterrole 112 | subjects: 113 | - kind: ServiceAccount 114 | name: nginx-ingress-serviceaccount 115 | namespace: ingress-nginx 116 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/ingress-nginx/ingress-controller-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nginx-ingress-serviceaccount 5 | namespace: ingress-nginx 6 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/ingress-nginx/ingress-controller-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ingress-nginx 5 | namespace: ingress-nginx 6 | labels: 7 | app: ingress-nginx 8 | spec: 9 | type: LoadBalancer 10 | externalIPs: 11 | - {{ ingress_lb_address }} 12 | ports: 13 | - port: 80 14 | targetPort: 80 15 | selector: 16 | app: ingress-nginx 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: default-http-backend 22 | namespace: ingress-nginx 23 | labels: 24 | app: default-http-backend 25 | spec: 26 | ports: 27 | - port: 80 28 | targetPort: 8080 29 | selector: 30 | app: default-http-backend 31 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubedns/kubedns-cm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | Corefile: | 4 | .:53 { 5 | errors 6 | health 7 | kubernetes 
{{ cluster_domain_name }} in-addr.arpa ip6.arpa { 8 | pods insecure 9 | upstream 10 | fallthrough in-addr.arpa ip6.arpa 11 | } 12 | prometheus :9153 13 | proxy . /etc/resolv.conf 14 | cache 30 15 | loop 16 | reload 17 | loadbalance 18 | } 19 | kind: ConfigMap 20 | metadata: 21 | name: coredns 22 | namespace: kube-system 23 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubedns/kubedns-dp.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | kubernetes.io/name: "CoreDNS" 9 | spec: 10 | replicas: 2 11 | strategy: 12 | type: RollingUpdate 13 | rollingUpdate: 14 | maxUnavailable: 1 15 | selector: 16 | matchLabels: 17 | k8s-app: kube-dns 18 | template: 19 | metadata: 20 | labels: 21 | k8s-app: kube-dns 22 | spec: 23 | serviceAccountName: coredns 24 | priorityClassName: system-cluster-critical 25 | tolerations: 26 | - key: node-role.kubernetes.io/master 27 | effect: NoSchedule 28 | - key: "CriticalAddonsOnly" 29 | operator: "Exists" 30 | nodeSelector: 31 | beta.kubernetes.io/os: linux 32 | containers: 33 | - name: coredns 34 | image: coredns/coredns:1.2.5 35 | imagePullPolicy: IfNotPresent 36 | resources: 37 | limits: 38 | memory: 170Mi 39 | requests: 40 | cpu: 100m 41 | memory: 70Mi 42 | args: [ "-conf", "/etc/coredns/Corefile" ] 43 | volumeMounts: 44 | - name: config-volume 45 | mountPath: /etc/coredns 46 | readOnly: true 47 | ports: 48 | - containerPort: 53 49 | name: dns 50 | protocol: UDP 51 | - containerPort: 53 52 | name: dns-tcp 53 | protocol: TCP 54 | - containerPort: 9153 55 | name: metrics 56 | protocol: TCP 57 | securityContext: 58 | allowPrivilegeEscalation: false 59 | capabilities: 60 | add: 61 | - NET_BIND_SERVICE 62 | drop: 63 | - all 64 | readOnlyRootFilesystem: true 65 | livenessProbe: 66 | httpGet: 67 | path: /health 68 | port: 8080 69 | scheme: HTTP 70 | initialDelaySeconds: 60 71 | timeoutSeconds: 5 72 | successThreshold: 1 73 | failureThreshold: 5 74 | dnsPolicy: Default 75 | volumes: 76 | - name: config-volume 77 | configMap: 78 | name: coredns 79 | items: 80 | - key: Corefile 81 | path: Corefile -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubedns/kubedns-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | kubernetes.io/bootstrapping: rbac-defaults 6 | name: system:coredns 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - endpoints 12 | - services 13 | - pods 14 | - namespaces 15 | verbs: 16 | - list 17 | - watch 18 | - apiGroups: 19 | - "" 20 | resources: 21 | - nodes 22 | verbs: 23 | - get 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1beta1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | annotations: 29 | rbac.authorization.kubernetes.io/autoupdate: "true" 30 | labels: 31 | kubernetes.io/bootstrapping: rbac-defaults 32 | name: system:coredns 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: ClusterRole 36 | name: system:coredns 37 | subjects: 38 | - kind: ServiceAccount 39 | name: coredns 40 | namespace: kube-system -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubedns/kubedns-sa.yml.j2: 
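Once the CoreDNS manifests above are applied, in-cluster resolution can be smoke-tested from the first master. A minimal sketch in this role's own idiom ({{ bin_dir }}, {{ admin_kubeconfig }}, and the masters group come from the surrounding playbook; the busybox image tag is only illustrative):

- name: Verify in-cluster DNS resolution
  command: |
    {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \
      run dns-smoke --rm -i --restart=Never \
      --image=busybox:1.28 -- nslookup kubernetes.default
  delegate_to: "{{ groups['masters'][0] }}"
  run_once: true
  register: dns_smoke
  failed_when: "'kubernetes.default' not in dns_smoke.stdout"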
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubedns/kubedns-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | annotations: 7 | prometheus.io/port: "9153" 8 | prometheus.io/scrape: "true" 9 | labels: 10 | k8s-app: kube-dns 11 | kubernetes.io/cluster-service: "true" 12 | kubernetes.io/name: "CoreDNS" 13 | spec: 14 | selector: 15 | k8s-app: kube-dns 16 | clusterIP: {{ cluster_dns_ip }} 17 | ports: 18 | - name: dns 19 | port: 53 20 | protocol: UDP 21 | - name: dns-tcp 22 | port: 53 23 | protocol: TCP 24 | - name: http-metrics 25 | port: 9153 26 | protocol: TCP 27 | targetPort: 9153 -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubeproxy/kubeproxy-cm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | config.conf: |- 4 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 5 | kind: KubeProxyConfiguration 6 | bindAddress: {{ kubeproxy_bind_address }} 7 | clientConnection: 8 | acceptContentTypes: "" 9 | burst: 10 10 | contentType: application/vnd.kubernetes.protobuf 11 | kubeconfig: /var/lib/kube-proxy/kubeconfig.conf 12 | qps: 5 13 | clusterCIDR: {{ pod_network_cidr }} 14 | configSyncPeriod: 15m0s 15 | conntrack: 16 | maxPerCore: 32768 17 | min: 131072 18 | tcpCloseWaitTimeout: 1h0m0s 19 | tcpEstablishedTimeout: 24h0m0s 20 | enableProfiling: false 21 | healthzBindAddress: {{ kubeproxy_healthz_bind_address }} 22 | hostnameOverride: "" 23 | iptables: 24 | masqueradeAll: false 25 | masqueradeBit: 14 26 | minSyncPeriod: 0s 27 | syncPeriod: 30s 28 | ipvs: 29 | minSyncPeriod: 0s 30 | scheduler: {{ kubeproxy_ipvs_scheduler }} 31 | syncPeriod: 30s 32 | metricsBindAddress: {{ kubeproxy_metrics_bind_address }} 33 | mode: {{ kubeproxy_mode }} 34 | featureGates: 35 | SupportIPVSProxyMode: true 36 | oomScoreAdj: -999 37 | portRange: "" 38 | resourceContainer: /kube-proxy 39 | udpIdleTimeout: 250ms 40 | kubeconfig.conf: |- 41 | apiVersion: v1 42 | kind: Config 43 | clusters: 44 | - cluster: 45 | certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 46 | server: {{ lb_api_url }} 47 | name: default 48 | contexts: 49 | - context: 50 | cluster: default 51 | namespace: default 52 | user: default 53 | name: default 54 | current-context: default 55 | users: 56 | - name: default 57 | user: 58 | tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 59 | kind: ConfigMap 60 | metadata: 61 | labels: 62 | app: kube-proxy 63 | name: kube-proxy 64 | namespace: kube-system 65 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubeproxy/kubeproxy-ds.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | k8s-app: kube-proxy 6 | name: kube-proxy 7 | namespace: kube-system 8 | spec: 9 | selector: 10 | matchLabels: 11 | k8s-app: kube-proxy 12 | template: 13 | metadata: 14 | annotations: 15 | scheduler.alpha.kubernetes.io/critical-pod: "" 16 | labels: 17 | k8s-app: kube-proxy 18 | spec: 19 | serviceAccount: kube-proxy 20 | 
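# The KubeProxyConfiguration above is driven by role variables. A plausible
# defaults sketch (the variable names come from the template; the values
# shown are assumptions, not this repository's actual defaults):
#
#   kubeproxy_bind_address: 0.0.0.0
#   kubeproxy_healthz_bind_address: 0.0.0.0:10256
#   kubeproxy_metrics_bind_address: 127.0.0.1:10249
#   kubeproxy_mode: ipvs
#   kubeproxy_ipvs_scheduler: rr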
serviceAccountName: kube-proxy 21 | priorityClassName: system-node-critical 22 | tolerations: 23 | - key: CriticalAddonsOnly 24 | operator: Exists 25 | - effect: NoSchedule 26 | key: node-role.kubernetes.io/master 27 | hostNetwork: true 28 | containers: 29 | - name: kube-proxy 30 | image: k8s.gcr.io/kube-proxy-amd64:v{{ kube_version }} 31 | command: 32 | - /usr/local/bin/kube-proxy 33 | - --config=/var/lib/kube-proxy/config.conf 34 | securityContext: 35 | privileged: true 36 | volumeMounts: 37 | - mountPath: /var/lib/kube-proxy 38 | name: kube-proxy 39 | - mountPath: /run/xtables.lock 40 | name: xtables-lock 41 | - mountPath: /lib/modules 42 | name: lib-modules 43 | readOnly: true 44 | volumes: 45 | - configMap: 46 | defaultMode: 420 47 | name: kube-proxy 48 | name: kube-proxy 49 | - hostPath: 50 | path: /run/xtables.lock 51 | type: FileOrCreate 52 | name: xtables-lock 53 | - hostPath: 54 | path: /lib/modules 55 | type: "" 56 | name: lib-modules 57 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubeproxy/kubeproxy-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: system:kube-proxy 5 | labels: 6 | addonmanager.kubernetes.io/mode: Reconcile 7 | subjects: 8 | - kind: ServiceAccount 9 | name: kube-proxy 10 | namespace: kube-system 11 | roleRef: 12 | kind: ClusterRole 13 | name: system:node-proxier 14 | apiGroup: rbac.authorization.k8s.io 15 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/kubeproxy/kubeproxy-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-proxy 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/es/elasticsearch-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: elasticsearch-logging 5 | labels: 6 | k8s-app: elasticsearch-logging 7 | addonmanager.kubernetes.io/mode: Reconcile 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - "services" 13 | - "namespaces" 14 | - "endpoints" 15 | verbs: 16 | - "get" 17 | --- 18 | kind: ClusterRoleBinding 19 | apiVersion: rbac.authorization.k8s.io/v1beta1 20 | metadata: 21 | namespace: kube-system 22 | name: elasticsearch-logging 23 | labels: 24 | k8s-app: elasticsearch-logging 25 | addonmanager.kubernetes.io/mode: Reconcile 26 | subjects: 27 | - kind: ServiceAccount 28 | name: elasticsearch-logging 29 | namespace: kube-system 30 | apiGroup: "" 31 | roleRef: 32 | kind: ClusterRole 33 | name: elasticsearch-logging 34 | apiGroup: "" 35 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/es/elasticsearch-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | addonmanager.kubernetes.io/mode: Reconcile 8 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/es/elasticsearch-sts.yml.j2: 
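The StatefulSet below raises vm.max_map_count through a privileged init container. The same prerequisite can instead be met on the hosts themselves, which avoids privileged pods; a sketch using Ansible's stock sysctl module (running it against every node is an assumption, not something this role does):

- name: Set vm.max_map_count for Elasticsearch
  become: true
  sysctl:
    name: vm.max_map_count
    value: 262144
    sysctl_set: yes
    state: present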
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | spec: 10 | serviceName: elasticsearch-logging 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | k8s-app: elasticsearch-logging 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: elasticsearch-logging 19 | spec: 20 | tolerations: 21 | - key: CriticalAddonsOnly 22 | operator: Exists 23 | - effect: NoSchedule 24 | key: node-role.kubernetes.io/master 25 | serviceAccountName: elasticsearch-logging 26 | priorityClassName: system-cluster-critical 27 | initContainers: 28 | - name: elasticsearch-logging-init 29 | image: alpine:3.6 30 | command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"] 31 | securityContext: 32 | privileged: true 33 | containers: 34 | - name: elasticsearch-logging 35 | image: k8s.gcr.io/elasticsearch:v6.2.5 36 | resources: 37 | limits: 38 | cpu: 1000m 39 | requests: 40 | cpu: 100m 41 | ports: 42 | - containerPort: 9200 43 | name: db 44 | protocol: TCP 45 | - containerPort: 9300 46 | name: transport 47 | protocol: TCP 48 | volumeMounts: 49 | - name: elasticsearch-logging 50 | mountPath: /data 51 | env: 52 | - name: "NAMESPACE" 53 | valueFrom: 54 | fieldRef: 55 | fieldPath: metadata.namespace 56 | - name: MINIMUM_MASTER_NODES 57 | value: "1" 58 | volumes: 59 | - name: elasticsearch-logging 60 | emptyDir: {} 61 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/es/elasticsearch-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | spec: 10 | ports: 11 | - port: 9200 12 | protocol: TCP 13 | targetPort: db 14 | selector: 15 | k8s-app: elasticsearch-logging 16 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/fluentd/fluentd-es-ds.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd-es 5 | namespace: kube-system 6 | labels: 7 | k8s-app: fluentd-es 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | spec: 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: fluentd-es 14 | annotations: 15 | scheduler.alpha.kubernetes.io/critical-pod: '' 16 | spec: 17 | tolerations: 18 | - key: CriticalAddonsOnly 19 | operator: Exists 20 | - effect: NoSchedule 21 | key: node-role.kubernetes.io/master 22 | priorityClassName: system-node-critical 23 | serviceAccountName: fluentd-es 24 | containers: 25 | - name: fluentd-es 26 | image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0 27 | env: 28 | - name: FLUENTD_ARGS 29 | value: --no-supervisor -q 30 | resources: 31 | limits: 32 | memory: 500Mi 33 | requests: 34 | cpu: 100m 35 | memory: 200Mi 36 | volumeMounts: 37 | - name: varlog 38 | mountPath: /var/log 39 | - name: varlibdockercontainers 40 | mountPath: /var/lib/docker/containers 41 | readOnly: true 42 | - name: libsystemddir 43 | mountPath: /host/lib 44 | readOnly: true 45 | - name: config-volume 46 | mountPath: /etc/fluent/config.d 47 | terminationGracePeriodSeconds: 30 48 | volumes: 49 | - name: varlog 
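# Hedged note on the libsystemddir volume below: /usr/lib64 matches
# RHEL/CentOS hosts, while Debian/Ubuntu keep the systemd libraries under
# /lib/x86_64-linux-gnu. Since this file is already a Jinja2 template, the
# path could be made tunable (fluentd_systemd_lib_dir is a hypothetical
# variable, not one this repository defines):
#
#   - name: libsystemddir
#     hostPath:
#       path: "{{ fluentd_systemd_lib_dir | default('/usr/lib64') }}"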
50 | hostPath: 51 | path: /var/log 52 | - name: varlibdockercontainers 53 | hostPath: 54 | path: /var/lib/docker/containers 55 | - name: libsystemddir 56 | hostPath: 57 | path: /usr/lib64 58 | - name: config-volume 59 | configMap: 60 | name: fluentd-es-config 61 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/fluentd/fluentd-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: fluentd-es 5 | labels: 6 | k8s-app: fluentd-es 7 | addonmanager.kubernetes.io/mode: Reconcile 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - "namespaces" 13 | - "pods" 14 | verbs: 15 | - "get" 16 | - "watch" 17 | - "list" 18 | --- 19 | kind: ClusterRoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1beta1 21 | metadata: 22 | name: fluentd-es 23 | labels: 24 | k8s-app: fluentd-es 25 | addonmanager.kubernetes.io/mode: Reconcile 26 | subjects: 27 | - kind: ServiceAccount 28 | name: fluentd-es 29 | namespace: kube-system 30 | apiGroup: "" 31 | roleRef: 32 | kind: ClusterRole 33 | name: fluentd-es 34 | apiGroup: "" 35 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/fluentd/fluentd-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: fluentd-es 5 | namespace: kube-system 6 | labels: 7 | addonmanager.kubernetes.io/mode: Reconcile 8 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/kibana/kibana-dp.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kibana-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kibana-logging 8 | kubernetes.io/cluster-service: "true" 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | k8s-app: kibana-logging 14 | template: 15 | metadata: 16 | labels: 17 | k8s-app: kibana-logging 18 | annotations: 19 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 20 | spec: 21 | containers: 22 | - name: kibana-logging 23 | image: docker.elastic.co/kibana/kibana-oss:6.2.4 24 | resources: 25 | limits: 26 | cpu: 1000m 27 | requests: 28 | cpu: 100m 29 | env: 30 | - name: ELASTICSEARCH_URL 31 | value: http://elasticsearch-logging:9200 32 | - name: SERVER_BASEPATH 33 | value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy 34 | ports: 35 | - containerPort: 5601 36 | name: ui 37 | protocol: TCP 38 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/logging/kibana/kibana-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kibana-logging 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | spec: 10 | ports: 11 | - port: 5601 12 | protocol: TCP 13 | targetPort: ui 14 | selector: 15 | k8s-app: kibana-logging 16 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/metric-server/metric-server-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | 
metadata: 4 | name: metrics-server 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/metric-server/metrics-apiservice.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1beta1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.metrics.k8s.io 5 | spec: 6 | service: 7 | name: metrics-server 8 | namespace: kube-system 9 | group: metrics.k8s.io 10 | version: v1beta1 11 | insecureSkipTLSVerify: true 12 | groupPriorityMinimum: 100 13 | versionPriority: 100 14 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/metric-server/metrics-server-dp.yml.j2: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | labels: 8 | k8s-app: metrics-server 9 | spec: 10 | selector: 11 | matchLabels: 12 | k8s-app: metrics-server 13 | template: 14 | metadata: 15 | name: metrics-server 16 | labels: 17 | k8s-app: metrics-server 18 | spec: 19 | serviceAccountName: metrics-server 20 | containers: 21 | - name: metrics-server 22 | image: gcr.io/google_containers/metrics-server-amd64:v0.2.1 23 | imagePullPolicy: Always 24 | command: 25 | - /metrics-server 26 | - --source=kubernetes.summary_api:'' 27 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/metric-server/metrics-server-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: system:metrics-server 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - pods 10 | - nodes 11 | - nodes/stats 12 | - namespaces 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "extensions" 19 | resources: 20 | - deployments 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | --- 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | kind: ClusterRoleBinding 28 | metadata: 29 | name: system:metrics-server 30 | roleRef: 31 | apiGroup: rbac.authorization.k8s.io 32 | kind: ClusterRole 33 | name: system:metrics-server 34 | subjects: 35 | - kind: ServiceAccount 36 | name: metrics-server 37 | namespace: kube-system 38 | --- 39 | apiVersion: rbac.authorization.k8s.io/v1beta1 40 | kind: RoleBinding 41 | metadata: 42 | name: metrics-server-auth-reader 43 | namespace: kube-system 44 | roleRef: 45 | apiGroup: rbac.authorization.k8s.io 46 | kind: Role 47 | name: extension-apiserver-authentication-reader 48 | subjects: 49 | - kind: ServiceAccount 50 | name: metrics-server 51 | namespace: kube-system 52 | --- 53 | apiVersion: rbac.authorization.k8s.io/v1beta1 54 | kind: ClusterRoleBinding 55 | metadata: 56 | name: metrics-server:system:auth-delegator 57 | roleRef: 58 | apiGroup: rbac.authorization.k8s.io 59 | kind: ClusterRole 60 | name: system:auth-delegator 61 | subjects: 62 | - kind: ServiceAccount 63 | name: metrics-server 64 | namespace: kube-system 65 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/metric-server/metrics-server-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: metrics-server 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/name: 
"Metrics-server" 8 | spec: 9 | selector: 10 | k8s-app: metrics-server 11 | ports: 12 | - port: 443 13 | protocol: TCP 14 | targetPort: 443 15 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/alertmanater/alertmanager-main-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: alertmanager-main 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/alertmanater/alertmanager-main-secret.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | alertmanager.yaml: Z2xvYmFsOgogIHJlc29sdmVfdGltZW91dDogNW0KICBzbGFja19hcGlfdXJsOiAnaHR0cHM6Ly9ob29rcy5zbGFjay5jb20vc2VydmljZXMveW91cl9zbGFja19hcGlfdG9rZW4nCiAgc210cF9zbWFydGhvc3Q6ICd5b3VyX3NtdHBfc21hcnRob3N0OjU4NycKICBzbXRwX2Zyb206ICd5b3VyX3NtdHBfZnJvbScKICBzbXRwX2F1dGhfdXNlcm5hbWU6ICd5b3VyX3NtdHBfdXNlcicKICBzbXRwX2F1dGhfcGFzc3dvcmQ6ICd5b3VyX3NtdHBfcGFzcycKdGVtcGxhdGVzOgotICcvZXRjL2FsZXJ0bWFuYWdlci90ZW1wbGF0ZS8qLnRtcGwnCnJvdXRlOgogIGdyb3VwX2J5OiBbJ2FsZXJ0bmFtZScsICdjbHVzdGVyJywgJ3NlcnZpY2UnXQogIGdyb3VwX3dhaXQ6IDMwcwogIGdyb3VwX2ludGVydmFsOiA1bQogIHJlcGVhdF9pbnRlcnZhbDogMWgKICByZWNlaXZlcjogZGVmYXVsdC1yZWNlaXZlcgogIHJvdXRlczoKICAtIG1hdGNoOgogICAgICBhbGVydG5hbWU6IERlYWRNYW5zU3dpdGNoCiAgICByZWNlaXZlcjogJ251bGwnCmluaGliaXRfcnVsZXM6Ci0gc291cmNlX21hdGNoOgogICAgc2V2ZXJpdHk6ICdjcml0aWNhbCcKICB0YXJnZXRfbWF0Y2g6CiAgICBzZXZlcml0eTogJ3dhcm5pbmcnCiAgIyBBcHBseSBpbmhpYml0aW9uIGlmIHRoZSBhbGVydG5hbWUgaXMgdGhlIHNhbWUuCiAgZXF1YWw6IFsnYWxlcnRuYW1lJywgJ2NsdXN0ZXInLCAnc2VydmljZSddCnJlY2VpdmVyczoKLSBuYW1lOiAnZGVmYXVsdC1yZWNlaXZlcicKICBzbGFja19jb25maWdzOgogIC0gY2hhbm5lbDogJyN5b3VyX3NsYWNrX2NoYW5uZWwnCiAgICB0aXRsZTogJ1t7eyAuU3RhdHVzIHwgdG9VcHBlciB9fXt7IGlmIGVxIC5TdGF0dXMgImZpcmluZyIgfX06e3sgLkFsZXJ0cy5GaXJpbmcgfCBsZW4gfX17eyBlbmQgfX1dIFByb21ldGhldXMgRXZlbnQgTm90aWZpY2F0aW9uJwogICAgdGV4dDogPi0KICAgICAgICB7eyByYW5nZSAuQWxlcnRzIH19CiAgICAgICAgICAgKkFsZXJ0Oioge3sgLkFubm90YXRpb25zLnN1bW1hcnkgfX0gLSBge3sgLkxhYmVscy5zZXZlcml0eSB9fWAKICAgICAgICAgICpEZXNjcmlwdGlvbjoqIHt7IC5Bbm5vdGF0aW9ucy5kZXNjcmlwdGlvbiB9fQogICAgICAgICAgKkdyYXBoOiogPHt7IC5HZW5lcmF0b3JVUkwgfX18OmNoYXJ0X3dpdGhfdXB3YXJkc190cmVuZDo+ICpSdW5ib29rOiogPHt7IC5Bbm5vdGF0aW9ucy5ydW5ib29rIH19fDpzcGlyYWxfbm90ZV9wYWQ6PgogICAgICAgICAgKkRldGFpbHM6KgogICAgICAgICAge3sgcmFuZ2UgLkxhYmVscy5Tb3J0ZWRQYWlycyB9fSDigKIgKnt7IC5OYW1lIH19OiogYHt7IC5WYWx1ZSB9fWAKICAgICAgICAgIHt7IGVuZCB9fQogICAgICAgIHt7IGVuZCB9fQogICAgc2VuZF9yZXNvbHZlZDogdHJ1ZQogIGVtYWlsX2NvbmZpZ3M6CiAgLSB0bzogJ3lvdXJfYWxlcnRfZW1haWxfYWRkcmVzcycKICAgIHNlbmRfcmVzb2x2ZWQ6IHRydWUKLSBuYW1lOiAnbnVsbCcK 4 | kind: Secret 5 | metadata: 6 | name: alertmanager-main 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/alertmanater/alertmanager-main-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: alertmanager-main 7 | namespace: monitoring 8 | spec: 9 | type: ClusterIP 10 | ports: 11 | - name: web 12 | port: 9093 13 | targetPort: web 14 | selector: 15 | alertmanager: main 16 | app: alertmanager 17 | -------------------------------------------------------------------------------- 
/roles/k8s-addon/templates/monitoring/alertmanater/alertmanager-main.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: main 7 | namespace: monitoring 8 | spec: 9 | serviceAccountName: alertmanager-main 10 | baseImage: quay.io/prometheus/alertmanager 11 | externalUrl: http://127.0.0.1:9093 12 | replicas: 3 13 | version: v0.15.0 14 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/grafana/grafana-admin-secret.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | user: "{{ monitoring_grafana_user | b64encode }}" 4 | password: "{{ monitoring_grafana_password | b64encode }}" 5 | kind: Secret 6 | metadata: 7 | name: grafana-credentials 8 | namespace: monitoring 9 | type: Opaque 10 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/grafana/grafana-datasources.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | prometheus.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0= 4 | kind: Secret 5 | metadata: 6 | name: grafana-datasources 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/grafana/grafana-dp.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | template: 14 | metadata: 15 | labels: 16 | app: grafana 17 | spec: 18 | containers: 19 | - name: grafana 20 | image: grafana/grafana:5.1.0 21 | env: 22 | - name: GF_AUTH_BASIC_ENABLED 23 | value: "true" 24 | - name: GF_AUTH_ANONYMOUS_ENABLED 25 | value: "true" 26 | - name: GF_SECURITY_ADMIN_USER 27 | valueFrom: 28 | secretKeyRef: 29 | name: grafana-credentials 30 | key: user 31 | - name: GF_SECURITY_ADMIN_PASSWORD 32 | valueFrom: 33 | secretKeyRef: 34 | name: grafana-credentials 35 | key: password 36 | ports: 37 | - containerPort: 3000 38 | name: http 39 | resources: 40 | limits: 41 | cpu: 200m 42 | memory: 200Mi 43 | requests: 44 | cpu: 100m 45 | memory: 100Mi 46 | volumeMounts: 47 | - mountPath: /var/lib/grafana 48 | name: grafana-storage 49 | readOnly: false 50 | - mountPath: /etc/grafana/provisioning/datasources 51 | name: grafana-datasources 52 | readOnly: false 53 | - mountPath: /etc/grafana/provisioning/dashboards 54 | name: grafana-dashboards 55 | readOnly: false 56 | - mountPath: /grafana-dashboard-definitions/0/k8s-cluster-rsrc-use 57 | name: grafana-dashboard-k8s-cluster-rsrc-use 58 | readOnly: false 59 | - mountPath: /grafana-dashboard-definitions/0/k8s-node-rsrc-use 60 | name: grafana-dashboard-k8s-node-rsrc-use 61 | readOnly: false 62 | - mountPath: 
/grafana-dashboard-definitions/0/k8s-resources-cluster 63 | name: grafana-dashboard-k8s-resources-cluster 64 | readOnly: false 65 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-namespace 66 | name: grafana-dashboard-k8s-resources-namespace 67 | readOnly: false 68 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-pod 69 | name: grafana-dashboard-k8s-resources-pod 70 | readOnly: false 71 | - mountPath: /grafana-dashboard-definitions/0/nodes 72 | name: grafana-dashboard-nodes 73 | readOnly: false 74 | - mountPath: /grafana-dashboard-definitions/0/pods 75 | name: grafana-dashboard-pods 76 | readOnly: false 77 | - mountPath: /grafana-dashboard-definitions/0/statefulset 78 | name: grafana-dashboard-statefulset 79 | readOnly: false 80 | securityContext: 81 | runAsNonRoot: true 82 | runAsUser: 65534 83 | serviceAccountName: grafana 84 | volumes: 85 | - emptyDir: {} 86 | name: grafana-storage 87 | - name: grafana-datasources 88 | secret: 89 | secretName: grafana-datasources 90 | - configMap: 91 | name: grafana-dashboards 92 | name: grafana-dashboards 93 | - configMap: 94 | name: grafana-dashboard-k8s-cluster-rsrc-use 95 | name: grafana-dashboard-k8s-cluster-rsrc-use 96 | - configMap: 97 | name: grafana-dashboard-k8s-node-rsrc-use 98 | name: grafana-dashboard-k8s-node-rsrc-use 99 | - configMap: 100 | name: grafana-dashboard-k8s-resources-cluster 101 | name: grafana-dashboard-k8s-resources-cluster 102 | - configMap: 103 | name: grafana-dashboard-k8s-resources-namespace 104 | name: grafana-dashboard-k8s-resources-namespace 105 | - configMap: 106 | name: grafana-dashboard-k8s-resources-pod 107 | name: grafana-dashboard-k8s-resources-pod 108 | - configMap: 109 | name: grafana-dashboard-nodes 110 | name: grafana-dashboard-nodes 111 | - configMap: 112 | name: grafana-dashboard-pods 113 | name: grafana-dashboard-pods 114 | - configMap: 115 | name: grafana-dashboard-statefulset 116 | name: grafana-dashboard-statefulset 117 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/grafana/grafana-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/grafana/grafana-source.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | [ 5 | { 6 | "folder": "", 7 | "name": "0", 8 | "options": { 9 | "path": "/grafana-dashboard-definitions/0" 10 | }, 11 | "org_id": 1, 12 | "type": "file" 13 | } 14 | ] 15 | kind: ConfigMap 16 | metadata: 17 | name: grafana-dashboards 18 | namespace: monitoring 19 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/grafana/grafana-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | ports: 8 | - name: http 9 | port: 3000 10 | targetPort: http 11 | selector: 12 | app: grafana 13 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/kube-state-metrics/kube-state-metrics-dp.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: 
apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: kube-state-metrics 13 | template: 14 | metadata: 15 | labels: 16 | app: kube-state-metrics 17 | spec: 18 | serviceAccountName: kube-state-metrics 19 | containers: 20 | - image: quay.io/coreos/kube-rbac-proxy:v0.3.1 21 | name: kube-rbac-proxy-main 22 | args: 23 | - --secure-listen-address=:8443 24 | - --upstream=http://127.0.0.1:8081/ 25 | ports: 26 | - containerPort: 8443 27 | name: https-main 28 | resources: 29 | limits: 30 | cpu: 20m 31 | memory: 40Mi 32 | requests: 33 | cpu: 10m 34 | memory: 20Mi 35 | - name: kube-rbac-proxy-self 36 | image: quay.io/coreos/kube-rbac-proxy:v0.3.1 37 | args: 38 | - --secure-listen-address=:9443 39 | - --upstream=http://127.0.0.1:8082/ 40 | ports: 41 | - containerPort: 9443 42 | name: https-self 43 | resources: 44 | limits: 45 | cpu: 20m 46 | memory: 40Mi 47 | requests: 48 | cpu: 10m 49 | memory: 20Mi 50 | - name: kube-state-metrics 51 | image: quay.io/coreos/kube-state-metrics:v1.3.1 52 | args: 53 | - --host=127.0.0.1 54 | - --port=8081 55 | - --telemetry-host=127.0.0.1 56 | - --telemetry-port=8082 57 | resources: 58 | limits: 59 | cpu: 102m 60 | memory: 180Mi 61 | requests: 62 | cpu: 102m 63 | memory: 180Mi 64 | - name: addon-resizer 65 | image: quay.io/coreos/addon-resizer:1.0 66 | command: 67 | - /pod_nanny 68 | - --container=kube-state-metrics 69 | - --cpu=100m 70 | - --extra-cpu=2m 71 | - --memory=150Mi 72 | - --extra-memory=30Mi 73 | - --threshold=5 74 | - --deployment=kube-state-metrics 75 | env: 76 | - name: MY_POD_NAME 77 | valueFrom: 78 | fieldRef: 79 | apiVersion: v1 80 | fieldPath: metadata.name 81 | - name: MY_POD_NAMESPACE 82 | valueFrom: 83 | fieldRef: 84 | apiVersion: v1 85 | fieldPath: metadata.namespace 86 | resources: 87 | limits: 88 | cpu: 10m 89 | memory: 30Mi 90 | requests: 91 | cpu: 10m 92 | memory: 30Mi 93 | securityContext: 94 | runAsNonRoot: true 95 | runAsUser: 65534 96 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/kube-state-metrics/kube-state-metrics-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: kube-state-metrics 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - secrets 11 | - nodes 12 | - pods 13 | - services 14 | - resourcequotas 15 | - replicationcontrollers 16 | - limitranges 17 | - persistentvolumeclaims 18 | - persistentvolumes 19 | - namespaces 20 | - endpoints 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - extensions 26 | resources: 27 | - daemonsets 28 | - deployments 29 | - replicasets 30 | verbs: 31 | - list 32 | - watch 33 | - apiGroups: 34 | - apps 35 | resources: 36 | - statefulsets 37 | verbs: 38 | - list 39 | - watch 40 | - apiGroups: 41 | - batch 42 | resources: 43 | - cronjobs 44 | - jobs 45 | verbs: 46 | - list 47 | - watch 48 | - apiGroups: 49 | - autoscaling 50 | resources: 51 | - horizontalpodautoscalers 52 | verbs: 53 | - list 54 | - watch 55 | - apiGroups: 56 | - authentication.k8s.io 57 | resources: 58 | - tokenreviews 59 | verbs: 60 | - create 61 | - apiGroups: 62 | - authorization.k8s.io 63 | resources: 64 | - subjectaccessreviews 65 | verbs: 66 | - create 67 | --- 68 | apiVersion: rbac.authorization.k8s.io/v1 69 | kind: ClusterRoleBinding 
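# Two hedged notes on the RBAC below, based on stock kube-prometheus
# behaviour rather than anything stated in this repository: the
# https-main/https-self ports sit behind kube-rbac-proxy, so scrapes must
# present a ServiceAccount bearer token rather than hit the ports
# anonymously, e.g.
#
#   curl -sk -H "Authorization: Bearer $TOKEN" \
#     https://kube-state-metrics.monitoring.svc:8443/metrics | head
#
# (TOKEN being any ServiceAccount token the proxy authorizes); and the final
# RoleBinding's ServiceAccount subject appears to be missing the
# `namespace: monitoring` field that ServiceAccount subjects require.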
70 | metadata: 71 | name: kube-state-metrics 72 | roleRef: 73 | apiGroup: rbac.authorization.k8s.io 74 | kind: ClusterRole 75 | name: kube-state-metrics 76 | subjects: 77 | - kind: ServiceAccount 78 | name: kube-state-metrics 79 | namespace: monitoring 80 | --- 81 | apiVersion: rbac.authorization.k8s.io/v1 82 | kind: Role 83 | metadata: 84 | name: kube-state-metrics 85 | namespace: monitoring 86 | rules: 87 | - apiGroups: 88 | - "" 89 | resources: 90 | - pods 91 | verbs: 92 | - get 93 | - apiGroups: 94 | - extensions 95 | resourceNames: 96 | - kube-state-metrics 97 | resources: 98 | - deployments 99 | verbs: 100 | - get 101 | - update 102 | --- 103 | apiVersion: rbac.authorization.k8s.io/v1 104 | kind: RoleBinding 105 | metadata: 106 | name: kube-state-metrics 107 | namespace: monitoring 108 | roleRef: 109 | apiGroup: rbac.authorization.k8s.io 110 | kind: Role 111 | name: kube-state-metrics 112 | subjects: 113 | - kind: ServiceAccount 114 | name: kube-state-metrics 115 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/kube-state-metrics/kube-state-metrics-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/kube-state-metrics/kube-state-metrics-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https-main 12 | port: 8443 13 | targetPort: https-main 14 | - name: https-self 15 | port: 9443 16 | targetPort: https-self 17 | selector: 18 | app: kube-state-metrics 19 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/monitoring-ns.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/node-exporter/node-exporter-ds.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: node-exporter 12 | template: 13 | metadata: 14 | labels: 15 | app: node-exporter 16 | spec: 17 | serviceAccountName: node-exporter 18 | tolerations: 19 | - effect: NoSchedule 20 | key: node-role.kubernetes.io/master 21 | containers: 22 | - name: node-exporter 23 | image: quay.io/prometheus/node-exporter:v0.15.2 24 | args: 25 | - --web.listen-address=127.0.0.1:9101 26 | - --path.procfs=/host/proc 27 | - --path.sysfs=/host/sys 28 | resources: 29 | limits: 30 | cpu: 102m 31 | memory: 180Mi 32 | requests: 33 | cpu: 102m 34 | memory: 180Mi 35 | volumeMounts: 36 | - mountPath: /host/proc 37 | name: proc 38 | readOnly: false 39 | - mountPath: /host/sys 40 | name: sys 41 | readOnly: false 42 | - name: kube-rbac-proxy 43 | image: quay.io/coreos/kube-rbac-proxy:v0.3.1 44 | args: 45 | - --secure-listen-address=:9100 46 | - 
--upstream=http://127.0.0.1:9101/ 47 | ports: 48 | - containerPort: 9100 49 | hostPort: 9100 50 | name: https 51 | resources: 52 | limits: 53 | cpu: 20m 54 | memory: 40Mi 55 | requests: 56 | cpu: 10m 57 | memory: 20Mi 58 | hostNetwork: true 59 | hostPID: true 60 | securityContext: 61 | runAsNonRoot: true 62 | runAsUser: 65534 63 | volumes: 64 | - hostPath: 65 | path: /proc 66 | name: proc 67 | - hostPath: 68 | path: /sys 69 | name: sys 70 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/node-exporter/node-exporter-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: node-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | --- 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | kind: ClusterRoleBinding 21 | metadata: 22 | name: node-exporter 23 | roleRef: 24 | apiGroup: rbac.authorization.k8s.io 25 | kind: ClusterRole 26 | name: node-exporter 27 | subjects: 28 | - kind: ServiceAccount 29 | name: node-exporter 30 | namespace: monitoring 31 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/node-exporter/node-exporter-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/node-exporter/node-exporter-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https 12 | port: 9100 13 | targetPort: https 14 | selector: 15 | app: node-exporter 16 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/operator/operator-dp.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | k8s-app: prometheus-operator 6 | name: prometheus-operator 7 | namespace: monitoring 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | k8s-app: prometheus-operator 13 | template: 14 | metadata: 15 | labels: 16 | k8s-app: prometheus-operator 17 | spec: 18 | serviceAccountName: prometheus-operator 19 | containers: 20 | - name: prometheus-operator 21 | image: quay.io/coreos/prometheus-operator:v0.22.0 22 | args: 23 | - --kubelet-service=kube-system/kubelet 24 | - --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1 25 | - --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.22.0 26 | ports: 27 | - containerPort: 8080 28 | name: http 29 | resources: 30 | limits: 31 | cpu: 200m 32 | memory: 100Mi 33 | requests: 34 | cpu: 100m 35 | memory: 50Mi 36 | securityContext: 37 | runAsNonRoot: true 38 | runAsUser: 65534 39 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/operator/operator-rbac.yml.j2: 
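Alertmanager and Prometheus objects are custom resources, so their manifests can only be applied once the operator above has registered its CRDs. A wait-loop sketch in the retry idiom this repository already uses (the CRD name is the operator's standard one):

- name: Wait for the Prometheus Operator CRDs
  command: |
    {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \
      get crd prometheuses.monitoring.coreos.com
  delegate_to: "{{ groups['masters'][0] }}"
  run_once: true
  register: crd_ready
  until: crd_ready.rc == 0
  retries: 10
  delay: 5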
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-operator 5 | rules: 6 | - apiGroups: 7 | - apiextensions.k8s.io 8 | resources: 9 | - customresourcedefinitions 10 | verbs: 11 | - '*' 12 | - apiGroups: 13 | - monitoring.coreos.com 14 | resources: 15 | - alertmanagers 16 | - prometheuses 17 | - prometheuses/finalizers 18 | - alertmanagers/finalizers 19 | - servicemonitors 20 | - prometheusrules 21 | verbs: 22 | - '*' 23 | - apiGroups: 24 | - apps 25 | resources: 26 | - statefulsets 27 | verbs: 28 | - '*' 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - configmaps 33 | - secrets 34 | verbs: 35 | - '*' 36 | - apiGroups: 37 | - "" 38 | resources: 39 | - pods 40 | verbs: 41 | - list 42 | - delete 43 | - apiGroups: 44 | - "" 45 | resources: 46 | - services 47 | - endpoints 48 | verbs: 49 | - get 50 | - create 51 | - update 52 | - apiGroups: 53 | - "" 54 | resources: 55 | - nodes 56 | verbs: 57 | - list 58 | - watch 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - namespaces 63 | verbs: 64 | - list 65 | - watch 66 | --- 67 | apiVersion: rbac.authorization.k8s.io/v1 68 | kind: ClusterRoleBinding 69 | metadata: 70 | name: prometheus-operator 71 | roleRef: 72 | apiGroup: rbac.authorization.k8s.io 73 | kind: ClusterRole 74 | name: prometheus-operator 75 | subjects: 76 | - kind: ServiceAccount 77 | name: prometheus-operator 78 | namespace: monitoring 79 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/operator/operator-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-operator 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/operator/operator-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: prometheus-operator 6 | name: prometheus-operator 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: http 11 | port: 8080 12 | protocol: TCP 13 | targetPort: http 14 | selector: 15 | k8s-app: prometheus-operator 16 | type: ClusterIP 17 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/prometheus/prometheus-main.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: k8s 7 | namespace: monitoring 8 | spec: 9 | alerting: 10 | alertmanagers: 11 | - name: alertmanager-main 12 | namespace: monitoring 13 | port: web 14 | baseImage: quay.io/prometheus/prometheus 15 | replicas: 2 16 | resources: 17 | requests: 18 | memory: 400Mi 19 | ruleSelector: 20 | matchLabels: 21 | prometheus: k8s 22 | role: alert-rules 23 | serviceAccountName: prometheus-k8s 24 | serviceMonitorSelector: 25 | matchExpressions: 26 | - key: k8s-app 27 | operator: Exists 28 | version: v2.3.1 29 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/prometheus/prometheus-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: 
ClusterRole 3 | metadata: 4 | name: prometheus-k8s 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes/metrics 10 | verbs: 11 | - get 12 | - nonResourceURLs: 13 | - /metrics 14 | verbs: 15 | - get 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: prometheus-k8s 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: prometheus-k8s 25 | subjects: 26 | - kind: ServiceAccount 27 | name: prometheus-k8s 28 | namespace: monitoring 29 | --- 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | kind: Role 32 | metadata: 33 | name: prometheus-k8s 34 | namespace: monitoring 35 | rules: 36 | - apiGroups: 37 | - "" 38 | resources: 39 | - nodes 40 | - services 41 | - endpoints 42 | - pods 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | --- 48 | apiVersion: rbac.authorization.k8s.io/v1 49 | kind: RoleBinding 50 | metadata: 51 | name: prometheus-k8s 52 | namespace: monitoring 53 | roleRef: 54 | apiGroup: rbac.authorization.k8s.io 55 | kind: Role 56 | name: prometheus-k8s 57 | subjects: 58 | - kind: ServiceAccount 59 | name: prometheus-k8s 60 | namespace: monitoring 61 | --- 62 | apiVersion: rbac.authorization.k8s.io/v1 63 | kind: Role 64 | metadata: 65 | name: prometheus-k8s 66 | namespace: kube-system 67 | rules: 68 | - apiGroups: 69 | - "" 70 | resources: 71 | - nodes 72 | - services 73 | - endpoints 74 | - pods 75 | verbs: 76 | - get 77 | - list 78 | - watch 79 | --- 80 | apiVersion: rbac.authorization.k8s.io/v1 81 | kind: RoleBinding 82 | metadata: 83 | name: prometheus-k8s 84 | namespace: kube-system 85 | roleRef: 86 | apiGroup: rbac.authorization.k8s.io 87 | kind: Role 88 | name: prometheus-k8s 89 | subjects: 90 | - kind: ServiceAccount 91 | name: prometheus-k8s 92 | namespace: monitoring 93 | --- 94 | apiVersion: rbac.authorization.k8s.io/v1 95 | kind: Role 96 | metadata: 97 | name: prometheus-k8s 98 | namespace: default 99 | rules: 100 | - apiGroups: 101 | - "" 102 | resources: 103 | - nodes 104 | - services 105 | - endpoints 106 | - pods 107 | verbs: 108 | - get 109 | - list 110 | - watch 111 | --- 112 | apiVersion: rbac.authorization.k8s.io/v1 113 | kind: RoleBinding 114 | metadata: 115 | name: prometheus-k8s 116 | namespace: default 117 | roleRef: 118 | apiGroup: rbac.authorization.k8s.io 119 | kind: Role 120 | name: prometheus-k8s 121 | subjects: 122 | - kind: ServiceAccount 123 | name: prometheus-k8s 124 | namespace: monitoring 125 | --- 126 | apiVersion: rbac.authorization.k8s.io/v1 127 | kind: Role 128 | metadata: 129 | name: prometheus-k8s-config 130 | namespace: monitoring 131 | rules: 132 | - apiGroups: 133 | - "" 134 | resources: 135 | - configmaps 136 | verbs: 137 | - get 138 | --- 139 | apiVersion: rbac.authorization.k8s.io/v1 140 | kind: RoleBinding 141 | metadata: 142 | name: prometheus-k8s-config 143 | namespace: monitoring 144 | roleRef: 145 | apiGroup: rbac.authorization.k8s.io 146 | kind: Role 147 | name: prometheus-k8s-config 148 | subjects: 149 | - kind: ServiceAccount 150 | name: prometheus-k8s 151 | namespace: monitoring 152 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/prometheus/prometheus-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- 
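The Roles above grant prometheus-k8s discovery rights only in monitoring, kube-system, and default, so targets in any other namespace remain invisible to it. Extending coverage means repeating the same Role/RoleBinding pair per namespace; a sketch for a hypothetical apps namespace:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: prometheus-k8s
  namespace: apps
rules:
- apiGroups: [""]
  resources: ["nodes", "services", "endpoints", "pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: prometheus-k8s
  namespace: apps
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prometheus-k8s
subjects:
- kind: ServiceAccount
  name: prometheus-k8s
  namespace: monitoring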
/roles/k8s-addon/templates/monitoring/prometheus/prometheus-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: prometheus-k8s 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9090 12 | targetPort: web 13 | selector: 14 | app: prometheus 15 | prometheus: k8s 16 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/service-discovery/kube-controller-manager-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: kube-controller-manager 6 | name: kube-controller-manager 7 | namespace: kube-system 8 | spec: 9 | type: ClusterIP 10 | clusterIP: None 11 | ports: 12 | - name: http-metrics 13 | port: 10252 14 | protocol: TCP 15 | targetPort: 10252 16 | selector: 17 | component: kube-controller-manager 18 | tier: control-plane 19 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/service-discovery/kube-scheduler-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler 7 | namespace: kube-system 8 | spec: 9 | type: ClusterIP 10 | clusterIP: None 11 | ports: 12 | - name: http-metrics 13 | port: 10251 14 | protocol: TCP 15 | targetPort: 10251 16 | selector: 17 | component: kube-scheduler 18 | tier: control-plane 19 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/alertmanager-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: alertmanager 6 | name: alertmanager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | alertmanager: main 15 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/coredns-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-dns 6 | name: coredns 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 15s 12 | port: http-metrics 13 | jobLabel: k8s-app 14 | namespaceSelector: 15 | matchNames: 16 | - kube-system 17 | selector: 18 | matchLabels: 19 | k8s-app: kube-dns 20 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/kube-apiserver-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: apiserver 6 | name: kube-apiserver 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | port: https 13 | scheme: https 14 | tlsConfig: 15 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 16 | serverName: 
kubernetes.default 17 | jobLabel: component 18 | namespaceSelector: 19 | matchNames: 20 | - default 21 | selector: 22 | matchLabels: 23 | component: apiserver 24 | provider: kubernetes 25 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/kube-controller-manager-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-controller-manager 6 | name: kube-controller-manager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: http-metrics 12 | jobLabel: component 13 | namespaceSelector: 14 | matchNames: 15 | - kube-system 16 | selector: 17 | matchLabels: 18 | k8s-app: kube-controller-manager 19 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/kube-scheduler-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: http-metrics 12 | jobLabel: component 13 | namespaceSelector: 14 | matchNames: 15 | - kube-system 16 | selector: 17 | matchLabels: 18 | k8s-app: kube-scheduler 19 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/kubelet-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kubelet 6 | name: kubelet 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | honorLabels: true 12 | interval: 30s 13 | port: https-metrics 14 | scheme: https 15 | tlsConfig: 16 | insecureSkipVerify: true 17 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 18 | honorLabels: true 19 | interval: 30s 20 | path: /metrics/cadvisor 21 | port: https-metrics 22 | scheme: https 23 | tlsConfig: 24 | insecureSkipVerify: true 25 | jobLabel: k8s-app 26 | namespaceSelector: 27 | matchNames: 28 | - kube-system 29 | selector: 30 | matchLabels: 31 | k8s-app: kubelet 32 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/kubestate-metrics-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | honorLabels: true 12 | interval: 30s 13 | port: https-main 14 | scheme: https 15 | tlsConfig: 16 | insecureSkipVerify: true 17 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 18 | interval: 30s 19 | port: https-self 20 | scheme: https 21 | tlsConfig: 22 | insecureSkipVerify: true 23 | jobLabel: k8s-app 24 | selector: 25 | matchLabels: 26 | k8s-app: kube-state-metrics 27 | -------------------------------------------------------------------------------- 
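Note on the ServiceMonitor templates in this role: discovery only works when spec.selector.matchLabels matches the labels on the target Service, spec.endpoints[].port names a named port on that Service, and namespaceSelector (which defaults to the ServiceMonitor's own namespace when omitted) points at the namespace where the Service lives. As a minimal sketch of extending the stack to another addon — the Service name example-app and its port name metrics are hypothetical, not files in this repository:

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    k8s-app: example-app    # hypothetical addon, not part of this repo
  name: example-app
  namespace: monitoring
spec:
  endpoints:
  - interval: 30s           # same scrape interval the templates above use
    port: metrics           # must name a port defined on the target Service
  jobLabel: k8s-app         # this Service label's value becomes the Prometheus job name
  namespaceSelector:
    matchNames:
    - kube-system           # namespace of the target Service
  selector:
    matchLabels:
      k8s-app: example-app  # must match the Service's labels exactly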
/roles/k8s-addon/templates/monitoring/servicemonitor/node-exporter-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | port: https 13 | scheme: https 14 | tlsConfig: 15 | insecureSkipVerify: true 16 | jobLabel: k8s-app 17 | selector: 18 | matchLabels: 19 | k8s-app: node-exporter 20 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/prometheus-operator-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: prometheus-operator 6 | name: prometheus-operator 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - port: http 11 | selector: 12 | matchLabels: 13 | k8s-app: prometheus-operator 14 | -------------------------------------------------------------------------------- /roles/k8s-addon/templates/monitoring/servicemonitor/prometheus-sm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: prometheus 6 | name: prometheus 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | prometheus: k8s 15 | -------------------------------------------------------------------------------- /roles/k8s-cni/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy Kubernetes CNI files 4 | template: 5 | src: "{{ container_network }}.yml.j2" 6 | dest: "{{ cache_dir }}/{{ container_network }}.yml" 7 | delegate_to: "{{ groups['masters'][0] }}" 8 | run_once: true 9 | register: copy_cni 10 | 11 | - name: Apply Kubernetes CNI 12 | when: copy_cni 13 | command: | 14 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 15 | apply -f {{ cache_dir }}/{{ container_network }}.yml 16 | delegate_to: "{{ groups['masters'][0] }}" 17 | run_once: true 18 | register: apply_cni 19 | until: apply_cni.rc == 0 20 | retries: 10 21 | delay: 2 22 | -------------------------------------------------------------------------------- /roles/k8s-cni/templates/flannel.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: flannel 5 | namespace: kube-system 6 | --- 7 | kind: ClusterRole 8 | apiVersion: rbac.authorization.k8s.io/v1beta1 9 | metadata: 10 | name: flannel 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - pods 16 | verbs: 17 | - get 18 | - apiGroups: 19 | - "" 20 | resources: 21 | - nodes 22 | verbs: 23 | - list 24 | - watch 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - nodes/status 29 | verbs: 30 | - patch 31 | --- 32 | kind: ClusterRoleBinding 33 | apiVersion: rbac.authorization.k8s.io/v1beta1 34 | metadata: 35 | name: flannel 36 | roleRef: 37 | apiGroup: rbac.authorization.k8s.io 38 | kind: ClusterRole 39 | name: flannel 40 | subjects: 41 | - kind: ServiceAccount 42 | name: flannel 43 | namespace: kube-system 44 | --- 45 | kind: ConfigMap 46 | apiVersion: v1 47 | metadata: 48 | 
name: kube-flannel-cfg 49 | namespace: kube-system 50 | labels: 51 | tier: node 52 | app: flannel 53 | data: 54 | cni-conf.json: | 55 | { 56 | "name": "cbr0", 57 | "plugins": [ 58 | { 59 | "type": "flannel", 60 | "delegate": { 61 | "hairpinMode": true, 62 | "isDefaultGateway": true 63 | } 64 | }, 65 | { 66 | "type": "portmap", 67 | "capabilities": { 68 | "portMappings": true 69 | } 70 | } 71 | ] 72 | } 73 | net-conf.json: | 74 | { 75 | "Network": "{{ pod_network_cidr }}", 76 | "Backend": { 77 | "Type": "vxlan" 78 | } 79 | } 80 | --- 81 | apiVersion: extensions/v1beta1 82 | kind: DaemonSet 83 | metadata: 84 | name: kube-flannel-ds 85 | namespace: kube-system 86 | labels: 87 | tier: node 88 | app: flannel 89 | spec: 90 | template: 91 | metadata: 92 | labels: 93 | tier: node 94 | app: flannel 95 | spec: 96 | hostNetwork: true 97 | tolerations: 98 | - key: node-role.kubernetes.io/master 99 | operator: Exists 100 | effect: NoSchedule 101 | serviceAccountName: flannel 102 | initContainers: 103 | - name: install-cni 104 | image: quay.io/coreos/flannel:v0.10.0-amd64 105 | command: 106 | - cp 107 | args: 108 | - -f 109 | - /etc/kube-flannel/cni-conf.json 110 | - /etc/cni/net.d/10-flannel.conflist 111 | volumeMounts: 112 | - name: cni 113 | mountPath: /etc/cni/net.d 114 | - name: flannel-cfg 115 | mountPath: /etc/kube-flannel/ 116 | containers: 117 | - name: kube-flannel 118 | image: quay.io/coreos/flannel:v0.10.0-amd64 119 | command: 120 | - /opt/bin/flanneld 121 | args: 122 | - --ip-masq 123 | {% if cni_iface != '' -%} 124 | - --iface={{ cni_iface }} 125 | {% endif -%} 126 | - --kube-subnet-mgr 127 | resources: 128 | requests: 129 | cpu: "100m" 130 | memory: "50Mi" 131 | limits: 132 | cpu: "100m" 133 | memory: "50Mi" 134 | securityContext: 135 | privileged: true 136 | env: 137 | - name: POD_NAME 138 | valueFrom: 139 | fieldRef: 140 | fieldPath: metadata.name 141 | - name: POD_NAMESPACE 142 | valueFrom: 143 | fieldRef: 144 | fieldPath: metadata.namespace 145 | volumeMounts: 146 | - name: run 147 | mountPath: /run 148 | - name: flannel-cfg 149 | mountPath: /etc/kube-flannel/ 150 | volumes: 151 | - name: run 152 | hostPath: 153 | path: /run 154 | - name: cni 155 | hostPath: 156 | path: /etc/cni/net.d 157 | - name: flannel-cfg 158 | configMap: 159 | name: kube-flannel-cfg 160 | -------------------------------------------------------------------------------- /roles/k8s-kubeconfig/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | config: 4 | cluster_name: kubernetes 5 | contexts: 6 | - name: kubernetes-admin 7 | context: kubernetes-admin@kubernetes 8 | ca: "{{ ca }}" 9 | cert: "{{ admin }}" 10 | cert_key: "{{ admin_key }}" 11 | path: "{{ admin_kubeconfig }}" 12 | - name: system:kube-controller-manager 13 | context: system:kube-controller-manager@kubernetes 14 | ca: "{{ ca }}" 15 | cert: "{{ controller_manager }}" 16 | cert_key: "{{ controller_manager_key }}" 17 | path: "{{ controller_manager_kubeconfig }}" 18 | - name: system:kube-scheduler 19 | context: system:kube-scheduler@kubernetes 20 | ca: "{{ ca }}" 21 | cert: "{{ scheduler }}" 22 | cert_key: "{{ scheduler_key }}" 23 | path: "{{ scheduler_kubeconfig }}" 24 | - name: "system:node:{{ ansible_hostname }}" 25 | context: "system:node:{{ ansible_hostname }}@kubernetes" 26 | ca: "{{ ca }}" 27 | cert: "{{ kubelet }}" 28 | cert_key: "{{ kubelet_key }}" 29 | path: "{{ kubelet_kubeconfig }}" 30 | - name: kubelet-bootstrap 31 | context: default 32 | ca: "{{ ca }}" 33 | token: "{{ 
bootstrap_token }}" 34 | path: "{{ bootstrap_kubeconfig }}" 35 | copy_keys: 36 | node: 37 | - "{{ ca }}" 38 | - "{{ ca_key }}" 39 | master: 40 | - "{{ admin }}" 41 | - "{{ admin_key }}" 42 | - "{{ apiserver }}" 43 | - "{{ apiserver_key }}" 44 | - "{{ scheduler }}" 45 | - "{{ scheduler_key }}" 46 | - "{{ controller_manager }}" 47 | - "{{ controller_manager_key }}" 48 | - "{{ front_ca }}" 49 | - "{{ front_ca_key }}" 50 | - "{{ front_client }}" 51 | - "{{ front_client_key }}" 52 | - "{{ sa_private_key }}" 53 | - "{{ sa_public_key }}" 54 | -------------------------------------------------------------------------------- /roles/k8s-kubeconfig/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - { role: download/package, when: node_role == 'master', pkg: "{{ package.kubectl }}" } 5 | -------------------------------------------------------------------------------- /roles/k8s-kubeconfig/tasks/create-configs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check Kubernetes config already exists 4 | stat: 5 | path: "{{ item.path }}" 6 | with_items: "{{ config.contexts }}" 7 | register: check_config_files 8 | 9 | - name: Set Kubernetes cluster into config files 10 | when: not item.stat.exists 11 | command: | 12 | {{ bin_dir }}/kubectl config set-cluster {{ config.cluster_name }} \ 13 | --certificate-authority={{ item.item.ca }} \ 14 | --embed-certs=true \ 15 | --server={{ lb_api_url }} \ 16 | --kubeconfig={{ item.item.path }} 17 | with_items: "{{ check_config_files['results'] }}" 18 | register: set_cluster_config 19 | 20 | - name: Set Kubernetes credentials into config files 21 | when: set_cluster_config and not item.stat.exists 22 | command: | 23 | {{ bin_dir }}/kubectl config set-credentials {{ item.item.name }} \ 24 | {% if item.item.token is defined -%} 25 | --token={{ item.item.token }} \ 26 | {% else %} 27 | --client-certificate={{ item.item.cert }} \ 28 | --client-key={{ item.item.cert_key }} \ 29 | --embed-certs=true \ 30 | {% endif -%} 31 | --kubeconfig={{ item.item.path }} 32 | with_items: "{{ check_config_files['results'] }}" 33 | register: set_credentials_config 34 | 35 | - name: Set Kubernetes context into config files 36 | when: set_credentials_config and not item.stat.exists 37 | command: | 38 | {{ bin_dir }}/kubectl config set-context {{ item.item.context }} \ 39 | --cluster={{ config.cluster_name }} \ 40 | --user={{ item.item.name }} \ 41 | --kubeconfig={{ item.item.path }} 42 | with_items: "{{ check_config_files['results'] }}" 43 | register: set_context_config 44 | 45 | - name: Use Kubernetes context config files 46 | when: set_context_config and not item.stat.exists 47 | command: | 48 | {{ bin_dir }}/kubectl config use-context {{ item.item.context }} \ 49 | --kubeconfig={{ item.item.path }} 50 | with_items: "{{ check_config_files['results'] }}" 51 | register: use_context_config 52 | -------------------------------------------------------------------------------- /roles/k8s-kubeconfig/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure Kubernetes PKI directory exists 4 | file: path="{{ pki_dir }}" state=directory 5 | 6 | - name: Copy certificates and keys 7 | vars: 8 | files: "{{ copy_keys['' + node_role] }}" 9 | import_role: 10 | name: common/copy-files 11 | 12 | - when: node_role == 'master' 13 | include_tasks: create-configs.yml 14 | 15 | - name: Copy bootstrap 
kubeconfig to nodes 16 | when: node_role == 'node' 17 | vars: 18 | files: 19 | - "{{ bootstrap_kubeconfig }}" 20 | import_role: 21 | name: common/copy-files 22 | -------------------------------------------------------------------------------- /roles/k8s-setup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | log_level: 0 4 | 5 | # feature-gates 6 | feature_gates: "PodPriority=true" 7 | kubelet_feature_gates: 8 | PodPriority: true 9 | DevicePlugins: true 10 | 11 | # apiserver variables 12 | apiserver_bind_address: 0.0.0.0 13 | apiserver_secure_port: 6443 14 | apiserver_insecure_port: 0 15 | apiserver_allowed_names: front-proxy-client 16 | apiserver_disable_admission: "PersistentVolumeLabel" 17 | apiserver_enable_admission: "NodeRestriction" 18 | apiserver_authorization_mode: "Node,RBAC" 19 | 20 | # kubelet variables 21 | kubelet_bind_address: 0.0.0.0 22 | kubelet_bind_healthz_address: 127.0.0.1 23 | kubelet_bind_port: 10250 24 | kubelet_bind_read_port: 10255 25 | kubelet_bind_healthz_port: 10248 26 | 27 | # keepalived variables 28 | keepalived_unicast_peers: "{% for host in groups['masters'] %}{% if vip_interface != '' %}'{{ hostvars[host]['ansible_' + vip_interface].ipv4.address }}'{% else %}'{{ hostvars[host].ansible_default_ipv4.address }}'{% endif %}{% if not loop.last %},{% endif %}{% endfor %}" 29 | keepalived_password: "koobernetes" 30 | keepalived_priority: "{% if inventory_hostname == groups['masters'][0] %}100{% else %}150{% endif %}" 31 | keepalived_router_id: 51 32 | 33 | # haproxy variables 34 | haproxy_stats_bind_address: 9090 35 | haproxy_stats_uri: "/haproxy_stats" 36 | haproxy_stats_user: "admin" 37 | haproxy_stats_password: "admin123" 38 | 39 | # kube-dns variables 40 | cluster_domain_name: cluster.local 41 | cluster_dns_ip: "{{ cluster_subnet }}.10" 42 | 43 | # kube-proxy variables 44 | kubeproxy_bind_address: 0.0.0.0 45 | kubeproxy_healthz_bind_address: 0.0.0.0:10256 46 | kubeproxy_metrics_bind_address: 127.0.0.1:10249 47 | 48 | runtime_sockets: 49 | containerd: /run/containerd/containerd.sock 50 | -------------------------------------------------------------------------------- /roles/k8s-setup/files/apiserver-to-kubelet-rbac.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | annotations: 5 | rbac.authorization.kubernetes.io/autoupdate: "true" 6 | labels: 7 | kubernetes.io/bootstrapping: rbac-defaults 8 | name: system:kube-apiserver-to-kubelet 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - nodes/proxy 14 | - nodes/stats 15 | - nodes/log 16 | - nodes/spec 17 | - nodes/metrics 18 | verbs: 19 | - "*" 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: ClusterRoleBinding 23 | metadata: 24 | name: system:kube-apiserver 25 | namespace: "" 26 | roleRef: 27 | apiGroup: rbac.authorization.k8s.io 28 | kind: ClusterRole 29 | name: system:kube-apiserver-to-kubelet 30 | subjects: 31 | - apiGroup: rbac.authorization.k8s.io 32 | kind: User 33 | name: kube-apiserver 34 | -------------------------------------------------------------------------------- /roles/k8s-setup/files/kubelet-bootstrap-rbac.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kubelet-bootstrap 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | 
name: system:node-bootstrapper 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: Group 12 | name: system:bootstrappers:default-node-token 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | kind: ClusterRoleBinding 16 | metadata: 17 | name: node-autoapprove-bootstrap 18 | roleRef: 19 | apiGroup: rbac.authorization.k8s.io 20 | kind: ClusterRole 21 | name: system:certificates.k8s.io:certificatesigningrequests:nodeclient 22 | subjects: 23 | - apiGroup: rbac.authorization.k8s.io 24 | kind: Group 25 | name: system:bootstrappers:default-node-token 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: ClusterRoleBinding 29 | metadata: 30 | name: node-autoapprove-certificate-rotation 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: ClusterRole 34 | name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient 35 | subjects: 36 | - apiGroup: rbac.authorization.k8s.io 37 | kind: Group 38 | name: system:nodes 39 | -------------------------------------------------------------------------------- /roles/k8s-setup/files/kubelet-config-rbac.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: kubernetes:kubelet-config 5 | namespace: kube-system 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resourceNames: 10 | - kubelet-config-1.11 11 | resources: 12 | - configmaps 13 | verbs: 14 | - get 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: RoleBinding 18 | metadata: 19 | name: kubernetes:kubelet-config 20 | namespace: kube-system 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: Role 24 | name: kubernetes:kubelet-config 25 | subjects: 26 | - apiGroup: rbac.authorization.k8s.io 27 | kind: Group 28 | name: system:nodes 29 | - apiGroup: rbac.authorization.k8s.io 30 | kind: Group 31 | name: system:bootstrappers:kubernetes:default-node-token 32 | -------------------------------------------------------------------------------- /roles/k8s-setup/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - { role: download/package, pkg: "{{ package.cni }}" } 5 | - { role: download/package, pkg: "{{ package.kubelet }}" } 6 | -------------------------------------------------------------------------------- /roles/k8s-setup/tasks/config-systemd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create kubelet systemd dropin directory 4 | file: path="{{ systemd_dropin_dir }}/kubelet.service.d" state=directory 5 | 6 | - name: Copy kubelet dropin file into directory 7 | template: 8 | src: "10-kubelet.conf.j2" 9 | dest: "{{ systemd_dropin_dir }}/kubelet.service.d/10-kubelet.conf" 10 | 11 | - name: Copy kubelet systemd service file 12 | template: 13 | src: "kubelet.service.j2" 14 | dest: "{{ systemd_service_dir }}/kubelet.service" 15 | owner: root 16 | group: root 17 | mode: 0644 18 | 19 | - name: Disable vm swappiness 20 | shell: "swapoff -a && sysctl -w vm.swappiness=0" 21 | 22 | - name: Enable and restart kubelet engine 23 | systemd: 24 | name: kubelet 25 | daemon_reload: yes 26 | state: restarted 27 | enabled: yes 28 | register: service_started 29 | -------------------------------------------------------------------------------- /roles/k8s-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - when: node_role == 'master' 4 
| include_tasks: setup-masters.yml 5 | 6 | - include_tasks: setup-nodes.yml 7 | 8 | - include_tasks: config-systemd.yml 9 | 10 | - name: Wait for Kubernetes core components to start 11 | when: node_role == 'master' 12 | wait_for: 13 | host: "127.0.0.1" 14 | port: "{{ item }}" 15 | delay: 1 16 | connect_timeout: 60 17 | timeout: 300 18 | with_items: 19 | - "{{ apiserver_secure_port }}" 20 | - "{{ lb_secure_port }}" 21 | - "10252" 22 | - "10251" 23 | 24 | - when: node_role == 'master' 25 | include_tasks: setup-resources.yml 26 | -------------------------------------------------------------------------------- /roles/k8s-setup/tasks/setup-masters.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure HAProxy config directory exists 4 | file: path={{ haproxy_config_dir }} state=directory 5 | 6 | - name: Ensure audit directory exists 7 | file: path={{ audit_policy_dir }} state=directory 8 | 9 | - name: Ensure audit log directory exists 10 | file: path={{ audit_log_dir }} state=directory 11 | 12 | - name: Ensure encryption directory exists 13 | file: path={{ encryption_config_dir }} state=directory 14 | 15 | - name: Ensure Kubernetes manifests directory exists 16 | file: path={{ manifest_dir }} state=directory 17 | 18 | - name: Copy HAProxy manifest and config files into cluster 19 | when: enable_haproxy 20 | template: src="{{ item.src }}.j2" dest="{{ item.dest }}" 21 | with_items: 22 | - { src: "etc/haproxy.cfg", dest: "{{ haproxy_config }}" } 23 | - { src: "manifests/haproxy.yml", dest: "{{ manifest_dir }}/haproxy.yml" } 24 | 25 | - name: Copy Keepalived manifest and config files into cluster 26 | when: enable_keepalived 27 | template: src="manifests/keepalived.yml.j2" dest="{{ manifest_dir }}/keepalived.yml" 28 | 29 | - name: Copy Kubernetes manifest and config files into cluster 30 | template: src="{{ item.src }}.j2" dest="{{ item.dest }}" 31 | with_items: 32 | - { src: "audit/policy.yml", dest: "{{ audit_policy }}" } 33 | - { src: "encryption/config.yml", dest: "{{ encryption_config }}" } 34 | - { src: "manifests/kube-apiserver.yml", dest: "{{ manifest_dir }}/kube-apiserver.yml" } 35 | - { src: "manifests/kube-scheduler.yml", dest: "{{ manifest_dir }}/kube-scheduler.yml" } 36 | - { src: "manifests/kube-controller-manager.yml", dest: "{{ manifest_dir }}/kube-controller-manager.yml" } 37 | 38 | - name: Ensure kubectl config directory exists 39 | file: path={{ ansible_env.HOME }}/.kube state=directory 40 | 41 | - name: Copy Kubernetes admin config to home directory 42 | copy: 43 | src: "{{ admin_kubeconfig }}" 44 | dest: "{{ ansible_env.HOME }}/.kube/config" 45 | remote_src: yes 46 | -------------------------------------------------------------------------------- /roles/k8s-setup/tasks/setup-nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure kubelet config directory exists 4 | file: path={{ kubelet_config_dir }} state=directory 5 | 6 | - name: Ensure CNI config directory exists 7 | file: path={{ cni_etc_dir }} state=directory 8 | 9 | - name: Copy kubelet config template 10 | template: 11 | src: "kubelet-config.yml.j2" 12 | dest: "{{ kubelet_config }}" 13 | -------------------------------------------------------------------------------- /roles/k8s-setup/tasks/setup-resources.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy apiserver-to-kubelet and kubelet bootstrap RBAC manifests 4 | copy: src="{{ item }}" dest="{{ cache_dir }}/{{ item }}" 5 | with_items: 6 | - apiserver-to-kubelet-rbac.yml 7 | - kubelet-bootstrap-rbac.yml 8 | delegate_to:
"{{ groups['masters'][0] }}" 9 | run_once: true 10 | register: copy_rbac_file 11 | 12 | - name: Copy TLS bootstrap secret template 13 | template: src=kubelet-bootstrap-secret.yml.j2 dest="{{ cache_dir }}/kubelet-bootstrap-secret.yml" 14 | delegate_to: "{{ groups['masters'][0] }}" 15 | run_once: true 16 | register: copy_sercet_file 17 | 18 | - name: Create kube-apiserver to kubelet RBAC 19 | when: copy_rbac_file and service_started 20 | command: | 21 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 22 | apply -f {{ cache_dir }}/apiserver-to-kubelet-rbac.yml 23 | delegate_to: "{{ groups['masters'][0] }}" 24 | run_once: true 25 | register: create_result 26 | until: create_result.rc == 0 27 | retries: 10 28 | delay: 2 29 | ignore_errors: true 30 | 31 | - name: Create TLS bootstrap secret 32 | when: copy_sercet_file and service_started 33 | command: | 34 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 35 | apply -f {{ cache_dir }}/kubelet-bootstrap-secret.yml 36 | delegate_to: "{{ groups['masters'][0] }}" 37 | run_once: true 38 | register: create_result 39 | until: create_result.rc == 0 40 | retries: 10 41 | delay: 2 42 | ignore_errors: true 43 | 44 | - name: Create TLS bootstrap RBAC 45 | when: copy_rbac_file and service_started 46 | command: | 47 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 48 | apply -f {{ cache_dir }}/kubelet-bootstrap-rbac.yml 49 | delegate_to: "{{ groups['masters'][0] }}" 50 | run_once: true 51 | register: create_result 52 | until: create_result.rc == 0 53 | retries: 10 54 | delay: 2 55 | ignore_errors: true 56 | 57 | - name: Set taint to effect NoSchedule 58 | command: | 59 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 60 | taint nodes {{ ansible_hostname }} node-role.kubernetes.io/master="":NoSchedule --overwrite 61 | register: set_taint 62 | until: set_taint.rc == 0 63 | retries: 10 64 | delay: 2 65 | ignore_errors: true 66 | 67 | - name: Create nvidia device plugin daemonset 68 | when: container_runtime == 'nvidia-docker' and service_started 69 | command: | 70 | {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \ 71 | apply -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v1.{{ kube_version.split(".")[1] }}/nvidia-device-plugin.yml 72 | delegate_to: "{{ groups['masters'][0] }}" 73 | run_once: true 74 | register: create_result 75 | until: create_result.rc == 0 76 | retries: 10 77 | delay: 2 78 | ignore_errors: true 79 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/10-kubelet.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig={{ bootstrap_kubeconfig }} --kubeconfig={{ kubelet_kubeconfig }}" 3 | Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir={{ cni_etc_dir }} --cni-bin-dir={{ cni_bin_dir }} --allow-privileged=true" 4 | Environment="KUBELET_CONFIG_ARGS=--config={{ kubelet_config }}" 5 | {% if node_role == 'master' %} 6 | Environment="KUBELET_EXTRA_ARGS=--node-labels=node-role.kubernetes.io/master=''" 7 | {% endif %} 8 | {% if container_runtime != 'docker' and container_runtime != 'nvidia-docker' %} 9 | Environment="CRI_RUNTIME_ARGS=--container-runtime=remote --container-runtime-endpoint=unix://{{ runtime_sockets['' + container_runtime] }}" 10 | {% endif %} 11 | ExecStart= 12 | ExecStart={{ bin_dir }}/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS 
$CRI_RUNTIME_ARGS 13 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/audit/policy.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: audit.k8s.io/v1beta1 2 | kind: Policy 3 | rules: 4 | - level: Metadata 5 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/encryption/config.yml.j2: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfig 2 | apiVersion: v1 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - aescbc: 8 | keys: 9 | - name: key1 10 | secret: "{{ encryption_token }}" 11 | - identity: {} 12 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/etc/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | global 2 | log 127.0.0.1 local0 3 | log 127.0.0.1 local1 notice 4 | tune.ssl.default-dh-param 2048 5 | 6 | defaults 7 | log global 8 | mode http 9 | option dontlognull 10 | timeout connect 5000ms 11 | timeout client 1800000ms 12 | timeout server 1800000ms 13 | 14 | listen stats 15 | bind :{{ haproxy_stats_bind_address }} 16 | mode http 17 | balance 18 | stats uri {{ haproxy_stats_uri }} 19 | stats auth {{ haproxy_stats_user }}:{{ haproxy_stats_password }} 20 | stats admin if TRUE 21 | 22 | frontend kube-apiserver-https 23 | mode tcp 24 | bind :{{ lb_secure_port }} 25 | default_backend kube-apiserver-backend 26 | 27 | backend kube-apiserver-backend 28 | mode tcp 29 | {% for host in groups['masters'] %} 30 | server kube-apiserver{{ loop.index }} {{ host }}:{{ apiserver_secure_port }} check 31 | {% endfor %} 32 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/kubelet-bootstrap-secret.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: bootstrap-token-{{ bootstrap_token_id }} 5 | namespace: kube-system 6 | type: bootstrap.kubernetes.io/token 7 | stringData: 8 | token-id: "{{ bootstrap_token_id }}" 9 | token-secret: "{{ bootstrap_token_secret }}" 10 | usage-bootstrap-authentication: "true" 11 | usage-bootstrap-signing: "true" 12 | auth-extra-groups: system:bootstrappers:default-node-token 13 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/kubelet-config-cm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | kubelet: | 4 | {{ kubelet_config_content }} 5 | kind: ConfigMap 6 | metadata: 7 | name: kubelet-config 8 | namespace: kube-system 9 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/kubelet-config.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kubelet.config.k8s.io/v1beta1 2 | kind: KubeletConfiguration 3 | address: {{ kubelet_bind_address }} 4 | port: {{ kubelet_bind_port }} 5 | readOnlyPort: {{ kubelet_bind_read_port }} 6 | healthzBindAddress: {{ kubelet_bind_healthz_address }} 7 | healthzPort: {{ kubelet_bind_healthz_port }} 8 | staticPodPath: {{ manifest_dir }} 9 | clusterDNS: 10 | - {{ cluster_dns_ip }} 11 | clusterDomain: {{ cluster_domain_name }} 12 | {% if kubelet_feature_gates -%} 13 |
featureGates: 14 | {% for k,v in kubelet_feature_gates.items() %} 15 | {{ k }}: {{ v | lower }} 16 | {% endfor -%} 17 | {% endif -%} 18 | authentication: 19 | anonymous: 20 | enabled: false 21 | webhook: 22 | cacheTTL: 2m0s 23 | enabled: true 24 | x509: 25 | clientCAFile: {{ ca }} 26 | authorization: 27 | mode: Webhook 28 | webhook: 29 | cacheAuthorizedTTL: 5m0s 30 | cacheUnauthorizedTTL: 30s 31 | failSwapOn: true 32 | cgroupDriver: cgroupfs 33 | cgroupsPerQOS: true 34 | containerLogMaxFiles: 5 35 | containerLogMaxSize: 10Mi 36 | contentType: application/vnd.kubernetes.protobuf 37 | cpuCFSQuota: true 38 | cpuManagerPolicy: none 39 | cpuManagerReconcilePeriod: 10s 40 | enableControllerAttachDetach: true 41 | enableDebuggingHandlers: true 42 | enforceNodeAllocatable: 43 | - pods 44 | eventBurst: 10 45 | eventRecordQPS: 5 46 | evictionHard: 47 | imagefs.available: 15% 48 | memory.available: 100Mi 49 | nodefs.available: 10% 50 | nodefs.inodesFree: 5% 51 | evictionPressureTransitionPeriod: 5m0s 52 | fileCheckFrequency: 20s 53 | hairpinMode: promiscuous-bridge 54 | httpCheckFrequency: 20s 55 | imageGCHighThresholdPercent: 85 56 | imageGCLowThresholdPercent: 80 57 | imageMinimumGCAge: 2m0s 58 | iptablesDropBit: 15 59 | iptablesMasqueradeBit: 14 60 | kubeAPIBurst: 10 61 | kubeAPIQPS: 5 62 | makeIPTablesUtilChains: true 63 | maxOpenFiles: 1000000 64 | maxPods: 110 65 | nodeStatusUpdateFrequency: 10s 66 | oomScoreAdj: -999 67 | podPidsLimit: -1 68 | registryBurst: 10 69 | registryPullQPS: 5 70 | resolvConf: /etc/resolv.conf 71 | rotateCertificates: true 72 | runtimeRequestTimeout: 2m0s 73 | serializeImagePulls: true 74 | streamingConnectionIdleTimeout: 4h0m0s 75 | syncFrequency: 1m0s 76 | volumeStatsAggPeriod: 1m0s 77 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/kubelet.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=The Kubernetes Node Agent 3 | Documentation=http://kubernetes.io/docs/ 4 | 5 | [Service] 6 | ExecStart={{ bin_dir }}/kubelet 7 | Restart=on-failure 8 | StartLimitBurst=3 9 | StartLimitInterval=60s 10 | RestartSec=10 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/manifests/haproxy.yml.j2: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | annotations: 5 | scheduler.alpha.kubernetes.io/critical-pod: "" 6 | labels: 7 | component: haproxy 8 | tier: control-plane 9 | name: kube-haproxy 10 | namespace: kube-system 11 | spec: 12 | hostNetwork: true 13 | priorityClassName: system-cluster-critical 14 | containers: 15 | - name: kube-haproxy 16 | image: docker.io/haproxy:1.7-alpine 17 | resources: 18 | requests: 19 | cpu: 100m 20 | volumeMounts: 21 | - name: haproxy-cfg 22 | readOnly: true 23 | mountPath: /usr/local/etc/haproxy/haproxy.cfg 24 | volumes: 25 | - hostPath: 26 | path: {{ haproxy_config }} 27 | type: File 28 | name: haproxy-cfg 29 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/manifests/keepalived.yml.j2: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | annotations: 5 | scheduler.alpha.kubernetes.io/critical-pod: "" 6 | labels: 7 | component: keepalived 8 | tier: control-plane 9 | name: kube-keepalived 10 |
namespace: kube-system 11 | spec: 12 | hostNetwork: true 13 | priorityClassName: system-cluster-critical 14 | containers: 15 | - name: kube-keepalived 16 | image: docker.io/osixia/keepalived:1.4.5 17 | env: 18 | - name: KEEPALIVED_VIRTUAL_IPS 19 | value: "{{ vip_address }}" 20 | - name: KEEPALIVED_INTERFACE 21 | value: "{% if vip_interface != '' %}{{ vip_interface }}{% else %}{{ ansible_default_ipv4.interface }}{% endif %}" 22 | - name: KEEPALIVED_UNICAST_PEERS 23 | value: "#PYTHON2BASH:[{{ keepalived_unicast_peers }}]" 24 | - name: KEEPALIVED_PASSWORD 25 | value: {{ keepalived_password }} 26 | - name: KEEPALIVED_PRIORITY 27 | value: "{{ keepalived_priority }}" 28 | - name: KEEPALIVED_ROUTER_ID 29 | value: "{{ keepalived_router_id }}" 30 | resources: 31 | requests: 32 | cpu: 100m 33 | securityContext: 34 | privileged: true 35 | capabilities: 36 | add: 37 | - NET_ADMIN 38 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/manifests/kube-apiserver.yml.j2: -------------------------------------------------------------------------------- 1 | {% macro etcd_initial_cluster() -%} 2 | {% for host in groups['etcds'] -%} 3 | https:// 4 | {%- if etcd_iface != "" -%} 5 | {{ hostvars[host]['ansible_' + etcd_iface].ipv4.address }} 6 | {%- else -%} 7 | {{ hostvars[host].ansible_default_ipv4.address }} 8 | {%- endif -%} 9 | :2379 10 | {%- if not loop.last -%},{%- endif -%} 11 | {%- endfor -%} 12 | {% endmacro -%} 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | annotations: 17 | scheduler.alpha.kubernetes.io/critical-pod: "" 18 | labels: 19 | component: kube-apiserver 20 | tier: control-plane 21 | name: kube-apiserver 22 | namespace: kube-system 23 | spec: 24 | hostNetwork: true 25 | priorityClassName: system-cluster-critical 26 | containers: 27 | - name: kube-apiserver 28 | image: k8s.gcr.io/kube-apiserver-amd64:v{{ kube_version }} 29 | command: 30 | - kube-apiserver 31 | - --v={{ log_level }} 32 | - --logtostderr=true 33 | - --allow-privileged=true 34 | - --bind-address={{ apiserver_bind_address }} 35 | - --secure-port={{ apiserver_secure_port }} 36 | - --insecure-port={{ apiserver_insecure_port }} 37 | - --advertise-address={{ vip_address }} 38 | - --service-cluster-ip-range={{ service_ip_range }} 39 | - --service-node-port-range={{ service_node_port_range }} 40 | - --etcd-servers={{ etcd_initial_cluster() }} 41 | - --etcd-cafile={{ etcd_ca }} 42 | - --etcd-certfile={{ etcd_cert }} 43 | - --etcd-keyfile={{ etcd_cert_key }} 44 | - --client-ca-file={{ ca }} 45 | - --tls-cert-file={{ apiserver }} 46 | - --tls-private-key-file={{ apiserver_key }} 47 | - --kubelet-client-certificate={{ apiserver }} 48 | - --kubelet-client-key={{ apiserver_key }} 49 | - --service-account-key-file={{ sa_public_key }} 50 | - --requestheader-client-ca-file={{ front_ca }} 51 | - --proxy-client-cert-file={{ front_client }} 52 | - --proxy-client-key-file={{ front_client_key }} 53 | - --requestheader-allowed-names={{ apiserver_allowed_names }} 54 | - --requestheader-group-headers=X-Remote-Group 55 | - --requestheader-extra-headers-prefix=X-Remote-Extra- 56 | - --requestheader-username-headers=X-Remote-User 57 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 58 | - --disable-admission-plugins={{ apiserver_disable_admission }} 59 | - --enable-admission-plugins={{ apiserver_enable_admission }} 60 | - --authorization-mode={{ apiserver_authorization_mode }} 61 | - --enable-bootstrap-token-auth=true 62 | - --audit-log-maxage=30 63 | -
--audit-log-maxbackup=3 64 | - --audit-log-maxsize=128 65 | - --audit-log-path={{ audit_log }} 66 | - --audit-policy-file={{ audit_policy }} 67 | - --experimental-encryption-provider-config={{ encryption_config }} 68 | {% if feature_gates != '' -%} 69 | - --feature-gates={{ feature_gates }} 70 | {% endif -%} 71 | - --event-ttl=1h 72 | livenessProbe: 73 | failureThreshold: 8 74 | httpGet: 75 | host: 127.0.0.1 76 | path: /healthz 77 | port: {{ apiserver_secure_port }} 78 | scheme: HTTPS 79 | initialDelaySeconds: 15 80 | timeoutSeconds: 15 81 | resources: 82 | requests: 83 | cpu: 250m 84 | volumeMounts: 85 | - mountPath: {{ audit_log_dir }} 86 | name: k8s-audit-log 87 | - mountPath: {{ pki_dir }} 88 | name: k8s-certs 89 | readOnly: true 90 | - mountPath: /etc/ssl/certs 91 | name: ca-certs 92 | readOnly: true 93 | - mountPath: {{ encryption_config }} 94 | name: encryption-config 95 | readOnly: true 96 | - mountPath: {{ audit_policy }} 97 | name: audit-policy 98 | readOnly: true 99 | - mountPath: {{ etcd_pki_dir }} 100 | name: etcd-ca-certs 101 | readOnly: true 102 | volumes: 103 | - hostPath: 104 | path: {{ audit_log_dir }} 105 | type: Directory 106 | name: k8s-audit-log 107 | - hostPath: 108 | path: {{ pki_dir }} 109 | type: Directory 110 | name: k8s-certs 111 | - hostPath: 112 | path: {{ encryption_config }} 113 | type: File 114 | name: encryption-config 115 | - hostPath: 116 | path: {{ audit_policy }} 117 | type: File 118 | name: audit-policy 119 | - hostPath: 120 | path: {{ etcd_pki_dir }} 121 | type: Directory 122 | name: etcd-ca-certs 123 | - hostPath: 124 | path: /etc/ssl/certs 125 | type: Directory 126 | name: ca-certs 127 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/manifests/kube-controller-manager.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | scheduler.alpha.kubernetes.io/critical-pod: "" 6 | labels: 7 | component: kube-controller-manager 8 | tier: control-plane 9 | name: kube-controller-manager 10 | namespace: kube-system 11 | spec: 12 | hostNetwork: true 13 | priorityClassName: system-cluster-critical 14 | containers: 15 | - name: kube-controller-manager 16 | image: k8s.gcr.io/kube-controller-manager-amd64:v{{ kube_version }} 17 | command: 18 | - kube-controller-manager 19 | - --v={{ log_level }} 20 | - --logtostderr=true 21 | - --address=127.0.0.1 22 | - --root-ca-file={{ ca }} 23 | - --cluster-signing-cert-file={{ ca }} 24 | - --cluster-signing-key-file={{ ca_key }} 25 | - --service-account-private-key-file={{ sa_private_key }} 26 | - --kubeconfig={{ controller_manager_kubeconfig }} 27 | - --leader-elect=true 28 | - --use-service-account-credentials=true 29 | - --node-monitor-grace-period=40s 30 | - --node-monitor-period=5s 31 | - --pod-eviction-timeout=2m0s 32 | - --controllers=*,bootstrapsigner,tokencleaner 33 | - --allocate-node-cidrs=true 34 | - --cluster-cidr={{ pod_network_cidr }} 35 | {% if feature_gates != '' -%} 36 | - --feature-gates={{ feature_gates }} 37 | {% endif -%} 38 | - --node-cidr-mask-size=24 39 | livenessProbe: 40 | failureThreshold: 8 41 | httpGet: 42 | host: 127.0.0.1 43 | path: /healthz 44 | port: 10252 45 | scheme: HTTP 46 | initialDelaySeconds: 15 47 | timeoutSeconds: 15 48 | resources: 49 | requests: 50 | cpu: 200m 51 | volumeMounts: 52 | - mountPath: {{ pki_dir }} 53 | name: k8s-certs 54 | readOnly: true 55 | - mountPath: /etc/ssl/certs 56 | name: ca-certs 57 | readOnly: true 58 
| - mountPath: {{ controller_manager_kubeconfig }} 59 | name: kubeconfig 60 | readOnly: true 61 | - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec 62 | name: flexvolume-dir 63 | volumes: 64 | - hostPath: 65 | path: {{ pki_dir }} 66 | type: DirectoryOrCreate 67 | name: k8s-certs 68 | - hostPath: 69 | path: /etc/ssl/certs 70 | type: DirectoryOrCreate 71 | name: ca-certs 72 | - hostPath: 73 | path: {{ controller_manager_kubeconfig }} 74 | type: File 75 | name: kubeconfig 76 | - hostPath: 77 | path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec 78 | type: DirectoryOrCreate 79 | name: flexvolume-dir 80 | -------------------------------------------------------------------------------- /roles/k8s-setup/templates/manifests/kube-scheduler.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | scheduler.alpha.kubernetes.io/critical-pod: "" 6 | labels: 7 | component: kube-scheduler 8 | tier: control-plane 9 | name: kube-scheduler 10 | namespace: kube-system 11 | spec: 12 | hostNetwork: true 13 | priorityClassName: system-cluster-critical 14 | containers: 15 | - name: kube-scheduler 16 | image: k8s.gcr.io/kube-scheduler-amd64:v{{ kube_version }} 17 | command: 18 | - kube-scheduler 19 | - --v={{ log_level }} 20 | - --logtostderr=true 21 | - --address=127.0.0.1 22 | - --leader-elect=true 23 | {% if feature_gates != '' -%} 24 | - --feature-gates={{ feature_gates }} 25 | {% endif -%} 26 | - --kubeconfig={{ scheduler_kubeconfig }} 27 | livenessProbe: 28 | failureThreshold: 8 29 | httpGet: 30 | host: 127.0.0.1 31 | path: /healthz 32 | port: 10251 33 | scheme: HTTP 34 | initialDelaySeconds: 15 35 | timeoutSeconds: 15 36 | resources: 37 | requests: 38 | cpu: 100m 39 | volumeMounts: 40 | - mountPath: {{ pki_dir }} 41 | name: k8s-certs 42 | readOnly: true 43 | - mountPath: {{ scheduler_kubeconfig }} 44 | name: kubeconfig 45 | readOnly: true 46 | volumes: 47 | - hostPath: 48 | path: {{ pki_dir }} 49 | type: DirectoryOrCreate 50 | name: k8s-certs 51 | - hostPath: 52 | path: {{ scheduler_kubeconfig }} 53 | type: FileOrCreate 54 | name: kubeconfig 55 | --------------------------------------------------------------------------------
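All of the manifests above are rendered into {{ manifest_dir }} and started by the kubelet through its staticPodPath setting, so the control plane has no service unit of its own. A minimal health-check task in this repository's idiom — a sketch only: the task name and the check_cs variable are illustrative, while bin_dir, admin_kubeconfig, and the masters group are the ones defined earlier:

- name: Verify control plane component health
  command: |
    {{ bin_dir }}/kubectl --kubeconfig={{ admin_kubeconfig }} \
    get componentstatuses
  delegate_to: "{{ groups['masters'][0] }}"
  run_once: true
  register: check_cs
  until: check_cs.rc == 0
  retries: 10
  delay: 2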