├── .gitignore ├── playbooks ├── calico.yaml ├── docker.yaml ├── kubeadm-prep.yaml ├── repos.yaml ├── helm-install.yaml ├── uninstall.yaml ├── elastic-search.yaml ├── nginx-install.yaml ├── aad-authentication.yaml ├── ansible-requirements.yaml ├── kubeadm-init-master.yaml ├── kubeadm-join-workers.yaml ├── os-patch-updates.yaml ├── kubernetes-dashboard.yaml ├── kubeadm-upgrade-master.yaml ├── kubeadm-singlenode-cluster.yaml ├── local-storage-provisioner.yaml ├── kubeadm-upgrade-workers.yaml ├── kubeadm-join-masters.yaml ├── upgrade-all.yaml ├── install-all.yaml ├── install_docker.yaml └── upgrade_worker.yaml ├── roles ├── calico │ ├── README.md │ ├── tasks │ │ └── main.yaml │ └── templates │ │ └── calico.yaml ├── docker │ ├── templates │ │ └── container-registry01.nonprod.domain.local-ca.crt │ └── tasks │ │ └── main.yaml ├── kubernetes-dashboard │ ├── README.md │ ├── tasks │ │ └── main.yaml │ └── templates │ │ └── recommended.yaml ├── kubeadm-singlenode-cluster │ └── tasks │ │ └── main.yaml ├── kubeadm-upgrade-workers │ └── tasks │ │ └── main.yaml ├── os-patch-updates │ └── tasks │ │ ├── main.yml │ │ └── os-patches.yml ├── local-storage-provisioner │ ├── tasks │ │ └── main.yaml │ └── templates │ │ └── local-storage-provisioner.yaml ├── aad-authentication │ ├── tasks │ │ ├── aad_masters.yaml │ │ └── main.yaml │ └── README.md ├── kubeadm-join-workers │ └── tasks │ │ └── main.yaml ├── ansible-requirements │ └── tasks │ │ └── main.yml ├── backup-cronjob │ ├── README.md │ ├── tasks │ │ └── main.yaml │ └── templates │ │ └── etcd-backup-cronjob.yaml ├── kubeadm-upgrade-masters │ └── tasks │ │ ├── upgrade_masters.yaml │ │ └── main.yaml ├── ingress-nginx │ ├── tasks │ │ └── main.yaml │ └── templates │ │ └── ingress-nginx.yaml ├── helm-install │ ├── README.md │ └── tasks │ │ └── main.yml ├── repos │ └── tasks │ │ └── main.yaml ├── uninstall │ └── tasks │ │ └── main.yaml ├── kubeadm-join-masters │ └── tasks │ │ └── main.yaml ├── kubeadm-prep │ └── tasks │ │ └── main.yaml ├── kubeadm-init-master │ └── tasks │ │ └── main.yaml └── elastic-search │ ├── tasks │ └── main.yaml │ └── templates │ ├── kube-state-metrics.yaml │ ├── filebeat-kubernetes.yaml │ └── metricbeat-kubernetes.yaml ├── ansible.cfg ├── inventory ├── cluster1-prod ├── cluster2-nonprod └── group_vars │ ├── cluster1-prod │ └── all │ └── cluster2-nonprod │ └── all └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | *.csr 3 | *.key 4 | *secret.yaml 5 | key.pem 6 | -------------------------------------------------------------------------------- /playbooks/calico.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - calico -------------------------------------------------------------------------------- /roles/calico/README.md: -------------------------------------------------------------------------------- 1 | 2 | curl https://docs.projectcalico.org/v3.8/manifests/calico.yaml -O 3 | -------------------------------------------------------------------------------- /playbooks/docker.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 | - docker 5 | 6 | 7 | -------------------------------------------------------------------------------- /playbooks/kubeadm-prep.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 
| - kubeadm-prep -------------------------------------------------------------------------------- /playbooks/repos.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 | - repos 5 | 6 | 7 | -------------------------------------------------------------------------------- /playbooks/helm-install.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - helm-install -------------------------------------------------------------------------------- /playbooks/uninstall.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 | - uninstall 5 | 6 | 7 | -------------------------------------------------------------------------------- /playbooks/elastic-search.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - elastic-search -------------------------------------------------------------------------------- /playbooks/nginx-install.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - ingress-nginx 5 | -------------------------------------------------------------------------------- /playbooks/aad-authentication.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - aad-authentication -------------------------------------------------------------------------------- /playbooks/ansible-requirements.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 | - ansible-requirements 5 | -------------------------------------------------------------------------------- /playbooks/kubeadm-init-master.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - kubeadm-init-master -------------------------------------------------------------------------------- /playbooks/kubeadm-join-workers.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-workers 2 | become: yes 3 | roles: 4 | - kubeadm-join-workers 5 | -------------------------------------------------------------------------------- /playbooks/os-patch-updates.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-masters:k8s-workers 2 | become: yes 3 | roles: 4 | - os-patch-updates -------------------------------------------------------------------------------- /playbooks/kubernetes-dashboard.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - kubernetes-dashboard -------------------------------------------------------------------------------- /playbooks/kubeadm-upgrade-master.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - kubeadm-upgrade-masters -------------------------------------------------------------------------------- /playbooks/kubeadm-singlenode-cluster.yaml: 
-------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - kubeadm-singlenode-cluster -------------------------------------------------------------------------------- /playbooks/local-storage-provisioner.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-primary 2 | become: yes 3 | roles: 4 | - local-storage-provisioner -------------------------------------------------------------------------------- /roles/docker/templates/container-registry01.nonprod.domain.local-ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | XXXXX 3 | -----END CERTIFICATE----- 4 | -------------------------------------------------------------------------------- /playbooks/kubeadm-upgrade-workers.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-workers 2 | become: yes 3 | serial: 1 4 | roles: 5 | - kubeadm-upgrade-workers 6 | -------------------------------------------------------------------------------- /roles/kubernetes-dashboard/README.md: -------------------------------------------------------------------------------- 1 | 2 | curl https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta3/aio/deploy/recommended.yaml -O 3 | 4 | -------------------------------------------------------------------------------- /playbooks/kubeadm-join-masters.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-master-replicas 2 | become: yes 3 | roles: 4 | - kubeadm-join-masters 5 | #- {role: kubeadm-join-master, when: "groups['k8s-masters'][0] != inventory_hostname"} -------------------------------------------------------------------------------- /playbooks/upgrade-all.yaml: -------------------------------------------------------------------------------- 1 | #- import_playbook: execute-backup.yaml 2 | - import_playbook: kubeadm-upgrade-master.yaml 3 | #- import_playbook: calico.yaml 4 | #- import_playbook: kubeadm-upgrade-masterreplicas.yaml 5 | - import_playbook: kubeadm-upgrade-workers.yaml 6 | -------------------------------------------------------------------------------- /roles/kubeadm-singlenode-cluster/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: kubectl taint remove master 2 | shell: kubectl taint node {{ hostvars[item].inventory_hostname_short }} node-role.kubernetes.io/master- 3 | with_items: "{{ groups['k8s-master-primary'][0] }}" 4 | ignore_errors: True -------------------------------------------------------------------------------- /roles/kubeadm-upgrade-workers/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | 2 | - name: Upgrade workers sequentially 3 | include_tasks: "upgrade_worker.yaml" 4 | with_items: "{{ groups['k8s-workers'] }}" 5 | when: "hostvars[host_item].inventory_hostname == inventory_hostname" 6 | loop_control: 7 | loop_var: host_item -------------------------------------------------------------------------------- /roles/os-patch-updates/tasks/main.yml: -------------------------------------------------------------------------------- 1 | 2 | - name: OS patch hosts sequentially 3 | include_tasks: "os-patches.yml" 4 | with_items: 5 | - "{{ groups['k8s-masters'] }}" 6 | - "{{ groups['k8s-workers'] }}" 7 | when: "hostvars[host_item].inventory_hostname == 
inventory_hostname" 8 | loop_control: 9 | loop_var: host_item 10 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | forks = 12 4 | inventory = inventory 5 | roles_path = roles 6 | display_skipped_hosts = false 7 | any_errors_fatal = true 8 | 9 | #[ssh_connection] 10 | #pipelining = True 11 | #control_path = /tmp/ansible-ssh-%%h-%%p-%%r 12 | #ssh_args = -C -o ControlMaster=auto -o ControlPersist=1800 13 | -------------------------------------------------------------------------------- /roles/local-storage-provisioner/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | 2 | - name: copy local-storage-provisioner manifests 3 | template: 4 | src: local-storage-provisioner.yaml 5 | dest: /tmp/local-storage-provisioner.yaml 6 | force: yes 7 | 8 | - name: Create Local Storage Provisioner Deployments 9 | shell: kubectl apply -f /tmp/local-storage-provisioner.yaml -------------------------------------------------------------------------------- /roles/aad-authentication/tasks/aad_masters.yaml: -------------------------------------------------------------------------------- 1 | - name: Add Azure OIDC authentication to Kubernetes 2 | blockinfile: 3 | path: /etc/kubernetes/manifests/kube-apiserver.yaml 4 | insertafter: '^ - kube-apiserver' 5 | block: |2 6 | - --oidc-client-id=spn:XXXXX 7 | - --oidc-issuer-url=https://sts.windows.net/XXXXX/ 8 | - --oidc-username-claim=upn 9 | - --oidc-groups-claim=groups -------------------------------------------------------------------------------- /roles/kubeadm-join-workers/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Generate join token 2 | shell: kubeadm token create --print-join-command 3 | register: kubeadm_join_cmd 4 | delegate_to: "{{ groups['k8s-masters'][0] }}" 5 | 6 | - set_fact: 7 | kubeadm_join: "{{ kubeadm_join_cmd.stdout }}" 8 | 9 | - name: Run kubeadm join 10 | shell: "{{ kubeadm_join }}" 11 | 12 | - pause: 13 | prompt: "Wait for containers to Pull and install" -------------------------------------------------------------------------------- /inventory/cluster1-prod: -------------------------------------------------------------------------------- 1 | [k8s-master-primary] 2 | master1.domain.local 3 | 4 | [k8s-master-replicas] 5 | master2.domain.local 6 | master3.domain.local 7 | 8 | [k8s-masters:children] 9 | k8s-master-primary 10 | k8s-master-replicas 11 | 12 | [k8s-workers] 13 | worker1.domain.local 14 | worker2.domain.local 15 | worker3.domain.local 16 | 17 | [k8s-nodes:children] 18 | k8s-masters 19 | k8s-workers 20 | 21 | [cluster1-prod:children] 22 | k8s-nodes 23 | -------------------------------------------------------------------------------- /inventory/cluster2-nonprod: -------------------------------------------------------------------------------- 1 | [k8s-master-primary] 2 | master1.domain.local 3 | 4 | [k8s-master-replicas] 5 | master2.domain.local 6 | master3.domain.local 7 | 8 | [k8s-masters:children] 9 | k8s-master-primary 10 | k8s-master-replicas 11 | 12 | [k8s-workers] 13 | worker1.domain.local 14 | worker2.domain.local 15 | worker3.domain.local 16 | 17 | [k8s-nodes:children] 18 | k8s-masters 19 | k8s-workers 20 | 21 | [cluster2-nonprod:children] 22 | k8s-nodes 23 | -------------------------------------------------------------------------------- 
/roles/docker/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Install docker dependencies 2 | become: yes 3 | yum: 4 | name: "{{item}}" 5 | with_items: 6 | - device-mapper-persistent-data 7 | - lvm2 8 | 9 | - name: Install docker on hosts sequentially 10 | include_tasks: "install_docker.yaml" 11 | with_items: "{{ groups['k8s-nodes'] }}" 12 | when: "hostvars[host_item].inventory_hostname == inventory_hostname" 13 | loop_control: 14 | loop_var: host_item 15 | -------------------------------------------------------------------------------- /roles/ansible-requirements/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check if yum is installed. 2 | raw: yum --version 3 | register: has_yum 4 | failed_when: False 5 | 6 | - name: Clean yum 7 | raw: yum clean all 8 | when: has_yum.rc == 0 9 | 10 | - name: Install python2 [yum]. 11 | raw: yum install -y python 12 | register: installed_via_yum 13 | when: has_yum.rc == 0 14 | 15 | - name: Ensure python2 has been installed properly. 16 | raw: test -e /usr/bin/python 17 | -------------------------------------------------------------------------------- /roles/calico/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Copy calico.yaml manifests 2 | template: 3 | src: calico.yaml 4 | dest: /tmp/calico.yaml 5 | force: yes 6 | 7 | - name: Set Calico container image value 8 | replace: 9 | path: /tmp/calico.yaml 10 | regexp: "image: calico" 11 | replace: "image: {{ container_registry }}/keystone" 12 | 13 | - name: Create Calico Deployment 14 | shell: kubectl apply -f /tmp/calico.yaml 15 | 16 | - pause: 17 | prompt: "Wait for containers to Pull and install" -------------------------------------------------------------------------------- /roles/backup-cronjob/README.md: -------------------------------------------------------------------------------- 1 | # CronJob to backup ETCD state to S3 storage 2 | 3 | ### 2. Update API yaml environment variables 4 | 5 | For example: 6 | - S3_FOLDER=k8skaf-api05-prod 7 | - S3_BUCKET=prod-container-registry 8 | - cron schedule 9 | 10 | ### 3. 
Upload Kubernetes API and S3 secrets to kubernetes 11 | 12 | Create kubernetes secrets for the UCP password and S3 access keys 13 | ``` 14 | kubectl create -n kube-system -f ./etcd-backup-cronjob.yaml 15 | kubectl create -n kube-system -f ./s3-storage-secret.yaml 16 | ``` -------------------------------------------------------------------------------- /roles/kubernetes-dashboard/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Copy kubernetes-dashboard.yaml manifests 2 | template: 3 | src: kubernetes-dashboard.yaml 4 | dest: /tmp/kubernetes-dashboard.yaml 5 | force: yes 6 | 7 | - name: Set kubernetes-dashboard container image value 8 | replace: 9 | path: /tmp/kubernetes-dashboard.yaml 10 | regexp: "image: kubernetesui" 11 | replace: "image: {{ container_registry }}/keystone" 12 | 13 | - name: Create kubernetes-dashboard Deployment 14 | shell: kubectl apply -f /tmp/kubernetes-dashboard.yaml 15 | -------------------------------------------------------------------------------- /playbooks/install-all.yaml: -------------------------------------------------------------------------------- 1 | - import_playbook: ansible-requirements.yaml 2 | - import_playbook: repos.yaml 3 | - import_playbook: docker.yaml 4 | - import_playbook: kubeadm-prep.yaml 5 | #- import_playbook: kubeadm-singlenode-cluster.yaml 6 | - import_playbook: kubeadm-init-master.yaml 7 | - import_playbook: calico.yaml 8 | - import_playbook: kubeadm-join-masters.yaml 9 | - import_playbook: kubeadm-join-workers.yaml 10 | - import_playbook: helm-install.yaml 11 | - import_playbook: local-storage-provisioner.yaml 12 | - import_playbook: kubernetes-dashboard.yaml 13 | - import_playbook: elastic-search.yaml 14 | - import_playbook: aad-authentication.yaml -------------------------------------------------------------------------------- /roles/kubeadm-upgrade-masters/tasks/upgrade_masters.yaml: -------------------------------------------------------------------------------- 1 | - name: Upgrade kubenetes packages 2 | yum: 3 | name: "{{ packages }}" 4 | enablerepo: kubernetes 5 | vars: 6 | packages: 7 | - kubelet-{{kubeadm_version}} 8 | - kubectl-{{kubeadm_version}} 9 | - kubeadm-{{kubeadm_version}} 10 | 11 | - name: "Kubeadm upgrade: kubeadm upgrade node experimental-control-plane - remove (experimental-control-plane) for +v1.15" 12 | shell: kubeadm upgrade node experimental-control-plane 13 | register: rslt 14 | 15 | - pause: 16 | prompt: "Proceed with restart" 17 | 18 | - name: Restart kubelet 19 | systemd: 20 | state: restarted 21 | daemon_reload: yes 22 | name: kubelet 23 | enabled: yes -------------------------------------------------------------------------------- /roles/ingress-nginx/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: copy ingress-nginx manifests 2 | template: 3 | src: ingress-nginx.yaml 4 | dest: /tmp/ingress-nginx.yaml 5 | force: yes 6 | 7 | - name: Set nginx-ingress-controller container image value 8 | replace: 9 | path: /tmp/ingress-nginx.yaml 10 | regexp: "image: quay.io/kubernetes-ingress-controller" 11 | replace: "image: {{ container_registry }}/system" 12 | 13 | - name: Set alpines container image value 14 | replace: 15 | path: /tmp/ingress-nginx.yaml 16 | regexp: "image: privateregistry" 17 | replace: "image: {{ container_registry }}/system" 18 | 19 | - name: Create nginx-ingress-controller Deployments 20 | shell: kubectl apply -f /tmp/ingress-nginx.yaml 21 | 
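# Note: the two replace tasks above rewrite the upstream nginx-ingress-controller and alpine image references to the internal registry ({{ container_registry }}/system) so the manifest can be applied in the air-gapped environment.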
-------------------------------------------------------------------------------- /roles/helm-install/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Helm and Tiller installation 3 | 4 | ### 2. Create helm tiller namespace and service-account: 5 | ``` 6 | kubectl create ns kube-tiller 7 | kubectl create serviceaccount tiller --namespace kube-tiller 8 | 9 | cat << EOF | kubectl create -f - 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | kind: ClusterRoleBinding 12 | metadata: 13 | name: tiller 14 | roleRef: 15 | apiGroup: rbac.authorization.k8s.io 16 | kind: ClusterRole 17 | name: cluster-admin 18 | subjects: 19 | - kind: ServiceAccount 20 | name: tiller 21 | namespace: kube-tiller 22 | EOF 23 | ``` 24 | 25 | ### 3. Install Helm Chart package manager: 26 | ``` 27 | helm init --service-account tiller --tiller-namespace kube-tiller 28 | ``` 29 | -------------------------------------------------------------------------------- /roles/backup-cronjob/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | 2 | - name: Copy s3 storage secret 3 | template: 4 | src: s3-storage-secret.yaml 5 | dest: /tmp/s3-storage-secret.yaml 6 | force: yes 7 | 8 | - name: Create S3 storage secret 9 | shell: kubectl apply -f /tmp/s3-storage-secret.yaml 10 | 11 | - name: Copy etcd-backup manifests 12 | template: 13 | src: etcd-backup-cronjob.yaml 14 | dest: /tmp/etcd-backup-cronjob.yaml 15 | force: yes 16 | 17 | - name: Set S3 Folder Name within template 18 | replace: 19 | path: /tmp/etcd-backup-cronjob.yaml 20 | regexp: "kubernetes_cluster_label" 21 | replace: "{{ kubernetes_cluster_label }}" 22 | 23 | - name: Create Etcd Backup Cronjob Deployments 24 | shell: kubectl apply -f /tmp/etcd-backup-cronjob.yaml -------------------------------------------------------------------------------- /roles/repos/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Install yum-utils 2 | become: yes 3 | yum: 4 | name: "{{item}}" 5 | with_items: 6 | - yum-utils 7 | 8 | - name: "Enable extras" 9 | shell: 10 | yum-config-manager --enable "CentOS-7 - Extras" 11 | 12 | - name: Add docker-ce-stable repo 13 | yum_repository: 14 | name: docker-ce 15 | description: docker-ce stable repo 16 | baseurl: https://pkgs.domain.local/linux/docker-ce/ 17 | gpgcheck: no 18 | sslverify: no 19 | enabled: false 20 | 21 | - name: Add kubernetes repo 22 | yum_repository: 23 | name: kubernetes 24 | description: Kubernetes repo 25 | baseurl: https://pkgs.domain.local/linux/kubernetes/yum/repos/kubernetes-el7-$basearch 26 | gpgcheck: no 27 | sslverify: no 28 | enabled: false 29 | -------------------------------------------------------------------------------- /roles/uninstall/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: cleanup kubernetes data 2 | shell: | 3 | kubeadm reset -f 4 | systemctl stop kubelet 5 | systemctl stop docker 6 | rm -rf /var/lib/cni/ 7 | rm -rf /var/lib/etcd 8 | rm -rf /var/lib/kubelet/* 9 | rm -rf /etc/cni/ 10 | ip link delete cni0 11 | rm -rf /var/lib/docker/* 12 | rm -rf /etc/kubernetes 13 | 14 | - name: remove yum packages 15 | become: yes 16 | yum: 17 | name: "{{item}}" 18 | state: absent 19 | with_items: 20 | - kubelet 21 | - kubeadm 22 | - kubectl 23 | - docker-ce 24 | - device-mapper-persistent-data 25 | # - lvm2 26 | 27 | - name: Remove docker-ce-stable repo 28 | yum_repository: 29 | name: docker-ce 30 | 
state: absent 31 | 32 | - name: Remove kubernetes repo 33 | yum_repository: 34 | name: kubernetes 35 | state: absent 36 | -------------------------------------------------------------------------------- /roles/kubeadm-join-masters/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: "Create kubernetes folders" 2 | file: 3 | path: /etc/kubernetes/pki/etcd 4 | state: directory 5 | mode: 0640 6 | owner: root 7 | group: root 8 | 9 | - name: "Push Kubernetes Master PKI files to master replicas" 10 | copy: 11 | src: /tmp/kubeadm-ha/ 12 | dest: /etc/kubernetes/ 13 | owner: root 14 | group: root 15 | mode: preserve 16 | # set .crt to 640 set all else to 600 17 | 18 | - name: Generate master join token 19 | shell: kubeadm token create --print-join-command 20 | register: kubeadm_join_cmd 21 | delegate_to: "{{groups['k8s-master-primary'][0]}}" 22 | 23 | - set_fact: 24 | kubeadm_join: "{{ kubeadm_join_cmd.stdout }}" 25 | 26 | - name: Join Master replicas to cluster 27 | shell: "{{ kubeadm_join }} --control-plane" 28 | 29 | - pause: 30 | prompt: "Wait for containers to Pull and install" -------------------------------------------------------------------------------- /playbooks/install_docker.yaml: -------------------------------------------------------------------------------- 1 | - name: Install Docker-CE engine 2 | yum: 3 | name: docker-ce-{{ docker_version }} 4 | state: installed 5 | enablerepo: docker-ce 6 | 7 | - name: Create directory for trusted registry 8 | file: 9 | path: /etc/docker/certs.d/container-registry01.nonprod.domain.local 10 | state: directory 11 | 12 | - name: Copy registry CA certificate 13 | template: 14 | src: container-registry01.nonprod.domain.local-ca.crt 15 | dest: /etc/docker/certs.d/container-registry01.nonprod.domain.local/ca.crt 16 | force: yes 17 | 18 | - name: Configure additional engine options 19 | copy: 20 | content: "{{ docker_ce_daemon_options | to_nice_json }}" 21 | dest: /etc/docker/daemon.json 22 | mode: 0644 23 | when: docker_ce_daemon_options is defined 24 | 25 | - name: Enable docker service 26 | systemd: 27 | name: docker 28 | state: restarted 29 | enabled: yes -------------------------------------------------------------------------------- /playbooks/upgrade_worker.yaml: -------------------------------------------------------------------------------- 1 | - name: Upgrade kubenetes packages 2 | yum: 3 | name: "{{ packages }}" 4 | enablerepo: kubernetes 5 | vars: 6 | packages: 7 | - kubelet-{{kubeadm_version}} 8 | - kubectl-{{kubeadm_version}} 9 | - kubeadm-{{kubeadm_version}} 10 | 11 | - name: Drain kubernetes worker node 12 | shell: kubectl drain {{ inventory_hostname_short }} --delete-local-data --ignore-daemonsets 13 | register: kubeadm_drain 14 | delegate_to: "{{ groups['k8s-masters'][0] }}" 15 | 16 | - pause: 17 | prompt: "Wait for drain" 18 | 19 | - name: Upgrade worker nodes 20 | shell: kubeadm upgrade node config --kubelet-version {{kubernetes_version}} 21 | 22 | - name: Restart kubelet 23 | systemd: 24 | state: restarted 25 | daemon_reload: yes 26 | name: kubelet 27 | enabled: yes 28 | 29 | - name: Uncordon worker node 30 | shell: kubectl uncordon {{ inventory_hostname_short }} 31 | register: kubeadm_uncordon 32 | delegate_to: "{{ groups['k8s-masters'][0] }}" 33 | 34 | - pause: 35 | prompt: "Wait for uncordon" -------------------------------------------------------------------------------- /roles/os-patch-updates/tasks/os-patches.yml: 
-------------------------------------------------------------------------------- 1 | - pause: 2 | prompt: "Set kubectl context to cluster and proceed with draining node {{ inventory_hostname_short }}" 3 | 4 | - name: Drain kubernetes worker node {{ inventory_hostname_short }} 5 | local_action: command kubectl drain {{ inventory_hostname_short }} --delete-local-data --ignore-daemonsets --force 6 | register: kubeadm_drain 7 | until: kubeadm_drain.rc == 0 8 | become: no 9 | 10 | - name: Update the system 11 | yum: 12 | name: "*" 13 | state: latest 14 | disable_gpg_check: true 15 | 16 | - name: Check for reboot 17 | shell: if [ $(rpm -q kernel|tail -n 1) != kernel-$(uname -r) ]; then echo 'reboot'; else echo 'no'; fi 18 | ignore_errors: true 19 | register: reboot_hint 20 | changed_when: "'reboot' in reboot_hint.stdout" 21 | 22 | - name: Reboot system to complete patching 23 | reboot: 24 | reboot_timeout: 300 25 | ignore_errors: true 26 | when: reboot_hint.stdout.find("reboot") != -1 27 | register: rebooting 28 | 29 | - name: Uncordon worker node 30 | local_action: command kubectl uncordon {{ inventory_hostname_short }} 31 | register: kubeadm_uncordon 32 | become: no 33 | -------------------------------------------------------------------------------- /roles/helm-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Docker Pull Container Images 2 | shell: docker pull {{ tiller_image }} 3 | delegate_to: "{{ item }}" 4 | with_items: "{{ groups['k8s-workers'] }}" 5 | 6 | - name: Helm download and install 7 | shell: export HELM_URL=https://storage.googleapis.com/kubernetes-helm/helm-v2.12.1-$(uname | tr A-Z a-z)-amd64.tar.gz && curl "$HELM_URL" | tar --strip-components 1 -C /usr/local/bin -zxf - 8 | environment: "{{proxy_env}}" 9 | 10 | - name: Create Helm namespace 11 | shell: kubectl create ns kube-tiller 12 | 13 | - name: Create Helm service-account 14 | shell: kubectl create serviceaccount tiller --namespace kube-tiller 15 | 16 | - name: Create Helm ClusterRoleBinding 17 | shell: 18 | cmd: | 19 | cat << EOF | kubectl create -f - 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | kind: ClusterRoleBinding 22 | metadata: 23 | name: tiller 24 | roleRef: 25 | apiGroup: rbac.authorization.k8s.io 26 | kind: ClusterRole 27 | name: cluster-admin 28 | subjects: 29 | - kind: ServiceAccount 30 | name: tiller 31 | namespace: kube-tiller 32 | EOF 33 | 34 | - name: Helm init 35 | shell: /usr/local/bin/helm init --service-account tiller --tiller-namespace kube-tiller 36 | environment: "{{proxy_env}}" 37 | -------------------------------------------------------------------------------- /roles/aad-authentication/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | 2 | - name: Configure AAD authentication on masters sequentially 3 | include_tasks: "aad_masters.yaml" 4 | with_items: "{{ groups['k8s-masters'] }}" 5 | when: "hostvars[host_item].inventory_hostname == inventory_hostname" 6 | loop_control: 7 | loop_var: host_item 8 | 9 | - name: Create AAD Admin ClusterRoleBindings 10 | shell: 11 | cmd: | 12 | cat << EOF | kubectl apply -f - 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | kind: ClusterRoleBinding 15 | metadata: 16 | name: {{ aad_admin_groupname }}:cluster-admin 17 | subjects: 18 | - kind: Group 19 | name: {{ aad_admin_groupid }} 20 | apiGroup: rbac.authorization.k8s.io 21 | roleRef: 22 | kind: ClusterRole 23 | name: cluster-admin 24 | apiGroup: rbac.authorization.k8s.io 25 | EOF 26 | 27 | - name: Create AAD User 
ClusterRoleBindings 28 | shell: 29 | cmd: | 30 | cat << EOF | kubectl apply -f - 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRoleBinding 33 | metadata: 34 | name: {{ aad_user_groupname }}:cluster-admin 35 | subjects: 36 | - kind: Group 37 | name: {{ aad_user_groupid }} 38 | apiGroup: rbac.authorization.k8s.io 39 | roleRef: 40 | kind: ClusterRole 41 | name: cluster-admin 42 | apiGroup: rbac.authorization.k8s.io 43 | EOF -------------------------------------------------------------------------------- /roles/kubeadm-upgrade-masters/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Upgrade kubenetes packages 2 | yum: 3 | name: "{{ packages }}" 4 | enablerepo: kubernetes 5 | vars: 6 | packages: 7 | - kubelet-{{kubeadm_version}} 8 | - kubectl-{{kubeadm_version}} 9 | - kubeadm-{{kubeadm_version}} 10 | 11 | - name: "Update kubeadm config yaml" 12 | copy: 13 | content: "{{ kubeadm_config_options | to_nice_yaml }}" 14 | dest: /tmp/kubeadm-config.yaml 15 | mode: 0644 16 | 17 | - name: "Kubeadm upgrade plan" 18 | shell: kubeadm upgrade plan --config /tmp/kubeadm-config.yaml --print-config 19 | register: rslt 20 | 21 | - pause: 22 | prompt: "Validate plan" 23 | 24 | - name: "Kubeadm upgrade dry-run" 25 | shell: kubeadm upgrade apply {{kubernetes_version}} --config /tmp/kubeadm-config.yaml --dry-run 26 | register: rslt 27 | 28 | - pause: 29 | prompt: "Validate dry-run" 30 | 31 | - name: "Kubeadm upgrade" 32 | shell: kubeadm upgrade apply -f --config /tmp/kubeadm-config.yaml {{kubernetes_version}} 33 | register: rslt 34 | 35 | - pause: 36 | prompt: "Proceed with restart" 37 | 38 | - name: Restart kubelet 39 | systemd: 40 | state: restarted 41 | daemon_reload: yes 42 | name: kubelet 43 | enabled: yes 44 | 45 | - name: Upgrade masters sequentially 46 | include_tasks: "upgrade_masters.yaml" 47 | with_items: "{{ groups['k8s-master-replicas'] }}" 48 | when: "hostvars[host_item].inventory_hostname == inventory_hostname" 49 | loop_control: 50 | loop_var: host_item -------------------------------------------------------------------------------- /roles/kubeadm-prep/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Remove swapfile from /etc/fstab 2 | mount: 3 | name: swap 4 | fstype: swap 5 | state: absent 6 | 7 | - name: Turn swap off 8 | shell: swapoff -a 9 | 10 | - name: Set Enforce 11 | command: setenforce 0 12 | ignore_errors: True 13 | 14 | - name: Install kubelet package 15 | become: yes 16 | yum: 17 | name: "kubelet-{{kubeadm_version}}" 18 | enablerepo: kubernetes 19 | 20 | - name: Install kubectl package 21 | become: yes 22 | yum: 23 | name: "kubectl-{{kubeadm_version}}" 24 | enablerepo: kubernetes 25 | 26 | - name: Install kubeadm package 27 | become: yes 28 | yum: 29 | name: "kubeadm-{{kubeadm_version}}" 30 | enablerepo: kubernetes 31 | 32 | - name: Add vm swappiness 33 | lineinfile: 34 | path: /etc/sysctl.d/k8s.conf 35 | line: 'vm.swappiness = 0' 36 | state: present 37 | create: yes 38 | 39 | - name: Add vm overcommit_memory 40 | lineinfile: 41 | path: /etc/sysctl.d/k8s.conf 42 | line: 'vm.overcommit_memory = 1' 43 | state: present 44 | create: yes 45 | 46 | - name: Add netbridge config ip4 47 | lineinfile: 48 | path: /etc/sysctl.d/k8s.conf 49 | line: 'net.bridge.bridge-nf-call-iptables = 1' 50 | state: present 51 | create: yes 52 | 53 | - name: Increase net ipv4 tcp_max_syn_backlog 54 | lineinfile: 55 | path: /etc/sysctl.d/k8s.conf 56 | line: 
'net.ipv4.tcp_max_syn_backlog=2621440' 57 | state: present 58 | create: yes 59 | 60 | - name: update sysctl 61 | command: sysctl --system 62 | 63 | - name: Start kubelet 64 | systemd: 65 | state: started 66 | daemon_reload: yes 67 | name: kubelet 68 | enabled: yes -------------------------------------------------------------------------------- /inventory/group_vars/cluster1-prod/all: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | docker_version: 18.09.6 4 | kubernetes_version: v1.15.0 5 | kubeadm_version: 1.15.0 6 | 7 | # kubernetes API load balanced VIP for HA installations 8 | kubernetes_loadbalanced_api_dns: k8s-api01.domain.local 9 | kubernetes_cluster_label: cluster1-prod 10 | 11 | # Container registry01 12 | container_registry: container-registry01.nonprod.domain.local 13 | 14 | # Docker Daemon configuration 15 | docker_ce_daemon_options: 16 | exec-opts: [ "native.cgroupdriver=systemd" ] 17 | log-driver: json-file 18 | log-opts: 19 | max-size: "100m" 20 | max-file: "7" 21 | storage-driver: overlay2 22 | storage-opts: [ "overlay2.override_kernel_check=true" ] 23 | 24 | # Kubernetes Kubeadm Cluster Configuration 25 | kubeadm_config_options: 26 | apiVersion: kubeadm.k8s.io/v1beta1 27 | kind: ClusterConfiguration 28 | kubernetesVersion: "{{ kubernetes_version }}" 29 | apiServer: 30 | certSANs: 31 | - "{{ kubernetes_loadbalanced_api_dns }}" 32 | controlPlaneEndpoint: "{{ kubernetes_loadbalanced_api_dns }}:6443" 33 | networking: 34 | podSubnet: 10.244.0.0/16 35 | imageRepository: "{{ container_registry }}/keystone" 36 | useHyperKubeImage: true 37 | clusterName: "{{ kubernetes_cluster_label }}" 38 | etcd: 39 | local: 40 | imageRepository: "{{ container_registry }}/keystone" 41 | 42 | 43 | # Addon Container Images 44 | tiller_image: "{{ container_registry }}/keystone/tiller:v2.12.1" 45 | 46 | # Filebeat Kafka Topic Name 47 | filebeat_app_id: kubernetes-logs-prod 48 | 49 | # Metricbeat Kafka Topic Name 50 | metricbeat_app_id: metricbeat-kubernetes-prod 51 | 52 | # Azure AD Admin group and AppID 53 | aad_admin_groupname: XXXXX 54 | aad_admin_groupid: XXXXX 55 | 56 | # Azure AD User group and AppID 57 | aad_user_groupname: XXXXX 58 | aad_user_groupid: XXXXX -------------------------------------------------------------------------------- /inventory/group_vars/cluster2-nonprod/all: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | docker_version: 18.09.6 4 | kubernetes_version: v1.15.0 5 | kubeadm_version: 1.15.0 6 | 7 | # kubernetes API load balanced VIP for HA installations 8 | kubernetes_loadbalanced_api_dns: k8s-api02.nonprod.domain.local 9 | kubernetes_cluster_label: cluster2-nonprod 10 | 11 | # Container registry01 12 | container_registry: container-registry01.nonprod.domain.local 13 | 14 | # Docker Daemon configuration 15 | docker_ce_daemon_options: 16 | exec-opts: [ "native.cgroupdriver=systemd" ] 17 | log-driver: json-file 18 | log-opts: 19 | max-size: "100m" 20 | max-file: "7" 21 | storage-driver: overlay2 22 | storage-opts: [ "overlay2.override_kernel_check=true" ] 23 | 24 | # Kubernetes Kubeadm Cluster Configuration 25 | kubeadm_config_options: 26 | apiVersion: kubeadm.k8s.io/v1beta1 27 | kind: ClusterConfiguration 28 | kubernetesVersion: "{{ kubernetes_version }}" 29 | apiServer: 30 | certSANs: 31 | - "{{ kubernetes_loadbalanced_api_dns }}" 32 | controlPlaneEndpoint: "{{ kubernetes_loadbalanced_api_dns }}:6443" 33 | networking: 34 | podSubnet: 10.244.0.0/16 35 | imageRepository: "{{ 
container_registry }}/keystone" 36 | useHyperKubeImage: true 37 | clusterName: "{{ kubernetes_cluster_label }}" 38 | etcd: 39 | local: 40 | imageRepository: "{{ container_registry }}/keystone" 41 | 42 | 43 | # Addon Container Images 44 | tiller_image: "{{ container_registry }}/keystone/tiller:v2.12.1" 45 | 46 | # Filebeat Kafka Topic Name 47 | filebeat_app_id: kubernetes-logs-nonprod 48 | 49 | # Metricbeat Kafka Topic Name 50 | metricbeat_app_id: metricbeat-kubernetes-nonprod 51 | 52 | # Azure AD Admin group and AppID 53 | aad_admin_groupname: XXXXX 54 | aad_admin_groupid: XXXXX 55 | 56 | # Azure AD User group and AppID 57 | aad_user_groupname: XXXXX 58 | aad_user_groupid: XXXXX -------------------------------------------------------------------------------- /roles/kubeadm-init-master/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | 2 | - pause: 3 | prompt: "Wait before initializing master" 4 | 5 | - name: "Create kubeadm init config yaml" 6 | copy: 7 | content: "{{ kubeadm_config_options | to_nice_yaml }}" 8 | dest: /tmp/kubeadm-config.yaml 9 | mode: 0644 10 | 11 | - name: Kubeadm init 12 | shell: kubeadm init --config=/tmp/kubeadm-config.yaml 13 | register: rslt 14 | ignore_errors: yes 15 | 16 | - pause: 17 | prompt: "Validate kubeadm init" 18 | 19 | - name: Store init output 20 | action: copy content="{{ rslt.stdout }}" dest="/etc/kubernetes/kubeadm-init.stdout" 21 | 22 | - name: Create .kube folder 23 | file: 24 | path: "~{{ ansible_ssh_user }}/.kube" 25 | state: directory 26 | owner: "{{ ansible_ssh_user }}" 27 | 28 | - name: Copy admin.conf to .kube folder 29 | copy: 30 | src: /etc/kubernetes/admin.conf 31 | dest: "~{{ ansible_ssh_user }}/.kube/config" 32 | owner: "{{ ansible_ssh_user }}" 33 | remote_src: yes 34 | 35 | - name: Create .kube folder 36 | file: 37 | path: "/root/.kube" 38 | state: directory 39 | owner: "root" 40 | 41 | - name: Copy admin.conf to .kube folder 42 | copy: 43 | src: /etc/kubernetes/admin.conf 44 | dest: "/root/.kube/config" 45 | owner: "root" 46 | remote_src: yes 47 | 48 | - name: "Fetching Kubernetes Master PKI files from primary master" 49 | fetch: 50 | src: /etc/kubernetes/pki/{{item}} 51 | dest: /tmp/kubeadm-ha/pki/{{item}} 52 | flat: yes 53 | with_items: 54 | - ca.crt 55 | - ca.key 56 | - sa.key 57 | - sa.pub 58 | - front-proxy-ca.crt 59 | - front-proxy-ca.key 60 | 61 | - name: "Fetching Kubernetes Master ETCD files from primary master" 62 | fetch: 63 | src: /etc/kubernetes/pki/etcd/{{item}} 64 | dest: /tmp/kubeadm-ha/pki/etcd/{{item}} 65 | flat: yes 66 | with_items: 67 | - ca.crt 68 | - ca.key 69 | 70 | - name: "Fetching Kubernetes Master Admin files from primary master" 71 | fetch: 72 | src: /etc/kubernetes/{{item}} 73 | dest: /tmp/kubeadm-ha/{{item}} 74 | flat: yes 75 | with_items: 76 | - admin.conf 77 | -------------------------------------------------------------------------------- /roles/elastic-search/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: copy kube-state-metrics manifests 2 | template: 3 | src: kube-state-metrics.yaml 4 | dest: /tmp/kube-state-metrics.yaml 5 | force: yes 6 | 7 | - name: Set Kube State Metrics container image value 8 | replace: 9 | path: /tmp/kube-state-metrics.yaml 10 | regexp: "image: quay.io/coreos" 11 | replace: "image: {{ container_registry }}/system" 12 | 13 | - name: Set Kube State Metrics container image value 14 | replace: 15 | path: /tmp/kube-state-metrics.yaml 16 | regexp: "image: k8s.gcr.io" 17 | 
replace: "image: {{ container_registry }}/system" 18 | 19 | - name: Create Kube State Metrics Deployments 20 | shell: kubectl apply -f /tmp/kube-state-metrics.yaml 21 | 22 | 23 | - name: copy filebeat manifests 24 | template: 25 | src: filebeat-kubernetes.yaml 26 | dest: /tmp/filebeat-kubernetes.yaml 27 | force: yes 28 | 29 | - name: Set filebeat container image value 30 | replace: 31 | path: /tmp/filebeat-kubernetes.yaml 32 | regexp: "image: docker.elastic.co/beats" 33 | replace: "image: {{ container_registry }}/system" 34 | 35 | - name: Set filebeat app_id value 36 | replace: 37 | path: /tmp/filebeat-kubernetes.yaml 38 | regexp: "app_id: filebeat_app_id" 39 | replace: "app_id: {{ filebeat_app_id }}" 40 | 41 | - name: Create filebeat Deployments 42 | shell: kubectl apply -f /tmp/filebeat-kubernetes.yaml 43 | 44 | - name: copy metricbeat manifests 45 | template: 46 | src: metricbeat-kubernetes.yaml 47 | dest: /tmp/metricbeat-kubernetes.yaml 48 | force: yes 49 | 50 | - name: Set metricbeat container image value 51 | replace: 52 | path: /tmp/metricbeat-kubernetes.yaml 53 | regexp: "image: docker.elastic.co/beats" 54 | replace: "image: {{ container_registry }}/system" 55 | 56 | - name: Set metricbeat app_id value 57 | replace: 58 | path: /tmp/metricbeat-kubernetes.yaml 59 | regexp: "app_id: metricbeat_app_id" 60 | replace: "app_id: {{ metricbeat_app_id }}" 61 | 62 | - name: Create metricbeat Deployments 63 | shell: kubectl apply -f /tmp/metricbeat-kubernetes.yaml 64 | 65 | - name: Label kubernetes nodes with cluster tag 66 | shell: kubectl label node {{ hostvars[item].inventory_hostname_short }} "cluster={{kubernetes_cluster_label}}" 67 | delegate_to: "{{ groups['k8s-master-primary'][0] }}" 68 | with_items: "{{ groups['k8s-nodes'] }}" 69 | -------------------------------------------------------------------------------- /roles/aad-authentication/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Notes on using Azure AD as an authentication provider for Kubernetes Adminstration 3 | 4 | https://github.com/kubernetes/client-go/tree/master/plugin/pkg/client/auth/azure?source=post_page--------------------------- 5 | 6 | `On the "Manage" / "Manifest" set groupMembershipClaims property to SecurityGroup` 7 | 8 | Azure Tenant ID: XXXXX 9 | 10 | AzureAD API Server SP: ts-container-platform-sp 11 | appid: XXXXX 12 | 13 | AzureAD User Auth SP: 14 | appid: XXXXX 15 | 16 | ### 1. Allow cluster firewall access to https://sts.windows.net from k8s nodes 17 | 18 | ### 2. Set up kubernetes API-server with Azure AD ts-container-platform-sp SP (Applied by Ansible) 19 | 20 | Update /etc/kubernetes/manifests/kube-apiserver.yaml (Applied by Ansible) 21 | 22 | ``` 23 | --oidc-client-id=spn:XXXXX 24 | --oidc-issuer-url=https://sts.windows.net/XXXXX/ 25 | --oidc-username-claim=upn 26 | --oidc-groups-claim=groups 27 | ``` 28 | 29 | ### 3. 
Set Azure AD group access for the cluster 30 | 31 | #### Admin NonProduction Cluster Role 32 | ``` 33 | cat << EOF | kubectl apply -f - 34 | kind: ClusterRoleBinding 35 | apiVersion: rbac.authorization.k8s.io/v1 36 | metadata: 37 | name: XXXXX:cluster-admin 38 | subjects: 39 | - kind: Group 40 | name: XXXXX 41 | apiGroup: rbac.authorization.k8s.io 42 | roleRef: 43 | kind: ClusterRole 44 | name: cluster-admin 45 | apiGroup: rbac.authorization.k8s.io 46 | EOF 47 | ``` 48 | 49 | #### User NonProduction Cluster Role 50 | ``` 51 | cat << EOF | kubectl apply -f - 52 | kind: ClusterRoleBinding 53 | apiVersion: rbac.authorization.k8s.io/v1 54 | metadata: 55 | name: XXXXX:edit 56 | subjects: 57 | - kind: Group 58 | name: XXXXX 59 | apiGroup: rbac.authorization.k8s.io 60 | roleRef: 61 | kind: ClusterRole 62 | name: edit 63 | apiGroup: rbac.authorization.k8s.io 64 | EOF 65 | ``` 66 | 67 | 68 | 69 | ### 4. Update the client-side kubectl config with the Azure user and cluster configuration 70 | 71 | ``` 72 | kubectl config set-credentials azureuser@domain.local \ 73 | --auth-provider=azure \ 74 | --auth-provider-arg=environment=AzurePublicCloud \ 75 | --auth-provider-arg=client-id=XXXXX \ 76 | --auth-provider-arg=tenant-id=XXXXX \ 77 | --auth-provider-arg=apiserver-id=XXXXX 78 | 79 | kubectl config set-cluster cluster1-nonprod \ 80 | --server=https://k8s-api01.nonprod.domain.local:6443 \ 81 | --insecure-skip-tls-verify=true 82 | 83 | kubectl config set-context cluster1-nonprod_azureuser@domain.local \ 84 | --cluster=cluster1-nonprod \ 85 | --user=azureuser@domain.local 86 | 87 | kubectl config use-context cluster1-nonprod_azureuser@domain.local 88 | ``` -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible Playbooks to install an HA Kubernetes (multi-master) cluster using Kubeadm 2 | 3 | This repository provides Ansible playbooks to install a Kubernetes HA cluster in an air-gapped environment. 4 | - Uses the recently GA'd kubeadm HA join features 5 | 6 | 7 | # Prerequisites: 8 | - Install Ansible and a forward proxy on the Ansible host 9 | - Ansible: 10 | for macOS `brew install ansible` 11 | for Linux `yum install ansible` 12 | 13 | - Set up SSH access from the Ansible host to the Kubernetes nodes. 14 | ```ssh-copy-id -i ~/.ssh/id_rsa.pub ``` 15 | 16 | # Environment preparation: 17 | 18 | Specify the masters and workers in the `inventory/*cluster*` file: 19 | ``` 20 | [k8s-masters] # these are all the masters 21 | [k8s-workers] # these are all the worker nodes 22 | ``` 23 | 24 | Update the `inventory/group_vars/*cluster*` section: 25 | - choose the desired versions of Kubernetes and Docker 26 | - set up the pod network CIDR (the default is for Calico - modify calico.yaml as well) 27 | - specify the version of Helm to use 28 | - specify the Local Storage Provisioner version 29 | 30 | 31 | # Install a highly available Kubernetes cluster using kubeadm 32 | 33 | You can now run the install-all.yaml playbook to set up your cluster. 34 | You can also run the individual playbooks separately for specific purposes (setting up Docker, masters, kubeadm, Helm, and so on). 35 | 36 | ``` 37 | ansible-playbook -i inventory/cluster1-prod playbooks/install-all.yaml --private-key=~/.ssh/id_rsa -u %username% -v 38 | ``` 39 | 40 | # Restarting the install: 41 | If you need to restart the process using kubeadm reset, use the uninstall.yaml playbook, which deletes the state from all VMs.
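For example, to wipe the cluster state using the same inventory and SSH key as the install command above (substitute your own inventory file and user):
```
ansible-playbook -i inventory/cluster1-prod playbooks/uninstall.yaml --private-key=~/.ssh/id_rsa -u username -v
```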
42 | 43 | 44 | # Upgrade a highly available kubernetes using kubeadm 45 | 46 | To upgrade the kubernetes control plane run: 47 | ``` 48 | ansible-playbook -i inventory/cluster1-prod playbooks/upgrade-all.yaml --private-key=~/.ssh/id_rsa -u username -v 49 | ``` 50 | 51 | # What install-all.yaml includes: 52 | 53 | - Adding the required yum repositories 54 | - Installing docker 55 | - Installing kubeadm, kubelet and kubectl 56 | - Initializing the first master with etcd and kubernetes-api 57 | - Join replica master nodes to the primary master 58 | - Adding the worker nodes to the cluster 59 | - Installing Helm & Tiller 60 | - Installing Local Storage Provisioner 61 | - Enable Azure AD OIDC authentication 62 | 63 | # Restarting the install: 64 | 65 | If you need to restart the process using kubeadm reset, please use the uninstall.yaml playbook that deletes the state from all vms. 66 | 67 | # To sequentially drain and patch the underlying OS hosts: 68 | 69 | ``` 70 | ansible-playbook -i inventory/cluster1-prod playbooks/os-patch-updates.yaml --private-key=~/.ssh/id_rsa -u username -v 71 | ``` -------------------------------------------------------------------------------- /roles/backup-cronjob/templates/etcd-backup-cronjob.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: kube-system 6 | --- 7 | apiVersion: batch/v1beta1 8 | kind: CronJob 9 | metadata: 10 | name: etcd-backup 11 | namespace: kube-system 12 | spec: 13 | concurrencyPolicy: Forbid 14 | failedJobsHistoryLimit: 7 15 | jobTemplate: 16 | spec: 17 | template: 18 | spec: 19 | nodeSelector: 20 | node-role.kubernetes.io/master: "" 21 | tolerations: 22 | - key: node-role.kubernetes.io/master 23 | effect: NoSchedule 24 | operator: "Exists" 25 | hostNetwork: true 26 | containers: 27 | - name: etcd-backup 28 | image: container-registry01.nonprod.domain.local/system/etcd-backup:v1 29 | imagePullPolicy: IfNotPresent 30 | command: ['sh', '-c', 'etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key snapshot save /tmp/etcd-snapshot-$(date +%Y-%m-%d_%H:%M:%S_%Z).db && s3cmd --access_key=$ACCESS_KEY --secret_key=$SECRET_KEY --host=ashobjectstorage.domain.local --host-bucket=$S3_BUCKET.ashobjectstorage.domain.local --no-check-certificate put /tmp/etcd-snapshot-$(date +%Y-%m-%d_%H:%M:%S_%Z).db s3://$S3_BUCKET/backups/$S3_FOLDER/'] 31 | securityContext: 32 | allowPrivilegeEscalation: true 33 | env: 34 | - name: ETCDCTL_API 35 | value: "3" 36 | - name: S3_BUCKET 37 | value: "nonprod-container-registry" 38 | - name: S3_FOLDER 39 | value: "kubernetes_cluster_label" 40 | - name: ACCESS_KEY 41 | valueFrom: 42 | secretKeyRef: 43 | name: s3-storage-secret 44 | key: access_key 45 | - name: SECRET_KEY 46 | valueFrom: 47 | secretKeyRef: 48 | name: s3-storage-secret 49 | key: secret_key 50 | volumeMounts: 51 | - name: etcd-certs 52 | mountPath: /etc/kubernetes/pki/etcd 53 | readOnly: true 54 | - mountPath: /tmp 55 | name: tmp 56 | volumes: 57 | - name: etcd-certs 58 | hostPath: 59 | path: /etc/kubernetes/pki/etcd 60 | type: DirectoryOrCreate 61 | - name: tmp 62 | hostPath: 63 | path: /tmp 64 | type: DirectoryOrCreate 65 | restartPolicy: OnFailure 66 | terminationGracePeriodSeconds: 45 67 | securityContext: 68 | runAsNonRoot: false 69 | runAsUser: 0 70 | schedule: '00 00 * * 6' 71 | successfulJobsHistoryLimit: 7 72 | 
suspend: false -------------------------------------------------------------------------------- /roles/local-storage-provisioner/templates/local-storage-provisioner.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Only valid for Kubernetes 1.9+ 3 | apiVersion: storage.k8s.io/v1 4 | kind: StorageClass 5 | metadata: 6 | name: local-storage-elastic 7 | provisioner: kubernetes.io/no-provisioner 8 | volumeBindingMode: WaitForFirstConsumer 9 | # Supported policies: Delete, Retain 10 | reclaimPolicy: Delete 11 | --- 12 | # Source: provisioner/templates/provisioner.yaml 13 | 14 | apiVersion: v1 15 | kind: ConfigMap 16 | metadata: 17 | name: local-provisioner-config 18 | namespace: default 19 | data: 20 | storageClassMap: | 21 | local-storage-elastic: 22 | hostDir: /pv 23 | mountDir: /pv 24 | blockCleanerCommand: 25 | - "/scripts/shred.sh" 26 | - "2" 27 | volumeMode: Filesystem 28 | fsType: ext4 29 | --- 30 | apiVersion: apps/v1 31 | kind: DaemonSet 32 | metadata: 33 | name: local-volume-provisioner 34 | namespace: default 35 | labels: 36 | app: local-volume-provisioner 37 | spec: 38 | selector: 39 | matchLabels: 40 | app: local-volume-provisioner 41 | template: 42 | metadata: 43 | labels: 44 | app: local-volume-provisioner 45 | spec: 46 | serviceAccountName: local-storage-admin 47 | containers: 48 | - image: "container-registry01.nonprod.domain.local/keystone/local-volume-provisioner:v2.3.2" 49 | imagePullPolicy: "IfNotPresent" 50 | name: provisioner 51 | securityContext: 52 | privileged: true 53 | env: 54 | - name: MY_NODE_NAME 55 | valueFrom: 56 | fieldRef: 57 | fieldPath: spec.nodeName 58 | volumeMounts: 59 | - mountPath: /etc/provisioner/config 60 | name: provisioner-config 61 | readOnly: true 62 | - mountPath: /pv 63 | name: local-storage-elastic 64 | mountPropagation: "HostToContainer" 65 | volumes: 66 | - name: provisioner-config 67 | configMap: 68 | name: local-provisioner-config 69 | - name: local-storage-elastic 70 | hostPath: 71 | path: /pv 72 | --- 73 | # Source: provisioner/templates/provisioner-service-account.yaml 74 | 75 | apiVersion: v1 76 | kind: ServiceAccount 77 | metadata: 78 | name: local-storage-admin 79 | namespace: default 80 | 81 | --- 82 | # Source: provisioner/templates/provisioner-cluster-role-binding.yaml 83 | 84 | apiVersion: rbac.authorization.k8s.io/v1 85 | kind: ClusterRoleBinding 86 | metadata: 87 | name: local-storage-provisioner-pv-binding 88 | namespace: default 89 | subjects: 90 | - kind: ServiceAccount 91 | name: local-storage-admin 92 | namespace: default 93 | roleRef: 94 | kind: ClusterRole 95 | name: system:persistent-volume-provisioner 96 | apiGroup: rbac.authorization.k8s.io 97 | --- 98 | apiVersion: rbac.authorization.k8s.io/v1 99 | kind: ClusterRole 100 | metadata: 101 | name: local-storage-provisioner-node-clusterrole 102 | namespace: default 103 | rules: 104 | - apiGroups: [""] 105 | resources: ["nodes"] 106 | verbs: ["get"] 107 | --- 108 | apiVersion: rbac.authorization.k8s.io/v1 109 | kind: ClusterRoleBinding 110 | metadata: 111 | name: local-storage-provisioner-node-binding 112 | namespace: default 113 | subjects: 114 | - kind: ServiceAccount 115 | name: local-storage-admin 116 | namespace: default 117 | roleRef: 118 | kind: ClusterRole 119 | name: local-storage-provisioner-node-clusterrole 120 | apiGroup: rbac.authorization.k8s.io 121 | -------------------------------------------------------------------------------- /roles/elastic-search/templates/kube-state-metrics.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | # Kubernetes versions after 1.9.0 should use apps/v1 3 | # Kubernetes versions before 1.8.0 should use apps/v1beta1 or extensions/v1beta1 4 | kind: Deployment 5 | metadata: 6 | name: kube-state-metrics 7 | namespace: kube-system 8 | spec: 9 | selector: 10 | matchLabels: 11 | k8s-app: kube-state-metrics 12 | replicas: 1 13 | template: 14 | metadata: 15 | labels: 16 | k8s-app: kube-state-metrics 17 | spec: 18 | serviceAccountName: kube-state-metrics 19 | containers: 20 | - name: kube-state-metrics 21 | imagePullPolicy: IfNotPresent 22 | image: quay.io/coreos/kube-state-metrics:v1.5.0 23 | ports: 24 | - name: http-metrics 25 | containerPort: 8080 26 | - name: telemetry 27 | containerPort: 8081 28 | readinessProbe: 29 | httpGet: 30 | path: /healthz 31 | port: 8080 32 | initialDelaySeconds: 5 33 | timeoutSeconds: 5 34 | - name: addon-resizer 35 | imagePullPolicy: IfNotPresent 36 | image: k8s.gcr.io/addon-resizer:1.8.3 37 | resources: 38 | limits: 39 | cpu: 150m 40 | memory: 50Mi 41 | requests: 42 | cpu: 150m 43 | memory: 50Mi 44 | env: 45 | - name: MY_POD_NAME 46 | valueFrom: 47 | fieldRef: 48 | fieldPath: metadata.name 49 | - name: MY_POD_NAMESPACE 50 | valueFrom: 51 | fieldRef: 52 | fieldPath: metadata.namespace 53 | command: 54 | - /pod_nanny 55 | - --container=kube-state-metrics 56 | - --cpu=200m 57 | - --extra-cpu=1m 58 | - --memory=200Mi 59 | - --extra-memory=2Mi 60 | - --threshold=5 61 | - --deployment=kube-state-metrics 62 | --- 63 | apiVersion: rbac.authorization.k8s.io/v1 64 | # kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 65 | kind: ClusterRole 66 | metadata: 67 | name: kube-state-metrics 68 | rules: 69 | - apiGroups: [""] 70 | resources: 71 | - configmaps 72 | - secrets 73 | - nodes 74 | - pods 75 | - services 76 | - resourcequotas 77 | - replicationcontrollers 78 | - limitranges 79 | - persistentvolumeclaims 80 | - persistentvolumes 81 | - namespaces 82 | - endpoints 83 | verbs: ["list", "watch"] 84 | - apiGroups: ["extensions"] 85 | resources: 86 | - daemonsets 87 | - deployments 88 | - replicasets 89 | verbs: ["list", "watch"] 90 | - apiGroups: ["apps"] 91 | resources: 92 | - statefulsets 93 | verbs: ["list", "watch"] 94 | - apiGroups: ["batch"] 95 | resources: 96 | - cronjobs 97 | - jobs 98 | verbs: ["list", "watch"] 99 | - apiGroups: ["autoscaling"] 100 | resources: 101 | - horizontalpodautoscalers 102 | verbs: ["list", "watch"] 103 | - apiGroups: ["policy"] 104 | resources: 105 | - poddisruptionbudgets 106 | verbs: ["list", "watch"] 107 | --- 108 | apiVersion: rbac.authorization.k8s.io/v1 109 | # kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 110 | kind: ClusterRoleBinding 111 | metadata: 112 | name: kube-state-metrics 113 | roleRef: 114 | apiGroup: rbac.authorization.k8s.io 115 | kind: ClusterRole 116 | name: kube-state-metrics 117 | subjects: 118 | - kind: ServiceAccount 119 | name: kube-state-metrics 120 | namespace: kube-system 121 | --- 122 | apiVersion: rbac.authorization.k8s.io/v1 123 | # kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 124 | kind: RoleBinding 125 | metadata: 126 | name: kube-state-metrics 127 | namespace: kube-system 128 | roleRef: 129 | apiGroup: rbac.authorization.k8s.io 130 | kind: Role 131 | name: kube-state-metrics-resizer 132 | subjects: 133 | - kind: ServiceAccount 134 | name: kube-state-metrics 135 | namespace: kube-system 136 | --- 137 | 
apiVersion: rbac.authorization.k8s.io/v1 138 | # kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 139 | kind: Role 140 | metadata: 141 | namespace: kube-system 142 | name: kube-state-metrics-resizer 143 | rules: 144 | - apiGroups: [""] 145 | resources: 146 | - pods 147 | verbs: ["get"] 148 | - apiGroups: ["extensions"] 149 | resources: 150 | - deployments 151 | resourceNames: ["kube-state-metrics"] 152 | verbs: ["get", "update"] 153 | --- 154 | apiVersion: v1 155 | kind: ServiceAccount 156 | metadata: 157 | name: kube-state-metrics 158 | namespace: kube-system 159 | --- 160 | apiVersion: v1 161 | kind: Service 162 | metadata: 163 | name: kube-state-metrics 164 | namespace: kube-system 165 | labels: 166 | k8s-app: kube-state-metrics 167 | annotations: 168 | prometheus.io/scrape: 'true' 169 | spec: 170 | ports: 171 | - name: http-metrics 172 | port: 8080 173 | targetPort: http-metrics 174 | protocol: TCP 175 | - name: telemetry 176 | port: 8081 177 | targetPort: telemetry 178 | protocol: TCP 179 | selector: 180 | k8s-app: kube-state-metrics 181 | --- -------------------------------------------------------------------------------- /roles/elastic-search/templates/filebeat-kubernetes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: filebeat-config 6 | namespace: kube-system 7 | labels: 8 | k8s-app: filebeat 9 | data: 10 | filebeat.yml: |- 11 | filebeat.config: 12 | #inputs: 13 | # Mounted `filebeat-inputs` configmap: 14 | # path: ${path.config}/inputs.d/*.yml 15 | # Reload inputs configs as they change: 16 | # reload.enabled: false 17 | modules: 18 | path: ${path.config}/modules.d/*.yml 19 | # Reload module configs as they change: 20 | reload.enabled: false 21 | 22 | # To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this: 23 | filebeat.autodiscover: 24 | providers: 25 | - type: kubernetes 26 | hints.enabled: true 27 | labels.dedot: true 28 | annotations.dedot: true 29 | 30 | processors: 31 | - add_cloud_metadata: 32 | - add_kubernetes_metadata: 33 | labels.dedot: true 34 | annotations.dedot: true 35 | # - drop_fields: # label added by helm by default and causes failure in Beats. This can be moved with Beats 6.7 release which has fix 36 | # when: 37 | # has_fields: ['kubernetes.labels.app'] 38 | # fields: 39 | # - 'kubernetes.labels.app' 40 | 41 | cloud.id: ${ELASTIC_CLOUD_ID} 42 | cloud.auth: ${ELASTIC_CLOUD_AUTH} 43 | 44 | output.kafka: 45 | # initial brokers for reading cluster metadata 46 | hosts: ["kafrck-vccn010.domain.local:31090", "kafrck-vccn011.domain.local:31091", "kafrck-vccn012.domain.local:31092", "kafrck-vccn013.domain.local:31093", "kafrck-vccn014.domain.local:31094"] 47 | # message topic selection + partitioning 48 | topic: '%{[fields.app_id]}' 49 | partition.round_robin: 50 | reachable_only: false 51 | required_acks: 1 52 | compression: gzip 53 | max_message_bytes: 1000000 54 | 55 | # Optional fields that you can specify to add additional information to the 56 | # output. 
57 | fields: 58 | app_id: filebeat_app_id 59 | --- 60 | apiVersion: v1 61 | kind: ConfigMap 62 | metadata: 63 | name: filebeat-inputs 64 | namespace: kube-system 65 | labels: 66 | k8s-app: filebeat 67 | data: 68 | kubernetes.yml: |- 69 | - type: docker 70 | containers.ids: 71 | - "*" 72 | processors: 73 | - add_kubernetes_metadata: 74 | in_cluster: true 75 | --- 76 | apiVersion: extensions/v1beta1 77 | kind: DaemonSet 78 | metadata: 79 | name: filebeat 80 | namespace: kube-system 81 | labels: 82 | k8s-app: filebeat 83 | spec: 84 | template: 85 | metadata: 86 | labels: 87 | k8s-app: filebeat 88 | spec: 89 | serviceAccountName: filebeat 90 | terminationGracePeriodSeconds: 30 91 | containers: 92 | - name: filebeat 93 | image: docker.elastic.co/beats/filebeat:6.7.1 94 | imagePullPolicy: IfNotPresent 95 | args: [ 96 | "-c", "/etc/filebeat.yml", 97 | "-e", 98 | ] 99 | env: 100 | - name: ELASTIC_CLOUD_ID 101 | value: 102 | - name: ELASTIC_CLOUD_AUTH 103 | value: 104 | securityContext: 105 | runAsUser: 0 106 | # If using Red Hat OpenShift uncomment this: 107 | #privileged: true 108 | resources: 109 | limits: 110 | memory: 768Mi 111 | requests: 112 | cpu: 100m 113 | memory: 100Mi 114 | volumeMounts: 115 | - name: config 116 | mountPath: /etc/filebeat.yml 117 | readOnly: true 118 | subPath: filebeat.yml 119 | - name: inputs 120 | mountPath: /usr/share/filebeat/inputs.d 121 | readOnly: true 122 | - name: data 123 | mountPath: /usr/share/filebeat/data 124 | - name: varlibdockercontainers 125 | mountPath: /var/lib/docker/containers 126 | readOnly: true 127 | volumes: 128 | - name: config 129 | configMap: 130 | defaultMode: 0600 131 | name: filebeat-config 132 | - name: varlibdockercontainers 133 | hostPath: 134 | path: /var/lib/docker/containers 135 | - name: inputs 136 | configMap: 137 | defaultMode: 0600 138 | name: filebeat-inputs 139 | # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart 140 | - name: data 141 | hostPath: 142 | path: /var/lib/filebeat-data 143 | type: DirectoryOrCreate 144 | --- 145 | apiVersion: rbac.authorization.k8s.io/v1beta1 146 | kind: ClusterRoleBinding 147 | metadata: 148 | name: filebeat 149 | subjects: 150 | - kind: ServiceAccount 151 | name: filebeat 152 | namespace: kube-system 153 | roleRef: 154 | kind: ClusterRole 155 | name: filebeat 156 | apiGroup: rbac.authorization.k8s.io 157 | --- 158 | apiVersion: rbac.authorization.k8s.io/v1beta1 159 | kind: ClusterRole 160 | metadata: 161 | name: filebeat 162 | labels: 163 | k8s-app: filebeat 164 | rules: 165 | - apiGroups: [""] # "" indicates the core API group 166 | resources: 167 | - namespaces 168 | - pods 169 | verbs: 170 | - get 171 | - watch 172 | - list 173 | --- 174 | apiVersion: v1 175 | kind: ServiceAccount 176 | metadata: 177 | name: filebeat 178 | namespace: kube-system 179 | labels: 180 | k8s-app: filebeat 181 | --- -------------------------------------------------------------------------------- /roles/ingress-nginx/templates/ingress-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: kube-ingress-nginx 5 | --- 6 | kind: ConfigMap 7 | apiVersion: v1 8 | metadata: 9 | name: nginx-configuration 10 | namespace: kube-ingress-nginx 11 | labels: 12 | app.kubernetes.io/name: ingress-nginx 13 | app.kubernetes.io/part-of: ingress-nginx 14 | data: 15 | log-format-upstream: '"$http_x_forwarded_for" $remote_user [$time_local] "$request" $request_time 
$host $request_length $request_time $status "$http_referer" "$http_user_agent" "$the_real_ip" $cookie_WSPE_SID $cookie_CoreID6 $cookie_WSPFY $cookie_svi_dec $namespace' 16 | worker-shutdown-timeout: "600s" 17 | gzip-level: "1" 18 | upstream-keepalive-connections: "128" 19 | skip-access-log-urls: "/nginx_status" 20 | large-client-header-buffers: "4 16k" 21 | --- 22 | apiVersion: v1 23 | kind: ServiceAccount 24 | metadata: 25 | name: nginx-ingress-serviceaccount 26 | namespace: kube-ingress-nginx 27 | labels: 28 | app.kubernetes.io/name: ingress-nginx 29 | app.kubernetes.io/part-of: ingress-nginx 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRole 33 | metadata: 34 | name: nginx-ingress-clusterrole 35 | labels: 36 | app.kubernetes.io/name: ingress-nginx 37 | app.kubernetes.io/part-of: ingress-nginx 38 | rules: 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - configmaps 43 | - endpoints 44 | - nodes 45 | - pods 46 | - secrets 47 | verbs: 48 | - list 49 | - watch 50 | - apiGroups: 51 | - "" 52 | resources: 53 | - nodes 54 | verbs: 55 | - get 56 | - apiGroups: 57 | - "" 58 | resources: 59 | - services 60 | verbs: 61 | - get 62 | - list 63 | - watch 64 | - apiGroups: 65 | - "extensions" 66 | resources: 67 | - ingresses 68 | verbs: 69 | - get 70 | - list 71 | - watch 72 | - apiGroups: 73 | - "" 74 | resources: 75 | - events 76 | verbs: 77 | - create 78 | - patch 79 | - apiGroups: 80 | - "extensions" 81 | resources: 82 | - ingresses/status 83 | verbs: 84 | - update 85 | --- 86 | apiVersion: rbac.authorization.k8s.io/v1 87 | kind: Role 88 | metadata: 89 | name: nginx-ingress-role 90 | namespace: kube-ingress-nginx 91 | labels: 92 | app.kubernetes.io/name: ingress-nginx 93 | app.kubernetes.io/part-of: ingress-nginx 94 | rules: 95 | - apiGroups: 96 | - "" 97 | resources: 98 | - configmaps 99 | - pods 100 | - secrets 101 | - namespaces 102 | verbs: 103 | - get 104 | - apiGroups: 105 | - "" 106 | resources: 107 | - configmaps 108 | resourceNames: 109 | # Defaults to "-" 110 | # Here: "-" 111 | # This has to be adapted if you change either parameter 112 | # when launching the nginx-ingress-controller. 
113 | - "ingress-controller-leader-nginx" 114 | verbs: 115 | - get 116 | - update 117 | - apiGroups: 118 | - "" 119 | resources: 120 | - configmaps 121 | verbs: 122 | - create 123 | - apiGroups: 124 | - "" 125 | resources: 126 | - endpoints 127 | verbs: 128 | - get 129 | --- 130 | apiVersion: rbac.authorization.k8s.io/v1 131 | kind: RoleBinding 132 | metadata: 133 | name: nginx-ingress-role-nisa-binding 134 | namespace: kube-ingress-nginx 135 | labels: 136 | app.kubernetes.io/name: ingress-nginx 137 | app.kubernetes.io/part-of: ingress-nginx 138 | roleRef: 139 | apiGroup: rbac.authorization.k8s.io 140 | kind: Role 141 | name: nginx-ingress-role 142 | subjects: 143 | - kind: ServiceAccount 144 | name: nginx-ingress-serviceaccount 145 | namespace: kube-ingress-nginx 146 | --- 147 | apiVersion: rbac.authorization.k8s.io/v1 148 | kind: ClusterRoleBinding 149 | metadata: 150 | name: nginx-ingress-clusterrole-nisa-binding 151 | labels: 152 | app.kubernetes.io/name: ingress-nginx 153 | app.kubernetes.io/part-of: ingress-nginx 154 | roleRef: 155 | apiGroup: rbac.authorization.k8s.io 156 | kind: ClusterRole 157 | name: nginx-ingress-clusterrole 158 | subjects: 159 | - kind: ServiceAccount 160 | name: nginx-ingress-serviceaccount 161 | namespace: kube-ingress-nginx 162 | --- 163 | apiVersion: extensions/v1beta1 164 | kind: Deployment 165 | metadata: 166 | name: nginx-ingress-controller 167 | namespace: kube-ingress-nginx 168 | labels: 169 | app.kubernetes.io/name: ingress-nginx 170 | app.kubernetes.io/part-of: ingress-nginx 171 | spec: 172 | replicas: 3 173 | selector: 174 | matchLabels: 175 | app.kubernetes.io/name: ingress-nginx 176 | app.kubernetes.io/part-of: ingress-nginx 177 | template: 178 | metadata: 179 | labels: 180 | app.kubernetes.io/name: ingress-nginx 181 | app.kubernetes.io/part-of: ingress-nginx 182 | annotations: 183 | co.elastic.metrics/raw: '[{"metricsets":["collector"],"module":"prometheus","namespace":"kube-ingress-nginx","period":"10s","hosts":["${data.host}:10254"]}]' 184 | spec: 185 | serviceAccountName: nginx-ingress-serviceaccount 186 | #nodeSelector: 187 | # role: edge 188 | affinity: 189 | podAntiAffinity: 190 | requiredDuringSchedulingIgnoredDuringExecution: 191 | - labelSelector: 192 | matchExpressions: 193 | - key: app.kubernetes.io/name 194 | operator: In 195 | values: 196 | - ingress-nginx 197 | topologyKey: kubernetes.io/hostname 198 | initContainers: 199 | - command: 200 | - sh 201 | - -c 202 | - sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range="32768 65535" 203 | image: privateregistry/alpine:3.9 204 | imagePullPolicy: IfNotPresent 205 | name: sysctl 206 | securityContext: 207 | privileged: true 208 | containers: 209 | - name: nginx-ingress-controller 210 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.24.1 211 | args: 212 | - /nginx-ingress-controller 213 | - --configmap=$(POD_NAMESPACE)/nginx-configuration 214 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx 215 | - --enable-ssl-passthrough 216 | - --annotations-prefix=nginx.ingress.kubernetes.io 217 | - --v=1 218 | securityContext: 219 | capabilities: 220 | drop: 221 | - ALL 222 | add: 223 | - NET_BIND_SERVICE 224 | # www-data -> 33 225 | runAsUser: 33 226 | env: 227 | - name: POD_NAME 228 | valueFrom: 229 | fieldRef: 230 | fieldPath: metadata.name 231 | - name: POD_NAMESPACE 232 | valueFrom: 233 | fieldRef: 234 | fieldPath: metadata.namespace 235 | ports: 236 | - name: http 237 | containerPort: 80 238 | - name: https 239 | containerPort: 443 240 | 
livenessProbe: 241 | failureThreshold: 3 242 | httpGet: 243 | path: /healthz 244 | port: 10254 245 | scheme: HTTP 246 | initialDelaySeconds: 10 247 | periodSeconds: 10 248 | successThreshold: 1 249 | timeoutSeconds: 10 250 | readinessProbe: 251 | failureThreshold: 3 252 | httpGet: 253 | path: /healthz 254 | port: 10254 255 | scheme: HTTP 256 | periodSeconds: 10 257 | successThreshold: 1 258 | timeoutSeconds: 10 259 | --- 260 | kind: Service 261 | apiVersion: v1 262 | metadata: 263 | name: ingress-nginx 264 | namespace: kube-ingress-nginx 265 | labels: 266 | app.kubernetes.io/name: ingress-nginx 267 | app.kubernetes.io/part-of: ingress-nginx 268 | spec: 269 | type: NodePort 270 | externalTrafficPolicy: Local 271 | selector: 272 | app.kubernetes.io/name: ingress-nginx 273 | app.kubernetes.io/part-of: ingress-nginx 274 | ports: 275 | - name: https 276 | nodePort: 30443 277 | port: 443 278 | targetPort: 443 279 | protocol: TCP 280 | --- 281 | -------------------------------------------------------------------------------- /roles/kubernetes-dashboard/templates/recommended.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | apiVersion: v1 16 | kind: Namespace 17 | metadata: 18 | name: kubernetes-dashboard 19 | 20 | --- 21 | 22 | apiVersion: v1 23 | kind: ServiceAccount 24 | metadata: 25 | labels: 26 | k8s-app: kubernetes-dashboard 27 | name: kubernetes-dashboard 28 | namespace: kubernetes-dashboard 29 | 30 | --- 31 | 32 | kind: Service 33 | apiVersion: v1 34 | metadata: 35 | labels: 36 | k8s-app: kubernetes-dashboard 37 | name: kubernetes-dashboard 38 | namespace: kubernetes-dashboard 39 | spec: 40 | ports: 41 | - port: 443 42 | targetPort: 8443 43 | selector: 44 | k8s-app: kubernetes-dashboard 45 | 46 | --- 47 | 48 | apiVersion: v1 49 | kind: Secret 50 | metadata: 51 | labels: 52 | k8s-app: kubernetes-dashboard 53 | name: kubernetes-dashboard-certs 54 | namespace: kubernetes-dashboard 55 | type: Opaque 56 | 57 | --- 58 | 59 | apiVersion: v1 60 | kind: Secret 61 | metadata: 62 | labels: 63 | k8s-app: kubernetes-dashboard 64 | name: kubernetes-dashboard-csrf 65 | namespace: kubernetes-dashboard 66 | type: Opaque 67 | data: 68 | csrf: "" 69 | 70 | --- 71 | 72 | apiVersion: v1 73 | kind: Secret 74 | metadata: 75 | labels: 76 | k8s-app: kubernetes-dashboard 77 | name: kubernetes-dashboard-key-holder 78 | namespace: kubernetes-dashboard 79 | type: Opaque 80 | 81 | --- 82 | 83 | kind: ConfigMap 84 | apiVersion: v1 85 | metadata: 86 | labels: 87 | k8s-app: kubernetes-dashboard 88 | name: kubernetes-dashboard-settings 89 | namespace: kubernetes-dashboard 90 | 91 | --- 92 | 93 | kind: Role 94 | apiVersion: rbac.authorization.k8s.io/v1 95 | metadata: 96 | labels: 97 | k8s-app: kubernetes-dashboard 98 | name: kubernetes-dashboard 99 | namespace: kubernetes-dashboard 100 | rules: 101 | # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 102 | - apiGroups: [""] 103 | resources: ["secrets"] 104 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] 105 | verbs: ["get", "update", "delete"] 106 | # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. 107 | - apiGroups: [""] 108 | resources: ["configmaps"] 109 | resourceNames: ["kubernetes-dashboard-settings"] 110 | verbs: ["get", "update"] 111 | # Allow Dashboard to get metrics. 
112 | - apiGroups: [""] 113 | resources: ["services"] 114 | resourceNames: ["heapster", "dashboard-metrics-scraper"] 115 | verbs: ["proxy"] 116 | - apiGroups: [""] 117 | resources: ["services/proxy"] 118 | resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] 119 | verbs: ["get"] 120 | 121 | --- 122 | 123 | kind: ClusterRole 124 | apiVersion: rbac.authorization.k8s.io/v1 125 | metadata: 126 | labels: 127 | k8s-app: kubernetes-dashboard 128 | name: kubernetes-dashboard 129 | rules: 130 | # Allow Metrics Scraper to get metrics from the Metrics server 131 | - apiGroups: ["metrics.k8s.io"] 132 | resources: ["pods", "nodes"] 133 | verbs: ["get", "list", "watch"] 134 | 135 | --- 136 | 137 | apiVersion: rbac.authorization.k8s.io/v1 138 | kind: RoleBinding 139 | metadata: 140 | labels: 141 | k8s-app: kubernetes-dashboard 142 | name: kubernetes-dashboard 143 | namespace: kubernetes-dashboard 144 | roleRef: 145 | apiGroup: rbac.authorization.k8s.io 146 | kind: Role 147 | name: kubernetes-dashboard 148 | subjects: 149 | - kind: ServiceAccount 150 | name: kubernetes-dashboard 151 | namespace: kubernetes-dashboard 152 | 153 | --- 154 | 155 | apiVersion: rbac.authorization.k8s.io/v1 156 | kind: ClusterRoleBinding 157 | metadata: 158 | name: kubernetes-dashboard 159 | namespace: kubernetes-dashboard 160 | roleRef: 161 | apiGroup: rbac.authorization.k8s.io 162 | kind: ClusterRole 163 | name: kubernetes-dashboard 164 | subjects: 165 | - kind: ServiceAccount 166 | name: kubernetes-dashboard 167 | namespace: kubernetes-dashboard 168 | 169 | --- 170 | 171 | kind: Deployment 172 | apiVersion: apps/v1 173 | metadata: 174 | labels: 175 | k8s-app: kubernetes-dashboard 176 | name: kubernetes-dashboard 177 | namespace: kubernetes-dashboard 178 | spec: 179 | replicas: 1 180 | revisionHistoryLimit: 10 181 | selector: 182 | matchLabels: 183 | k8s-app: kubernetes-dashboard 184 | template: 185 | metadata: 186 | labels: 187 | k8s-app: kubernetes-dashboard 188 | spec: 189 | containers: 190 | - name: kubernetes-dashboard 191 | image: kubernetesui/dashboard:v2.0.0-beta3 192 | imagePullPolicy: Always 193 | ports: 194 | - containerPort: 8443 195 | protocol: TCP 196 | args: 197 | - --auto-generate-certificates 198 | - --namespace=kubernetes-dashboard 199 | # Uncomment the following line to manually specify Kubernetes API server Host 200 | # If not specified, Dashboard will attempt to auto discover the API server and connect 201 | # to it. Uncomment only if the default does not work. 
202 | # - --apiserver-host=http://my-address:port 203 | volumeMounts: 204 | - name: kubernetes-dashboard-certs 205 | mountPath: /certs 206 | # Create on-disk volume to store exec logs 207 | - mountPath: /tmp 208 | name: tmp-volume 209 | livenessProbe: 210 | httpGet: 211 | scheme: HTTPS 212 | path: / 213 | port: 8443 214 | initialDelaySeconds: 30 215 | timeoutSeconds: 30 216 | volumes: 217 | - name: kubernetes-dashboard-certs 218 | secret: 219 | secretName: kubernetes-dashboard-certs 220 | - name: tmp-volume 221 | emptyDir: {} 222 | serviceAccountName: kubernetes-dashboard 223 | # Comment the following tolerations if Dashboard must not be deployed on master 224 | tolerations: 225 | - key: node-role.kubernetes.io/master 226 | effect: NoSchedule 227 | 228 | --- 229 | 230 | kind: Service 231 | apiVersion: v1 232 | metadata: 233 | labels: 234 | k8s-app: dashboard-metrics-scraper 235 | name: dashboard-metrics-scraper 236 | namespace: kubernetes-dashboard 237 | spec: 238 | ports: 239 | - port: 8000 240 | targetPort: 8000 241 | selector: 242 | k8s-app: dashboard-metrics-scraper 243 | 244 | --- 245 | 246 | kind: Deployment 247 | apiVersion: apps/v1 248 | metadata: 249 | labels: 250 | k8s-app: dashboard-metrics-scraper 251 | name: dashboard-metrics-scraper 252 | namespace: kubernetes-dashboard 253 | spec: 254 | replicas: 1 255 | revisionHistoryLimit: 10 256 | selector: 257 | matchLabels: 258 | k8s-app: dashboard-metrics-scraper 259 | template: 260 | metadata: 261 | labels: 262 | k8s-app: dashboard-metrics-scraper 263 | spec: 264 | containers: 265 | - name: dashboard-metrics-scraper 266 | image: kubernetesui/metrics-scraper:v1.0.1 267 | ports: 268 | - containerPort: 8000 269 | protocol: TCP 270 | livenessProbe: 271 | httpGet: 272 | scheme: HTTP 273 | path: / 274 | port: 8000 275 | initialDelaySeconds: 30 276 | timeoutSeconds: 30 277 | volumeMounts: 278 | - mountPath: /tmp 279 | name: tmp-volume 280 | serviceAccountName: kubernetes-dashboard 281 | # Comment the following tolerations if Dashboard must not be deployed on master 282 | tolerations: 283 | - key: node-role.kubernetes.io/master 284 | effect: NoSchedule 285 | volumes: 286 | - name: tmp-volume 287 | emptyDir: {} 288 | -------------------------------------------------------------------------------- /roles/elastic-search/templates/metricbeat-kubernetes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: metricbeat-daemonset-config 6 | namespace: kube-system 7 | labels: 8 | k8s-app: metricbeat 9 | data: 10 | metricbeat.yml: |- 11 | metricbeat.config.modules: 12 | # Mounted `metricbeat-daemonset-modules` configmap: 13 | path: ${path.config}/modules.d/*.yml 14 | # Reload module configs as they change: 15 | reload.enabled: false 16 | 17 | # To enable hints based autodiscover uncomment this: 18 | metricbeat.autodiscover: 19 | providers: 20 | - type: kubernetes 21 | host: ${NODE_NAME} 22 | hints.enabled: true 23 | labels.dedot: true 24 | annotations.dedot: true 25 | 26 | processors: 27 | - add_cloud_metadata: 28 | - add_kubernetes_metadata: 29 | labels.dedot: true 30 | annotations.dedot: true 31 | - drop_fields: 32 | when: 33 | has_fields: ['kubernetes.labels.app'] 34 | fields: 35 | - 'kubernetes.labels.app' 36 | cloud.id: ${ELASTIC_CLOUD_ID} 37 | cloud.auth: ${ELASTIC_CLOUD_AUTH} 38 | 39 | output.kafka: 40 | # initial brokers for reading cluster metadata 41 | hosts: ["kafrck-vccn010.domain.local:31090", "kafrck-vccn011.domain.local:31091", 
"kafrck-vccn012.domain.local:31092", "kafrck-vccn013.domain.local:31093", "kafrck-vccn014.domain.local:31094"] 42 | # message topic selection + partitioning 43 | topic: '%{[fields.app_id]}' 44 | #topic: 'metricbeat' 45 | partition.round_robin: 46 | reachable_only: false 47 | required_acks: 1 48 | compression: gzip 49 | max_message_bytes: 1000000 50 | # Optional fields that you can specify to add additional information to the 51 | # output. 52 | fields: 53 | app_id: metricbeat_app_id 54 | --- 55 | apiVersion: v1 56 | kind: ConfigMap 57 | metadata: 58 | name: metricbeat-daemonset-modules 59 | namespace: kube-system 60 | labels: 61 | k8s-app: metricbeat 62 | data: 63 | system.yml: |- 64 | - module: system 65 | period: 10s 66 | metricsets: 67 | - cpu 68 | - load 69 | - memory 70 | - network 71 | - process 72 | - process_summary 73 | #- core 74 | #- diskio 75 | #- socket 76 | processes: ['.*'] 77 | process.include_top_n: 78 | by_cpu: 5 # include top 5 processes by CPU 79 | by_memory: 5 # include top 5 processes by memory 80 | 81 | - module: system 82 | period: 1m 83 | metricsets: 84 | - filesystem 85 | - fsstat 86 | processors: 87 | - drop_event.when.regexp: 88 | system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)' 89 | kubernetes.yml: |- 90 | - module: kubernetes 91 | metricsets: 92 | - node 93 | - system 94 | - pod 95 | - container 96 | - volume 97 | period: 10s 98 | host: ${NODE_NAME} 99 | labels.dedot: true 100 | annotations.dedot: true 101 | #hosts: ["localhost:10255"] 102 | # If using Red Hat OpenShift remove the previous hosts entry and 103 | # uncomment these settings: 104 | hosts: ["https://${HOSTNAME}:10250"] 105 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 106 | ssl.verification_mode: "none" 107 | ssl.certificate_authorities: 108 | - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 109 | --- 110 | # Deploy a Metricbeat instance per node for node metrics retrieval 111 | apiVersion: extensions/v1beta1 112 | kind: DaemonSet 113 | metadata: 114 | name: metricbeat 115 | namespace: kube-system 116 | labels: 117 | k8s-app: metricbeat 118 | spec: 119 | template: 120 | metadata: 121 | labels: 122 | k8s-app: metricbeat 123 | spec: 124 | serviceAccountName: metricbeat 125 | terminationGracePeriodSeconds: 30 126 | hostNetwork: true 127 | dnsPolicy: ClusterFirstWithHostNet 128 | containers: 129 | - name: metricbeat 130 | image: docker.elastic.co/beats/metricbeat:6.7.1 131 | imagePullPolicy: IfNotPresent 132 | args: [ 133 | "-c", "/etc/metricbeat.yml", 134 | "-e", 135 | "-system.hostfs=/hostfs", 136 | ] 137 | env: 138 | - name: ELASTIC_CLOUD_ID 139 | value: 140 | - name: ELASTIC_CLOUD_AUTH 141 | value: 142 | - name: NODE_NAME 143 | valueFrom: 144 | fieldRef: 145 | fieldPath: spec.nodeName 146 | securityContext: 147 | runAsUser: 0 148 | resources: 149 | limits: 150 | memory: 400Mi 151 | requests: 152 | cpu: 100m 153 | memory: 100Mi 154 | volumeMounts: 155 | - name: config 156 | mountPath: /etc/metricbeat.yml 157 | readOnly: true 158 | subPath: metricbeat.yml 159 | - name: modules 160 | mountPath: /usr/share/metricbeat/modules.d 161 | readOnly: true 162 | - name: dockersock 163 | mountPath: /var/run/docker.sock 164 | - name: proc 165 | mountPath: /hostfs/proc 166 | readOnly: true 167 | - name: cgroup 168 | mountPath: /hostfs/sys/fs/cgroup 169 | readOnly: true 170 | volumes: 171 | - name: proc 172 | hostPath: 173 | path: /proc 174 | - name: cgroup 175 | hostPath: 176 | path: /sys/fs/cgroup 177 | - name: dockersock 178 | hostPath: 179 | path: 
/var/run/docker.sock 180 | - name: config 181 | configMap: 182 | defaultMode: 0600 183 | name: metricbeat-daemonset-config 184 | - name: modules 185 | configMap: 186 | defaultMode: 0600 187 | name: metricbeat-daemonset-modules 188 | - name: data 189 | hostPath: 190 | path: /var/lib/metricbeat-data 191 | type: DirectoryOrCreate 192 | --- 193 | apiVersion: v1 194 | kind: ConfigMap 195 | metadata: 196 | name: metricbeat-deployment-config 197 | namespace: kube-system 198 | labels: 199 | k8s-app: metricbeat 200 | data: 201 | metricbeat.yml: |- 202 | metricbeat.config.modules: 203 | # Mounted `metricbeat-daemonset-modules` configmap: 204 | path: ${path.config}/modules.d/*.yml 205 | # Reload module configs as they change: 206 | reload.enabled: false 207 | 208 | processors: 209 | - add_cloud_metadata: 210 | - add_kubernetes_metadata: 211 | labels.dedot: true 212 | annotations.dedot: true 213 | - drop_fields: 214 | when: 215 | has_fields: ['kubernetes.labels.app'] 216 | fields: 217 | - 'kubernetes.labels.app' 218 | cloud.id: ${ELASTIC_CLOUD_ID} 219 | cloud.auth: ${ELASTIC_CLOUD_AUTH} 220 | output.kafka: 221 | # initial brokers for reading cluster metadata 222 | hosts: ["kafrck-vccn010.domain.local:31090", "kafrck-vccn011.domain.local:31091", "kafrck-vccn012.domain.local:31092", "kafrck-vccn013.domain.local:31093", "kafrck-vccn014.domain.local:31094"] 223 | # message topic selection + partitioning 224 | topic: '%{[fields.app_id]}' 225 | #topic: 'metricbeat' 226 | partition.round_robin: 227 | reachable_only: false 228 | required_acks: 1 229 | compression: gzip 230 | max_message_bytes: 1000000 231 | 232 | # Optional fields that you can specify to add additional information to the 233 | # output. 234 | fields: 235 | app_id: metricbeat-kubernetes 236 | 237 | --- 238 | apiVersion: v1 239 | kind: ConfigMap 240 | metadata: 241 | name: metricbeat-deployment-modules 242 | namespace: kube-system 243 | labels: 244 | k8s-app: metricbeat 245 | data: 246 | # This module requires `kube-state-metrics` up and running under `kube-system` namespace 247 | kubernetes.yml: |- 248 | - module: kubernetes 249 | metricsets: 250 | - state_node 251 | - state_deployment 252 | - state_replicaset 253 | - state_pod 254 | - state_container 255 | # Uncomment this to get k8s events: 256 | - event 257 | period: 10s 258 | host: ${NODE_NAME} 259 | hosts: ["kube-state-metrics:8080"] 260 | --- 261 | # Deploy singleton instance in the whole cluster for some unique data sources, like kube-state-metrics 262 | apiVersion: apps/v1beta1 263 | kind: Deployment 264 | metadata: 265 | name: metricbeat 266 | namespace: kube-system 267 | labels: 268 | k8s-app: metricbeat 269 | spec: 270 | template: 271 | metadata: 272 | labels: 273 | k8s-app: metricbeat 274 | spec: 275 | serviceAccountName: metricbeat 276 | hostNetwork: true 277 | dnsPolicy: ClusterFirstWithHostNet 278 | containers: 279 | - name: metricbeat 280 | image: docker.elastic.co/beats/metricbeat:6.7.1 281 | imagePullPolicy: IfNotPresent 282 | args: [ 283 | "-c", "/etc/metricbeat.yml", 284 | "-e", 285 | ] 286 | env: 287 | - name: ELASTIC_CLOUD_ID 288 | value: 289 | - name: ELASTIC_CLOUD_AUTH 290 | value: 291 | - name: NODE_NAME 292 | valueFrom: 293 | fieldRef: 294 | fieldPath: spec.nodeName 295 | securityContext: 296 | runAsUser: 0 297 | resources: 298 | limits: 299 | memory: 400Mi 300 | requests: 301 | cpu: 100m 302 | memory: 100Mi 303 | volumeMounts: 304 | - name: config 305 | mountPath: /etc/metricbeat.yml 306 | readOnly: true 307 | subPath: metricbeat.yml 308 | - name: modules 309 | 
mountPath: /usr/share/metricbeat/modules.d 310 | readOnly: true 311 | volumes: 312 | - name: config 313 | configMap: 314 | defaultMode: 0600 315 | name: metricbeat-deployment-config 316 | - name: modules 317 | configMap: 318 | defaultMode: 0600 319 | name: metricbeat-deployment-modules 320 | --- 321 | apiVersion: rbac.authorization.k8s.io/v1beta1 322 | kind: ClusterRoleBinding 323 | metadata: 324 | name: metricbeat 325 | subjects: 326 | - kind: ServiceAccount 327 | name: metricbeat 328 | namespace: kube-system 329 | roleRef: 330 | kind: ClusterRole 331 | name: metricbeat 332 | apiGroup: rbac.authorization.k8s.io 333 | --- 334 | apiVersion: rbac.authorization.k8s.io/v1beta1 335 | kind: ClusterRole 336 | metadata: 337 | name: metricbeat 338 | labels: 339 | k8s-app: metricbeat 340 | rules: 341 | - apiGroups: [""] 342 | resources: 343 | - nodes 344 | - namespaces 345 | - events 346 | - pods 347 | verbs: ["get", "list", "watch"] 348 | - apiGroups: ["extensions"] 349 | resources: 350 | - replicasets 351 | verbs: ["get", "list", "watch"] 352 | - apiGroups: ["apps"] 353 | resources: 354 | - statefulsets 355 | - deployments 356 | verbs: ["get", "list", "watch"] 357 | - apiGroups: 358 | - "" 359 | resources: 360 | - nodes/stats 361 | verbs: 362 | - get 363 | --- 364 | apiVersion: v1 365 | kind: ServiceAccount 366 | metadata: 367 | name: metricbeat 368 | namespace: kube-system 369 | labels: 370 | k8s-app: metricbeat 371 | --- -------------------------------------------------------------------------------- /roles/calico/templates/calico.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: calico/templates/calico-config.yaml 3 | # This ConfigMap is used to configure a self-hosted Calico installation. 4 | kind: ConfigMap 5 | apiVersion: v1 6 | metadata: 7 | name: calico-config 8 | namespace: kube-system 9 | data: 10 | # Typha is disabled. 11 | typha_service_name: "none" 12 | # Configure the backend to use. 13 | calico_backend: "bird" 14 | 15 | # Configure the MTU to use 16 | veth_mtu: "1440" 17 | 18 | # The CNI network configuration to install on each node. The special 19 | # values in this config will be automatically populated. 
20 | cni_network_config: |- 21 | { 22 | "name": "k8s-pod-network", 23 | "cniVersion": "0.3.1", 24 | "plugins": [ 25 | { 26 | "type": "calico", 27 | "log_level": "info", 28 | "datastore_type": "kubernetes", 29 | "nodename": "__KUBERNETES_NODE_NAME__", 30 | "mtu": __CNI_MTU__, 31 | "ipam": { 32 | "type": "calico-ipam" 33 | }, 34 | "policy": { 35 | "type": "k8s" 36 | }, 37 | "kubernetes": { 38 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 39 | } 40 | }, 41 | { 42 | "type": "portmap", 43 | "snat": true, 44 | "capabilities": {"portMappings": true} 45 | } 46 | ] 47 | } 48 | 49 | --- 50 | # Source: calico/templates/kdd-crds.yaml 51 | apiVersion: apiextensions.k8s.io/v1beta1 52 | kind: CustomResourceDefinition 53 | metadata: 54 | name: felixconfigurations.crd.projectcalico.org 55 | spec: 56 | scope: Cluster 57 | group: crd.projectcalico.org 58 | version: v1 59 | names: 60 | kind: FelixConfiguration 61 | plural: felixconfigurations 62 | singular: felixconfiguration 63 | --- 64 | 65 | apiVersion: apiextensions.k8s.io/v1beta1 66 | kind: CustomResourceDefinition 67 | metadata: 68 | name: ipamblocks.crd.projectcalico.org 69 | spec: 70 | scope: Cluster 71 | group: crd.projectcalico.org 72 | version: v1 73 | names: 74 | kind: IPAMBlock 75 | plural: ipamblocks 76 | singular: ipamblock 77 | 78 | --- 79 | 80 | apiVersion: apiextensions.k8s.io/v1beta1 81 | kind: CustomResourceDefinition 82 | metadata: 83 | name: blockaffinities.crd.projectcalico.org 84 | spec: 85 | scope: Cluster 86 | group: crd.projectcalico.org 87 | version: v1 88 | names: 89 | kind: BlockAffinity 90 | plural: blockaffinities 91 | singular: blockaffinity 92 | 93 | --- 94 | 95 | apiVersion: apiextensions.k8s.io/v1beta1 96 | kind: CustomResourceDefinition 97 | metadata: 98 | name: ipamhandles.crd.projectcalico.org 99 | spec: 100 | scope: Cluster 101 | group: crd.projectcalico.org 102 | version: v1 103 | names: 104 | kind: IPAMHandle 105 | plural: ipamhandles 106 | singular: ipamhandle 107 | 108 | --- 109 | 110 | apiVersion: apiextensions.k8s.io/v1beta1 111 | kind: CustomResourceDefinition 112 | metadata: 113 | name: ipamconfigs.crd.projectcalico.org 114 | spec: 115 | scope: Cluster 116 | group: crd.projectcalico.org 117 | version: v1 118 | names: 119 | kind: IPAMConfig 120 | plural: ipamconfigs 121 | singular: ipamconfig 122 | 123 | --- 124 | 125 | apiVersion: apiextensions.k8s.io/v1beta1 126 | kind: CustomResourceDefinition 127 | metadata: 128 | name: bgppeers.crd.projectcalico.org 129 | spec: 130 | scope: Cluster 131 | group: crd.projectcalico.org 132 | version: v1 133 | names: 134 | kind: BGPPeer 135 | plural: bgppeers 136 | singular: bgppeer 137 | 138 | --- 139 | 140 | apiVersion: apiextensions.k8s.io/v1beta1 141 | kind: CustomResourceDefinition 142 | metadata: 143 | name: bgpconfigurations.crd.projectcalico.org 144 | spec: 145 | scope: Cluster 146 | group: crd.projectcalico.org 147 | version: v1 148 | names: 149 | kind: BGPConfiguration 150 | plural: bgpconfigurations 151 | singular: bgpconfiguration 152 | 153 | --- 154 | 155 | apiVersion: apiextensions.k8s.io/v1beta1 156 | kind: CustomResourceDefinition 157 | metadata: 158 | name: ippools.crd.projectcalico.org 159 | spec: 160 | scope: Cluster 161 | group: crd.projectcalico.org 162 | version: v1 163 | names: 164 | kind: IPPool 165 | plural: ippools 166 | singular: ippool 167 | 168 | --- 169 | 170 | apiVersion: apiextensions.k8s.io/v1beta1 171 | kind: CustomResourceDefinition 172 | metadata: 173 | name: hostendpoints.crd.projectcalico.org 174 | spec: 175 | scope: Cluster 176 | group: 
crd.projectcalico.org 177 | version: v1 178 | names: 179 | kind: HostEndpoint 180 | plural: hostendpoints 181 | singular: hostendpoint 182 | 183 | --- 184 | 185 | apiVersion: apiextensions.k8s.io/v1beta1 186 | kind: CustomResourceDefinition 187 | metadata: 188 | name: clusterinformations.crd.projectcalico.org 189 | spec: 190 | scope: Cluster 191 | group: crd.projectcalico.org 192 | version: v1 193 | names: 194 | kind: ClusterInformation 195 | plural: clusterinformations 196 | singular: clusterinformation 197 | 198 | --- 199 | 200 | apiVersion: apiextensions.k8s.io/v1beta1 201 | kind: CustomResourceDefinition 202 | metadata: 203 | name: globalnetworkpolicies.crd.projectcalico.org 204 | spec: 205 | scope: Cluster 206 | group: crd.projectcalico.org 207 | version: v1 208 | names: 209 | kind: GlobalNetworkPolicy 210 | plural: globalnetworkpolicies 211 | singular: globalnetworkpolicy 212 | 213 | --- 214 | 215 | apiVersion: apiextensions.k8s.io/v1beta1 216 | kind: CustomResourceDefinition 217 | metadata: 218 | name: globalnetworksets.crd.projectcalico.org 219 | spec: 220 | scope: Cluster 221 | group: crd.projectcalico.org 222 | version: v1 223 | names: 224 | kind: GlobalNetworkSet 225 | plural: globalnetworksets 226 | singular: globalnetworkset 227 | 228 | --- 229 | 230 | apiVersion: apiextensions.k8s.io/v1beta1 231 | kind: CustomResourceDefinition 232 | metadata: 233 | name: networkpolicies.crd.projectcalico.org 234 | spec: 235 | scope: Namespaced 236 | group: crd.projectcalico.org 237 | version: v1 238 | names: 239 | kind: NetworkPolicy 240 | plural: networkpolicies 241 | singular: networkpolicy 242 | 243 | --- 244 | 245 | apiVersion: apiextensions.k8s.io/v1beta1 246 | kind: CustomResourceDefinition 247 | metadata: 248 | name: networksets.crd.projectcalico.org 249 | spec: 250 | scope: Namespaced 251 | group: crd.projectcalico.org 252 | version: v1 253 | names: 254 | kind: NetworkSet 255 | plural: networksets 256 | singular: networkset 257 | --- 258 | # Source: calico/templates/rbac.yaml 259 | 260 | # Include a clusterrole for the kube-controllers component, 261 | # and bind it to the calico-kube-controllers serviceaccount. 262 | kind: ClusterRole 263 | apiVersion: rbac.authorization.k8s.io/v1 264 | metadata: 265 | name: calico-kube-controllers 266 | rules: 267 | # Nodes are watched to monitor for deletions. 268 | - apiGroups: [""] 269 | resources: 270 | - nodes 271 | verbs: 272 | - watch 273 | - list 274 | - get 275 | # Pods are queried to check for existence. 276 | - apiGroups: [""] 277 | resources: 278 | - pods 279 | verbs: 280 | - get 281 | # IPAM resources are manipulated when nodes are deleted. 282 | - apiGroups: ["crd.projectcalico.org"] 283 | resources: 284 | - ippools 285 | verbs: 286 | - list 287 | - apiGroups: ["crd.projectcalico.org"] 288 | resources: 289 | - blockaffinities 290 | - ipamblocks 291 | - ipamhandles 292 | verbs: 293 | - get 294 | - list 295 | - create 296 | - update 297 | - delete 298 | # Needs access to update clusterinformations. 
299 | - apiGroups: ["crd.projectcalico.org"] 300 | resources: 301 | - clusterinformations 302 | verbs: 303 | - get 304 | - create 305 | - update 306 | --- 307 | kind: ClusterRoleBinding 308 | apiVersion: rbac.authorization.k8s.io/v1 309 | metadata: 310 | name: calico-kube-controllers 311 | roleRef: 312 | apiGroup: rbac.authorization.k8s.io 313 | kind: ClusterRole 314 | name: calico-kube-controllers 315 | subjects: 316 | - kind: ServiceAccount 317 | name: calico-kube-controllers 318 | namespace: kube-system 319 | --- 320 | # Include a clusterrole for the calico-node DaemonSet, 321 | # and bind it to the calico-node serviceaccount. 322 | kind: ClusterRole 323 | apiVersion: rbac.authorization.k8s.io/v1 324 | metadata: 325 | name: calico-node 326 | rules: 327 | # The CNI plugin needs to get pods, nodes, and namespaces. 328 | - apiGroups: [""] 329 | resources: 330 | - pods 331 | - nodes 332 | - namespaces 333 | verbs: 334 | - get 335 | - apiGroups: [""] 336 | resources: 337 | - endpoints 338 | - services 339 | verbs: 340 | # Used to discover service IPs for advertisement. 341 | - watch 342 | - list 343 | # Used to discover Typhas. 344 | - get 345 | - apiGroups: [""] 346 | resources: 347 | - nodes/status 348 | verbs: 349 | # Needed for clearing NodeNetworkUnavailable flag. 350 | - patch 351 | # Calico stores some configuration information in node annotations. 352 | - update 353 | # Watch for changes to Kubernetes NetworkPolicies. 354 | - apiGroups: ["networking.k8s.io"] 355 | resources: 356 | - networkpolicies 357 | verbs: 358 | - watch 359 | - list 360 | # Used by Calico for policy information. 361 | - apiGroups: [""] 362 | resources: 363 | - pods 364 | - namespaces 365 | - serviceaccounts 366 | verbs: 367 | - list 368 | - watch 369 | # The CNI plugin patches pods/status. 370 | - apiGroups: [""] 371 | resources: 372 | - pods/status 373 | verbs: 374 | - patch 375 | # Calico monitors various CRDs for config. 376 | - apiGroups: ["crd.projectcalico.org"] 377 | resources: 378 | - globalfelixconfigs 379 | - felixconfigurations 380 | - bgppeers 381 | - globalbgpconfigs 382 | - bgpconfigurations 383 | - ippools 384 | - ipamblocks 385 | - globalnetworkpolicies 386 | - globalnetworksets 387 | - networkpolicies 388 | - networksets 389 | - clusterinformations 390 | - hostendpoints 391 | verbs: 392 | - get 393 | - list 394 | - watch 395 | # Calico must create and update some CRDs on startup. 396 | - apiGroups: ["crd.projectcalico.org"] 397 | resources: 398 | - ippools 399 | - felixconfigurations 400 | - clusterinformations 401 | verbs: 402 | - create 403 | - update 404 | # Calico stores some configuration information on the node. 405 | - apiGroups: [""] 406 | resources: 407 | - nodes 408 | verbs: 409 | - get 410 | - list 411 | - watch 412 | # These permissions are only requried for upgrade from v2.6, and can 413 | # be removed after upgrade or on fresh installations. 414 | - apiGroups: ["crd.projectcalico.org"] 415 | resources: 416 | - bgpconfigurations 417 | - bgppeers 418 | verbs: 419 | - create 420 | - update 421 | # These permissions are required for Calico CNI to perform IPAM allocations. 422 | - apiGroups: ["crd.projectcalico.org"] 423 | resources: 424 | - blockaffinities 425 | - ipamblocks 426 | - ipamhandles 427 | verbs: 428 | - get 429 | - list 430 | - create 431 | - update 432 | - delete 433 | - apiGroups: ["crd.projectcalico.org"] 434 | resources: 435 | - ipamconfigs 436 | verbs: 437 | - get 438 | # Block affinities must also be watchable by confd for route aggregation. 
439 | - apiGroups: ["crd.projectcalico.org"] 440 | resources: 441 | - blockaffinities 442 | verbs: 443 | - watch 444 | # The Calico IPAM migration needs to get daemonsets. These permissions can be 445 | # removed if not upgrading from an installation using host-local IPAM. 446 | - apiGroups: ["apps"] 447 | resources: 448 | - daemonsets 449 | verbs: 450 | - get 451 | --- 452 | apiVersion: rbac.authorization.k8s.io/v1 453 | kind: ClusterRoleBinding 454 | metadata: 455 | name: calico-node 456 | roleRef: 457 | apiGroup: rbac.authorization.k8s.io 458 | kind: ClusterRole 459 | name: calico-node 460 | subjects: 461 | - kind: ServiceAccount 462 | name: calico-node 463 | namespace: kube-system 464 | 465 | --- 466 | # Source: calico/templates/calico-node.yaml 467 | # This manifest installs the calico-node container, as well 468 | # as the CNI plugins and network config on 469 | # each master and worker node in a Kubernetes cluster. 470 | kind: DaemonSet 471 | apiVersion: apps/v1 472 | metadata: 473 | name: calico-node 474 | namespace: kube-system 475 | labels: 476 | k8s-app: calico-node 477 | spec: 478 | selector: 479 | matchLabels: 480 | k8s-app: calico-node 481 | updateStrategy: 482 | type: RollingUpdate 483 | rollingUpdate: 484 | maxUnavailable: 1 485 | template: 486 | metadata: 487 | labels: 488 | k8s-app: calico-node 489 | annotations: 490 | # This, along with the CriticalAddonsOnly toleration below, 491 | # marks the pod as a critical add-on, ensuring it gets 492 | # priority scheduling and that its resources are reserved 493 | # if it ever gets evicted. 494 | scheduler.alpha.kubernetes.io/critical-pod: '' 495 | spec: 496 | nodeSelector: 497 | beta.kubernetes.io/os: linux 498 | hostNetwork: true 499 | tolerations: 500 | # Make sure calico-node gets scheduled on all nodes. 501 | - effect: NoSchedule 502 | operator: Exists 503 | # Mark the pod as a critical add-on for rescheduling. 504 | - key: CriticalAddonsOnly 505 | operator: Exists 506 | - effect: NoExecute 507 | operator: Exists 508 | serviceAccountName: calico-node 509 | # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force 510 | # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 511 | terminationGracePeriodSeconds: 0 512 | priorityClassName: system-node-critical 513 | initContainers: 514 | # This container performs upgrade from host-local IPAM to calico-ipam. 515 | # It can be deleted if this is a fresh installation, or if you have already 516 | # upgraded to use calico-ipam. 517 | - name: upgrade-ipam 518 | image: calico/cni:v3.8.1 519 | command: ["/opt/cni/bin/calico-ipam", "-upgrade"] 520 | env: 521 | - name: KUBERNETES_NODE_NAME 522 | valueFrom: 523 | fieldRef: 524 | fieldPath: spec.nodeName 525 | - name: CALICO_NETWORKING_BACKEND 526 | valueFrom: 527 | configMapKeyRef: 528 | name: calico-config 529 | key: calico_backend 530 | volumeMounts: 531 | - mountPath: /var/lib/cni/networks 532 | name: host-local-net-dir 533 | - mountPath: /host/opt/cni/bin 534 | name: cni-bin-dir 535 | # This container installs the CNI binaries 536 | # and CNI network config file on each node. 537 | - name: install-cni 538 | image: calico/cni:v3.8.1 539 | command: ["/install-cni.sh"] 540 | env: 541 | # Name of the CNI config file to create. 542 | - name: CNI_CONF_NAME 543 | value: "10-calico.conflist" 544 | # The CNI network config to install on each node. 
545 | - name: CNI_NETWORK_CONFIG 546 | valueFrom: 547 | configMapKeyRef: 548 | name: calico-config 549 | key: cni_network_config 550 | # Set the hostname based on the k8s node name. 551 | - name: KUBERNETES_NODE_NAME 552 | valueFrom: 553 | fieldRef: 554 | fieldPath: spec.nodeName 555 | # CNI MTU Config variable 556 | - name: CNI_MTU 557 | valueFrom: 558 | configMapKeyRef: 559 | name: calico-config 560 | key: veth_mtu 561 | # Prevents the container from sleeping forever. 562 | - name: SLEEP 563 | value: "false" 564 | volumeMounts: 565 | - mountPath: /host/opt/cni/bin 566 | name: cni-bin-dir 567 | - mountPath: /host/etc/cni/net.d 568 | name: cni-net-dir 569 | # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes 570 | # to communicate with Felix over the Policy Sync API. 571 | - name: flexvol-driver 572 | image: calico/pod2daemon-flexvol:v3.8.1 573 | volumeMounts: 574 | - name: flexvol-driver-host 575 | mountPath: /host/driver 576 | containers: 577 | # Runs calico-node container on each Kubernetes node. This 578 | # container programs network policy and routes on each 579 | # host. 580 | - name: calico-node 581 | image: calico/node:v3.8.1 582 | env: 583 | # Use Kubernetes API as the backing datastore. 584 | - name: DATASTORE_TYPE 585 | value: "kubernetes" 586 | # Wait for the datastore. 587 | - name: WAIT_FOR_DATASTORE 588 | value: "true" 589 | # Set based on the k8s node name. 590 | - name: NODENAME 591 | valueFrom: 592 | fieldRef: 593 | fieldPath: spec.nodeName 594 | # Choose the backend to use. 595 | - name: CALICO_NETWORKING_BACKEND 596 | valueFrom: 597 | configMapKeyRef: 598 | name: calico-config 599 | key: calico_backend 600 | # Cluster type to identify the deployment type 601 | - name: CLUSTER_TYPE 602 | value: "k8s,bgp" 603 | # Auto-detect the BGP IP address. 604 | - name: IP 605 | value: "autodetect" 606 | # Enable IPIP 607 | - name: CALICO_IPV4POOL_IPIP 608 | value: "Always" 609 | # Set MTU for tunnel device used if ipip is enabled 610 | - name: FELIX_IPINIPMTU 611 | valueFrom: 612 | configMapKeyRef: 613 | name: calico-config 614 | key: veth_mtu 615 | # The default IPv4 pool to create on startup if none exists. Pod IPs will be 616 | # chosen from this range. Changing this value after installation will have 617 | # no effect. This should fall within `--cluster-cidr`. 618 | - name: CALICO_IPV4POOL_CIDR 619 | value: "192.168.0.0/16" 620 | # Disable file logging so `kubectl logs` works. 621 | - name: CALICO_DISABLE_FILE_LOGGING 622 | value: "true" 623 | # Set Felix endpoint to host default action to ACCEPT. 624 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 625 | value: "ACCEPT" 626 | # Disable IPv6 on Kubernetes. 
627 | - name: FELIX_IPV6SUPPORT 628 | value: "false" 629 | # Set Felix logging to "info" 630 | - name: FELIX_LOGSEVERITYSCREEN 631 | value: "info" 632 | - name: FELIX_HEALTHENABLED 633 | value: "true" 634 | securityContext: 635 | privileged: true 636 | resources: 637 | requests: 638 | cpu: 250m 639 | livenessProbe: 640 | httpGet: 641 | path: /liveness 642 | port: 9099 643 | host: localhost 644 | periodSeconds: 10 645 | initialDelaySeconds: 10 646 | failureThreshold: 6 647 | readinessProbe: 648 | exec: 649 | command: 650 | - /bin/calico-node 651 | - -bird-ready 652 | - -felix-ready 653 | periodSeconds: 10 654 | volumeMounts: 655 | - mountPath: /lib/modules 656 | name: lib-modules 657 | readOnly: true 658 | - mountPath: /run/xtables.lock 659 | name: xtables-lock 660 | readOnly: false 661 | - mountPath: /var/run/calico 662 | name: var-run-calico 663 | readOnly: false 664 | - mountPath: /var/lib/calico 665 | name: var-lib-calico 666 | readOnly: false 667 | - name: policysync 668 | mountPath: /var/run/nodeagent 669 | volumes: 670 | # Used by calico-node. 671 | - name: lib-modules 672 | hostPath: 673 | path: /lib/modules 674 | - name: var-run-calico 675 | hostPath: 676 | path: /var/run/calico 677 | - name: var-lib-calico 678 | hostPath: 679 | path: /var/lib/calico 680 | - name: xtables-lock 681 | hostPath: 682 | path: /run/xtables.lock 683 | type: FileOrCreate 684 | # Used to install CNI. 685 | - name: cni-bin-dir 686 | hostPath: 687 | path: /opt/cni/bin 688 | - name: cni-net-dir 689 | hostPath: 690 | path: /etc/cni/net.d 691 | # Mount in the directory for host-local IPAM allocations. This is 692 | # used when upgrading from host-local to calico-ipam, and can be removed 693 | # if not using the upgrade-ipam init container. 694 | - name: host-local-net-dir 695 | hostPath: 696 | path: /var/lib/cni/networks 697 | # Used to create per-pod Unix Domain Sockets 698 | - name: policysync 699 | hostPath: 700 | type: DirectoryOrCreate 701 | path: /var/run/nodeagent 702 | # Used to install Flex Volume Driver 703 | - name: flexvol-driver-host 704 | hostPath: 705 | type: DirectoryOrCreate 706 | path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds 707 | --- 708 | 709 | apiVersion: v1 710 | kind: ServiceAccount 711 | metadata: 712 | name: calico-node 713 | namespace: kube-system 714 | 715 | --- 716 | # Source: calico/templates/calico-kube-controllers.yaml 717 | 718 | # See https://github.com/projectcalico/kube-controllers 719 | apiVersion: apps/v1 720 | kind: Deployment 721 | metadata: 722 | name: calico-kube-controllers 723 | namespace: kube-system 724 | labels: 725 | k8s-app: calico-kube-controllers 726 | spec: 727 | # The controllers can only have a single active instance. 728 | replicas: 1 729 | selector: 730 | matchLabels: 731 | k8s-app: calico-kube-controllers 732 | strategy: 733 | type: Recreate 734 | template: 735 | metadata: 736 | name: calico-kube-controllers 737 | namespace: kube-system 738 | labels: 739 | k8s-app: calico-kube-controllers 740 | annotations: 741 | scheduler.alpha.kubernetes.io/critical-pod: '' 742 | spec: 743 | nodeSelector: 744 | beta.kubernetes.io/os: linux 745 | tolerations: 746 | # Mark the pod as a critical add-on for rescheduling. 
747 | - key: CriticalAddonsOnly 748 | operator: Exists 749 | - key: node-role.kubernetes.io/master 750 | effect: NoSchedule 751 | serviceAccountName: calico-kube-controllers 752 | priorityClassName: system-cluster-critical 753 | containers: 754 | - name: calico-kube-controllers 755 | image: calico/kube-controllers:v3.8.1 756 | env: 757 | # Choose which controllers to run. 758 | - name: ENABLED_CONTROLLERS 759 | value: node 760 | - name: DATASTORE_TYPE 761 | value: kubernetes 762 | readinessProbe: 763 | exec: 764 | command: 765 | - /usr/bin/check-status 766 | - -r 767 | 768 | --- 769 | 770 | apiVersion: v1 771 | kind: ServiceAccount 772 | metadata: 773 | name: calico-kube-controllers 774 | namespace: kube-system 775 | --- 776 | # Source: calico/templates/calico-etcd-secrets.yaml 777 | 778 | --- 779 | # Source: calico/templates/calico-typha.yaml 780 | 781 | --- 782 | # Source: calico/templates/configure-canal.yaml 783 | 784 | 785 | --------------------------------------------------------------------------------
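Note on consuming the local-storage-provisioner template: the provisioner publishes every filesystem mounted under /pv on a node as a local PersistentVolume in the local-storage-elastic class, and the class uses volumeBindingMode: WaitForFirstConsumer, so a claim only binds once a consuming pod is scheduled onto a node that still has a free volume. A minimal sketch of such a claim follows; the claim name and requested size are hypothetical and not part of this repository.

# Hypothetical PersistentVolumeClaim against the local-storage-elastic class
# defined in roles/local-storage-provisioner/templates/local-storage-provisioner.yaml.
# Binding is deferred until a pod referencing the claim is scheduled
# (volumeBindingMode: WaitForFirstConsumer).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elastic-data-0          # hypothetical name
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-storage-elastic
  resources:
    requests:
      storage: 50Gi             # hypothetical size; must fit a volume discovered under /pv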
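Note on the filebeat-kubernetes template: it enables hints-based autodiscover (hints.enabled: true), so log collection is steered per pod through co.elastic.logs/* annotations, in the same way the ingress-nginx controller pods already carry a co.elastic.metrics/raw annotation for Metricbeat. Below is a minimal, hypothetical workload showing such logging hints; the pod name, image and multiline pattern are illustrative only.

# Hypothetical pod demonstrating Filebeat hints-based autodiscover annotations;
# not part of this repository.
apiVersion: v1
kind: Pod
metadata:
  name: example-app
  namespace: default
  annotations:
    co.elastic.logs/enabled: "true"            # opt this pod's logs in
    co.elastic.logs/multiline.pattern: '^\['   # lines not starting with "[" are continuations
    co.elastic.logs/multiline.negate: "true"
    co.elastic.logs/multiline.match: "after"
spec:
  containers:
    - name: app
      image: privateregistry/example-app:1.0   # hypothetical image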
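Note on how these manifests are applied: each one lives under a role's templates/ directory, and the role's tasks/main.yaml (whose contents are not shown in this section) renders and applies it on the cluster. The sketch below illustrates one plausible shape for such a task file, using roles/calico as the example; the module choice, destination path and change-detection logic are assumptions, not this repository's actual tasks.

# Hypothetical sketch of a roles/<role>/tasks/main.yaml that renders and applies
# a template such as roles/calico/templates/calico.yaml; module choice, paths and
# changed_when logic are assumptions.
- name: Render the Calico manifest from the role template
  template:
    src: calico.yaml
    dest: /tmp/calico.yaml
    mode: "0644"

- name: Apply the rendered manifest with kubectl
  command: kubectl apply -f /tmp/calico.yaml
  register: calico_apply
  changed_when: "'created' in calico_apply.stdout or 'configured' in calico_apply.stdout"

In practice the kubernetes.core.k8s module could replace the kubectl command when the Python kubernetes client is available on the control-plane host.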