├── CHANGELOG ├── README.md └── Readme-v1.20.5.md ├── CHANGELOG.md ├── .gitignore ├── roles ├── backup-etcd │ ├── vars │ │ └── main.yaml │ ├── tasks │ │ ├── main.yaml │ │ └── backup-etcd.yaml │ ├── defaults │ │ └── main.yaml │ └── templates │ │ └── command_backup_etcd.sh.j2 ├── install-cni │ ├── vars │ │ └── main.yaml │ ├── templates │ │ └── 99-loopback.conf.j2 │ └── tasks │ │ └── main.yaml ├── restore-etcd │ ├── vars │ │ └── main.yaml │ ├── defaults │ │ └── main.yaml │ ├── tasks │ │ ├── main.yaml │ │ └── restore-etcd.yaml │ └── templates │ │ └── restore.sh.j2 ├── install-kubectl │ ├── defaults │ │ └── main.yaml │ └── tasks │ │ └── main.yaml ├── populate_etc_hosts │ ├── defaults │ │ └── main.yaml │ └── tasks │ │ └── main.yaml ├── setup-etcd │ ├── vars │ │ └── main.yaml │ ├── defaults │ │ └── main.yaml │ ├── handlers │ │ └── main.yaml │ ├── tasks │ │ ├── clean-previous-etcd-install.yaml │ │ ├── main.yaml │ │ ├── delete-etcd-clusters.yaml │ │ ├── setup-etcd.yaml │ │ ├── import-certs.yaml │ │ └── check-etcd.yaml │ ├── README.md │ └── templates │ │ └── etcd.service.j2 ├── configure-kubeconfig │ ├── vars │ │ └── main.yaml │ └── tasks │ │ └── main.yaml ├── remove_etc_hosts │ ├── defaults │ │ └── main.yaml │ └── tasks │ │ ├── main.yaml │ │ └── remove-etc-hosts.yaml ├── generate-certs │ ├── defaults │ │ └── main.yaml │ ├── tasks │ │ ├── main.yaml │ │ └── oidc-keycloak.yaml │ └── vars │ │ └── main.yaml ├── connexion-to-hosts │ └── tasks │ │ └── main.yaml ├── setup-master │ ├── tasks │ │ ├── clean-previous-k8s-install.yaml │ │ ├── main.yaml │ │ ├── delete-k8s-components.yaml │ │ ├── import-certs.yaml │ │ └── setup-master.yaml │ ├── files │ │ ├── admission-control-config-file.yaml │ │ ├── eventconfig.yaml │ │ └── audit-policy.yaml │ ├── templates │ │ ├── encryption-provider-config.yaml.j2 │ │ ├── kube-scheduler.service.j2 │ │ ├── kube-controller-manager.service.j2 │ │ ├── kube-scheduler.yaml.j2 │ │ └── kube-apiserver.service.j2 │ ├── handlers │ │ └── main.yaml │ ├── vars │ │ └── main.yaml │ └── defaults │ │ └── main.yaml ├── compliance-checks │ ├── vars │ │ └── main.yaml │ └── tasks │ │ ├── main.yaml │ │ └── infra_compliance.yaml ├── check-hosts-names │ └── tasks │ │ └── main.yaml ├── install-runtimes │ ├── files │ │ ├── daemon.json │ │ └── config.toml │ ├── vars │ │ └── main.yaml │ ├── handlers │ │ └── main.yaml │ ├── defaults │ │ └── main.yaml │ └── tasks │ │ ├── repo-centos-like.yaml │ │ ├── main.yaml │ │ ├── docker-centos-like.yaml │ │ ├── docker-debian-like.yaml │ │ ├── repo-debian-like.yaml │ │ ├── docker.yaml │ │ ├── containerd.yaml │ │ ├── delete-containerd-components.yaml │ │ └── uninstall-docker.yaml ├── setup-worker │ ├── defaults │ │ └── main.yaml │ ├── tasks │ │ ├── disable-swap.yaml │ │ ├── clean-previous-k8s-install.yaml │ │ ├── main.yaml │ │ ├── import-certs.yaml │ │ ├── delete-k8s-components.yaml │ │ └── setup-worker.yaml │ ├── vars │ │ └── main.yaml │ ├── handlers │ │ └── main.yaml │ └── templates │ │ ├── kube-proxy-config.yaml.j2 │ │ ├── kubelet-config.yaml.j2 │ │ ├── kube-proxy.service.j2 │ │ └── kubelet.service.j2 ├── post-scripts │ ├── vars │ │ └── main.yaml │ ├── templates │ │ ├── keycloak-ingress.yaml.j2 │ │ ├── tls-monitoring-gen.yaml.j2 │ │ ├── configure_storage_openebs.yaml.j2 │ │ ├── logrotate.yaml.j2 │ │ ├── reloader.yaml.j2 │ │ ├── Readme.md │ │ ├── metrics.yaml.j2 │ │ ├── coredns.yaml.j2 │ │ ├── haproxy.yaml.j2 │ │ ├── backup-etcd-cronjob.yaml.j2 │ │ ├── kube-router.yaml.j2 │ │ ├── traefik.yaml.j2 │ │ └── logging-efk.yaml │ ├── tasks │ │ ├── kube-router.yaml │ │ ├── 
haproxy.yaml │ │ ├── logrotate.yaml │ │ ├── calico.yaml │ │ ├── metrics-server.yaml │ │ ├── grafana.yaml │ │ ├── reloader.yaml │ │ ├── traefik.yaml │ │ ├── monitoring.yaml │ │ ├── nginx.yaml │ │ ├── log-centralization.yaml │ │ ├── coredns.yaml │ │ ├── argocd.yaml │ │ ├── gatekeeper.yaml │ │ ├── openebs.yaml │ │ ├── metallb_l2.yaml │ │ ├── backup-etcd-cronjob.yaml │ │ ├── main.yaml │ │ ├── default_dashboard.yaml │ │ ├── label-hosts.yaml │ │ └── keycloak-oidc.yaml │ └── defaults │ │ └── main.yaml ├── clear │ └── tasks │ │ ├── main.yaml │ │ └── delete_openebs.yaml ├── uncordon │ └── tasks │ │ └── main.yaml ├── drain │ └── tasks │ │ └── main.yaml └── show-info │ ├── tasks │ └── main.yml │ └── templates │ └── info.j2 ├── images ├── AGORAKUBE.png └── AGORAKUBE_diagram.png ├── tools ├── etcd │ ├── backup-etcd-cluster.yaml │ └── restore-etcd-cluster.yaml ├── rolling_update │ └── rolling.yaml └── oidc.sh ├── actions └── ansible-lint │ ├── lint-config │ └── ansible-lint.conf │ ├── entrypoint.sh │ ├── action.yml │ ├── Dockerfile │ └── dockerfiles │ └── ansible-lint │ └── v1 │ └── Dockerfile ├── .github ├── ISSUE_TEMPLATE │ ├── custom.md │ ├── feature_request.md │ └── bug_report.md └── workflows │ ├── ansible-lint.yaml │ └── stale.yml ├── requirements.txt ├── test └── inventory ├── hosts ├── agorakube.yaml ├── setup-hosts.sh ├── docs ├── manage_etcd.md └── CONTRIBUTING.md ├── setup-deploy.sh ├── LOCAL_ENVIRONMENT.md ├── CODE_OF_CONDUCT.md └── group_vars └── all.yaml /CHANGELOG/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | CHANGELOG/README.md 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | group_vars/all/secret.yaml 2 | -------------------------------------------------------------------------------- /roles/backup-etcd/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | pki_path: "{{ data_path }}/pki" 3 | -------------------------------------------------------------------------------- /roles/install-cni/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | agorakube_cni_release: "0.8.5" 3 | -------------------------------------------------------------------------------- /roles/restore-etcd/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | pki_path: "{{ data_path }}/pki" 3 | -------------------------------------------------------------------------------- /roles/install-kubectl/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #kubernetes_release: 1.18.3 3 | -------------------------------------------------------------------------------- /images/AGORAKUBE.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ilkilab/agorakube/HEAD/images/AGORAKUBE.png -------------------------------------------------------------------------------- /roles/populate_etc_hosts/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | agorakube_populate_etc_hosts: True 3 | 
-------------------------------------------------------------------------------- /roles/setup-etcd/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | pki_path: "{{ agorakube.global.data_path }}/pki" 3 | -------------------------------------------------------------------------------- /roles/configure-kubeconfig/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | pki_path: "{{ agorakube.global.data_path }}/pki" 3 | -------------------------------------------------------------------------------- /roles/remove_etc_hosts/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | backup_etc_hosts: "{{ agorakube_backup_etc_hosts }}" 3 | -------------------------------------------------------------------------------- /images/AGORAKUBE_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ilkilab/agorakube/HEAD/images/AGORAKUBE_diagram.png -------------------------------------------------------------------------------- /roles/generate-certs/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | advertise_masters: 10.10.0.3 3 | master_custom_alt_name: "" 4 | -------------------------------------------------------------------------------- /tools/etcd/backup-etcd-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: deploy 3 | become: true 4 | roles: 5 | - backup-etcd 6 | -------------------------------------------------------------------------------- /roles/install-cni/templates/99-loopback.conf.j2: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.3.1", 3 | "name": "lo", 4 | "type": "loopback" 5 | } 6 | -------------------------------------------------------------------------------- /roles/backup-etcd/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Import backup_etcd tasks 3 | include_tasks: backup-etcd.yaml 4 | when: backup_etcd 5 | -------------------------------------------------------------------------------- /roles/setup-etcd/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #etcd_release: v3.3.13 3 | #etcd_data_directory: "/var/lib/etcd" 4 | #delete_etcd_install: False 5 | -------------------------------------------------------------------------------- /roles/backup-etcd/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | backup_etcd: true 3 | etcd_release: 3.4.10 4 | #custom_etcd_backup_dir: 5 | data_path: "/var/agorakube" 6 | -------------------------------------------------------------------------------- /actions/ansible-lint/lint-config/ansible-lint.conf: -------------------------------------------------------------------------------- 1 | skip_list: 2 | - '403' 3 | - '503' 4 | - '601' 5 | - '306' 6 | - '204' 7 | - '602' 8 | - '301' 9 | -------------------------------------------------------------------------------- /roles/connexion-to-hosts/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Validate host connexion 3 | ping: 4 | when: 5 | - inventory_hostname not in groups['etc_hosts'] 6 | 
-------------------------------------------------------------------------------- /roles/restore-etcd/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #restoration_snapshot_file: 3 | etcd_release: 3.4.10 4 | #custom_etcd_backup_dir: 5 | data_path: "/var/agorakube" 6 | -------------------------------------------------------------------------------- /roles/restore-etcd/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include restore-etcd.yaml tasks 3 | include_tasks: restore-etcd.yaml 4 | when: restoration_snapshot_file is defined 5 | -------------------------------------------------------------------------------- /roles/setup-master/tasks/clean-previous-k8s-install.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete prevous k8s binaries 3 | file: 4 | state: absent 5 | path: /usr/local/bin/kubernetes 6 | -------------------------------------------------------------------------------- /roles/compliance-checks/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | MASTER_NUM: "{{ groups['masters'] | length }}" 3 | WORKER_NUM: "{{ groups['workers'] | length }}" 4 | ETCD_NUM: "{{ groups['etcd'] | length }}" 5 | -------------------------------------------------------------------------------- /roles/compliance-checks/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check Host compliance 3 | include_tasks: infra_compliance.yaml 4 | - name: Check Host compliance 5 | include_tasks: check_variables.yaml 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom issue template 3 | about: Describe this issue template's purpose here. 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /actions/ansible-lint/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -f /var/ansible-lint.conf ]; then 4 | /usr/local/bin/ansible-lint -c /var/ansible-lint.conf 5 | else 6 | /usr/local/bin/ansible-lint 7 | fi 8 | -------------------------------------------------------------------------------- /roles/check-hosts-names/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: test node_name 3 | assert: 4 | that: 5 | ansible_fqdn == inventory_hostname 6 | when: 7 | - inventory_hostname not in groups['etc_hosts'] 8 | -------------------------------------------------------------------------------- /actions/ansible-lint/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Ansible lint for Agorakube' 2 | description: 'Lint Ansible Playbooks and roles. 
Used for Agorakube project check' 3 | runs: 4 | using: 'docker' 5 | image: 'Dockerfile' 6 | -------------------------------------------------------------------------------- /roles/remove_etc_hosts/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove orphaned /etc/hosts entries 3 | include_tasks: remove-etc-hosts.yaml 4 | when: agorakube_remove_etc_hosts | bool == True 5 | tags: remove-etc-hosts 6 | -------------------------------------------------------------------------------- /roles/install-runtimes/files/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "exec-opts": ["native.cgroupdriver=cgroupfs"], 3 | "log-driver": "json-file", 4 | "log-opts": { 5 | "max-size": "100m" 6 | }, 7 | "storage-driver": "overlay2" 8 | } 9 | -------------------------------------------------------------------------------- /roles/setup-master/files/admission-control-config-file.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiserver.config.k8s.io/v1 2 | kind: AdmissionConfiguration 3 | plugins: 4 | - name: EventRateLimit 5 | path: /etc/kubernetes/manifests/eventconfig.yaml 6 | -------------------------------------------------------------------------------- /actions/ansible-lint/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM agorakube/ansible-lint:v1 2 | MAINTAINER Ilkilabs @ www.ilki.fr 3 | COPY ./entrypoint.sh /bin/entrypoint.sh 4 | RUN chmod +x /bin/entrypoint.sh 5 | COPY ./lint-config/ /var/ 6 | CMD ["/bin/entrypoint.sh"] 7 | -------------------------------------------------------------------------------- /roles/setup-etcd/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload daemons 3 | systemd: 4 | state: restarted 5 | daemon_reload: yes 6 | name: etcd 7 | 8 | - name: Restart etcd 9 | systemd: 10 | state: restarted 11 | name: etcd 12 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==2.10.7 2 | ansible-base==2.10.5 3 | cffi==1.14.5 4 | cryptography==3.4.6 5 | Jinja2==2.11.3 6 | MarkupSafe==1.1.1 7 | netaddr==0.8.0 8 | packaging==20.9 9 | pycparser==2.20 10 | pyparsing==2.4.7 11 | PyYAML==5.4.1 12 | selinux==0.2.1 -------------------------------------------------------------------------------- /roles/setup-master/files/eventconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eventratelimit.admission.k8s.io/v1alpha1 2 | kind: Configuration 3 | limits: 4 | - type: Namespace 5 | qps: 50 6 | burst: 100 7 | cacheSize: 2000 8 | - type: User 9 | qps: 10 10 | burst: 50 11 | -------------------------------------------------------------------------------- /roles/setup-worker/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #kubernetes_release: v1.15.0 3 | #cluster_cidr: 10.200.0.0/16 4 | #cluster_dns_ip: 10.32.0.10 5 | #runtime: containerd 6 | #delete_previous_k8s_install: false 7 | #pod_cidr: 10.200.0.0/16 8 | #enable_persistence: True 9 | -------------------------------------------------------------------------------- /roles/post-scripts/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
pki_path: "{{ agorakube.global.data_path }}/pki" 3 | number_storage: "{{ groups['storage'] | length }}" 4 | replicas_openebs: "{{ number_storage if number_storage < '3' else '3' }}" 5 | #replicas_openebs: "{{ groups['workers'] | length }}" 6 | -------------------------------------------------------------------------------- /roles/clear/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name : Clear Openebs Data Storage 3 | include_tasks: delete_openebs.yaml 4 | tags: [ 'never', 'uninstall' ] 5 | 6 | - name: Clear Calico dir 7 | file: 8 | path: /var/run/calico 9 | state: absent 10 | tags: [ 'never', 'uninstall' ] 11 | -------------------------------------------------------------------------------- /roles/install-runtimes/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | is_debian_like: "{{ 'True' if ansible_distribution|lower == 'ubuntu' or ansible_distribution|lower == 'debian' else 'False' }}" 3 | is_centos_like: "{{ 'True' if ansible_distribution|lower == 'centos' or ansible_distribution|lower == 'fedora' else 'False' }}" 4 | -------------------------------------------------------------------------------- /roles/setup-worker/tasks/disable-swap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Make sure swapfile is removed from /etc/fstab 3 | lineinfile: 4 | path: /etc/fstab 5 | state: absent 6 | regexp: "swap" 7 | - name: Ensure all swap are disabled 8 | command: swapoff -a 9 | when: ansible_swaptotal_mb > 0 10 | -------------------------------------------------------------------------------- /roles/setup-worker/tasks/clean-previous-k8s-install.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete prevous k8s binaries 3 | file: 4 | state: absent 5 | path: /usr/local/bin/kubernetes 6 | when: 7 | - ansible_fqdn in groups['workers'] or ansible_fqdn in groups['storage'] 8 | - ansible_fqdn not in groups['masters'] 9 | -------------------------------------------------------------------------------- /roles/setup-worker/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | pki_path: "{{ agorakube.global.data_path }}/pki" 3 | #kube_proxy_mode: ipvs 4 | #kube_proxy_ipvs_algotithm: rr 5 | iscsi_package_name: "{{ 'open-iscsi' if ansible_distribution|lower == 'ubuntu' or ansible_distribution|lower == 'debian' else 'iscsi-initiator-utils' }}" 6 | -------------------------------------------------------------------------------- /roles/configure-kubeconfig/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create /root/.kube directory 3 | file: 4 | path: /root/.kube 5 | state: directory 6 | 7 | - name: Inject Kubeconfig 8 | copy: 9 | src: "{{ pki_path }}/kubeconfigs/admin/admin.conf" 10 | dest: /root/.kube/config 11 | mode: 0600 12 | -------------------------------------------------------------------------------- /roles/install-kubectl/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install kubectl 3 | get_url: 4 | url: https://storage.googleapis.com/kubernetes-release/release/{{ agorakube_base_components.kubernetes.release }}/bin/linux/amd64/kubectl 5 | dest: /usr/bin/kubectl 6 | owner: root 7 | group: root 8 | mode: 0777 9 | 
-------------------------------------------------------------------------------- /roles/populate_etc_hosts/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure /etc/hosts 3 | lineinfile: 4 | path: /etc/hosts 5 | line: "{{ hostvars[item].ansible_host }} {{ item }}" 6 | create: yes 7 | with_items: 8 | - "{{ groups['all'] }}" 9 | when: 10 | - agorakube_populate_etc_hosts 11 | - hostvars[item].ansible_host is defined 12 | -------------------------------------------------------------------------------- /actions/ansible-lint/dockerfiles/ansible-lint/v1/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | MAINTAINER Ilkilabs @ www.ilki.fr 3 | RUN apt update && apt install software-properties-common -yqq 4 | RUN apt-add-repository --yes --update ppa:ansible/ansible 5 | RUN apt install ansible python-pip git -yqq 6 | RUN pip install --no-cache-dir ansible-lint 7 | CMD ["/bin/bash"] 8 | -------------------------------------------------------------------------------- /roles/generate-certs/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Include label hosts 2 | include_tasks: generate-kubernetes-pki.yaml 3 | 4 | - name: Generate Keycloak PKI 5 | include_tasks: oidc-keycloak.yaml 6 | when: 7 | - agorakube_features.keycloak_oidc.auto_bootstrap.bootstrap_keycloak | bool == True 8 | - agorakube_features.keycloak_oidc.enabled | bool == True 9 | -------------------------------------------------------------------------------- /roles/install-runtimes/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart containerd 3 | systemd: 4 | state: restarted 5 | name: containerd 6 | 7 | - name: Reload docker 8 | systemd: 9 | state: restarted 10 | daemon_reload: yes 11 | name: docker 12 | 13 | - name: restart docker 14 | systemd: 15 | state: restarted 16 | name: docker 17 | -------------------------------------------------------------------------------- /roles/clear/tasks/delete_openebs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete Jiva 3 | file: 4 | path: "{{ agorakube_features.storage.jiva.data_path }}" 5 | state: absent 6 | tags: ['never', 'uninstall'] 7 | 8 | - name: Delete HostPath 9 | file: 10 | path: "{{ agorakube_features.storage.hostpath.data_path }}" 11 | state: absent 12 | tags: ['never', 'uninstall'] 13 | -------------------------------------------------------------------------------- /roles/install-runtimes/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #cluster_cidr: 10.200.0.0/16 3 | #pod_cidr: 4 | #runtime: containerd 5 | #docker_release: "" # By default install latest 6 | #docker_release: 19.03.2-3.el7 sample on Centos get with: yum list docker-ce --showduplicates | sort -r 7 | #docker_release: 5:18.09.1~3-0~ubuntu-xenial sample Ubuntu get with : apt-cache madison docker-ce 8 | -------------------------------------------------------------------------------- /roles/setup-etcd/tasks/clean-previous-etcd-install.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Find old ETCD dir binaries 3 | find: 4 | path: /usr/bin/ 5 | patterns: '*etcd*' 6 | file_type: directory 7 | register: etcd_dir_bin 8 | 9 | - name: Delete prevous etcd binaries 10 | file: 11 
| state: absent 12 | path: "{{ item.path }}" 13 | loop: "{{ etcd_dir_bin.files }}" 14 | -------------------------------------------------------------------------------- /roles/setup-worker/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload Daemons 3 | systemd: 4 | daemon_reload: yes 5 | - name: restart kubelet 6 | systemd: 7 | state: restarted 8 | enabled: yes 9 | name: kubelet 10 | daemon_reload: yes 11 | - name: restart kube-proxy 12 | systemd: 13 | state: restarted 14 | enabled: yes 15 | name: kube-proxy 16 | daemon_reload: yes 17 | -------------------------------------------------------------------------------- /roles/uncordon/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Uncordon Node 3 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf uncordon {{ inventory_hostname }} 4 | retries: 300 5 | delay: 10 6 | run_once: true 7 | delegate_to: "{{ item }}" 8 | with_items: 9 | - "{{ groups['deploy'] }}" 10 | ignore_errors: yes 11 | register: drain_status 12 | until: drain_status is not failed 13 | -------------------------------------------------------------------------------- /roles/setup-master/templates/encryption-provider-config.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apiserver.config.k8s.io/v1 2 | kind: EncryptionConfiguration 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - aescbc: 8 | keys: 9 | {% for key, value in agorakube_encrypt_etcd_keys.items() %} 10 | - name: {{ key }} 11 | secret: {{ value.secret }} 12 | {% endfor %} 13 | - identity: {} 14 | -------------------------------------------------------------------------------- /roles/setup-worker/templates/kube-proxy-config.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: KubeProxyConfiguration 2 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 3 | clientConnection: 4 | kubeconfig: "/etc/kubernetes/manifests/proxier.conf" 5 | mode: "{{ agorakube_network.kube_proxy.mode }}" 6 | clusterCIDR: "{{ agorakube_network.cidr.pod }}" 7 | {% if agorakube_network.kube_proxy.mode == 'ipvs' %} 8 | ipvs: 9 | strictARP: true 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /test/inventory: -------------------------------------------------------------------------------- 1 | [deploy] 2 | deploy ansible_connection=local 3 | 4 | [masters] 5 | deploy ansible_connection=local ip=10.20.20.8 6 | 7 | [etcd] 8 | deploy ansible_connection=local ip=10.20.20.8 9 | [workers] 10 | 11 | deploy ansible_connection=local ip=10.20.20.8 12 | 13 | [all:vars] 14 | ansible_ssh_extra_args='-o StrictHostKeyChecking=no' 15 | ansible_user=cloud 16 | ansible_ssh_private_key_file=/tmp/private.pem 17 | -------------------------------------------------------------------------------- /tools/etcd/restore-etcd-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: masters 3 | become: true 4 | tasks: 5 | - name: Stop API server 6 | service: 7 | name: kube-apiserver 8 | state: stopped 9 | - hosts: etcd 10 | become: true 11 | roles: 12 | - restore-etcd 13 | - hosts: masters 14 | become: true 15 | tasks: 16 | - name: Restart API server 17 | service: 18 | name: kube-apiserver 19 | state: started 20 | 
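The playbook above only restores data when the restore-etcd role's guard is satisfied: roles/restore-etcd/tasks/main.yaml includes its work solely when `restoration_snapshot_file` is defined. A minimal sketch of how such a run could be launched from the repository root, assuming the bundled `hosts` inventory is used and with a purely illustrative snapshot path:

```bash
# Stops kube-apiserver on the masters, restores the snapshot on the etcd group,
# then starts kube-apiserver again (the three plays defined above).
# The snapshot path is an example only; point it at a snapshot produced by the backup-etcd role.
ansible-playbook -i hosts tools/etcd/restore-etcd-cluster.yaml \
  -e restoration_snapshot_file=/var/agorakube/etcd-backups/snapshot-etcd.db
```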
-------------------------------------------------------------------------------- /.github/workflows/ansible-lint.yaml: -------------------------------------------------------------------------------- 1 | name: ansible-lint-agorakube 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | - develop 8 | 9 | jobs: 10 | ansible_lint: 11 | runs-on: ubuntu-latest 12 | name: A job to lint Ansible Playbooks and roles for Agorakube 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v1 16 | - name: test ansible lint 17 | uses: ./actions/ansible-lint 18 | -------------------------------------------------------------------------------- /roles/drain/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Drain Node 3 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data --force 4 | retries: 300 5 | delay: 10 6 | run_once: true 7 | delegate_to: "{{ item }}" 8 | with_items: 9 | - "{{ groups['deploy'] }}" 10 | ignore_errors: yes 11 | register: drain_status 12 | until: drain_status is not failed 13 | -------------------------------------------------------------------------------- /roles/show-info/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Store info file in /root 3 | template: 4 | src: info.j2 5 | dest: /root/agorakube-info.txt 6 | owner: root 7 | group: root 8 | mode: '0640' 9 | 10 | - name: Get file content to show to the user 11 | command: cat /root/agorakube-info.txt 12 | register: agorakube_info 13 | changed_when: False 14 | 15 | - name: Show information to the user 16 | debug: 17 | var: agorakube_info.stdout_lines 18 | -------------------------------------------------------------------------------- /roles/setup-master/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Import certs 3 | include_tasks: import-certs.yaml 4 | 5 | - name: Clean previous install of K8S 6 | include_tasks: clean-previous-k8s-install.yaml 7 | when: agorakube_base_components.kubernetes.upgrade | bool == True 8 | 9 | - name: Include Setup 10 | include_tasks: setup-master.yaml 11 | 12 | - name: Clear kubernetes components 13 | include_tasks: delete-k8s-components.yaml 14 | tags: ['never', 'uninstall'] 15 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/repo-centos-like.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Install {{ item }}" 3 | package: 4 | name: "{{ item }}" 5 | state: present 6 | with_items: 7 | - yum-utils 8 | - device-mapper-persistent-data 9 | - lvm2 10 | 11 | - name: Add docker repository 12 | command: yum-config-manager --add-repo https://download.docker.com/linux/{{ ansible_distribution | lower }}/docker-ce.repo 13 | args: 14 | creates: /etc/yum.repos.d/docker-ce.repo 15 | -------------------------------------------------------------------------------- /roles/setup-worker/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Import Worker certs 3 | include_tasks: import-certs.yaml 4 | - name: Disable swap 5 | include_tasks: disable-swap.yaml 6 | - name: Clean previous K8S install 7 | include_tasks: clean-previous-k8s-install.yaml 8 | when: agorakube_base_components.kubernetes.upgrade | bool == True 9 | - name: Setup 
Worker 10 | include_tasks: setup-worker.yaml 11 | - name: Clear kubernetes components 12 | include_tasks: delete-k8s-components.yaml 13 | tags: ['never', 'uninstall'] 14 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install runtime 3 | include_tasks: "{{ agorakube_base_components.container.engine }}.yaml" 4 | 5 | - name: Uninstall docker 6 | include_tasks: uninstall-docker.yaml 7 | when: agorakube_base_components.container.engine == "docker" 8 | tags: [ 'never', 'uninstall' ] 9 | 10 | - name: Clear Containerd Components 11 | include_tasks: delete-containerd-components.yaml 12 | when: agorakube_base_components.container.engine == "containerd" 13 | tags: ['never', 'uninstall'] 14 | -------------------------------------------------------------------------------- /roles/setup-master/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload daemons 3 | systemd: 4 | daemon_reload: yes 5 | 6 | - name: Restart kube-apiserver service 7 | systemd: 8 | state: restarted 9 | daemon_reload: yes 10 | name: kube-apiserver 11 | 12 | - name: Restart kube-scheduler service 13 | systemd: 14 | state: restarted 15 | daemon_reload: yes 16 | name: kube-scheduler 17 | 18 | - name: Restart kube-controller-manager service 19 | systemd: 20 | state: restarted 21 | daemon_reload: yes 22 | name: kube-controller-manager 23 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/keycloak-ingress.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: keycloak 6 | namespace: keycloak 7 | spec: 8 | tls: 9 | - hosts: 10 | - {{ agorakube_features.keycloak_oidc.auto_bootstrap.host }} 11 | secretName: keycloak-tls 12 | rules: 13 | - host: {{ agorakube_features.keycloak_oidc.auto_bootstrap.host }} 14 | http: 15 | paths: 16 | - path: / 17 | pathType: Prefix 18 | backend: 19 | service: 20 | name: keycloak 21 | port: 22 | number: 8080 23 | -------------------------------------------------------------------------------- /roles/compliance-checks/tasks/infra_compliance.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check number of MASTER(S) 3 | fail: 4 | msg: "There are {{ MASTER_NUM }} masters - Number of masters MUST be 1,3,5 or 7, and SHOULD be 3 or 5." 5 | when: MASTER_NUM not in ['1','3','5','7'] 6 | - name: Check number of ETCD 7 | fail: 8 | msg: "There are {{ ETCD_NUM }} etcd servers - Number of etcd servers MUST be 1,3,5 or 7, and SHOULD be 3 or 5." 9 | when: ETCD_NUM not in ['1','3','5','7'] 10 | - name: Check number of WORKER(S) 11 | fail: 12 | msg: "There are {{ WORKER_NUM }} workers - You MUST have at least 1 worker."
13 | when: WORKER_NUM == '0' 14 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/docker-centos-like.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install docker 3 | package: 4 | name: ["containerd.io","docker-ce","docker-ce-cli"] 5 | state: latest 6 | update_cache: yes 7 | when: agorakube_base_components.container.release|length == 0 8 | 9 | - name: Install docker in specific release 10 | package: 11 | name: ["containerd.io","docker-ce-{{ agorakube_base_components.container.release }}","docker-ce-cli-{{ agorakube_base_components.container.release }}"] 12 | state: present 13 | update_cache: yes 14 | when: agorakube_base_components.container.release|length > 0 15 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/docker-debian-like.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install docker 3 | package: 4 | name: ["containerd.io","docker-ce","docker-ce-cli"] 5 | state: latest 6 | update_cache: yes 7 | when: agorakube_base_components.container.release | length == 0 8 | 9 | - name: Install docker in specific release 10 | package: 11 | name: ["containerd.io","docker-ce={{ agorakube_base_components.container.release }}","docker-ce-cli={{ agorakube_base_components.container.release }}"] 12 | state: present 13 | update_cache: yes 14 | when: agorakube_base_components.container.release | length > 0 15 | -------------------------------------------------------------------------------- /roles/setup-master/templates/kube-scheduler.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Scheduler {{ agorakube_base_components.kubernetes.release }} 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kubernetes/server/bin/kube-scheduler \ 7 | --bind-address=127.0.0.1 \ 8 | --config=/etc/kubernetes/manifests/kube-scheduler.yaml \ 9 | --profiling=false \ 10 | --log-file=/var/log/kubernetes/kube-scheduler.log \ 11 | --log-file-max-size=1800 \ 12 | --logtostderr=false \ 13 | --v=2 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /roles/setup-etcd/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Import etcd certs 3 | include_tasks: import-certs.yaml 4 | 5 | - name: Delete previous ETCD install 6 | include_tasks: clean-previous-etcd-install.yaml 7 | when: agorakube_base_components.etcd.upgrade | bool == True 8 | 9 | - name: Setup etcd server 10 | include_tasks: setup-etcd.yaml 11 | 12 | - name: Check etcd cluster after installation 13 | include_tasks: check-etcd.yaml 14 | when: agorakube_base_components.etcd.check | bool == True 15 | tags: check-etcd 16 | 17 | - name: Clear and delete etcd clusters 18 | include_tasks: delete-etcd-clusters.yaml 19 | tags: [ 'never', 'uninstall' ] 20 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature 
request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/repo-debian-like.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install packages to allow apt to use a repository over HTTPS 3 | package: 4 | name: ['apt-transport-https', 'ca-certificates', 'curl', 'software-properties-common', 'gnupg-agent'] 5 | state: latest 6 | update_cache: yes 7 | 8 | - name: Add Docker’s official GPG key 9 | apt_key: 10 | url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg 11 | state: present 12 | 13 | - name: Add Docker apt repository 14 | apt_repository: 15 | repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_lsb.codename }} stable 16 | state: present 17 | update_cache: yes 18 | 19 | -------------------------------------------------------------------------------- /roles/restore-etcd/templates/restore.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | {% set etcd_initial_cluster = [] %} 4 | {% for host in groups['etcd'] %} 5 | {{ etcd_initial_cluster.append( host+"=https://"+hostvars[host].ansible_host+":2380" ) }} 6 | {% endfor %} 7 | export RESET_EVENT_ETCD=true 8 | ETCDCTL_API=3 /usr/bin/etcd-{{ agorakube_base_components.etcd.release }}-linux-amd64/etcdctl \ 9 | --name {{ ansible_fqdn }} \ 10 | --initial-cluster {{ etcd_initial_cluster|join(',') }} \ 11 | --initial-cluster-token etcd-cluster-0 \ 12 | --initial-advertise-peer-urls https://{{ hostvars[ansible_fqdn].ansible_host}}:2380 \ 13 | --data-dir {{ agorakube_base_components.etcd.data_path }} \ 14 | snapshot restore {{ backup_etcd_tempdir.path }}/snapshot-etcd.db 15 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/kube-router.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: kube_router 6 | register: kube_router_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ kube_router_tempdir.path }}/kube-router.yaml" 12 | src: "kube-router.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ kube_router_tempdir.path }}/kube-router.yaml 17 | register: apply_kube_router 18 | changed_when: > 19 | apply_kube_router.stdout is search("created") 20 | 21 | - name: Cleanup tempdir 22 | file: 23 | state: absent 24 | path: "{{ kube_router_tempdir.path }}" 25 | changed_when: false 26 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Mark stale issues and pull requests 2 | 3 | on: 4 | schedule: 5 | - cron: '00 12 * * *' 6 | 7 | jobs: 8 | 
stale: 9 | 10 | runs-on: ubuntu-latest 11 | permissions: 12 | issues: write 13 | pull-requests: write 14 | 15 | steps: 16 | - uses: actions/stale@v3 17 | with: 18 | repo-token: ${{ secrets.GITHUB_TOKEN }} 19 | stale-issue-message: 'Stale issue message' 20 | stale-pr-message: 'Stale pull request message' 21 | stale-issue-label: 'no-issue-activity' 22 | stale-pr-label: 'no-pr-activity' 23 | days-before-stale: 60 24 | days-before-close: 14 25 | close-issue-message: 'Automatically closed due to no activity' 26 | close-pr-message: 'Automatically closed due to no activity' 27 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/haproxy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: haproxy 6 | register: haproxy_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ haproxy_tempdir.path }}/haproxy.yaml" 12 | src: "haproxy.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ haproxy_tempdir.path }}/haproxy.yaml 17 | register: apply_haproxy 18 | changed_when: > 19 | apply_haproxy.stdout is search("created") 20 | or apply_haproxy.stdout is search("configured") 21 | 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ haproxy_tempdir.path }}" 26 | changed_when: false 27 | -------------------------------------------------------------------------------- /roles/install-cni/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create /etc/cni/net.d/ directory 4 | file: 5 | path: /etc/cni/net.d 6 | state: directory 7 | owner: root 8 | group: root 9 | mode: '644' 10 | recurse: yes 11 | 12 | - name: Push /etc/cni/net.d/99-loopback.conf 13 | template: 14 | src: 99-loopback.conf.j2 15 | dest: /etc/cni/net.d/99-loopback.conf 16 | 17 | - name: Create /opt/cni/bin/ directory 18 | file: 19 | path: /opt/cni/bin/ 20 | state: directory 21 | owner: root 22 | group: root 23 | mode: '644' 24 | 25 | - name: Download CNI 26 | unarchive: 27 | src: https://github.com/containernetworking/plugins/releases/download/v{{ agorakube_cni_release }}/cni-plugins-linux-amd64-v{{ agorakube_cni_release }}.tgz 28 | dest: /opt/cni/bin/ 29 | remote_src: yes 30 | creates: /opt/cni/bin/bridge 31 | -------------------------------------------------------------------------------- /hosts: -------------------------------------------------------------------------------- 1 | [deploy] 2 | deploy ansible_connection=local ansible_python_interpreter=/usr/bin/python3 3 | 4 | [masters] 5 | master1 ansible_host=10.10.20.4 6 | 7 | [etcd] 8 | master1 ansible_host=10.10.20.4 9 | 10 | [workers] 11 | worker2 ansible_host=10.10.20.5 12 | worker3 ansible_host=10.10.20.6 13 | 14 | [storage] 15 | worker4 ansible_host=10.10.20.20 16 | 17 | [all:vars] 18 | advertise_masters=10.10.20.4 19 | #advertise_masters=kubernetes.localcluster.lan 20 | 21 | # SSH connection settings 22 | ansible_ssh_extra_args=-o StrictHostKeyChecking=no 23 | ansible_user=vagrant 24 | ansible_ssh_private_key_file=/home/vagrant/ssh-private-key.pem 25 | 26 | # Python version 27 | 28 | # If centOS-7, use python2.7 29 | # If no-CentOS-7, use Python3 30 | ansible_python_interpreter=/usr/bin/python3 31 | 32 | [etc_hosts] 33 | #kubernetes.localcluster.lan ansible_host=10.10.20.4 34 | 
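This inventory declares every group the roles reference: deploy, masters, etcd, workers, storage, and the optional etc_hosts group. Assuming it is the inventory in use, a full deployment with the agorakube.yaml playbook, and the tag-based uninstall introduced in v1.20.5, would look roughly like this (a sketch, not the project's documented procedure; the exact invocation may differ, for example if setup-deploy.sh is used instead):

```bash
# Deploy (or re-run) Agorakube against the inventory above.
ansible-playbook -i hosts agorakube.yaml

# Remove the installed components using the 'uninstall' tag carried by the cleanup tasks.
ansible-playbook -i hosts agorakube.yaml --tags uninstall
```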
-------------------------------------------------------------------------------- /roles/setup-worker/templates/kubelet-config.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: KubeletConfiguration 2 | apiVersion: kubelet.config.k8s.io/v1beta1 3 | authentication: 4 | anonymous: 5 | enabled: false 6 | webhook: 7 | enabled: true 8 | x509: 9 | clientCAFile: "/etc/kubernetes/pki/ca.crt" 10 | authorization: 11 | mode: Webhook 12 | clusterDomain: "cluster.local" 13 | clusterDNS: 14 | - "{{ agorakube_network.service_ip.coredns }}" 15 | eventRecordQPS: 5 16 | makeIPTablesUtilChains: true 17 | podCIDR: "{{ agorakube_network.cidr.pod }}" 18 | protectKernelDefaults: true 19 | readOnlyPort: 0 20 | {% if ansible_distribution == "Ubuntu" %} 21 | resolvConf: "/run/systemd/resolve/resolv.conf" 22 | {% endif %} 23 | runtimeRequestTimeout: "15m" 24 | streamingConnectionIdleTimeout: "5m" 25 | tlsCertFile: "/etc/kubernetes/pki/kubelet.crt" 26 | tlsPrivateKeyFile: "/etc/kubernetes/pki/kubelet.key" 27 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/logrotate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Make sure Reloader is installed 3 | include_tasks: reloader.yaml 4 | 5 | - name: Create tempdir 6 | tempfile: 7 | state: directory 8 | suffix: logrotate 9 | register: logrotate_tempdir 10 | changed_when: false 11 | 12 | - name: Render templates 13 | template: 14 | dest: "{{ logrotate_tempdir.path }}/logrotate.yaml" 15 | src: "logrotate.yaml.j2" 16 | changed_when: false 17 | 18 | - name: Apply templates 19 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ logrotate_tempdir.path }}/logrotate.yaml 20 | register: apply_logrotate 21 | changed_when: > 22 | apply_logrotate.stdout is search("created") 23 | or apply_logrotate.stdout is search("configured") 24 | - name: Cleanup tempdir 25 | file: 26 | state: absent 27 | path: "{{ logrotate_tempdir.path }}" 28 | changed_when: false 29 | -------------------------------------------------------------------------------- /roles/setup-etcd/README.md: -------------------------------------------------------------------------------- 1 | Sample useful commands to manage etcd clusters 2 | 3 | **list etcd members** 4 | 5 | ETCDCTL_API=3 /usr/bin/etcd-v3.4.3-linux-amd64/etcdctl member list --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key 6 | 7 | **Take a snapshot** 8 | 9 | ETCDCTL_API=3 /usr/bin/etcd-v3.4.3-linux-amd64/etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key snapshot save snapshot_db_name 10 | 11 | **Get etcd infos** 12 | 13 | 14 | ETCDCTL_API=3 /usr/bin/etcd-v3.4.3-linux-amd64/etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key endpoint status --write-out=table 15 | -------------------------------------------------------------------------------- /agorakube.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: workers,etcd,masters,storage 3 | any_errors_fatal: true 4 | become: true 5 | roles: 6 | - connexion-to-hosts 7 | - check-hosts-names 8 | - populate_etc_hosts 9 | - 
remove_etc_hosts 10 | - hosts: deploy 11 | become: true 12 | vars: 13 | ansible_python_interpreter: /usr/local/agorakube-env/bin/python3 14 | roles: 15 | # - compliance-checks 16 | - install-kubectl 17 | - generate-certs 18 | - configure-kubeconfig 19 | - hosts: etcd 20 | become: true 21 | roles: 22 | - setup-etcd 23 | - hosts: masters 24 | become: true 25 | roles: 26 | - setup-master 27 | - install-cni 28 | - install-runtimes 29 | - setup-worker 30 | - hosts: workers,storage 31 | become: true 32 | roles: 33 | - install-cni 34 | - install-runtimes 35 | - setup-worker 36 | - clear 37 | - hosts: deploy 38 | become: true 39 | roles: 40 | - post-scripts 41 | - show-info 42 | -------------------------------------------------------------------------------- /roles/setup-worker/tasks/import-certs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create /etc/kubernetes/pki Dir 3 | file: 4 | path: /etc/kubernetes/pki 5 | state: directory 6 | owner: root 7 | group: root 8 | mode: '644' 9 | recurse: True 10 | 11 | - name: Import master CA crt for kubelet 12 | copy: 13 | src: "{{ pki_path }}/intermediate/ca.crt" 14 | dest: /etc/kubernetes/pki/ca.crt 15 | with_items: 16 | - { src: 'intermediate/ca.crt', dest: 'ca.crt' } 17 | notify: 18 | - restart kubelet 19 | 20 | - name: Import specific KEY for kubelet 21 | copy: 22 | src: "{{ pki_path }}/kubeconfigs/kubelet/{{ inventory_hostname }}/kubelet.key" 23 | dest: /etc/kubernetes/pki/kubelet.key 24 | notify: 25 | - restart kubelet 26 | 27 | - name: Import specific KEY for kubelet 28 | copy: 29 | src: "{{ pki_path }}/kubeconfigs/kubelet/{{ inventory_hostname }}/kubelet.crt" 30 | dest: /etc/kubernetes/pki/kubelet.crt 31 | notify: 32 | - restart kubelet 33 | -------------------------------------------------------------------------------- /roles/setup-master/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | pki_path: "{{ agorakube.global.data_path }}/pki" 3 | agorakube_kube_apiserver_enable_admission_plugins: 4 | # plugin AlwaysPullImage can be deleted. Credentials would be required to pull the private images every time. 5 | # Also, in trusted environments, this might increases load on network, registry, and decreases speed. 6 | # - AlwaysPullImages 7 | - NamespaceLifecycle 8 | # EventRateLimit is used to limit DoS on API server in case of event Flooding 9 | - EventRateLimit 10 | - LimitRanger 11 | - ServiceAccount 12 | - TaintNodesByCondition 13 | - PodNodeSelector 14 | - Priority 15 | - DefaultTolerationSeconds 16 | - DefaultStorageClass 17 | - StorageObjectInUseProtection 18 | - PersistentVolumeClaimResize 19 | - MutatingAdmissionWebhook 20 | - NodeRestriction 21 | - ValidatingAdmissionWebhook 22 | - RuntimeClass 23 | - ResourceQuota 24 | # SecurityContextDeny should be replaced by PodSecurityPolicy 25 | # - SecurityContextDeny 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. 
See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /CHANGELOG/Readme-v1.20.5.md: -------------------------------------------------------------------------------- 1 | # AGORAKUBE v1.20.5 2 | 3 | This release adds the following: 4 | 5 | [FEATURE] Add the possibility to uninstall AGORAKUBE from AGORAKUBE using tags: `ansible-playbook agorakube.yaml --tags uninstall`. Issues: #16, #23, #24, #25, #26, #41 /@bryanILKI /@sabrine-hammami /@pierreilki 6 | [FEATURE] AGORAKUBE can now run in a Python virtual environment / @pierreilki 7 | 8 | [DOCUMENTATION] AGORAKUBE project now follows the all-contributors specification. Contributions of any kind are welcome! Issue: #46 / @pierreilki 9 | [DOCUMENTATION] Documentation has been updated to give people a better understanding of how AGORAKUBE works. /@pierreilki, /@bryanILKI /@sabrine-hammami 10 | 11 | 12 | [ENHANCEMENT] AGORAKUBE now installs K8S v1.20.5 by default /@pierreilki 13 | [ENHANCEMENT] CoreDNS now exposes Prometheus metrics. PR: #39 /@acjohnson 14 | 15 | [OTHER] AGORAKUBE project now follows the Core Infrastructure Initiative best practices (CII Best Practices) /@pierreilki 16 | [OTHER] AGORAKUBE integrated the FOSSA license scan bot /@pierreilki 17 | -------------------------------------------------------------------------------- /roles/setup-etcd/tasks/delete-etcd-clusters.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Stop the etcd service 3 | systemd: 4 | name: etcd 5 | state: stopped 6 | tags: ['never', 'uninstall'] 7 | ignore_errors: yes 8 | 9 | - name: Uninstall and Delete Etcd PKI Files 10 | file: 11 | path: "/etc/kubernetes/pki" 12 | state: absent 13 | tags: ['never', 'uninstall'] 14 | 15 | - name: Uninstall and delete ETCD Service Definition 16 | file: 17 | path: "/etc/systemd/system/etcd.service" 18 | state: absent 19 | tags: ['never', 'uninstall'] 20 | 21 | - name: Reload daemons 22 | systemd: 23 | daemon_reload: yes 24 | tags: ['never', 'uninstall'] 25 | 26 | - name: Uninstall and Delete ETCD Binaries 27 | file: 28 | path: "/usr/bin/etcd-{{ agorakube_base_components.etcd.release }}-linux-amd64" 29 | state: absent 30 | tags: ['never', 'uninstall'] 31 | 32 | - name: Uninstall and delete ETCD data-dir 33 | file: 34 | path: "{{ agorakube_base_components.etcd.data_path }}" 35 | state: absent 36 | tags: ['never', 'uninstall'] 37 | -------------------------------------------------------------------------------- /tools/rolling_update/rolling.yaml: -------------------------------------------------------------------------------- 1 | # This playbook must be run from Agorakube root directory 2 | --- 3 | - hosts: workers,etcd,masters,storage 4 | any_errors_fatal: true 5 | become: true 6 | roles: 7 | - connexion-to-hosts 8 | - check-hosts-names 9 | - populate_etc_hosts 10 | - hosts: deploy 11 | become: true 12 | vars: 13 | ansible_python_interpreter: 
/usr/local/agorakube-env/bin/python3 14 | roles: 15 | # - compliance-checks 16 | - install-kubectl 17 | - generate-certs 18 | - configure-kubeconfig 19 | - hosts: etcd 20 | become: true 21 | roles: 22 | - setup-etcd 23 | - hosts: masters 24 | become: true 25 | serial: 1 26 | roles: 27 | - drain 28 | - setup-master 29 | - install-cni 30 | - install-runtimes 31 | - setup-worker 32 | - uncordon 33 | - hosts: workers,storage 34 | become: true 35 | serial: 1 36 | roles: 37 | - drain 38 | - install-cni 39 | - install-runtimes 40 | - setup-worker 41 | - clear 42 | - uncordon 43 | - hosts: deploy 44 | become: true 45 | roles: 46 | - post-scripts 47 | - show-info 48 | -------------------------------------------------------------------------------- /roles/setup-worker/templates/kube-proxy.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kube Proxy {{ agorakube_base_components.kubernetes.release }} 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | {% if ansible_fqdn in groups['masters'] %} 7 | ExecStart=/usr/local/bin/kubernetes/server/bin/kube-proxy \ 8 | {% else %} 9 | ExecStart=/usr/local/bin/kubernetes/node/bin/kube-proxy \ 10 | {% endif %} 11 | {% if agorakube_network.kube_proxy.mode in ['userspace','iptables','ipvs'] %} 12 | --proxy-mode={{ agorakube_network.kube_proxy.mode }} \ 13 | {% endif %} 14 | {% if agorakube_network.kube_proxy.mode == 'ipvs' %} 15 | {% if agorakube_network.kube_proxy.algorithm in ['rr','lc','dh','sh','sed','nq'] %} 16 | --ipvs-scheduler={{ agorakube_network.kube_proxy.algorithm }} \ 17 | {% endif %} 18 | {% endif %} 19 | --hostname-override={{ ansible_fqdn }} \ 20 | --log-file=/var/log/kubernetes/kube-proxy.log \ 21 | --log-file-max-size=1800 \ 22 | --logtostderr=false \ 23 | --config=/etc/kubernetes/manifests/kube-proxy-config.yaml 24 | Restart=on-failure 25 | RestartSec=5 26 | 27 | [Install] 28 | WantedBy=multi-user.target 29 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/calico.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: calico 6 | register: calico_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ calico_tempdir.path }}/calico.yaml" 12 | src: "calico.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ calico_tempdir.path }}/calico.yaml 17 | register: apply_calico 18 | changed_when: > 19 | apply_calico.stdout is search("created") 20 | or apply_calico.stdout is search("configured") 21 | 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ calico_tempdir.path }}" 26 | changed_when: false 27 | 28 | - name: Wait for Calico to be deployed 29 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n kube-system calico-kube-controllers 30 | changed_when: false 31 | register: calico_deployment 32 | until: calico_deployment.stdout.find("1/1") != -1 33 | retries: 300 34 | delay: 10 35 | run_once: true 36 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/metrics-server.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 
| suffix: metrics 6 | register: metrics_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ metrics_tempdir.path }}/metrics.yaml" 12 | src: "metrics.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ metrics_tempdir.path }}/metrics.yaml 17 | register: apply_metrics 18 | changed_when: > 19 | apply_metrics.stdout is search("created") 20 | or apply_metrics.stdout is search("configured") 21 | - name: Cleanup tempdir 22 | file: 23 | state: absent 24 | path: "{{ metrics_tempdir.path }}" 25 | changed_when: false 26 | 27 | - name: Verify if metrics-server pod has successfully started 28 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n kube-system metrics-server 29 | changed_when: false 30 | register: metrics_deployment 31 | until: metrics_deployment.stdout.find("1/1") != -1 32 | retries: 300 33 | delay: 10 34 | run_once: true 35 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/grafana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir Grafana 3 | tempfile: 4 | state: directory 5 | suffix: grafana 6 | register: grafana_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates Grafana 10 | template: 11 | dest: "{{ grafana_tempdir.path }}/grafana.yaml" 12 | src: "grafana.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates Grafana 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ grafana_tempdir.path }}/grafana.yaml --force 17 | register: apply_grafana 18 | changed_when: > 19 | apply_grafana.stdout is search("created") 20 | or apply_grafana.stdout is search("configured") 21 | - name: Cleanup tempdir 22 | file: 23 | state: absent 24 | path: "{{ grafana_tempdir.path }}" 25 | changed_when: false 26 | 27 | 28 | - name: Wait for Grafana to be deployed 29 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n supervision grafana 30 | changed_when: false 31 | register: grafana_deployment 32 | until: grafana_deployment.stdout.find("1/1") != -1 33 | retries: 300 34 | delay: 10 35 | run_once: true 36 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/reloader.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: reloader 6 | register: reloader_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ reloader_tempdir.path }}/reloader.yaml" 12 | src: "reloader.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ reloader_tempdir.path }}/reloader.yaml 17 | register: apply_reloader 18 | changed_when: > 19 | apply_reloader.stdout is search("created") 20 | or apply_reloader.stdout is search("configured") 21 | - name: Cleanup tempdir 22 | file: 23 | state: absent 24 | path: "{{ reloader_tempdir.path }}" 25 | changed_when: false 26 | 27 | - name: Verify if reloader pod has successfully started 28 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n kube-system reloader-reloader 29 | changed_when: false 30 | register: reloader_deployment 31 | until: 
reloader_deployment.stdout.find("1/1") != -1 32 | retries: 300 33 | delay: 10 34 | run_once: true 35 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/traefik.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: traefik 6 | register: traefik_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ traefik_tempdir.path }}/traefik.yaml" 12 | src: "traefik.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ traefik_tempdir.path }}/traefik.yaml 17 | register: apply_traefik 18 | changed_when: > 19 | apply_traefik.stdout is search("created") 20 | or apply_traefik.stdout is search("configured") 21 | 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ traefik_tempdir.path }}" 26 | changed_when: false 27 | 28 | - name: Verify if traefik-ingress-controller pods has successfully started 29 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n ingress-traefik traefik 30 | changed_when: false 31 | register: traefik_deployment 32 | until: traefik_deployment.stdout.find("1/1") != -1 33 | retries: 300 34 | delay: 10 35 | run_once: true 36 | -------------------------------------------------------------------------------- /roles/setup-etcd/tasks/setup-etcd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download etcd binaries 3 | unarchive: 4 | src: https://github.com/etcd-io/etcd/releases/download/{{ agorakube_base_components.etcd.release }}/etcd-{{ agorakube_base_components.etcd.release }}-linux-amd64.tar.gz 5 | dest: /usr/bin 6 | remote_src: yes 7 | creates: /usr/bin/etcd-{{ agorakube_base_components.etcd.release }}-linux-amd64/etcd 8 | 9 | - name: create folder {{ agorakube_base_components.etcd.data_path }} 10 | file: 11 | path: "{{ agorakube_base_components.etcd.data_path }}" 12 | state: directory 13 | recurse: yes 14 | owner: root 15 | group: root 16 | mode: '0700' 17 | 18 | - name: Import etcd.service 19 | template: 20 | src: etcd.service.j2 21 | dest: /etc/systemd/system/etcd.service 22 | owner: root 23 | group: root 24 | mode: '644' 25 | notify: 26 | - Reload daemons 27 | - Restart etcd 28 | 29 | - name: Run etcd 30 | systemd: 31 | state: started 32 | daemon_reload: yes 33 | name: etcd 34 | enabled: yes 35 | 36 | - name: Force all notified handlers to run at this point, not waiting for normal sync points 37 | meta: flush_handlers 38 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/monitoring.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: monitoring 6 | register: monitoring_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 1 10 | template: 11 | dest: "{{ monitoring_tempdir.path }}/monitoring.yaml" 12 | src: "monitoring.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 1 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ monitoring_tempdir.path }}/monitoring.yaml 17 | register: apply_monitoring 18 | changed_when: > 19 | apply_monitoring.stdout is search("created") 20 | or apply_monitoring.stdout is search("configured") 21 
| 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ monitoring_tempdir.path }}" 26 | changed_when: false 27 | 28 | - name: Wait for Prometheus to be deployed 29 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n supervision prometheus 30 | changed_when: false 31 | register: monitoring_deployment 32 | until: monitoring_deployment.stdout.find("1/1") != -1 33 | retries: 300 34 | delay: 10 35 | run_once: true 36 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/nginx.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: nginx 6 | register: nginx_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ nginx_tempdir.path }}/nginx.yaml" 12 | src: "nginx.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ nginx_tempdir.path }}/nginx.yaml --force 17 | register: apply_nginx 18 | changed_when: > 19 | apply_nginx.stdout is search("created") 20 | or apply_nginx.stdout is search("configured") 21 | - name: Cleanup tempdir 22 | file: 23 | state: absent 24 | path: "{{ nginx_tempdir.path }}" 25 | changed_when: false 26 | 27 | - name: Verify if nginx-ingress-controller pods has successfully started 28 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n ingress-nginx ingress-nginx-controller 29 | changed_when: false 30 | register: nginx_deployment 31 | until: nginx_deployment.stdout.find("1/1") != -1 32 | retries: 300 33 | delay: 10 34 | run_once: true 35 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/log-centralization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: logging 6 | register: logging_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 1 10 | template: 11 | dest: "{{ logging_tempdir.path }}/log-centralization.yaml" 12 | src: "log-centralization.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 1 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ logging_tempdir.path }}/log-centralization.yaml --force 17 | register: apply_logging 18 | changed_when: > 19 | apply_logging.stdout is search("created") 20 | or apply_logging.stdout is search("configured") 21 | 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ logging_tempdir.path }}" 26 | changed_when: false 27 | 28 | #- name: Wait for Prometheus to be deployed 29 | # command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n supervision prometheus 30 | # changed_when: false 31 | # register: monitoring_deployment 32 | # until: monitoring_deployment.stdout.find("1/1") != -1 33 | # retries: 300 34 | # delay: 10 35 | # run_once: true 36 | -------------------------------------------------------------------------------- /roles/setup-etcd/tasks/import-certs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create /etc/kubernetes/pki/etcd directory 3 | file: 4 | path: /etc/kubernetes/pki/etcd 5 | state: directory 6 | recurse: True 7 | 8 | - name: Import etcd keys and certificates 9 | copy: 10 
| src: "{{ pki_path }}/{{ item.src }}" 11 | dest: /etc/kubernetes/pki/{{ item.dest }} 12 | with_items: 13 | - { src: 'intermediate/etcd/ca.key', dest: 'etcd/ca.key' } 14 | - { src: 'intermediate/etcd/ca.crt', dest: 'etcd/ca.crt' } 15 | - { src: 'end/kube-etcd.key', dest: 'etcd/server.key' } 16 | - { src: 'end/kube-etcd.crt', dest: 'etcd/server.crt' } 17 | - { src: 'end/kube-etcd-peer.key', dest: 'etcd/peer.key' } 18 | - { src: 'end/kube-etcd-peer.crt', dest: 'etcd/peer.crt' } 19 | notify: 20 | - Restart etcd 21 | 22 | - name: Import etcdctl keys and certificates 23 | copy: 24 | src: "{{ pki_path }}/{{ item.src }}" 25 | dest: /etc/kubernetes/pki/{{ item.dest }} 26 | with_items: 27 | - { src: 'end/kube-etcd-healthcheck-client.key', dest: 'etcd/healthcheck-client.key' } 28 | - { src: 'end/kube-etcd-healthcheck-client.crt', dest: 'etcd/healthcheck-client.crt' } 29 | - { src: 'intermediate/etcd/ca.crt', dest: 'etcd/ca.crt' } 30 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/tls-monitoring-gen.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: monitoring 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | kind: ClusterRole 9 | metadata: 10 | name: tls-admin-monitoring 11 | rules: 12 | - apiGroups: [""] 13 | resources: 14 | - secrets 15 | verbs: ["get", "list", "watch", "create"] 16 | --- 17 | apiVersion: v1 18 | kind: ServiceAccount 19 | metadata: 20 | name: tls-admin-monitoring 21 | namespace: monitoring 22 | --- 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | kind: ClusterRoleBinding 25 | metadata: 26 | name: tls-admin-monitoring 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: tls-admin-monitoring 31 | subjects: 32 | - kind: ServiceAccount 33 | name: tls-admin-monitoring 34 | namespace: monitoring 35 | --- 36 | apiVersion: batch/v1 37 | kind: Job 38 | metadata: 39 | name: tls-monitoring-gen 40 | namespace: monitoring 41 | spec: 42 | ttlSecondsAfterFinished: 100 43 | template: 44 | spec: 45 | serviceAccountName: tls-admin-monitoring 46 | containers: 47 | - name: certgen 48 | image: agorakube/tls-gen-monitoring:v1.0.0 49 | restartPolicy: Never 50 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/coredns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: coredns 6 | register: coredns_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ coredns_tempdir.path }}/coredns.yaml" 12 | src: "coredns.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ coredns_tempdir.path }}/coredns.yaml 17 | register: apply_coredns 18 | changed_when: > 19 | apply_coredns.stdout is search("created") 20 | or apply_coredns.stdout is search("configured") 21 | 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ coredns_tempdir.path }}" 26 | changed_when: false 27 | 28 | - name: Verify if coredns pods has successfully started 29 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n kube-system coredns 30 | changed_when: false 31 | register: dashboard_deployment 32 | until: 
dashboard_deployment.stdout.find(agorakube_features.coredns.replicas | string + "/" + agorakube_features.coredns.replicas | string) != -1 33 | retries: 300 34 | delay: 10 35 | run_once: true 36 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/argocd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: argocd 6 | register: argocd_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ argocd_tempdir.path }}/argocd.yaml" 12 | src: "argocd.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ argocd_tempdir.path }}/argocd.yaml 17 | register: apply_argocd 18 | changed_when: > 19 | apply_argocd.stdout is search("created") 20 | or apply_argocd.stdout is search("configured") 21 | 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ argocd_tempdir.path }}" 26 | changed_when: false 27 | 28 | #- name: Verify if argocd pods has successfully started 29 | # command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n argocd-system 30 | # changed_when: false 31 | # register: argocd_deployment 32 | # until: argocd_deployment.stdout.find(agorakube_features.argocd.replicas.controller_manager | string + "/" + agorakube_features.argocd.replicas.controller_manager | string) != -1 33 | # retries: 300 34 | # delay: 10 35 | # run_once: true 36 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/gatekeeper.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: gatekeeper 6 | register: gatekeeper_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ gatekeeper_tempdir.path }}/gatekeeper.yaml" 12 | src: "gatekeeper.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ gatekeeper_tempdir.path }}/gatekeeper.yaml 17 | register: apply_gatekeeper 18 | changed_when: > 19 | apply_gatekeeper.stdout is search("created") 20 | or apply_gatekeeper.stdout is search("configured") 21 | 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ gatekeeper_tempdir.path }}" 26 | changed_when: false 27 | 28 | - name: Verify if gatekeeper pods has successfully started 29 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n gatekeeper-system 30 | changed_when: false 31 | register: gatekeeper_deployment 32 | until: gatekeeper_deployment.stdout.find(agorakube_features.gatekeeper.replicas.controller_manager | string + "/" + agorakube_features.gatekeeper.replicas.controller_manager | string) != -1 33 | retries: 300 34 | delay: 10 35 | run_once: true 36 | -------------------------------------------------------------------------------- /roles/backup-etcd/tasks/backup-etcd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download etcd binaries 3 | unarchive: 4 | src: https://github.com/etcd-io/etcd/releases/download/{{ etcd_release }}/etcd-{{ etcd_release }}-linux-amd64.tar.gz 5 | dest: /usr/local/bin 6 | remote_src: yes 7 | 8 | - name: Ensure backup directory 
exists 9 | file: 10 | path: "{{ data_path }}/backups_etcd" 11 | state: directory 12 | when: custom_etcd_backup_dir is not defined 13 | 14 | - name: Ensure backup directory exists 15 | file: 16 | path: "{{ custom_etcd_backup_dir }}" 17 | state: directory 18 | when: custom_etcd_backup_dir is defined 19 | 20 | - name: Create tempdir for backup etcd script 21 | tempfile: 22 | state: directory 23 | suffix: backup_etcd 24 | register: backup_etcd_tempdir 25 | changed_when: false 26 | 27 | - name: Render templates 28 | template: 29 | dest: "{{ backup_etcd_tempdir.path }}/command_backup_etcd.sh" 30 | src: "command_backup_etcd.sh.j2" 31 | mode: 0777 32 | changed_when: false 33 | 34 | - name: Backup ETCD 35 | shell: | 36 | "{{ backup_etcd_tempdir.path }}/command_backup_etcd.sh" 37 | args: 38 | executable: /bin/bash 39 | when: backup_etcd 40 | 41 | - name: Cleanup tempdir 42 | file: 43 | state: absent 44 | path: "{{ backup_etcd_tempdir.path }}" 45 | changed_when: false 46 | -------------------------------------------------------------------------------- /roles/setup-master/templates/kube-controller-manager.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Controller Manager {{ agorakube_base_components.kubernetes.release }} 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kubernetes/server/bin/kube-controller-manager \ 7 | --bind-address=127.0.0.1 \ 8 | --cluster-cidr={{ agorakube_network.cidr.pod }} \ 9 | --cluster-name=kubernetes \ 10 | --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt \ 11 | --cluster-signing-key-file=/etc/kubernetes/pki/ca.key \ 12 | --kubeconfig=/etc/kubernetes/manifests/controller-manager.conf \ 13 | --leader-elect=true \ 14 | --profiling=false \ 15 | --root-ca-file=/etc/kubernetes/pki/ca.crt \ 16 | --client-ca-file=/etc/kubernetes/pki/ca.crt \ 17 | --service-account-private-key-file=/etc/kubernetes/pki/sa/sa.key \ 18 | --service-cluster-ip-range={{ agorakube_network.cidr.service }} \ 19 | --terminated-pod-gc-threshold=500 \ 20 | --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt \ 21 | --use-service-account-credentials=true \ 22 | --allocate-node-cidrs=true \ 23 | --log-file=/var/log/kubernetes/kube-controller-manager.log \ 24 | --log-file-max-size=1800 \ 25 | --logtostderr=false \ 26 | --v=2 27 | Restart=on-failure 28 | RestartSec=5 29 | 30 | [Install] 31 | WantedBy=multi-user.target 32 | -------------------------------------------------------------------------------- /roles/post-scripts/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #network_cni_plugin: kube-router 3 | #cluster_dns_ip: 10.32.0.10 4 | #ingress_controller: traefik 5 | #k8s_dashboard: true 6 | #k8s_dashboard_admin: true 7 | #service_cluster_ip_range: 10.32.0.0/24 8 | #cni_release: 0.8.5 9 | #cluster_cidr: 10.33.0.0/16 10 | #enable_metrics_server: True 11 | # Calico 12 | # calico_mtu: 0 = auto-detect MTU. Configure calico_mtu manually for better performance. 13 | # Calico is configured to use VXLAN cross-subnet.
VXLAN adds a 50-byte header, so if you configure the MTU manually, reduce the maximum size by 50: e.g. 1500 => 1450 14 | #calico_mtu: 0 15 | #enable_metallb_layer2: True 16 | #metallb_layer2_ips: 10.100.200.10-10.100.200.250 17 | # metallb_secret_key is generated with the command: openssl rand -base64 128 18 | #metallb_secret_key: LGyt2l9XftOxEUIeFf2w0eCM7KjyQdkHform0gldYBKMORWkfQIsfXW0sQlo1VjJBB17shY5RtLg0klDNqNq4PAhNaub+olSka61LxV73KN2VaJY/snrZmHbdf/a7DfdzaeQ5pzP6D5O7zbUZwfb5ASOhNrG8aDMY3rkf4ZzHkc= 19 | #enable_persistence: True 20 | #enable_monitoring: True 21 | #dashboard_admin_user: administrator 22 | #dashboard_admin_password: P@ssw0rd 23 | # ingress_nginx_release is used to specify the nginx release to install. Useful for installations and upgrades. 24 | #ingress_nginx_release: v0.41.2 25 | #openebs_io_base_dir: /var/openebs 26 | #jiva_fs_type: ext4 27 | #io_local_host_path: /var/local-hostpath 28 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/docker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add Docker repository for Debian-like distros 3 | include_tasks: "repo-debian-like.yaml" 4 | when: is_debian_like 5 | 6 | - name: Add Docker repository for CentOS-like distros 7 | include_tasks: "repo-centos-like.yaml" 8 | when: is_centos_like 9 | 10 | - name: Install iptables package 11 | package: 12 | name: iptables 13 | state: latest 14 | 15 | - name: Ensure the selected iptables mode is legacy 16 | alternatives: 17 | name: iptables 18 | path: /usr/sbin/iptables-legacy 19 | when: ansible_distribution|lower == 'debian' 20 | 21 | - name: Configure /etc/docker/daemon.json 22 | copy: 23 | dest: /etc/docker/daemon.json 24 | src: daemon.json 25 | notify: 26 | - Reload docker 27 | 28 | - name: Install Docker for Debian-like distros 29 | include_tasks: "docker-debian-like.yaml" 30 | when: is_debian_like 31 | 32 | - name: Install Docker for CentOS-like distros 33 | include_tasks: "docker-centos-like.yaml" 34 | when: is_centos_like 35 | 36 | - name: Start Runtime 37 | systemd: 38 | state: started 39 | name: docker 40 | 41 | - name: Make sure /etc/docker exists 42 | file: 43 | path: /etc/docker 44 | state: directory 45 | 46 | - name: Make sure docker is running 47 | systemd: 48 | state: started 49 | name: docker 50 | enabled: yes 51 | -------------------------------------------------------------------------------- /roles/backup-etcd/templates/command_backup_etcd.sh.j2: -------------------------------------------------------------------------------- 1 | {% set etcd_initial_cluster = [] %} 2 | {% for host in groups['etcd'] %} 3 | {{ etcd_initial_cluster.append( "https://"+hostvars[host].ansible_host+":2379" ) }} 4 | {% endfor %} 5 | 6 | {% if custom_etcd_backup_dir is defined %} 7 | ETCDCTL_API=3 /usr/local/bin/etcd-{{etcd_release}}-linux-amd64/etcdctl --endpoints=$(ETCDCTL_API=3 /usr/local/bin/etcd-{{etcd_release}}-linux-amd64/etcdctl --endpoints={{ etcd_initial_cluster|join(',') }} --cacert={{pki_path}}/intermediate/etcd/ca.crt --cert={{pki_path}}/end/kube-etcd-healthcheck-client.crt --key={{pki_path}}/end/kube-etcd-healthcheck-client.key endpoint status | grep ', true, false,' | awk '{ print $1}' | sed 's/,//g') --cacert={{pki_path}}/intermediate/etcd/ca.crt --cert={{pki_path}}/end/kube-etcd-healthcheck-client.crt --key={{pki_path}}/end/kube-etcd-healthcheck-client.key snapshot save {{custom_etcd_backup_dir}}/snapshot_etcd_cluster.`date +%m-%d-%y_%H-%M-%S`.db 8 | {% else %} 9 | ETCDCTL_API=3 /usr/local/bin/etcd-{{etcd_release}}-linux-amd64/etcdctl --endpoints=$(ETCDCTL_API=3 /usr/local/bin/etcd-{{etcd_release}}-linux-amd64/etcdctl --endpoints={{ etcd_initial_cluster|join(',') }}
--cacert={{pki_path}}/intermediate/etcd/ca.crt --cert={{pki_path}}/end/kube-etcd-healthcheck-client.crt --key={{pki_path}}/end/kube-etcd-healthcheck-client.key endpoint status | grep ', true, false,' | awk '{ print $1}' | sed 's/,//g') --cacert={{pki_path}}/intermediate/etcd/ca.crt --cert={{pki_path}}/end/kube-etcd-healthcheck-client.crt --key={{pki_path}}/end/kube-etcd-healthcheck-client.key snapshot save {{data_path}}/backups_etcd/snapshot_etcd_cluster.`date +%m-%d-%y_%H-%M-%S`.db 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /roles/setup-master/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #kubernetes_release: v1.15.0 3 | #cluster_cidr: 10.200.0.0/16 4 | #service_cluster_ip_range: 10.32.0.0/24 5 | #service_node_port_range: 30000-32767 6 | #encrypt_etcd_keys: 7 | # Warning: If multiple keys are defined, ONLY the LAST KEY is used to encrypt and decrypt. 8 | # Other keys are used only for decryption. Keys can be generated with the command: head -c 32 /dev/urandom | base64 9 | # key1: 10 | # secret: 1fJcKt6vBxMt+AkBanoaxFF2O6ytHIkETNgQWv4b/+Q= 11 | #delete_previous_k8s_install: false 12 | #kube_apiserver_enable_admission_plugins: 13 | # The AlwaysPullImages plugin can be removed. Credentials would be required to pull private images every time. 14 | # Also, in trusted environments, this might increase load on the network and registry, and decrease speed. 15 | # - AlwaysPullImages 16 | # - NamespaceLifecycle 17 | # EventRateLimit is used to limit DoS on the API server in case of event flooding 18 | # - EventRateLimit 19 | # - LimitRanger 20 | # - ServiceAccount 21 | # - TaintNodesByCondition 22 | # - PodNodeSelector 23 | # - Priority 24 | # - DefaultTolerationSeconds 25 | # - DefaultStorageClass 26 | # - StorageObjectInUseProtection 27 | # - PersistentVolumeClaimResize 28 | # - MutatingAdmissionWebhook 29 | # - NodeRestriction 30 | # - ValidatingAdmissionWebhook 31 | # - RuntimeClass 32 | # - ResourceQuota 33 | # SecurityContextDeny should be replaced by PodSecurityPolicy 34 | # - SecurityContextDeny 35 | -------------------------------------------------------------------------------- /roles/setup-master/tasks/delete-k8s-components.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Stop the Services before uninstalling them 3 | - name: Stop Kube APIServer Service 4 | systemd: 5 | state: stopped 6 | name: kube-apiserver 7 | tags: ['never', 'uninstall'] 8 | ignore_errors: yes 9 | 10 | - name: Stop Kube Controller Manager Service 11 | systemd: 12 | state: stopped 13 | name: kube-controller-manager 14 | tags: ['never', 'uninstall'] 15 | ignore_errors: yes 16 | 17 | - name: Stop Kube Scheduler Service 18 | systemd: 19 | state: stopped 20 | name: kube-scheduler 21 | tags: ['never', 'uninstall'] 22 | ignore_errors: yes 23 | 24 | # Delete Services Definition 25 | - name: Delete Master Services 26 | file: 27 | path: "/etc/systemd/system/{{ item }}" 28 | state: absent 29 | with_items: 30 | - "kube-apiserver.service" 31 | - "kube-controller-manager.service" 32 | - "kube-scheduler.service" 33 | notify: 34 | - Reload daemons 35 | tags: ['never', 'uninstall'] 36 | 37 | # Delete PKI files and Data Directory 38 | - name: Delete PKI files and Data Directory 39 | file: 40 | path: /etc/kubernetes 41 | state: absent 42 | tags: ['never', 'uninstall'] 43 | 44 | # Delete Binaries 45 | - name: Delete Server Binaries 46 | file: 47 | path:
/usr/local/bin/kubernetes 48 | state: absent 49 | tags: ['never', 'uninstall'] 50 | 51 | # Delete Logs 52 | - name: Delete Logs 53 | file: 54 | path: /var/log/kubernetes 55 | state: absent 56 | tags: ['never', 'uninstall'] 57 | -------------------------------------------------------------------------------- /roles/generate-certs/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | etcd_hosts_dns: "{{ groups['etcd'] | map('regex_replace', '^(.*)$', 'DNS:\\1') | join(',') }}" 3 | etcd_hosts_ips: "{{ groups['etcd'] | map('extract',hostvars,'ansible_host') | list | map('regex_replace', '^(.*)$', 'IP:\\1') | join(',')}}" 4 | etcd_subject: "{{ etcd_hosts_dns + ',' + etcd_hosts_ips + ',IP:127.0.0.1,DNS:localhost' }}" 5 | master_hosts_dns: "{{ groups['masters'] | list | map('regex_replace', '^(.*)$', 'DNS:\\1') | join(',') }}" 6 | master_hosts_ips: "{{ groups['masters'] | map('extract',hostvars,'ansible_host') | list | map('regex_replace', '^(.*)$', 'IP:\\1') | join(',')}}" 7 | master_custom_subject: "IP:127.0.0.1,DNS:localhost,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster,DNS:kubernetes.default.svc.cluster.local{% if master_custom_alt_name %}{{ ',DNS:' + master_custom_alt_name }}{% endif %}" 8 | kube_api_subject: "{% if advertise_masters | ansible.netcommon.ipv4 %}{{ 'IP:' + advertise_masters + ',' + master_hosts_dns + ',' + master_hosts_ips + ',' + master_custom_subject + ',IP:' + agorakube_network.service_ip.kubernetes }}{% else %}{{ 'DNS:' + advertise_masters + ',' + master_hosts_dns + ',' + master_hosts_ips + ',' + master_custom_subject + ',IP:' + agorakube_network.service_ip.kubernetes }}{% endif %}" 9 | pki_path: "{{ agorakube.global.data_path }}/pki" 10 | rotate_private_keys: False # This parameter is used to renew the K8S PKI keys. Keys, CSRs and CRTs are renewed. If Calico is used as CNI, you must restart your cluster...
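# A minimal usage sketch (hypothetical values; normally overridden from group_vars/all.yaml rather than edited in this role):
# rotate_private_keys: True  # on the next ansible-playbook agorakube.yaml run, keys, CSRs and CRTs would be regenerated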
11 | -------------------------------------------------------------------------------- /roles/setup-etcd/templates/etcd.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | Documentation=https://github.com/coreos 4 | {% set etcd_initial_cluster = [] %} 5 | {% for host in groups['etcd'] %} 6 | {{ etcd_initial_cluster.append( host+"=https://"+hostvars[host].ansible_host+":2380" ) }} 7 | {% endfor %} 8 | [Service] 9 | Type=notify 10 | ExecStart=/usr/bin/etcd-{{ agorakube_base_components.etcd.release }}-linux-amd64/etcd \ 11 | --auto-tls=false \ 12 | --peer-auto-tls=false \ 13 | --name {{ ansible_fqdn }} \ 14 | --cert-file=/etc/kubernetes/pki/etcd/server.crt \ 15 | --key-file=/etc/kubernetes/pki/etcd/server.key \ 16 | --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt \ 17 | --peer-key-file=/etc/kubernetes/pki/etcd/peer.key \ 18 | --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt \ 19 | --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt \ 20 | --peer-client-cert-auth \ 21 | --client-cert-auth \ 22 | --initial-advertise-peer-urls https://{{ hostvars[ansible_fqdn].ansible_host}}:2380 \ 23 | --listen-peer-urls https://{{ hostvars[ansible_fqdn].ansible_host}}:2380 \ 24 | --listen-client-urls https://{{ hostvars[ansible_fqdn].ansible_host}}:2379,https://127.0.0.1:2379 \ 25 | --advertise-client-urls https://{{ hostvars[ansible_fqdn].ansible_host}}:2379 \ 26 | --initial-cluster-token etcd-cluster-0 \ 27 | --initial-cluster {{ etcd_initial_cluster|join(',') }} \ 28 | --initial-cluster-state new \ 29 | --logger 'zap' \ 30 | --log-outputs=stderr,/var/log/etcd.log \ 31 | --data-dir={{ agorakube_base_components.etcd.data_path }} 32 | Restart=on-failure 33 | RestartSec=5 34 | 35 | [Install] 36 | WantedBy=multi-user.target 37 | -------------------------------------------------------------------------------- /roles/remove_etc_hosts/tasks/remove-etc-hosts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Combine etc_hosts list with etc_hosts group entries 3 | set_fact: 4 | etc_hosts: "{{ etc_hosts | default([]) + [{'hostname': item, 'ip': hostvars[item].ansible_host}] }}" 5 | with_items: 6 | - "{{ groups['etc_hosts'] }}" 7 | when: hostvars[item].ansible_host not in etc_hosts | map(attribute="ip") | list 8 | 9 | - name: Slurp /etc/hosts file 10 | slurp: 11 | src: /etc/hosts 12 | register: slurpfile 13 | 14 | - name: Create remote_etc_hosts list 15 | set_fact: 16 | remote_etc_hosts: "{{ remote_etc_hosts | default([]) + [{'ip': item.split(' ') | first, 'hostname': item.split(' ')[1:] | join(' ')}] }}" 17 | with_items: 18 | - "{{ slurpfile['content'] | b64decode | trim | split('\n') }}" 19 | when: 20 | - item is not regex('^#(.*)$') 21 | - item | trim != '' 22 | 23 | - name: Update /etc/hosts 24 | lineinfile: 25 | path: /etc/hosts 26 | backup: "{{ backup_etc_hosts | bool }}" 27 | state: present 28 | line: "{{ item.ip }} {{ item.hostname }}" 29 | with_items: 30 | - "{{ etc_hosts }}" 31 | become: true 32 | 33 | - name: Cleanup /etc/hosts 34 | lineinfile: 35 | path: /etc/hosts 36 | backup: "{{ backup_etc_hosts | bool }}" 37 | state: "{{ ((item.ip not in (etc_hosts | map(attribute='ip') | list)) or 38 | (item.hostname not in (etc_hosts | map(attribute='hostname') | list))) | ternary('absent', 'present') }}" 39 | line: "{{ item.ip }} {{ item.hostname }}" 40 | regexp: "^{{ item.ip | regex_replace('\\.','\\.' 
) }}\\s+{{ item.hostname }}" 41 | with_items: 42 | - "{{ remote_etc_hosts }}" 43 | become: true 44 | -------------------------------------------------------------------------------- /roles/restore-etcd/tasks/restore-etcd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Stop control plane services 3 | systemd: 4 | name: "{{ item }}" 5 | state: stopped 6 | with_items: 7 | - kube-apiserver 8 | - kube-scheduler 9 | - kube-controller-manager 10 | - etcd 11 | 12 | - name: Create tempdir 13 | tempfile: 14 | state: directory 15 | suffix: backup_etcd 16 | register: backup_etcd_tempdir 17 | 18 | - name: Import etcd backup file 19 | copy: 20 | src: "{{ restoration_snapshot_file }}" 21 | dest: "{{ backup_etcd_tempdir.path }}/snapshot-etcd.db" 22 | 23 | - name: Delete previous etcd data 24 | file: 25 | path: "{{ agorakube_base_components.etcd.data_path }}" 26 | state: absent 27 | 28 | - name: Render restoration script 29 | template: 30 | src: restore.sh.j2 31 | dest: "{{ backup_etcd_tempdir.path }}/restore.sh" 32 | mode: u+rwx 33 | 34 | - name: Restore ETCD data from backup file with rendered script 35 | shell: | 36 | {{ backup_etcd_tempdir.path }}/restore.sh 37 | when: restoration_snapshot_file is defined 38 | 39 | - name: Delete tmp dir 40 | file: 41 | path: "{{ backup_etcd_tempdir.path }}" 42 | state: absent 43 | 44 | - name: create folder {{ agorakube_base_components.etcd.data_path }} 45 | file: 46 | path: "{{ agorakube_base_components.etcd.data_path }}" 47 | state: directory 48 | recurse: yes 49 | owner: root 50 | group: root 51 | mode: '0700' 52 | 53 | - name: Start control plane services 54 | systemd: 55 | name: "{{ item }}" 56 | state: started 57 | with_items: 58 | - kube-apiserver 59 | - kube-scheduler 60 | - kube-controller-manager 61 | - etcd 62 | -------------------------------------------------------------------------------- /setup-hosts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Determine OS platform 3 | UNAME=$(uname | tr "[:upper:]" "[:lower:]") 4 | # If Linux, try to determine specific distribution 5 | if [ "$UNAME" == "linux" ]; then 6 | # If available, use LSB to identify distribution 7 | if [ -f /etc/lsb-release -o -d /etc/lsb-release.d ]; then 8 | export DISTRO=$(lsb_release -i | cut -d: -f2 | sed s/'^\t'//) 9 | # Otherwise, use release info file 10 | else 11 | export DISTRO=$(ls -d /etc/[A-Za-z]*[_-][rv]e[lr]* | grep -v "lsb" | cut -d'/' -f3 | cut -d'-' -f1 | cut -d'_' -f1) 12 | fi 13 | fi 14 | # For everything else (or if above failed), just use generic identifier 15 | [ "$DISTRO" == "" ] && export DISTRO=$UNAME 16 | unset UNAME 17 | DISTRO=$(echo $DISTRO | tr '[:upper:]' '[:lower:]') 18 | #echo $DISTRO 19 | 20 | if [[ $DISTRO == *"ubuntu"* ]]; then 21 | export DEBIAN_FRONTEND=noninteractive 22 | sudo killall apt apt-get 23 | sudo apt update 24 | sudo apt install software-properties-common curl openssh-server -yqq 25 | sudo apt -y install python3
python3-pip 39 | 40 | else 41 | echo "Unsupported OS" 42 | exit 43 | fi 44 | -------------------------------------------------------------------------------- /roles/setup-worker/templates/kubelet.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet {{ agorakube_base_components.kubernetes.release }} 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | {% if agorakube_base_components.container.engine == 'docker' %} 5 | After=docker.service 6 | Requires=docker.service 7 | {% endif %} 8 | {% if agorakube_base_components.container.engine == 'containerd' %} 9 | After=containerd.service 10 | Requires=containerd.service 11 | {% endif %} 12 | 13 | [Service] 14 | {% if ansible_fqdn in groups['masters'] %} 15 | ExecStart=/usr/local/bin/kubernetes/server/bin/kubelet \ 16 | {% else %} 17 | ExecStart=/usr/local/bin/kubernetes/node/bin/kubelet \ 18 | {% endif %} 19 | --hostname-override={{ ansible_fqdn }} \ 20 | --config=/etc/kubernetes/manifests/kubelet-config.yaml \ 21 | {% if agorakube_base_components.container.engine == 'docker' %} 22 | --container-runtime=docker \ 23 | {% endif %} 24 | {% if agorakube_base_components.cloud_controller_manager.enabled | bool == True %} 25 | --cloud-provider=external \ 26 | {% endif %} 27 | {% if agorakube_base_components.container.engine == 'containerd' %} 28 | --container-runtime=remote \ 29 | --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \ 30 | {% endif %} 31 | --image-pull-progress-deadline=2m \ 32 | --kubeconfig=/etc/kubernetes/manifests/kubelet.conf \ 33 | --network-plugin=cni \ 34 | --register-node=true \ 35 | --log-file=/var/log/kubernetes/kubelet.log \ 36 | --log-file-max-size=1800 \ 37 | --logtostderr=false \ 38 | --node-ip={{ ansible_host }} \ 39 | --v=2 40 | Restart=on-failure 41 | RestartSec=5 42 | 43 | [Install] 44 | WantedBy=multi-user.target 45 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/openebs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: openebs 6 | register: openebs_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ openebs_tempdir.path }}/openebs.yaml" 12 | src: "openebs.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ openebs_tempdir.path }}/openebs.yaml 17 | register: apply_openebs 18 | changed_when: > 19 | apply_openebs.stdout is search("created") 20 | or apply_openebs.stdout is search("configured") 21 | 22 | - name: Verify if openebs pods has successfully started 23 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n openebs maya-apiserver 24 | changed_when: false 25 | register: openebs_deployment 26 | until: openebs_deployment.stdout.find("1/1") != -1 27 | retries: 300 28 | delay: 10 29 | run_once: true 30 | 31 | - name: Render templates 32 | template: 33 | dest: "{{ openebs_tempdir.path }}/configure_storage_openebs.yaml" 34 | src: "configure_storage_openebs.yaml.j2" 35 | changed_when: false 36 | 37 | - name: Apply templates 38 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ openebs_tempdir.path }}/configure_storage_openebs.yaml 39 | register: apply_openebs_sc 40 | changed_when: > 41 | apply_openebs_sc.stdout is 
search("created") 42 | or apply_openebs_sc.stdout is search("configured") 43 | 44 | - name: Cleanup tempdir 45 | file: 46 | state: absent 47 | path: "{{ openebs_tempdir.path }}" 48 | changed_when: false 49 | -------------------------------------------------------------------------------- /roles/setup-master/files/audit-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: audit.k8s.io/v1beta1 2 | kind: Policy 3 | rules: 4 | # Do not log from kube-system accounts 5 | - level: None 6 | userGroups: 7 | - system:serviceaccounts:kube-system 8 | 9 | # Do not log from kubernetes service system accounts 10 | - level: None 11 | users: 12 | - system:apiserver 13 | - system:kube-scheduler 14 | - system:volume-scheduler 15 | - system:kube-controller-manager 16 | - system:node 17 | - system:kube-proxy 18 | 19 | # Do not log IKE components logs 20 | - level: None 21 | users: 22 | - system:serviceaccount:ingress-nginx:ingress-nginx 23 | - system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard 24 | - system:serviceaccount:monitoring:prometheus 25 | - system:serviceaccount:openebs:openebs-maya-operator 26 | - system:serviceaccount:metallb-system:speaker 27 | - system:serviceaccount:metallb-system:controller 28 | - system:serviceaccount:ingress-traefik:traefik-ingress-controller 29 | - system:serviceaccount:monitoring:metrics-prometheus-server 30 | 31 | # Do not log from collector 32 | - level: None 33 | users: 34 | - system:serviceaccount:collectorforkubernetes:collectorforkubernetes 35 | # Don't log nodes communications 36 | - level: None 37 | userGroups: 38 | - system:nodes 39 | 40 | # Don't log these read-only URLs. 41 | - level: None 42 | nonResourceURLs: 43 | - /healthz* 44 | - /version 45 | - /swagger* 46 | 47 | # Log configmap and secret changes in all namespaces at the metadata level. 48 | - level: Metadata 49 | resources: 50 | - resources: ["secrets", "configmaps"] 51 | 52 | # A catch-all rule to log all other requests at the request level. 
53 | - level: Request 54 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/containerd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add the [overlay] and [br_netfilter] modules 3 | modprobe: 4 | name: "{{ item }}" 5 | state: present 6 | with_items: 7 | - overlay 8 | - br_netfilter 9 | 10 | - name: Create activated kernel modules directory 11 | file: 12 | path: /etc/modules-load.d/ 13 | state: directory 14 | 15 | - name: Load kernel modules at boot 16 | copy: 17 | content: "{{ item }}" 18 | dest: /etc/modules-load.d/{{ item }}.conf 19 | with_items: 20 | - overlay 21 | - br_netfilter 22 | 23 | - name: Setup required sysctl params, these persist across reboots 24 | sysctl: 25 | name: "{{ item.name }}" 26 | value: "{{ item.value }}" 27 | state: present 28 | reload: yes 29 | sysctl_file: /etc/sysctl.d/99-kubernetes-cri.conf 30 | with_items: 31 | - { name: 'net.bridge.bridge-nf-call-iptables', value: '1' } 32 | - { name: 'net.ipv4.ip_forward', value: '1' } 33 | - { name: 'net.bridge.bridge-nf-call-ip6tables', value: '1' } 34 | 35 | - name: Add Docker repository for Debian-like distros 36 | include_tasks: "repo-debian-like.yaml" 37 | when: is_debian_like 38 | 39 | - name: Add Docker repository for CentOS-like distros 40 | include_tasks: "repo-centos-like.yaml" 41 | when: is_centos_like 42 | 43 | - name: Install containerd 44 | package: 45 | name: containerd.io 46 | state: latest 47 | 48 | - name: Create /etc/containerd directory 49 | file: 50 | path: /etc/containerd 51 | state: directory 52 | 53 | - name: Configure containerd 54 | copy: 55 | src: config.toml 56 | dest: /etc/containerd/config.toml 57 | notify: 58 | - restart containerd 59 | 60 | - name: Start Runtime 61 | systemd: 62 | state: started 63 | name: containerd 64 | enabled: yes 65 | -------------------------------------------------------------------------------- /docs/manage_etcd.md: -------------------------------------------------------------------------------- 1 | # Manage ETCD Clusters 2 | 3 | This file explains how to **backup** and **restore** the ETCD cluster for disaster recovery. 4 | 5 | ## How to backup etcd cluster ? 6 | 7 | You can backup the ETCD cluster from the Agorakube root directory by following the steps below: 8 | 9 | ``` 10 | # Run the following command: 11 | 12 | sudo ansible-playbook tools/etcd/backup-etcd-cluster.yaml 13 | 14 | ``` 15 | 16 | The backup file will be saved on the **deploy** machine in the following path: ```{{data_path}}/backups_etcd/``` 17 | 18 | 19 | 20 | --- 21 | If you wish to customize the backup path, set the following variable in the ```group_vars/all.yaml``` file: 22 | 23 | ``` 24 | custom_etcd_backup_dir: /path/to/store/backups/on/deploy/machine 25 | 26 | ``` 27 | 28 | ## How to restore etcd cluster ? 29 | 30 | You can restore the ETCD cluster from the Agorakube root directory by following the steps below: 31 | 32 | ``` 33 | # Edit the group_vars/all.yaml file and add the following variable: 34 | 35 | restoration_snapshot_file: /path/to/the/backups/file/on/deploy/machine 36 | 37 | # Then, from the Agorakube root directory, run the following command: 38 | 39 | sudo ansible-playbook tools/etcd/restore-etcd-cluster.yaml 40 | ``` 41 | 42 | ## How to check the state of the etcd ?
43 | 44 | You can check the state of the etcd cluster by running the command below with the ansible tag ```check-etcd```, which 45 | plays only the tasks dedicated to the etcd check: 46 | 47 | ```ansible-playbook agorakube.yaml --tags check-etcd``` 48 | 49 | ## How to upgrade/downgrade the etcd cluster ? 50 | 51 | Just edit the file "group_vars/all.yaml" with: 52 | 53 | ``` 54 | agorakube_base_components: 55 | etcd: 56 | release: v3.4.14 (Desired ETCD release) 57 | upgrade: True 58 | ``` 59 | Then apply Agorakube with the command `ansible-playbook agorakube.yaml`. 60 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/configure_storage_openebs.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: openebs.io/v1alpha1 3 | kind: StoragePool 4 | metadata: 5 | name: default-storage-pool-jiva 6 | type: hostdir 7 | spec: 8 | path: "{{ agorakube_features.storage.jiva.data_path }}" 9 | --- 10 | apiVersion: storage.k8s.io/v1 11 | kind: StorageClass 12 | metadata: 13 | name: default-jiva 14 | annotations: 15 | storageclass.kubernetes.io/is-default-class: "true" 16 | openebs.io/cas-type: jiva 17 | cas.openebs.io/config: | 18 | - name: ReplicaCount 19 | value: "{{ replicas_openebs }}" 20 | - name: StoragePool 21 | value: default-storage-pool-jiva 22 | - name: VolumeMonitor 23 | value: ON 24 | - name: FSType 25 | value: "{{ agorakube_features.storage.jiva.fs_type }}" 26 | - name: ReplicaNodeSelector 27 | value: |- 28 | node-role.kubernetes.io/storage: true 29 | - name: ReplicaTolerations 30 | value: |- 31 | t1: 32 | key: "NoSchedulabe" 33 | operator: "Exists" 34 | effect: "NoSchedule" 35 | - name: TargetTolerations 36 | value: |- 37 | t1: 38 | key: "NoSchedulabe" 39 | operator: "Exists" 40 | effect: "NoSchedule" 41 | provisioner: openebs.io/provisioner-iscsi 42 | reclaimPolicy: Delete 43 | volumeBindingMode: Immediate 44 | --- 45 | apiVersion: storage.k8s.io/v1 46 | kind: StorageClass 47 | metadata: 48 | name: local-hostpath 49 | annotations: 50 | openebs.io/cas-type: local 51 | cas.openebs.io/config: | 52 | - name: StorageType 53 | value: hostpath 54 | - name: BasePath 55 | value: {{ agorakube_features.storage.hostpath.data_path }} 56 | provisioner: openebs.io/local 57 | reclaimPolicy: Delete 58 | volumeBindingMode: WaitForFirstConsumer 59 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/metallb_l2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create tempdir 3 | tempfile: 4 | state: directory 5 | suffix: metallb 6 | register: metallb_tempdir 7 | changed_when: false 8 | 9 | - name: Render templates 10 | template: 11 | dest: "{{ metallb_tempdir.path }}/metal-lb-layer2.yaml" 12 | src: "metal-lb-layer2.yaml.j2" 13 | changed_when: false 14 | 15 | - name: Apply templates 16 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ metallb_tempdir.path }}/metal-lb-layer2.yaml 17 | register: apply_metallb 18 | changed_when: > 19 | apply_metallb.stdout is search("created") 20 | or apply_metallb.stdout is search("configured") 21 | 22 | - name: Cleanup tempdir 23 | file: 24 | state: absent 25 | path: "{{ metallb_tempdir.path }}" 26 | changed_when: false 27 | 28 | #- name: Test if secret memberlist exist in namespace metallb-system 29 | # command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get secret -n
metallb-system 30 | # register: search_memberlist 31 | # changed_when: False 32 | 33 | #- name: Create memberlist secret if it doesn t exist 34 | # command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" 35 | # register: apply_memberlist 36 | # changed_when: > 37 | # apply_memberlist.stdout is search("created") 38 | # or apply_memberlist.stdout is search("configured") 39 | # when: search_memberlist.stdout.find("memberlist") != -1 40 | 41 | - name: Verify if metallb pods has successfully started 42 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n metallb-system controller 43 | changed_when: false 44 | register: metallb_deployment 45 | until: metallb_deployment.stdout.find("1/1") != -1 46 | retries: 300 47 | delay: 10 48 | run_once: true 49 | -------------------------------------------------------------------------------- /roles/setup-worker/tasks/delete-k8s-components.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Stop the Services before uninstalling them 3 | - name: Stop Kubelet Service 4 | systemd: 5 | state: stopped 6 | name: kubelet 7 | tags: ['never', 'uninstall'] 8 | ignore_errors: yes 9 | 10 | - name: Stop Kube-proxy Service 11 | systemd: 12 | state: stopped 13 | name: kube-proxy 14 | tags: ['never', 'uninstall'] 15 | ignore_errors: yes 16 | 17 | # Delete Services Definition 18 | - name: Delete Kubelet Service 19 | file: 20 | path: /etc/systemd/system/kubelet.service 21 | state: absent 22 | notify: 23 | - Reload Daemons 24 | tags: ['never', 'uninstall'] 25 | 26 | - name: Delete Kube-proxy Service 27 | file: 28 | path: /etc/systemd/system/kube-proxy.service 29 | state: absent 30 | notify: 31 | - Reload Daemons 32 | tags: ['never', 'uninstall'] 33 | 34 | - name: Get kubelet volumes 35 | shell: > 36 | df -HT | grep '/var/lib/kubelet/' | awk '{print $7}' 37 | register: kubelet_volumes 38 | tags: ['never', 'uninstall'] 39 | 40 | - name: Unmount kubelet volumes 41 | mount: 42 | path: "{{ item }}" 43 | state: unmounted 44 | tags: ['never', 'uninstall'] 45 | when: kubelet_volumes.stdout != "" 46 | with_items: 47 | - "{{ kubelet_volumes.stdout_lines }}" 48 | 49 | # Delete Kubelet 50 | - name: Delete Kubelet data dir 51 | file: 52 | path: /var/lib/kubelet 53 | state: absent 54 | tags: ['never', 'uninstall'] 55 | 56 | # Delete PKI files and Data Directory 57 | - name: Delete PKI files 58 | file: 59 | path: /etc/kubernetes 60 | state: absent 61 | tags: ['never', 'uninstall'] 62 | 63 | # Delete Binaries 64 | - name: Delete Node Binaries 65 | file: 66 | path: /usr/local/bin/kubernetes 67 | state: absent 68 | tags: ['never', 'uninstall'] 69 | 70 | # Delete Logs 71 | - name: Delete Logs 72 | file: 73 | path: /var/log/kubernetes 74 | state: absent 75 | tags: ['never', 'uninstall'] 76 | -------------------------------------------------------------------------------- /setup-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Determine OS platform 3 | UNAME=$(uname | tr "[:upper:]" "[:lower:]") 4 | # If Linux, try to determine specific distribution 5 | if [ "$UNAME" == "linux" ]; then 6 | # If available, use LSB to identify distribution 7 | if [ -f /etc/lsb-release -o -d /etc/lsb-release.d ]; then 8 | export DISTRO=$(lsb_release -i | cut -d: -f2 | sed s/'^\t'//) 9 | # Otherwise, use release info file 10 | else 11 | export 
DISTRO=$(ls -d /etc/[A-Za-z]*[_-][rv]e[lr]* | grep -v "lsb" | cut -d'/' -f3 | cut -d'-' -f1 | cut -d'_' -f1) 12 | fi 13 | fi 14 | # For everything else (or if above failed), just use generic identifier 15 | [ "$DISTRO" == "" ] && export DISTRO=$UNAME 16 | unset UNAME 17 | DISTRO=$(echo $DISTRO | tr '[:upper:]' '[:lower:]') 18 | #echo $DISTRO 19 | 20 | if [[ $DISTRO == *"ubuntu"* ]]; then 21 | export DEBIAN_FRONTEND=noninteractive 22 | sudo killall apt apt-get 23 | sudo apt update 24 | sudo apt install software-properties-common curl git openssh-server -yqq 25 | sudo apt -y install python3 python3-pip 26 | sudo pip3 install ansible 27 | sudo pip3 install netaddr 28 | git clone https://github.com/ilkilab/agorakube.git -b master 29 | 30 | elif [[ $DISTRO == *"centos"* ]]; then 31 | sudo killall -9 yum 32 | sudo yum update -y && sudo yum install -y curl openssh-server git 33 | sudo yum -y --enablerepo=extras install epel-release 34 | sudo yum -y install python python-pip libselinux-python python-netaddr 35 | sudo pip install ansible 36 | sudo pip install netaddr 37 | git clone https://github.com/ilkilab/agorakube.git -b master 38 | 39 | elif [[ $DISTRO == *"debian"* ]]; then 40 | export DEBIAN_FRONTEND=noninteractive 41 | sudo killall apt apt-get 42 | sudo apt update 43 | sudo apt install software-properties-common curl git openssh-server -yqq 44 | sudo apt -y install python3 python3-pip 45 | sudo pip3 install ansible 46 | sudo pip3 install netaddr 47 | git clone https://github.com/ilkilab/agorakube.git -b master 48 | 49 | else 50 | echo "Unsupported OS" 51 | exit 52 | fi 53 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/delete-containerd-components.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Stop Runtime 3 | - name: Stop Runtime 4 | systemd: 5 | state: stopped 6 | name: containerd 7 | tags: ['never', 'uninstall'] 8 | ignore_errors: yes 9 | 10 | # Uninstall Containerd 11 | - name: Uninstall Containerd 12 | package: 13 | name: containerd.io 14 | state: absent 15 | tags: ['never', 'uninstall'] 16 | 17 | # Delete Config Files and Data Directory 18 | - name: Delete Containerd Config Files and Data Directory 19 | file: 20 | path: /etc/containerd 21 | state: absent 22 | tags: ['never', 'uninstall'] 23 | 24 | # Delete Data 25 | - name: Delete Data 26 | file: 27 | path: /var/lib/containerd 28 | state: absent 29 | tags: ['never', 'uninstall'] 30 | 31 | # Delete all Container Network Interface (CNI) on the host 32 | - name: Delete /etc/cni 33 | file: 34 | path: /etc/cni 35 | state: absent 36 | tags: ['never', 'uninstall'] 37 | 38 | - name: Delete /var/lib/cni 39 | file: 40 | path: /var/lib/cni 41 | state: absent 42 | tags: ['never', 'uninstall'] 43 | 44 | - name: Make sure AWK is installed 45 | package: 46 | name: gawk 47 | state: present 48 | tags: ['never', 'uninstall'] 49 | 50 | - name: get containerd mount points 51 | shell: > 52 | df -HT | grep '/run/containerd/' | awk '{print $7}' 53 | register: containerd_volumes 54 | tags: ['never', 'uninstall'] 55 | 56 | - name: Unmount containerd volumes 57 | mount: 58 | path: "{{ item }}" 59 | state: unmounted 60 | tags: ['never', 'uninstall'] 61 | when: containerd_volumes.stdout != "" 62 | with_items: 63 | - "{{ containerd_volumes.stdout_lines }}" 64 | 65 | - name: Delete containerd data dir 66 | file: 67 | path: /run/containerd/ 68 | state: absent 69 | tags: ['never', 'uninstall'] 70 | 71 | - name: Delete /var/lib/containerd 72 | file: 
73 | path: /var/lib/containerd 74 | state: absent 75 | tags: ['never', 'uninstall'] 76 | 77 | - name: Delete /var/run/containerd 78 | file: 79 | path: /var/run/containerd 80 | state: absent 81 | tags: ['never', 'uninstall'] 82 | -------------------------------------------------------------------------------- /roles/setup-master/templates/kube-scheduler.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if (agorakube_base_components.kubernetes.release | replace('v','')) is version('1.19.0', '<') %} 2 | algorithmSource: 3 | provider: DefaultProvider 4 | apiVersion: kubescheduler.config.k8s.io/v1alpha1 5 | bindTimeoutSeconds: 600 6 | clientConnection: 7 | acceptContentTypes: "" 8 | burst: 100 9 | contentType: application/vnd.kubernetes.protobuf 10 | kubeconfig: /etc/kubernetes/manifests/scheduler.conf 11 | qps: 50 12 | disablePreemption: false 13 | enableContentionProfiling: false 14 | enableProfiling: false 15 | hardPodAffinitySymmetricWeight: 1 16 | healthzBindAddress: 0.0.0.0:10251 17 | kind: KubeSchedulerConfiguration 18 | leaderElection: 19 | leaderElect: true 20 | leaseDuration: 15s 21 | lockObjectName: kube-scheduler 22 | lockObjectNamespace: kube-system 23 | renewDeadline: 10s 24 | resourceLock: endpoints 25 | retryPeriod: 2s 26 | metricsBindAddress: 0.0.0.0:10251 27 | percentageOfNodesToScore: 0 28 | schedulerName: default-scheduler 29 | {% else %} 30 | {% if (agorakube_base_components.kubernetes.release | replace('v','')) is version('1.23.0', '<') %} 31 | apiVersion: kubescheduler.config.k8s.io/v1beta1 32 | {% endif %} 33 | {% if (agorakube_base_components.kubernetes.release | replace('v','')) is version('1.23.0', '>=') %} 34 | apiVersion: kubescheduler.config.k8s.io/v1beta2 35 | {% endif %} 36 | kind: KubeSchedulerConfiguration 37 | clientConnection: 38 | acceptContentTypes: "" 39 | burst: 100 40 | contentType: application/vnd.kubernetes.protobuf 41 | kubeconfig: /etc/kubernetes/manifests/scheduler.conf 42 | qps: 50 43 | {% if (agorakube_base_components.kubernetes.release | replace('v','')) is version('1.23.0', '>=') %} 44 | healthzBindAddress: 0.0.0.0:0 45 | {% endif %} 46 | leaderElection: 47 | leaderElect: true 48 | leaseDuration: 15s 49 | resourceName: kube-scheduler 50 | resourceNamespace: kube-system 51 | renewDeadline: 10s 52 | resourceLock: endpoints 53 | retryPeriod: 2s 54 | {% if (agorakube_base_components.kubernetes.release | replace('v','')) is version('1.23.0', '>=') %} 55 | metricsBindAddress: 0.0.0.0:0 56 | {% endif %} 57 | percentageOfNodesToScore: 0 58 | {% endif %} 59 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/logrotate.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | containers: | 5 | /var/log/pods/*/*/*.log { 6 | missingok 7 | notifempty 8 | daily 9 | rotate {{ agorakube_features.logrotate.day_retention }} 10 | compress 11 | copytruncate 12 | } 13 | kubernetes: | 14 | /var/log/kubernetes/*.log { 15 | missingok 16 | notifempty 17 | daily 18 | rotate {{ agorakube_features.logrotate.day_retention }} 19 | compress 20 | copytruncate 21 | } 22 | kind: ConfigMap 23 | metadata: 24 | namespace: kube-system 25 | name: logrotate-configs 26 | --- 27 | apiVersion: v1 28 | data: 29 | root: | 30 | # do daily/weekly/monthly maintenance 31 | # min hour day month weekday command 32 | {{ agorakube_features.logrotate.crontab }} logrotate -vf /etc/logrotate.conf 33 | kind: ConfigMap 34 | 
metadata: 35 | namespace: kube-system 36 | name: logrotate-crontab 37 | --- 38 | apiVersion: apps/v1 39 | kind: DaemonSet 40 | metadata: 41 | annotations: 42 | configmap.reloader.stakater.com/reload: "logrotate-crontab,logrotate-configs" 43 | labels: 44 | app: logrotate 45 | name: logrotate 46 | namespace: kube-system 47 | spec: 48 | selector: 49 | matchLabels: 50 | app: logrotate 51 | template: 52 | metadata: 53 | labels: 54 | app: logrotate 55 | spec: 56 | tolerations: 57 | - effect: NoSchedule 58 | operator: Exists 59 | containers: 60 | - image: agorakube/crond:alpine-3 61 | name: crond 62 | volumeMounts: 63 | - name: varlog 64 | mountPath: /var/log/ 65 | - name: logrotate-crontab 66 | mountPath: /etc/crontabs/ 67 | - name: logrotate-configs 68 | mountPath: /etc/logrotate.d/ 69 | volumes: 70 | - name: logrotate-configs 71 | configMap: 72 | name: logrotate-configs 73 | - name: logrotate-crontab 74 | configMap: 75 | name: logrotate-crontab 76 | - name: varlog 77 | hostPath: 78 | path: /var/log/ 79 | -------------------------------------------------------------------------------- /roles/generate-certs/tasks/oidc-keycloak.yaml: -------------------------------------------------------------------------------- 1 | - name: Create oidc dir 2 | file: 3 | path: "{{ pki_path }}/oidc" 4 | state: directory 5 | 6 | - name: Generate oidc-ca.key private keys 7 | openssl_privatekey: 8 | path: "{{ pki_path }}/oidc/oidc-ca.key" 9 | force: false 10 | 11 | - name: Generate an OpenSSL Certificate Signing Request for OIDC CA 12 | openssl_csr: 13 | path: "{{ pki_path }}/oidc/oidc-ca.csr" 14 | privatekey_path: "{{ pki_path }}/oidc/oidc-ca.key" 15 | common_name: "{{ agorakube_pki.infos.country }}" 16 | state_or_province_name: "{{ agorakube_features.keycloak_oidc.auto_bootstrap.host }}" 17 | locality_name: "{{ agorakube_pki.infos.locality }}" 18 | country_name: "{{ agorakube_pki.infos.country }}" 19 | basic_constraints: ['CA:TRUE'] 20 | 21 | - name: Generate an OpenSSL certificate signed with your own OIDC CA certificate 22 | openssl_certificate: 23 | path: "{{ pki_path }}/oidc/oidc-ca.crt" 24 | privatekey_path: "{{ pki_path }}/oidc/oidc-ca.key" 25 | csr_path: "{{ pki_path }}/oidc/oidc-ca.csr" 26 | provider: selfsigned 27 | selfsigned_not_after: "{{ agorakube_pki.infos.expirity }}" 28 | force: false 29 | 30 | - name: Generate oidc-end.key private keys 31 | openssl_privatekey: 32 | path: "{{ pki_path }}/oidc/oidc-end.key" 33 | force: false 34 | 35 | - name: Generate an OpenSSL Certificate Signing Request for OIDC-END CSR 36 | openssl_csr: 37 | path: "{{ pki_path }}/oidc/oidc-end.csr" 38 | privatekey_path: "{{ pki_path }}/oidc/oidc-end.key" 39 | common_name: "{{ agorakube_features.keycloak_oidc.auto_bootstrap.host }}" 40 | state_or_province_name: "{{ agorakube_pki.infos.state }}" 41 | locality_name: "{{ agorakube_pki.infos.locality }}" 42 | country_name: "{{ agorakube_pki.infos.country }}" 43 | basic_constraints: ['CA:TRUE'] 44 | 45 | - name: Generate an OpenSSL certificate signed OIDC END certificate 46 | openssl_certificate: 47 | path: "{{ pki_path }}/oidc/oidc-end.crt" 48 | csr_path: "{{ pki_path }}/oidc/oidc-end.csr" 49 | ownca_path: "{{ pki_path }}/oidc/oidc-ca.crt" 50 | ownca_privatekey_path: "{{ pki_path }}/oidc/oidc-ca.key" 51 | provider: ownca 52 | force: false 53 | -------------------------------------------------------------------------------- /roles/setup-etcd/tasks/check-etcd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify if etcd cluster 
has successfully started 3 | command: >- 4 | /usr/bin/etcd-{{ agorakube_base_components.etcd.release }}-linux-amd64/etcdctl member list -w=table 5 | --endpoints=https://127.0.0.1:2379 6 | --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key 7 | environment: 8 | ETCDCTL_API: 3 9 | changed_when: false 10 | register: etcd_memberlist 11 | until: etcd_memberlist.stdout.find("started") != -1 12 | retries: 5 13 | delay: 10 14 | run_once: true 15 | tags: check-etcd 16 | 17 | - name: Show etcd member list 18 | debug: 19 | msg: "{{ etcd_memberlist.stdout_lines }}" 20 | run_once: true 21 | tags: check-etcd 22 | 23 | - name: Verify etcd cluster health 24 | command: >- 25 | /usr/bin/etcd-{{ agorakube_base_components.etcd.release }}-linux-amd64/etcdctl endpoint health -w=table 26 | --endpoints=https://127.0.0.1:2379 27 | --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key 28 | environment: 29 | ETCDCTL_API: 3 30 | changed_when: false 31 | register: etcd_clusterhealth 32 | until: etcd_clusterhealth.stdout.find("true") != -1 33 | retries: 5 34 | delay: 10 35 | tags: check-etcd 36 | 37 | - name: Show etcd cluster health 38 | debug: 39 | msg: "{{ etcd_clusterhealth.stdout_lines }}" 40 | run_once: true 41 | tags: check-etcd 42 | - name: Get etcd cluster status 43 | command: >- 44 | /usr/bin/etcd-{{ agorakube_base_components.etcd.release }}-linux-amd64/etcdctl endpoint status -w=table 45 | --endpoints=https://127.0.0.1:2379 46 | --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key 47 | environment: 48 | ETCDCTL_API: 3 49 | changed_when: false 50 | register: etcd_clusterstatus 51 | tags: check-etcd 52 | 53 | - name: Show etcd cluster status 54 | debug: 55 | msg: "{{ etcd_clusterstatus.stdout_lines }}" 56 | run_once: true 57 | tags: check-etcd 58 | -------------------------------------------------------------------------------- /roles/install-runtimes/files/config.toml: -------------------------------------------------------------------------------- 1 | root = "/var/lib/containerd" 2 | state = "/run/containerd" 3 | oom_score = 0 4 | 5 | [grpc] 6 | address = "/run/containerd/containerd.sock" 7 | uid = 0 8 | gid = 0 9 | max_recv_message_size = 16777216 10 | max_send_message_size = 16777216 11 | 12 | [debug] 13 | address = "" 14 | uid = 0 15 | gid = 0 16 | level = "" 17 | 18 | [metrics] 19 | address = "" 20 | grpc_histogram = false 21 | 22 | [cgroup] 23 | path = "" 24 | 25 | [plugins] 26 | [plugins.cgroups] 27 | no_prometheus = false 28 | [plugins.cri] 29 | stream_server_address = "127.0.0.1" 30 | stream_server_port = "0" 31 | enable_selinux = false 32 | sandbox_image = "k8s.gcr.io/pause:3.1" 33 | stats_collect_period = 10 34 | systemd_cgroup = false 35 | enable_tls_streaming = false 36 | max_container_log_line_size = 16384 37 | disable_proc_mount = false 38 | [plugins.cri.containerd] 39 | snapshotter = "overlayfs" 40 | no_pivot = false 41 | [plugins.cri.containerd.default_runtime] 42 | runtime_type = "io.containerd.runtime.v1.linux" 43 | runtime_engine = "" 44 | runtime_root = "" 45 | [plugins.cri.containerd.untrusted_workload_runtime] 46 | runtime_type = "" 47 | runtime_engine = "" 48 | runtime_root = "" 49 | [plugins.cri.cni] 50 | bin_dir = "/opt/cni/bin" 51 | conf_dir = "/etc/cni/net.d" 52 | conf_template = "" 53 | 
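# Note (illustrative only, not part of the shipped defaults): additional registry
# mirrors could be declared next to the docker.io entry in the section that follows,
# for example a private mirror such as:
#   [plugins.cri.registry.mirrors."registry.example.local"]
#     endpoint = ["https://registry.example.local"]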
[plugins.cri.registry] 54 | [plugins.cri.registry.mirrors] 55 | [plugins.cri.registry.mirrors."docker.io"] 56 | endpoint = ["https://registry-1.docker.io"] 57 | [plugins.cri.x509_key_pair_streaming] 58 | tls_cert_file = "" 59 | tls_key_file = "" 60 | [plugins.diff-service] 61 | default = ["walking"] 62 | [plugins.linux] 63 | shim = "containerd-shim" 64 | runtime = "runc" 65 | runtime_root = "" 66 | no_shim = false 67 | shim_debug = false 68 | [plugins.opt] 69 | path = "/opt/containerd" 70 | [plugins.restart] 71 | interval = "10s" 72 | [plugins.scheduler] 73 | pause_threshold = 0.02 74 | deletion_threshold = 0 75 | mutation_threshold = 100 76 | schedule_delay = "0s" 77 | startup_delay = "100ms" 78 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/backup-etcd-cronjob.yaml: -------------------------------------------------------------------------------- 1 | - name: Update etcd CA crt 2 | shell: kubectl -n kube-system --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf create configmap etcd-ca.crt --from-file={{ agorakube.global.data_path }}/pki/intermediate/etcd/ca.crt -o yaml --dry-run | kubectl apply -f - 3 | register: apply_etcd_ca 4 | changed_when: > 5 | apply_etcd_ca.stdout is search("created") 6 | or apply_etcd_ca.stdout is search("configured") 7 | 8 | - name: Update etcd backup crt 9 | shell: kubectl -n kube-system --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf create configmap etcd-healthcheck-client.crt --from-file={{ agorakube.global.data_path }}/pki/end/kube-etcd-healthcheck-client.crt -o yaml --dry-run | kubectl apply -f - 10 | register: apply_etcd_client_crt 11 | changed_when: > 12 | apply_etcd_client_crt.stdout is search("created") 13 | or apply_etcd_client_crt.stdout is search("configured") 14 | 15 | - name: Update etcd backup key 16 | shell: kubectl -n kube-system --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf create secret generic etcd-healthcheck-client.key --from-file={{ agorakube.global.data_path }}/pki/end/kube-etcd-healthcheck-client.key -o yaml --dry-run | kubectl apply -f - 17 | register: apply_etcd_client_key 18 | changed_when: > 19 | apply_etcd_client_key.stdout is search("created") 20 | or apply_etcd_client_key.stdout is search("configured") 21 | 22 | - name: Create tempdir 23 | tempfile: 24 | state: directory 25 | suffix: backup_etcd 26 | register: backup_etcd_tempdir 27 | changed_when: false 28 | 29 | - name: Render templates 30 | template: 31 | dest: "{{ backup_etcd_tempdir.path }}/backup_etcd.yaml" 32 | src: "backup-etcd-cronjob.yaml.j2" 33 | changed_when: false 34 | 35 | - name: Apply templates 36 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ backup_etcd_tempdir.path }}/backup_etcd.yaml 37 | register: apply_backup_etcd 38 | changed_when: > 39 | apply_backup_etcd.stdout is search("created") 40 | or apply_backup_etcd.stdout is search("configured") 41 | 42 | - name: Cleanup tempdir 43 | file: 44 | state: absent 45 | path: "{{ backup_etcd_tempdir.path }}" 46 | changed_when: false 47 | -------------------------------------------------------------------------------- /roles/show-info/templates/info.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | ################################################################# 4 | ### ILKI KUBERNETES ENGINE Installation Information ### 5 | ################################################################# 6 | 7 | 8 | Thank you for using ILKI KUBERNETES ENGINE to 
deploy and manage your Kubernetes installation. 9 | 10 | This file (agorakube-info.txt) is stored in the root home directory for your reference. 11 | 12 | ############################################################# 13 | 14 | ################## 15 | # Kubectl config # 16 | ################## 17 | The kubectl config file is located at: /root/.kube/config 18 | 19 | {% if agorakube_features.dashboard.enabled | bool == True %} 20 | ######################## 21 | # Kubernetes Dashboard # 22 | ######################## 23 | The Kubernetes dashboard can be accessed by following the steps below: 24 | 25 | 1. Run the following command on the deploy (Ansible control) machine. 26 | # kubectl proxy 27 | 28 | 2. Visit the following URL on the deploy machine. 29 | http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ 30 | 31 | {% if agorakube_features.dashboard.generate_admin_token | bool == True %} 32 | 3. The admin token is stored in: /root/.kube/dashboardadmin 33 | {% endif %} 34 | {% endif %} 35 | ################################################################################ 36 | 37 | Run the following command to see this information again at any time. 38 | $ cat /root/agorakube-info.txt 39 | 40 | ############################################################################### 41 | 42 | ################################################################################ 43 | 44 | {% if agorakube_features.supervision.monitoring.enabled | bool == True %} 45 | 46 | 1. By default, run the following command on your client machine (where your browser is installed). 47 | # kubectl port-forward service/grafana 3000:3000 -n supervision 48 | 2. To log in to Grafana for the first time, open your web browser and go to 49 | # http://localhost:3000 50 | 51 | 3.
To log in, enter the default Grafana admin user and the default Grafana admin password: 52 | 53 | user: {{ agorakube_features.supervision.dashboard.admin.user }} 54 | password: {{ agorakube_features.supervision.dashboard.admin.password }} 55 | 56 | {% endif %} 57 | 58 | ############################################################################## 59 | -------------------------------------------------------------------------------- /roles/install-runtimes/tasks/uninstall-docker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Stop docker 3 | systemd: 4 | state: stopped 5 | name: docker 6 | tags: [ 'never', 'uninstall' ] 7 | ignore_errors: yes 8 | 9 | - name: Uninstall docker 10 | package: 11 | name: ["docker-ce","docker-ce-cli"] 12 | state: absent 13 | when: agorakube_base_components.container.release|length == 0 14 | tags: ['never', 'uninstall'] 15 | 16 | - name: Uninstall docker at a specific release 17 | package: 18 | name: ["docker-ce={{ agorakube_base_components.container.release }}","docker-ce-cli={{ agorakube_base_components.container.release }}"] 19 | state: absent 20 | when: agorakube_base_components.container.release|length > 0 21 | tags: ['never', 'uninstall'] 22 | 23 | - name: Uninstall Docker for Debian-like distros 24 | file: 25 | path: "docker-debian-like.yaml" 26 | state: absent 27 | when: is_debian_like 28 | tags: ['never', 'uninstall'] 29 | 30 | - name: Uninstall Docker for CentOS-like distros 31 | file: 32 | path: "docker-centos-like.yaml" 33 | state: absent 34 | when: is_centos_like 35 | tags: ['never', 'uninstall'] 36 | 37 | - name: Delete Docker config files 38 | file: 39 | path: "/etc/docker" 40 | state: absent 41 | tags: ['never', 'uninstall'] 42 | 43 | - name: Delete all Docker network interfaces present on the host 44 | file: 45 | path: "/var/lib/cni/networks" 46 | state: absent 47 | tags: ['never', 'uninstall'] 48 | 49 | - name: Delete plugin CNI 50 | file: 51 | path: /etc/cni/net.d 52 | state: absent 53 | tags: ['never', 'uninstall'] 54 | 55 | - name: Delete /var/lib/docker 56 | file: 57 | path: /var/lib/docker 58 | state: absent 59 | tags: ['never', 'uninstall'] 60 | 61 | - name: Delete /var/lib/dockershim 62 | file: 63 | path: /var/lib/dockershim 64 | state: absent 65 | tags: ['never', 'uninstall'] 66 | 67 | - name: Delete /var/run/dockershim.sock 68 | file: 69 | path: /var/run/dockershim.sock 70 | state: absent 71 | tags: ['never', 'uninstall'] 72 | 73 | - name: Delete /var/run/docker.sock 74 | file: 75 | path: /var/run/docker.sock 76 | state: absent 77 | tags: ['never', 'uninstall'] 78 | 79 | - name: Unmount /var/run/docker/netns/default volumes 80 | mount: 81 | path: /var/run/docker/netns/default 82 | state: unmounted 83 | tags: ['never', 'uninstall'] 84 | 85 | - name: Delete /var/run/docker dir 86 | file: 87 | path: /var/run/docker 88 | state: absent 89 | tags: ['never', 'uninstall'] 90 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/reloader.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app: reloader-reloader 7 | name: reloader-reloader-role 8 | namespace: kube-system 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - secrets 14 | - configmaps 15 | verbs: 16 | - list 17 | - get 18 | - watch 19 | - apiGroups: 20 | - "apps" 21 | resources: 22 | - deployments 23 | - daemonsets
24 | - statefulsets 25 | verbs: 26 | - list 27 | - get 28 | - update 29 | - patch 30 | - apiGroups: 31 | - "extensions" 32 | resources: 33 | - deployments 34 | - daemonsets 35 | verbs: 36 | - list 37 | - get 38 | - update 39 | - patch 40 | --- 41 | apiVersion: rbac.authorization.k8s.io/v1 42 | kind: ClusterRoleBinding 43 | metadata: 44 | labels: 45 | app: reloader-reloader 46 | name: reloader-reloader-role-binding 47 | namespace: kube-system 48 | roleRef: 49 | apiGroup: rbac.authorization.k8s.io 50 | kind: ClusterRole 51 | name: reloader-reloader-role 52 | subjects: 53 | - kind: ServiceAccount 54 | name: reloader-reloader 55 | namespace: kube-system 56 | --- 57 | apiVersion: apps/v1 58 | kind: Deployment 59 | metadata: 60 | labels: 61 | app: reloader-reloader 62 | name: reloader-reloader 63 | namespace: kube-system 64 | spec: 65 | replicas: 1 66 | revisionHistoryLimit: 2 67 | selector: 68 | matchLabels: 69 | app: reloader-reloader 70 | release: "reloader" 71 | template: 72 | metadata: 73 | labels: 74 | app: reloader-reloader 75 | release: "reloader" 76 | spec: 77 | containers: 78 | - image: "stakater/reloader:v{{ agorakube_features.reloader.release }}" 79 | imagePullPolicy: IfNotPresent 80 | name: reloader-reloader 81 | 82 | ports: 83 | - name: http 84 | containerPort: 9090 85 | livenessProbe: 86 | httpGet: 87 | path: /metrics 88 | port: http 89 | readinessProbe: 90 | httpGet: 91 | path: /metrics 92 | port: http 93 | securityContext: 94 | runAsNonRoot: true 95 | runAsUser: 65534 96 | serviceAccountName: reloader-reloader 97 | --- 98 | apiVersion: v1 99 | kind: ServiceAccount 100 | metadata: 101 | labels: 102 | app: reloader-reloader 103 | name: reloader-reloader 104 | namespace: kube-system 105 | -------------------------------------------------------------------------------- /roles/setup-master/tasks/import-certs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Folder creation /etc/kubernetes/pki /etc/kubernetes/pki/etcd 3 | file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: root 7 | group: root 8 | mode: '644' 9 | recurse: True 10 | with_items: 11 | - /etc/kubernetes/pki/etcd 12 | - /etc/kubernetes/pki/sa/backups 13 | 14 | - name: Import all master components keys and certificates 15 | copy: 16 | src: "{{ pki_path }}/{{ item.src }}" 17 | dest: /etc/kubernetes/pki/{{ item.dest }} 18 | with_items: 19 | - { src: 'intermediate/etcd/ca.key', dest: 'etcd/ca.key' } 20 | - { src: 'intermediate/etcd/ca.crt', dest: 'etcd/ca.crt' } 21 | - { src: 'end/kube-apiserver-etcd-client.key', dest: 'apiserver-etcd-client.key' } 22 | - { src: 'end/kube-apiserver-etcd-client.crt', dest: 'apiserver-etcd-client.crt' } 23 | - { src: 'intermediate/ca.key', dest: 'ca.key' } 24 | - { src: 'intermediate/ca.crt', dest: 'ca.crt' } 25 | - { src: 'end/kube-apiserver.key', dest: 'apiserver.key' } 26 | - { src: 'end/kube-apiserver.crt', dest: 'apiserver.crt' } 27 | - { src: 'end/kube-apiserver-kubelet-client.key', dest: 'apiserver-kubelet-client.key' } 28 | - { src: 'end/kube-apiserver-kubelet-client.crt', dest: 'apiserver-kubelet-client.crt' } 29 | - { src: 'intermediate/front-proxy-ca.key', dest: 'front-proxy-ca.key' } 30 | - { src: 'intermediate/front-proxy-ca.crt', dest: 'front-proxy-ca.crt' } 31 | - { src: 'end/front-proxy-client.key', dest: 'front-proxy-client.key' } 32 | - { src: 'end/front-proxy-client.crt', dest: 'front-proxy-client.crt' } 33 | notify: 34 | - Restart kube-apiserver service 35 | - Restart kube-scheduler service 36 | - Restart 
kube-controller-manager service 37 | 38 | - name: Import SA private keys and public key used for Token authentication 39 | copy: 40 | src: "{{ item }}" 41 | dest: /etc/kubernetes/pki/sa/ 42 | with_fileglob: 43 | - "{{ pki_path }}/sa/*" 44 | notify: 45 | - Restart kube-apiserver service 46 | - Restart kube-scheduler service 47 | - Restart kube-controller-manager service 48 | 49 | #- name: Import new token authentication key 50 | # copy: 51 | # src: "{{ item }}" 52 | # dest: /etc/kubernetes/pki/token_authentication_keys/ 53 | # owner: root 54 | # group: root 55 | # mode: '600' 56 | # with_fileglob: 57 | # - "{{ data_path }}/authentication_keys/*" 58 | 59 | - name: Import CA Cert OIDC 60 | copy: 61 | src: "{{ pki_path }}/oidc/oidc-ca.crt" 62 | dest: /etc/kubernetes/pki/oidc/ 63 | notify: 64 | - Restart kube-apiserver service 65 | when: 66 | - agorakube_features.keycloak_oidc.auto_bootstrap.bootstrap_keycloak | bool == True 67 | - agorakube_features.keycloak_oidc.enabled | bool == True 68 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include label hosts 3 | include_tasks: label-hosts.yaml 4 | 5 | - name: Include Tasks Calico 6 | include_tasks: calico.yaml 7 | when: agorakube_network.cni_plugin == 'calico' 8 | 9 | - name: Include Tasks Kube-router 10 | include_tasks: kube-router.yaml 11 | when: agorakube_network.cni_plugin == 'kube-router' 12 | 13 | - name: Deploy CoreDNS to K8S 14 | include_tasks: coredns.yaml 15 | 16 | - name: Install MetalLB Layer2 17 | include_tasks: metallb_l2.yaml 18 | when: agorakube_network.external_loadbalancing.enabled | bool == True 19 | 20 | - name: Install Metrics-Server 21 | include_tasks: metrics-server.yaml 22 | when: agorakube_features.metrics_server.enabled | bool == True 23 | 24 | - name: Install Reloader 25 | include_tasks: reloader.yaml 26 | when: agorakube_features.reloader.enabled | bool == True 27 | 28 | - name: Install logrotate 29 | include_tasks: logrotate.yaml 30 | when: agorakube_features.logrotate.enabled | bool == True 31 | 32 | - name: Install ingress (traefik) 33 | include_tasks: traefik.yaml 34 | when: agorakube_features.ingress.controller == 'traefik' 35 | 36 | - name: Install ingress (haproxy) 37 | include_tasks: haproxy.yaml 38 | when: agorakube_features.ingress.controller == 'haproxy' 39 | 40 | - name: Install ingress (nginx) 41 | include_tasks: nginx.yaml 42 | when: agorakube_features.ingress.controller == 'nginx' 43 | 44 | - name: Install openebs for persistence 45 | include_tasks: openebs.yaml 46 | when: agorakube_features.storage.enabled | bool == True 47 | 48 | - name: Install Keycloak OIDC 49 | include_tasks: keycloak-oidc.yaml 50 | when: agorakube_features.keycloak_oidc.enabled | bool == True 51 | 52 | - name: Install K8S Default Dashboard 53 | include_tasks: default_dashboard.yaml 54 | when: agorakube_features.dashboard.enabled | bool == True 55 | 56 | - name: Install Monitoring 57 | include_tasks: monitoring.yaml 58 | when: agorakube_features.supervision.monitoring.enabled | bool == True 59 | 60 | - name: Install Grafana 61 | include_tasks: grafana.yaml 62 | when: agorakube_features.supervision.monitoring.enabled | bool == True or agorakube_features.supervision.logging.enabled | bool == True 63 | 64 | - name: enable etcd backup cronJob 65 | include_tasks: backup-etcd-cronjob.yaml 66 | when: agorakube_base_components.etcd.backup.enabled | bool == True 67 | 68 | 
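# Note (illustrative only): every optional component in this playbook follows the
# same pattern -- an include_tasks guarded by a feature flag defined in group_vars.
# A hypothetical new add-on (not shipped with Agorakube) would be wired the same way:
#
# - name: Install my-addon (example only)
#   include_tasks: my-addon.yaml
#   when: agorakube_features.my_addon.enabled | bool == True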
- name: Install Gatekeeper 69 | include_tasks: gatekeeper.yaml 70 | when: agorakube_features.gatekeeper.enabled | bool == True 71 | 72 | - name: Deploy Log centralization 73 | include_tasks: log-centralization.yaml 74 | when: agorakube_features.supervision.logging.enabled | bool == True 75 | 76 | - name: Install argocd 77 | include_tasks: argocd.yaml 78 | when: agorakube_features.argocd.enabled | bool == True 79 | -------------------------------------------------------------------------------- /LOCAL_ENVIRONMENT.md: -------------------------------------------------------------------------------- 1 | # Local AGORAKUBE Development Environment 2 | 3 | You can create a local environment by using Vagrant. 4 | The document below describes the prerequisites for an AGORAKUBE local environment and how to start using it. 5 | 6 | 7 | ## Pre-requisites 8 | 9 | * [Vagrant](https://www.vagrantup.com/downloads) 10 | * [VirtualBox](https://www.virtualbox.org/wiki/Downloads) 11 | 12 | 13 | ## Environment customization 14 | 15 | We use Vagrant and VirtualBox to deploy local environments. 16 | 17 | Test environments are located in the [labs](./labs) folder, which contains a sub-folder for each configuration you may want to implement. Feel free to customize the Vagrantfiles according to your needs! 18 | 19 | 20 | ## Start the environment 21 | 22 | 1) Simply open a terminal and go to the [labs/multi-nodes](./labs/multi-nodes) or [labs/all-in-one](./labs/all-in-one) folder, depending on the configuration you want to deploy. 23 | 24 | 25 | 2) Once you are in the folder that contains your "Vagrantfile", run the command: 26 | 27 | ``` 28 | vagrant up 29 | ``` 30 | 31 | 3) Once the AGORAKUBE installation is finished, a kubeconfig file ("config") is generated next to your Vagrantfile. You can use this file to manage your Kubernetes installation with [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), or you can connect to the deploy machine with the following command: 32 | 33 | ``` 34 | vagrant ssh deploy (all-in-one configuration) 35 | 36 | vagrant ssh worker1 (multi-nodes configuration) 37 | ``` 38 | 39 | 4) The Kubernetes CLI "kubectl" is configured for the root user, so use the following command to become root: 40 | 41 | ``` 42 | sudo su 43 | ``` 44 | 45 | 5) You can now enjoy your fresh AGORAKUBE/K8S cluster! Use the following command to print the K8S version: 46 | 47 | ``` 48 | kubectl version 49 | ``` 50 | 51 | 52 | 6) If you want to stop your Kubernetes cluster, just go to your Vagrantfile folder and run: 53 | 54 | ``` 55 | vagrant halt -f 56 | ``` 57 | 58 | You can also start your K8S cluster again with `vagrant up`. If `vagrant up` does not start all the machines correctly, simply start them manually in VirtualBox.
59 | 60 | 61 | 7) If you want to destroy your local cluster, just run: 62 | 63 | ``` 64 | vagrant destroy -f 65 | ``` 66 | 67 | 68 | ## Best practices 69 | 70 | If you want to test your branch in the local environment, be sure to follow these tips: 71 | 72 | 1) Create a test folder in which you will clone your branch: 73 | 74 | ``` 75 | mkdir test 76 | 77 | cd test 78 | 79 | git clone https://github.com/repo/agorakube.git -b test-branch 80 | ``` 81 | 82 | 83 | 2) Copy the "hosts" inventory file to your branch: 84 | 85 | ``` 86 | cp agorakube/hosts test/agorakube/hosts 87 | ``` 88 | 89 | 90 | 3) You are now ready to test: 91 | 92 | ``` 93 | source /usr/local/agorakube-env/bin/activate 94 | 95 | ansible-playbook agorakube.yaml 96 | ``` 97 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/Readme.md: -------------------------------------------------------------------------------- 1 | # Notes 2 | 3 | ## Metrics-Server 4 | We use the official Metrics-Server YAML file. 5 | 6 | We **will** add a **{{ agorakube_features.metrics_server.release }}** variable to choose the Metrics-Server release to install. 7 | 8 | ## CoreDNS 9 | 10 | We use the official CoreDNS YAML file. 11 | We added the **{{ agorakube_features.coredns.release }}** variable to choose the CoreDNS release to install. 12 | We added the **{{ agorakube_features.coredns.replicas }}** variable to choose the number of CoreDNS replicas that will be deployed. 13 | 14 | ## Nginx 15 | 16 | We use the official Nginx controller YAML file. 17 | 18 | We added the **{{ agorakube_features.ingress.release }}** variable to choose the Nginx release to install. 19 | 20 | This is useful when installing and updating Nginx with IKE. 21 | 22 | ## Calico 23 | 24 | Calico is configured in VxLAN-Cross-Subnet mode.
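As a quick sanity check on a running cluster (illustrative; this assumes the default IP pool name created by the upstream manifest), the active dataplane mode can be inspected with:

```
kubectl get ippool default-ipv4-ippool -o yaml | grep -i vxlan
```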
25 | 26 | When updating calico.yaml.j2, make sure the YAML file contains: 27 | 28 | In the **"calico-config"** ConfigMap: 29 | * calico_backend: "vxlan" 30 | * veth_mtu: "{{ calico_mtu }}" 31 | 32 | In the *"calico-node"* DaemonSet: 33 | * Env: CALICO_IPV4POOL_IPIP: "Never" 34 | * Env: CALICO_IPV4POOL_VXLAN: "CrossSubnet" 35 | * Env: CALICO_IPV4POOL_CIDR: "{{ cluster_cidr }}" 36 | * Comment out or delete "-bird-live" and "-bird-ready" in the livenessProbe and readinessProbe 37 | 38 | ## OpenEBS 39 | 40 | OpenEBS is configured with 2 files: 41 | * openebs.yaml.j2: Deploys the control plane. 42 | If updating, make sure that the following params are set: 43 | ``` 44 | MAYA API 45 | 46 | - name: OPENEBS_IO_BASE_DIR 47 | value: "{{ openebs_io_base_dir }}" 48 | - name: OPENEBS_IO_CSTOR_TARGET_DIR 49 | value: "{{ openebs_io_base_dir }}/sparse" 50 | - name: OPENEBS_IO_CSTOR_POOL_SPARSE_DIR 51 | value: "{{ openebs_io_base_dir }}/sparse" 52 | - name: OPENEBS_IO_JIVA_POOL_DIR 53 | value: "{{ openebs_io_base_dir }}" 54 | - name: OPENEBS_IO_LOCALPV_HOSTPATH_DIR 55 | value: "{{ openebs_io_base_dir }}/local" 56 | - name: OPENEBS_IO_JIVA_REPLICA_COUNT 57 | value: "{{ replicas_openebs }}" 58 | - name: OPENEBS_IO_ENABLE_ANALYTICS 59 | value: "false" 60 | - name: OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG 61 | value: "false" 62 | 63 | 64 | 65 | NDM 66 | 67 | - name: SPARSE_FILE_DIR 68 | value: "{{ openebs_io_base_dir }}/sparse" 69 | 70 | ``` 71 | Add to all Deployments and DaemonSets: 72 | 73 | ``` 74 | tolerations: 75 | - effect: NoSchedule 76 | operator: Exists 77 | nodeSelector: 78 | "node-role.kubernetes.io/storage": "true" 79 | ``` 80 | 81 | * configure_storage_openebs.yaml.j2: Configures the data plane (StoragePools and StorageClasses). 82 | 83 | ## Traefik 84 | 85 | We added the {{ agorakube_features.ingress.release }} variable to select the Traefik release to install. 86 | 87 | 88 | ## HA-PROXY 89 | 90 | We added the {{ agorakube_features.ingress.release }} variable to select the HA-PROXY release to install. 91 | 92 | {{ agorakube_features.ingress.release }} sample: "1.5.0". Releases follow Docker Hub tags!
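For reference, a minimal sketch of how these ingress settings might look in the inventory group_vars (key names inferred from the templates above; the values shown are only examples):

```
agorakube_features:
  ingress:
    controller: haproxy   # one of: traefik | haproxy | nginx
    release: "1.5.0"      # image tag, following Docker Hub tags
```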
93 | -------------------------------------------------------------------------------- /roles/setup-master/tasks/setup-master.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create master dir hierarchy 3 | file: 4 | path: /etc/kubernetes/manifests 5 | state: directory 6 | owner: root 7 | group: root 8 | mode: '644' 9 | recurse: True 10 | 11 | - name: Create /var/logs/kubernetes dir 12 | file: 13 | path: /var/log/kubernetes 14 | state: directory 15 | recurse: True 16 | 17 | - name: Download Kubernetes server binaries 18 | unarchive: 19 | src: https://dl.k8s.io/{{ agorakube_base_components.kubernetes.release }}/kubernetes-server-linux-amd64.tar.gz 20 | dest: /usr/local/bin 21 | remote_src: yes 22 | creates: /usr/local/bin/kubernetes/server/bin/kube-apiserver 23 | notify: 24 | - Restart kube-apiserver service 25 | - Restart kube-controller-manager service 26 | - Restart kube-scheduler service 27 | #- name: Retrive all keys to authenticate K8S service account tokens 28 | # find: 29 | # paths: "{{ pki_path }}/sa" 30 | # register: token_keys 31 | 32 | - name: Import kubernetes master services definitions in /etc/systemd/system/ 33 | template: 34 | src: "{{ item }}.j2" 35 | dest: /etc/systemd/system/{{ item }} 36 | owner: root 37 | group: root 38 | mode: '644' 39 | with_items: 40 | - kube-apiserver.service 41 | - kube-controller-manager.service 42 | - kube-scheduler.service 43 | notify: 44 | - Restart kube-apiserver service 45 | - Restart kube-controller-manager service 46 | - Restart kube-scheduler service 47 | 48 | 49 | - name: Import kube-apiserver configuration files 50 | copy: 51 | src: "{{ item }}" 52 | dest: /etc/kubernetes/manifests/{{ item }} 53 | owner: root 54 | group: root 55 | mode: '644' 56 | with_items: 57 | - eventconfig.yaml 58 | - admission-control-config-file.yaml 59 | - audit-policy.yaml 60 | notify: 61 | - Restart kube-apiserver service 62 | 63 | - name: Import controller-manager.conf kubeconfig 64 | copy: 65 | src: "{{ pki_path }}/kubeconfigs/controller-manager/controller-manager.conf" 66 | dest: /etc/kubernetes/manifests/controller-manager.conf 67 | owner: root 68 | group: root 69 | mode: '644' 70 | notify: 71 | - Restart kube-controller-manager service 72 | 73 | - name: Import kube scheduler yaml file config 74 | template: 75 | src: kube-scheduler.yaml.j2 76 | dest: /etc/kubernetes/manifests/kube-scheduler.yaml 77 | owner: root 78 | group: root 79 | mode: '644' 80 | notify: 81 | - Restart kube-scheduler service 82 | 83 | - name: Import /etc/kubernetes/manifests/scheduler.conf 84 | copy: 85 | src: "{{ pki_path }}/kubeconfigs/scheduler/scheduler.conf" 86 | dest: /etc/kubernetes/manifests/scheduler.conf 87 | owner: root 88 | group: root 89 | mode: '644' 90 | notify: 91 | - Restart kube-scheduler service 92 | 93 | - name: Inject kube-encrypt config 94 | template: 95 | src: encryption-provider-config.yaml.j2 96 | dest: /etc/kubernetes/manifests/encryption-provider-config.yaml 97 | owner: root 98 | group: root 99 | mode: '644' 100 | notify: 101 | - Restart kube-apiserver service 102 | 103 | - name: Load [kube-apiserver] [kube-controller-manager] [kube-scheduler] 104 | systemd: 105 | state: started 106 | daemon_reload: yes 107 | enabled: yes 108 | name: "{{ item }}" 109 | with_items: 110 | - kube-apiserver 111 | - kube-controller-manager 112 | - kube-scheduler 113 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: 
-------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at pvillard@ilki.fr. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /roles/setup-worker/tasks/setup-worker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install packages [socat] [conntrack] [ipset] 3 | package: 4 | name: ["socat", "conntrack", "ipset"] 5 | state: latest 6 | 7 | 8 | - name: install {{ iscsi_package_name }} package 9 | package: 10 | name: "{{ iscsi_package_name }}" 11 | state: latest 12 | when: agorakube_features.storage.enabled | bool == True 13 | 14 | - name: Ensure iscsid is running and enabled 15 | systemd: 16 | name: iscsid 17 | state: started 18 | enabled: yes 19 | daemon_reload: yes 20 | when: agorakube_features.storage.enabled | bool == True 21 | 22 | - name: Create /etc/kubernetes/manifests directory 23 | file: 24 | path: /etc/kubernetes/manifests 25 | state: directory 26 | recurse: True 27 | 28 | - name: Create /var/log/kubernetes directory 29 | file: 30 | path: /var/log/kubernetes 31 | state: directory 32 | recurse: True 33 | 34 | - name: Download Kubernetes node binaries 35 | unarchive: 36 | src: https://dl.k8s.io/{{ agorakube_base_components.kubernetes.release }}/kubernetes-node-linux-amd64.tar.gz 37 | dest: /usr/local/bin 38 | remote_src: yes 39 | creates: /usr/local/bin/kubernetes/node/bin/kubelet 40 | notify: 41 | - restart kubelet 42 | - restart kube-proxy 43 | when: 44 | - ansible_fqdn in groups['workers'] or ansible_fqdn in groups['storage'] 45 | - ansible_fqdn not in groups['masters'] 46 | 47 | - name: Create kubelet service 48 | template: 49 | src: kubelet.service.j2 50 | dest: /etc/systemd/system/kubelet.service 51 | owner: root 52 | group: root 53 | mode: '644' 54 | notify: 55 | - restart kubelet 56 | 57 | - name: Create kubelet config file 58 | template: 59 | src: kubelet-config.yaml.j2 60 | dest: /etc/kubernetes/manifests/kubelet-config.yaml 61 | owner: root 62 | group: root 63 | mode: '644' 64 | notify: 65 | - restart kubelet 66 | 67 | - name: Import kubelet and kube-proxy kubeconfig files 68 | copy: 69 | src: "{{ pki_path }}/kubeconfigs/{{ item }}/{{ ansible_fqdn }}/{{ item }}.conf" 70 | dest: /etc/kubernetes/manifests/{{ item }}.conf 71 | owner: root 72 | group: root 73 | mode: '644' 74 | with_items: 75 | - kubelet 76 | - proxier 77 | notify: 78 | - restart kubelet 79 | - restart kube-proxy 80 | 81 | - name: Create kube-proxy service 82 | template: 83 | src: kube-proxy.service.j2 84 | dest: /etc/systemd/system/kube-proxy.service 85 | owner: root 86 | group: root 87 | mode: '644' 88 | notify: 89 | - restart kube-proxy 90 | 91 | - name: Create kube-proxy config file 92 | template: 93 | src: kube-proxy-config.yaml.j2 94 | dest: /etc/kubernetes/manifests/kube-proxy-config.yaml 95 | owner: root 96 | group: root 97 | mode: '644' 98 | notify: 99 | - restart kube-proxy 100 | 101 | - name: Set Kernel vm.overcommit_memory to 1 102 | sysctl: 103 | name: vm.overcommit_memory 104 | value: '1' 105 | sysctl_set: yes 106 | state: present 107 | reload: yes 108 | 109 | - name: Set Kernel kernel.panic to 10 110 | sysctl: 111 | name: kernel.panic 112 | value: '10' 113 | sysctl_set: yes 114 | state: present 
115 | reload: yes 116 | 117 | - name: Set Kernel kernel.panic_on_oops to 1 118 | sysctl: 119 | name: kernel.panic_on_oops 120 | value: '1' 121 | sysctl_set: yes 122 | state: present 123 | reload: yes 124 | 125 | - name: Start kubelet and kube-proxy services 126 | systemd: 127 | state: started 128 | daemon_reload: yes 129 | enabled: yes 130 | name: "{{ item }}" 131 | with_items: 132 | - kubelet 133 | - kube-proxy 134 | -------------------------------------------------------------------------------- /roles/setup-master/templates/kube-apiserver.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes API Server {{ agorakube_base_components.kubernetes.release }} 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | {% set etcd_servers = [] %} 5 | {% for host in groups['etcd'] %} 6 | {{ etcd_servers.append( "https://"+hostvars[host].ansible_host+":2379" ) }} 7 | {% endfor %} 8 | {% set enabled_admission_plugins = [] %} 9 | {% for plugin in agorakube_kube_apiserver_enable_admission_plugins %} 10 | {{ enabled_admission_plugins.append( plugin ) }} 11 | {% endfor %} 12 | [Service] 13 | ExecStart=/usr/local/bin/kubernetes/server/bin/kube-apiserver \ 14 | --advertise-address={{ hostvars[ansible_fqdn].ansible_host}} \ 15 | --allow-privileged=true \ 16 | --anonymous-auth=false \ 17 | --apiserver-count={{ groups['masters'] | length }} \ 18 | --audit-log-maxage=30 \ 19 | --audit-log-maxbackup=10 \ 20 | --audit-log-maxsize=100 \ 21 | --audit-policy-file=/etc/kubernetes/manifests/audit-policy.yaml \ 22 | --authorization-mode=Node,RBAC \ 23 | --bind-address=0.0.0.0 \ 24 | --client-ca-file=/etc/kubernetes/pki/ca.crt \ 25 | --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt \ 26 | --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt \ 27 | --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key \ 28 | --etcd-servers={{etcd_servers|join(',')}} \ 29 | --event-ttl=1h \ 30 | --encryption-provider-config=/etc/kubernetes/manifests/encryption-provider-config.yaml \ 31 | --enable-admission-plugins={{enabled_admission_plugins|join(',')}} \ 32 | --admission-control-config-file=/etc/kubernetes/manifests/admission-control-config-file.yaml \ 33 | --kubelet-certificate-authority=/etc/kubernetes/pki/ca.crt \ 34 | --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt \ 35 | --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key \ 36 | {% if agorakube_base_components.kubernetes.release is version("v1.22", "<") %} 37 | --kubelet-https=true \ 38 | {% endif %} 39 | --runtime-config=api/all=true \ 40 | --service-cluster-ip-range={{ agorakube_network.cidr.service }} \ 41 | --service-node-port-range={{ agorakube_network.nodeport.range }} \ 42 | --tls-cert-file=/etc/kubernetes/pki/apiserver.crt \ 43 | --service-account-key-file=/etc/kubernetes/pki/sa/sa.pub \ 44 | --tls-private-key-file=/etc/kubernetes/pki/apiserver.key \ 45 | --v=2 \ 46 | --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt \ 47 | --requestheader-allowed-names=front-proxy-client \ 48 | --requestheader-extra-headers-prefix=X-Remote-Extra- \ 49 | --requestheader-group-headers=X-Remote-Group \ 50 | --requestheader-username-headers=X-Remote-User \ 51 | --request-timeout=60s \ 52 | --secure-port=6443 \ 53 | --service-account-lookup=true \ 54 | --profiling=false \ 55 | --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt \ 56 | --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key \ 57 | 
--service-account-issuer=https://kubernetes.default.svc.cluster.local \ 58 | --log-file=/var/log/kubernetes/kube-apiserver.log \ 59 | --log-file-max-size=1800 \ 60 | --logtostderr=false \ 61 | --audit-log-path=/var/log/kubernetes/kube-apiserver-audit.log \ 62 | {% if agorakube_features.keycloak_oidc.enabled | bool == True %} 63 | {% if agorakube_features.keycloak_oidc.auto_bootstrap.bootstrap_kube_apiserver | bool == True %} 64 | --oidc-issuer-url=https://{{ agorakube_features.keycloak_oidc.auto_bootstrap.host }}/auth/realms/local \ 65 | --oidc-client-id=kube \ 66 | --oidc-ca-file=/etc/kubernetes/pki/oidc/oidc-ca.crt \ 67 | --oidc-groups-prefix=oidc: \ 68 | --oidc-username-prefix=oidc: \ 69 | --oidc-groups-claim=groups \ 70 | --oidc-username-claim=email \ 71 | {% endif %} 72 | {% endif %} 73 | --service-account-signing-key-file=/etc/kubernetes/pki/sa/sa.key 74 | Restart=on-failure 75 | RestartSec=5 76 | 77 | [Install] 78 | WantedBy=multi-user.target 79 | -------------------------------------------------------------------------------- /docs/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute 2 | 3 | We are really glad you're reading this, because we need volunteer developers to help this project come to fruition. 4 | 5 | We want you working on things you're excited about. 6 | 7 | You can contact us by email at agorakube@ilki.fr, or you can join the AGORAKUBE Slack community to discuss and ask questions: [AGORAKUBE's Slack](http://slack.agorakube.ilkilabs.io/) 8 | 9 | Channels: 10 | - **#general** - For general purposes (news, events...) 11 | - **#developpers** - For people who contribute to Agorakube by developing features 12 | - **#end-users** - For end users who want to give us feedback 13 | - **#random** - As its name suggests, for random discussions :) 14 | 15 | ## Code Overview 16 | 17 | See the Agorakube code architecture below: 18 | 19 | * .github/ - Contains all GitHub configurations, including: 20 | * workflows/* - Contains 'GitHub Actions' workflows 21 | * ISSUE_TEMPLATE/* - Contains all issue templates displayed when issues are created 22 | * actions/* - Contains a folder for each custom GitHub Action. 23 | * ansible-lint/* - Contains all sources used for testing Ansible code with GitHub Actions 24 | * docs/* - Contains all the official Agorakube documentation. 25 | * translations/* - Contains a translation of the Agorakube documentation in a specific language 26 | * group_vars/all.yaml - Contains all default parameters used in Agorakube deployments 27 | * images/* - Contains images used in the Agorakube documentation 28 | * roles/* - Contains all the Ansible roles used to install Agorakube. Each role installs/manages a specific component 29 | * test/inventory - Contains a test inventory file used by "test_lab" 30 | * labs/* - Contains test labs used for Agorakube development/testing. These test labs use Vagrant/VirtualBox 31 | * tools/* - Contains folders for some specific management actions 32 | * etcd - Contains some playbooks for etcd management, such as backup/restore of etcd 33 | * hosts - Inventory file that defines your Agorakube cluster 34 | * agorakube.yaml - Ansible playbook used to deploy Agorakube 35 | 36 | ## Set up a local test lab 37 | 38 | You can set up a local test lab for Agorakube using Vagrant and VirtualBox. 39 | See [LOCAL_ENVIRONMENT](../LOCAL_ENVIRONMENT.md) for more details. 40 | 41 | ## Testing 42 | 43 | We use ansible-lint with GitHub Actions to test Ansible code quality.
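The same check can be run locally before opening a pull request (a minimal sketch, assuming ansible-lint is installed in your Python environment):

    $ pip install ansible-lint
    $ ansible-lint agorakube.yaml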
44 | 45 | 46 | ## Sample needs 47 | 48 | All contributions are welcome :) 49 | 50 | Here is a non-exhaustive list of contributions needed for this project: 51 | 52 | * Documentation 53 | * Add features, such as new runtime support, new Ingress Controller installation, new CNI plugins, etc. 54 | * Improve the code quality 55 | * ... 56 | 57 | ## Submitting changes 58 | 59 | Please send a [GitHub Pull Request to AGORAKUBE-CORE](https://github.com/ilkilabs/agorakube) with a clear list of what you've done (read more about [pull requests](https://help.github.com/en/articles/about-pull-requests/)). When you send a pull request, we will love you forever if your code stays idempotent. We can always use more test coverage. Please follow our coding conventions (below) and make sure all of your commits are atomic (one feature per commit). 60 | 61 | Always write a clear log message for your commits. One-line messages are fine for small changes, but bigger changes should look like this: 62 | 63 | $ git commit -m "A brief summary of the commit 64 | > 65 | > A paragraph describing what changed and its impact." 66 | 67 | ## Coding conventions 68 | 69 | Start reading our code and you'll get the hang of it. We optimize for readability: 70 | 71 | * We indent using two spaces (soft tabs) 72 | * We use task names as comments in Ansible playbooks. All names have to make the task's goal explicit. 73 | * This is open source software. Consider the people who will read your code, and make it look nice for them. It's sort of like driving a car: Perhaps you love doing donuts when you're alone, but with passengers the goal is to make the ride as smooth as possible. 74 | * You can use common tools like "VisualStudioCode" or "Atom" to write your Ansible code! 75 | 76 | 77 | Thanks, 78 | 79 | Ilkilabs team 80 | -------------------------------------------------------------------------------- /roles/post-scripts/tasks/default_dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Apply templates 3 | command: | 4 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 5 | apply -f https://raw.githubusercontent.com/kubernetes/dashboard/{{ agorakube_features.dashboard.release }}/aio/deploy/recommended.yaml 6 | register: apply_dashboard 7 | changed_when: > 8 | apply_dashboard.stdout is search("created") 9 | or apply_dashboard.stdout is search("configured") 10 | 11 | - name: Check dashboard admin user 12 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get serviceaccounts -n kubernetes-dashboard dashboardadmin 13 | register: check_dashboard_admin 14 | failed_when: check_dashboard_admin.rc > 1 15 | when: agorakube_features.dashboard.generate_admin_token | bool == True 16 | changed_when: False 17 | 18 | - name: Create dashboard admin user 19 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf create serviceaccount dashboardadmin -n kubernetes-dashboard 20 | register: create_dashboard_admin 21 | changed_when: > 22 | create_dashboard_admin.stdout is search("created") 23 | or create_dashboard_admin.stdout is search("configured") 24 | when: 25 | - agorakube_features.dashboard.generate_admin_token | bool == True 26 | - check_dashboard_admin.stderr is search("NotFound") 27 | 28 | - name: Check cluster role binding for admin user 29 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get clusterrolebinding agora-dashboard-admin-role-binding 30 | register:
check_dashboard_admin_role 31 | failed_when: check_dashboard_admin_role.rc > 1 32 | when: agorakube_features.dashboard.generate_admin_token | bool == True 33 | changed_when: False 34 | 35 | - name: Create cluster role binding for admin user 36 | command: | 37 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 38 | create clusterrolebinding agora-dashboard-admin-role-binding --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboardadmin 39 | register: create_dashboard_admin_role 40 | changed_when: > 41 | create_dashboard_admin_role.stdout is search("created") 42 | or create_dashboard_admin_role.stdout is search("configured") 43 | when: 44 | - agorakube_features.dashboard.generate_admin_token | bool == True 45 | - check_dashboard_admin_role.stderr is search("NotFound") 46 | 47 | - name: Get the admin service account 48 | command: | 49 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 50 | get serviceaccount -n kubernetes-dashboard dashboardadmin -o jsonpath={.secrets[0].name} 51 | register: dashboard_admin_service_account 52 | when: 53 | - agorakube_features.dashboard.generate_admin_token | bool == True 54 | - create_dashboard_admin.changed 55 | 56 | - name: Get the admin token 57 | command: | 58 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 59 | get secret -n kubernetes-dashboard {{ dashboard_admin_service_account.stdout }} -o jsonpath={.data.token} 60 | register: dashboard_admin_token 61 | when: 62 | - agorakube_features.dashboard.generate_admin_token | bool == True 63 | - create_dashboard_admin.changed 64 | 65 | - name: "Copying dashboard admin token in /root/.kube/dashboardadmin, Please copy and store the token to safe place, it will only be stored once" 66 | copy: 67 | content: "{{ dashboard_admin_token.stdout | b64decode }}" 68 | dest: /root/.kube/dashboardadmin 69 | mode: 0600 70 | when: 71 | - agorakube_features.dashboard.generate_admin_token | bool == True 72 | - dashboard_admin_token.changed 73 | 74 | - name: Verify if metric scraper pod has successfully started 75 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n kubernetes-dashboard dashboard-metrics-scraper 76 | changed_when: false 77 | register: scraper_deployment 78 | until: scraper_deployment.stdout.find("1/1") != -1 79 | retries: 300 80 | delay: 10 81 | run_once: true 82 | 83 | - name: Verify if dashboard pod has successfully started 84 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n kubernetes-dashboard kubernetes-dashboard 85 | changed_when: false 86 | register: dashboard_deployment 87 | until: dashboard_deployment.stdout.find("1/1") != -1 88 | retries: 300 89 | delay: 10 90 | run_once: true 91 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/metrics.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: system:aggregated-metrics-reader 6 | labels: 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 9 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 10 | rules: 11 | - apiGroups: ["metrics.k8s.io"] 12 | resources: ["pods", "nodes"] 13 | verbs: ["get", "list", "watch"] 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: 
metrics-server:system:auth-delegator 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: system:auth-delegator 23 | subjects: 24 | - kind: ServiceAccount 25 | name: metrics-server 26 | namespace: kube-system 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1 29 | kind: RoleBinding 30 | metadata: 31 | name: metrics-server-auth-reader 32 | namespace: kube-system 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: Role 36 | name: extension-apiserver-authentication-reader 37 | subjects: 38 | - kind: ServiceAccount 39 | name: metrics-server 40 | namespace: kube-system 41 | --- 42 | {% if agorakube_base_components.kubernetes.release is version('v1.22', '>=') %} 43 | apiVersion: apiregistration.k8s.io/v1 44 | {% endif %} 45 | {% if agorakube_base_components.kubernetes.release is version('v1.22', '<') %} 46 | apiVersion: apiregistration.k8s.io/v1beta1 47 | {% endif %} 48 | kind: APIService 49 | metadata: 50 | name: v1beta1.metrics.k8s.io 51 | spec: 52 | service: 53 | name: metrics-server 54 | namespace: kube-system 55 | group: metrics.k8s.io 56 | version: v1beta1 57 | insecureSkipTLSVerify: true 58 | groupPriorityMinimum: 100 59 | versionPriority: 100 60 | --- 61 | apiVersion: v1 62 | kind: ServiceAccount 63 | metadata: 64 | name: metrics-server 65 | namespace: kube-system 66 | --- 67 | apiVersion: apps/v1 68 | kind: Deployment 69 | metadata: 70 | name: metrics-server 71 | namespace: kube-system 72 | labels: 73 | k8s-app: metrics-server 74 | spec: 75 | selector: 76 | matchLabels: 77 | k8s-app: metrics-server 78 | template: 79 | metadata: 80 | name: metrics-server 81 | labels: 82 | k8s-app: metrics-server 83 | spec: 84 | serviceAccountName: metrics-server 85 | volumes: 86 | # mount in tmp so we can safely use from-scratch images and/or read-only containers 87 | - name: tmp-dir 88 | emptyDir: {} 89 | containers: 90 | - name: metrics-server 91 | image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2 92 | imagePullPolicy: IfNotPresent 93 | args: 94 | - --cert-dir=/tmp 95 | - --secure-port=4443 96 | - --v=2 97 | - --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP 98 | ports: 99 | - name: main-port 100 | containerPort: 4443 101 | protocol: TCP 102 | securityContext: 103 | readOnlyRootFilesystem: true 104 | runAsNonRoot: true 105 | runAsUser: 1000 106 | volumeMounts: 107 | - name: tmp-dir 108 | mountPath: /tmp 109 | nodeSelector: 110 | kubernetes.io/os: linux 111 | kubernetes.io/arch: "amd64" 112 | --- 113 | apiVersion: v1 114 | kind: Service 115 | metadata: 116 | name: metrics-server 117 | namespace: kube-system 118 | labels: 119 | kubernetes.io/name: "Metrics-server" 120 | kubernetes.io/cluster-service: "true" 121 | spec: 122 | selector: 123 | k8s-app: metrics-server 124 | ports: 125 | - port: 443 126 | protocol: TCP 127 | targetPort: main-port 128 | --- 129 | apiVersion: rbac.authorization.k8s.io/v1 130 | kind: ClusterRole 131 | metadata: 132 | name: system:metrics-server 133 | rules: 134 | - apiGroups: 135 | - "" 136 | resources: 137 | - pods 138 | - nodes 139 | - nodes/stats 140 | - namespaces 141 | - configmaps 142 | verbs: 143 | - get 144 | - list 145 | - watch 146 | --- 147 | apiVersion: rbac.authorization.k8s.io/v1 148 | kind: ClusterRoleBinding 149 | metadata: 150 | name: system:metrics-server 151 | roleRef: 152 | apiGroup: rbac.authorization.k8s.io 153 | kind: ClusterRole 154 | name: system:metrics-server 155 | subjects: 156 | - kind: ServiceAccount 157 | name: metrics-server 158 | namespace: kube-system 159 | 
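# Usage note (illustrative): once the metrics-server Deployment above is Ready,
# resource metrics can be queried from the deploy host, e.g.
#   kubectl --kubeconfig <admin kubeconfig> top nodes
#   kubectl --kubeconfig <admin kubeconfig> top pods -A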
-------------------------------------------------------------------------------- /roles/post-scripts/tasks/label-hosts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Make sure hosts with cloud controller manager enabled do not have the taint node.cloudprovider.kubernetes.io/uninitialized 3 | command: | 4 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 5 | taint nodes {{ item }} node.cloudprovider.kubernetes.io/uninitialized- --overwrite 6 | changed_when: false 7 | with_items: 8 | - "{{ groups['masters'] }}" 9 | - "{{ groups['workers'] }}" 10 | - "{{ groups['storage'] }}" 11 | ignore_errors: yes 12 | when: agorakube_base_components.cloud_controller_manager.enabled | bool == True 13 | 14 | - name: Wait for nodes to be ready 15 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get node {{ item }} 16 | changed_when: false 17 | register: node_deployment 18 | until: node_deployment.stdout.find("Ready") != -1 19 | retries: 300 20 | delay: 10 21 | run_once: true 22 | with_items: 23 | - "{{ groups['masters'] }}" 24 | - "{{ groups['workers'] }}" 25 | - "{{ groups['storage'] }}" 26 | 27 | - name: Label Worker Nodes 28 | command: "kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf label node {{ item }} node-role.kubernetes.io/worker=true --overwrite" 29 | changed_when: false 30 | with_items: "{{ groups['workers'] }}" 31 | 32 | - name: Label Storage Nodes 33 | command: "kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf label node {{ item }} node-role.kubernetes.io/storage=true --overwrite" 34 | changed_when: false 35 | with_items: "{{ groups['storage'] }}" 36 | 37 | - name: Label Master Nodes 38 | command: "kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf label node {{ item }} node-role.kubernetes.io/master=true --overwrite" 39 | changed_when: false 40 | with_items: "{{ groups['masters'] }}" 41 | 42 | - name: Remove label node-role.kubernetes.io/worker=true 43 | command: "kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf label node {{ item }} node-role.kubernetes.io/worker=true- --overwrite" 44 | changed_when: false 45 | with_items: 46 | - "{{ groups['masters'] }}" 47 | - "{{ groups['storage'] }}" 48 | when: 49 | - item not in groups['workers'] 50 | ignore_errors: yes 51 | 52 | - name: Remove label node-role.kubernetes.io/master=true 53 | command: "kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf label node {{ item }} node-role.kubernetes.io/master=true- --overwrite" 54 | changed_when: false 55 | with_items: 56 | - "{{ groups['workers'] }}" 57 | - "{{ groups['storage'] }}" 58 | when: 59 | - item not in groups['masters'] 60 | ignore_errors: yes 61 | 62 | - name: Remove label node-role.kubernetes.io/storage=true 63 | command: "kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf label node {{ item }} node-role.kubernetes.io/storage=true- --overwrite" 64 | changed_when: false 65 | with_items: 66 | - "{{ groups['workers'] }}" 67 | - "{{ groups['masters'] }}" 68 | when: 69 | - item not in groups['storage'] 70 | ignore_errors: yes 71 | 72 | - name: Taint hosts with master role NoSchedule (if not a worker or storage) 73 | command: | 74 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 75 | taint nodes {{ item }} node-role.kubernetes.io/master=true:NoSchedule --overwrite 76 | changed_when: false 77 | with_items: 78 | - "{{ groups['masters'] }}" 79 | when: 80 | - item not in groups['workers'] 81 | - item
not in groups['storage'] 82 | 83 | - name: Make hosts with Storage role not NoSchedulabe (if not a worker, but can be a master) 84 | command: | 85 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 86 | taint nodes {{ item }} node-role.kubernetes.io/storage=true:NoSchedule --overwrite 87 | changed_when: false 88 | with_items: 89 | - "{{ groups['storage'] }}" 90 | when: 91 | - item not in groups['workers'] 92 | 93 | - name: Make sure hosts with worker role not have taint node-role.kubernetes.io/storage 94 | command: | 95 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 96 | taint nodes {{ item }} node-role.kubernetes.io/storage- --overwrite 97 | changed_when: false 98 | with_items: 99 | - "{{ groups['workers'] }}" 100 | ignore_errors: yes 101 | 102 | - name: Make sure hosts with worker role not have taint node-role.kubernetes.io/master 103 | command: | 104 | kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf \ 105 | taint nodes {{ item }} node-role.kubernetes.io/master- --overwrite 106 | changed_when: false 107 | with_items: 108 | - "{{ groups['workers'] }}" 109 | ignore_errors: yes 110 | -------------------------------------------------------------------------------- /tools/oidc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | KEYCLOAK_URL=https://oidc.local.lan 4 | KEYCLOAK_REALM=local 5 | KEYCLOAK_CLIENT_ID=kube 6 | KEYCLOAK_CLIENT_SECRET=79e34f70-581a-4cc3-a2b4-10b5a4d670df 7 | KEYCLOAK_CA_CRT=/var/agorakube/pki/oidc/oidc-ca.crt 8 | 9 | cat < ${TOKEN}"; 59 | exit ${RET} 60 | fi 61 | 62 | ERROR=`echo ${TOKEN} | jq .error -r` 63 | if [ "${ERROR}" != "null" ];then 64 | echo "# Failed ==> ${TOKEN}" >&2 65 | exit 1 66 | fi 67 | 68 | ID_TOKEN=`echo ${TOKEN} | jq .id_token -r` 69 | REFRESH_TOKEN=`echo ${TOKEN} | jq .refresh_token -r` 70 | 71 | 72 | cat < 18 | apply_keycloak.stdout is search("created") 19 | or apply_keycloak.stdout is search("configured") 20 | 21 | - name: Create TLS secret for Keycloak Ingress 22 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf -n keycloak create secret tls keycloak-tls --cert=/var/agorakube/pki/oidc/oidc-end.crt --key=/var/agorakube/pki/oidc/oidc-end.key 23 | register: apply_keycloak_tls 24 | changed_when: > 25 | apply_keycloak_tls.stdout is search("created") 26 | or apply_keycloak_tls.stdout is search("configured") 27 | ignore_errors: yes 28 | when: agorakube_features.keycloak_oidc.auto_bootstrap.bootstrap_keycloak | bool == True 29 | 30 | - name: Render templates Ingress OIDC 31 | template: 32 | dest: "{{ keycloak_tempdir.path }}/keycloak-ingress.yaml" 33 | src: "keycloak-ingress.yaml.j2" 34 | changed_when: false 35 | 36 | - name: Apply templates Ingress OIDC 37 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf apply -f {{ keycloak_tempdir.path }}/keycloak-ingress.yaml 38 | register: apply_keycloak_ingress 39 | changed_when: > 40 | apply_keycloak_ingress.stdout is search("created") 41 | or apply_keycloak_ingress.stdout is search("configured") 42 | 43 | - name: Cleanup tempdir 44 | file: 45 | state: absent 46 | path: "{{ keycloak_tempdir.path }}" 47 | changed_when: false 48 | 49 | - name: Verify if keycloak pod has successfully started 50 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get deploy -n keycloak keycloak 51 | changed_when: false 52 | register: keycloak_deployment 53 | until: keycloak_deployment.stdout.find("1/1") != -1 54 | retries: 300 55 | delay: 10 56 | 
run_once: true 57 | 58 | - name: Get Ingress keycloak IP if Ingress is Nginx 59 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get services ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.spec.clusterIP}' 60 | changed_when: false 61 | register: keycloak_ip_nginx 62 | run_once: true 63 | when: agorakube_features.ingress.controller == 'nginx' 64 | 65 | - name: Get Ingress keycloak IP if Ingress is Traefik 66 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get services traefik --namespace ingress-traefik --output jsonpath='{.spec.clusterIP}' 67 | changed_when: false 68 | register: keycloak_ip_traefik 69 | run_once: true 70 | when: agorakube_features.ingress.controller == 'traefik' 71 | 72 | - name: Get Ingress keycloak IP if Ingress is HAProxy 73 | command: kubectl --kubeconfig {{ pki_path }}/kubeconfigs/admin/admin.conf get services haproxy-ingress --namespace haproxy-controller --output jsonpath='{.spec.clusterIP}' 74 | register: keycloak_ip_haproxy 75 | run_once: true 76 | when: agorakube_features.ingress.controller == 'haproxy' 77 | changed_when: false 78 | 79 | - name: Configure /etc/hosts file for OIDC when ingress is nginx 80 | lineinfile: 81 | path: /etc/hosts 82 | line: "{{ keycloak_ip_nginx.stdout }} {{ agorakube_features.keycloak_oidc.auto_bootstrap.host }}" 83 | create: yes 84 | when: 85 | - agorakube_features.keycloak_oidc.auto_bootstrap.populate_etc_hosts | bool == True 86 | - agorakube_features.ingress.controller == 'nginx' 87 | delegate_to: "{{ item }}" 88 | loop: "{{ groups['all'] }}" 89 | 90 | - name: Configure /etc/hosts file for OIDC whn ingress is traefik 91 | lineinfile: 92 | path: /etc/hosts 93 | line: "{{ keycloak_ip_traefik.stdout }} {{ agorakube_features.keycloak_oidc.auto_bootstrap.host }}" 94 | create: yes 95 | when: 96 | - agorakube_features.keycloak_oidc.auto_bootstrap.populate_etc_hosts | bool == True 97 | - agorakube_features.ingress.controller == 'traefik' 98 | delegate_to: "{{ item }}" 99 | loop: "{{ groups['all'] }}" 100 | 101 | - name: Configure /etc/hosts file for OIDC when ingress is haproxy 102 | lineinfile: 103 | path: /etc/hosts 104 | line: "{{ keycloak_ip_haproxy.stdout }} {{ agorakube_features.keycloak_oidc.auto_bootstrap.host }}" 105 | create: yes 106 | when: 107 | - agorakube_features.keycloak_oidc.auto_bootstrap.populate_etc_hosts | bool == True 108 | - agorakube_features.ingress.controller == 'haproxy' 109 | delegate_to: "{{ item }}" 110 | loop: "{{ groups['all'] }}" 111 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/coredns.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | labels: 11 | kubernetes.io/bootstrapping: rbac-defaults 12 | name: system:coredns 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - endpoints 18 | - services 19 | - pods 20 | - namespaces 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - "" 26 | resources: 27 | - nodes 28 | verbs: 29 | - get 30 | - apiGroups: 31 | - discovery.k8s.io 32 | resources: 33 | - endpointslices 34 | verbs: 35 | - list 36 | - watch 37 | --- 38 | apiVersion: rbac.authorization.k8s.io/v1 39 | kind: ClusterRoleBinding 40 | metadata: 41 | annotations: 42 | rbac.authorization.kubernetes.io/autoupdate: 
"true" 43 | labels: 44 | kubernetes.io/bootstrapping: rbac-defaults 45 | name: system:coredns 46 | roleRef: 47 | apiGroup: rbac.authorization.k8s.io 48 | kind: ClusterRole 49 | name: system:coredns 50 | subjects: 51 | - kind: ServiceAccount 52 | name: coredns 53 | namespace: kube-system 54 | --- 55 | apiVersion: v1 56 | kind: ConfigMap 57 | metadata: 58 | name: coredns 59 | namespace: kube-system 60 | data: 61 | Corefile: | 62 | .:53 { 63 | errors 64 | log 65 | health 66 | ready 67 | kubernetes cluster.local {{ agorakube_network.cidr.service }} {{ agorakube_network.cidr.pod }} { 68 | pods insecure 69 | fallthrough in-addr.arpa ip6.arpa 70 | ttl 30 71 | } 72 | cache 30 73 | prometheus :9153 74 | forward . {{ agorakube_network.dns.primary_forwarder }} {{ agorakube_network.dns.secondary_forwarder }} 75 | loop 76 | reload 77 | loadbalance 78 | } 79 | --- 80 | apiVersion: apps/v1 81 | kind: Deployment 82 | metadata: 83 | name: coredns 84 | namespace: kube-system 85 | labels: 86 | k8s-app: kube-dns 87 | kubernetes.io/name: "CoreDNS" 88 | spec: 89 | replicas: {{ agorakube_features.coredns.replicas }} 90 | strategy: 91 | type: RollingUpdate 92 | rollingUpdate: 93 | maxUnavailable: 1 94 | selector: 95 | matchLabels: 96 | k8s-app: kube-dns 97 | template: 98 | metadata: 99 | labels: 100 | k8s-app: kube-dns 101 | spec: 102 | priorityClassName: system-cluster-critical 103 | serviceAccountName: coredns 104 | tolerations: 105 | - key: "CriticalAddonsOnly" 106 | operator: "Exists" 107 | nodeSelector: 108 | beta.kubernetes.io/os: linux 109 | containers: 110 | - name: coredns 111 | image: coredns/coredns:{{ agorakube_features.coredns.release }} 112 | imagePullPolicy: IfNotPresent 113 | resources: 114 | limits: 115 | memory: 340Mi 116 | requests: 117 | cpu: 100m 118 | memory: 70Mi 119 | args: [ "-conf", "/etc/coredns/Corefile" ] 120 | volumeMounts: 121 | - name: config-volume 122 | mountPath: /etc/coredns 123 | readOnly: true 124 | ports: 125 | - containerPort: 53 126 | name: dns 127 | protocol: UDP 128 | - containerPort: 53 129 | name: dns-tcp 130 | protocol: TCP 131 | - containerPort: 9153 132 | name: metrics 133 | protocol: TCP 134 | securityContext: 135 | allowPrivilegeEscalation: false 136 | capabilities: 137 | add: 138 | - NET_BIND_SERVICE 139 | drop: 140 | - all 141 | readOnlyRootFilesystem: true 142 | livenessProbe: 143 | httpGet: 144 | path: /health 145 | port: 8080 146 | scheme: HTTP 147 | initialDelaySeconds: 60 148 | timeoutSeconds: 5 149 | successThreshold: 1 150 | failureThreshold: 5 151 | readinessProbe: 152 | httpGet: 153 | path: /ready 154 | port: 8181 155 | scheme: HTTP 156 | dnsPolicy: Default 157 | volumes: 158 | - name: config-volume 159 | configMap: 160 | name: coredns 161 | items: 162 | - key: Corefile 163 | path: Corefile 164 | --- 165 | apiVersion: v1 166 | kind: Service 167 | metadata: 168 | name: kube-dns 169 | namespace: kube-system 170 | annotations: 171 | prometheus.io/port: "9153" 172 | prometheus.io/scrape: "true" 173 | labels: 174 | k8s-app: kube-dns 175 | kubernetes.io/cluster-service: "true" 176 | kubernetes.io/name: "CoreDNS" 177 | spec: 178 | selector: 179 | k8s-app: kube-dns 180 | clusterIP: {{ agorakube_network.service_ip.coredns }} 181 | ports: 182 | - name: dns 183 | port: 53 184 | protocol: UDP 185 | - name: dns-tcp 186 | port: 53 187 | protocol: TCP 188 | - name: metrics 189 | port: 9153 190 | protocol: TCP 191 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/haproxy.yaml.j2: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: haproxy-controller 6 | 7 | --- 8 | apiVersion: v1 9 | kind: ServiceAccount 10 | metadata: 11 | name: haproxy-ingress-service-account 12 | namespace: haproxy-controller 13 | 14 | --- 15 | kind: ClusterRole 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | metadata: 18 | name: haproxy-ingress-cluster-role 19 | rules: 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - configmaps 24 | - endpoints 25 | - nodes 26 | - pods 27 | - services 28 | - namespaces 29 | - events 30 | - serviceaccounts 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiGroups: 36 | - "extensions" 37 | - "networking.k8s.io" 38 | resources: 39 | - ingresses 40 | - ingresses/status 41 | - ingressclasses 42 | verbs: 43 | - get 44 | - list 45 | - watch 46 | - apiGroups: 47 | - "extensions" 48 | - "networking.k8s.io" 49 | resources: 50 | - ingresses/status 51 | verbs: 52 | - update 53 | - apiGroups: 54 | - "" 55 | resources: 56 | - secrets 57 | verbs: 58 | - get 59 | - list 60 | - watch 61 | - create 62 | - patch 63 | - update 64 | 65 | --- 66 | kind: ClusterRoleBinding 67 | apiVersion: rbac.authorization.k8s.io/v1 68 | metadata: 69 | name: haproxy-ingress-cluster-role-binding 70 | namespace: haproxy-controller 71 | roleRef: 72 | apiGroup: rbac.authorization.k8s.io 73 | kind: ClusterRole 74 | name: haproxy-ingress-cluster-role 75 | subjects: 76 | - kind: ServiceAccount 77 | name: haproxy-ingress-service-account 78 | namespace: haproxy-controller 79 | 80 | --- 81 | apiVersion: v1 82 | kind: ConfigMap 83 | metadata: 84 | name: haproxy 85 | namespace: haproxy-controller 86 | data: 87 | 88 | --- 89 | apiVersion: apps/v1 90 | kind: Deployment 91 | metadata: 92 | labels: 93 | run: ingress-default-backend 94 | name: ingress-default-backend 95 | namespace: haproxy-controller 96 | spec: 97 | replicas: 1 98 | selector: 99 | matchLabels: 100 | run: ingress-default-backend 101 | template: 102 | metadata: 103 | labels: 104 | run: ingress-default-backend 105 | spec: 106 | containers: 107 | - name: ingress-default-backend 108 | image: gcr.io/google_containers/defaultbackend:1.0 109 | ports: 110 | - containerPort: 8080 111 | 112 | --- 113 | apiVersion: v1 114 | kind: Service 115 | metadata: 116 | labels: 117 | run: ingress-default-backend 118 | name: ingress-default-backend 119 | namespace: haproxy-controller 120 | spec: 121 | selector: 122 | run: ingress-default-backend 123 | ports: 124 | - name: port-1 125 | port: 8080 126 | protocol: TCP 127 | targetPort: 8080 128 | 129 | --- 130 | apiVersion: apps/v1 131 | kind: Deployment 132 | metadata: 133 | labels: 134 | run: haproxy-ingress 135 | name: haproxy-ingress 136 | namespace: haproxy-controller 137 | spec: 138 | replicas: 1 139 | selector: 140 | matchLabels: 141 | run: haproxy-ingress 142 | template: 143 | metadata: 144 | labels: 145 | run: haproxy-ingress 146 | spec: 147 | serviceAccountName: haproxy-ingress-service-account 148 | containers: 149 | - name: haproxy-ingress 150 | image: haproxytech/kubernetes-ingress:{{ agorakube_features.ingress.release }} 151 | args: 152 | - --configmap=haproxy-controller/haproxy 153 | - --default-backend-service=haproxy-controller/ingress-default-backend 154 | securityContext: 155 | runAsUser: 1000 156 | runAsGroup: 1000 157 | capabilities: 158 | drop: 159 | - ALL 160 | add: 161 | - NET_BIND_SERVICE 162 | resources: 163 | requests: 164 | cpu: "500m" 165 | memory: "50Mi" 166 | livenessProbe: 167 | 
httpGet: 168 | path: /healthz 169 | port: 1042 170 | ports: 171 | - name: http 172 | containerPort: 80 173 | - name: https 174 | containerPort: 443 175 | - name: stat 176 | containerPort: 1024 177 | env: 178 | - name: TZ 179 | value: "Etc/UTC" 180 | - name: POD_NAME 181 | valueFrom: 182 | fieldRef: 183 | fieldPath: metadata.name 184 | - name: POD_NAMESPACE 185 | valueFrom: 186 | fieldRef: 187 | fieldPath: metadata.namespace 188 | initContainers: 189 | - name: sysctl 190 | image: busybox:musl 191 | command: 192 | - /bin/sh 193 | - -c 194 | - sysctl -w net.ipv4.ip_unprivileged_port_start=0 195 | securityContext: 196 | privileged: true 197 | --- 198 | apiVersion: v1 199 | kind: Service 200 | metadata: 201 | labels: 202 | run: haproxy-ingress 203 | name: haproxy-ingress 204 | namespace: haproxy-controller 205 | spec: 206 | selector: 207 | run: haproxy-ingress 208 | type: NodePort 209 | ports: 210 | - name: http 211 | port: 80 212 | protocol: TCP 213 | targetPort: 80 214 | - name: https 215 | port: 443 216 | protocol: TCP 217 | targetPort: 443 218 | - name: stat 219 | port: 1024 220 | protocol: TCP 221 | targetPort: 1024 222 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/backup-etcd-cronjob.yaml.j2: -------------------------------------------------------------------------------- 1 | {% set etcd_initial_cluster = [] %} 2 | {% for host in groups['etcd'] %} 3 | {{ etcd_initial_cluster.append( "https://"+hostvars[host].ansible_host+":2379" ) }} 4 | {% endfor %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: script-backup-etcd 10 | namespace: kube-system 11 | data: 12 | backup-etcd.sh: | 13 | #!/bin/sh 14 | mkdir $BACKUP_ETCD_DIR -p 15 | etcdctl \ 16 | --endpoints=$(etcdctl --endpoints={{ etcd_initial_cluster|join(',') }} endpoint status | grep ', true, false,' | awk '{ print $1}' | sed 's/,//g') \ 17 | snapshot save $BACKUP_ETCD_DIR/snapshot_etcd_cluster.`date +%m-%d-%y_%H-%M-%S`.db 18 | --- 19 | apiVersion: batch/v1beta1 20 | kind: CronJob 21 | metadata: 22 | name: backup-etcd 23 | namespace: kube-system 24 | spec: 25 | schedule: "{{ agorakube_base_components.etcd.backup.crontab }}" 26 | successfulJobsHistoryLimit: 1 27 | failedJobsHistoryLimit: 3 28 | jobTemplate: 29 | spec: 30 | template: 31 | spec: 32 | tolerations: 33 | # Make sure backup pod gets scheduled on all nodes. 34 | - effect: NoSchedule 35 | operator: Exists 36 | # Mark the pod as a critical add-on for rescheduling. 
37 | - key: CriticalAddonsOnly 38 | operator: Exists 39 | - effect: NoExecute 40 | operator: Exists 41 | {% if agorakube_base_components.etcd.backup.storage.enabled | bool == True %} 42 | {% if agorakube_base_components.etcd.backup.storage.type == "hostpath" %} 43 | nodeSelector: 44 | kubernetes.io/hostname: {{ agorakube_base_components.etcd.backup.storage.hostpath.nodename }} 45 | {% endif %} 46 | {% endif %} 47 | restartPolicy: Never 48 | containers: 49 | - name: backup-etcd 50 | image: quay.io/coreos/etcd:{{ agorakube_base_components.etcd.release }} 51 | command: ["/opt/backup_etcd/backup-etcd.sh"] 52 | env: 53 | - name: ETCDCTL_API 54 | value: "3" 55 | - name: ETCDCTL_CACERT 56 | value: "/etc/agorakube/etcd/etcd-api.crt/ca.crt" 57 | - name: ETCDCTL_CERT 58 | value: "/etc/agorakube/etcd/etcdctl.crt/kube-etcd-healthcheck-client.crt" 59 | - name: ETCDCTL_KEY 60 | value: "/etc/agorakube/etcd/etcdctl.key/kube-etcd-healthcheck-client.key" 61 | - name: BACKUP_ETCD_DIR 62 | value: "/var/backup_etcd" 63 | volumeMounts: 64 | - name: etcd-api 65 | mountPath: /etc/agorakube/etcd/etcd-api.crt 66 | readOnly: true 67 | - name: etcdctl-crt 68 | mountPath: /etc/agorakube/etcd/etcdctl.crt 69 | readOnly: true 70 | - name: etcdctl-key 71 | mountPath: /etc/agorakube/etcd/etcdctl.key 72 | readOnly: true 73 | - name: script-backup-etcd 74 | mountPath: /opt/backup_etcd/ 75 | readOnly: false 76 | {% if agorakube_base_components.etcd.backup.storage.enabled | bool == True %} 77 | - name: backup-etcd-dir 78 | mountPath: /var/backup_etcd/ 79 | {% endif %} 80 | volumes: 81 | - name: script-backup-etcd 82 | configMap: 83 | name: script-backup-etcd 84 | defaultMode: 0700 85 | - name: etcd-api 86 | configMap: 87 | name: etcd-ca.crt 88 | - name: etcdctl-crt 89 | configMap: 90 | name: etcd-healthcheck-client.crt 91 | - name: etcdctl-key 92 | secret: 93 | secretName: etcd-healthcheck-client.key 94 | {% if agorakube_base_components.etcd.backup.storage.enabled | bool == True %} 95 | - name: backup-etcd-dir 96 | persistentVolumeClaim: 97 | claimName: backup-etcd 98 | {% endif %} 99 | {% if agorakube_base_components.etcd.backup.storage.enabled | bool == True %} 100 | --- 101 | apiVersion: v1 102 | kind: PersistentVolumeClaim 103 | metadata: 104 | name: backup-etcd 105 | namespace: kube-system 106 | spec: 107 | {% if agorakube_base_components.etcd.backup.storage.type == "hostpath" %} 108 | storageClassName: etcd-backup-hostpath 109 | {% endif %} 110 | {% if agorakube_base_components.etcd.backup.storage.type == "storageclass" %} 111 | storageClassName: "{{ agorakube_base_components.etcd.backup.storage.storageclass.name }}" 112 | {% endif %} 113 | {% if agorakube_base_components.etcd.backup.storage.type == "persistentvolume" %} 114 | storageClassName: "{{ agorakube_base_components.etcd.backup.storage.persistentvolume.storageclass }}" 115 | {% endif %} 116 | resources: 117 | requests: 118 | storage: {{ agorakube_base_components.etcd.backup.storage.capacity }} 119 | volumeMode: Filesystem 120 | accessModes: 121 | - ReadWriteOnce 122 | {% if agorakube_base_components.etcd.backup.storage.type == "persistentvolume" %} 123 | volumeName: "{{ agorakube_base_components.etcd.backup.storage.persistentvolume.name }}" 124 | {% endif %} 125 | --- 126 | {% if agorakube_base_components.etcd.backup.storage.type == "hostpath" %} 127 | apiVersion: v1 128 | kind: PersistentVolume 129 | metadata: 130 | name: backup-etcd-hostpath 131 | namespace: kube-system 132 | spec: 133 | storageClassName: etcd-backup-hostpath 134 | capacity: 135 | storage: {{ 
agorakube_base_components.etcd.backup.storage.capacity }} 136 | accessModes: 137 | - ReadWriteOnce 138 | hostPath: 139 | path: "{{ agorakube_base_components.etcd.backup.storage.hostpath.path }}" 140 | {% endif %} 141 | {% endif %} 142 | -------------------------------------------------------------------------------- /group_vars/all.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | agorakube: 3 | global: 4 | data_path: /var/agorakube 5 | 6 | agorakube_pki: 7 | infos: 8 | state: "Ile-De-France" 9 | locality: "Paris" 10 | country: "FR" 11 | root_cn: "ILKI Kubernetes Engine" 12 | expirity: "+3650d" 13 | management: 14 | rotate_certificats: False 15 | 16 | agorakube_base_components: 17 | etcd: 18 | release: v3.4.16 19 | upgrade: False 20 | check: true 21 | data_path: /var/lib/etcd 22 | backup: 23 | enabled: False 24 | crontab: "*/30 * * * *" 25 | storage: 26 | capacity: 10Gi 27 | enabled: False 28 | type: "storageclass" 29 | storageclass: 30 | name: "default-jiva" 31 | persistentvolume: 32 | name: "my-pv-backup-etcd" 33 | storageclass: "my-storageclass-name" 34 | hostpath: 35 | nodename: "master1" 36 | path: /var/etcd-backup 37 | kubernetes: 38 | release: v1.23.5 39 | upgrade: False 40 | cloud_controller_manager: 41 | enabled: False 42 | container: 43 | engine: containerd 44 | # release : Only Supported if container engine is set to docker 45 | release: "" 46 | # upgrade: false 47 | 48 | agorakube_network: 49 | cni_plugin: calico 50 | calico_autodetection_method: "first-found" 51 | enable_vpn: true 52 | mtu: 0 53 | cidr: 54 | pod: 10.33.0.0/16 55 | service: 10.32.0.0/16 56 | service_ip: 57 | kubernetes: 10.32.0.1 58 | coredns: 10.32.0.10 59 | dns: 60 | primary_forwarder: 8.8.8.8 61 | secondary_forwarder: 8.8.4.4 62 | nodeport: 63 | range: 30000-32000 64 | external_loadbalancing: 65 | enabled: False 66 | ip_range: 10.10.20.50-10.10.20.250 67 | secret_key: LGyt2l9XftOxEUIeFf2w0eCM7KjyQdkHform0gldYBKMORWkfQIsfXW0sQlo1VjJBB17shY5RtLg0klDNqNq4PAhNaub+olSka61LxV73KN2VaJY/snrZmHbdf/a7DfdzaeQ5pzP6D5O7zbUZwfb5ASOhNrG8aDMY3rkf4ZzHkc= 68 | kube_proxy: 69 | mode: ipvs 70 | algorithm: rr 71 | 72 | agorakube_features: 73 | coredns: 74 | release: "1.9.1" 75 | replicas: 2 76 | reloader: 77 | enabled: False 78 | release: "0.0.89" 79 | storage: 80 | enabled: False 81 | release: "2.9.0" 82 | jiva: 83 | data_path: /var/openebs 84 | fs_type: ext4 85 | hostpath: 86 | data_path: /var/local-hostpath 87 | dashboard: 88 | enabled: False 89 | generate_admin_token: False 90 | release: v2.2.0 91 | metrics_server: 92 | enabled: True 93 | ingress: 94 | controller: nginx 95 | release: v1.1.0 96 | 97 | supervision: 98 | monitoring: 99 | enabled: False 100 | dashboard: True 101 | persistent: 102 | enabled: False 103 | storage: 104 | capacity: 4Gi 105 | type: "storageclass" 106 | storageclass: 107 | name: "default-jiva" 108 | persistentvolume: 109 | name: "my-pv-monitoring" 110 | storageclass: "my-storageclass-name" 111 | hostpath: 112 | nodename: "worker1" 113 | path: /var/monitoring-persistent 114 | dashboard: 115 | admin: 116 | user: administrator 117 | password: P@ssw0rd 118 | persistent: 119 | enabled: False 120 | storage: 121 | capacity: 4Gi 122 | type: "storageclass" 123 | storageclass: 124 | name: "default-jiva" 125 | persistentvolume: 126 | name: "my-pv-monitoring" 127 | storageclass: "my-storageclass-name" 128 | hostpath: 129 | nodename: "worker1" 130 | path: /var/grafana-persistent 131 | logging: 132 | enabled: False 133 | dashboard: True 134 | persistent: 135 | 
enabled: False 136 | storage: 137 | capacity: 4Gi 138 | type: "storageclass" 139 | storageclass: 140 | name: "default-jiva" 141 | persistentvolume: 142 | name: "my-pv-monitoring" 143 | storageclass: "my-storageclass-name" 144 | hostpath: 145 | nodename: "worker1" 146 | path: /var/logging-persistent 147 | logrotate: 148 | enabled: False 149 | crontab: "* 2 * * *" 150 | day_retention: 14 151 | gatekeeper: 152 | enabled: False 153 | release: v3.4.0 154 | replicas: 155 | #audit: 1 156 | controller_manager: 3 157 | # argocd is an Alpha feature and does not support persistence yet. Use it only for test purposes. 158 | argocd: 159 | enabled: False 160 | 161 | # keycloak_oidc is an Alpha feature. 162 | keycloak_oidc: 163 | enabled: False 164 | admin: 165 | user: administrator 166 | password: P@ssw0rd 167 | auto_bootstrap: 168 | bootstrap_keycloak: true 169 | bootstrap_kube_apiserver: true 170 | populate_etc_hosts: true 171 | host: oidc.local.lan 172 | storage: 173 | enabled: False 174 | capacity: 10Gi 175 | type: "storageclass" 176 | storageclass: 177 | name: "default-jiva" 178 | persistentvolume: 179 | name: "my-pv-backup-etcd" 180 | storageclass: "my-storageclass-name" 181 | hostpath: 182 | nodename: "master1" 183 | path: /var/keycloak 184 | 185 | etc_hosts: 186 | - hostname: "localhost" 187 | ip: "127.0.0.1" 188 | 189 | # Populate /etc/hosts using all inventory groups 190 | # Note: This will not remove /etc/hosts entries when removed from inventory 191 | agorakube_populate_etc_hosts: True 192 | 193 | # Remove ALL /etc/hosts entries that are NOT defined in the etc_hosts group or etc_hosts variable 194 | agorakube_remove_etc_hosts: False 195 | 196 | # Optionally backup /etc/hosts each time a change is made 197 | agorakube_backup_etc_hosts: False 198 | 199 | # Security 200 | agorakube_encrypt_etcd_keys: 201 | # Warning: If multiple keys are defined, ONLY the LAST key is used to encrypt and decrypt. 202 | # Other keys are used only for decryption.
Keys can be generated with command: head -c 32 /dev/urandom | base64 203 | key1: 204 | secret: 1fJcKt6vBxMt+AkBanoaxFF2O6ytHIkETNgQWv4b/+Q= 205 | 206 | #restoration_snapshot_file: /path/snopshot/file Located on {{ etcd_data_directory }} 207 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/kube-router.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: kube-router-cfg 6 | namespace: kube-system 7 | labels: 8 | tier: node 9 | k8s-app: kube-router 10 | data: 11 | cni-conf.json: | 12 | { 13 | "cniVersion":"0.3.0", 14 | "name":"mynet", 15 | "plugins":[ 16 | { 17 | "name":"kubernetes", 18 | "type":"bridge", 19 | "bridge":"kube-bridge", 20 | "isDefaultGateway":true, 21 | "ipam":{ 22 | "type":"host-local" 23 | } 24 | } 25 | ] 26 | } 27 | kubeconfig: | 28 | apiVersion: v1 29 | kind: Config 30 | clusterCIDR: {{ agorakube_network.cidr.pod }} 31 | clusters: 32 | - name: cluster 33 | cluster: 34 | certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 35 | server: https://{{ agorakube_network.service_ip.kubernetes }} 36 | users: 37 | - name: kube-router 38 | user: 39 | tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 40 | contexts: 41 | - context: 42 | cluster: cluster 43 | user: kube-router 44 | name: kube-router-context 45 | current-context: kube-router-context 46 | 47 | --- 48 | apiVersion: apps/v1 49 | kind: DaemonSet 50 | metadata: 51 | labels: 52 | k8s-app: kube-router 53 | tier: node 54 | name: kube-router 55 | namespace: kube-system 56 | spec: 57 | selector: 58 | matchLabels: 59 | k8s-app: kube-router 60 | tier: node 61 | template: 62 | metadata: 63 | labels: 64 | k8s-app: kube-router 65 | tier: node 66 | spec: 67 | priorityClassName: system-node-critical 68 | serviceAccountName: kube-router 69 | containers: 70 | - name: kube-router 71 | image: docker.io/cloudnativelabs/kube-router:v1.0.0 72 | imagePullPolicy: Always 73 | args: 74 | - "--run-router=true" 75 | - "--run-firewall=true" 76 | - "--run-service-proxy=true" 77 | - "--bgp-graceful-restart=true" 78 | - "--kubeconfig=/var/lib/kube-router/kubeconfig" 79 | env: 80 | - name: NODE_NAME 81 | valueFrom: 82 | fieldRef: 83 | fieldPath: spec.nodeName 84 | - name: KUBE_ROUTER_CNI_CONF_FILE 85 | value: /etc/cni/net.d/10-kuberouter.conflist 86 | livenessProbe: 87 | httpGet: 88 | path: /healthz 89 | port: 20244 90 | initialDelaySeconds: 10 91 | periodSeconds: 3 92 | resources: 93 | requests: 94 | cpu: 250m 95 | memory: 250Mi 96 | securityContext: 97 | privileged: true 98 | volumeMounts: 99 | - name: lib-modules 100 | mountPath: /lib/modules 101 | readOnly: true 102 | - name: cni-conf-dir 103 | mountPath: /etc/cni/net.d 104 | - name: kubeconfig 105 | mountPath: /var/lib/kube-router 106 | readOnly: true 107 | - name: xtables-lock 108 | mountPath: /run/xtables.lock 109 | readOnly: false 110 | initContainers: 111 | - name: install-cni 112 | image: busybox:1.32.0 113 | imagePullPolicy: Always 114 | command: 115 | - /bin/sh 116 | - -c 117 | - set -e -x; 118 | if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then 119 | if [ -f /etc/cni/net.d/*.conf ]; then 120 | rm -f /etc/cni/net.d/*.conf; 121 | fi; 122 | TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; 123 | cp /etc/kube-router/cni-conf.json ${TMP}; 124 | mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; 125 | fi; 126 | if [ ! 
-f /var/lib/kube-router/kubeconfig ]; then 127 | TMP=/var/lib/kube-router/.tmp-kubeconfig; 128 | cp /etc/kube-router/kubeconfig ${TMP}; 129 | mv ${TMP} /var/lib/kube-router/kubeconfig; 130 | fi 131 | volumeMounts: 132 | - mountPath: /etc/cni/net.d 133 | name: cni-conf-dir 134 | - mountPath: /etc/kube-router 135 | name: kube-router-cfg 136 | - name: kubeconfig 137 | mountPath: /var/lib/kube-router 138 | hostNetwork: true 139 | tolerations: 140 | - key: CriticalAddonsOnly 141 | operator: Exists 142 | - effect: NoSchedule 143 | key: node-role.kubernetes.io/master 144 | operator: Exists 145 | - effect: NoSchedule 146 | key: node.kubernetes.io/not-ready 147 | operator: Exists 148 | - effect: NoSchedule 149 | key: NoSchedulabe 150 | operator: Exists 151 | volumes: 152 | - name: lib-modules 153 | hostPath: 154 | path: /lib/modules 155 | - name: cni-conf-dir 156 | hostPath: 157 | path: /etc/cni/net.d 158 | - name: kube-router-cfg 159 | configMap: 160 | name: kube-router-cfg 161 | - name: kubeconfig 162 | hostPath: 163 | path: /var/lib/kube-router 164 | - name: xtables-lock 165 | hostPath: 166 | path: /run/xtables.lock 167 | type: FileOrCreate 168 | 169 | --- 170 | apiVersion: v1 171 | kind: ServiceAccount 172 | metadata: 173 | name: kube-router 174 | namespace: kube-system 175 | 176 | --- 177 | kind: ClusterRole 178 | apiVersion: rbac.authorization.k8s.io/v1beta1 179 | metadata: 180 | name: kube-router 181 | namespace: kube-system 182 | rules: 183 | - apiGroups: 184 | - "" 185 | resources: 186 | - namespaces 187 | - pods 188 | - services 189 | - nodes 190 | - endpoints 191 | verbs: 192 | - list 193 | - get 194 | - watch 195 | - apiGroups: 196 | - "networking.k8s.io" 197 | resources: 198 | - networkpolicies 199 | verbs: 200 | - list 201 | - get 202 | - watch 203 | - apiGroups: 204 | - extensions 205 | resources: 206 | - networkpolicies 207 | verbs: 208 | - get 209 | - list 210 | - watch 211 | --- 212 | kind: ClusterRoleBinding 213 | apiVersion: rbac.authorization.k8s.io/v1beta1 214 | metadata: 215 | name: kube-router 216 | roleRef: 217 | apiGroup: rbac.authorization.k8s.io 218 | kind: ClusterRole 219 | name: kube-router 220 | subjects: 221 | - kind: ServiceAccount 222 | name: kube-router 223 | namespace: kube-system 224 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/traefik.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: ingress-traefik 6 | --- 7 | apiVersion: apiextensions.k8s.io/v1beta1 8 | kind: CustomResourceDefinition 9 | metadata: 10 | name: ingressroutes.traefik.containo.us 11 | 12 | spec: 13 | group: traefik.containo.us 14 | version: v1alpha1 15 | names: 16 | kind: IngressRoute 17 | plural: ingressroutes 18 | singular: ingressroute 19 | scope: Namespaced 20 | 21 | --- 22 | apiVersion: apiextensions.k8s.io/v1beta1 23 | kind: CustomResourceDefinition 24 | metadata: 25 | name: middlewares.traefik.containo.us 26 | 27 | spec: 28 | group: traefik.containo.us 29 | version: v1alpha1 30 | names: 31 | kind: Middleware 32 | plural: middlewares 33 | singular: middleware 34 | scope: Namespaced 35 | 36 | --- 37 | apiVersion: apiextensions.k8s.io/v1beta1 38 | kind: CustomResourceDefinition 39 | metadata: 40 | name: ingressroutetcps.traefik.containo.us 41 | 42 | spec: 43 | group: traefik.containo.us 44 | version: v1alpha1 45 | names: 46 | kind: IngressRouteTCP 47 | plural: ingressroutetcps 48 | singular: ingressroutetcp 49 | 
scope: Namespaced 50 | 51 | --- 52 | apiVersion: apiextensions.k8s.io/v1beta1 53 | kind: CustomResourceDefinition 54 | metadata: 55 | name: ingressrouteudps.traefik.containo.us 56 | 57 | spec: 58 | group: traefik.containo.us 59 | version: v1alpha1 60 | names: 61 | kind: IngressRouteUDP 62 | plural: ingressrouteudps 63 | singular: ingressrouteudp 64 | scope: Namespaced 65 | 66 | --- 67 | apiVersion: apiextensions.k8s.io/v1beta1 68 | kind: CustomResourceDefinition 69 | metadata: 70 | name: tlsoptions.traefik.containo.us 71 | 72 | spec: 73 | group: traefik.containo.us 74 | version: v1alpha1 75 | names: 76 | kind: TLSOption 77 | plural: tlsoptions 78 | singular: tlsoption 79 | scope: Namespaced 80 | 81 | --- 82 | apiVersion: apiextensions.k8s.io/v1beta1 83 | kind: CustomResourceDefinition 84 | metadata: 85 | name: tlsstores.traefik.containo.us 86 | 87 | spec: 88 | group: traefik.containo.us 89 | version: v1alpha1 90 | names: 91 | kind: TLSStore 92 | plural: tlsstores 93 | singular: tlsstore 94 | scope: Namespaced 95 | 96 | --- 97 | apiVersion: apiextensions.k8s.io/v1beta1 98 | kind: CustomResourceDefinition 99 | metadata: 100 | name: traefikservices.traefik.containo.us 101 | 102 | spec: 103 | group: traefik.containo.us 104 | version: v1alpha1 105 | names: 106 | kind: TraefikService 107 | plural: traefikservices 108 | singular: traefikservice 109 | scope: Namespaced 110 | 111 | --- 112 | kind: ClusterRole 113 | apiVersion: rbac.authorization.k8s.io/v1beta1 114 | metadata: 115 | name: traefik-ingress-controller 116 | 117 | rules: 118 | - apiGroups: 119 | - "" 120 | resources: 121 | - services 122 | - endpoints 123 | - secrets 124 | verbs: 125 | - get 126 | - list 127 | - watch 128 | - apiGroups: 129 | - extensions 130 | resources: 131 | - ingresses 132 | verbs: 133 | - get 134 | - list 135 | - watch 136 | - apiGroups: 137 | - extensions 138 | resources: 139 | - ingresses/status 140 | verbs: 141 | - update 142 | - apiGroups: 143 | - traefik.containo.us 144 | resources: 145 | - middlewares 146 | - ingressroutes 147 | - traefikservices 148 | - ingressroutetcps 149 | - ingressrouteudps 150 | - tlsoptions 151 | - tlsstores 152 | - serverstransports 153 | verbs: 154 | - get 155 | - list 156 | - watch 157 | 158 | --- 159 | kind: ClusterRoleBinding 160 | apiVersion: rbac.authorization.k8s.io/v1beta1 161 | metadata: 162 | name: traefik-ingress-controller 163 | 164 | roleRef: 165 | apiGroup: rbac.authorization.k8s.io 166 | kind: ClusterRole 167 | name: traefik-ingress-controller 168 | subjects: 169 | - kind: ServiceAccount 170 | name: traefik-ingress-controller 171 | namespace: ingress-traefik 172 | --- 173 | apiVersion: v1 174 | kind: Service 175 | metadata: 176 | name: traefik 177 | namespace: ingress-traefik 178 | spec: 179 | ports: 180 | - protocol: TCP 181 | name: web 182 | port: 80 183 | - protocol: TCP 184 | name: admin 185 | port: 8080 186 | - protocol: TCP 187 | name: websecure 188 | port: 443 189 | selector: 190 | app: traefik 191 | type: NodePort 192 | --- 193 | apiVersion: v1 194 | kind: ServiceAccount 195 | metadata: 196 | namespace: ingress-traefik 197 | name: traefik-ingress-controller 198 | 199 | --- 200 | kind: Deployment 201 | apiVersion: apps/v1 202 | metadata: 203 | namespace: ingress-traefik 204 | name: traefik 205 | labels: 206 | app: traefik 207 | 208 | spec: 209 | replicas: 1 210 | selector: 211 | matchLabels: 212 | app: traefik 213 | template: 214 | metadata: 215 | labels: 216 | app: traefik 217 | spec: 218 | serviceAccountName: traefik-ingress-controller 219 | containers: 220 | - 
name: traefik 221 | image: traefik:{{ agorakube_features.ingress.release }} 222 | args: 223 | - --api.insecure 224 | - --accesslog 225 | - --entrypoints.web.Address=:80 226 | - --entrypoints.websecure.Address=:443 227 | - --providers.kubernetescrd 228 | #- --certificatesresolvers.myresolver.acme.tlschallenge 229 | #- --certificatesresolvers.myresolver.acme.email=foo@you.com 230 | #- --certificatesresolvers.myresolver.acme.storage=acme.json 231 | # Please note that this is the staging Let's Encrypt server. 232 | # Once you get things working, you should remove that whole line altogether. 233 | #- --certificatesresolvers.myresolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory 234 | ports: 235 | - name: web 236 | containerPort: 80 237 | - name: websecure 238 | containerPort: 443 239 | - name: admin 240 | containerPort: 8080 241 | --- 242 | kind: ClusterRoleBinding 243 | apiVersion: rbac.authorization.k8s.io/v1beta1 244 | metadata: 245 | name: traefik-ingress-controller 246 | 247 | roleRef: 248 | apiGroup: rbac.authorization.k8s.io 249 | kind: ClusterRole 250 | name: traefik-ingress-controller 251 | subjects: 252 | - kind: ServiceAccount 253 | name: traefik-ingress-controller 254 | namespace: ingress-traefik 255 | -------------------------------------------------------------------------------- /roles/post-scripts/templates/logging-efk.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: fluentd-config 6 | namespace: kube-login 7 | data: 8 | kubernetes.conf: | 9 | 10 | @type tail 11 | @id in_tail_kube_apiserver_audit 12 | multiline_flush_interval 5s 13 | path /var/log/kubernetes/kube-apiserver.log 14 | pos_file /var/log/kube-apiserver-audit.log.pos 15 | tag kube-apiserver-audit 16 | 17 | @type json 18 | 19 | 20 | 21 | 22 | @type elasticsearch 23 | host elasticsearch 24 | port 9200 25 | logstash_format true 26 | 27 | --- 28 | apiVersion: v1 29 | kind: ServiceAccount 30 | metadata: 31 | name: fluentd 32 | namespace: kube-login 33 | --- 34 | apiVersion: rbac.authorization.k8s.io/v1 35 | kind: ClusterRole 36 | metadata: 37 | name: fluentd 38 | namespace: kube-login 39 | rules: 40 | - apiGroups: 41 | - "" 42 | resources: 43 | - pods 44 | - namespaces 45 | verbs: 46 | - get 47 | - list 48 | - watch 49 | --- 50 | kind: ClusterRoleBinding 51 | apiVersion: rbac.authorization.k8s.io/v1 52 | metadata: 53 | name: fluentd 54 | roleRef: 55 | kind: ClusterRole 56 | name: fluentd 57 | apiGroup: rbac.authorization.k8s.io 58 | subjects: 59 | - kind: ServiceAccount 60 | name: fluentd 61 | namespace: kube-login 62 | --- 63 | apiVersion: apps/v1 64 | kind: StatefulSet 65 | metadata: 66 | name: es-cluster 67 | namespace: kube-login 68 | spec: 69 | serviceName: elasticsearch 70 | replicas: 1 71 | selector: 72 | matchLabels: 73 | app: elasticsearch 74 | template: 75 | metadata: 76 | labels: 77 | app: elasticsearch 78 | spec: 79 | containers: 80 | - name: elasticsearch 81 | image: docker.elastic.co/elasticsearch/elasticsearch:7.12.0 82 | resources: 83 | limits: 84 | cpu: 1000m 85 | requests: 86 | cpu: 100m 87 | ports: 88 | - containerPort: 9200 89 | name: rest 90 | protocol: TCP 91 | - containerPort: 9300 92 | name: inter-node 93 | protocol: TCP 94 | volumeMounts: 95 | - name: data 96 | mountPath: /usr/share/elasticsearch/data 97 | env: 98 | - name: cluster.name 99 | value: k8s-logs 100 | - name: node.name 101 | valueFrom: 102 | fieldRef: 103 | fieldPath: metadata.name 104 | - name: 
discovery.seed_hosts 105 | value: "es-cluster-0.elasticsearch" 106 | # value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch" 107 | - name: cluster.initial_master_nodes 108 | value: "es-cluster-0" 109 | # value: "es-cluster-0,es-cluster-1,es-cluster-2" 110 | - name: ES_JAVA_OPTS 111 | value: "-Xms512m -Xmx512m" 112 | initContainers: 113 | - name: fix-permissions 114 | image: busybox 115 | command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"] 116 | securityContext: 117 | privileged: true 118 | volumeMounts: 119 | - name: data 120 | mountPath: /usr/share/elasticsearch/data 121 | - name: increase-vm-max-map 122 | image: busybox 123 | command: ["sysctl", "-w", "vm.max_map_count=262144"] 124 | securityContext: 125 | privileged: true 126 | - name: increase-fd-ulimit 127 | image: busybox 128 | command: ["sh", "-c", "ulimit -n 65536"] 129 | securityContext: 130 | privileged: true 131 | volumes: 132 | - name: data 133 | emptyDir: {} 134 | --- 135 | kind: Service 136 | apiVersion: v1 137 | metadata: 138 | name: elasticsearch 139 | namespace: kube-login 140 | labels: 141 | app: elasticsearch 142 | spec: 143 | selector: 144 | app: elasticsearch 145 | ports: 146 | - port: 9200 147 | name: rest 148 | - port: 9300 149 | name: inter-node 150 | --- 151 | apiVersion: v1 152 | kind: Service 153 | metadata: 154 | name: kibana 155 | namespace: kube-login 156 | labels: 157 | app: kibana 158 | spec: 159 | ports: 160 | - port: 5601 161 | selector: 162 | app: kibana 163 | type: NodePort 164 | --- 165 | apiVersion: apps/v1 166 | kind: Deployment 167 | metadata: 168 | name: kibana 169 | namespace: kube-login 170 | labels: 171 | app: kibana 172 | spec: 173 | replicas: 1 174 | selector: 175 | matchLabels: 176 | app: kibana 177 | template: 178 | metadata: 179 | labels: 180 | app: kibana 181 | spec: 182 | containers: 183 | - name: kibana 184 | image: docker.elastic.co/kibana/kibana:7.12.0 185 | resources: 186 | limits: 187 | cpu: 1000m 188 | requests: 189 | cpu: 100m 190 | env: 191 | - name: ELASTICSEARCH_URL 192 | value: http://elasticsearch:9200 193 | ports: 194 | - containerPort: 5601 195 | --- 196 | apiVersion: apps/v1 197 | kind: DaemonSet 198 | metadata: 199 | name: fluentd-elasticsearch 200 | namespace: kube-login 201 | labels: 202 | k8s-app: fluentd-logging 203 | spec: 204 | selector: 205 | matchLabels: 206 | name: fluentd-elasticsearch 207 | template: 208 | metadata: 209 | labels: 210 | name: fluentd-elasticsearch 211 | spec: 212 | serviceAccount: fluentd 213 | serviceAccountName: fluentd 214 | tolerations: 215 | - effect: NoSchedule 216 | operator: Exists 217 | containers: 218 | - name: fluentd-elasticsearch 219 | image: quay.io/fluentd_elasticsearch/fluentd:v3.2.0 220 | resources: 221 | limits: 222 | memory: 200Mi 223 | requests: 224 | cpu: 100m 225 | memory: 200Mi 226 | volumeMounts: 227 | - name: varlog 228 | mountPath: /var/log 229 | - name: fluentd 230 | mountPath: /etc/fluent/config.d/ 231 | - name: varlibdockercontainers 232 | mountPath: /var/lib/docker/containers 233 | readOnly: true 234 | terminationGracePeriodSeconds: 30 235 | volumes: 236 | - name: varlog 237 | hostPath: 238 | path: /var/log 239 | - name: varlibdockercontainers 240 | hostPath: 241 | path: /var/lib/docker/containers 242 | - name: fluentd 243 | configMap: 244 | name: fluentd-config 245 | --------------------------------------------------------------------------------
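The logging-efk.yaml manifest above deploys a single-node Elasticsearch StatefulSet, a Kibana Deployment exposed as a NodePort Service, and a fluentd DaemonSet in the kube-login namespace. A rough way to check the stack after it is applied, assuming the same admin kubeconfig path used by the other post-scripts tasks; the commands are illustrative only:

# Hypothetical verification commands, not part of the repository.
export KUBECONFIG=/var/agorakube/pki/kubeconfigs/admin/admin.conf
# Elasticsearch, Kibana and fluentd pods should all reach Running.
kubectl get pods -n kube-login
# Find the NodePort assigned to Kibana (container port 5601) to reach the UI on any node IP.
kubectl get service kibana -n kube-login -o jsonpath='{.spec.ports[0].nodePort}'
# With logstash_format enabled, fluentd writes daily logstash-* indices; list them from the ES pod.
kubectl exec -n kube-login es-cluster-0 -- curl -s http://localhost:9200/_cat/indices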