├── .gitignore ├── README.md ├── create-fedora-coreos-vm ├── ansible.cfg ├── create-fedora-coreos-vm.yaml └── roles │ └── create-fedora-coreos-vm │ ├── .travis.yml │ ├── README.md │ ├── defaults │ └── main.yml │ ├── tasks │ └── main.yml │ └── templates │ └── fcos-template.ign.j2 ├── create-ha-cluster ├── README.md ├── Vagrantfile ├── ansible.cfg ├── kube-flannel-v0.16.3.yml ├── playbook │ ├── cluster_inventory.yml │ ├── cluster_playbook.yml │ ├── lb_inventory.yml │ └── lb_playbook.yml └── setup.png ├── create-kvm-guest-vm ├── ansible.cfg ├── create-kvm-guest-vm.yaml └── roles │ └── kvm-provision │ ├── .travis.yml │ ├── README.md │ ├── defaults │ └── main.yml │ ├── tasks │ └── main.yml │ └── templates │ └── vm-template.xml.j2 ├── create-single-master-cluster ├── README.md ├── Vagrantfile ├── ansible.cfg ├── kube-flannel-v0.24.2.yml ├── playbook │ ├── inventory.yml │ └── playbook.yml └── setup.png ├── create-virtualbox-vm ├── README.md └── Vagrantfile ├── getting-started-with-ansible ├── Ansible.png ├── README.md ├── Vagrantfile ├── inventory.yaml ├── playbook.yml └── sample_inventory.yaml ├── jaegertracing ├── README.md ├── cluster-issuer.yml ├── example-hotrod.yaml ├── jaeger-operator-v1.42.0.yml ├── simplest.yaml └── trust-bundle.yml ├── load-balancing-services ├── README.md ├── my-nginx-deploy.yaml ├── my-nginx-ingress.yaml ├── my-nginx-svc.yaml ├── patch-configmap.yaml └── setup.png ├── metallb ├── README.md ├── my-nginx-deploy.yaml ├── my-nginx-ingress.yaml ├── my-nginx-svc.yaml └── values.yaml ├── nfs-subdir-external-provisioner ├── README.md ├── Setup.png ├── Vagrantfile ├── my-nginx-deploy.yaml ├── my-nginx-pvc.yaml └── my-nginx-svc.yaml ├── rook-ceph ├── README.md ├── Vagrantfile └── toolbox.yml ├── setup-ha-etcd-cluster ├── README.md ├── Vagrantfile ├── ansible.cfg ├── kube-flannel-v0.16.3.yml ├── playbook │ ├── etcd_cluster_inventory.yml │ ├── etcd_cluster_playbook.yml │ ├── k8s_cluster_inventory.yml │ └── k8s_cluster_playbook.yml └── setup.png └── setup-load-balancer-for-kube-apiservers ├── README.md ├── Vagrantfile ├── ansible.cfg ├── kube-flannel-v0.16.3.yml ├── playbook ├── cluster_inventory.yml ├── cluster_playbook.yml ├── load_balancer_inventory.yml └── load_balancer_playbook.yml └── setup.png /.gitignore: -------------------------------------------------------------------------------- 1 | /.idea/ 2 | /create-ha-cluster/.vagrant/ 3 | /create-virtualbox-vm/.vagrant/ 4 | /getting-started-with-ansible/.vagrant/ 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # A playground for Kubernetes 2 | 3 | ## VM Provision 4 | * [Create a Virtualbox guest VM by Vagrant](create-virtualbox-vm) 5 | * [Create a KVM guest VM by Ansible](create-kvm-guest-vm) 6 | 7 | ## Cluster Provision 8 | * [Getting Started with Ansible](getting-started-with-ansible) 9 | * [Create a single-master Kubernetes cluster with kubeadm by Ansible](create-single-master-cluster) 10 | * [Setup load balancer for kube-apiservers](setup-load-balancer-for-kube-apiservers) 11 | * [Create a HA Kubernetes cluster with kubeadm, keepalived, and haproxy by Ansible](create-ha-cluster) 12 | * [Set up a High Availability etcd Cluster with kubeadm by Ansible](setup-ha-etcd-cluster) 13 | * [Load balancing requests to Kubernetes services with external haproxy and NGINX Ingress Controller](load-balancing-services) 14 | * [Install MetalLB as a network load balancer on bare-metal cluster](metallb) 15 | 16 | ## Storage 17 | * [Dynamic provision Persistent Volumes with NFS Subdir External Provisioner](nfs-subdir-external-provisioner) 18 | * [Cloud-Native Storage solution with Rook Ceph](rook-ceph) 19 | 20 | ## Observability 21 | * [Distributed tracing with Jaeger](jaegertracing) 22 | -------------------------------------------------------------------------------- /create-fedora-coreos-vm/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | -------------------------------------------------------------------------------- /create-fedora-coreos-vm/create-fedora-coreos-vm.yaml: -------------------------------------------------------------------------------- 1 | - name: Create Fedora CoreOS VM 2 | hosts: all 3 | gather_facts: yes 4 | become: yes 5 | tasks: 6 | - name: Create Fedora CoreOS VM 7 | include_role: 8 | name: create-fedora-coreos-vm 9 | -------------------------------------------------------------------------------- /create-fedora-coreos-vm/roles/create-fedora-coreos-vm/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /create-fedora-coreos-vm/roles/create-fedora-coreos-vm/README.md: -------------------------------------------------------------------------------- 1 | Create Fedora CoreOS VM 2 | ========= 3 | 4 | Create Fedora CoreOS VM. 5 | 6 | ```shell 7 | ansible-playbook -i YOUR_HOST -u YOUR_SSH_USER --extra-vars "vm_name=$VM_NAME vm_vcpus=4 vm_memory_mb=16384 vm_disk 8 | _size=100G ssh_user_password_hash=YOUR_PASSWORD_HASH ssh_authorized_key=YOUR_SSH_PUBLIC_KEY" create-fedora-coreos-vm.yaml 9 | ``` 10 | -------------------------------------------------------------------------------- /create-fedora-coreos-vm/roles/create-fedora-coreos-vm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for create-fedora-coreos-vm 3 | base_image_pool_dir: "/images/base" 4 | base_image_name: fedora-coreos-39.20240128.3.0-qemu.x86_64.qcow2 5 | vms_image_pool_dir: "/images/vms" 6 | vm_name: my-awesome-vm 7 | vm_vcpus: 2 8 | vm_memory_mb: 4096 9 | vm_disk_size: 20G 10 | ssh_user: core 11 | # https://docs.fedoraproject.org/en-US/fedora-coreos/authentication/#_using_password_authentication 12 | ssh_user_password_hash: "WILL_BE_SET_AT_RUNTIME" 13 | ssh_authorized_key: "WILL_BE_SET_AT_RUNTIME" 14 | -------------------------------------------------------------------------------- /create-fedora-coreos-vm/roles/create-fedora-coreos-vm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for create-fedora-coreos-vm 3 | - name: Check preconditions 4 | block: 5 | - name: Check if vm exists 6 | stat: 7 | path: "{{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.xml" 8 | register: stat_result 9 | - name: Fail if vm exists 10 | fail: 11 | msg: VM already exists 12 | when: stat_result.stat.exists 13 | - name: Fail if vm is not named 14 | fail: 15 | msg: No name provided for the new VM 16 | when: vm_name | length == 0 17 | 18 | - name: "Create a VM directory: {{ vms_image_pool_dir }}/{{ vm_name }}" 19 | file: 20 | path: "{{ vms_image_pool_dir }}/{{ vm_name }}" 21 | state: directory 22 | 23 | - name: Copy base image to the VM directory 24 | become: true 25 | copy: 26 | remote_src: true 27 | src: "{{ base_image_pool_dir }}/{{ base_image_name }}" 28 | dest: "{{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.qcow2" 29 | 30 | - name: Resize the VM image 31 | become: true 32 | shell: "qemu-img resize {{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.qcow2 +{{ vm_disk_size }}" 33 | 34 | - name: Generate new mac address for the new VM 35 | shell: bash -c "printf '00:16:3E:%02X:%02X:%02X\n' $[RANDOM%256] $[RANDOM%256] $[RANDOM%256]" 36 | register: new_mac 37 | 38 | - name: "Save new mac address for the new VM: {{ new_mac.stdout }}" 39 | set_fact: 40 | vm_mac_address: "{{ new_mac.stdout }}" 41 | vm_uuid: "{{ ansible_date_time.iso8601_micro | to_uuid }}" 42 | 43 | - name: Copy fcos.ign into the VM directory 44 | template: 45 | src: fcos-template.ign.j2 46 | dest: "{{ vms_image_pool_dir }}/{{ vm_name }}/fcos.ign" 47 | mode: 0644 48 | 49 | - name: "Start the new VM: {{ vm_name }}" 50 | become: true 51 | shell: "virt-install -n {{ vm_name }} --vcpus {{ vm_vcpus }} -r {{ vm_memory_mb }} \ 52 | --os-variant=fedora31 --import \ 53 | --network network=host-bridge,model=virtio,mac={{ vm_mac_address }} \ 54 | --disk={{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.qcow2,format=qcow2,bus=virtio \ 55 | --noautoconsole \ 56 | --qemu-commandline=\"-fw_cfg name=opt/com.coreos/config,file={{ vms_image_pool_dir }}/{{ vm_name }}/fcos.ign\"" 57 | -------------------------------------------------------------------------------- /create-fedora-coreos-vm/roles/create-fedora-coreos-vm/templates/fcos-template.ign.j2: -------------------------------------------------------------------------------- 1 | { 2 | "ignition": { 3 | "version": "3.0.0" 4 | }, 5 | "passwd": { 6 | "users": [ 7 | { 8 | "name": "{{ ssh_user }}", 9 | "groups": [ 10 | "sudo", 11 | "docker" 12 | ], 13 | "passwordHash": "{{ ssh_user_password_hash }}", 14 | "sshAuthorizedKeys": [ 15 | "{{ ssh_authorized_key }}" 16 | ] 17 | } 18 | ] 19 | }, 20 | "storage": { 21 | "files": [ 22 | { 23 | "path": "/etc/hostname", 24 | "contents": { 25 | "compression": "", 26 | "source": "data:,{{ vm_name }}%0A" 27 | } 28 | } 29 | ] 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /create-ha-cluster/README.md: -------------------------------------------------------------------------------- 1 | # Setup a multi-master Kubernetes cluster with kubeadm 2 | 3 | This will set up a new multi-master Kubernetes cluster that has 4 | * 3 masters, each master has 2 CPUs and 2 GBs RAM 5 | * 3 workers, each worker has 2 CPUs and 2 GBs RAM 6 | * 2 load balancers, each load balancer has 1 CPU and 1 GB RAM 7 | 8 | ![setup.png](setup.png?raw=true "setup.png") 9 | 10 | ## Prerequisites 11 | Before you want to try this on your local, here are requirements 12 | * Your computer must have at least 14 CPUs, 14 GBs RAM 13 | * A public key in .ssh/id_rsa.pub in your home directory 14 | * Directly download [Virtualbox](https://www.virtualbox.org/) and install, or use homebrew 15 | ``` 16 | brew install --cask virtualbox 17 | ``` 18 | * Vagrant 19 | ``` 20 | brew install --cask vagrant 21 | ``` 22 | * [(Optional) Vagrant Manager](http://vagrantmanager.com/) 23 | ``` 24 | brew install --cask vagrant-manager 25 | ``` 26 | 27 | ## Provision VMs 28 | ``` 29 | cd create-ha-cluster 30 | vagrant up 31 | ``` 32 | 33 | Let's ssh into the first master VM 34 | ``` 35 | ssh ci@172.16.1.11 36 | ``` 37 | 38 | ## Setup load balancer 39 | ``` 40 | ansible-playbook -i playbook/lb_inventory.yml playbook/lb_playbook.yml --extra-vars "cluster_vip=172.16.0.16" 41 | ``` 42 | 43 | Let's ssh into the load balancer VM by the VIP, and check haproxy service status 44 | ``` 45 | ssh ci@172.16.0.16 46 | systemctl status haproxy 47 | ``` 48 | 49 | ## Setup Kubernetes cluster 50 | ``` 51 | ansible-playbook -i playbook/cluster_inventory.yml playbook/cluster_playbook.yml --extra-vars "cluster_vip=172.16.0.16" 52 | ``` 53 | 54 | Let's ssh into the first master VM, and verify the cluster 55 | ``` 56 | ssh ci@172.16.1.11 57 | kubectl get nodes 58 | ``` 59 | 60 | Download kubeconfig to your local 61 | ``` 62 | mkdir ~/.kube-local 63 | scp ci@172.16.1.11:/home/ci/.kube/config ~/.kube-local/config 64 | export KUBECONFIG=$HOME/.kube-local/config && kubectl get nodes 65 | ``` 66 | -------------------------------------------------------------------------------- /create-ha-cluster/Vagrantfile: -------------------------------------------------------------------------------- 1 | Vagrant.configure("2") do |config| 2 | 3 | # Load Balancer Nodes 4 | LoadBalancerCount = 2 5 | (1..LoadBalancerCount).each do |i| 6 | config.vm.define "k8s-lb-#{i}" do |lb| 7 | lb.vm.box = "generic/ubuntu2004" 8 | lb.vm.box_version = "3.6.8" 9 | lb.vm.hostname = "k8s-lb-#{i}" 10 | lb.vm.network "private_network", ip: "172.16.0.1#{i}" 11 | 12 | lb.vm.provider "virtualbox" do |vb| 13 | vb.name = "k8s-lb-#{i}" 14 | vb.memory = 1024 15 | vb.cpus = 1 16 | end 17 | end 18 | end 19 | 20 | # Master Nodes 21 | MasterCount = 3 22 | (1..MasterCount).each do |i| 23 | config.vm.define "k8s-master-#{i}" do |master| 24 | master.vm.box = "generic/ubuntu2004" 25 | master.vm.box_version = "3.6.8" 26 | master.vm.hostname = "k8s-master-#{i}" 27 | master.vm.network "private_network", ip: "172.16.1.1#{i}" 28 | 29 | master.vm.provider "virtualbox" do |vb| 30 | vb.name = "k8s-master-#{i}" 31 | vb.memory = 2048 32 | vb.cpus = 2 33 | end 34 | end 35 | end 36 | 37 | # Worker Nodes 38 | WorkerCount = 3 39 | (1..WorkerCount).each do |i| 40 | config.vm.define "k8s-worker-#{i}" do |worker| 41 | worker.vm.box = "generic/ubuntu2004" 42 | worker.vm.box_version = "3.6.8" 43 | worker.vm.hostname = "k8s-worker-#{i}" 44 | worker.vm.network "private_network", ip: "172.16.2.1#{i}" 45 | 46 | worker.vm.provider "virtualbox" do |vb| 47 | vb.name = "k8s-worker-#{i}" 48 | vb.memory = 2048 49 | vb.cpus = 2 50 | end 51 | end 52 | end 53 | 54 | config.vm.provision "shell" do |s| 55 | ssh_pub_key = File.readlines("#{Dir.home}/.ssh/id_rsa.pub").first.strip 56 | s.inline = <<-SHELL 57 | # Create ci user 58 | useradd -s /bin/bash -d /home/ci/ -m -G sudo ci 59 | echo 'ci ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers 60 | mkdir -p /home/ci/.ssh && chown -R ci /home/ci/.ssh 61 | echo #{ssh_pub_key} >> /home/ci/.ssh/authorized_keys 62 | SHELL 63 | end 64 | 65 | end 66 | -------------------------------------------------------------------------------- /create-ha-cluster/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | -------------------------------------------------------------------------------- /create-ha-cluster/kube-flannel-v0.16.3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: psp.flannel.unprivileged 6 | annotations: 7 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default 8 | seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default 9 | apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default 10 | apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default 11 | spec: 12 | privileged: false 13 | volumes: 14 | - configMap 15 | - secret 16 | - emptyDir 17 | - hostPath 18 | allowedHostPaths: 19 | - pathPrefix: "/etc/cni/net.d" 20 | - pathPrefix: "/etc/kube-flannel" 21 | - pathPrefix: "/run/flannel" 22 | readOnlyRootFilesystem: false 23 | # Users and groups 24 | runAsUser: 25 | rule: RunAsAny 26 | supplementalGroups: 27 | rule: RunAsAny 28 | fsGroup: 29 | rule: RunAsAny 30 | # Privilege Escalation 31 | allowPrivilegeEscalation: false 32 | defaultAllowPrivilegeEscalation: false 33 | # Capabilities 34 | allowedCapabilities: ['NET_ADMIN', 'NET_RAW'] 35 | defaultAddCapabilities: [] 36 | requiredDropCapabilities: [] 37 | # Host namespaces 38 | hostPID: false 39 | hostIPC: false 40 | hostNetwork: true 41 | hostPorts: 42 | - min: 0 43 | max: 65535 44 | # SELinux 45 | seLinux: 46 | # SELinux is unused in CaaSP 47 | rule: 'RunAsAny' 48 | --- 49 | kind: ClusterRole 50 | apiVersion: rbac.authorization.k8s.io/v1 51 | metadata: 52 | name: flannel 53 | rules: 54 | - apiGroups: ['extensions'] 55 | resources: ['podsecuritypolicies'] 56 | verbs: ['use'] 57 | resourceNames: ['psp.flannel.unprivileged'] 58 | - apiGroups: 59 | - "" 60 | resources: 61 | - pods 62 | verbs: 63 | - get 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - nodes 68 | verbs: 69 | - list 70 | - watch 71 | - apiGroups: 72 | - "" 73 | resources: 74 | - nodes/status 75 | verbs: 76 | - patch 77 | --- 78 | kind: ClusterRoleBinding 79 | apiVersion: rbac.authorization.k8s.io/v1 80 | metadata: 81 | name: flannel 82 | roleRef: 83 | apiGroup: rbac.authorization.k8s.io 84 | kind: ClusterRole 85 | name: flannel 86 | subjects: 87 | - kind: ServiceAccount 88 | name: flannel 89 | namespace: kube-system 90 | --- 91 | apiVersion: v1 92 | kind: ServiceAccount 93 | metadata: 94 | name: flannel 95 | namespace: kube-system 96 | --- 97 | kind: ConfigMap 98 | apiVersion: v1 99 | metadata: 100 | name: kube-flannel-cfg 101 | namespace: kube-system 102 | labels: 103 | tier: node 104 | app: flannel 105 | data: 106 | cni-conf.json: | 107 | { 108 | "name": "cbr0", 109 | "cniVersion": "0.3.1", 110 | "plugins": [ 111 | { 112 | "type": "flannel", 113 | "delegate": { 114 | "hairpinMode": true, 115 | "isDefaultGateway": true 116 | } 117 | }, 118 | { 119 | "type": "portmap", 120 | "capabilities": { 121 | "portMappings": true 122 | } 123 | } 124 | ] 125 | } 126 | net-conf.json: | 127 | { 128 | "Network": "10.244.0.0/16", 129 | "Backend": { 130 | "Type": "vxlan" 131 | } 132 | } 133 | --- 134 | apiVersion: apps/v1 135 | kind: DaemonSet 136 | metadata: 137 | name: kube-flannel-ds 138 | namespace: kube-system 139 | labels: 140 | tier: node 141 | app: flannel 142 | spec: 143 | selector: 144 | matchLabels: 145 | app: flannel 146 | template: 147 | metadata: 148 | labels: 149 | tier: node 150 | app: flannel 151 | spec: 152 | affinity: 153 | nodeAffinity: 154 | requiredDuringSchedulingIgnoredDuringExecution: 155 | nodeSelectorTerms: 156 | - matchExpressions: 157 | - key: kubernetes.io/os 158 | operator: In 159 | values: 160 | - linux 161 | hostNetwork: true 162 | priorityClassName: system-node-critical 163 | tolerations: 164 | - operator: Exists 165 | effect: NoSchedule 166 | serviceAccountName: flannel 167 | initContainers: 168 | - name: install-cni-plugin 169 | #image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le (dockerhub limitations may apply) 170 | image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1 171 | command: 172 | - cp 173 | args: 174 | - -f 175 | - /flannel 176 | - /opt/cni/bin/flannel 177 | volumeMounts: 178 | - name: cni-plugin 179 | mountPath: /opt/cni/bin 180 | - name: install-cni 181 | #image: flannelcni/flannel:v0.16.1 for ppc64le (dockerhub limitations may apply) 182 | image: rancher/mirrored-flannelcni-flannel:v0.16.1 183 | command: 184 | - cp 185 | args: 186 | - -f 187 | - /etc/kube-flannel/cni-conf.json 188 | - /etc/cni/net.d/10-flannel.conflist 189 | volumeMounts: 190 | - name: cni 191 | mountPath: /etc/cni/net.d 192 | - name: flannel-cfg 193 | mountPath: /etc/kube-flannel/ 194 | containers: 195 | - name: kube-flannel 196 | #image: flannelcni/flannel:v0.16.1 for ppc64le (dockerhub limitations may apply) 197 | image: rancher/mirrored-flannelcni-flannel:v0.16.1 198 | command: 199 | - /opt/bin/flanneld 200 | args: 201 | - --ip-masq 202 | - --kube-subnet-mgr 203 | - --iface=eth1 204 | resources: 205 | requests: 206 | cpu: "100m" 207 | memory: "50Mi" 208 | limits: 209 | cpu: "100m" 210 | memory: "50Mi" 211 | securityContext: 212 | privileged: false 213 | capabilities: 214 | add: ["NET_ADMIN", "NET_RAW"] 215 | env: 216 | - name: POD_NAME 217 | valueFrom: 218 | fieldRef: 219 | fieldPath: metadata.name 220 | - name: POD_NAMESPACE 221 | valueFrom: 222 | fieldRef: 223 | fieldPath: metadata.namespace 224 | volumeMounts: 225 | - name: run 226 | mountPath: /run/flannel 227 | - name: flannel-cfg 228 | mountPath: /etc/kube-flannel/ 229 | - name: xtables-lock 230 | mountPath: /run/xtables.lock 231 | volumes: 232 | - name: run 233 | hostPath: 234 | path: /run/flannel 235 | - name: cni-plugin 236 | hostPath: 237 | path: /opt/cni/bin 238 | - name: cni 239 | hostPath: 240 | path: /etc/cni/net.d 241 | - name: flannel-cfg 242 | configMap: 243 | name: kube-flannel-cfg 244 | - name: xtables-lock 245 | hostPath: 246 | path: /run/xtables.lock 247 | type: FileOrCreate 248 | -------------------------------------------------------------------------------- /create-ha-cluster/playbook/cluster_inventory.yml: -------------------------------------------------------------------------------- 1 | all: 2 | children: 3 | masters: 4 | hosts: 5 | 172.16.1.11: null 6 | 172.16.1.12: null 7 | 172.16.1.13: null 8 | 9 | workers: 10 | hosts: 11 | 172.16.2.11: null 12 | 172.16.2.12: null 13 | 172.16.2.13: null 14 | -------------------------------------------------------------------------------- /create-ha-cluster/playbook/cluster_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: masters, workers 4 | name: Setup masters and workers 5 | remote_user: ci 6 | become: yes 7 | tasks: 8 | # https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin 9 | - name: Disable swap 10 | shell: swapoff -a 11 | 12 | - name: Remove swap entry from /etc/fstab 13 | lineinfile: 14 | dest: /etc/fstab 15 | regexp: swap 16 | state: absent 17 | 18 | - name: Load br_netfilter module 19 | shell: | 20 | cat <ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /create-kvm-guest-vm/roles/kvm-provision/README.md: -------------------------------------------------------------------------------- 1 | KVM Provision 2 | ========= 3 | 4 | Provision a new guest KVM guest VM. 5 | -------------------------------------------------------------------------------- /create-kvm-guest-vm/roles/kvm-provision/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kvm-provision 3 | base_image_pool_dir: "/images/base" 4 | base_image_name: jammy-server-cloudimg-amd64.qcow2 5 | vms_image_pool_dir: "/images/vms" 6 | vm_memory_mb: 4096 7 | vm_vcpus: 2 8 | vm_disk_size: 20G 9 | vm_second_disk_enabled: false 10 | vm_second_disk_size: 40G 11 | -------------------------------------------------------------------------------- /create-kvm-guest-vm/roles/kvm-provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for kvm-provision 3 | - name: Check preconditions 4 | block: 5 | - name: Check if vm exists 6 | stat: 7 | path: "{{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.xml" 8 | register: stat_result 9 | - name: Fail if vm exists 10 | fail: 11 | msg: VM already exists 12 | when: stat_result.stat.exists 13 | - name: Fail if vm is not named 14 | fail: 15 | msg: No name provided for the new VM 16 | when: vm_name | length == 0 17 | 18 | - name: Create VM directory 19 | file: 20 | path: "{{ vms_image_pool_dir }}/{{ vm_name }}" 21 | state: directory 22 | 23 | - name: Copy base image to VM directory 24 | become: true 25 | copy: 26 | remote_src: true 27 | src: "{{ base_image_pool_dir }}/{{ base_image_name }}" 28 | dest: "{{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.qcow2" 29 | 30 | - name: Resize VM image 31 | become: true 32 | shell: "qemu-img resize {{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.qcow2 +{{ vm_disk_size }}" 33 | 34 | - name: Create a new raw disk 35 | become: true 36 | shell: "qemu-img create -f raw {{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}-second-disk {{ vm_second_disk_size }}" 37 | when: vm_second_disk_enabled 38 | 39 | - name: Generate new mac address for new VM 40 | shell: bash -c "printf '00:16:3E:%02X:%02X:%02X\n' $[RANDOM%256] $[RANDOM%256] $[RANDOM%256]" 41 | register: new_mac 42 | 43 | - name: Save new mac address for new VM 44 | set_fact: 45 | vm_mac_address: "{{ new_mac.stdout }}" 46 | vm_uuid: "{{ ansible_date_time.iso8601_micro | to_uuid }}" 47 | 48 | - name: Configure VM image 49 | become: true 50 | command: | 51 | virt-customize -a "{{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.qcow2" \ 52 | --hostname {{ vm_name }} \ 53 | --root-password password:{{ vm_root_password }} \ 54 | --ssh-inject root:file:{{ vm_root_public_key }} \ 55 | --firstboot-command "cat << "EOF" > /etc/netplan/00-installer-config.yaml 56 | network: 57 | ethernets: 58 | enp1s0: 59 | dhcp4: true 60 | version: 2 61 | EOF" \ 62 | --firstboot-command "netplan apply" 63 | --firstboot-command "useradd -u 2000 -s /bin/bash -d /home/ci/ -m -G sudo ci" 64 | --firstboot-command "cp -r /root/.ssh /home/ci && chown -R ci /home/ci/.ssh" 65 | --firstboot-command "dpkg-reconfigure openssh-server" 66 | --firstboot-command "echo 'ci ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers" 67 | --firstboot-command "growpart /dev/vda 1 && resize2fs /dev/vda1" 68 | --uninstall cloud-init 69 | 70 | - name: Copy new VM configuration into VM directory 71 | template: 72 | src: vm-template.xml.j2 73 | dest: "{{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.xml" 74 | 75 | - name: Import new VM configuration 76 | become: true 77 | shell: "virsh define {{ vms_image_pool_dir }}/{{ vm_name }}/{{ vm_name }}.xml" 78 | 79 | - name: Start new VM 80 | become: true 81 | shell: "virsh start {{ vm_name }}" 82 | -------------------------------------------------------------------------------- /create-kvm-guest-vm/roles/kvm-provision/templates/vm-template.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | {{ vm_name }} 3 | {{ vm_uuid }} 4 | {{ vm_memory_mb }} 5 | {{ vm_vcpus }} 6 | 7 | hvm 8 | 9 | 10 | 11 | 12 | /usr/bin/qemu-system-x86_64 13 | 14 | 15 | 16 | 17 | 18 | {% if vm_second_disk_enabled %} 19 | 20 | 21 | 22 | 23 | 24 | {% endif %} 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 |
44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 |