├── .gitignore
├── README.md
├── Vagrantfile
├── common.sh
├── local-storage
│   ├── create-volumes.sh
│   ├── install.sh
│   ├── provisioner.yaml
│   └── storageclass.yaml
├── master.sh
└── worker.sh

/.gitignore:
--------------------------------------------------------------------------------
.vagrant
*~

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# k8s-vagrant-libvirt

A minimal setup for running a multi-node Kubernetes cluster in Vagrant
virtual machines using libvirt on Linux.

Related projects:

* https://github.com/galexrt/k8s-vagrant-multi-node (VirtualBox, many features)

Currently supported configuration(s):

* guest: CentOS 7
* network: flannel

# usage

Create and provision the cluster

```bash
vagrant up --provider=libvirt
```

Set the kubectl configuration file

```bash
vagrant ssh master -c "sudo cat /etc/kubernetes/admin.conf" > ${HOME}/.kube/config
```

Test cluster access from your host

```
[~/src/k8s-vagrant-libvirt]$ kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
master    Ready    master   30m   v1.13.4
worker0   Ready    <none>   30m   v1.13.4
```

# configuration

The following options may be set in the `Vagrantfile`

```ruby
# number of worker nodes
NUM_WORKERS = 1
# number of extra disks per worker
NUM_DISKS = 1
# size of each disk in gigabytes
DISK_GBS = 10
```

# loading docker images

Use the [vagrant-docker_load](https://rubygems.org/gems/vagrant-docker_load) plugin to upload Docker images into the Vagrant machines

```bash
vagrant plugin install vagrant-docker_load
```

An example of loading a [rook@master](https://github.com/rook/rook) build

```bash
[~/src/k8s-vagrant-libvirt]$ vagrant docker-load build-2568df12/ceph-amd64 rook/ceph:master
Loaded image: build-2568df12/ceph-amd64:latest
Loaded image: build-2568df12/ceph-amd64:latest
```

# troubleshooting

The following is a summary of the environments and applications that are known to work

```
[~/src/k8s-vagrant-libvirt]$ lsb_release -d
Description:    Fedora release 29 (Twenty Nine)

[~/src/k8s-vagrant-libvirt]$ vagrant version
Installed Version: 2.1.2

[~/src/k8s-vagrant-libvirt]$ vagrant plugin list
vagrant-libvirt (0.0.40, system)
```

Ceph distributed storage via Rook

```
[~/src/k8s-vagrant-libvirt]$ kubectl -n rook-ceph-system logs rook-ceph-operator-b996864dd-l5czk | head -n 1
2019-03-21 16:09:18.168066 I | rookcmd: starting Rook v0.9.0-323.g2447520 with arguments '/usr/local/bin/rook ceph operator'

[~/src/k8s-vagrant-libvirt]$ kubectl -n rook-ceph get pods
NAME                                  READY   STATUS      RESTARTS   AGE
rook-ceph-mgr-a-6b5cdfcb6f-hg7tr      1/1     Running     0          4m33s
rook-ceph-mon-a-6cb6cfdb95-grgsz      1/1     Running     0          4m56s
rook-ceph-mon-b-6477f5fc8c-m5mzg      1/1     Running     0          4m50s
rook-ceph-mon-c-6cdf75fc4c-pgq5h      1/1     Running     0          4m42s
rook-ceph-osd-0-8b5d9c477-5s989       1/1     Running     0          4m11s
rook-ceph-osd-prepare-worker0-x5qqn   0/2     Completed   0          4m17s
rook-ceph-tools-76c7d559b6-vcxhr      1/1     Running     0          3m48s
```
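
# local storage

Every node also gets ten bind-mounted directories under `/mnt/local-storage`
(see `local-storage/create-volumes.sh`), which the bundled static provisioner
exposes through the `local-storage` StorageClass. Below is a minimal sketch of
a claim against that class; the claim name and requested size are made up for
illustration and are not part of this repository.

```yaml
# Hypothetical claim against the bundled "local-storage" StorageClass.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim        # illustrative name only
spec:
  storageClassName: local-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```

Because the class uses `volumeBindingMode: WaitForFirstConsumer`, the claim
stays `Pending` until a pod that references it is scheduled onto a node.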

--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
# number of worker nodes
NUM_WORKERS = 1
# number of extra disks per worker
NUM_DISKS = 1
# size of each disk in gigabytes
DISK_GBS = 10

MASTER_IP = "192.168.73.100"
WORKER_IP_BASE = "192.168.73.2" # 200, 201, ...
TOKEN = "yi6muo.4ytkfl3l6vl8zfpk"

Vagrant.configure("2") do |config|
  config.vm.box = "centos/7"
  config.vm.synced_folder ".", "/vagrant", disabled: true

  config.vm.provider :libvirt do |libvirt|
    libvirt.cpu_mode = 'host-passthrough'
    libvirt.graphics_type = 'none'
    libvirt.memory = 2048
    libvirt.cpus = 2
    libvirt.qemu_use_session = false
  end

  config.vm.provision "shell", path: "common.sh"
  config.vm.provision "shell", path: "local-storage/create-volumes.sh"

  config.vm.define "master" do |master|
    master.vm.hostname = "master"
    master.vm.network :private_network, ip: MASTER_IP
    master.vm.provision "shell", path: "master.sh",
      env: { "MASTER_IP" => MASTER_IP, "TOKEN" => TOKEN }

    master.vm.provision :file do |file|
      file.source = "local-storage/storageclass.yaml"
      file.destination = "/tmp/local-storage-storageclass.yaml"
    end
    master.vm.provision :file do |file|
      file.source = "local-storage/provisioner.yaml"
      file.destination = "/tmp/local-storage-provisioner.yaml"
    end
    master.vm.provision "shell", path: "local-storage/install.sh"
  end

  (0..NUM_WORKERS-1).each do |i|
    config.vm.define "worker#{i}" do |worker|
      worker.vm.hostname = "worker#{i}"
      worker.vm.network :private_network, ip: "#{WORKER_IP_BASE}" + i.to_s.rjust(2, '0')
      (1..NUM_DISKS).each do |j|
        worker.vm.provider :libvirt do |libvirt|
          libvirt.storage :file, :size => "#{DISK_GBS}G"
        end
      end
      worker.vm.provision "shell", path: "worker.sh",
        env: { "MASTER_IP" => MASTER_IP, "TOKEN" => TOKEN }
    end
  end
end

--------------------------------------------------------------------------------
/common.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

cat << EOF > /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - x86_64
baseurl=https://download.docker.com/linux/centos/7/x86_64/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg
exclude=docker*
EOF

cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg \
       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF

# docker daemon config: systemd cgroup driver, json-file logging, overlay2 storage
mkdir -p /etc/docker
cat << EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

# let iptables see bridged traffic
cat << EOF > /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
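
# The repo files above exclude docker* and kube*, so those packages are only
# installed or upgraded when --disableexcludes is passed, as in the pinned
# install below; a routine `yum update` will not touch them.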
yum install -y device-mapper-persistent-data lvm2 \
    kubelet kubeadm kubectl docker-ce-18.06.2.ce \
    --disableexcludes=kubernetes,docker-ce-stable

systemctl daemon-reload
systemctl restart docker
systemctl enable docker.service
systemctl enable --now kubelet

# kubeadm preflight requirements: SELinux permissive, br_netfilter loaded, swap disabled
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

modprobe br_netfilter
sysctl --system

swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

--------------------------------------------------------------------------------
/local-storage/create-volumes.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

# create ten bind-mounted directories under /mnt/local-storage; the static
# local-volume provisioner discovers them and exposes each as a PersistentVolume
for i in $(seq 1 10); do
    srcdir="/mnt/local-storage-srcs/vol${i}"
    dstdir="/mnt/local-storage/vol${i}"
    mkdir -p ${srcdir}
    mkdir -p ${dstdir}
    mount --bind ${srcdir} ${dstdir}
done

--------------------------------------------------------------------------------
/local-storage/install.sh:
--------------------------------------------------------------------------------
#!/bin/bash

kubectl --kubeconfig=/etc/kubernetes/admin.conf \
    create -f /tmp/local-storage-storageclass.yaml

kubectl --kubeconfig=/etc/kubernetes/admin.conf \
    create -f /tmp/local-storage-provisioner.yaml

--------------------------------------------------------------------------------
/local-storage/provisioner.yaml:
--------------------------------------------------------------------------------
---
# Source: provisioner/templates/provisioner.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-provisioner-config
  namespace: default
  labels:
    heritage: "Tiller"
    release: "release-name"
    chart: provisioner-2.3.2
data:
  storageClassMap: |
    local-storage:
      hostDir: /mnt/local-storage
      mountDir: /mnt/local-storage
      blockCleanerCommand:
        - "/scripts/shred.sh"
        - "2"
      volumeMode: Filesystem
      fsType: ext4
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: local-volume-provisioner
  namespace: default
  labels:
    app: local-volume-provisioner
    heritage: "Tiller"
    release: "release-name"
    chart: provisioner-2.3.2
spec:
  selector:
    matchLabels:
      app: local-volume-provisioner
  template:
    metadata:
      labels:
        app: local-volume-provisioner
    spec:
      serviceAccountName: local-storage-admin
      containers:
        - image: "quay.io/external_storage/local-volume-provisioner:v2.3.2"
          name: provisioner
          securityContext:
            privileged: true
          env:
          - name: MY_NODE_NAME
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
          - name: MY_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          - name: JOB_CONTAINER_IMAGE
            value: "quay.io/external_storage/local-volume-provisioner:v2.3.2"
          volumeMounts:
            - mountPath: /etc/provisioner/config
              name: provisioner-config
              readOnly: true
            - mountPath: /dev
              name: provisioner-dev
            - mountPath: /mnt/local-storage
              name: local-storage
              mountPropagation: "HostToContainer"
      volumes:
        - name: provisioner-config
          configMap:
            name: local-provisioner-config
        - name: provisioner-dev
          hostPath:
            path: /dev
        - name: local-storage
          hostPath:
            path: /mnt/local-storage

---
# Source: provisioner/templates/provisioner-service-account.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-storage-admin
  namespace: default
  labels:
    heritage: "Tiller"
    release: "release-name"
    chart: provisioner-2.3.2
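
# The role bindings below grant the provisioner's service account the built-in
# system:persistent-volume-provisioner ClusterRole plus read ("get") access to
# Node objects.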

---
# Source: provisioner/templates/provisioner-cluster-role-binding.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-pv-binding
  labels:
    heritage: "Tiller"
    release: "release-name"
    chart: provisioner-2.3.2
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: default
roleRef:
  kind: ClusterRole
  name: system:persistent-volume-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-storage-provisioner-node-clusterrole
  labels:
    heritage: "Tiller"
    release: "release-name"
    chart: provisioner-2.3.2
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-node-binding
  labels:
    heritage: "Tiller"
    release: "release-name"
    chart: provisioner-2.3.2
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: default
roleRef:
  kind: ClusterRole
  name: local-storage-provisioner-node-clusterrole
  apiGroup: rbac.authorization.k8s.io

---
# Source: provisioner/templates/namespace.yaml


---
# Source: provisioner/templates/pod-security-policy.yaml


--------------------------------------------------------------------------------
/local-storage/storageclass.yaml:
--------------------------------------------------------------------------------
# Only create this for K8s 1.9+
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
# Supported policies: Delete, Retain
reclaimPolicy: Delete

--------------------------------------------------------------------------------
/master.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

kubeadm config images pull
kubeadm init --pod-network-cidr=10.244.0.0/16 \
    --token ${TOKEN} --apiserver-advertise-address=${MASTER_IP}

# install the flannel pod network; its default CIDR matches --pod-network-cidr above
KUBECONFIG=/etc/kubernetes/admin.conf kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# remove the NoSchedule taint so workloads can also run on the master node
kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node master node-role.kubernetes.io/master:NoSchedule-

--------------------------------------------------------------------------------
/worker.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

# join the cluster using the pre-shared token from the Vagrantfile
kubeadm join ${MASTER_IP}:6443 --token ${TOKEN} \
    --discovery-token-unsafe-skip-ca-verification

--------------------------------------------------------------------------------