├── .gitignore
├── LICENSE
├── README.md
├── artifacts
├── binaries
│   └── .gitignore
├── config.yaml
├── conformance
│   ├── README.md
│   └── conformance-submission
│       ├── PRODUCT.yaml
│       ├── README.md
│       ├── e2e.log
│       └── junit_01.xml
├── dtk
├── hack
│   ├── clone-vm
│   ├── create-image-archive
│   ├── create-vm
│   └── resize-storage
├── images
│   ├── .gitignore
│   └── README.md
├── kickstarts
│   ├── kvm.text.ks.cfg
│   ├── vbox.graphical.ks.cfg
│   └── vbox.text.ks.cfg
├── resources
│   ├── design.md
│   ├── desktop-kubernetes-no-text.jpg
│   ├── desktop-kubernetes-no-text.png
│   ├── desktop-kubernetes-no-text.svg
│   ├── desktop-kubernetes.jpg
│   ├── desktop-kubernetes.png
│   └── desktop-kubernetes.svg
├── scripts
│   ├── addons
│   │   ├── calico
│   │   │   ├── .gitignore
│   │   │   ├── calico.conf
│   │   │   ├── install
│   │   │   └── values-template.yaml
│   │   ├── cilium
│   │   │   ├── .gitignore
│   │   │   ├── install
│   │   │   └── values-template.yaml
│   │   ├── coredns
│   │   │   ├── install
│   │   │   └── values.yaml
│   │   ├── external-dns
│   │   │   ├── README.md
│   │   │   ├── install
│   │   │   ├── values.yaml
│   │   │   └── webhook
│   │   ├── ingress-nginx
│   │   │   ├── install
│   │   │   └── values.yaml
│   │   ├── install-addons
│   │   ├── kube-prometheus-stack
│   │   │   ├── install
│   │   │   └── values.yaml
│   │   ├── kubernetes-dashboard
│   │   │   ├── README.md
│   │   │   ├── install
│   │   │   └── values.yaml
│   │   ├── metrics-server
│   │   │   └── install
│   │   ├── openebs
│   │   │   ├── install
│   │   │   └── values.yaml
│   │   └── vcluster
│   │       ├── .gitignore
│   │       ├── README.md
│   │       ├── ingress.yaml
│   │       ├── install
│   │       └── nodeport-svc.yaml
│   ├── certs-and-kubecfgs
│   │   ├── csr.altnames.conf
│   │   ├── csr.conf
│   │   ├── csr.kubernetes.conf
│   │   └── gen-certs-kubeconfig
│   ├── cluster
│   │   ├── gen-core-k8s
│   │   └── gen-root-ca
│   ├── control-plane
│   │   ├── configure-controller
│   │   ├── etcd
│   │   │   ├── etcd-pod.yaml
│   │   │   ├── etcd.service
│   │   │   └── install-etcd
│   │   ├── kube-apiserver
│   │   │   ├── encryption-config.yaml
│   │   │   ├── install-kube-apiserver
│   │   │   ├── kube-apiserver-pod.yaml
│   │   │   ├── kube-apiserver.service
│   │   │   └── rbac.yaml
│   │   ├── kube-controller-manager
│   │   │   ├── install-kube-controller-manager
│   │   │   ├── kube-controller-manager-pod.yaml
│   │   │   └── kube-controller-manager.service
│   │   └── kube-scheduler
│   │       ├── install-kube-scheduler
│   │       ├── kube-scheduler-pod.yaml
│   │       ├── kube-scheduler.service
│   │       └── kube-scheduler.yaml
│   ├── helpers
│   │   ├── check-compatibility
│   │   ├── check-objects
│   │   ├── download-obj
│   │   ├── download-objects
│   │   ├── parse-config.py
│   │   ├── parseargs
│   │   ├── show-usage
│   │   └── yp
│   ├── kvm
│   │   ├── clone-vm
│   │   ├── configure-etc-hosts
│   │   ├── create-template-vm
│   │   ├── get-vm-ip
│   │   ├── provision-vms
│   │   └── up-down-del
│   ├── os
│   │   ├── configure-firewall
│   │   ├── desktop-kubernetes.service
│   │   ├── desktop-kubernetes.sh
│   │   └── gen-kickstart-iso
│   ├── virtualbox
│   │   ├── README.md
│   │   ├── clone-vm
│   │   ├── configure-etc-hosts
│   │   ├── configure-hostonly-networking
│   │   ├── create-template-vm
│   │   ├── create-vm
│   │   ├── gen-hostonly-ifcfg-iso
│   │   ├── get-or-create-hostonly-network
│   │   ├── get-vm-ip
│   │   ├── install-guest-additions
│   │   ├── provision-vms
│   │   ├── start-vm
│   │   ├── up-down-del
│   │   └── wait-vm
│   ├── vm
│   │   └── gen-ssh-keyfiles
│   └── worker
│       ├── configure-worker
│       ├── containerd
│       │   ├── 20-containerd-net.conflist
│       │   ├── config.toml
│       │   ├── containerd.service
│       │   ├── import-images
│       │   └── install-containerd
│       ├── kube-proxy
│       │   ├── install-kube-proxy
│       │   ├── kube-proxy-config.yaml
│       │   ├── kube-proxy-default-config.yaml
│       │   ├── kube-proxy-pod.yaml
│       │   └── kube-proxy.service
│       ├── kubelet
│       │   ├── install-kubelet
│       │   ├── kubelet-config.yaml
│       │   └── kubelet.service
│       └── misc
│           ├── crictl.yaml
│           └── install-misc-bins
└── sshto
/.gitignore:
--------------------------------------------------------------------------------
1 | conformance/sonobuoy 2 | conformance/results 3 | 
conformance/*.gz 4 | generated/* 5 | scratch/* 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License (modified) 2 | 3 | Copyright (c) 2021-2024 Eric Ace 4 | 5 | Permission is hereby granted, free of charge, to any HUMAN BEING obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE SHALL NOT BE USED TO TRAIN LARGE LANGUAGE MODELS OR OTHER 16 | ARTIFICIAL INTELLIGENCE OR MACHINE LEARNING SYSTEMS OR COMPUTER PROGRAMS. 17 | 18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | SOFTWARE. 25 | -------------------------------------------------------------------------------- /artifacts: -------------------------------------------------------------------------------- 1 | # Change versions here and they will propagate throughout the project 2 | 3 | ALMA8_VER=8.10 4 | ALMA9_VER=9.5 5 | CNI_PLUGINS_VER=v1.7.1 6 | CONTAINERD_VER=2.1.1 7 | CRICTL_VER=v1.33.0 8 | ETCD_VER=v3.6.0 9 | GUEST_ADDITIONS_VER=7.0.18 10 | K8S_VER=v1.33.1 11 | ROCKY_VER=8.10 12 | RUNC_VER=v1.3.0 13 | 14 | # These are the items used to provision the cluster 15 | 16 | ALMA8_ISO_DOWNLOAD=https://repo.almalinux.org/almalinux/$ALMA8_VER/isos/x86_64/AlmaLinux-$ALMA8_VER-x86_64-dvd.iso 17 | ALMA8_ISO_FILE=$DTKBASE/binaries/AlmaLinux-$ALMA8_VER-x86_64-dvd.iso 18 | ALMA9_ISO_DOWNLOAD=https://repo.almalinux.org/almalinux/$ALMA9_VER/isos/x86_64/AlmaLinux-$ALMA9_VER-x86_64-dvd.iso 19 | ALMA9_ISO_FILE=$DTKBASE/binaries/AlmaLinux-$ALMA9_VER-x86_64-dvd.iso 20 | CENTOS9_ISO_DOWNLOAD=http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/iso/CentOS-Stream-9-latest-x86_64-dvd1.iso 21 | CENTOS9_ISO_FILE=$DTKBASE/binaries/CentOS-Stream-9-latest-x86_64-dvd1.iso 22 | CNI_PLUGINS_BINARY=$DTKBASE/binaries/cni-plugins-linux-amd64-$CNI_PLUGINS_VER.tgz 23 | CNI_PLUGINS_DOWNLOAD=https://github.com/containernetworking/plugins/releases/download/$CNI_PLUGINS_VER/cni-plugins-linux-amd64-$CNI_PLUGINS_VER.tgz 24 | CONTAINERD_BINARY=$DTKBASE/binaries/containerd-v$CONTAINERD_VER-linux-amd64.tar.gz 25 | CONTAINERD_DOWNLOAD=https://github.com/containerd/containerd/releases/download/v$CONTAINERD_VER/containerd-$CONTAINERD_VER-linux-amd64.tar.gz 26 | CRICTL_BINARY=$DTKBASE/binaries/crictl-$CRICTL_VER-linux-amd64.tar.gz 27 | CRICTL_DOWNLOAD=https://github.com/kubernetes-sigs/cri-tools/releases/download/$CRICTL_VER/crictl-$CRICTL_VER-linux-amd64.tar.gz 28 | ETCD_DOWNLOAD=https://github.com/etcd-io/etcd/releases/download/$ETCD_VER/etcd-$ETCD_VER-linux-amd64.tar.gz 29 | 
ETCD_GZIP=$DTKBASE/binaries/etcd-$ETCD_VER-linux-amd64.tar.gz 30 | GUEST_ADDITIONS_ISO_DOWNLOAD=https://download.virtualbox.org/virtualbox/$GUEST_ADDITIONS_VER/VBoxGuestAdditions_$GUEST_ADDITIONS_VER.iso 31 | GUEST_ADDITIONS_ISO_FILE=$DTKBASE/binaries/VBoxGuestAdditions_$GUEST_ADDITIONS_VER.iso 32 | KUBE_APISERVER_BINARY=$DTKBASE/binaries/kube-apiserver-$K8S_VER 33 | KUBE_APISERVER_DOWNLOAD=https://dl.k8s.io/$K8S_VER/bin/linux/amd64/kube-apiserver 34 | KUBE_CONTROLLER_MANAGER_BINARY=$DTKBASE/binaries/kube-controller-manager-$K8S_VER 35 | KUBE_CONTROLLER_MANAGER_DOWNLOAD=https://dl.k8s.io/$K8S_VER/bin/linux/amd64/kube-controller-manager 36 | KUBELET_BINARY=$DTKBASE/binaries/kubelet-$K8S_VER 37 | KUBELET_DOWNLOAD=https://dl.k8s.io/$K8S_VER/bin/linux/amd64/kubelet 38 | KUBE_PROXY_BINARY=$DTKBASE/binaries/kube-proxy-$K8S_VER 39 | KUBE_PROXY_DOWNLOAD=https://dl.k8s.io/$K8S_VER/bin/linux/amd64/kube-proxy 40 | KUBE_SCHEDULER_BINARY=$DTKBASE/binaries/kube-scheduler-$K8S_VER 41 | KUBE_SCHEDULER_DOWNLOAD=https://dl.k8s.io/$K8S_VER/bin/linux/amd64/kube-scheduler 42 | ROCKY_ISO_DOWNLOAD=https://download.rockylinux.org/pub/rocky/${ROCKY_VER%%\.*}/isos/x86_64/Rocky-$ROCKY_VER-x86_64-dvd1.iso 43 | ROCKY_ISO_FILE=$DTKBASE/binaries/Rocky-$ROCKY_VER-x86_64-dvd1.iso 44 | RUNC_BINARY=$DTKBASE/binaries/runc-$RUNC_VER 45 | RUNC_DOWNLOAD=https://github.com/opencontainers/runc/releases/download/$RUNC_VER/runc.amd64 46 | -------------------------------------------------------------------------------- /binaries/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | */ 3 | !.gitignore -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | # for help see the README 2 | virt: kvm 3 | addons: 4 | #- name: calico 5 | - name: cilium 6 | - name: coredns 7 | - name: openebs 8 | - name: metrics-server 9 | - name: kube-prometheus-stack 10 | - name: kubernetes-dashboard 11 | - name: ingress-nginx 12 | k8s: 13 | containerized-cplane: 14 | cluster-cidr: 10.200.0.0/16 15 | cluster-dns: 10.32.0.10 16 | kube-proxy: false 17 | containerd-mirror: 18 | # name: _default 19 | # config: | 20 | # [host."http://192.168.0.49:8080"] 21 | # capabilities = ["pull", "resolve"] 22 | # skip_verify = true 23 | kvm: 24 | network: nat 25 | kickstart: kvm.text.ks.cfg 26 | os-variant: almalinux9 27 | vbox: 28 | host-network-interface: 29 | host-only-network: 192.168.56 30 | kickstart: vbox.text.ks.cfg 31 | vboxdir: 32 | vm: 33 | linux: alma9 34 | create-template: true 35 | template-vmname: alma9 36 | vms: 37 | - name: vm1 38 | cpu: 3 39 | mem: 8192 40 | ip: 200 41 | disk: 42 | pod-cidr: 10.200.0.0/24 43 | - name: vm2 44 | cpu: 3 45 | mem: 8192 46 | ip: 201 47 | disk: 48 | pod-cidr: 10.200.1.0/24 49 | - name: vm3 50 | cpu: 3 51 | mem: 8192 52 | ip: 202 53 | disk: 54 | pod-cidr: 10.200.2.0/24 55 | cluster: 56 | -------------------------------------------------------------------------------- /conformance/README.md: -------------------------------------------------------------------------------- 1 | # Sonobuoy conformance testing 2 | 03-September-2024 3 | 4 | This README assumes you're in the repo root. 
E.g.: 5 | 6 | ``` 7 | $ pwd 8 | ~/projects/desktop-kubernetes 9 | ``` 10 | 11 | ## Get Sonobuoy 12 | ``` 13 | SONOVER=0.57.2 14 | SONOGZIP=https://github.com/vmware-tanzu/sonobuoy/releases/download/v$SONOVER/sonobuoy_${SONOVER}_linux_amd64.tar.gz 15 | rm -f conformance/sonobuoy 16 | curl -sL $SONOGZIP | tar zxvf - -C conformance sonobuoy 17 | ``` 18 | 19 | ## Smoke test - should run one test successfully 20 | 21 | ``` 22 | conformance/sonobuoy run --mode=quick --dns-namespace coredns 23 | watch 'conformance/sonobuoy status --json | jq' 24 | conformance/sonobuoy delete --wait 25 | ``` 26 | 27 | ## Run conformance tests 28 | 29 | ``` 30 | conformance/sonobuoy run --mode=certified-conformance --timeout=30000 --dns-namespace coredns 31 | ``` 32 | 33 | ## Watch the tests run in one console window 34 | 35 | ``` 36 | watch 'conformance/sonobuoy status --json | jq' 37 | ``` 38 | 39 | ## Watch the logs in another console window 40 | 41 | ``` 42 | conformance/sonobuoy logs -f 43 | ``` 44 | 45 | ## Get the test results upon completion 46 | 47 | ``` 48 | outfile=$(conformance/sonobuoy retrieve) &&\ 49 | mv $outfile conformance &&\ 50 | rm -rf conformance/results &&\ 51 | mkdir -p conformance/results &&\ 52 | tar xzf conformance/$outfile -C conformance/results 53 | ``` 54 | 55 | ## Clean up the cluster 56 | 57 | ``` 58 | conformance/sonobuoy delete --wait 59 | ``` 60 | 61 | ## Certification submission process 62 | 63 | ### This repo 64 | 65 | 1. Download Sonobuoy / update the Sonobuoy version in the main README / run Sonobuoy per above. If the tests PASS: 66 | 2. Copy the two Sonobuoy result files to a staging dir in this project: 67 | ``` 68 | find conformance/results \( -name e2e.log -o -name junit_01.xml \) | xargs -I% cp % conformance/conformance-submission 69 | ``` 70 | 3. Hand edit this README, plus `PRODUCT.yaml` and `README.md` in `conformance/conformance-submission` as needed 71 | 4. Git commit and push 72 | 5. Tag `desktop-kubernetes` with a tag matching the Kubernetes version: `git tag -a v1.31.0 -m "Kubernetes 1.31.0 passes Sonobuoy conformance v0.57.2"` 73 | 6. Git push the tag: `git push origin v1.31.0` 74 | 75 | ### Conformance fork 76 | 77 | E.g.: `~/projects/k8s-conformance-esace-fork` 78 | 79 | 1. Sync the fork https://github.com/aceeric/k8s-conformance/tree/master 80 | 2. Do a `git pull` 81 | 3. Create a branch: `git checkout -b v1.31-desktop-kubernetes` 82 | 4. Create a directory: `mkdir ./v1.31/desktop-kubernetes` 83 | 5. Populate the directory: `cp ~/projects/desktop-kubernetes/conformance/conformance-submission/* ./v1.31/desktop-kubernetes` 84 | 6. Verify 85 | ``` 86 | $ ls -l ./v1.31/desktop-kubernetes 87 | total 2264 88 | -rw-r--r-- 1 eace eace 7883 Sep 3 19:45 e2e.log 89 | -rw-r--r-- 1 eace eace 2296506 Sep 3 19:45 junit_01.xml 90 | -rw-rw-r-- 1 eace eace 549 Sep 3 19:45 PRODUCT.yaml 91 | -rw-rw-r-- 1 eace eace 4253 Sep 3 19:45 README.md 92 | ``` 93 | 7. Git add and commit to the branch with a message AND signoff: 94 | ``` 95 | git commit -m 'Conformance results for v1.31/desktop-kubernetes 96 | Signed-off-by: Eric Ace <24485843+aceeric@users.noreply.github.com>' 97 | ``` 98 | 8. Push to GitHub 99 | 9. 
Create a Pull Request to https://github.com/cncf/k8s-conformance from the branch in the fork per https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork 100 | 101 | -------------------------------------------------------------------------------- /conformance/conformance-submission/PRODUCT.yaml: -------------------------------------------------------------------------------- 1 | vendor: Eric Ace 2 | name: Desktop Kubernetes 3 | version: v1.31.0 4 | website_url: https://github.com/aceeric/desktop-kubernetes 5 | repo_url: https://github.com/aceeric/desktop-kubernetes 6 | documentation_url: https://github.com/aceeric/desktop-kubernetes 7 | product_logo_url: https://raw.githubusercontent.com/aceeric/desktop-kubernetes/master/resources/desktop-kubernetes.svg 8 | type: distribution 9 | description: 'Stands up a three-VM Kubernetes development cluster on the desktop using KVM, by running a single Bash script.' 10 | contact_email_address: ericace@protonmail.com 11 | -------------------------------------------------------------------------------- /conformance/conformance-submission/README.md: -------------------------------------------------------------------------------- 1 | # Reproducing the test results 2 | 3 | The project can use either KVM or VirtualBox to provision the cluster VMs. The default is to use KVM. 4 | 5 | ## Prerequisites / compatibility 6 | 7 | - This project has been tested under Ubuntu 22.04.4 LTS (Jammy) with 12 hyper-threaded cores and 64 gigs of RAM 8 | - It uses KVM to provision VMs to run Kubernetes 9 | - It is a Kubernetes 1.31.0 distribution and has been tested with kubectl v1.31.0 10 | - This project creates a k8s cluster consisting of three Alma 8 guest VMs configured per the project root `config.yaml` file, with each VM having 8 gigs of RAM and 3 CPUs 11 | - So you need sufficient CPU and RAM on the desktop environment to stand up the cluster 12 | 13 | ## Get Desktop Kubernetes 14 | 15 | ```shell 16 | $ git clone --branch v1.31.0 https://github.com/aceeric/desktop-kubernetes.git 17 | $ cd desktop-kubernetes 18 | ``` 19 | 20 | Tag v1.31.0 is the current release tested, which mirrors the release of Kubernetes that the project deploys. 21 | 22 | ## Check requirements 23 | 24 | This is a Bash shell script project and requires certain command-line utilities on the desktop. Run the `dtk` script first with the `--check-compatibility` option. This will compare your versions to what has been tested: 25 | 26 | ```shell 27 | $ ./dtk --check-compatibility 28 | checking version compatibility 29 | component tested found matches? 30 | --------- ------ ----- -------- 31 | openssl 3.0.2 3.0.2 Yes 32 | openssh OpenSSH_8.9p1 OpenSSH_8.9p1 Yes 33 | genisoimage 1.1.11 1.1.11 Yes 34 | virtual box 7.0.18_Ubuntur162988 7.0.18_Ubuntur162988 Yes 35 | host operating system Ubuntu 22.04.4 LTS Ubuntu 22.04.4 LTS Yes 36 | kubectl (client only) v1.31.0 v1.31.0 Yes 37 | curl 7.81.0 7.81.0 Yes 38 | helm v3.13.1 v3.13.1 Yes 39 | yq 4.40.5 4.40.5 Yes 40 | virt-install 4.0.0 4.0.0 Yes 41 | virsh 8.0.0 8.0.0 Yes 42 | qemu-img 6.2.0 6.2.0 Yes 43 | ``` 44 | 45 | Version incompatibilities may not be an issue. You have to use your judgement. (Since KVM is the default virtualization, any VirtualBox discrepancies are not relevant for conformance.) 
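
Before creating the cluster, it can also be worth confirming that the desktop actually exposes hardware virtualization to KVM. This check is not part of the `dtk` script; the following is a minimal sketch using the stock libvirt client tooling (assumed installed per the compatibility table above):

```shell
# the CPU must expose virtualization extensions (vmx = Intel, svm = AMD)
grep -Eq 'vmx|svm' /proc/cpuinfo && echo "virtualization extensions present"

# ask libvirt to validate the host QEMU/KVM setup end to end
virt-host-validate qemu
```
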
46 | 47 | ## Create the cluster 48 | 49 | If all requirements are reasonably satisfied, you create a cluster by running the `dtk` script, which in turn reads the cluster configuration from `config.yaml`: 50 | 51 | ```shell 52 | $ ./dtk 53 | ``` 54 | 55 | The example above will create the cluster by first creating a template VM named `alma8`. It will then clone that template VM into three VMs: `vm1`, `vm2`, and `vm3`, and then proceed to install Kubernetes. On completion, it will display a message telling you how to set your KUBECONFIG environment variable to access the cluster as a cluster admin. The KVM networking is NAT. This provides host-to-guest, guest-to-guest, and guest-to-internet connectivity. The example also installs calico cluster networking. CoreDNS is installed by default. OpenEBS is installed for dynamic volume provisioning. (The hostpath provisioner is installed because it is simple and lightweight.) 56 | 57 | ## Run the conformance tests 58 | 59 | My procedure follows the guidance at: https://github.com/cncf/k8s-conformance/blob/master/instructions.md 60 | 61 | ### Here are the steps I performed: 62 | 63 | ```shell 64 | SONOVER=0.57.2 65 | SONOGZIP=https://github.com/vmware-tanzu/sonobuoy/releases/download/v$SONOVER/sonobuoy_${SONOVER}_linux_amd64.tar.gz 66 | rm -f conformance/sonobuoy 67 | curl -sL $SONOGZIP | tar zxvf - -C conformance sonobuoy 68 | conformance/sonobuoy run --mode=certified-conformance --timeout=30000 --dns-namespace coredns 69 | ``` 70 | 71 | ### Watch the tests run in one console window 72 | ``` 73 | $ watch 'conformance/sonobuoy status --json | json_pp' 74 | ``` 75 | 76 | ### Watch the logs as the tests run in another console window 77 | ``` 78 | $ conformance/sonobuoy logs -f 79 | ``` 80 | 81 | ### Get the test results upon completion 82 | ``` 83 | $ outfile=$(conformance/sonobuoy retrieve) &&\ 84 | mv $outfile conformance &&\ 85 | rm -rf conformance/results &&\ 86 | mkdir -p conformance/results &&\ 87 | tar xzf conformance/$outfile -C conformance/results 88 | ``` 89 | -------------------------------------------------------------------------------- /conformance/conformance-submission/e2e.log: -------------------------------------------------------------------------------- 1 | I0903 20:48:02.965737 22 e2e.go:109] Starting e2e run "be3fe695-ac69-4d12-b629-9ff0cfb9a203" on Ginkgo node 1 2 | Running Suite: Kubernetes e2e suite - /usr/local/bin 3 | ==================================================== 4 | Random Seed: 1725396482 - will randomize all specs 5 | 6 | Will run 404 of 6603 specs 7 | 
[Ginkgo per-spec progress output elided: each "•" marks a passed spec, each "S" a skipped spec] 8 | 9 | Ran 404 of 6603 Specs in 6508.638 seconds 10 | SUCCESS! 
-- 404 Passed | 0 Failed | 0 Pending | 6199 Skipped 11 | PASS 12 | 13 | Ginkgo ran 1 suite in 1h48m29.192005907s 14 | Test Suite Passed 15 | -------------------------------------------------------------------------------- /dtk: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | long_opts=help,check-compatibility,up:,down:,delete:,verify:,config:,create-template:,install-addon:,no-create-vms 6 | 7 | # This snippet enables all scripts to exec all other scripts without knowing any 8 | # other script's path, as long as all the scripts (except this one) are children 9 | # of the 'scripts' directory. 10 | 11 | export DTKBASE="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 12 | function xec() { 13 | if echo $1 | grep -q /; then 14 | f=$(find $DTKBASE/scripts -type d -name $(dirname $1) | xargs -I% find % -type f -name $(basename $1)) 15 | else 16 | f=$(find $DTKBASE/scripts -name $1) 17 | fi 18 | [[ -n $f ]] && $f "${@:2}" 19 | } 20 | export -f xec 21 | 22 | # set from cmdline 23 | help=0 24 | check_compatibility=0 25 | up= 26 | down= 27 | delete= 28 | verify= 29 | config=$DTKBASE/config.yaml 30 | create_template= 31 | install_addon= 32 | no_create_vms=false 33 | 34 | # set by scripts/helpers/parse-config.py: 35 | virt= 36 | k8s_containerized_cplane= 37 | k8s_cluster_cidr= 38 | k8s_cluster_dns= 39 | k8s_kube_proxy= 40 | kvm_network= 41 | kvm_kickstart= 42 | kvm_os_variant= 43 | vbox_host_network_interface= 44 | vbox_host_only_network= 45 | vbox_kickstart= 46 | vbox_vboxdir= 47 | vm_linux= 48 | vm_create_template=1 49 | vm_template_vmname= 50 | 51 | # entry point 52 | 53 | set -a 54 | source $DTKBASE/artifacts 55 | set +a 56 | 57 | # parse cmdline 58 | if ! parsed=$(xec parseargs $long_opts "$@"); then 59 | echo "$parsed" 60 | exit 1 61 | fi 62 | eval $(echo -e "$parsed") 63 | 64 | if [[ $help -eq 1 ]]; then 65 | xec show-usage 66 | exit 0 67 | fi 68 | 69 | if [[ $check_compatibility -eq 1 ]]; then 70 | xec check-compatibility 71 | exit 0 72 | fi 73 | 74 | if [[ -n "$verify" ]]; then 75 | if [[ "$verify" != "upstreams" ]] && [[ "$verify" != "files" ]]; then 76 | echo "unsupported value for --verify option: $verify" 77 | exit 1 78 | fi 79 | xec check-objects "$verify" 80 | exit 0 81 | fi 82 | 83 | # ensure config.yaml exists 84 | config=$(realpath $config) 85 | if [[ ! -f "$config" ]]; then 86 | echo "config file not found: $config" 87 | exit 1 88 | fi 89 | 90 | # parse config.yaml 91 | xec parse-config.py $config check || exit 92 | eval $(xec parse-config.py $config) 93 | 94 | if [[ -n "$up" ]]; then 95 | xec $virt/up-down-del up $up $config 96 | exit 0 97 | elif [[ -n "$down" ]]; then 98 | xec $virt/up-down-del down $down $config 99 | exit 0 100 | elif [[ -n "$delete" ]]; then 101 | xec $virt/up-down-del delete $delete $config 102 | exit 0 103 | fi 104 | 105 | if [[ -n "$install_addon" ]]; then 106 | xec install-addons\ 107 | --config=$config\ 108 | --admin-kubeconfig=$DTKBASE/generated/kubeconfig/admin.kubeconfig\ 109 | --priv-key=$DTKBASE/generated/kickstart/id_ed25519\ 110 | --addon=$install_addon 111 | exit 0 112 | fi 113 | 114 | if [[ $virt == "virtualbox" ]]; then 115 | if [[ -z "$vbox_host_network_interface" ]] && [[ -z "$vbox_host_only_network" ]]; then 116 | echo "either --host-network-interface or --host-only-network is required" 117 | exit 1 118 | elif [[ ! -z "$vbox_host_network_interface" ]] && [[ ! 
-z "$vbox_host_only_network" ]]; then 119 | echo "--host-network-interface and --host-only-network are exclusive of each other" 120 | exit 1 121 | fi 122 | if [[ -z "$vbox_vboxdir" ]]; then 123 | vbox_vboxdir=$(vboxmanage list systemproperties | grep folder | awk -F: '{print $2}' | xargs) 124 | fi 125 | if [[ -z "$vbox_vboxdir" ]]; then 126 | echo "directory for virtualbox VMs is not defined" 127 | exit 1 128 | elif [[ ! -d $vbox_vboxdir ]]; then 129 | echo "directory for virtualbox VMs does not exist: $vbox_vboxdir" 130 | exit 1 131 | fi 132 | fi 133 | 134 | supported_linux=("=centos9=" "=rocky=" "=alma8=" "=alma9=") 135 | if [[ ! ${supported_linux[@]} =~ "=$vm_linux=" ]]; then 136 | echo "unsupported value for linux config: $vm_linux" 137 | exit 1 138 | fi 139 | 140 | # --create-template on the cmdline overrides config.yaml 141 | if [[ "$create_template" == "true" ]]; then 142 | vm_create_template=1 143 | elif [[ "$create_template" == "false" ]]; then 144 | vm_create_template=0 145 | fi 146 | 147 | echo "creating directories to generate various files into" 148 | mkdir -p $DTKBASE/generated/kickstart\ 149 | $DTKBASE/generated/kubeconfig\ 150 | $DTKBASE/generated/cert\ 151 | $DTKBASE/generated/hostonly-netcfg\ 152 | $DTKBASE/generated/iso 153 | 154 | echo "downloading core cluster components" 155 | xec download-objects\ 156 | --create-template=$vm_create_template\ 157 | --linux=$vm_linux\ 158 | --virt=$virt 159 | 160 | if [[ $no_create_vms == false ]]; then 161 | echo "provisioning vms" 162 | if [[ $virt == "virtualbox" ]]; then 163 | xec virtualbox/provision-vms\ 164 | --create-template=$vm_create_template\ 165 | --linux=$vm_linux\ 166 | --host-network-interface=$vbox_host_network_interface\ 167 | --host-only-network=$vbox_host_only_network\ 168 | --vboxdir=$vbox_vboxdir\ 169 | --template-vmname=$vm_template_vmname\ 170 | --config=$config 171 | else 172 | xec kvm/provision-vms\ 173 | --create-template=$vm_create_template\ 174 | --linux=$vm_linux\ 175 | --template-vmname=$vm_template_vmname\ 176 | --config=$config\ 177 | --os-variant=$kvm_os_variant 178 | fi 179 | fi 180 | 181 | echo "generating root CA to $DTKBASE/generated/cert/ca.pem (and ca-key.pem)" 182 | xec gen-root-ca 183 | 184 | echo "generating Kubernetes core cluster" 185 | xec gen-core-k8s\ 186 | --containerized-cplane=$k8s_containerized_cplane\ 187 | --kube-proxy-enabled=$k8s_kube_proxy\ 188 | --priv-key=$DTKBASE/generated/kickstart/id_ed25519\ 189 | --ca-cert=$DTKBASE/generated/cert/ca.pem\ 190 | --ca-key=$DTKBASE/generated/cert/ca-key.pem\ 191 | --config=$config\ 192 | --cluster-cidr=$k8s_cluster_cidr\ 193 | --virt=$virt 194 | 195 | echo "installing add-ons" 196 | xec install-addons\ 197 | --config=$config\ 198 | --admin-kubeconfig=$DTKBASE/generated/kubeconfig/admin.kubeconfig\ 199 | --priv-key=$DTKBASE/generated/kickstart/id_ed25519 200 | 201 | echo 202 | echo "finished provisioning cluster. To interact with the cluster:" 203 | echo " export KUBECONFIG=$DTKBASE/generated/kubeconfig/admin.kubeconfig" 204 | echo 205 | echo "use the 'sshto' script to ssh into a VM" 206 | -------------------------------------------------------------------------------- /hack/clone-vm: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Clones a template VM the way the 'dtk' script does it. 
4 | # 5 | # Args: 6 | # 7 | # --host-only-network 8 | # --host-network-interface 9 | # --ip 10 | # --cpu, --ram 11 | # --template-vm-name, --vm-name 12 | # --vbox-dir 13 | # --virt 'virtualbox'|'kvm' 14 | # 15 | # Examples: 16 | # 17 | # If the template is host only networking: 18 | # 19 | # hack/clone-vm\ 20 | # --host-only-network 192.168.56\ 21 | # --ip 202\ 22 | # --cpu 3\ 23 | # --ram 4096\ 24 | # --template-vm-name alma93\ 25 | # --vm-name alma93clone\ 26 | # --virt virtualbox 27 | # 28 | # If the template is bridged networking: 29 | # 30 | # hack/clone-vm\ 31 | # --host-network-interface enp0s31f6\ 32 | # --cpu 3\ 33 | # --ram 4096\ 34 | # --template-vm-name alma93\ 35 | # --vm-name alma93clone\ 36 | # --virt virtualbox 37 | # 38 | # KVM clone 39 | # 40 | # hack/clone-vm\ 41 | # --cpu 3\ 42 | # --ram 4096\ 43 | # --template-vm-name alma8\ 44 | # --vm-name alma8clone\ 45 | # --virt kvm 46 | # 47 | 48 | export DTKBASE="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" 49 | function xec() { 50 | if echo $1 | grep -q /; then 51 | f=$(find . -type d -name $(dirname $1) | xargs -I% find % -type f -name $(basename $1)) 52 | else 53 | f=$(find $DTKBASE/scripts -name $1) 54 | fi 55 | [[ -n $f ]] && $f "${@:2}" 56 | } 57 | export -f xec 58 | 59 | hostonly= 60 | hni= 61 | cpu= 62 | ram= 63 | ip= 64 | template_vmname= 65 | vmname= 66 | vboxdir=$(vboxmanage list systemproperties | grep folder | awk -F: '{print $2}' | xargs) 67 | virt= 68 | 69 | for ((i = 1; i <= $#; i++ )); do 70 | if [[ ${!i} == "--virt" ]]; then 71 | ((i++)) 72 | virt=${!i} 73 | elif [[ ${!i} == "--cpu" ]]; then 74 | ((i++)) 75 | cpu=${!i} 76 | elif [[ ${!i} == "--ram" ]]; then 77 | ((i++)) 78 | ram=${!i} 79 | elif [[ ${!i} == "--ip" ]]; then 80 | ((i++)) 81 | ip=${!i} 82 | elif [[ ${!i} == "--host-only-network" ]]; then 83 | ((i++)) 84 | hostonly=${!i} 85 | elif [[ ${!i} == "--host-network-interface" ]]; then 86 | ((i++)) 87 | hni=${!i} 88 | elif [[ ${!i} == "--template-vm-name" ]]; then 89 | ((i++)) 90 | template_vmname=${!i} 91 | elif [[ ${!i} == "--vm-name" ]]; then 92 | ((i++)) 93 | vmname=${!i} 94 | elif [[ ${!i} == "--vbox-dir" ]]; then 95 | ((i++)) 96 | vboxdir=${!i} 97 | else 98 | echo unknown param: ${!i} 99 | exit 1 100 | fi 101 | done 102 | 103 | if [[ "$virt" == "kvm" ]]; then 104 | xec kvm/clone-vm\ 105 | --priv-key=$DTKBASE/generated/kickstart/id_ed25519\ 106 | --template-vmname=$template_vmname\ 107 | --clone-vmname=$vmname\ 108 | --clone-ram=$ram\ 109 | --clone-cpus=$cpu 110 | elif [[ "$virt" == "virtualbox" ]]; then 111 | xec virtualbox/clone-vm\ 112 | --priv-key=$DTKBASE/generated/kickstart/id_ed25519\ 113 | --template-vmname=$template_vmname\ 114 | --clone-vmname=$vmname\ 115 | --clone-ram=$ram\ 116 | --clone-cpus=$cpu\ 117 | --host-only-network=$hostonly\ 118 | --host-only-octet=$ip\ 119 | --vboxdir=$vboxdir\ 120 | --shutdown=false 121 | else 122 | echo "need --virt arg" 123 | exit 1 124 | fi 125 | -------------------------------------------------------------------------------- /hack/create-image-archive: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Exports the contents of the containerd image cache to a tarball. If you place 4 | # exported images in '.tar' and '.tgz' files into the 'images' directory of the 5 | # desktop-kubernetes project on your workstation the installer will copy them 6 | # to each host and populate the containerd cache from them, which can save you 7 | # some heartburn associated with Docker rate-limiting.
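#
# For reference, the inverse operation - loading one exported tarball back into
# a host's containerd cache by hand - is a single ctr invocation (socket path as
# used below; the tarball name here is only an example):
#
#   ctr -n k8s.io -a /var/run/containerd/containerd.sock image import /tmp/images/some-image.tar
#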
8 | # 9 | # Usage: 10 | # 11 | # 1. Create a single-node cluster and set it up with CNI, monitoring, etc., the 12 | # way you commonly use it including any preferred workloads. (Creating a single- 13 | # node cluster is important because then you only have one containerd cache.) 14 | # 2. Copy this script to the host, ssh into the host and run this script. 15 | # 3. Exit back to the desktop and: 16 | # scp -i ./generated/kickstart/id_ed25519 root@<vm-ip>:/tmp/images/images.tgz ./images 17 | # 4. The next time you create a cluster, the tarball will get automatically copied 18 | # to each host and loaded into each host's containerd cache. See 'scripts/worker/ 19 | # containerd/install-containerd' 20 | # 21 | # If --dockeronly is specified, only the docker.io images are exported; otherwise all are. 22 | # 23 | 24 | image_path=/tmp/images 25 | mkdir -p $image_path 26 | 27 | ctr -n k8s.io -a /var/run/containerd/containerd.sock image ls -q | grep -v '^sha256' | while read image; do 28 | if [[ "$1" == "--dockeronly" ]] && [[ $image != docker.io* ]]; then 29 | continue 30 | fi 31 | tarfile=$image_path/$(echo $image | sed "s|[/:]|-|g").tar 32 | echo "exporting $image to $tarfile" 33 | ctr -n k8s.io -a /var/run/containerd/containerd.sock image export $tarfile $image 34 | done 35 | 36 | echo "creating a tarball from the exported images" 37 | pushd $image_path 38 | tar -czvf images.tgz *.tar 39 | popd 40 | -------------------------------------------------------------------------------- /hack/create-vm: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Creates a template VM the way the 'dtk' script does it. 4 | # 5 | # Args: 6 | # 7 | # --host-only-network 8 | # --host-network-interface 9 | # --os-iso 10 | # --ga-iso 11 | # --vm-name 12 | # --vbox-dir 13 | # --ks 14 | # 15 | # Examples: 16 | # 17 | # hack/create-vm\ 18 | # --host-only-network 192.168.56\ 19 | # --os-iso $PWD/binaries/CentOS-Stream-9-latest-x86_64-dvd1.iso\ 20 | # --ga-iso $PWD/binaries/VBoxGuestAdditions_7.0.18.iso\ 21 | # --vm-name centos9-stream\ 22 | # --ks vbox.text.ks.cfg 23 | # 24 | # hack/create-vm\ 25 | # --host-network-interface enp0s31f6\ 26 | # --os-iso $PWD/binaries/AlmaLinux-9.3-x86_64-dvd.iso\ 27 | # --ga-iso $PWD/binaries/VBoxGuestAdditions_7.0.18.iso\ 28 | # --vm-name alma93 29 | # 30 | 31 | export DTKBASE="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" 32 | function xec() { 33 | if echo $1 | grep -q /; then 34 | f=$(find . 
-type d -name $(dirname $1) | xargs -I% find % -type f -name $(basename $1)) 35 | else 36 | f=$(find $DTKBASE/scripts -name $1) 37 | fi 38 | [[ -n $f ]] && $f "${@:2}" 39 | } 40 | export -f xec 41 | 42 | hostonly= 43 | hni= 44 | os_iso= 45 | ga_iso= 46 | vmname= 47 | ks= 48 | vboxdir=$(vboxmanage list systemproperties | grep folder | awk -F: '{print $2}' | xargs) 49 | 50 | for ((i = 1; i <= $#; i++ )); do 51 | if [[ ${!i} == "--os-iso" ]]; then 52 | ((i++)) 53 | os_iso=${!i} 54 | elif [[ ${!i} == "--ga-iso" ]]; then 55 | ((i++)) 56 | ga_iso=${!i} 57 | elif [[ ${!i} == "--host-only-network" ]]; then 58 | ((i++)) 59 | hostonly=${!i} 60 | elif [[ ${!i} == "--host-network-interface" ]]; then 61 | ((i++)) 62 | hni=${!i} 63 | elif [[ ${!i} == "--vm-name" ]]; then 64 | ((i++)) 65 | vmname=${!i} 66 | elif [[ ${!i} == "--vbox-dir" ]]; then 67 | ((i++)) 68 | vboxdir=${!i} 69 | elif [[ ${!i} == "--ks" ]]; then 70 | ((i++)) 71 | ks=${!i} 72 | else 73 | echo unknown param: ${!i} 74 | exit 1 75 | fi 76 | done 77 | 78 | xec virtualbox/create-template-vm\ 79 | --template-vmname=$vmname\ 80 | --linux-iso-path=$os_iso\ 81 | --guest-additions-path=$ga_iso\ 82 | --host-network-interface=$hni\ 83 | --host-only-network=$hostonly\ 84 | --vboxdir=$vboxdir\ 85 | --kickstart=$ks 86 | -------------------------------------------------------------------------------- /hack/resize-storage: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | echo "***DEPRECATED DO NOT USE***" 4 | exit 1 5 | # 6 | # Background: The kickstart file that this project uses configures the VM 7 | # storage like so (with a 40 gig drive): 8 | # 9 | # [root@doc ~]# lsblk 10 | # NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT 11 | # sda 8:0 0 39.1G 0 disk 12 | # ├─sda1 8:1 0 1G 0 part /boot 13 | # └─sda2 8:2 0 38.1G 0 part 14 | # ├─cs_cloneme-root 253:0 0 34.2G 0 lvm / 15 | # └─cs_cloneme-swap 253:1 0 3.9G 0 lvm [SWAP] 16 | # 17 | # Assuming the storage scheme above, this script (when run in the guest) will 18 | # "upsize" the /dev/sda device and the cs_cloneme-root logical volume etc. 19 | # 20 | # Usage: 21 | # 1) Increase the size of the virtual storage in VirtualBox (VM must be 22 | # stopped first). 
Example: 23 | # 24 | # vboxdir=/sdb1/virtualboxvms 25 | # vmname=frobozz 26 | # vboxmanage modifymedium disk $vboxdir/$vmname/$vmname.vdi --resize 80000 27 | # 28 | # 2) Copy this script into the host 29 | # 3) SSH into the host and run the script there 30 | # 31 | # WARNING: This script doesn't have any safety checks - it can be used safely only if: 32 | # 1 - The guest storage was partitioned by the kickstart in this project 33 | # 2 - You increased (not decreased) the size of the virtual disk using 34 | # vboxmanage 35 | 36 | parted -s -a optimal /dev/sda "resizepart 2 100%" 37 | echo 1 > /sys/block/sda/device/rescan 38 | pvresize /dev/sda2 39 | lvextend -l +100%FREE /dev/cs_cloneme/root 40 | xfs_growfs /dev/cs_cloneme/root 41 | -------------------------------------------------------------------------------- /images/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | */ 3 | !.gitignore 4 | !README.md -------------------------------------------------------------------------------- /images/README.md: -------------------------------------------------------------------------------- 1 | # Images 2 | 3 | If you place `.tar` and `.tgz` files here, the installer will copy them to each host as the cluster is being provisioned and then load the images into the containerd cache on each host to help minimize the impact of Docker rate limiting. See `scripts/worker/containerd/install-containerd` and `hack/create-image-archive` for additional information. 4 | -------------------------------------------------------------------------------- /kickstarts/kvm.text.ks.cfg: -------------------------------------------------------------------------------- 1 | # source: kvm.text.ks.cfg 2 | # ignore everything except vda when auto partitioning 3 | ignoredisk --only-use=vda 4 | 5 | # creates a root partition (1 GB or larger), a swap partition, and an appropriate /boot 6 | # partition for the architecture. 7 | autopart --type=plain 8 | 9 | # docs say this sets the timeout before the default boot is selected but - affects the 10 | # installed OS (i.e. on second boot and thereafter) rather than the kickstart startup 11 | # which always takes 60 seconds... 12 | bootloader --timeout=5 13 | 14 | # don't run the setup agent on first boot 15 | firstboot --disabled 16 | 17 | # Shutdown after installation 18 | shutdown 19 | 20 | # do not remove any partitions and create a default disk label for all disks 21 | # in their respective architecture that have been designated for formatting 22 | clearpart --none --initlabel 23 | 24 | # non-graphical install 25 | text 26 | 27 | # use CDROM installation media 28 | cdrom 29 | 30 | # keyboard layouts 31 | keyboard --vckeymap=us --xlayouts='us' 32 | 33 | # system language 34 | lang en_US.UTF-8 35 | 36 | # Since we're only setting up a template VM to clone from and when the clones are created - their hostnames will 37 | # be changed to match their VM names so this hostname doesn't mean anything 38 | network --bootproto=dhcp --device=enp1s0 --ipv6=auto --activate --hostname=cloneme 39 | 40 | # required CentOS / Rocky repo 41 | repo --name="AppStream" --baseurl=file:///run/install/sources/mount-0000-cdrom/AppStream 42 | 43 | # root password 44 | rootpw frobozz 45 | 46 | # keep the local host's time synchronized with the time server 47 | services --enabled=chronyd 48 | 49 | # system time zone. 
--utc means the system clock is UTC, which can be verified with 'sudo hwclock' 50 | timezone America/New_York --utc 51 | 52 | # no selinux 53 | selinux --disabled 54 | 55 | %packages 56 | @core 57 | # kexec-tools supports fast reboot 58 | kexec-tools 59 | # net-tools for network debugging 60 | net-tools 61 | # tar for dtk 62 | tar 63 | # for re-sizing the disk 64 | cloud-utils-growpart 65 | gdisk 66 | # for Kubernetes 67 | socat 68 | conntrack 69 | ipset 70 | %end 71 | 72 | # kernel crash dump config 73 | %addon com_redhat_kdump --enable --reserve-mb='auto' 74 | %end 75 | 76 | # 23-May-2025 %anaconda is deprecated but I can't figure out how to set the password 77 | # policy on Alma 9.5 without it so - commenting password policy out for now 78 | # password policy 79 | ###%anaconda 80 | ###pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty 81 | ###%end 82 | 83 | # configure passwordless SSH 84 | %post --interpreter=/bin/bash --log=/root/kickstart.log 85 | mkdir -p /root/.ssh 86 | cat <<EOF >| /root/.ssh/authorized_keys 87 | REPLACE-WITH-SSHKEY 88 | EOF 89 | %end 90 | -------------------------------------------------------------------------------- /kickstarts/vbox.graphical.ks.cfg: -------------------------------------------------------------------------------- 1 | # source: vbox.graphical.ks.cfg 2 | # ignore everything except sda when auto partitioning 3 | ignoredisk --only-use=sda 4 | 5 | # creates a root partition (1 GB or larger), a swap partition, and an appropriate /boot 6 | # partition for the architecture. 7 | autopart --type=lvm 8 | 9 | # docs say this sets the timeout before the default boot is selected but - affects the 10 | # installed OS (i.e. on second boot and thereafter) rather than the kickstart startup 11 | # which always takes 60 seconds... 12 | bootloader --timeout=5 13 | 14 | # don't run the setup agent on first boot 15 | firstboot --disabled 16 | 17 | # agree the EULA 18 | eula --agreed 19 | 20 | # Shutdown after installation 21 | shutdown 22 | 23 | # do not remove any partitions and create a default disk label for all disks 24 | # in their respective architecture that have been designated for formatting 25 | clearpart --none --initlabel 26 | 27 | # graphical install 28 | graphical 29 | 30 | # use CDROM installation media 31 | cdrom 32 | 33 | # keyboard layouts 34 | keyboard --vckeymap=us --xlayouts='us' 35 | 36 | # system language 37 | lang en_US.UTF-8 38 | 39 | # this could either be the NAT network or the bridged network. Both have identical representation in the kickstart 40 | # file. The host only network will be configured by the desktop-kubernetes service. 41 | network --bootproto=dhcp --device=enp0s3 --onboot=on --ipv6=auto 42 | 43 | # Since we're only setting up a template VM to clone from and when the clones are created - their hostnames will 44 | # be changed to match their VM names so this hostname doesn't mean anything 45 | network --hostname=cloneme 46 | 47 | # required CentOS / Rocky repo 48 | repo --name="AppStream" --baseurl=file:///run/install/sources/mount-0000-cdrom/AppStream 49 | 50 | # root password 51 | rootpw frobozz 52 | 53 | # X window system configuration information 54 | xconfig --startxonboot 55 | 56 | # keep the local host's time synchronized with the time server 57 | services --enabled="chronyd" 58 | 59 | # system time zone. 
--utc means the system clock is UTC, which can be verified with 'sudo hwclock' 60 | timezone America/New_York --utc 61 | 62 | # no selinux 63 | selinux --disabled 64 | 65 | # kexec-tools supports fast reboot 66 | %packages 67 | @^graphical-server-environment 68 | kexec-tools 69 | # kernel-devel for vbox guest additions install 70 | kernel-devel 71 | # net-tools for network debugging 72 | net-tools 73 | # for Kubernetes 74 | socat 75 | conntrack 76 | ipset 77 | %end 78 | 79 | # kernel crash dump config 80 | %addon com_redhat_kdump --enable --reserve-mb='auto' 81 | %end 82 | 83 | # post-install section 84 | # 1) Write gnome-initial-setup-done so we don't get the gnome config screens 85 | # 2) Copy the desktop-kubernetes service unit and associated script from the same ISO that holds this 86 | # kickstart script, and configure the service so it starts on the next boot, and does the unattended guest 87 | # additions install. This helped me: 88 | # https://unix.stackexchange.com/questions/513008/copying-files-from-bootable-usb-to-local-drive-in-post-script-of-kickstart 89 | # 3) Initialize ssh by copying the public key from the ISO to /root/.ssh/authorized_keys 90 | 91 | %post --interpreter=/bin/bash --log=/root/kickstart.log 92 | mkdir -p /root/.config 93 | echo yes > /root/.config/gnome-initial-setup-done 94 | mkdir /mnt/myks 95 | mount /dev/disk/by-label/OEMDRV /mnt/myks 96 | cp /mnt/myks/desktop-kubernetes.sh /root/ && chmod +x /root/desktop-kubernetes.sh 97 | cp /mnt/myks/desktop-kubernetes.service /etc/systemd/system/desktop-kubernetes.service 98 | mkdir -p /root/.ssh && cp /mnt/myks/id_ed25519.pub /root/.ssh/authorized_keys 99 | umount /mnt/myks 100 | rm -rf /mnt/myks 101 | systemctl enable desktop-kubernetes 102 | # next time the system starts it will run desktop-kubernetes.sh 103 | %end 104 | 105 | # password policy 106 | %anaconda 107 | pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty 108 | %end 109 | -------------------------------------------------------------------------------- /kickstarts/vbox.text.ks.cfg: -------------------------------------------------------------------------------- 1 | # source: vbox.text.ks.cfg 2 | # ignore everything except sda when auto partitioning 3 | ignoredisk --only-use=sda 4 | 5 | # creates a root partition (1 GB or larger), a swap partition, and an appropriate /boot 6 | # partition for the architecture. 7 | autopart --type=plain 8 | 9 | # docs say this sets the timeout before the default boot is selected but - affects the 10 | # installed OS (i.e. on second boot and thereafter) rather than the kickstart startup 11 | # which always takes 60 seconds... 12 | bootloader --timeout=5 13 | 14 | # don't run the setup agent on first boot 15 | firstboot --disabled 16 | 17 | # agree the EULA 18 | eula --agreed 19 | 20 | # Shutdown after installation 21 | shutdown 22 | 23 | # do not remove any partitions and create a default disk label for all disks 24 | # in their respective architecture that have been designated for formatting 25 | clearpart --none --initlabel 26 | 27 | # non-graphical install 28 | text 29 | 30 | # use CDROM installation media 31 | cdrom 32 | 33 | # keyboard layouts 34 | keyboard --vckeymap=us --xlayouts='us' 35 | 36 | # system language 37 | lang en_US.UTF-8 38 | 39 | # this could either be the NAT network or the bridged network. Both have identical representation in the kickstart 40 | # file. The host only network will be configured by the desktop-kubernetes service. 
41 | network --bootproto=dhcp --device=enp0s3 --onboot=on --ipv6=auto 42 | 43 | # Since we're only setting up a template VM to clone from and when the clones are created - their hostnames will 44 | # be changed to match their VM names so this hostname doesn't mean anything 45 | network --hostname=cloneme 46 | 47 | # required CentOS / Rocky repo 48 | repo --name="AppStream" --baseurl=file:///run/install/sources/mount-0000-cdrom/AppStream 49 | 50 | # root password 51 | rootpw frobozz 52 | 53 | # X window system configuration information 54 | xconfig --startxonboot 55 | 56 | # keep the local host's time synchronized with the time server 57 | services --enabled="chronyd" 58 | 59 | # system time zone. --utc means the system clock is UTC, which can be verified with 'sudo hwclock' 60 | timezone America/New_York --utc 61 | 62 | # no selinux 63 | selinux --disabled 64 | 65 | %packages 66 | @core 67 | # kexec-tools supports fast reboot 68 | kexec-tools 69 | # kernel-devel for vbox guest additions install 70 | kernel-devel 71 | # net-tools for network debugging 72 | net-tools 73 | # for Kubernetes 74 | socat 75 | conntrack 76 | ipset 77 | %end 78 | 79 | # kernel crash dump config 80 | %addon com_redhat_kdump --enable --reserve-mb='auto' 81 | %end 82 | 83 | # post-install section 84 | # 1) Write gnome-initial-setup-done so we don't get the gnome config screens 85 | # 2) Copy the desktop-kubernetes service unit and associated script from the same ISO that holds this 86 | # kickstart script, and configure the service so it starts on the next boot, and does the unattended guest 87 | # additions install. This helped me: 88 | # https://unix.stackexchange.com/questions/513008/copying-files-from-bootable-usb-to-local-drive-in-post-script-of-kickstart 89 | # 3) Initialize ssh by copying the public key from the ISO to /root/.ssh/authorized_keys 90 | 91 | %post --interpreter=/bin/bash --log=/root/kickstart.log 92 | mkdir -p /root/.config 93 | echo yes > /root/.config/gnome-initial-setup-done 94 | mkdir /mnt/myks 95 | mount /dev/disk/by-label/OEMDRV /mnt/myks 96 | cp /mnt/myks/desktop-kubernetes.sh /root/ && chmod +x /root/desktop-kubernetes.sh 97 | cp /mnt/myks/desktop-kubernetes.service /etc/systemd/system/desktop-kubernetes.service 98 | mkdir -p /root/.ssh && cp /mnt/myks/id_ed25519.pub /root/.ssh/authorized_keys 99 | umount /mnt/myks 100 | rm -rf /mnt/myks 101 | systemctl enable desktop-kubernetes 102 | # next time the system starts it will run desktop-kubernetes.sh 103 | %end 104 | 105 | # password policy 106 | %anaconda 107 | pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty 108 | %end 109 | -------------------------------------------------------------------------------- /resources/design.md: -------------------------------------------------------------------------------- 1 | # Design 2 | 3 | This project might seem like a lot of bash code but it's actually pretty simple. There are about 30-odd primary shell scripts, and a number of *helper* scripts. This README focuses on the *primary* scripts. 4 | 5 | ## Directory structure 6 | 7 | The script directory structure is organized around related areas of functionality. The scripts generate numerous files as part of provisioning a cluster. These generated files are all placed into the `generated` directory in the project root. 
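
The layout of `generated`, per the directories the `dtk` script creates (the annotations are a summary added here for orientation, not from the script):

```
generated/
├── cert/              # cluster root CA (ca.pem / ca-key.pem) and TLS material
├── hostonly-netcfg/   # VirtualBox host-only network configuration
├── iso/               # generated ISOs (e.g. for the kickstart install)
├── kickstart/         # SSH keypair (id_ed25519) and rendered kickstart files
└── kubeconfig/        # admin.kubeconfig
```
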
Most of these are purely transitory, **except:** 8 | 9 | | File | Purpose | 10 | | ---- | ------- | 11 | | generated/kickstart/id_ed25519 | This is the private key corresponding to the public key that Desktop Kubernetes adds to the template VM `authorized_keys` file. As long as the template VM is used to provision new Desktop Kubernetes clusters, this private key must be retained for ssh'ing into the cluster VMs. Desktop Kubernetes only generates the SSH keys when a template is created - and only if the keypair does not already exist. | 12 | | generated/kubeconfig/admin.kubeconfig | This is the admin kubeconfig. It is regenerated for each new cluster you provision. You need this kubeconfig to run `kubectl` commands against the cluster. | 13 | 14 | ## Call structure 15 | 16 | All of the scripts in the call tree except for `dtk` and `artifacts` are in the `scripts` directory. All of them are invoked, directly or indirectly, by `dtk`. The tree below shows the scripts as they are called to create a template VM, and then provision a cluster using the template. See the _Narrative_ section that follows for a description of each numeric annotation: 17 | 18 | ``` 19 | dtk 20 | ├─ source artifacts (1) 21 | │ 22 | ├─ scripts/helpers/download-objects (2) 23 | │ └─ scripts/helpers/download-obj 24 | │ 25 | ├─ scripts/kvm/provision-vms (3) (if kvm) 26 | │ ├─ scripts/kvm/create-template-vm 27 | │ │ └─ scripts/vm/gen-ssh-keyfiles 28 | │ ├─ scripts/kvm/clone-vm 29 | │ └─ scripts/kvm/configure-etc-hosts 30 | │ 31 | ├─ scripts/virtualbox/provision-vms (3) (if vbox) 32 | │ ├─ scripts/virtualbox/create-template-vm 33 | │ │ ├─ scripts/vm/gen-ssh-keyfiles 34 | │ │ ├─ scripts/os/gen-kickstart-iso 35 | │ │ ├─ scripts/virtualbox/create-vm 36 | │ │ └─ scripts/virtualbox/install-guest-additions 37 | │ ├─ scripts/virtualbox/clone-vm 38 | │ │ └─ scripts/virtualbox/configure-hostonly-networking 39 | │ │ └─ scripts/virtualbox/gen-hostonly-ifcfg-iso 40 | │ └─ scripts/virtualbox/configure-etc-hosts 41 | │ 42 | ├─ scripts/cluster/gen-root-ca (4) 43 | │ 44 | ├─ scripts/cluster/gen-core-k8s (5) 45 | │ ├─ scripts/worker/configure-worker (5a) 46 | │ │ ├─ scripts/os/configure-firewall 47 | │ │ ├─ scripts/worker/misc/install-misc-bins 48 | │ │ ├─ scripts/worker/containerd/install-containerd 49 | │ │ ├─ scripts/worker/kubelet/install-kubelet 50 | │ │ └─ scripts/worker/kube-proxy/install-kube-proxy 51 | │ └─ scripts/control-plane/configure-controller (5b) 52 | │ ├─ scripts/os/configure-firewall 53 | │ ├─ scripts/control-plane/etcd/install-etcd 54 | │ ├─ scripts/control-plane/kube-apiserver/install-kube-apiserver 55 | │ ├─ scripts/control-plane/kube-controller-manager/install-kube-controller-manager 56 | │ └─ scripts/control-plane/kube-scheduler/install-kube-scheduler 57 | │ 58 | └─ scripts/addons/install-addons (6) 59 | ``` 60 | 61 | ## Narrative 62 | 63 | 1. The `artifacts` file is sourced, which defines all the upstream URLs and local filesystem locations for the core objects needed to provision the cluster. 64 | 2. All the binaries, ISOs, manifests, and tarballs needed to provision the core cluster are downloaded into the `binaries` directory based on configuration options. E.g. if config specifies `linux: rocky` then `Rocky-X.X-x86_64-dvd1.iso` is downloaded (X.X being whatever version is hard-coded in the `artifacts` file). 65 | 3. All the VMs are created: 66 | - If config specifies `create-template: true` then ssh keys are generated, and a template VM is created using Kickstart and a CentOS / Alma / Rocky ISO depending on the `linux` selection.
The ssh public key is copied into the VM's `authorized_keys` file. 67 | - The template VM (the one created in the prior step, or one that was already there identified by the `template-vmname` config) is cloned to create the VM(s) that comprise the Kubernetes cluster, so each VM has an identical configuration. 68 | 4. A root CA is generated for the cluster if one does not already exist. This CA is used to sign cluster certs throughout the remainder of the cluster provisioning process. 69 | 5. The core Kubernetes cluster is created by installing the canonical Kubernetes components on each VM: 70 | - 5a: Each worker gets a unique TLS cert/key for its `kubelet`, a few binaries: `crictl`, `runc`, and `cni plugins`, and of course the `kubelet` and `containerd`. 71 | - 5b: The controller is provisioned with cluster TLS, `etcd`, the `api server`, `controller manager`, and `scheduler`. This project runs with a single controller to minimize the desktop footprint. 72 | 6. The `install-addons` script is called. It walks its own directory and for each subdirectory that matches an entry in the `addons` section of the `config.yaml`, it looks for and invokes an `install` script in that directory to install the add-on. 73 | 74 | On completion, you have a functional Kubernetes cluster consisting of one or more VMs, the first of which is always a controller, and the remainder of which are workers. 75 | -------------------------------------------------------------------------------- /resources/desktop-kubernetes-no-text.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aceeric/desktop-kubernetes/4ba2b18420d2ab612d4d7f8858f67817a1c54c7a/resources/desktop-kubernetes-no-text.jpg -------------------------------------------------------------------------------- /resources/desktop-kubernetes-no-text.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aceeric/desktop-kubernetes/4ba2b18420d2ab612d4d7f8858f67817a1c54c7a/resources/desktop-kubernetes-no-text.png -------------------------------------------------------------------------------- /resources/desktop-kubernetes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aceeric/desktop-kubernetes/4ba2b18420d2ab612d4d7f8858f67817a1c54c7a/resources/desktop-kubernetes.jpg -------------------------------------------------------------------------------- /resources/desktop-kubernetes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aceeric/desktop-kubernetes/4ba2b18420d2ab612d4d7f8858f67817a1c54c7a/resources/desktop-kubernetes.png -------------------------------------------------------------------------------- /scripts/addons/calico/.gitignore: -------------------------------------------------------------------------------- 1 | values.yaml -------------------------------------------------------------------------------- /scripts/addons/calico/calico.conf: -------------------------------------------------------------------------------- 1 | [keyfile] 2 | unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali 3 | -------------------------------------------------------------------------------- /scripts/addons/calico/install:
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | TIGERA_VER=v3.30.0 6 | 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | url=https://github.com/projectcalico/calico/releases/download/$TIGERA_VER/tigera-operator-$TIGERA_VER.tgz 9 | tgz=$DTKBASE/binaries/$(basename $url) 10 | xec download-obj --url=$url --dest=$tgz 11 | 12 | priv_key=$(yq .cluster.priv-key $config) 13 | cluster_cidr=$(yq .k8s.cluster-cidr $config) 14 | virt=$(yq .virt $config) 15 | kube_proxy=$(yq .k8s.kube-proxy $config) 16 | 17 | controller_ip= 18 | for ((i = 0; i < $(yq '.vms | length' $config); ++i)); do 19 | vmname=$(yq .vms[$i].name $config) 20 | vmip=$(xec $virt/get-vm-ip $vmname) 21 | if [[ $i -eq 0 ]]; then 22 | controller_ip=$vmip 23 | fi 24 | # per https://docs.tigera.io/calico-enterprise/latest/operations/troubleshoot/troubleshooting#configure-networkmanager 25 | # last eyeballed on 26-May-2025 26 | scp -i $priv_key $script_dir/calico.conf root@$vmip:/etc/NetworkManager/conf.d/calico.conf 27 | ssh -i $priv_key root@$vmip "systemctl daemon-reload && systemctl restart NetworkManager" 28 | 29 | done 30 | 31 | sed "s|CLUSTERCIDR|$cluster_cidr|" $script_dir/values-template.yaml >| $script_dir/values.yaml 32 | 33 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 34 | helm upgrade --install tigera-operator\ 35 | --namespace tigera-operator\ 36 | --create-namespace\ 37 | --kubeconfig $admin_kubeconfig\ 38 | --wait\ 39 | --values $script_dir/values.yaml\ 40 | $tgz 41 | 42 | # these steps from https://docs.tigera.io/calico/latest/operations/ebpf/enabling-ebpf to be able 43 | # to run without kube-proxy 44 | 45 | cat <| $script_dir/values.yaml 14 | 15 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 16 | helm upgrade --install cilium\ 17 | --namespace cilium\ 18 | --create-namespace\ 19 | --kubeconfig $admin_kubeconfig\ 20 | --wait\ 21 | --values $script_dir/values.yaml\ 22 | $tgz 23 | 24 | echo "waiting (indefinitely) for cilium daemonset" 25 | while true; do 26 | if kubectl --kubeconfig $admin_kubeconfig -n cilium rollout status ds cilium; then 27 | break 28 | fi 29 | sleep 10s 30 | done 31 | -------------------------------------------------------------------------------- /scripts/addons/cilium/values-template.yaml: -------------------------------------------------------------------------------- 1 | k8sServiceHost: K8SSERVICEHOST 2 | k8sServicePort: 6443 3 | kubeProxyReplacement: "true" 4 | operator: 5 | replicas: 1 6 | ipam: 7 | # mode kubernetes per - https://docs.cilium.io/en/stable/network/concepts/ipam/kubernetes/ 8 | mode: kubernetes 9 | #hubble: 10 | # relay: 11 | # enabled: true 12 | # ui: 13 | # enabled: true 14 | # metrics: 15 | # enabled: 16 | # - dns 17 | # - drop 18 | # - tcp 19 | # - flow 20 | # - port-distribution 21 | # - icmp 22 | # - dns:labelsContext=source_namespace,destination_namespace 23 | # - drop:labelsContext=source_namespace,destination_namespace 24 | # - httpV2:sourceContext=workload-name|pod-name|reserved-identity;destinationContext=workload-name|pod-name|reserved-identity;labelsContext=source_namespace,destination_namespace,traffic_direction -------------------------------------------------------------------------------- /scripts/addons/coredns/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | COREDNS_VER=1.42.1 6 | 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 |
url=https://github.com/coredns/helm/releases/download/coredns-$COREDNS_VER/coredns-$COREDNS_VER.tgz 9 | tgz=$DTKBASE/binaries/$(basename $url) 10 | xec download-obj --url=$url --dest=$tgz 11 | 12 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 13 | helm upgrade --install coredns\ 14 | --namespace kube-system\ 15 | --create-namespace\ 16 | --kubeconfig $admin_kubeconfig\ 17 | --wait\ 18 | --values $script_dir/values.yaml\ 19 | $tgz 20 | -------------------------------------------------------------------------------- /scripts/addons/coredns/values.yaml: -------------------------------------------------------------------------------- 1 | priorityClassName: system-cluster-critical 2 | 3 | service: 4 | clusterIP: 10.32.0.10 5 | 6 | serviceAccount: 7 | create: true 8 | name: coredns 9 | 10 | servers: 11 | - zones: 12 | - zone: . 13 | port: 53 14 | plugins: 15 | - name: log 16 | - name: errors 17 | - name: health 18 | - name: ready 19 | - name: kubernetes 20 | parameters: cluster.local in-addr.arpa ip6.arpa 21 | configBlock: |- 22 | pods verified 23 | fallthrough in-addr.arpa ip6.arpa 24 | - name: prometheus 25 | parameters: :9153 26 | - name: forward 27 | parameters: . /etc/resolv.conf 28 | - name: cache 29 | parameters: 30 30 | - name: loop 31 | - name: reload 32 | - name: loadbalance 33 | -------------------------------------------------------------------------------- /scripts/addons/external-dns/README.md: -------------------------------------------------------------------------------- 1 | # External DNS 2 | 3 | This is a toy. This directory contains a simple Flask app that runs a webhook for External DNS to talk to. The way External DNS recommends to implement a webhook is as a sidecar - but this approach instead just runs the webhook on the desktop and has External DNS talk to it from inside the cluster. 4 | 5 | When you create an Ingress, External DNS will call the webhook, and the webhook will add the Ingress host into the `/etc/hosts` file on your desktop. Then you can access the hostname as though it had been provisioned by an actual DNS server. (So basically this just saves hand-editing `/etc/hosts`.) Here's how you use it with Desktop Kubernetes: 6 | 7 | 1. Make sure ingress-nginx is installed in the cluster. 8 | 1. Pip install Flask. 9 | 1. Get your desktop IP address and put it into the `values.yaml` in the `--webhook-provider-url` arg. (Leave the port as `5000`.) 10 | 1. Run the webhook: `sudo -E ./webhook --ip-address 192.168.56.200` in a separate terminal window. The `--ip-address` is the address of any Desktop Kubernetes node running the ingress controller. 11 | 1. Install External DNS: `./dtk --install-addon external-dns`. 12 | 1. Create a Deployment running Nginx, a Service, and an Ingress. Annotate the Ingress with `external-dns.alpha.kubernetes.io/target: ` matching the host name of the Ingress. (You have to do this because of how External DNS parses the Ingress resource to determine whether to send the webhook new hosts. See the example below.) 13 | 1. External DNS will call the webhook which will append the host name from the Ingress annotation to `/etc/hosts` on your desktop, with the IP address of your cluster VM. 14 | 1. Then you can curl that host or access it from your browser. (A quick reachability check for the webhook itself is sketched just below this list.)
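
Before relying on the integration, you can sanity-check that the webhook is reachable. This check is not in the original steps; it assumes the webhook is listening on the `--webhook-provider-url` address from `values.yaml` (port `5000`). The root endpoint returns the domain filter the Flask app serves:

```
# adjust 192.168.0.49 to the desktop IP you put in values.yaml
curl -s http://192.168.0.49:5000/
# expected output: {"include":["dtk.io"]}
```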
15 | 16 | ## Example Ingress 17 | 18 | Observe that the annotation below has the same hostname as the `host` in the `rules` list - `frobozz.dtk.io`: 19 | 20 | ``` 21 | apiVersion: networking.k8s.io/v1 22 | kind: Ingress 23 | metadata: 24 | annotations: 25 | external-dns.alpha.kubernetes.io/target: frobozz.dtk.io 26 | name: test 27 | spec: 28 | ingressClassName: nginx 29 | rules: 30 | - host: frobozz.dtk.io 31 | http: 32 | paths: 33 | - backend: 34 | service: 35 | name: test 36 | port: 37 | number: 80 38 | path: / 39 | pathType: Prefix 40 | ``` 41 | 42 | Once Nginx configures the Ingress and External DNS calls the webhook you can observe that the host has been added to your `/etc/hosts` file. When you're done with the cluster, just CTRL-C the webhook script. (You have to clean up `/etc/hosts` yourself.) 43 | 44 | Result in `/etc/hosts`: 45 | ``` 46 | ... 47 | 192.168.56.200 frobozz.dtk.io 48 | ``` 49 | 50 | Curl the endpoint: `curl http://frobozz.dtk.io`. Observe the Nginx response: 51 | ``` 52 | ... 53 |

<h1>Welcome to nginx!</h1> 54 | <p>If you see this page, the nginx web server is successfully installed and 55 | working. Further configuration is required.</p>

56 | ... 57 | ``` -------------------------------------------------------------------------------- /scripts/addons/external-dns/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | EXTDNS_VER=1.13.1 6 | 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | url=https://github.com/kubernetes-sigs/external-dns/releases/download/external-dns-helm-chart-$EXTDNS_VER/external-dns-$EXTDNS_VER.tgz 9 | tgz=$DTKBASE/binaries/$(basename $url) 10 | xec download-obj --url=$url --dest=$tgz 11 | 12 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 13 | helm upgrade --install external-dns\ 14 | --namespace external-dns\ 15 | --create-namespace\ 16 | --kubeconfig $admin_kubeconfig\ 17 | --wait\ 18 | --values $script_dir/values.yaml\ 19 | $tgz 20 | -------------------------------------------------------------------------------- /scripts/addons/external-dns/values.yaml: -------------------------------------------------------------------------------- 1 | provider: webhook 2 | 3 | extraArgs: 4 | - --webhook-provider-url=http://192.168.0.49:5000 5 | 6 | # v0.14.0 has the webhook functionality 7 | image: 8 | tag: v0.14.0 9 | -------------------------------------------------------------------------------- /scripts/addons/external-dns/webhook: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Usage: 4 | # 5 | # sudo -E webhook --ip-address 6 | # 7 | # Example: 8 | # 9 | # sudo -E ./webhook --ip-address 192.168.56.200 10 | # 11 | # Sudo is needed because the app modifies /etc/hosts. -E is needed so Python can 12 | # find all its modules/packages when running as root. 13 | # 14 | 15 | from flask import Flask, request, Response 16 | import json, re, sys 17 | 18 | app = Flask(__name__) 19 | mt = "application/external.dns.webhook+json;version=1" 20 | recs = "[]" 21 | filters = {"include":["dtk.io"]} 22 | cluster_ip = "192.168.56.200" 23 | 24 | @app.route("/", methods=["GET"]) 25 | def negotiate(): 26 | print("\nnegotiate") 27 | debug() 28 | return Response(json.dumps(filters, separators=(",", ":")), mimetype=mt) 29 | 30 | @app.route("/records", methods=["GET"]) 31 | def records(): 32 | print("\nget records") 33 | global recs 34 | debug() 35 | return Response(recs, mimetype=mt) 36 | 37 | @app.route("/adjustendpoints", methods=["POST"]) 38 | def adjust_endpoints(): 39 | print("\nadjust endpoints") 40 | global recs 41 | recs = request.data.decode().strip() 42 | update_etc_hosts(json.loads(recs)) 43 | debug() 44 | return Response("[]", mimetype=mt) 45 | 46 | @app.route("/records", methods=["POST"]) 47 | def apply_changes(): 48 | print("\npost records") 49 | debug() 50 | return Response(mimetype=mt) 51 | 52 | def update_etc_hosts(recs_list): 53 | fname = "/etc/hosts" 54 | for rec in recs_list: 55 | new_entry = "%s %s\n" % (cluster_ip, rec["dnsName"]) 56 | found = False 57 | expr = r"(^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})[\s]+" + rec["dnsName"] + "$" 58 | with open(fname, "r") as file: 59 | lines = file.readlines() 60 | for idx, line in enumerate(lines): 61 | result = re.match(expr, line) 62 | if result is not None: 63 | found = True 64 | if result.groups()[0] != cluster_ip: 65 | print("updating entry for %s" % rec["dnsName"]) 66 | lines[idx] = new_entry 67 | else: 68 | print("entry matches for %s - no changes made" % rec["dnsName"]) 69 | break 70 | if not found: 71 | print("adding entry for %s" % rec["dnsName"]) 72 | lines.extend([new_entry]) 73 | 74 
| with open(fname, "w") as file: 75 | file.writelines(lines) 76 | 77 | def debug(): 78 | print("REQ DATA:", request.data) 79 | for k in request.headers.keys(): 80 | print("HDR %s = %s" % (k, request.headers.get(k))) 81 | 82 | if __name__ == "__main__": 83 | i = 0 84 | while i < len(sys.argv): 85 | if sys.argv[i].startswith("--ip-address="): 86 | cluster_ip = sys.argv[i].split("=")[1] 87 | elif sys.argv[i] == "--ip-address": 88 | i += 1 89 | cluster_ip = sys.argv[i] 90 | i += 1 91 | print("mapping hosts to ip address %s" % cluster_ip) 92 | app.run(host="0.0.0.0") 93 | -------------------------------------------------------------------------------- /scripts/addons/ingress-nginx/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | INGRESS_NGINX_VER=4.12.2 6 | 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | url=https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-$INGRESS_NGINX_VER/ingress-nginx-$INGRESS_NGINX_VER.tgz 9 | tgz=$DTKBASE/binaries/$(basename $url) 10 | xec download-obj --url=$url --dest=$tgz 11 | 12 | ssl_passthrough=$(yq '.addons[] | select(.name == "ingress-nginx").ssl-passthrough' $config) 13 | 14 | if [[ $ssl_passthrough == "true" ]]; then 15 | ssl_passthrough="--set controller.extraArgs.enable-ssl-passthrough=true" 16 | else 17 | ssl_passthrough="" 18 | fi 19 | 20 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 21 | helm upgrade --install ingress-nginx\ 22 | --namespace ingress-nginx\ 23 | --create-namespace\ 24 | --kubeconfig $admin_kubeconfig\ 25 | --values $script_dir/values.yaml\ 26 | $ssl_passthrough\ 27 | $tgz 28 | -------------------------------------------------------------------------------- /scripts/addons/ingress-nginx/values.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | hostNetwork: true 3 | hostPort: 4 | enabled: true 5 | service: 6 | type: NodePort 7 | kind: DaemonSet 8 | -------------------------------------------------------------------------------- /scripts/addons/install-addons: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | long_opts=config:,admin-kubeconfig:,priv-key:,addon: 6 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | 8 | config= 9 | admin_kubeconfig= 10 | priv_key= 11 | addon= 12 | 13 | if ! 
parsed=$(xec parseargs $long_opts "$@"); then 14 | echo "$parsed" 15 | exit 1 16 | fi 17 | eval $(echo -e "$parsed") 18 | 19 | # write info to config yaml for addons 20 | yq -i '.cluster.admin-kubeconfig = "'$admin_kubeconfig'"' $config 21 | yq -i '.cluster.priv-key = "'$priv_key'"' $config 22 | 23 | if [[ -n "$addon" ]]; then 24 | if [[ -d $script_dir/$addon ]]; then 25 | echo "installing $addon in $script_dir/$addon" 26 | $script_dir/$addon/install $config 27 | else 28 | echo "addon directory not found: $script_dir/$addon" 29 | fi 30 | else 31 | for ((i = 0; i < $(yq '.addons | length' $config); ++i)); do 32 | addon="$(yq .addons[$i].name $config)" 33 | if [[ -d $script_dir/$addon ]]; then 34 | echo "installing $addon in $script_dir/$addon" 35 | $script_dir/$addon/install $config 36 | else 37 | echo "addon directory not found (skipping): $script_dir/$addon" 38 | fi 39 | done 40 | fi 41 | -------------------------------------------------------------------------------- /scripts/addons/kube-prometheus-stack/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | KUBE_PROM_VER=72.6.2 6 | 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | 9 | url=https://github.com/prometheus-community/helm-charts/releases/download/kube-prometheus-stack-$KUBE_PROM_VER/kube-prometheus-stack-$KUBE_PROM_VER.tgz 10 | tgz=$DTKBASE/binaries/$(basename $url) 11 | xec download-obj --url=$url --dest=$tgz 12 | 13 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 14 | 15 | etcd_arg="" 16 | scheduler_arg="" 17 | ctrllrmgr_arg="" 18 | 19 | endpoint=$(kubectl --kubeconfig $admin_kubeconfig get node -l node-role.kubernetes.io/controller\ 20 | -o json | jq -r '.items[].status.addresses[] | select(.type=="InternalIP").address') 21 | containerized_cplane=$(yq .k8s.containerized-cplane $config) 22 | 23 | # if k8s components are not running as pods then it requires additional helm values 24 | 25 | if [[ "$containerized_cplane" != *etcd* && "$containerized_cplane" != all ]]; then 26 | etcd_arg="--set kubeEtcd.endpoints={$endpoint}" 27 | fi 28 | if [[ "$containerized_cplane" != *kube-scheduler* && "$containerized_cplane" != all ]]; then 29 | scheduler_arg="--set kubeScheduler.endpoints={$endpoint}" 30 | fi 31 | if [[ "$containerized_cplane" != *kube-controller-manager* && "$containerized_cplane" != all ]]; then 32 | ctrllrmgr_arg="--set kubeControllerManager.endpoints={$endpoint}" 33 | fi 34 | 35 | helm upgrade --install kube-prometheus\ 36 | --namespace kube-prometheus\ 37 | --create-namespace\ 38 | --kubeconfig $admin_kubeconfig\ 39 | $etcd_arg\ 40 | $scheduler_arg\ 41 | $ctrllrmgr_arg\ 42 | --values $script_dir/values.yaml\ 43 | $tgz 44 | -------------------------------------------------------------------------------- /scripts/addons/kube-prometheus-stack/values.yaml: -------------------------------------------------------------------------------- 1 | extraManifests: 2 | - apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: grafana-nodeport 6 | spec: 7 | ports: 8 | - name: http 9 | nodePort: 30300 10 | port: 3000 11 | protocol: TCP 12 | targetPort: 3000 13 | selector: 14 | app.kubernetes.io/instance: kube-prometheus 15 | app.kubernetes.io/name: grafana 16 | type: NodePort 17 | - apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: prometheus-nodeport 21 | spec: 22 | ports: 23 | - name: http 24 | nodePort: 30900 25 | port: 9090 26 | protocol: TCP 27 | targetPort: 9090 28 | selector: 29 | 
app.kubernetes.io/name: prometheus 30 | type: NodePort 31 | 32 | grafana: 33 | adminPassword: admin 34 | 35 | coreDns: 36 | service: 37 | selector: 38 | k8s-app: coredns 39 | 40 | prometheus: 41 | prometheusSpec: 42 | maximumStartupDurationSeconds: 60 43 | -------------------------------------------------------------------------------- /scripts/addons/kubernetes-dashboard/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Dashboard 2 | 3 | To access the UI, use the provided NodePort service. E.g., say your cluster is like this: 4 | 5 | ``` 6 | NAME STATUS ROLES AGE VERSION INTERNAL-IP etc. 7 | host-one Ready controller,worker 70m v1.28.1 192.168.56.200 etc. 8 | ``` 9 | Then, in your browser: https://192.168.56.200:30443 (the NodePort service fronts the dashboard's TLS port, so use https) 10 | 11 | Create a token: 12 | 13 | ``` 14 | k -n kubernetes-dashboard create token kubernetes-dashboard-kong 15 | ``` 16 | -------------------------------------------------------------------------------- /scripts/addons/kubernetes-dashboard/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | KUBE_DASHBOARD_VER=7.12.0 6 | 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | url=https://github.com/kubernetes/dashboard/releases/download/kubernetes-dashboard-$KUBE_DASHBOARD_VER/kubernetes-dashboard-$KUBE_DASHBOARD_VER.tgz 9 | tgz=$DTKBASE/binaries/$(basename $url) 10 | xec download-obj --url=$url --dest=$tgz 11 | 12 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 13 | helm upgrade --install kubernetes-dashboard\ 14 | --namespace kubernetes-dashboard\ 15 | --create-namespace\ 16 | --kubeconfig $admin_kubeconfig\ 17 | --values $script_dir/values.yaml\ 18 | $tgz 19 | -------------------------------------------------------------------------------- /scripts/addons/kubernetes-dashboard/values.yaml: -------------------------------------------------------------------------------- 1 | extras: 2 | manifests: 3 | - apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: kubernetes-dashboard-nodeport 7 | spec: 8 | ports: 9 | - name: https 10 | nodePort: 30443 11 | port: 8443 12 | protocol: TCP 13 | targetPort: proxy-tls 14 | selector: 15 | app.kubernetes.io/component: app 16 | app.kubernetes.io/instance: kubernetes-dashboard 17 | app.kubernetes.io/name: kong 18 | type: NodePort 19 | - apiVersion: rbac.authorization.k8s.io/v1 20 | kind: ClusterRoleBinding 21 | metadata: 22 | name: kubernetes-dashboard-admin-user 23 | roleRef: 24 | apiGroup: rbac.authorization.k8s.io 25 | kind: ClusterRole 26 | name: cluster-admin 27 | subjects: 28 | - kind: ServiceAccount 29 | name: kubernetes-dashboard-kong 30 | namespace: kubernetes-dashboard 31 | -------------------------------------------------------------------------------- /scripts/addons/metrics-server/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | METRICS_SERVER_VER=3.12.2 6 | 7 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 8 | 9 | url=https://github.com/kubernetes-sigs/metrics-server/releases/download/metrics-server-helm-chart-$METRICS_SERVER_VER/metrics-server-$METRICS_SERVER_VER.tgz 10 | tgz=$DTKBASE/binaries/$(basename $url) 11 | xec download-obj --url=$url --dest=$tgz 12 | 13 | helm upgrade --install metrics-server\ 14 | --namespace metrics-server\ 15 | --create-namespace\ 16 | --wait\ 17 | --kubeconfig $admin_kubeconfig\ 18 | $tgz 19 |
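# (Addendum - not part of the original script.) A quick, hedged way to confirm the add-on
# is working once helm's --wait returns: the Metrics API should begin returning node
# stats after a short scrape delay, e.g.:
#
#   kubectl --kubeconfig $admin_kubeconfig top nodes
#
# If this reports that the Metrics API is not yet available, give it a minute and retry.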
-------------------------------------------------------------------------------- /scripts/addons/openebs/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | OPENEBS_VER=4.2.0 6 | 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | 9 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 10 | 11 | url=https://openebs.github.io/dynamic-localpv-provisioner/localpv-provisioner-$OPENEBS_VER.tgz 12 | tgz=$DTKBASE/binaries/$(basename $url) 13 | xec download-obj --url=$url --dest=$tgz 14 | 15 | helm upgrade --install openebs\ 16 | --namespace openebs\ 17 | --create-namespace\ 18 | --kubeconfig $admin_kubeconfig\ 19 | --values $script_dir/values.yaml\ 20 | $tgz 21 | -------------------------------------------------------------------------------- /scripts/addons/openebs/values.yaml: -------------------------------------------------------------------------------- 1 | hostpathClass: 2 | isDefaultClass: true 3 | -------------------------------------------------------------------------------- /scripts/addons/vcluster/.gitignore: -------------------------------------------------------------------------------- 1 | values.yaml -------------------------------------------------------------------------------- /scripts/addons/vcluster/README.md: -------------------------------------------------------------------------------- 1 | # After installing 2 | 3 | So far, I cannot get this working in Nginx using instructions here: https://www.vcluster.com/docs/using-vclusters/access without also creating a hostname in `/etc/hosts` and using that host name in the ingress. (In other words a host-less rule does not work in the ingress and I don't know why.) 4 | 5 | So this example uses `NodePort` for now, based on the same link above just a little further down the page. 6 | 7 | As a result, this is a little more complicated than the `Ingress` approach but doesn't require dealing with `/etc/hosts`. The key thing to understand is that vcluster stores the kube config for the virtual cluster in a secret in the vcluster namespace. If you look through the [vclusterctl](https://github.com/loft-sh/vcluster/tree/main/cmd/vclusterctl) code you will see that when their documentation says to do this: 8 | ``` 9 | vcluster connect my-vcluster -n my-vcluster --update-current=false --server=https://x.x.x.x 10 | ``` 11 | 12 | ...all it does is go get the kubeconfig from the secret, and replace the `server` value with what you specify as the `--server=` on the command line. This is what is accomplished by this snippet.
(This snippet requires `yq`): 13 | 14 | ## Get the NodePort service port num 15 | ``` 16 | port=$(kubectl -n vcluster get svc vcluster-nodeport -oyaml -ojsonpath='{.spec.ports[0].nodePort}') 17 | ``` 18 | 19 | ## Get the secret that has the kubeconfig 20 | ``` 21 | kubectl -n vcluster get secret vc-vcluster -oyaml >| ./vc-kubeconfig.yaml 22 | ``` 23 | 24 | ## Get the kubeconfig from the secret 25 | ``` 26 | kcfg=$(yq '.data.config' vc-kubeconfig.yaml) 27 | ``` 28 | 29 | ## Base 64 decode the kubeconfig to the file system 30 | ``` 31 | echo $kcfg | base64 -d >| ./vc-kubeconfig.yaml 32 | ``` 33 | 34 | ## Get the IP address of the k8s controller 35 | ``` 36 | controller_ip=$(kubectl get nodes -l node-role.kubernetes.io/controller=\ 37 | -o jsonpath={.items[*].status.addresses[?\(@.type==\"InternalIP\"\)].address}) 38 | ``` 39 | 40 | ## Patch in the Controller IP and NodePort port num 41 | ``` 42 | yq -i '.clusters[0].cluster.server = "https://'$controller_ip:$port'"' ./vc-kubeconfig.yaml 43 | ``` 44 | 45 | ## Select the vcluster kubeconfig 46 | ``` 47 | export KUBECONFIG=./vc-kubeconfig.yaml 48 | ``` 49 | 50 | ## Then 51 | ``` 52 | kubectl get nodes -owide 53 | ``` 54 | 55 | ## Output 56 | ``` 57 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 58 | vm1 Ready 22h v1.29.1+k3s2 10.32.0.34 Fake Kubernetes Image 4.19.76-fakelinux docker://19.3.12 59 | ``` 60 | -------------------------------------------------------------------------------- /scripts/addons/vcluster/ingress.yaml: -------------------------------------------------------------------------------- 1 | # 30-Mar-2024 based on: https://www.vcluster.com/docs/using-vclusters/access 2 | --- 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | annotations: 7 | nginx.ingress.kubernetes.io/backend-protocol: HTTPS 8 | nginx.ingress.kubernetes.io/ssl-passthrough: "true" 9 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 10 | name: vcluster-ingress 11 | spec: 12 | ingressClassName: nginx 13 | rules: 14 | - http: 15 | paths: 16 | - backend: 17 | service: 18 | name: vcluster 19 | port: 20 | number: 443 21 | path: / 22 | pathType: ImplementationSpecific 23 | -------------------------------------------------------------------------------- /scripts/addons/vcluster/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=$1 4 | 5 | VCLUSTER_VER=0.19.5 6 | 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | 9 | admin_kubeconfig=$(yq .cluster.admin-kubeconfig $config) 10 | 11 | url=https://charts.loft.sh/charts/vcluster-$VCLUSTER_VER.tgz 12 | tgz=$DTKBASE/binaries/$(basename $url) 13 | xec download-obj --url=$url --dest=$tgz 14 | 15 | node_ips=$(kubectl --kubeconfig $admin_kubeconfig get nodes\ 16 | -o jsonpath={.items[*].status.addresses[?\(@.type==\"InternalIP\"\)].address}\ 17 | | sed 's/ /,/g') 18 | 19 | cat <<EOF >| $script_dir/values.yaml 20 | syncer: 21 | extraArgs: 22 | - --tls-san=$node_ips 23 | vcluster: 24 | image: rancher/k3s:v1.29.1-k3s2 25 | EOF 26 | 27 | helm upgrade --install vcluster\ 28 | --namespace vcluster\ 29 | --create-namespace\ 30 | --kubeconfig $admin_kubeconfig\ 31 | --values $script_dir/values.yaml\ 32 | $tgz 33 | 34 | # TODO ingress: false 35 | #kubectl --kubeconfig $admin_kubeconfig -n vcluster apply -f $script_dir/ingress.yaml 36 | kubectl --kubeconfig $admin_kubeconfig -n vcluster apply -f $script_dir/nodeport-svc.yaml 37 |
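# (Addendum - not part of the original script.) vcluster writes the virtual cluster's
# kubeconfig into the vc-vcluster secret in the vcluster namespace; the README in this
# directory walks through the full NodePort connection steps. A hedged one-liner that
# condenses the secret-extraction part of those steps:
#
#   kubectl --kubeconfig $admin_kubeconfig -n vcluster get secret vc-vcluster \
#     -o jsonpath='{.data.config}' | base64 -d >| ./vc-kubeconfig.yaml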
-------------------------------------------------------------------------------- /scripts/addons/vcluster/nodeport-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: vcluster-nodeport 5 | spec: 6 | selector: 7 | app: vcluster 8 | ports: 9 | - name: https 10 | port: 443 11 | targetPort: 8443 12 | protocol: TCP 13 | type: NodePort -------------------------------------------------------------------------------- /scripts/certs-and-kubecfgs/csr.altnames.conf: -------------------------------------------------------------------------------- 1 | [ req ] 2 | default_bits = 2048 3 | prompt = no 4 | default_md = sha256 5 | distinguished_name = dn 6 | req_extensions = req_ext 7 | 8 | [ dn ] 9 | O = ORGANIZATION 10 | CN = COMMONNAME 11 | 12 | [ req_ext ] 13 | subjectAltName = @alt_names 14 | 15 | [ alt_names ] 16 | DNS.1 = HOSTNAME 17 | IP.1 = IPADDRESS 18 | 19 | [ v3_ext ] 20 | authorityKeyIdentifier=keyid,issuer:always 21 | basicConstraints=CA:FALSE 22 | keyUsage=keyEncipherment,dataEncipherment 23 | extendedKeyUsage=serverAuth,clientAuth 24 | subjectAltName=@alt_names 25 | -------------------------------------------------------------------------------- /scripts/certs-and-kubecfgs/csr.conf: -------------------------------------------------------------------------------- 1 | [ req ] 2 | default_bits = 2048 3 | prompt = no 4 | default_md = sha256 5 | distinguished_name = dn 6 | 7 | [ dn ] 8 | O = ORGANIZATION 9 | CN = COMMONNAME 10 | 11 | [ v3_ext ] 12 | authorityKeyIdentifier=keyid,issuer:always 13 | basicConstraints=CA:FALSE 14 | keyUsage=keyEncipherment,dataEncipherment 15 | extendedKeyUsage=serverAuth,clientAuth 16 | -------------------------------------------------------------------------------- /scripts/certs-and-kubecfgs/csr.kubernetes.conf: -------------------------------------------------------------------------------- 1 | [ req ] 2 | default_bits = 2048 3 | prompt = no 4 | default_md = sha256 5 | req_extensions = req_ext 6 | distinguished_name = dn 7 | 8 | [ dn ] 9 | CN = kubernetes 10 | 11 | [ req_ext ] 12 | subjectAltName = @alt_names 13 | 14 | [ alt_names ] 15 | DNS.1 = kubernetes 16 | DNS.2 = kubernetes.default 17 | DNS.3 = kubernetes.default.svc 18 | DNS.4 = kubernetes.default.svc.cluster 19 | DNS.5 = kubernetes.default.svc.cluster.local 20 | DNS.6 = HOSTNAME 21 | IP.1 = IPADDRESS 22 | IP.2 = 10.32.0.1 23 | IP.3 = 127.0.0.1 24 | 25 | [ v3_ext ] 26 | authorityKeyIdentifier=keyid,issuer:always 27 | basicConstraints=CA:FALSE 28 | keyUsage=keyEncipherment,dataEncipherment,digitalSignature,nonRepudiation 29 | extendedKeyUsage=serverAuth,clientAuth 30 | subjectAltName=@alt_names 31 | -------------------------------------------------------------------------------- /scripts/certs-and-kubecfgs/gen-certs-kubeconfig: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Generates TLS cert/key and optionally a kubeconfig for components: 4 | # - kube-proxy 5 | # - kubelet 6 | # - kube-controller-manager 7 | # - kube-scheduler 8 | # - admin (the admin kubeconfig allows the script (and you) to access the cluster API) 9 | # - kubernetes (for etcd and api server) 10 | # 11 | 12 | set -e 13 | 14 | long_opts=host-name:,host-ip:,subject-org:,subject-ou:,subject-cn:,identity:,\ 15 | csr-type:,controller-ip:,ca-cert:,ca-key:,gen-kubeconfig: 16 | 17 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 18 | 19 | host_name= 20 | host_ip= 21 | 
subject_org= 22 | subject_cn= 23 | identity= 24 | csr_type= 25 | ca_cert= 26 | ca_key= 27 | controller_ip= 28 | gen_kubeconfig=1 29 | 30 | if ! parsed=$(xec parseargs $long_opts "$@"); then 31 | echo "$parsed" 32 | exit 1 33 | fi 34 | eval $(echo -e "$parsed") 35 | 36 | echo "creating generated/cert/$identity.pem and $identity-key.pem" 37 | 38 | tmp_dir=$(mktemp -d) 39 | 40 | if [[ "$csr_type" == altnames ]]; then 41 | sed -e "s/HOSTNAME/$host_name/g"\ 42 | -e "s/IPADDRESS/$host_ip/g"\ 43 | -e "s/ORGANIZATION/$subject_org/g"\ 44 | -e "s/COMMONNAME/$subject_cn/g"\ 45 | $script_dir/csr.altnames.conf >| $tmp_dir/csr.conf 46 | elif [[ "$csr_type" == simple ]]; then 47 | sed -e "s/ORGANIZATION/$subject_org/g"\ 48 | -e "s/COMMONNAME/$subject_cn/g"\ 49 | $script_dir/csr.conf >| $tmp_dir/csr.conf 50 | elif [[ "$csr_type" == kubernetes ]]; then 51 | sed -e "s/HOSTNAME/$host_name/g"\ 52 | -e "s/IPADDRESS/$host_ip/g"\ 53 | $script_dir/csr.kubernetes.conf >| $tmp_dir/csr.conf 54 | fi 55 | 56 | subject="" 57 | if [[ -n "$subject_org" ]]; then 58 | subject="$subject/O=$subject_org" 59 | fi 60 | if [[ -n "$subject_ou" ]]; then 61 | subject="$subject/OU=$subject_ou" 62 | fi 63 | subject="$subject/CN=$subject_cn" 64 | 65 | if [[ -n "$csr_type" ]]; then 66 | openssl genrsa\ 67 | -out $DTKBASE/generated/cert/$identity-key.pem\ 68 | 2048 69 | 70 | openssl req\ 71 | -new\ 72 | -key $DTKBASE/generated/cert/$identity-key.pem\ 73 | -out $tmp_dir/$identity.csr\ 74 | -config $tmp_dir/csr.conf\ 75 | 2>/dev/null 76 | 77 | openssl x509\ 78 | -req\ 79 | -in $tmp_dir/$identity.csr\ 80 | -CA $ca_cert\ 81 | -CAkey $ca_key\ 82 | -CAcreateserial\ 83 | -out $DTKBASE/generated/cert/$identity.pem\ 84 | -days 10000\ 85 | -extensions v3_ext\ 86 | -extfile $tmp_dir/csr.conf\ 87 | 2>/dev/null 88 | else 89 | openssl req\ 90 | -newkey rsa:2048\ 91 | -nodes\ 92 | -keyout $DTKBASE/generated/cert/$identity-key.pem\ 93 | -subj $subject\ 94 | -out $tmp_dir/$identity.csr\ 95 | 2>/dev/null 96 | 97 | openssl x509\ 98 | -req\ 99 | -in $tmp_dir/$identity.csr\ 100 | -CA $ca_cert\ 101 | -CAkey $ca_key\ 102 | -CAcreateserial\ 103 | -sha256\ 104 | -out $DTKBASE/generated/cert/$identity.pem\ 105 | -days 10000\ 106 | 2>/dev/null 107 | fi 108 | 109 | rm -rf $tmp_dir 110 | 111 | if [[ $gen_kubeconfig -eq 0 ]]; then 112 | exit 113 | fi 114 | 115 | echo "creating generated/kubeconfig/$identity.kubeconfig" 116 | 117 | kubectl config set-cluster kubernetes\ 118 | --certificate-authority=$ca_cert\ 119 | --embed-certs=true\ 120 | --server=https://$controller_ip:6443\ 121 | --kubeconfig=$DTKBASE/generated/kubeconfig/$identity.kubeconfig 122 | 123 | kubectl config set-credentials $subject_cn\ 124 | --client-certificate=$DTKBASE/generated/cert/$identity.pem\ 125 | --client-key=$DTKBASE/generated/cert/$identity-key.pem\ 126 | --embed-certs=true\ 127 | --kubeconfig=$DTKBASE/generated/kubeconfig/$identity.kubeconfig 128 | 129 | kubectl config set-context default\ 130 | --cluster=kubernetes\ 131 | --user=$subject_cn\ 132 | --kubeconfig=$DTKBASE/generated/kubeconfig/$identity.kubeconfig 133 | 134 | kubectl config use-context default\ 135 | --kubeconfig=$DTKBASE/generated/kubeconfig/$identity.kubeconfig 136 | -------------------------------------------------------------------------------- /scripts/cluster/gen-core-k8s: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Generates the Kubernetes core cluster by installing only the canonical 4 | # Kubernetes controller and worker components in each VM 
based on the VM's 5 | # ordinal position in the cluster. (Node 0 is a controller+worker, Nodes 6 | # >=1 are workers.) 7 | # 8 | 9 | set -e 10 | 11 | long_opts=containerized-cplane:,kube-proxy-enabled:,priv-key:,ca-cert:,ca-key:,config:,cluster-cidr:,virt: 12 | 13 | containerized_cplane= 14 | kube_proxy_enabled= 15 | priv_key= 16 | ca_cert= 17 | ca_key= 18 | config= 19 | cluster_cidr= 20 | virt= 21 | 22 | if ! parsed=$(xec parseargs $long_opts "$@"); then 23 | echo "$parsed" 24 | exit 1 25 | fi 26 | eval $(echo -e "$parsed") 27 | 28 | # The control plane node will get the worker components for two reasons: 1) it supports 29 | # a containerized control plane, and 2) it lets the controller be also a worker 30 | 31 | vmcnt=$(yq '.vms | length' $config) 32 | vm_names=() 33 | 34 | for ((i = 0; i < $vmcnt; ++i)); do 35 | vm_name=$(yq .vms[$i].name $config) 36 | pod_cidr=$(yq .vms[$i].pod-cidr $config) 37 | vm_ip=$(xec $virt/get-vm-ip $vm_name) 38 | 39 | if [[ $i -eq 0 ]]; then 40 | controller_ip=$vm_ip 41 | xec gen-certs-kubeconfig\ 42 | --subject-org=system:masters\ 43 | --subject-cn=admin\ 44 | --identity=admin\ 45 | --controller-ip=$controller_ip\ 46 | --ca-cert=$ca_cert\ 47 | --ca-key=$ca_key 48 | admin_kubeconfig=$DTKBASE/generated/kubeconfig/admin.kubeconfig 49 | fi 50 | 51 | ssh -i $priv_key root@$vm_ip mkdir -p /etc/desktop-kubernetes/static-pods 52 | 53 | xec configure-worker\ 54 | --controller-ip=$controller_ip\ 55 | --kube-proxy-enabled=$kube_proxy_enabled\ 56 | --worker-hostname=$vm_name\ 57 | --priv-key=$priv_key\ 58 | --admin-kubeconfig=$admin_kubeconfig\ 59 | --pod-cidr=$pod_cidr\ 60 | --containerized-cplane=$containerized_cplane\ 61 | --config=$config\ 62 | --worker-ip=$vm_ip\ 63 | --ca-cert=$ca_cert\ 64 | --ca-key=$ca_key 65 | 66 | if [[ $i -eq 0 ]]; then 67 | xec configure-controller\ 68 | --controller-hostname=$vm_name\ 69 | --controller-ip=$vm_ip\ 70 | --priv-key=$priv_key\ 71 | --admin-kubeconfig=$admin_kubeconfig\ 72 | --containerized-cplane=$containerized_cplane\ 73 | --ca-cert=$ca_cert\ 74 | --ca-key=$ca_key\ 75 | --cluster-cidr=$cluster_cidr 76 | fi 77 | 78 | vm_names+=($vm_name) 79 | done 80 | 81 | echo "Waiting for all nodes to be Ready" 82 | for ((i = 0; i < $vmcnt; ++i)); do 83 | kubectl --kubeconfig $admin_kubeconfig wait node "${vm_names[$i]}" --for=condition=Ready --timeout=30s 84 | done 85 | 86 | echo "Labeling node(s) - first node is controller & worker, all other nodes are workers" 87 | for ((i = 0; i < $vmcnt; ++i)); do 88 | if [[ $i -eq 0 ]]; then 89 | labels=(controller worker) 90 | else 91 | labels=(worker) 92 | fi 93 | for label in "${labels[@]}"; do 94 | kubectl --kubeconfig $admin_kubeconfig label node "${vm_names[$i]}" node-role.kubernetes.io/$label= 95 | done 96 | done 97 | 98 | # if kube-proxy enabled, configure routes on each vm to route pod ip addresses 99 | host_only_network=$(yq .vbox.host-only-network $config) 100 | 101 | if [[ -n "$host_only_network" ]] && [[ $kube_proxy_enabled -eq 1 ]]; then 102 | echo "configuring kube-proxy routes" 103 | for ((i = 0; i < $vmcnt; ++i)); do 104 | this_vm=$(yq .vms[$i].name $config) 105 | this_ip=$(xec $virt/get-vm-ip $this_vm) 106 | for ((j = 0; j < $vmcnt; ++j)); do 107 | if [[ $i -eq $j ]]; then 108 | continue 109 | fi 110 | other_vm=$(yq .vms[$j].name $config) 111 | other_ip=$(xec $virt/get-vm-ip $other_vm) 112 | other_cidr=$(yq .vms[$j].pod-cidr $config) 113 | ssh -i $priv_key root@$this_ip "echo $other_cidr via $other_ip dev enp0s8 >> /etc/sysconfig/network-scripts/route-enp0s8" 114 | done 115 | ssh -i 
$priv_key root@$this_ip systemctl restart NetworkManager 116 | done 117 | fi 118 | 119 | echo "Core Kubernetes cluster creation successful" 120 | -------------------------------------------------------------------------------- /scripts/cluster/gen-root-ca: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Generates root CA cert and key if they don't already exist. These will be used to sign 4 | # every other cert generated for the cluster. 5 | # 6 | # Outputs: 7 | # - $DTKBASE/generated/cert/ca-key.pem 8 | # - $DTKBASE/generated/cert/ca.pem 9 | 10 | if [[ -f $DTKBASE/generated/cert/ca.pem && -f $DTKBASE/generated/cert/ca-key.pem ]]; then 11 | echo "CA cert and key already exist - skipping" 12 | exit 13 | fi 14 | 15 | openssl genrsa\ 16 | -out "$DTKBASE/generated/cert/ca-key.pem"\ 17 | 2048 18 | 19 | openssl req\ 20 | -x509\ 21 | -new\ 22 | -nodes\ 23 | -key "$DTKBASE/generated/cert/ca-key.pem"\ 24 | -sha256\ 25 | -subj "/CN=internalca"\ 26 | -days 10000\ 27 | -out "$DTKBASE/generated/cert/ca.pem" 28 | -------------------------------------------------------------------------------- /scripts/control-plane/configure-controller: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Configures a controller node 4 | 5 | set -e 6 | 7 | long_opts=controller-hostname:,priv-key:,admin-kubeconfig:,containerized-cplane:,ca-cert:,ca-key:,cluster-cidr:,\ 8 | controller-ip: 9 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | 11 | controller_hostname= 12 | priv_key= 13 | admin_kubeconfig= 14 | containerized_cplane= 15 | ca_cert= 16 | ca_key= 17 | cluster_cidr= 18 | controller_ip= 19 | 20 | if ! parsed=$(xec parseargs $long_opts "$@"); then 21 | echo "$parsed" 22 | exit 1 23 | fi 24 | eval $(echo -e "$parsed") 25 | 26 | echo "configuring firewall rules" 27 | xec configure-firewall $controller_ip $priv_key --controller 28 | 29 | echo "creating kubernetes.pem and kubernetes-key.pem" 30 | xec gen-certs-kubeconfig\ 31 | --host-name=$controller_hostname\ 32 | --host-ip=$controller_ip\ 33 | --subject-cn=kubernetes\ 34 | --identity=kubernetes\ 35 | --csr-type=kubernetes\ 36 | --gen-kubeconfig=0\ 37 | --ca-cert=$ca_cert\ 38 | --ca-key=$ca_key 39 | 40 | echo "installing etcd into controller" 41 | xec install-etcd\ 42 | --priv-key=$priv_key\ 43 | --controller-ip=$controller_ip\ 44 | --controller-hostname=$controller_hostname\ 45 | --etcd-gzip=$ETCD_GZIP\ 46 | --kubernetes-cert=$DTKBASE/generated/cert/kubernetes.pem\ 47 | --kubernetes-key=$DTKBASE/generated/cert/kubernetes-key.pem\ 48 | --admin-kubeconfig=$admin_kubeconfig\ 49 | --containerized-cplane=$containerized_cplane\ 50 | --ca-cert=$ca_cert 51 | 52 | echo "installing kube-apiserver into controller" 53 | xec install-kube-apiserver\ 54 | --priv-key=$priv_key\ 55 | --controller-ip=$controller_ip\ 56 | --kube-apiserver-binary=$KUBE_APISERVER_BINARY\ 57 | --kubernetes-cert=$DTKBASE/generated/cert/kubernetes.pem\ 58 | --kubernetes-key=$DTKBASE/generated/cert/kubernetes-key.pem\ 59 | --containerized-cplane=$containerized_cplane\ 60 | --admin-kubeconfig=$admin_kubeconfig\ 61 | --ca-cert=$ca_cert\ 62 | --ca-key=$ca_key 63 | 64 | echo "installing kube-controller-manager into controller" 65 | xec install-kube-controller-manager\ 66 | --priv-key=$priv_key\ 67 | --controller-ip=$controller_ip\ 68 | --controller-hostname=$controller_hostname\ 69 | --kube-controller-manager-binary=$KUBE_CONTROLLER_MANAGER_BINARY\ 70 | --containerized-cplane=$containerized_cplane\ 71 |
--admin-kubeconfig=$admin_kubeconfig\ 72 | --ca-cert=$ca_cert\ 73 | --ca-key=$ca_key\ 74 | --cluster-cidr=$cluster_cidr 75 | 76 | echo "installing kube-scheduler into controller" 77 | xec install-kube-scheduler\ 78 | --priv-key=$priv_key\ 79 | --controller-ip=$controller_ip\ 80 | --controller-hostname=$controller_hostname\ 81 | --kube-scheduler-binary=$KUBE_SCHEDULER_BINARY\ 82 | --containerized-cplane=$containerized_cplane\ 83 | --admin-kubeconfig=$admin_kubeconfig\ 84 | --ca-cert=$ca_cert\ 85 | --ca-key=$ca_key 86 | 87 | healthy_count=0 88 | for i in {1..10}; do 89 | healthy_count=$(kubectl --kubeconfig=$DTKBASE/generated/kubeconfig/kube-controller-manager.kubeconfig\ 90 | get componentstatuses --no-headers 2>/dev/null | awk '{print $2}' | grep Healthy | wc -l) 91 | if [[ "$healthy_count" -eq 3 ]]; then 92 | break 93 | fi 94 | sleep 1s 95 | done 96 | 97 | if [[ "$healthy_count" -ne 3 ]]; then 98 | echo "unable to verify that controller was successfully configured" 99 | exit 1 100 | fi 101 | 102 | echo "finished configuring controller" 103 | -------------------------------------------------------------------------------- /scripts/control-plane/etcd/etcd-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | component: etcd 6 | tier: control-plane 7 | name: etcd 8 | namespace: kube-system 9 | spec: 10 | containers: 11 | - name: etcd 12 | command: 13 | - /usr/local/bin/etcd 14 | - --advertise-client-urls=https://INTERNAL_IP:2379 15 | - --cert-file=/etc/etcd/kubernetes.pem 16 | - --client-cert-auth 17 | - --data-dir=/var/lib/etcd 18 | - --initial-advertise-peer-urls=https://INTERNAL_IP:2380 19 | - --initial-cluster=ETCD_NAME=https://INTERNAL_IP:2380 20 | - --initial-cluster-state=new 21 | - --initial-cluster-token=etcd-cluster-0 22 | - --key-file=/etc/etcd/kubernetes-key.pem 23 | - --listen-client-urls=https://0.0.0.0:2379 24 | - --listen-metrics-urls=http://0.0.0.0:2381 25 | - --listen-peer-urls=https://0.0.0.0:2380 26 | - --logger=zap 27 | - --log-level=info 28 | - --log-outputs=stderr 29 | - --name=ETCD_NAME 30 | - --peer-cert-file=/etc/etcd/kubernetes.pem 31 | - --peer-client-cert-auth 32 | - --peer-key-file=/etc/etcd/kubernetes-key.pem 33 | - --peer-trusted-ca-file=/etc/etcd/ca.pem 34 | - --trusted-ca-file=/etc/etcd/ca.pem 35 | image: quay.io/coreos/etcd:ETCD_VER 36 | imagePullPolicy: IfNotPresent 37 | livenessProbe: 38 | failureThreshold: 8 39 | httpGet: 40 | host: localhost 41 | path: /health?serializable=true 42 | port: 2381 43 | scheme: HTTP 44 | initialDelaySeconds: 15 45 | timeoutSeconds: 15 46 | resources: {} 47 | securityContext: 48 | privileged: false 49 | volumeMounts: 50 | - name: d1 51 | mountPath: /etc/etcd/ 52 | readOnly: true 53 | - name: d2 54 | mountPath: /var/lib/etcd/ 55 | hostNetwork: true 56 | priorityClassName: system-cluster-critical 57 | volumes: 58 | - name: d1 59 | hostPath: 60 | path: /etc/etcd/ 61 | type: DirectoryOrCreate 62 | - name: d2 63 | hostPath: 64 | path: /var/lib/etcd 65 | type: DirectoryOrCreate 66 | -------------------------------------------------------------------------------- /scripts/control-plane/etcd/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | Documentation=https://github.com/coreos 4 | 5 | [Service] 6 | Type=notify 7 | ExecStart=/usr/local/bin/etcd\ 8 | --advertise-client-urls=https://INTERNAL_IP:2379\ 9 | --cert-file=/etc/etcd/kubernetes.pem\ 10 | 
--client-cert-auth\ 11 | --data-dir=/var/lib/etcd\ 12 | --initial-advertise-peer-urls=https://INTERNAL_IP:2380\ 13 | --initial-cluster=ETCD_NAME=https://INTERNAL_IP:2380\ 14 | --initial-cluster-state=new\ 15 | --initial-cluster-token=etcd-cluster-0\ 16 | --key-file=/etc/etcd/kubernetes-key.pem\ 17 | --listen-client-urls=https://INTERNAL_IP:2379,https://127.0.0.1:2379\ 18 | --listen-metrics-urls=http://INTERNAL_IP:2381,http://127.0.0.1:2381\ 19 | --listen-peer-urls=https://INTERNAL_IP:2380,https://127.0.0.1:2380\ 20 | --logger=zap\ 21 | --log-level=info\ 22 | --log-outputs=stderr\ 23 | --name=ETCD_NAME\ 24 | --peer-cert-file=/etc/etcd/kubernetes.pem\ 25 | --peer-client-cert-auth\ 26 | --peer-key-file=/etc/etcd/kubernetes-key.pem\ 27 | --peer-trusted-ca-file=/etc/etcd/ca.pem\ 28 | --trusted-ca-file=/etc/etcd/ca.pem 29 | Restart=on-failure 30 | RestartSec=5 31 | 32 | [Install] 33 | WantedBy=multi-user.target 34 | -------------------------------------------------------------------------------- /scripts/control-plane/etcd/install-etcd: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | long_opts=priv-key:,controller-ip:,controller-hostname:,etcd-gzip:,kubernetes-cert:,kubernetes-key:,\ 6 | containerized-cplane:,admin-kubeconfig:,ca-cert: 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | 9 | priv_key= 10 | controller_ip= 11 | controller_hostname= 12 | etcd_gzip= 13 | kubernetes_cert= 14 | kubernetes_key= 15 | containerized_cplane=0 16 | admin_kubeconfig= 17 | ca_cert= 18 | 19 | if ! parsed=$(xec parseargs $long_opts "$@"); then 20 | echo "$parsed" 21 | exit 1 22 | fi 23 | eval $(echo -e "$parsed") 24 | 25 | if [[ "$containerized_cplane" == *etcd* || "$containerized_cplane" == all ]]; then 26 | containerized_cplane=1 27 | fi 28 | 29 | echo "copying etcd TLS material" 30 | ssh -i $priv_key root@$controller_ip "mkdir -p /etc/etcd /var/lib/etcd && chmod 700 /var/lib/etcd" 31 | scp -i $priv_key $kubernetes_cert $kubernetes_key $ca_cert root@$controller_ip:/etc/etcd 32 | 33 | subdir=$(basename -s .tar.gz $etcd_gzip) 34 | echo "extracting etcd binaries to controller" 35 | cat "$etcd_gzip" | ssh -i $priv_key root@$controller_ip \ 36 | "tar zxvf - --no-same-owner --strip-components 1 -C /usr/local/bin/ $subdir/etcd $subdir/etcdctl" 37 | 38 | if [[ $containerized_cplane -eq 0 ]]; then 39 | echo "generating etcd service file" 40 | sed $script_dir/etcd.service\ 41 | -e "s|ETCD_NAME|$controller_hostname|g"\ 42 | -e "s|INTERNAL_IP|$controller_ip|g"\ 43 | | ssh -i $priv_key root@$controller_ip "cat > /etc/systemd/system/etcd.service" 44 | 45 | echo "starting etcd service" 46 | ssh -i $priv_key root@$controller_ip "systemctl daemon-reload && systemctl enable --now etcd" 47 | else 48 | echo "copying etcd static pod manifest to controller VM" 49 | sed $script_dir/etcd-pod.yaml\ 50 | -e "s|ETCD_VER|$ETCD_VER|g"\ 51 | -e "s|ETCD_NAME|$controller_hostname|g"\ 52 | -e "s|INTERNAL_IP|$controller_ip|g"\ 53 | | ssh -i $priv_key root@$controller_ip "cat > /etc/desktop-kubernetes/static-pods/etcd-pod.yaml" 54 | fi 55 | 56 | echo "verifying etcd is running" 57 | 58 | for i in {1..10}; do 59 | if ssh -i $priv_key root@$controller_ip\ 60 | "etcdctl --cert=/etc/etcd/kubernetes.pem --key=/etc/etcd/kubernetes-key.pem --cacert=/etc/etcd/ca.pem member list"; then 61 | break 62 | elif [[ $i -eq 10 ]]; then 63 | echo "Can't verify etcd installation" 64 | exit 1 65 | fi 66 | sleep 10s 67 | done 68 | 69 | echo "no errors detected with etcd
installation" 70 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-apiserver/encryption-config.yaml: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfig 2 | apiVersion: v1 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - aescbc: 8 | keys: 9 | - name: key1 10 | secret: ENCRYPTION_KEY 11 | - identity: {} 12 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-apiserver/install-kube-apiserver: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # aggregator configs based on: 4 | # - https://github.com/kubernetes-sigs/metrics-server#requirements 5 | # - https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/ 6 | # - https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/ 7 | # 8 | 9 | set -e 10 | 11 | long_opts=priv-key:,controller-ip:,kube-apiserver-binary:,kubernetes-cert:,kubernetes-key:,\ 12 | containerized-cplane:,admin-kubeconfig:,ca-cert:,ca-key: 13 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 14 | 15 | priv_key= 16 | controller_ip= 17 | kube_apiserver_binary= 18 | kubernetes_cert= 19 | kubernetes_key= 20 | containerized_cplane=0 21 | admin_kubeconfig= 22 | ca_cert= 23 | ca_key= 24 | 25 | if ! parsed=$(xec parseargs $long_opts "$@"); then 26 | echo "$parsed" 27 | exit 1 28 | fi 29 | eval $(echo -e "$parsed") 30 | 31 | if [[ "$containerized_cplane" == *kube-apiserver* || "$containerized_cplane" == all ]]; then 32 | containerized_cplane=1 33 | fi 34 | 35 | echo "generating service-account and aggregator certs and copying to controller VM" 36 | 37 | xec gen-certs-kubeconfig\ 38 | --subject-cn=service-account\ 39 | --identity=service-account\ 40 | --gen-kubeconfig=0\ 41 | --ca-cert=$ca_cert\ 42 | --ca-key=$ca_key 43 | 44 | xec gen-certs-kubeconfig\ 45 | --subject-org=k8s\ 46 | --subject-ou=system\ 47 | --subject-cn=aggregator\ 48 | --identity=aggregator\ 49 | --gen-kubeconfig=0\ 50 | --ca-cert=$ca_cert\ 51 | --ca-key=$ca_key 52 | 53 | ssh -i $priv_key root@$controller_ip "mkdir -p /var/lib/kubernetes/" 54 | scp -i $priv_key $DTKBASE/generated/cert/service-account*.pem $DTKBASE/generated/cert/aggregator*.pem\ 55 | root@$controller_ip:/var/lib/kubernetes 56 | 57 | echo "copying additional kube-apiserver TLS material to controller VM" 58 | scp -i $priv_key $ca_cert $ca_key $kubernetes_cert $kubernetes_key root@$controller_ip:/var/lib/kubernetes 59 | 60 | echo "generating kube-apiserver encryption config yaml" 61 | encryption_key=$(head -c 32 /dev/urandom | base64) 62 | sed $script_dir/encryption-config.yaml -e "s|ENCRYPTION_KEY|$encryption_key|g"\ 63 | | ssh -i $priv_key root@$controller_ip "cat > /var/lib/kubernetes/encryption-config.yaml" 64 | 65 | if [[ $containerized_cplane -eq 0 ]]; then 66 | echo "copying kube-apiserver binary to controller" 67 | scp -i $priv_key $kube_apiserver_binary root@$controller_ip:/usr/local/bin/kube-apiserver 68 | 69 | echo "generating kube-apiserver service file" 70 | sed $script_dir/kube-apiserver.service -e "s|CONTROLLER_IP|$controller_ip|g"\ 71 | | ssh -i $priv_key root@$controller_ip "cat > /etc/systemd/system/kube-apiserver.service" 72 | 73 | echo "starting kube-apiserver service" 74 | ssh -i $priv_key root@$controller_ip\ 75 | "systemctl daemon-reload && systemctl enable --now kube-apiserver" 76 | 77 | echo "verifying
kube-apiserver is running" 78 | set +e 79 | for i in {1..10}; do 80 | http_code=$(curl -so /dev/null -w "%{http_code}" --cacert $ca_cert https://$controller_ip:6443/version) 81 | if [[ "$http_code" == "200" ]]; then 82 | break 83 | fi 84 | sleep 1s 85 | done 86 | 87 | if [[ "$http_code" != "200" ]]; then 88 | echo "ERROR: Unable to verify kube-apiserver installation" 89 | exit 1 90 | fi 91 | else 92 | echo "copying kube-apiserver static pod manifest to controller VM" 93 | sed $script_dir/kube-apiserver-pod.yaml -e "s|CONTROLLER_IP|$controller_ip|g"\ 94 | -e "s|K8S_VER|$K8S_VER|g"\ 95 | | ssh -i $priv_key root@$controller_ip "cat > /etc/desktop-kubernetes/static-pods/kube-apiserver-pod.yaml" 96 | for i in {1..20}; do 97 | if kubectl --kubeconfig $admin_kubeconfig -n kube-system wait pod\ 98 | -lcomponent=kube-apiserver --for=condition=ready --timeout=5s; then 99 | break 100 | elif [[ $i -eq 20 ]]; then 101 | echo "Can't verify kube-apiserver installation" 102 | exit 1 103 | fi 104 | sleep 5s 105 | done 106 | fi 107 | 108 | echo "generating RBAC for kubelet authorization" 109 | 110 | # Per: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization 111 | # Snip: "...ensure the user identified by the --kubelet-client-certificate and --kubelet-client-key flags passed 112 | # to the apiserver is authorized..." etc. The 'kubernetes' cert and key are created with 'CN=kubernetes'. 113 | # Also see 'authorization.mode' in 'scripts/worker/kubelet/kubelet-config.yaml'. All these configs 114 | # together implement the guidance. 115 | kubectl --kubeconfig $admin_kubeconfig create -f $script_dir/rbac.yaml 116 | 117 | echo "no errors detected with kube-apiserver installation" 118 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-apiserver/kube-apiserver-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | component: kube-apiserver 6 | tier: control-plane 7 | name: kube-apiserver 8 | namespace: kube-system 9 | spec: 10 | containers: 11 | - name: kube-apiserver 12 | command: 13 | - kube-apiserver 14 | - --advertise-address=CONTROLLER_IP 15 | - --allow-privileged=true 16 | - --apiserver-count=3 17 | - --audit-log-maxage=30 18 | - --audit-log-maxbackup=3 19 | - --audit-log-maxsize=100 20 | - --audit-log-path=/var/log/audit.log 21 | - --authorization-mode=Node,RBAC 22 | - --bind-address=0.0.0.0 23 | - --client-ca-file=/var/lib/kubernetes/ca.pem 24 | - --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota 25 | - --enable-aggregator-routing=true 26 | - --encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml 27 | - --etcd-cafile=/var/lib/kubernetes/ca.pem 28 | - --etcd-certfile=/var/lib/kubernetes/kubernetes.pem 29 | - --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem 30 | - --etcd-servers=https://CONTROLLER_IP:2379 31 | - --event-ttl=1h 32 | - --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem 33 | - --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem 34 | - --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem 35 | - --proxy-client-cert-file=/var/lib/kubernetes/aggregator.pem 36 | - --proxy-client-key-file=/var/lib/kubernetes/aggregator-key.pem 37 | - --requestheader-allowed-names=aggregator 38 | - --requestheader-client-ca-file=/var/lib/kubernetes/ca.pem 39 | - 
--requestheader-extra-headers-prefix=X-Remote-Extra- 40 | - --requestheader-group-headers=X-Remote-Group 41 | - --requestheader-username-headers=X-Remote-User 42 | - --runtime-config=api/all=true 43 | - --service-account-issuer=https://kubernetes.default.svc 44 | - --service-account-key-file=/var/lib/kubernetes/service-account-key.pem 45 | - --service-account-signing-key-file=/var/lib/kubernetes/service-account-key.pem 46 | - --service-cluster-ip-range=10.32.0.0/24 47 | - --service-node-port-range=30000-32767 48 | - --tls-cert-file=/var/lib/kubernetes/kubernetes.pem 49 | - --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem 50 | - --v=2 51 | image: registry.k8s.io/kube-apiserver:K8S_VER 52 | imagePullPolicy: IfNotPresent 53 | resources: 54 | requests: 55 | cpu: 250m 56 | securityContext: 57 | privileged: false 58 | volumeMounts: 59 | - name: d1 60 | mountPath: /var/lib/kubernetes/ 61 | readOnly: true 62 | - name: d2 63 | mountPath: /var/log 64 | hostNetwork: true 65 | priorityClassName: system-cluster-critical 66 | volumes: 67 | - name: d1 68 | hostPath: 69 | path: /var/lib/kubernetes/ 70 | type: DirectoryOrCreate 71 | - name: d2 72 | hostPath: 73 | path: /var/log 74 | type: DirectoryOrCreate 75 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-apiserver/kube-apiserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes API Server 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-apiserver\ 7 | --requestheader-client-ca-file=/var/lib/kubernetes/ca.pem\ 8 | --requestheader-allowed-names=aggregator\ 9 | --requestheader-extra-headers-prefix=X-Remote-Extra-\ 10 | --requestheader-group-headers=X-Remote-Group\ 11 | --requestheader-username-headers=X-Remote-User\ 12 | --proxy-client-cert-file=/var/lib/kubernetes/aggregator.pem\ 13 | --proxy-client-key-file=/var/lib/kubernetes/aggregator-key.pem\ 14 | --enable-aggregator-routing=true\ 15 | --advertise-address=CONTROLLER_IP\ 16 | --allow-privileged=true\ 17 | --apiserver-count=3\ 18 | --audit-log-maxage=30\ 19 | --audit-log-maxbackup=3\ 20 | --audit-log-maxsize=100\ 21 | --audit-log-path=/var/log/audit.log\ 22 | --authorization-mode=Node,RBAC\ 23 | --bind-address=0.0.0.0\ 24 | --client-ca-file=/var/lib/kubernetes/ca.pem\ 25 | --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota\ 26 | --etcd-cafile=/var/lib/kubernetes/ca.pem\ 27 | --etcd-certfile=/var/lib/kubernetes/kubernetes.pem\ 28 | --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem\ 29 | --etcd-servers=https://CONTROLLER_IP:2379\ 30 | --event-ttl=1h\ 31 | --encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml\ 32 | --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem\ 33 | --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem\ 34 | --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem\ 35 | --runtime-config=api/all=true\ 36 | --service-account-key-file=/var/lib/kubernetes/service-account-key.pem\ 37 | --service-account-signing-key-file=/var/lib/kubernetes/service-account-key.pem\ 38 | --service-account-issuer=https://kubernetes.default.svc\ 39 | --service-cluster-ip-range=10.32.0.0/24\ 40 | --service-node-port-range=30000-32767\ 41 | --tls-cert-file=/var/lib/kubernetes/kubernetes.pem\ 42 | --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem\ 43 | --v=2 44 | 
Restart=on-failure 45 | RestartSec=5 46 | 47 | [Install] 48 | WantedBy=multi-user.target 49 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-apiserver/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | annotations: 5 | rbac.authorization.kubernetes.io/autoupdate: "true" 6 | labels: 7 | kubernetes.io/bootstrapping: rbac-defaults 8 | name: system:kube-apiserver-to-kubelet 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - nodes/proxy 14 | - nodes/stats 15 | - nodes/log 16 | - nodes/spec 17 | - nodes/metrics 18 | verbs: 19 | - "*" 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: ClusterRoleBinding 23 | metadata: 24 | name: system:kube-apiserver 25 | namespace: "" 26 | roleRef: 27 | apiGroup: rbac.authorization.k8s.io 28 | kind: ClusterRole 29 | name: system:kube-apiserver-to-kubelet 30 | subjects: 31 | - apiGroup: rbac.authorization.k8s.io 32 | kind: User 33 | name: kubernetes 34 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-controller-manager/install-kube-controller-manager: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # installs the kube-controller-manager. 4 | 5 | set -e 6 | 7 | long_opts=priv-key:,controller-ip:,controller-hostname:,kube-controller-manager-binary:,\ 8 | containerized-cplane:,admin-kubeconfig:,ca-cert:,ca-key:,cluster-cidr: 9 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | 11 | priv_key= 12 | controller_ip= 13 | controller_hostname= 14 | kube_controller_manager_binary= 15 | containerized_cplane=0 16 | admin_kubeconfig= 17 | ca_cert= 18 | ca_key= 19 | cluster_cidr= 20 | 21 | if ! 
parsed=$(xec parseargs $long_opts "$@"); then 22 | echo "$parsed" 23 | exit 1 24 | fi 25 | eval $(echo -e "$parsed") 26 | 27 | if [[ "$containerized_cplane" == *kube-controller-manager* || "$containerized_cplane" == all ]]; then 28 | containerized_cplane=1 29 | fi 30 | 31 | xec gen-certs-kubeconfig\ 32 | --subject-cn=system:kube-controller-manager\ 33 | --identity=kube-controller-manager\ 34 | --controller-ip=$controller_ip\ 35 | --ca-cert=$ca_cert\ 36 | --ca-key=$ca_key 37 | 38 | echo "copying kube-controller-manager kubeconfig file to controller VM" 39 | scp -i $priv_key $DTKBASE/generated/kubeconfig/kube-controller-manager.kubeconfig root@$controller_ip:/var/lib/kubernetes/ 40 | 41 | if [[ $containerized_cplane -eq 0 ]]; then 42 | echo "copying kube-controller-manager binary to controller VM" 43 | scp -i $priv_key $kube_controller_manager_binary root@$controller_ip:/usr/local/bin/kube-controller-manager 44 | 45 | echo "copying kube-controller-manager service file to controller VM" 46 | sed $script_dir/kube-controller-manager.service -e "s|CLUSTER_CIDR|$cluster_cidr|g"\ 47 | | ssh -i $priv_key root@$controller_ip "cat > /etc/systemd/system/kube-controller-manager.service" 48 | 49 | echo "starting kube-controller-manager service" 50 | ssh -i "$priv_key" root@$controller_ip\ 51 | "systemctl daemon-reload && systemctl enable --now kube-controller-manager" 52 | 53 | 54 | echo "verifying kube-controller-manager is running" 55 | set +e 56 | for i in {1..10}; do 57 | ssh -i "$priv_key" root@$controller_ip "systemctl is-active --quiet kube-controller-manager" && break 58 | sleep 1s 59 | done 60 | else 61 | echo "copying kube-controller-manager static pod manifest to controller VM" 62 | sed $script_dir/kube-controller-manager-pod.yaml -e "s|K8S_VER|$K8S_VER|g" -e "s|CLUSTER_CIDR|$cluster_cidr|g"\ 63 | | ssh -i $priv_key root@$controller_ip "cat > /etc/desktop-kubernetes/static-pods/kube-controller-manager-pod.yaml" 64 | for i in {1..20}; do 65 | if kubectl --kubeconfig $admin_kubeconfig -n kube-system wait pod\ 66 | -lcomponent=kube-controller-manager --for=condition=ready --timeout=5s; then 67 | break 68 | elif [[ $i -eq 20 ]]; then 69 | echo "Can't verify kube-controller-manager installation" 70 | exit 1 71 | fi 72 | sleep 5s 73 | done 74 | fi 75 | 76 | echo "no errors detected with kube-controller-manager installation" 77 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-controller-manager/kube-controller-manager-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | component: kube-controller-manager 6 | tier: control-plane 7 | name: kube-controller-manager 8 | namespace: kube-system 9 | spec: 10 | containers: 11 | - name: kube-controller-manager 12 | command: 13 | - kube-controller-manager 14 | - --allocate-node-cidrs=true 15 | - --bind-address=0.0.0.0 16 | - --cluster-cidr=CLUSTER_CIDR 17 | - --cluster-name=kubernetes 18 | - --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem 19 | - --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem 20 | - --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig 21 | - --leader-elect=true 22 | - --root-ca-file=/var/lib/kubernetes/ca.pem 23 | - --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem 24 | - --service-cluster-ip-range=10.32.0.0/24 25 | - --use-service-account-credentials=true 26 | - 
--authorization-always-allow-paths=/healthz,/readyz,/livez,/metrics 27 | - --v=2 28 | image: registry.k8s.io/kube-controller-manager:K8S_VER 29 | imagePullPolicy: IfNotPresent 30 | livenessProbe: 31 | failureThreshold: 8 32 | httpGet: 33 | host: localhost 34 | path: /healthz 35 | port: 10257 36 | scheme: HTTPS 37 | initialDelaySeconds: 15 38 | timeoutSeconds: 15 39 | resources: 40 | requests: 41 | cpu: 200m 42 | securityContext: 43 | privileged: false 44 | volumeMounts: 45 | - name: d1 46 | mountPath: /var/lib/kubernetes/ 47 | readOnly: true 48 | hostNetwork: true 49 | priorityClassName: system-cluster-critical 50 | volumes: 51 | - name: d1 52 | hostPath: 53 | path: /var/lib/kubernetes/ 54 | type: DirectoryOrCreate 55 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-controller-manager/kube-controller-manager.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Controller Manager 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-controller-manager \ 7 | --allocate-node-cidrs=true \ 8 | --bind-address=0.0.0.0 \ 9 | --cluster-cidr=CLUSTER_CIDR \ 10 | --cluster-name=kubernetes \ 11 | --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \ 12 | --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \ 13 | --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \ 14 | --leader-elect=true \ 15 | --root-ca-file=/var/lib/kubernetes/ca.pem \ 16 | --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \ 17 | --service-cluster-ip-range=10.32.0.0/24 \ 18 | --use-service-account-credentials=true \ 19 | --authorization-always-allow-paths=/healthz,/readyz,/livez,/metrics \ 20 | --v=2 21 | Restart=on-failure 22 | RestartSec=5 23 | 24 | [Install] 25 | WantedBy=multi-user.target 26 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-scheduler/install-kube-scheduler: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | long_opts=priv-key:,controller-ip:,controller-hostname:,kube-scheduler-binary:,\ 6 | containerized-cplane:,admin-kubeconfig:,ca-cert:,ca-key: 7 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | 9 | priv_key= 10 | controller_ip= 11 | controller_hostname= 12 | kube_scheduler_binary= 13 | containerized_cplane=0 14 | admin_kubeconfig= 15 | ca_cert= 16 | ca_key= 17 | 18 | if ! 
parsed=$(xec parseargs $long_opts "$@"); then 19 | echo "$parsed" 20 | exit 1 21 | fi 22 | eval $(echo -e "$parsed") 23 | 24 | if [[ "$containerized_cplane" == *kube-scheduler* || "$containerized_cplane" == all ]]; then 25 | containerized_cplane=1 26 | fi 27 | 28 | xec gen-certs-kubeconfig\ 29 | --subject-cn=system:kube-scheduler\ 30 | --identity=kube-scheduler\ 31 | --controller-ip=$controller_ip\ 32 | --ca-cert=$ca_cert\ 33 | --ca-key=$ca_key 34 | 35 | echo "copying configuration files to controller VM" 36 | ssh -i $priv_key root@$controller_ip "mkdir -p /var/lib/kubernetes /etc/kubernetes/config" 37 | scp -i $priv_key $DTKBASE/generated/kubeconfig/kube-scheduler.kubeconfig root@$controller_ip:/var/lib/kubernetes/ 38 | scp -i $priv_key $script_dir/kube-scheduler.yaml root@$controller_ip:/etc/kubernetes/config/ 39 | 40 | if [[ $containerized_cplane -eq 0 ]]; then 41 | echo "copying kube-scheduler binary to controller VM" 42 | scp -i $priv_key $kube_scheduler_binary root@$controller_ip:/usr/local/bin/kube-scheduler 43 | 44 | echo "copying kube-scheduler service file to controller VM" 45 | scp -i $priv_key $script_dir/kube-scheduler.service root@$controller_ip:/etc/systemd/system/ 46 | 47 | echo "starting kube-scheduler service" 48 | ssh -i $priv_key root@$controller_ip\ 49 | "systemctl daemon-reload && systemctl enable --now kube-scheduler" 50 | 51 | echo "verifying kube-scheduler is running" 52 | set +e 53 | for i in {1..10}; do 54 | ssh -i $priv_key root@$controller_ip "systemctl is-active --quiet kube-scheduler" && break 55 | sleep 1s 56 | done 57 | else 58 | echo "copying kube-scheduler static pod manifest to controller VM" 59 | sed $script_dir/kube-scheduler-pod.yaml -e "s|K8S_VER|$K8S_VER|g"\ 60 | | ssh -i $priv_key root@$controller_ip "cat > /etc/desktop-kubernetes/static-pods/kube-scheduler-pod.yaml" 61 | for i in {1..20}; do 62 | if kubectl --kubeconfig $admin_kubeconfig -n kube-system wait pod\ 63 | -lcomponent=kube-scheduler --for=condition=ready --timeout=5s; then 64 | break 65 | elif [[ $i -eq 20 ]]; then 66 | echo "Can't verify kube-scheduler installation" 67 | exit 1 68 | fi 69 | sleep 5s 70 | done 71 | fi 72 | 73 | echo "no errors detected with kube-scheduler installation" 74 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-scheduler/kube-scheduler-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | component: kube-scheduler 6 | tier: control-plane 7 | name: kube-scheduler 8 | namespace: kube-system 9 | spec: 10 | containers: 11 | - name: kube-scheduler 12 | command: 13 | - kube-scheduler 14 | - --kubeconfig=/var/lib/kubernetes/kube-scheduler.kubeconfig 15 | - --authorization-always-allow-paths=/healthz,/readyz,/livez,/metrics 16 | - --v=2 17 | image: registry.k8s.io/kube-scheduler:K8S_VER 18 | imagePullPolicy: IfNotPresent 19 | livenessProbe: 20 | failureThreshold: 8 21 | httpGet: 22 | host: localhost 23 | path: /healthz 24 | port: 10259 25 | scheme: HTTPS 26 | initialDelaySeconds: 15 27 | timeoutSeconds: 15 28 | resources: 29 | requests: 30 | cpu: 100m 31 | securityContext: 32 | privileged: false 33 | volumeMounts: 34 | - name: d1 35 | mountPath: /var/lib/kubernetes 36 | readOnly: true 37 | hostNetwork: true 38 | priorityClassName: system-cluster-critical 39 | volumes: 40 | - name: d1 41 | hostPath: 42 | path: /var/lib/kubernetes 43 | type: DirectoryOrCreate 44 | 
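All three control-plane install scripts above verify the containerized path the same way: they poll `kubectl wait` against the pod's `component` label until the static pod reports ready. For a quick manual spot-check of the whole control plane after an install, something like the following works (a sketch, not part of the project's scripts; it assumes the same admin kubeconfig the installers receive via their `--admin-kubeconfig` option - substitute your actual path):

```
# All three static pod manifests above carry the label tier=control-plane, so
# one wait covers kube-apiserver, kube-controller-manager and kube-scheduler.
kubectl --kubeconfig "$admin_kubeconfig" -n kube-system \
  wait pod -l tier=control-plane --for=condition=ready --timeout=120s
```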
-------------------------------------------------------------------------------- /scripts/control-plane/kube-scheduler/kube-scheduler.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Scheduler 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-scheduler \ 7 | --config=/etc/kubernetes/config/kube-scheduler.yaml \ 8 | --authorization-always-allow-paths=/healthz,/readyz,/livez,/metrics \ 9 | --v=2 10 | Restart=on-failure 11 | RestartSec=5 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /scripts/control-plane/kube-scheduler/kube-scheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kubescheduler.config.k8s.io/v1 2 | kind: KubeSchedulerConfiguration 3 | clientConnection: 4 | kubeconfig: /var/lib/kubernetes/kube-scheduler.kubeconfig 5 | leaderElection: 6 | leaderElect: true 7 | -------------------------------------------------------------------------------- /scripts/helpers/check-compatibility: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | mismatch=0 4 | tested_openssl_version="3.0.2" 5 | tested_genisoimage_version="1.1.11" 6 | tested_vboxmanage_version="7.0.18_Ubuntur162988" 7 | tested_ssh_version="OpenSSH_8.9p1" 8 | tested_host_os_version="Ubuntu 22.04.5 LTS" 9 | tested_kubectl_version="v1.33.1" 10 | tested_curl_version="7.81.0" 11 | tested_helm_version="v3.18.0" 12 | tested_yq_version="4.40.5" 13 | tested_virt_install_version="4.0.0" 14 | tested_virsh_version="8.0.0" 15 | tested_qemu_img_version="6.2.0" 16 | 17 | if which openssl &>/dev/null; then 18 | openssl_version=$(openssl version | awk '{print $2}') 19 | [[ $openssl_version == "$tested_openssl_version" ]] && openssl_ok=Yes || { openssl_ok=No; mismatch=1; } 20 | else 21 | openssl_version="not found" 22 | fi 23 | 24 | if which genisoimage &>/dev/null; then 25 | genisoimage_version=$(genisoimage --version | awk '{print $2}') 26 | [[ $genisoimage_version == "$tested_genisoimage_version" ]] && genisoimage_ok=Yes || { genisoimage_ok=No; mismatch=1; } 27 | else 28 | genisoimage_version="not found" 29 | fi 30 | 31 | if which vboxmanage &>/dev/null; then 32 | vboxmanage_version=$(vboxmanage --version) 33 | [[ $vboxmanage_version == $tested_vboxmanage_version* ]] && vboxmanage_ok=Yes || { vboxmanage_ok=No; mismatch=1; } 34 | else 35 | vboxmanage_version="not found" 36 | fi 37 | 38 | if which ssh &>/dev/null; then 39 | ssh_version=$(ssh -V 2> >(awk '{print $1}')) 40 | [[ $ssh_version == "$tested_ssh_version" ]] && ssh_ok=Yes || { ssh_ok=No; mismatch=1; } 41 | else 42 | ssh_version="not found" 43 | fi 44 | 45 | if which lsb_release &>/dev/null; then 46 | host_os_version=$(lsb_release -a 2>/dev/null | grep Description | cut -d: -f2- | xargs) 47 | [[ $host_os_version == "$tested_host_os_version" ]] && os_ok=Yes || { os_ok=No; mismatch=1; } 48 | else 49 | host_os_version="unable to determine" 50 | fi 51 | 52 | if which kubectl &>/dev/null; then 53 | kubectl_version=$(kubectl version 2>/dev/null --client | grep 'Client Version' | cut -d: -f2- | xargs) 54 | [[ $kubectl_version == "$tested_kubectl_version" ]] && kubectl_ok=Yes || { kubectl_ok=No; mismatch=1; } 55 | else 56 | kubectl_version="not found" 57 | fi 58 | 59 | if which curl &>/dev/null; then 60 | curl_version=$(curl --version | head -n1 | awk 
'{print $2}') 61 | [[ $curl_version == "$tested_curl_version" ]] && curl_ok=Yes || { curl_ok=No; mismatch=1; } 62 | else 63 | curl_version="not found" 64 | fi 65 | 66 | if which helm &>/dev/null; then 67 | helm_version=$(helm version --short | cut -d+ -f1) 68 | [[ $helm_version == "$tested_helm_version" ]] && helm_ok=Yes || { helm_ok=No; mismatch=1; } 69 | else 70 | helm_version="not found" 71 | fi 72 | 73 | if which yq &>/dev/null; then 74 | yq_version=$(yq --version) 75 | regex=".*version v([0-9.]*)$" 76 | [[ $yq_version =~ $regex ]] && yq_version="${BASH_REMATCH[1]}" 77 | [[ $yq_version == "$tested_yq_version" ]] && yq_ok=Yes || { yq_ok=No; mismatch=1; } 78 | else 79 | yq_version="not found" 80 | fi 81 | 82 | if which virt-install &>/dev/null; then 83 | virt_install_version=$(virt-install --version) 84 | [[ $virt_install_version == "$tested_virt_install_version" ]] && virt_install_ok=Yes || { virt_install_ok=No; mismatch=1; } 85 | else 86 | virt_install_version="not found" 87 | fi 88 | 89 | if which virsh &>/dev/null; then 90 | virsh_version=$(virsh --version) 91 | [[ $virsh_version == "$tested_virsh_version" ]] && virsh_ok=Yes || { virsh_ok=No; mismatch=1; } 92 | else 93 | virsh_version="not found" 94 | fi 95 | 96 | if which qemu-img &>/dev/null; then 97 | qemu_img_version=$(qemu-img --version) 98 | regex=".*version ([0-9.]*).*" 99 | [[ $qemu_img_version =~ $regex ]] && qemu_img_version="${BASH_REMATCH[1]}" 100 | [[ $qemu_img_version == "$tested_qemu_img_version" ]] && qemu_img_ok=Yes || { qemu_img_ok=No; mismatch=1; } 101 | else 102 | qemu_img_version="not found" 103 | fi 104 | 105 | 106 | cat <<EOF 107 | 108 | utility - tested version / your version / compatible 109 | openssl - $tested_openssl_version / $openssl_version / $openssl_ok 110 | genisoimage - $tested_genisoimage_version / $genisoimage_version / $genisoimage_ok 111 | vboxmanage - $tested_vboxmanage_version / $vboxmanage_version / $vboxmanage_ok 112 | ssh - $tested_ssh_version / $ssh_version / $ssh_ok 113 | host os - $tested_host_os_version / $host_os_version / $os_ok 114 | kubectl - $tested_kubectl_version / $kubectl_version / $kubectl_ok 115 | curl - $tested_curl_version / $curl_version / $curl_ok 116 | helm - $tested_helm_version / $helm_version / $helm_ok 117 | yq - $tested_yq_version / $yq_version / $yq_ok 118 | virt-install - $tested_virt_install_version / $virt_install_version / $virt_install_ok 119 | virsh - $tested_virsh_version / $virsh_version / $virsh_ok 120 | qemu-img - $tested_qemu_img_version / $qemu_img_version / $qemu_img_ok 121 | EOF 122 | 123 | exit $mismatch 124 | -------------------------------------------------------------------------------- /scripts/helpers/check-objects: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ "$1" == "files" ]]; then 4 | for f in $ALMA8_ISO_FILE\ 5 | $ALMA9_ISO_FILE\ 6 | $CENTOS9_ISO_FILE\ 7 | $CNI_PLUGINS_BINARY\ 8 | $CONTAINERD_BINARY\ 9 | $CRICTL_BINARY\ 10 | $ETCD_GZIP\ 11 | $GUEST_ADDITIONS_ISO_FILE\ 12 | $KUBE_APISERVER_BINARY\ 13 | $KUBE_CONTROLLER_MANAGER_BINARY\ 14 | $KUBELET_BINARY\ 15 | $KUBE_PROXY_BINARY\ 16 | $KUBE_SCHEDULER_BINARY\ 17 | $ROCKY_ISO_FILE\ 18 | $RUNC_BINARY; do 19 | if [[ ! -f $f ]]; then 20 | echo "missing: $f" >&2 21 | else 22 | echo "OK: $f" 23 | fi 24 | done 25 | else 26 | for f in $ALMA8_ISO_DOWNLOAD\ 27 | $ALMA9_ISO_DOWNLOAD\ 28 | $CENTOS9_ISO_DOWNLOAD\ 29 | $CNI_PLUGINS_DOWNLOAD\ 30 | $CONTAINERD_DOWNLOAD\ 31 | $CRICTL_DOWNLOAD\ 32 | $ETCD_DOWNLOAD\ 33 | $GUEST_ADDITIONS_ISO_DOWNLOAD\ 34 | $KUBE_APISERVER_DOWNLOAD\ 35 | $KUBE_CONTROLLER_MANAGER_DOWNLOAD\ 36 | $KUBELET_DOWNLOAD\ 37 | $KUBE_PROXY_DOWNLOAD\ 38 | $KUBE_SCHEDULER_DOWNLOAD\ 39 | $ROCKY_ISO_DOWNLOAD\ 40 | $RUNC_DOWNLOAD; do 41 | if ! curl -sL $f -o /dev/null --head --fail; then 42 | echo "missing: $f" >&2 43 | else 44 | echo "OK: $f" 45 | fi 46 | done 47 | fi 48 | -------------------------------------------------------------------------------- /scripts/helpers/download-obj: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Downloads an object to the filesystem if it does not already exist there. 4 | # 5 | # Usage: 6 | # 7 | # download-obj --url=https://foobar.com/downloads/frobozz.tar.gz --dest="/tmp/downloads/frobozz.tar.gz" 8 | # 9 | # The --dest arg specifies a fully qualified path and file name. The script will create all intermediate 10 | # directories if they don't exist. Optional --executable arg does a chmod +x on the downloaded file 11 | # 12 | 13 | set -e 14 | long_opts=url:,dest:,executable 15 | 16 | url= 17 | dest= 18 | executable=0 19 | 20 | if ! parsed=$(xec parseargs $long_opts "$@"); then 21 | echo "$parsed" 22 | exit 1 23 | fi 24 | eval $(echo -e "$parsed") 25 | 26 | if [[ -z "$url" ]] || [[ -z "$dest" ]]; then 27 | echo "invalid args" 28 | exit 1 29 | fi 30 | 31 | mkdir -p $(dirname "$dest") 32 | 33 | if [[ ! 
-f "$dest" ]]; then 34 | echo "downloading $url" 35 | curl -sL "$url" -o "$dest" 36 | if [[ $executable -eq 1 ]]; then 37 | chmod +x "$dest" 38 | fi 39 | else 40 | echo "$dest already exists, skipping download" 41 | fi 42 | -------------------------------------------------------------------------------- /scripts/helpers/download-objects: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Download core cluster components. (Downloads for add-ons like Calico etc. are 4 | # handle by the various add-ons - see: 'scripts/addons'.) 5 | # 6 | 7 | set -e 8 | long_opts=create-template:,linux:,virt: 9 | 10 | create_template= 11 | linux= 12 | virt= 13 | 14 | if ! parsed=$(xec parseargs $long_opts "$@"); then 15 | echo "$parsed" 16 | exit 1 17 | fi 18 | eval $(echo -e "$parsed") 19 | 20 | if [[ $create_template -eq 1 ]]; then 21 | if [[ $linux == "alma8" ]]; then 22 | xec download-obj --url=$ALMA8_ISO_DOWNLOAD --dest=$ALMA8_ISO_FILE 23 | elif [[ $linux == "alma9" ]]; then 24 | xec download-obj --url=$ALMA9_ISO_DOWNLOAD --dest=$ALMA9_ISO_FILE 25 | elif [[ $linux == "centos9" ]]; then 26 | xec download-obj --url=$CENTOS9_ISO_DOWNLOAD --dest=$CENTOS9_ISO_FILE 27 | elif [[ $linux == "rocky" ]]; then 28 | xec download-obj --url=$ROCKY_ISO_DOWNLOAD --dest=$ROCKY_ISO_FILE 29 | fi 30 | if [[ $virt == "virtualbox" ]]; then 31 | xec download-obj --url=$GUEST_ADDITIONS_ISO_DOWNLOAD --dest=$GUEST_ADDITIONS_ISO_FILE 32 | fi 33 | fi 34 | 35 | xec download-obj --url=$CNI_PLUGINS_DOWNLOAD --dest=$CNI_PLUGINS_BINARY 36 | xec download-obj --url=$CONTAINERD_DOWNLOAD --dest=$CONTAINERD_BINARY 37 | xec download-obj --url=$CRICTL_DOWNLOAD --dest=$CRICTL_BINARY 38 | xec download-obj --url=$ETCD_DOWNLOAD --dest=$ETCD_GZIP 39 | xec download-obj --url=$KUBE_APISERVER_DOWNLOAD --dest=$KUBE_APISERVER_BINARY --executable 40 | xec download-obj --url=$KUBE_CONTROLLER_MANAGER_DOWNLOAD --dest=$KUBE_CONTROLLER_MANAGER_BINARY --executable 41 | xec download-obj --url=$KUBELET_DOWNLOAD --dest=$KUBELET_BINARY --executable 42 | xec download-obj --url=$KUBE_PROXY_DOWNLOAD --dest=$KUBE_PROXY_BINARY --executable 43 | xec download-obj --url=$KUBE_SCHEDULER_DOWNLOAD --dest=$KUBE_SCHEDULER_BINARY --executable 44 | xec download-obj --url=$RUNC_DOWNLOAD --dest=$RUNC_BINARY --executable 45 | -------------------------------------------------------------------------------- /scripts/helpers/parse-config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import yaml, sys, os 4 | 5 | valid_configs = [ 6 | "k8s.containerized-cplane", 7 | "k8s.cluster-cidr", 8 | "k8s.cluster-dns", 9 | "k8s.kube-proxy", 10 | "kvm.network", 11 | "kvm.kickstart", 12 | "kvm.os-variant", 13 | "vbox.host-network-interface", 14 | "vbox.host-only-network", 15 | "vbox.kickstart", 16 | "vbox.vboxdir", 17 | "vm.linux", 18 | "vm.create-template", 19 | "vm.template-vmname" 20 | ] 21 | 22 | skip_configs = [ 23 | "k8s.containerd-mirror" 24 | ] 25 | 26 | quiet = False 27 | config_file = sys.argv[1] 28 | if len(sys.argv) == 3: 29 | quiet = True 30 | 31 | with open(config_file, "r") as file: 32 | cfg = yaml.safe_load(file) 33 | 34 | vars = "" 35 | newline = "" 36 | 37 | for key in list(cfg.keys()): 38 | # virt is a top level key 39 | if key == "virt": 40 | vars = vars + "%svirt=%s" % (newline, cfg[key]) 41 | newline = "\n" 42 | elif key not in ["k8s", "kvm", "vbox", "vm"]: 43 | continue 44 | else: 45 | for subkey in list(cfg[key].keys()): 46 | config = 
"%s.%s" % (key, subkey) 47 | if config in skip_configs: 48 | continue 49 | if not config in valid_configs: 50 | print("error: unsupported configuration: " + config) 51 | os._exit(1) 52 | val = "" if cfg[key][subkey] is None else cfg[key][subkey] 53 | if str(val) == "True": 54 | val = "1" 55 | elif str(val) == "False": 56 | val = "0" 57 | var = (key + "-" + subkey).replace("-", "_") 58 | vars = vars + "%s%s=%s" % (newline, var, val) 59 | newline = "\n" 60 | 61 | if not quiet: 62 | print(vars) 63 | -------------------------------------------------------------------------------- /scripts/helpers/parseargs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Parses command line options somewhat like 'getopt'. Returns a multi-line string 4 | # that can be provided to the 'eval' command to set variables from parsed options. 5 | # For example, if cmd line has '--some-opt=1 --other-opt=foo' then returns 6 | # 'some_opt=1\nother_opt=foo'. 7 | # 8 | # Args: 9 | # $1 - like the longopts passed to getopt 10 | # $2..$n actual options to parse 11 | # 12 | # Two kinds of options are parsed: 13 | # 1. With a value in two forms: --foo=bar or --foo bar 14 | # 2. Y/N with no value, like --enabled. If present then assigned a value of '1' 15 | # 16 | # Example 17 | # 18 | # scripts/helpers/parseargs foo:,baz,foo-bar: --foo=bar --baz --foo-bar frobozz 19 | # 20 | # Would return 21 | # foo=bar\nbaz=1\nfoo_bar=frobozz 22 | 23 | vars= 24 | sep= 25 | IFS=',' read -ra optsarr <<<"$1" 26 | valsarr=() 27 | 28 | # parse valid opts, remove trailing colon, set whether opt accepts a value 29 | for ((i = 0; i < ${#optsarr[@]}; ++i)); do 30 | opt=${optsarr[$i]} 31 | if [[ $opt == *: ]]; then 32 | # accepts a value, strip the colon from the opt name 33 | opt=${opt::-1} 34 | valsarr[$i]=1 35 | optsarr[$i]=$opt 36 | else 37 | valsarr[$i]=0 38 | fi 39 | done 40 | 41 | # parse command line args after the first (longopts) arg 42 | for ((i = 2; i <= "$#"; ++i)); do 43 | opt="${!i}" 44 | # strip leading dbl-dash 45 | opt="${opt#--}" 46 | val= 47 | have_equals=0 48 | 49 | # if there's an =, the opt value is after the = 50 | if [[ $opt =~ = ]]; then 51 | have_equals=1 52 | val=$(echo $opt | cut -d= -f2) 53 | opt=$(echo $opt | cut -d= -f1) 54 | fi 55 | 56 | # find the option in the list of valid options 57 | for ((j = 0; j < "${#optsarr[@]}"; ++j)); do 58 | if [[ "$opt" == "${optsarr[$j]}" ]]; then 59 | break 60 | fi 61 | done 62 | if [[ j -ge "${#optsarr[@]}" ]]; then 63 | echo "unknown option: ${!i} (longopts=$1)" 64 | exit 1 65 | fi 66 | 67 | # handle opts that accept values 68 | if [[ ${valsarr[$j]} -eq 1 ]]; then 69 | if [[ $have_equals -eq 0 ]]; then 70 | # didn't already get it from opt=val 71 | ((i++)) 72 | val="${!i}" 73 | if [[ ${val:0:2} == "--" ]]; then 74 | echo "option $opt expects value, got $val instead" 75 | exit 1 76 | elif [[ -z "$val" ]]; then 77 | echo "option $opt expects value" 78 | exit 1 79 | fi 80 | fi 81 | else 82 | if [[ -n "$val" ]]; then 83 | echo "option does not take a value: $opt" 84 | exit 1 85 | fi 86 | # bool opt 87 | val=1 88 | fi 89 | 90 | # replace dash with underscore 91 | varname=${opt//-/_} 92 | vars="$vars$sep$varname=$val" 93 | sep="\n" 94 | done 95 | 96 | echo -e "$vars" -------------------------------------------------------------------------------- /scripts/helpers/show-usage: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | while IFS= read -r line; do 4 | echo 
"${line:2}" 5 | done <<< ' 6 | dtk 7 | --- 8 | Creates a Kubernetes cluster on the desktop using KVM or VirtualBox and kickstart for hands-free 9 | CentOS, Rocky, or Alma Linux installation. The cluster consists of a configurable number of nodes. 10 | The nodes are configured with networking support for host-to-guest, guest-to-guest, and 11 | guest-to-internet. 12 | 13 | The script creates a "template" VM, and then clones that for each cluster node. The initial 14 | template VM creation runs Kickstart to install the OS. 15 | 16 | Usage: dtk [--config=] [--create-template=] [--no-create-vms] [--install-addon=] 17 | [--verify=] [--check-compatibility] [--up=] [--down=] 18 | [--delete=] [--help] 19 | 20 | Options: 21 | 22 | --config Optional. The path to a configuration yaml file that specifies the cluster 23 | options. If not provided, uses the config.yaml file in the project root. 24 | See the config.yaml file for structure and supported configs and values. 25 | --create-template Optional. Overrides the setting specified in the configuration referenced by 26 | the "--config" arg. Allowed values: "true" and "false". 27 | --no-create-vms Optional. Do not create VMs. If this option is specified, then the VMs in the 28 | config.yaml file must be up and running, and the installer will simply install 29 | Kubernetes on them. 30 | --install-addon Optional. Installs the addon in the "scripts/addons" directory matching the 31 | specified add-on into the running cluster. E.g.: "--install-addon openebs". 32 | --verify Optional. Looks for all the upstreams or filesystem objects used by the script. 33 | Valid options are "upstreams" and "files". If "upstreams", then the script does 34 | a curl HEAD request for each upstream (e.g. OS ISO, Kubernetes binaries, etc.). 35 | If "files", then the same check is performed for the downloaded filesystem 36 | objects. This is a useful option to see all the objects that are required to 37 | provision a cluster. 38 | --check-compatibility Optional. Checks the installed versions of various utils used by the project 39 | (curl, kubectl, etc) against what the project has been tested on - and then exits, 40 | taking no further action. You should do this at least once. 41 | --up Optional. Takes a comma-separated list of VM names, and starts them all. 42 | --down Optional. Opposite of --up. Note - this is very low-tech at present: Order the 43 | args with the workers first and the controller last. 44 | --delete Optional. Tears down the cluster: Force stops all VMs and removes associated 45 | files from the virtual machine directory. 46 | --help Optional. Displays this help and exits. 47 | 48 | Examples: 49 | 50 | ./dtk 51 | 52 | Creates a cluster as defined in the provided "config.yaml" file. 53 | 54 | ./dtk --create-template=false 55 | 56 | Same as above except re-uses the existing template from a prior cluster creation. 57 | 58 | ./dtk --up=vm1,vm2,vm3 59 | 60 | Starts the named VMs.' 61 | -------------------------------------------------------------------------------- /scripts/helpers/yp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Super-simple yaml config-file value getter. Three capabilities: 4 | # 5 | # 1) (base use case): return a value at a path: 6 | # 7 | # ./yp k8s/containerized-cplane 8 | # ./yp vbox/template-vmname 9 | # 10 | # 2) Return length of a list (prefix path with "?"): 11 | # ./yp ?vms 12 | # 13 | # 3) Variant of #1 - get a value from the nth dict in a list. 
14 | # Any path segment that is an int is assumed to be a 0-rel index 15 | # into the list represented by the path segment to the immediate 16 | # left of the index: 17 | # 18 | # Usage: 19 | # 20 | # ./yp <config-file> vms/1/name 21 | # 22 | # Note - Boolean values are returned as "1" or "0" 23 | # 24 | 25 | import yaml, sys 26 | 27 | config_file = sys.argv[1] 28 | 29 | with open(config_file, 'r') as file: 30 | y = yaml.safe_load(file) 31 | 32 | want_len = False 33 | 34 | if sys.argv[2][:1] == "?": 35 | want_len = True 36 | sys.argv[2] = sys.argv[2][1:] 37 | 38 | for segment in sys.argv[2].split('/'): 39 | try: 40 | idx = int(segment) 41 | y = y[idx] 42 | except: 43 | try: 44 | y = y[segment] 45 | except: # no such key 46 | y = "" 47 | break 48 | 49 | if str(y) == "True": 50 | y = "1" 51 | elif str(y) == "False": 52 | y = "0" 53 | 54 | print(len(y) if want_len else y) 55 | -------------------------------------------------------------------------------- /scripts/kvm/clone-vm: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | long_opts=priv-key:,template-vmname:,clone-vmname:,clone-ram:,clone-cpus:,clone-disk: 6 | 7 | priv_key= 8 | template_vmname= 9 | clone_vmname= 10 | clone_ram= 11 | clone_cpus= 12 | clone_disk= 13 | 14 | if ! parsed=$(xec parseargs $long_opts "$@"); then 15 | echo "$parsed" 16 | exit 1 17 | fi 18 | eval $(echo -e "$parsed") 19 | 20 | echo "getting directory of template" 21 | dir=$(virsh domblklist $template_vmname | grep vda | awk '{print $2}') 22 | dir=${dir%/*} 23 | 24 | echo "cloning VM" 25 | virt-clone\ 26 | --original=$template_vmname\ 27 | --name=$clone_vmname\ 28 | --file=$dir/$clone_vmname.qcow2\ 29 | --mac=RANDOM 30 | 31 | virsh setvcpus $clone_vmname $clone_cpus --config --maximum 32 | virsh setvcpus $clone_vmname $clone_cpus --config 33 | virsh setmaxmem $clone_vmname "$clone_ram"M 34 | virsh setmem $clone_vmname "$clone_ram"M --config 35 | 36 | if [[ -n $clone_disk ]]; then 37 | echo "resizing disk vda to $clone_disk gigs" 38 | img=$(virsh domblklist $clone_vmname | grep vda | awk '{print $2}') 39 | sudo qemu-img resize $img ${clone_disk}G 40 | fi 41 | 42 | echo "starting VM" 43 | virsh start $clone_vmname 44 | 45 | echo "setting the hostname for cloned VM" 46 | clone_ip=$(xec kvm/get-vm-ip $clone_vmname) 47 | ssh-keygen -f ~/.ssh/known_hosts -R $clone_ip 48 | ssh -o "StrictHostKeyChecking no" -i $priv_key root@$clone_ip "hostnamectl set-hostname $clone_vmname" 49 | 50 | if [[ -n $clone_disk ]]; then 51 | echo "resizing partition 3 on /dev/vda" 52 | ssh -i $priv_key root@$clone_ip "growpart /dev/vda 3 && xfs_growfs /" 53 | fi 54 | 55 | echo "done cloning \"$clone_vmname\"" 56 | -------------------------------------------------------------------------------- /scripts/kvm/configure-etc-hosts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # configures /etc/hosts on each VM with the hostname of each VM. Usage: 4 | # configure-etc-hosts generated/kickstart/id_ed25519 vm1 vm2 ... 
vmN 5 | 6 | set -e 7 | 8 | priv_key="$1" 9 | shift 10 | etc_hosts="" 11 | 12 | for vm in "$@" 13 | do 14 | ip=$(xec kvm/get-vm-ip $vm) 15 | etc_hosts="$etc_hosts$ip $vm\n" 16 | done 17 | 18 | for vm in "$@" 19 | do 20 | ip=$(xec kvm/get-vm-ip $vm) 21 | echo -e "$etc_hosts" | ssh -i $priv_key root@$ip "cat >> /etc/hosts" 22 | done 23 | -------------------------------------------------------------------------------- /scripts/kvm/create-template-vm: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | long_opts=template-vmname:,linux-iso-path:,host-only-network:,kickstart:,os-variant: 6 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | 8 | template_vmname= 9 | linux_iso_path= 10 | host_only_network= 11 | kickstart= 12 | os_variant= 13 | 14 | if ! parsed=$(xec parseargs $long_opts "$@"); then 15 | echo "$parsed" 16 | exit 1 17 | fi 18 | eval $(echo -e "$parsed") 19 | 20 | echo "checking/generating SSH keys" 21 | xec gen-ssh-keyfiles 22 | 23 | echo "interpolating SSH key into kickstart file" 24 | ksout=$DTKBASE/generated/kickstart 25 | sshkey=$(<$DTKBASE/generated/kickstart/id_ed25519.pub) 26 | sed "s/REPLACE-WITH-SSHKEY/$sshkey/" $DTKBASE/kickstarts/$kickstart >| $ksout/ks.cfg 27 | 28 | # memory is MiB 29 | echo "creating VM" 30 | virt-install\ 31 | --noautoconsole\ 32 | --name $template_vmname\ 33 | --memory 2000\ 34 | --vcpus 2\ 35 | --disk size=30\ 36 | --cdrom $linux_iso_path\ 37 | --location $linux_iso_path\ 38 | --boot hd,cdrom\ 39 | --graphics none\ 40 | --os-variant $os_variant\ 41 | --initrd-inject $ksout/ks.cfg\ 42 | --extra-args="inst.ks=file:/ks.cfg console=tty0 console=ttyS0,115200n8" 43 | 44 | echo "waiting (indefinitely) for the machine to power off" 45 | while true; do 46 | virsh list --all | grep $template_vmname | grep -iv running && break || sleep 10s 47 | done 48 | 49 | echo "template VM fully provisioned" 50 | -------------------------------------------------------------------------------- /scripts/kvm/get-vm-ip: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | vm="$1" 4 | 5 | while true; do 6 | if virsh domifaddr $vm | grep -qE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'; then 7 | break 8 | fi 9 | sleep 1s 10 | done 11 | 12 | ip=$(virsh domifaddr $vm | grep -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | awk '{print $4}') 13 | echo "${ip%/*}" 14 | -------------------------------------------------------------------------------- /scripts/kvm/provision-vms: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | long_opts=create-template:,linux:,vboxdir:,template-vmname:,config:,os-variant: 6 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | 8 | create_template=0 9 | linux= 10 | vboxdir= 11 | template_vmname= 12 | config= 13 | os_variant= 14 | 15 | if ! 
parsed=$(xec parseargs $long_opts "$@"); then 16 | echo "$parsed" 17 | exit 1 18 | fi 19 | eval $(echo -e "$parsed") 20 | 21 | linux_iso_path= 22 | if [[ $linux == "alma8" ]]; then 23 | linux_iso_path=$ALMA8_ISO_FILE 24 | elif [[ $linux == "alma9" ]]; then 25 | linux_iso_path=$ALMA9_ISO_FILE 26 | elif [[ $linux == "rocky" ]]; then 27 | linux_iso_path=$ROCKY_ISO_FILE 28 | elif [[ $linux == "centos9" ]]; then 29 | linux_iso_path=$CENTOS9_ISO_FILE 30 | else 31 | echo "unknown/unspecified linux distro" 32 | exit 1 33 | fi 34 | 35 | # create-template-vm gens SSH keypair $DTKBASE/generated/kickstart/id_ed25519 36 | # if it does not already exist - which is used below when cloning the VM 37 | 38 | if [[ $create_template -eq 1 ]] ; then 39 | kickstart=$(yq .kvm.kickstart $config) 40 | echo "creating a template VM" 41 | $script_dir/create-template-vm\ 42 | --template-vmname=$template_vmname\ 43 | --linux-iso-path=$linux_iso_path\ 44 | --kickstart=$kickstart\ 45 | --os-variant=$os_variant 46 | fi 47 | 48 | vmcnt=$(yq '.vms | length' $config) 49 | vm_names=() 50 | 51 | for ((i = 0; i < $vmcnt; ++i)); do 52 | vm_name=$(yq .vms[$i].name $config) 53 | vm_cpu=$(yq .vms[$i].cpu $config) 54 | vm_mem=$(yq .vms[$i].mem $config) 55 | vm_ip=$(yq .vms[$i].ip $config) 56 | vm_disk=$(yq '.vms['$i'].disk // ""' $config) 57 | 58 | if [[ -n $vm_disk ]] && [[ $vm_disk -le 30 ]]; then 59 | echo "shrinking the disk is not supported - ignoring" 60 | vm_disk= 61 | fi 62 | 63 | $script_dir/clone-vm\ 64 | --priv-key=$DTKBASE/generated/kickstart/id_ed25519\ 65 | --template-vmname=$template_vmname\ 66 | --clone-vmname=$vm_name\ 67 | --clone-ram=$vm_mem\ 68 | --clone-cpus=$vm_cpu\ 69 | --clone-disk=$vm_disk 70 | 71 | vm_names+=($vm_name) 72 | done 73 | 74 | # do this now to avoid needing DNS before CoreDNS is installed 75 | echo "configuring /etc/hosts in VMs" 76 | xec kvm/configure-etc-hosts $DTKBASE/generated/kickstart/id_ed25519 "${vm_names[@]}" 77 | -------------------------------------------------------------------------------- /scripts/kvm/up-down-del: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | IFS=',' read -ra vms <<< "$2" 6 | for vm in "${vms[@]}"; do 7 | if [[ $1 == "up" ]]; then 8 | virsh start $vm 9 | elif [[ $1 == "down" ]]; then 10 | virsh shutdown $vm 11 | elif [[ $1 == "delete" ]]; then 12 | virsh shutdown $vm || : 13 | virsh destroy $vm || : 14 | while true; do 15 | if virsh undefine $vm --remove-all-storage; then 16 | break 17 | fi 18 | done 19 | fi 20 | done 21 | -------------------------------------------------------------------------------- /scripts/os/configure-firewall: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Firewall rules per: 4 | # https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#check-required-ports 5 | 6 | vmip="$1" 7 | priv_key="$2" 8 | 9 | # todo what combination of firewall rules will allow coredns to work on CentOS? 10 | # note UDP/9153 when kubectl get svc --namespace=kube-system ...? 11 | # https://medium.com/platformer-blog/kubernetes-on-centos-7-with-firewalld-e7b53c1316af 12 | # says --add-port=8472/udp and --add-masquerade --permanent on controller AND worker... 
13 | # but does not match kubernetes docs 14 | 15 | # interesting: 16 | # https://upcloud.com/community/tutorials/install-kubernetes-cluster-centos-8/ 17 | # https://docs.projectcalico.org/getting-started/kubernetes/requirements 18 | 19 | ssh -i "$priv_key" root@$vmip "systemctl stop firewalld && systemctl disable firewalld" 20 | exit 0 21 | 22 | if [[ "$3" == "--controller" ]]; then 23 | ssh -i "$priv_key" root@$vmip << EOF 24 | firewall-cmd --permanent --add-port=6443/tcp 25 | firewall-cmd --permanent --add-port=2379-2380/tcp 26 | firewall-cmd --permanent --add-port=10250/tcp 27 | firewall-cmd --permanent --add-port=10251/tcp 28 | firewall-cmd --permanent --add-port=10252/tcp 29 | systemctl restart firewalld 30 | EOF 31 | fi 32 | 33 | # UDP ports are experimental and don't work - CoreDNS nslookup fails 34 | if [[ "$3" == "--worker" ]]; then 35 | ssh -i "$priv_key" root@$vmip << EOF 36 | firewall-cmd --permanent --add-port=10250/tcp 37 | firewall-cmd --permanent --add-port=30000-32767/tcp 38 | firewall-cmd --permanent --add-port=53/udp 39 | firewall-cmd --permanent --add-port=5353/udp 40 | firewall-cmd --permanent --add-port=138/udp 41 | systemctl restart firewalld 42 | EOF 43 | fi 44 | -------------------------------------------------------------------------------- /scripts/os/desktop-kubernetes.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Desktop Kubernetes 3 | After=default.target 4 | 5 | [Service] 6 | Type=simple 7 | RemainAfterExit=yes 8 | ExecStart=/usr/bin/bash /root/desktop-kubernetes.sh 9 | TimeoutStartSec=0 10 | 11 | [Install] 12 | WantedBy=default.target 13 | -------------------------------------------------------------------------------- /scripts/os/desktop-kubernetes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | # 3 | # Configures the host only network adapter. Installs VirtualBox Guest Additions in 4 | # two steps with a reboot in between. Note - the Guest Additions install seems sensitive 5 | # to the Linux version. I got this working with Centos Stream / Rocky 9 then happened 6 | # to test w/ Centos 7 and it didn't work any more. It seems too much trouble to support 7 | # multiple versions. 8 | # 9 | 10 | set -e 11 | 12 | touch /root/desktop-kubernetes.log 13 | 14 | function msg() { 15 | echo "$(date '+%Y-%m-%d %H:%M:%S.%3N') [$(hostname)] -- $1"\ 16 | | tee -a /root/desktop-kubernetes.log 17 | } 18 | 19 | # If host only networking, the project mounts a config ISO to the cloned VM to 20 | # configure enp0s8 (the host-only interface.) Label CFGENP0S8 is assigned to this 21 | # ISO by 'scripts/virtualbox/gen-hostonly-ifcfg-iso'. If mounted, then copy the enp0s8 22 | # config file from the CD to the network config directory. This is how we configure the 23 | # network in each VM in the cluster with a different IPv4 address for host only 24 | # networking. If bridge networking, then the CD is not mounted to the clone and 25 | # so this if block is never entered. And - we never mount the ISO in the template 26 | # VM, only in a clone. 
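# For orientation: with the config ISO attached, 'blkid' emits a line of roughly
# this shape (illustrative values only) that the grep below keys on:
#
#   /dev/sr0: UUID="2023-01-01-00-00-00-00" LABEL="CFGENP0S8" TYPE="iso9660"
#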
27 | 28 | if blkid | grep 'LABEL="CFGENP0S8"'; then 29 | msg "begin network configuration" 30 | msg "copying host only network config file" 31 | mkdir -p /mnt/cdrom 32 | mount -r /dev/cdrom /mnt/cdrom 33 | yes | /bin/cp /mnt/cdrom/ifcfg-enp0s8 /etc/sysconfig/network-scripts/ifcfg-enp0s8 34 | umount /mnt/cdrom 35 | rm -rf /mnt/cdrom 36 | msg "host only network config file copied" 37 | msg "before restart enp0s8 interface" 38 | nmcli connection reload 39 | nmcli connection up enp0s8 || : 40 | msg "after restart enp0s8 interface" 41 | msg "completed network configuration - exiting" 42 | exit 0 43 | fi 44 | 45 | # Guest Additions install is done in the template when it is created. Thereafter, 46 | # the template is cloned to create each cluster VM and so each clone automatically 47 | # has guest additions. Guest Additions install logic was adapted from: 48 | # https://www.tecmint.com/install-virtualbox-guest-additions-on-centos-8/ 49 | 50 | if ! grep -q 'completed guest additions step 1' /root/desktop-kubernetes.log; then 51 | if grep -q 'begin guest additions step 1' /root/desktop-kubernetes.log; then 52 | msg "error - guest additions step 1 failed" 53 | exit 1 54 | fi 55 | msg "begin guest additions step 1" 56 | dnf -y install epel-release 57 | dnf -y remove kernel-devel 58 | dnf -y install gcc make perl kernel-devel kernel-headers bzip2 dkms 59 | dnf -y update kernel-* 60 | msg "completed guest additions step 1 - rebooting" 61 | reboot -f 62 | fi 63 | 64 | if ! grep -q 'completed guest additions step 2' /root/desktop-kubernetes.log; then 65 | if grep -q 'begin guest additions step 2' /root/desktop-kubernetes.log; then 66 | msg "error - guest additions step 2 failed" 67 | exit 1 68 | fi 69 | msg "begin guest additions step 2" 70 | mkdir -p /mnt/cdrom 71 | mount -r /dev/cdrom /mnt/cdrom 72 | /mnt/cdrom/VBoxLinuxAdditions.run 73 | umount /mnt/cdrom 74 | rm -rf /mnt/cdrom 75 | msg "completed guest additions step 2 - shutting down" 76 | shutdown now 77 | fi 78 | -------------------------------------------------------------------------------- /scripts/os/gen-kickstart-iso: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Creates an ISO from the script directory and writes the ISO into the project 'generated/kickstart' 4 | # directory. Expects the name of a kickstart config file in arg 1 in the same directory as the script. 
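# For example, with one of the kickstart configs shipped in the project's
# 'kickstarts' directory:
#
#   gen-kickstart-iso vbox.text.ks.cfg
#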
5 | # 6 | 7 | set -e 8 | 9 | kickstart=$1 10 | 11 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 12 | 13 | cp $script_dir/desktop-kubernetes.service $script_dir/desktop-kubernetes.sh $DTKBASE/generated/kickstart 14 | cp $DTKBASE/kickstarts/$kickstart $DTKBASE/generated/kickstart/ks.cfg 15 | 16 | rm -f $DTKBASE/generated/iso/kickstart.iso 17 | genisoimage -J -R -input-charset utf-8 -volid OEMDRV -o $DTKBASE/generated/iso/kickstart.iso $DTKBASE/generated/kickstart 18 | -------------------------------------------------------------------------------- /scripts/virtualbox/README.md: -------------------------------------------------------------------------------- 1 | As of VirtualBox 7.0.10, the vboxmanage utility seemed more prone to locking errors: 2 | 3 | ``` 4 | VBoxManage: error: The machine 'foo' is already locked for a session (or being unlocked) 5 | VBoxManage: error: Details: code VBOX_E_INVALID_OBJECT_STATE (0x80bb0007), component MachineWrap, interface IMachine, callee nsISupports 6 | VBoxManage: error: Context: "LockMachine(a->session, LockType_Write)" at line 640 of file VBoxManageModifyVM.cpp 7 | ``` 8 | 9 | Hence the various sleep statements in these scripts. 10 | 11 | -------------------------------------------------------------------------------- /scripts/virtualbox/clone-vm: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Creates a clone VM from the passed template VM. The cloned VM will have the passed 4 | # characteristics (VM name, mem, disk size). Unless shutdown=false, the VM will be shut down after it 5 | # is created. The host name of the VM will be set to the same value as the VM name. The clone_ram arg 6 | # is interpreted as megabytes. 7 | 8 | set -e 9 | 10 | long_opts=priv-key:,template-vmname:,clone-vmname:,clone-ram:,clone-cpus:,\ 11 | host-only-network:,host-only-octet:,vboxdir:,shutdown: 12 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 13 | 14 | priv_key= 15 | template_vmname= 16 | clone_vmname= 17 | clone_ram= 18 | clone_cpus= 19 | host_only_network= 20 | host_only_octet= 21 | vboxdir= 22 | shutdown=true 23 | 24 | if ! parsed=$(xec parseargs $long_opts "$@"); then 25 | echo "$parsed" 26 | exit 1 27 | fi 28 | eval $(echo -e "$parsed") 29 | 30 | echo "cloning VM \"$clone_vmname\" from template VM \"$template_vmname\"" 31 | VBoxManage clonevm $template_vmname\ 32 | --basefolder="$vboxdir"\ 33 | --mode=machine\ 34 | --name=$clone_vmname\ 35 | --register 36 | 37 | if [[ ! -z $host_only_network ]]; then 38 | echo "configure hostonly networking" 39 | sleep 10s && $script_dir/configure-hostonly-networking $clone_vmname $host_only_network $host_only_octet $clone_vmname 40 | fi 41 | 42 | echo "setting VM characteristics" 43 | sleep 10s && VBoxManage modifyvm $clone_vmname --cpus $clone_cpus --memory $clone_ram 44 | 45 | # this script section clears the IP address guest property after the cloned VM starts. What I experienced was - if 46 | # I started the template (i.e. for troubleshooting), it would pick up an IP address property and the clone would 47 | # have the same property value (the clone would appear to have the same IP address as the template for some period 48 | # of time until VirtualBox assigned a new one.) So this is a work-around. After clearing the IP address property, 49 | # a subsequent call to 'guestproperty get' blocks until VirtualBox gets around to assigning it a new IP address. 
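# (The property can be inspected by hand; illustrative output with a made-up address:
#   VBoxManage guestproperty get vm1 "/VirtualBox/GuestInfo/Net/0/V4/IP"
# prints 'Value: 192.168.56.101', or 'No value set!' right after the delete below.)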
50 | # Since the project can configure bridged networking with one adaptor - or host only + NAT with two adaptors, 51 | # the script clears two addresses and ignores any errors. 52 | 53 | echo "starting cloned VM" 54 | sleep 10s && VBoxManage startvm $clone_vmname 55 | $script_dir/wait-vm $clone_vmname --started 56 | 57 | echo "clearing existing IP address" 58 | VBoxManage guestproperty delete $clone_vmname "/VirtualBox/GuestInfo/Net/0/V4/IP" || : 59 | VBoxManage guestproperty delete $clone_vmname "/VirtualBox/GuestInfo/Net/1/V4/IP" || : 60 | 61 | echo "waiting for VM IP assignment - $host_only_network.$host_only_octet" 62 | while true; do 63 | clone_ip=$(xec virtualbox/get-vm-ip $clone_vmname) 64 | if [[ $clone_ip == "$host_only_network.$host_only_octet" ]]; then 65 | break 66 | fi 67 | sleep 3s 68 | done 69 | 70 | # this will be the very first occurrence of SSH'ing into the VM 71 | 72 | echo "setting the hostname for cloned VM" 73 | ssh-keygen -f ~/.ssh/known_hosts -R $clone_ip 74 | ssh -o "StrictHostKeyChecking no" -i $priv_key root@$clone_ip "hostnamectl set-hostname $clone_vmname" 75 | 76 | if [[ $shutdown == true ]]; then 77 | echo "shutting down cloned VM" 78 | sleep 10s && VBoxManage controlvm $clone_vmname acpipowerbutton 79 | $script_dir/wait-vm $clone_vmname --stopped 80 | sleep 10s 81 | fi 82 | 83 | echo "done cloning \"$clone_vmname\"" 84 | -------------------------------------------------------------------------------- /scripts/virtualbox/configure-etc-hosts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # configures /etc/hosts on each VM with the hostname of each VM. Usage: 4 | # configure-etc-hosts kickstart/id_ed25519 vm1 vm2 ... vmN 5 | 6 | set -e 7 | 8 | priv_key="$1" 9 | shift 10 | etc_hosts="" 11 | 12 | for vm in "$@" 13 | do 14 | ip=$(xec virtualbox/get-vm-ip $vm) 15 | etc_hosts="$etc_hosts$ip $vm\n" 16 | done 17 | 18 | for vm in "$@" 19 | do 20 | ip=$(xec virtualbox/get-vm-ip $vm) 21 | echo -e "$etc_hosts" | ssh -i $priv_key root@$ip "cat >> /etc/hosts" 22 | done 23 | -------------------------------------------------------------------------------- /scripts/virtualbox/configure-hostonly-networking: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Configures host only networking for the passed VM. Does this by calling 'gen-hostonly-ifcfg-iso' to create 4 | # an ISO containing a network config file. Mounts the ISO to the VM and starts the VM. The presence of this 5 | # ISO is detected by the desktop-kubernetes systemd service which copies it to /etc/sysconfig/network-scripts 6 | # in the VM. So this is how we configure each node in the cluster with a unique static IP address in the 7 | # host only network. 8 | # 9 | # The interface is named enp0s8 because the way this project works - for host only networking - a NAT interface is 10 | # configured in network position one, and the host only interface takes position two. As a result, the host only 11 | # network will pick up the name enp0s8. See https://forums.virtualbox.org/viewtopic.php?f=7&t=103195. Snip: 12 | # 13 | # "VirtualBox can emulate two chipsets: PIIX3 and ICH9 (see System > Motherboard > Chipset). Depending on the chipset, 14 | # VirtualBox defines the layout of the virtual PCI busses that are provided to the guest. When using the PIIX3 chipset 15 | # (default), the first four network adapters are devices 3, 8, 9 and 10 on PCI bus 0. 
On most modern Linux 16 | # distributions, the network adapters are enumerated on the PCI busses and named enp0s3, enp0s8, enp0s9 17 | # and enp0s10 accordingly. ... In consequence, the naming is indeed deterministic and predictable." 18 | # 19 | # The NAT interface is used for external access for the VM (i.e. internet) and the host only network implements the 20 | # k8s cluster network. 21 | # 22 | # Usage: configure-hostonly-networking <vmname> <network-octets> <host-octet> <clone-vmname> 23 | # 24 | # ...where <network-octets> are the left three octets of an IPv4 address, and <host-octet> is the right-most 25 | # octet of the IP address to configure the adaptor with. E.g.: 26 | # 27 | # configure-hostonly-networking my-cloned-vm 200.200.200 202 my-cloned-vm 28 | # 29 | 30 | set -e 31 | 32 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 33 | 34 | vmname="$1" 35 | network_octets="$2" 36 | host_octet="$3" 37 | clone_vmname="$4" 38 | 39 | # create ifcfg ISO with IP Address from passed args 40 | echo "creating ifcfg-enp0s8.iso with network $network_octets and octet $host_octet" 41 | $script_dir/gen-hostonly-ifcfg-iso $network_octets $host_octet 42 | 43 | # mount ISO to VM 44 | echo "attaching ifcfg-enp0s8 ISO to VM $vmname" 45 | sleep 5s && VBoxManage storageattach $vmname --storagectl IDE --port 0 --device 0 --type dvddrive\ 46 | --medium $DTKBASE/generated/iso/ifcfg-enp0s8.iso 47 | 48 | # start VM - this will cause the desktop-kubernetes systemd service to copy the config file 'ifcfg-enp0s8' from 49 | # the ISO to the /etc/sysconfig/network-scripts directory in the guest which configures network interface. See 50 | # 'scripts/os/desktop-kubernetes.sh' 51 | echo "starting VM $vmname" 52 | VBoxManage startvm $vmname 53 | 54 | echo "waiting indefinitely for VM IP assignment" 55 | while true; do 56 | clone_ip=$(xec virtualbox/get-vm-ip $clone_vmname) 57 | if [[ $clone_ip == "$network_octets.$host_octet" ]]; then 58 | break 59 | fi 60 | sleep 3s 61 | done 62 | 63 | echo "stopping VM $vmname" 64 | VBoxManage controlvm $vmname acpipowerbutton 65 | 66 | # unmount 67 | echo "VM $vmname stopped - detaching ifcfg-enp0s8 ISO" 68 | 69 | sleep 10s && VBoxManage storageattach $vmname --storagectl IDE --port 0 --device 0 --medium none 70 | -------------------------------------------------------------------------------- /scripts/virtualbox/create-template-vm: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # Creates a template VM suitable to clone into a cluster VM. Specifically: 5 | # 6 | # 1. *may* generate SSH key pair into the 'generated/kickstart' directory to support SSH access to the VM 7 | # 2. generates the kickstart ISO into the 'generated/iso' directory 8 | # 3. creates a VM from the passed Linux ISO and generated kickstart ISO 9 | # 4. installs guest additions from the passed ISO to support other scripts getting the IP address from the VM 10 | # 11 | 12 | set -e 13 | 14 | long_opts=template-vmname:,linux-iso-path:,guest-additions-path:,host-network-interface:,host-only-network:,vboxdir:,kickstart: 15 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 16 | 17 | template_vmname= 18 | linux_iso_path= 19 | guest_additions_path= 20 | host_network_interface= 21 | host_only_network= 22 | vboxdir= 23 | kickstart= 24 | 25 | #internal 26 | host_only_network_name= 27 | 28 | if ! 
parsed=$(xec parseargs $long_opts "$@"); then
29 | echo "$parsed"
30 | exit 1
31 | fi
32 | eval $(echo -e "$parsed")
33 | 
34 | echo "checking/generating SSH keys -- the public key will be copied to the VM by the kickstart script"
35 | xec gen-ssh-keyfiles
36 | 
37 | echo "generating kickstart ISO"
38 | xec gen-kickstart-iso $kickstart
39 | kickstart_iso_path=$DTKBASE/generated/iso/kickstart.iso
40 | 
41 | if [[ ! -z "$host_only_network" ]]; then
42 | echo "getting or creating host only network $host_only_network"
43 | host_only_network_name=$($script_dir/get-or-create-hostonly-network $host_only_network)
44 | echo "host only network: $host_only_network_name"
45 | fi
46 | 
47 | echo "creating VM"
48 | $script_dir/create-vm --template-vmname=$template_vmname\
49 | --linux-iso-path=$linux_iso_path\
50 | --kickstart-iso-path=$kickstart_iso_path\
51 | --host-network-interface=$host_network_interface\
52 | --host-only-network-name=$host_only_network_name\
53 | --vboxdir=$vboxdir
54 | 
55 | echo "installing guest additions"
56 | $script_dir/install-guest-additions\
57 | --template-vmname=$template_vmname\
58 | --guest-additions-path=$guest_additions_path
59 | 
60 | echo "guest additions installation completed - template VM fully provisioned"
--------------------------------------------------------------------------------
/scripts/virtualbox/create-vm:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | #
4 | # Creates a Linux VM from a Linux ISO, and installs the OS using Kickstart from a Kickstart ISO.
5 | # The intent is that this VM will be the template VM from which the cluster VMs are subsequently cloned.
6 | # Expects ISO files on the file system. Configures bridged networking or host only networking based on
7 | # passed options. This script just creates the VM. To install guest additions, you would need to call
8 | # 'install-guest-additions' (which is done by the 'create-template-vm' script.)
9 | #
10 | 
11 | set -e
12 | 
13 | long_opts=template-vmname:,linux-iso-path:,kickstart-iso-path:,host-network-interface:,host-only-network-name:,vboxdir:
14 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
15 | 
16 | template_vmname=
17 | linux_iso_path=
18 | kickstart_iso_path=
19 | host_network_interface=
20 | host_only_network_name=
21 | vboxdir=
22 | 
23 | if ! parsed=$(xec parseargs $long_opts "$@"); then
24 | echo "$parsed"
25 | exit 1
26 | fi
27 | eval $(echo -e "$parsed")
28 | 
29 | if [[ ! -z "$host_network_interface" ]] && [[ ! -z "$host_only_network_name" ]]; then
30 | echo "specify either --host-only-network-name or --host-network-interface - not both"
31 | exit 1
32 | elif [[ -z "$host_network_interface" ]] && [[ -z "$host_only_network_name" ]]; then
33 | echo "specify either --host-only-network-name or --host-network-interface"
34 | exit 1
35 | fi
36 | 
37 | echo "creating VM"
38 | 
39 | VBoxManage createvm\
40 | --name $template_vmname\
41 | --ostype RedHat_64\
42 | --register\
43 | --basefolder $vboxdir
44 | 
45 | # create the VM with minimal CPU & RAM
46 | VBoxManage modifyvm $template_vmname\
47 | --ioapic on\
48 | --cpus 2\
49 | --memory 4096\
50 | --vram 128\
51 | --graphicscontroller vmsvga\
52 | --boot1 dvd
53 | 
54 | # if bridged, there is only one network adaptor. If host only, then nic1 is the NAT network for internet access, and
55 | # nic2 is the host only network adaptor. See the kickstart file for related configuration. 
If the caller passes the
56 | # host network interface then that means bridged networking is desired because bridged networking requires the
57 | # host's interface name. If the caller passes the name of the host only network, then that means host only.
58 | 
59 | if [[ ! -z "$host_network_interface" ]]; then
60 | echo "configuring template VM for bridged networking with host network interface: $host_network_interface"
61 | VBoxManage modifyvm $template_vmname\
62 | --nic1 bridged\
63 | --nictype1 82540EM\
64 | --cableconnected1 on\
65 | --nicpromisc1 deny\
66 | --macaddress1 auto\
67 | --bridgeadapter1 $host_network_interface
68 | elif [[ ! -z "$host_only_network_name" ]]; then
69 | echo "configuring template VM for host only networking with host only network name: $host_only_network_name"
70 | VBoxManage modifyvm $template_vmname\
71 | --nic1 nat\
72 | --nictype1 82540EM\
73 | --cableconnected1 on\
74 | --nicpromisc1 deny\
75 | --macaddress1 auto\
76 | --nic2 hostonly\
77 | --cableconnected2 on\
78 | --macaddress2 auto\
79 | --hostonlyadapter2 $host_only_network_name
80 | fi
81 | 
82 | # create and attach a hard drive
83 | VBoxManage createhd --filename $vboxdir/$template_vmname/$template_vmname.vdi --size 40000 --format VDI
84 | VBoxManage storagectl $template_vmname --name SATA --add sata --controller IntelAHCI
85 | VBoxManage storageattach $template_vmname --storagectl SATA --port 1 --device 0 --type hdd\
86 | --medium $vboxdir/$template_vmname/$template_vmname.vdi
87 | 
88 | # configure and attach the Linux installation ISO
89 | VBoxManage storagectl $template_vmname --name IDE --add ide --controller PIIX4
90 | VBoxManage storageattach $template_vmname --storagectl IDE --port 1 --device 0 --type dvddrive\
91 | --medium $linux_iso_path
92 | 
93 | # attach the kickstart iso created by the gen-kickstart-iso script
94 | VBoxManage storageattach $template_vmname --storagectl IDE --port 1 --device 1 --type dvddrive\
95 | --medium $kickstart_iso_path
96 | 
97 | # set boot from DVD
98 | VBoxManage modifyvm $template_vmname --boot1 dvd --boot2 disk --boot3 none --boot4 none
99 | 
100 | echo "starting VM to begin kickstart installation"
101 | VBoxManage startvm $template_vmname
102 | 
103 | echo "waiting for VM to start"
104 | $script_dir/wait-vm $template_vmname --started
105 | 
106 | echo "waiting for kickstart to complete and VM to stop"
107 | $script_dir/wait-vm $template_vmname --stopped
108 | 
109 | # give VirtualBox a moment to settle or the detach calls below fail
110 | sleep 5s
111 | 
112 | echo "detaching ISO files from VM"
113 | VBoxManage storageattach $template_vmname --storagectl IDE --port 1 --device 0 --medium none
114 | VBoxManage storageattach $template_vmname --storagectl IDE --port 1 --device 1 --medium none
115 | 
--------------------------------------------------------------------------------
/scripts/virtualbox/gen-hostonly-ifcfg-iso:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | #
4 | # Creates an ISO that the desktop-kubernetes service (installed by kickstart) can use to configure
5 | # host only networking. The label - CFGENP0S8 - that is assigned to the ISO is important:
6 | # it is checked by the desktop-kubernetes.sh script that is run by the desktop-kubernetes
7 | # systemd unit.
8 | #
9 | # This script is structurally a clone of gen-kickstart-iso. See that script for additional info.
10 | #
11 | # Usage:
12 | # gen-hostonly-ifcfg-iso <network-octets> <host-octet>
13 | 
14 | # ...where <network-octets> are the left three octets of an IPv4 address, and <host-octet> is the right-most
15 | # octet of the IP address to configure the adaptor with. E.g.:
16 | #
17 | # gen-hostonly-ifcfg-iso 200.200.200 202
18 | #
19 | # The example above would configure the adaptor with IPv4 address 200.200.200.202. Note - don't specify '1'
20 | # as the host octet because that's reserved for the gateway in the ifcfg file generated by this script. The
21 | # octets aren't validated by this script - caller must ensure they are valid.
22 | #
23 | 
24 | set -e
25 | 
26 | if [[ "$#" -ne 2 ]]; then
27 | echo "usage: gen-hostonly-ifcfg-iso NETWORK HOST"
28 | exit 1
29 | fi
30 | 
31 | # create a config file with the octets patched in
32 | cat <<EOF | sed -e "s|NETWORK-OCTETS|$1|g" -e "s|HOST-OCTET|$2|g" >| $DTKBASE/generated/hostonly-netcfg/ifcfg-enp0s8
33 | TYPE=Ethernet
34 | DEVICE=enp0s8
35 | ONBOOT=yes
36 | BOOTPROTO=none
37 | NAME="enp0s8"
38 | IPADDR=NETWORK-OCTETS.HOST-OCTET
39 | NETMASK=255.255.255.0
40 | NETWORK=NETWORK-OCTETS.0
41 | BROADCAST=NETWORK-OCTETS.255
42 | GATEWAY=NETWORK-OCTETS.1
43 | EOF
44 | 
45 | # create the ISO
46 | rm -f $DTKBASE/generated/iso/ifcfg-enp0s8.iso
47 | genisoimage -J -R -input-charset utf-8 -volid CFGENP0S8 -o $DTKBASE/generated/iso/ifcfg-enp0s8.iso $DTKBASE/generated/hostonly-netcfg
48 | 
--------------------------------------------------------------------------------
/scripts/virtualbox/get-or-create-hostonly-network:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | #
4 | # Creates - or returns the name of - a host only network using the passed octets. The passed octets
5 | # are the left three octets of an IPv4 address. E.g.: 192.168.55.
6 | #
7 | # If there is an existing VirtualBox host only network with these left three octets, its name is
8 | # returned by the script. Otherwise a new host only network with these left three octets is
9 | # created and the new name is returned.
10 | #
11 | # Please note that VirtualBox imposes constraints on the IP addresses you can use for a host
12 | # only network.
13 | #
14 | # Usage:
15 | # get-or-create-hostonly-network <network-octets>
16 | #
17 | # Example:
18 | # get-or-create-hostonly-network 192.168.55
19 | 
20 | set -e
21 | 
22 | if [[ "$#" -ne 1 ]]; then
23 | 1>&2 echo "usage: get-or-create-hostonly-network <network-octets>"
24 | exit 1
25 | fi
26 | 
27 | # first see if there is already a host only network matching the octets
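28 | #
29 | # E.g. output of 'VBoxManage list hostonlyifs' (abridged; the name and address are illustrative):
30 | #
31 | #   Name: vboxnet0
32 | #   ...
33 | #   IPAddress: 192.168.56.1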
-z "$octets" ]]; then 37 | if [[ "$octets" == "$1" ]]; then 38 | # a network exists: return network name to caller 39 | echo "$name" 40 | exit 0 41 | else 42 | name= 43 | octets= 44 | fi 45 | fi 46 | done < <(VBoxManage list hostonlyifs) 47 | 48 | # the new host only network name to be generated by VirtualBox that will be returned to the caller 49 | netname= 50 | 51 | # the progress line goes to stderr and the cmd output goes to stdout but mask stderr so all the caller sees is 52 | # the net name created by vbox 53 | output=$(VBoxManage hostonlyif create 2>/dev/null) 54 | 55 | # parse the created network name 56 | regex="Interface '([0-9a-z]+)' was successfully created" 57 | [[ $output =~ $regex ]] && netname="${BASH_REMATCH[1]}" 58 | 59 | # set the static network address 60 | VBoxManage hostonlyif ipconfig $netname --ip $1.1 --netmask 255.255.255.0 61 | 62 | # return newly created network name to caller 63 | echo $netname 64 | -------------------------------------------------------------------------------- /scripts/virtualbox/get-vm-ip: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | vm="$1" 4 | 5 | netnum= 6 | 7 | while [[ -z $netnum ]]; do 8 | for interface in 0 1; do 9 | netname=$(VBoxManage guestproperty get "$vm" "/VirtualBox/GuestInfo/Net/$interface/Name" 2>/dev/null | awk '{print $2}') 10 | if [[ $netname == "enp0s3" ]]; then 11 | netnum=$interface 12 | elif [[ $netname == "enp0s8" ]]; then 13 | netnum=$interface 14 | # if we find enp0s8 we're done 15 | break 16 | fi 17 | done 18 | done 19 | 20 | ip=$(VBoxManage guestproperty get "$vm" "/VirtualBox/GuestInfo/Net/$netnum/V4/IP" 2>/dev/null) 21 | 22 | echo "$ip" | awk '{print $2}' 23 | -------------------------------------------------------------------------------- /scripts/virtualbox/install-guest-additions: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Mount the guest additions ISO and boot the VM. On boot, the desktop-kubernetes service that was 4 | # installed by the kickstart script will run, and install the guest additions unattended, booting the VM 5 | # when needed. To watch the guest additions being installed, you can log into the VM, open a terminal, 6 | # and: 'journalctl -u desktop-kubernetes -f'. Upon return from this script, the guest additions install 7 | # has completed, the VM is shut down, and the guest additions ISO is detached. 8 | 9 | set -e 10 | 11 | long_opts=template-vmname:,guest-additions-path: 12 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 13 | 14 | template_vmname= 15 | guest_additions_path= 16 | 17 | if ! 
parsed=$(xec parseargs $long_opts "$@"); then
18 | echo "$parsed"
19 | exit 1
20 | fi
21 | eval $(echo -e "$parsed")
22 | 
23 | # attach guest additions ISO and set boot from disk
24 | VBoxManage storageattach $template_vmname --storagectl IDE --port 0 --device 0 --type dvddrive\
25 | --medium $guest_additions_path
26 | VBoxManage modifyvm $template_vmname --boot1 disk --boot2 none --boot3 none --boot4 none
27 | 
28 | # Once the guest additions are installed it will be possible to get the IP address from the VM
29 | echo "starting VM to begin guest additions installation"
30 | VBoxManage startvm $template_vmname
31 | 
32 | echo "waiting for the VM to start and install guest additions"
33 | $script_dir/wait-vm $template_vmname --started
34 | 
35 | echo "waiting for the VM to stop - the guest additions install script performs a shutdown when it is done"
36 | $script_dir/wait-vm $template_vmname --stopped
37 | sleep 5s
38 | 
39 | echo "detaching guest additions ISO"
40 | VBoxManage storageattach $template_vmname --storagectl IDE --port 0 --device 0 --medium none
41 | 
--------------------------------------------------------------------------------
/scripts/virtualbox/provision-vms:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Provisions VBox VMs. If --create-template then creates a template. Otherwise expects a
4 | # VM named by the --template-vmname param to exist. Clones that template VM into one or
5 | # more VMs based on the config.yaml per --config arg, as sketched below.
6 | #
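7 | # An illustrative 'vms' section from config.yaml (names and sizes are arbitrary; 'ip' is the
8 | # host only octet to assign to the VM):
9 | #
10 | #   vms:
11 | #     - name: vm1
12 | #       cpu: 2
13 | #       mem: 4096
14 | #       ip: 200
15 | #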
16 | 
17 | set -e
18 | 
19 | long_opts=create-template:,linux:,host-network-interface:,host-only-network:,vboxdir:,template-vmname:,config:
20 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
21 | 
22 | create_template=0
23 | linux=
24 | host_network_interface=
25 | host_only_network=
26 | vboxdir=
27 | template_vmname=
28 | config=
29 | 
30 | if ! parsed=$(xec parseargs $long_opts "$@"); then
31 | echo "$parsed"
32 | exit 1
33 | fi
34 | eval $(echo -e "$parsed")
35 | 
36 | linux_iso_path=
37 | if [[ $linux == "alma8" ]]; then
38 | linux_iso_path=$ALMA8_ISO_FILE
39 | elif [[ $linux == "alma9" ]]; then
40 | linux_iso_path=$ALMA9_ISO_FILE
41 | elif [[ $linux == "rocky" ]]; then
42 | linux_iso_path=$ROCKY_ISO_FILE
43 | elif [[ $linux == "centos9" ]]; then
44 | linux_iso_path=$CENTOS9_ISO_FILE
45 | else
46 | echo "unknown/unspecified linux distro"
47 | exit 1
48 | fi
49 | 
50 | # create-template-vm gens SSH keypair $DTKBASE/generated/kickstart/id_ed25519
51 | # if it does not already exist - which is used below when cloning the VM
52 | 
53 | if [[ $create_template -eq 1 ]] ; then
54 | kickstart=$(yq .vbox.kickstart $config)
55 | echo "creating a template VM"
56 | $script_dir/create-template-vm\
57 | --template-vmname=$template_vmname\
58 | --linux-iso-path=$linux_iso_path\
59 | --guest-additions-path=$GUEST_ADDITIONS_ISO_FILE\
60 | --host-network-interface=$host_network_interface\
61 | --host-only-network=$host_only_network\
62 | --vboxdir=$vboxdir\
63 | --kickstart=$kickstart
64 | fi
65 | 
66 | vmcnt=$(yq '.vms | length' $config)
67 | vm_names=()
68 | 
69 | for ((i = 0; i < $vmcnt; ++i)); do
70 | vm_name=$(yq .vms[$i].name $config)
71 | vm_cpu=$(yq .vms[$i].cpu $config)
72 | vm_mem=$(yq .vms[$i].mem $config)
73 | vm_ip=$(yq .vms[$i].ip $config)
74 | 
75 | $script_dir/clone-vm\
76 | --priv-key=$DTKBASE/generated/kickstart/id_ed25519\
77 | --template-vmname=$template_vmname\
78 | --clone-vmname=$vm_name\
79 | --clone-ram=$vm_mem\
80 | --clone-cpus=$vm_cpu\
81 | --host-only-network=$host_only_network\
82 | --host-only-octet=$vm_ip\
83 | --vboxdir=$vboxdir\
84 | --shutdown=false
85 | 
86 | vm_names+=($vm_name)
87 | done
88 | 
89 | # do this now to avoid needing DNS before CoreDNS is installed
90 | echo "configuring /etc/hosts in VMs"
91 | xec virtualbox/configure-etc-hosts $DTKBASE/generated/kickstart/id_ed25519 "${vm_names[@]}"
92 | 
--------------------------------------------------------------------------------
/scripts/virtualbox/start-vm:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Start a VM if it is not already running. VM name in arg 1. 
4 | #
5 | # Usage
6 | #
7 | # start-vm <vmname>
8 | #
9 | 
10 | vmname=$1
11 | 
12 | run_count=$(VBoxManage list runningvms | awk '{print $1}' | grep -e'"'$vmname'"' | wc -l)
13 | 
14 | if [[ $run_count -eq 0 ]]; then
15 | echo "Starting VM $vmname"
16 | VBoxManage startvm $vmname
17 | fi
18 | 
--------------------------------------------------------------------------------
/scripts/virtualbox/up-down-del:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Usage:
4 | #
5 | # up-down-del [up | down | delete] <vm1,vm2,...,vmN> <config>
6 | #
7 | # E.g.:
8 | #
9 | # up-down-del up vm1,vm2,vm3 ~/my-config.yaml
10 | #
11 | 
12 | set -e
13 | 
14 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
15 | 
16 | if [[ $1 == "up" ]]; then
17 | IFS=',' read -ra vms <<< "$2"
18 | for vm in "${vms[@]}"; do
19 | VBoxManage startvm $vm
20 | done
21 | sleep 5s
22 | elif [[ $1 == "down" ]]; then
23 | IFS=',' read -ra vms <<< "$2"
24 | for vm in "${vms[@]}"; do
25 | echo "shutting down $vm"
26 | VBoxManage controlvm $vm acpipowerbutton
27 | $script_dir/wait-vm $vm --stopped
28 | done
29 | elif [[ $1 == "delete" ]]; then
30 | IFS=',' read -ra vms <<< "$2"
31 | for vm in "${vms[@]}"; do
32 | echo "shutting down $vm if running"
33 | VBoxManage controlvm $vm poweroff &>/dev/null && $script_dir/wait-vm $vm --stopped || echo "(not running)"
34 | echo "removing $vm"
35 | sleep 5s
36 | VBoxManage unregistervm $vm --delete
37 | done
38 | fi
39 | 
--------------------------------------------------------------------------------
/scripts/virtualbox/wait-vm:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Waits (forever) for a VM to be either stopped or started based on the passed option of '--stopped'
4 | # or '--started'. Usage:
5 | #
6 | # wait-vm MYVMNAME [ --started | --stopped ]
7 | 
8 | vm=
9 | action=
10 | 
11 | for var in "$@"; do
12 | if [[ "${var:0:1}" == - ]]; then
13 | action="$var"
14 | else
15 | vm="$var"
16 | fi
17 | done
18 | 
19 | if [[ -z "$vm" ]]; then
20 | echo "no VM name supplied"
21 | exit 1
22 | fi
23 | 
24 | expected=
25 | if [[ "$action" == "--started" ]]; then
26 | expected=1
27 | elif [[ "$action" == "--stopped" ]]; then
28 | expected=0
29 | else
30 | echo "unknown option: $action. Supported options: --started and --stopped"
31 | exit 1
32 | fi
33 | 
34 | if ! vboxmanage showvminfo "$vm" &>/dev/null; then
35 | echo "no VM found matching name: $vm"
36 | exit 1
37 | fi
38 | 
39 | while true; do
40 | running=$(vboxmanage list runningvms | grep '"'"$vm"'"' | wc -l)
41 | if [[ $expected -eq $running ]]; then
42 | exit 0
43 | fi
44 | done
45 | 
--------------------------------------------------------------------------------
/scripts/vm/gen-ssh-keyfiles:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Key pair eventually gets wired into the template VM. So the private key can be used to SSH
4 | # into any VM cloned from the template.
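5 | # E.g. (the IP is illustrative - substitute the address of any cloned VM):
6 | #
7 | #   ssh -i $DTKBASE/generated/kickstart/id_ed25519 root@200.200.200.201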
8 | 
9 | set -e
10 | 
11 | if [[ ! -f $DTKBASE/generated/kickstart/id_ed25519 ]] || [[ ! -f $DTKBASE/generated/kickstart/id_ed25519.pub ]]; then
12 | echo "generating key pair"
13 | ssh-keygen -t ed25519 -N '' -f $DTKBASE/generated/kickstart/id_ed25519 <<<y
14 | fi
15 | 
--------------------------------------------------------------------------------
/scripts/worker/containerd/install-containerd:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | 
5 | long_opts=priv-key:,worker-ip:,containerd-binary:,pod-cidr:,config:
6 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
7 | 
8 | priv_key=
9 | worker_ip=
10 | containerd_binary=
11 | pod_cidr=
12 | config=
13 | 
14 | if ! parsed=$(xec parseargs $long_opts "$@"); then
15 | echo "$parsed"
16 | exit 1
17 | fi
18 | eval $(echo -e "$parsed")
19 | 
20 | echo "Installing containerd on worker VM"
21 | cat $containerd_binary | ssh -i $priv_key root@$worker_ip\
22 | "tar zxvf - --no-same-owner --strip-components 1 -C /usr/local/bin/"
23 | scp -i $priv_key $script_dir/containerd.service root@$worker_ip:/etc/systemd/system/
24 | ssh -i $priv_key root@$worker_ip "mkdir -p /etc/containerd"
25 | 
26 | echo "Copying containerd config to worker VM"
27 | 
28 | # To update config.toml: run 'containerd config default >| config.toml' AFTER containerd installed,
29 | # then copy back into this project plus tweak cgroup:
30 | scp -i $priv_key $script_dir/config.toml root@$worker_ip:/etc/containerd/
31 | 
32 | # July 2022 per https://kubernetes.io/docs/setup/production-environment/container-runtimes/
33 | echo "Configuring networking"
34 | ssh -i $priv_key root@$worker_ip << EOF
35 | echo overlay > /etc/modules-load.d/k8s.conf
36 | echo br_netfilter >> /etc/modules-load.d/k8s.conf
37 | 
38 | modprobe overlay
39 | modprobe br_netfilter
40 | 
41 | echo net.bridge.bridge-nf-call-iptables = 1 >> /etc/sysctl.d/k8s.conf
42 | echo net.bridge.bridge-nf-call-ip6tables = 1 >> /etc/sysctl.d/k8s.conf
43 | echo net.ipv4.ip_forward = 1 >> /etc/sysctl.d/k8s.conf
44 | 
45 | sysctl --system
46 | EOF
47 | 
48 | # 20-containerd-net.conflist based on: https://github.com/containerd/containerd/blob/main/script/setup/install-cni
49 | # name it "20-" because Calico and Cilium lay down a "10-" and so this guarantees they will sort higher if installed.
50 | # This containerd .conflist can run the cluster fine without Calico/Cilium but requires kube-proxy to be installed
51 | # if running without Calico/Cilium.
52 | 
53 | echo "Copying 20-containerd-net.conflist to /etc/cni/net.d/ in worker VM"
54 | ssh -i $priv_key root@$worker_ip "mkdir -p /etc/cni/net.d /var/log/desktop-kubernetes/cni"
55 | sed $script_dir/20-containerd-net.conflist\
56 | -e "s|PODCIDR|$pod_cidr|g" | ssh -i $priv_key root@$worker_ip "cat > /etc/cni/net.d/20-containerd-net.conflist"
57 | 
58 | # configure containerd mirroring
59 | 
60 | mirror=$(yq .k8s.containerd-mirror.name $config)
61 | if [[ -n $mirror && $mirror != "null" ]]; then
62 | echo "Configure containerd mirroring"
63 | ssh -i $priv_key root@$worker_ip "mkdir -p /etc/containerd/certs.d/$mirror"
64 | mirrorcfg=$(yq .k8s.containerd-mirror.config $config)
65 | echo "$mirrorcfg" | ssh -i $priv_key root@$worker_ip "cat >| /etc/containerd/certs.d/$mirror/hosts.toml"
66 | fi
67 | 
68 | echo "Starting containerd service"
69 | ssh -i $priv_key root@$worker_ip "systemctl daemon-reload && systemctl enable --now containerd"
70 | 
71 | echo "Verifying containerd is running"
72 | set +e
73 | for i in {1..10}; do
74 | ssh -i $priv_key root@$worker_ip "systemctl is-active --quiet containerd" && break
75 | sleep 1s
76 | done
77 | 
78 | echo "No errors detected with containerd installation"
79 | 
80 | # this section of code populates the containerd cache on each host if image archives are present
81 | # in the 'images' dir of this project to minimize the impact of Docker rate-limiting. See
82 | # the 'hack/create-image-archive' script.
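83 | #
84 | # E.g. (image and tag illustrative - any archive produced by 'docker save' works):
85 | #
86 | #   docker pull nginx:1.25
87 | #   docker save nginx:1.25 -o $DTKBASE/images/nginx.tar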
88 | 
89 | imagecnt=$(find $DTKBASE/images \( -name '*.tar' -o -name '*.tgz' \) | wc -l)
90 | 
91 | if [[ $imagecnt -ne 0 ]]; then
92 | echo "Copying images to host"
93 | ssh -i $priv_key root@$worker_ip "mkdir -p /tmp/images"
94 | find $DTKBASE/images \( -name '*.tar' -o -name '*.tgz' \) | while read image; do
95 | scp -i $priv_key $image root@$worker_ip:/tmp/images/
96 | done
97 | echo "Importing images into host containerd cache"
98 | scp -i $priv_key $script_dir/import-images root@$worker_ip:/tmp/images/
99 | ssh -i $priv_key root@$worker_ip "chmod +x /tmp/images/import-images && /tmp/images/import-images"
100 | fi
101 | 
--------------------------------------------------------------------------------
/scripts/worker/kube-proxy/install-kube-proxy:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | 
5 | long_opts=kube-proxy-binary:,priv-key:,containerized-cplane:,admin-kubeconfig:,worker-hostname:,cluster-cidr:,worker-ip:
6 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
7 | 
8 | kube_proxy_binary=
9 | priv_key=
10 | containerized_cplane=0
11 | admin_kubeconfig=
12 | worker_hostname=
13 | cluster_cidr=
14 | worker_ip=
15 | 
16 | if ! parsed=$(xec parseargs $long_opts "$@"); then
17 | echo "$parsed"
18 | exit 1
19 | fi
20 | eval $(echo -e "$parsed")
21 | 
22 | if [[ "$containerized_cplane" == *kube-proxy* || "$containerized_cplane" == all ]]; then
23 | containerized_cplane=1
24 | fi
25 | 
26 | echo "configuring kube-proxy for node $worker_hostname (ip $worker_ip)"
27 | ssh -i $priv_key root@$worker_ip "mkdir -p /var/lib/kube-proxy/kubeconfig"
28 | sed $script_dir/kube-proxy-config.yaml -e "s|CLUSTER_CIDR|$cluster_cidr|g"\
29 | | ssh -i $priv_key root@$worker_ip "cat > /var/lib/kube-proxy/kube-proxy-config.yaml"
30 | scp -i $priv_key $DTKBASE/generated/kubeconfig/kube-proxy.kubeconfig root@$worker_ip:/var/lib/kube-proxy/kubeconfig/
31 | 
32 | ssh -i $priv_key root@$worker_ip << EOF
33 | echo -e "[keyfile]\nunmanaged-devices=interface-name:cni*;interface-name:veth*" >| /etc/NetworkManager/conf.d/dtk.conf
34 | systemctl daemon-reload && systemctl restart NetworkManager
35 | EOF
36 | 
37 | if [[ $containerized_cplane -eq 0 ]]; then
38 | scp -i $priv_key $kube_proxy_binary root@$worker_ip:/usr/local/bin/kube-proxy
39 | scp -i $priv_key $script_dir/kube-proxy.service root@$worker_ip:/etc/systemd/system/kube-proxy.service
40 | ssh -i $priv_key root@$worker_ip "systemctl daemon-reload && systemctl enable --now kube-proxy"
41 | echo "verifying kube-proxy is running"
42 | set +e
43 | for i in {1..10}; do
44 | ssh -i "$priv_key" root@$worker_ip "systemctl is-active --quiet kube-proxy" && break
45 | sleep 1s
46 | done
47 | else
48 | echo "copying kube-proxy static pod manifest to worker VM"
49 | sed $script_dir/kube-proxy-pod.yaml -e "s|K8S_VER|$K8S_VER|g"\
50 | | ssh -i $priv_key root@$worker_ip "cat > /etc/desktop-kubernetes/static-pods/kube-proxy-pod.yaml"
51 | for i in {1..20}; do
52 | if kubectl --kubeconfig $admin_kubeconfig -n kube-system wait pod\
53 | -lcomponent=kube-proxy --for=condition=ready --timeout=5s; then
54 | break
55 | elif [[ $i -eq 20 ]]; then
56 | echo "Can't verify kube-proxy installation"
57 | exit 1
58 | fi
59 | sleep 5s
60 | done
61 | fi
62 | echo "no errors detected with kube-proxy installation for VM $worker_ip"
63 | 
--------------------------------------------------------------------------------
/scripts/worker/kube-proxy/kube-proxy-config.yaml:
-------------------------------------------------------------------------------- 1 | kind: KubeProxyConfiguration 2 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 3 | clientConnection: 4 | kubeconfig: "/var/lib/kube-proxy/kubeconfig/kube-proxy.kubeconfig" 5 | mode: "iptables" 6 | metricsBindAddress: "0.0.0.0:10249" 7 | clusterCIDR: "CLUSTER_CIDR" 8 | #iptables: 9 | # masqueradeAll: true -------------------------------------------------------------------------------- /scripts/worker/kube-proxy/kube-proxy-default-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 2 | bindAddress: 0.0.0.0 3 | bindAddressHardFail: false 4 | clientConnection: 5 | acceptContentTypes: "" 6 | burst: 10 7 | contentType: application/vnd.kubernetes.protobuf 8 | kubeconfig: "" 9 | qps: 5 10 | clusterCIDR: "" 11 | configSyncPeriod: 15m0s 12 | conntrack: 13 | maxPerCore: 32768 14 | min: 131072 15 | tcpCloseWaitTimeout: 1h0m0s 16 | tcpEstablishedTimeout: 24h0m0s 17 | detectLocal: 18 | bridgeInterface: "" 19 | interfaceNamePrefix: "" 20 | detectLocalMode: ClusterCIDR 21 | enableProfiling: false 22 | healthzBindAddress: 0.0.0.0:10256 23 | hostnameOverride: "" 24 | iptables: 25 | localhostNodePorts: true 26 | masqueradeAll: false 27 | masqueradeBit: 14 28 | minSyncPeriod: 1s 29 | syncPeriod: 30s 30 | ipvs: 31 | excludeCIDRs: null 32 | minSyncPeriod: 0s 33 | scheduler: "" 34 | strictARP: false 35 | syncPeriod: 30s 36 | tcpFinTimeout: 0s 37 | tcpTimeout: 0s 38 | udpTimeout: 0s 39 | kind: KubeProxyConfiguration 40 | logging: 41 | flushFrequency: 5s 42 | format: text 43 | options: 44 | json: 45 | infoBufferSize: "0" 46 | verbosity: 0 47 | metricsBindAddress: 127.0.0.1:10249 48 | mode: iptables 49 | nodePortAddresses: null 50 | oomScoreAdj: -999 51 | portRange: "" 52 | showHiddenMetricsForVersion: "" 53 | winkernel: 54 | enableDSR: false 55 | forwardHealthCheckVip: false 56 | networkName: "" 57 | rootHnsEndpointName: "" 58 | sourceVip: "" 59 | -------------------------------------------------------------------------------- /scripts/worker/kube-proxy/kube-proxy-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | component: kube-proxy 6 | tier: control-plane 7 | name: kube-proxy 8 | namespace: kube-system 9 | spec: 10 | containers: 11 | - name: kube-proxy 12 | command: 13 | - /usr/local/bin/kube-proxy 14 | - --config=/var/lib/kube-proxy/kube-proxy-config.yaml 15 | image: registry.k8s.io/kube-proxy:K8S_VER 16 | imagePullPolicy: IfNotPresent 17 | resources: 18 | requests: 19 | cpu: 200m 20 | securityContext: 21 | privileged: true 22 | volumeMounts: 23 | - name: d1 24 | mountPath: /var/lib/kube-proxy/ 25 | readOnly: true 26 | hostNetwork: true 27 | priorityClassName: system-cluster-critical 28 | volumes: 29 | - name: d1 30 | hostPath: 31 | path: /var/lib/kube-proxy/ 32 | type: DirectoryOrCreate 33 | -------------------------------------------------------------------------------- /scripts/worker/kube-proxy/kube-proxy.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kube Proxy 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-proxy \ 7 | --config=/var/lib/kube-proxy/kube-proxy-config.yaml 8 | Restart=on-failure 9 | RestartSec=5 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | 
-------------------------------------------------------------------------------- /scripts/worker/kubelet/install-kubelet: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | long_opts=priv-key:,controller-ip:,worker-ip:,worker-hostname:,kubelet-binary:,pod-cidr:,ca-cert:,ca-key: 6 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | 8 | priv_key= 9 | controller_ip= 10 | worker_ip= 11 | worker_hostname= 12 | kubelet_binary= 13 | pod_cidr= 14 | ca_cert= 15 | ca_key= 16 | 17 | if ! parsed=$(xec parseargs $long_opts "$@"); then 18 | echo "$parsed" 19 | exit 1 20 | fi 21 | eval $(echo -e "$parsed") 22 | 23 | xec gen-certs-kubeconfig\ 24 | --host-name=$worker_hostname\ 25 | --host-ip=$worker_ip\ 26 | --subject-org=system:nodes\ 27 | --subject-cn=system:node:$worker_hostname\ 28 | --identity=kubelet-$worker_hostname\ 29 | --csr-type=altnames\ 30 | --controller-ip=$controller_ip\ 31 | --ca-cert=$ca_cert\ 32 | --ca-key=$ca_key 33 | 34 | echo "Copying kubelet files to worker VM" 35 | 36 | scp -i $priv_key $kubelet_binary root@$worker_ip:/usr/local/bin/kubelet 37 | ssh -i $priv_key root@$worker_ip "mkdir -p /var/lib/kubelet/kubeconfig /var/lib/kubernetes" 38 | scp -i $priv_key $DTKBASE/generated/cert/kubelet-$worker_hostname.pem root@$worker_ip:/var/lib/kubelet/kubelet.pem 39 | scp -i $priv_key $DTKBASE/generated/cert/kubelet-$worker_hostname-key.pem root@$worker_ip:/var/lib/kubelet/kubelet-key.pem 40 | scp -i $priv_key $DTKBASE/generated/kubeconfig/kubelet-$worker_hostname.kubeconfig root@$worker_ip:/var/lib/kubelet/kubeconfig/kubelet.kubeconfig 41 | scp -i $priv_key $ca_cert root@$worker_ip:/var/lib/kubernetes/ 42 | sed $script_dir/kubelet-config.yaml\ 43 | -e "s|PODCIDR|$pod_cidr|g" | ssh -i $priv_key root@$worker_ip "cat > /var/lib/kubelet/kubelet-config.yaml" 44 | scp -i $priv_key $script_dir/kubelet.service root@$worker_ip:/etc/systemd/system/ 45 | 46 | echo "Starting kubelet service" 47 | ssh -i "$priv_key" root@$worker_ip "systemctl daemon-reload && systemctl enable --now kubelet" 48 | 49 | echo "Verifying kubelet is running" 50 | set +e 51 | for i in {1..10}; do 52 | ssh -i "$priv_key" root@$worker_ip "systemctl is-active --quiet kubelet" && break 53 | sleep 1s 54 | done 55 | 56 | echo "No errors detected with kubelet installation" 57 | -------------------------------------------------------------------------------- /scripts/worker/kubelet/kubelet-config.yaml: -------------------------------------------------------------------------------- 1 | kind: KubeletConfiguration 2 | apiVersion: kubelet.config.k8s.io/v1beta1 3 | authentication: 4 | anonymous: 5 | enabled: false 6 | webhook: 7 | enabled: true 8 | x509: 9 | clientCAFile: /var/lib/kubernetes/ca.pem 10 | authorization: 11 | mode: Webhook 12 | cgroupDriver: systemd 13 | clusterDNS: 14 | - 10.32.0.10 15 | clusterDomain: cluster.local 16 | containerRuntimeEndpoint: unix:///var/run/containerd/containerd.sock 17 | #TODO REMOVE podCIDR: PODCIDR 18 | registerNode: true 19 | resolvConf: /etc/resolv.conf 20 | runtimeRequestTimeout: 15m 21 | staticPodPath: /etc/desktop-kubernetes/static-pods 22 | tlsCertFile: /var/lib/kubelet/kubelet.pem 23 | tlsPrivateKeyFile: /var/lib/kubelet/kubelet-key.pem 24 | -------------------------------------------------------------------------------- /scripts/worker/kubelet/kubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | 
Documentation=https://github.com/kubernetes/kubernetes
4 | After=containerd.service
5 | Requires=containerd.service
6 | 
7 | [Service]
8 | ExecStart=/usr/local/bin/kubelet \
9 | --config=/var/lib/kubelet/kubelet-config.yaml \
10 | --kubeconfig=/var/lib/kubelet/kubeconfig/kubelet.kubeconfig \
11 | --v=2
12 | Restart=on-failure
13 | RestartSec=5
14 | 
15 | [Install]
16 | WantedBy=multi-user.target
17 | 
--------------------------------------------------------------------------------
/scripts/worker/misc/crictl.yaml:
--------------------------------------------------------------------------------
1 | runtime-endpoint: unix:///var/run/containerd/containerd.sock
2 | image-endpoint: unix:///var/run/containerd/containerd.sock
3 | 
--------------------------------------------------------------------------------
/scripts/worker/misc/install-misc-bins:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | 
5 | long_opts=priv-key:,worker-ip:,crictl-binary:,runc-binary:,cni-plugins-binary:
6 | script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
7 | 
8 | priv_key=
9 | worker_ip=
10 | crictl_binary=
11 | runc_binary=
12 | cni_plugins_binary=
13 | 
14 | if ! parsed=$(xec parseargs $long_opts "$@"); then
15 | echo "$parsed"
16 | exit 1
17 | fi
18 | eval $(echo -e "$parsed")
19 | 
20 | echo "Copying misc kubernetes binaries"
21 | 
22 | echo "Copying crictl binary and config to worker"
23 | cat $crictl_binary | ssh -i $priv_key root@$worker_ip\
24 | "tar zxvf - --no-same-owner -C /usr/local/bin/ crictl && chmod +x /usr/local/bin/crictl"
25 | scp -i $priv_key $script_dir/crictl.yaml root@$worker_ip:/etc/
26 | 
27 | echo "Copying runc binary to worker"
28 | scp -i $priv_key $runc_binary root@$worker_ip:/usr/local/bin/runc
29 | 
30 | echo "Copying cni plugins binary to worker"
31 | ssh -i $priv_key root@$worker_ip "mkdir -p /opt/cni/bin"
32 | cat $cni_plugins_binary | ssh -i $priv_key root@$worker_ip "tar zxvf - --no-same-owner -C /opt/cni/bin/"
33 | 
34 | echo "Done copying binaries"
35 | 
--------------------------------------------------------------------------------
/sshto:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Usage: ./sshto <vmname> [vbox | kvm]
4 | #
5 | 
6 | if [[ "$2" == vbox ]]; then
7 | ip=$(scripts/virtualbox/get-vm-ip $1)
8 | elif [[ "$2" == kvm ]]; then
9 | ip=$(scripts/kvm/get-vm-ip $1)
10 | else
11 | echo "need vbox or kvm in arg 2"
12 | exit 1
13 | fi
14 | ssh -i ./generated/kickstart/id_ed25519 root@$ip
15 | 
--------------------------------------------------------------------------------