├── README.md
└── kindscaler.sh

/README.md:
--------------------------------------------------------------------------------
# KindScaler: Node Management for KinD Clusters

KinD is a solution that allows you to quickly create local Kubernetes clusters, ideal for development or testing tasks. However, once these clusters are created, KinD has no built-in features to modify the cluster configuration by adding or removing nodes, whether they are control planes or workers, so the entire cluster would need to be regenerated from scratch.

KindScaler facilitates this task. After dissecting how KinD creates and adds nodes with different roles to the cluster, this bash script was created; it allows you to add both workers and control planes.

# How-to

```
./kindscaler.sh <cluster-name> -r <role> -c <count>
```

For example, adding 3 workers to cluster `kind`:

```
$ kind create cluster --config cluster.yaml
Creating cluster "kind" ...
 ✓ Ensuring node image (kindest/node:v1.28.0) 🖼
 ✓ Preparing nodes 📦 📦 📦
 ✓ Writing configuration 📜
 ✓ Starting control-plane 🕹
 ✓ Installing CNI 🔌
 ✓ Installing StorageClass 💾
 ✓ Joining worker nodes 🚜
Set kubectl context to "kind-kind"
You can now use your cluster with:

kubectl cluster-info --context kind-kind

Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/

$ kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
kind-control-plane   Ready    control-plane   34s   v1.28.0
kind-worker          Ready    <none>          13s   v1.28.0
kind-worker2         Ready    <none>          8s    v1.28.0

$ ./kindscaler.sh kind -r worker -c 3
Adding kind-worker3 node to kind cluster... Done!
Adding kind-worker4 node to kind cluster... Done!
Adding kind-worker5 node to kind cluster... Done!
42 | 43 | $kubectl get nodes 44 | NAME STATUS ROLES AGE VERSION 45 | kind-control-plane Ready control-plane 109s v1.28.0 46 | kind-worker Ready 88s v1.28.0 47 | kind-worker2 Ready 83s v1.28.0 48 | kind-worker3 Ready 49s v1.28.0 49 | kind-worker4 Ready 36s v1.28.0 50 | kind-worker5 Ready 7s v1.28.0 51 | 52 | ``` 53 | 54 | ## Deleting nodes 55 | 56 | ``` 57 | kubectl delete node 58 | docker stop 59 | docker container rm 60 | ``` 61 | -------------------------------------------------------------------------------- /kindscaler.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | # Check for required commands 5 | if ! command -v kind &> /dev/null; then 6 | echo "kind command not found, please install kind to use this script." 7 | exit 1 8 | fi 9 | 10 | # Check input parameters 11 | if [ $# -lt 4 ]; then 12 | echo "Usage: $0 --role --count " 13 | echo "--role must be either 'control-plane' or 'worker'" 14 | echo "--count must be a positive integer" 15 | exit 1 16 | fi 17 | 18 | # Parse command line arguments 19 | CLUSTER_NAME=$1 20 | shift # shift the first parameter off the parameters list 21 | while [[ "$#" -gt 0 ]]; do 22 | case $1 in 23 | -r|--role) ROLE="$2"; shift ;; 24 | -c|--count) COUNT="$2"; shift ;; 25 | *) echo "Unknown parameter passed: $1"; exit 1 ;; 26 | esac 27 | shift 28 | done 29 | 30 | # Validate role 31 | if [ "$ROLE" != "control-plane" ] && [ "$ROLE" != "worker" ]; then 32 | echo "Role must be 'control-plane' or 'worker'" 33 | exit 1 34 | fi 35 | 36 | # Validate count 37 | if ! 
[[ "$COUNT" =~ ^[0-9]+$ ]] || [ "$COUNT" -le 0 ]; then 38 | echo "Count must be a positive integer" 39 | exit 1 40 | fi 41 | 42 | # Get existing nodes and determine the highest node index for the given role 43 | highest_index=0 44 | existing_nodes=$(kind get nodes --name "$CLUSTER_NAME") 45 | for node in $existing_nodes; do 46 | if [[ $node == "$CLUSTER_NAME-$ROLE"* ]]; then 47 | suffix=$(echo $node | sed -e "s/^$CLUSTER_NAME-$ROLE//") 48 | if [[ "$suffix" =~ ^[0-9]+$ ]] && [ "$suffix" -gt "$highest_index" ]; then 49 | highest_index=$suffix 50 | fi 51 | fi 52 | done 53 | 54 | # Add nodes based on the highest found index and the count specified 55 | start_index=$(($highest_index + 1)) 56 | end_index=$(($highest_index + $COUNT)) 57 | for i in $(seq $start_index $end_index); do 58 | # Determine the name of the container for the specified role 59 | CONTAINER_NAME=$CLUSTER_NAME-$ROLE 60 | 61 | # Copy the kubeadm file from the container 62 | docker cp $CONTAINER_NAME:/kind/kubeadm.conf kubeadm-$i.conf > /dev/null 2>&1 63 | 64 | # Replace the container role name with specific node name in the kubeadm file 65 | sed -i "s/$CONTAINER_NAME$/$CONTAINER_NAME$i/g" "./kubeadm-$i.conf" 66 | 67 | # Update IP addresses 68 | # Assume the file contains parameters 'advertiseAddress' and 'node-ip' with typical IP values 69 | # Extract the IP address used, increment it, and replace it in the file 70 | ORIGINAL_IP=$(grep -oP '(advertiseAddress|node-ip):\s*\K([0-9]{1,3}(\.[0-9]{1,3}){3})' "./kubeadm-$i.conf" | head -1) 71 | IMAGE=$(docker ps | grep $CLUSTER_NAME | awk '{print $2}' | head -1) 72 | if [ "$ROLE" == "worker" ]; then 73 | # Command for worker nodes 74 | echo -n "Adding $CLUSTER_NAME-$ROLE$i node to $CLUSTER_NAME cluster... 
" 75 | docker run --name $CLUSTER_NAME-$ROLE$i --hostname $CLUSTER_NAME-$ROLE$i \ 76 | --label io.x-k8s.kind.role=$ROLE --privileged \ 77 | --security-opt seccomp=unconfined --security-opt apparmor=unconfined \ 78 | --tmpfs /tmp --tmpfs /run --volume /var \ 79 | --volume /lib/modules:/lib/modules:ro -e KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER \ 80 | --detach --tty --label io.x-k8s.kind.cluster=$CLUSTER_NAME --net kind \ 81 | --restart=on-failure:1 --init=false $IMAGE > /dev/null 2>&1 82 | NEW_IP=$(docker inspect $CLUSTER_NAME-$ROLE$i | grep IPAddress | tail -1 | cut -d "\"" -f 4) 83 | sed -i -r "s/$ORIGINAL_IP/$NEW_IP/g" "./kubeadm-$i.conf" 84 | sleep 5 85 | docker cp kubeadm-$i.conf $CLUSTER_NAME-$ROLE$i:/kind/kubeadm.conf > /dev/null 2>&1 86 | docker exec --privileged $CLUSTER_NAME-$ROLE$i kubeadm join --config /kind/kubeadm.conf --skip-phases=preflight --v=6 > /dev/null 2>&1 87 | rm -f kubeadm-*.conf 88 | echo "Done!" 89 | elif [ "$ROLE" == "control-plane" ]; then 90 | # Generate a random port number between 36000 and 36999 for control-plane nodes 91 | PORT=$(shuf -i 39000-39999 -n 1) 92 | # Command for control-plane nodes 93 | echo -n "Adding $CLUSTER_NAME-$ROLE$i node to $CLUSTER_NAME cluster... 
" 94 | docker run --name $CLUSTER_NAME-$ROLE$i --hostname $CLUSTER_NAME-$ROLE$i \ 95 | --label io.x-k8s.kind.role=$ROLE --privileged \ 96 | --security-opt seccomp=unconfined --publish=127.0.0.1:$PORT:6443/TCP \ 97 | --security-opt apparmor=unconfined --tmpfs /tmp --tmpfs /run --volume /var \ 98 | --volume /lib/modules:/lib/modules:ro -e KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER \ 99 | --detach --tty --label io.x-k8s.kind.cluster=$CLUSTER_NAME --net kind \ 100 | --restart=on-failure:1 --init=false $IMAGE > /dev/null 2>&1 101 | NEW_IP=$(docker inspect $CLUSTER_NAME-$ROLE$i | grep IPAddress | tail -1 | cut -d "\"" -f 4) 102 | sed -i -r "s/$ORIGINAL_IP/$NEW_IP/g" "./kubeadm-$i.conf" 103 | sleep 10 104 | docker exec --privileged $CLUSTER_NAME-$ROLE$i mkdir /etc/kubernetes/pki/ 105 | docker exec --privileged $CLUSTER_NAME-$ROLE$i mkdir /etc/kubernetes/pki/etcd 106 | docker cp kubeadm-$i.conf $CLUSTER_NAME-$ROLE$i:/kind/kubeadm.conf > /dev/null 2>&1 107 | mkdir .kindadd 108 | docker cp $CLUSTER_NAME-$ROLE:/etc/kubernetes/pki/ca.crt .kindadd/ca.crt 109 | docker cp .kindadd/ca.crt $CLUSTER_NAME-$ROLE$i:/etc/kubernetes/pki/ca.crt 110 | 111 | docker cp $CLUSTER_NAME-$ROLE:/etc/kubernetes/pki/ca.key .kindadd/ca.key 112 | docker cp .kindadd/ca.key $CLUSTER_NAME-$ROLE$i:/etc/kubernetes/pki/ca.key 113 | 114 | docker cp $CLUSTER_NAME-$ROLE:/etc/kubernetes/pki/front-proxy-ca.crt .kindadd/front-proxy-ca.crt 115 | docker cp .kindadd/front-proxy-ca.crt $CLUSTER_NAME-$ROLE$i:/etc/kubernetes/pki/front-proxy-ca.crt 116 | 117 | docker cp $CLUSTER_NAME-$ROLE:/etc/kubernetes/pki/front-proxy-ca.key .kindadd/front-proxy-ca.key 118 | docker cp .kindadd/front-proxy-ca.key $CLUSTER_NAME-$ROLE$i:/etc/kubernetes/pki/front-proxy-ca.key 119 | 120 | docker cp $CLUSTER_NAME-$ROLE:/etc/kubernetes/pki/sa.pub .kindadd/sa.pub 121 | docker cp .kindadd/sa.pub $CLUSTER_NAME-$ROLE$i:/etc/kubernetes/pki/sa.pub 122 | 123 | docker cp $CLUSTER_NAME-$ROLE:/etc/kubernetes/pki/sa.key .kindadd/sa.key 124 | docker cp 
.kindadd/sa.key $CLUSTER_NAME-$ROLE$i:/etc/kubernetes/pki/sa.key 125 | mkdir .kindadd/etcd 126 | docker cp $CLUSTER_NAME-$ROLE:/etc/kubernetes/pki/etcd/ca.crt .kindadd/etcd/ca.crt 127 | docker cp .kindadd/etcd/ca.crt $CLUSTER_NAME-$ROLE$i:/etc/kubernetes/pki/etcd/ca.crt 128 | 129 | docker cp $CLUSTER_NAME-$ROLE:/etc/kubernetes/pki/etcd/ca.key .kindadd/etcd/ca.key 130 | docker cp .kindadd/etcd/ca.key $CLUSTER_NAME-$ROLE$i:/etc/kubernetes/pki/etcd/ca.key 131 | docker exec --privileged $CLUSTER_NAME-$ROLE$i kubeadm join --config /kind/kubeadm.conf --skip-phases=preflight --v=6 > /dev/null 2>&1 132 | rm -Rf .kindadd kubeadm-*.conf 133 | echo "Done!" 134 | else 135 | echo "Invalid role specified: $ROLE" 136 | exit 1 137 | fi 138 | 139 | done 140 | 141 | 142 | 143 | --------------------------------------------------------------------------------