├── assets
│   ├── scale-to-zero-preview.gif
│   └── scale-to-zero-preview.mp4
├── 02-keda
│   └── main.tf
├── 01-clusters
│   └── main.tf
├── .gitignore
├── 03-demo
│   ├── 03-scaled-object.yaml
│   ├── 02-ingress.yaml
│   ├── 01-podinfo.yaml
│   └── 04-locust.yaml
├── modules
│   ├── cluster
│   │   └── main.tf
│   └── keda
│       └── main.tf
├── README.md
└── dashboard
    ├── index.html
    └── app.js

--------------------------------------------------------------------------------
/assets/scale-to-zero-preview.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/learnk8s/scale-to-zero/HEAD/assets/scale-to-zero-preview.gif

--------------------------------------------------------------------------------
/assets/scale-to-zero-preview.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/learnk8s/scale-to-zero/HEAD/assets/scale-to-zero-preview.mp4

--------------------------------------------------------------------------------
/02-keda/main.tf:
--------------------------------------------------------------------------------
# Installs KEDA, the HTTP add-on and ingress-nginx on the cluster from 01-clusters
module "keda" {
  source = "../modules/keda"

  kubeconfig_path = abspath("../kubeconfig")
}

# Public IP of the ingress-nginx load balancer
output "lb_ip" {
  value = module.keda.lb_ip
}

--------------------------------------------------------------------------------
/01-clusters/main.tf:
--------------------------------------------------------------------------------
module "eu" {
  source = "../modules/cluster"

  name   = "eu"
  region = "eu-west"
}

# Write the cluster credentials next to the Terraform folders so the
# following steps (and kubectl) can pick them up
resource "local_file" "kubeconfig_eu" {
  filename = "../kubeconfig"
  content  = module.eu.kubeconfig
}

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
istio
karmada
terraform.tfstate
terraform.tfstate.backup
karmada-config
kube-config-*
kubeconfig-*
node_ip.txt
.terraform.lock.hcl
terraform.tfstate.*
.terraform
.terraform.tfstate.lock.info
kubeconfig

--------------------------------------------------------------------------------
/03-demo/03-scaled-object.yaml:
--------------------------------------------------------------------------------
kind: HTTPScaledObject
apiVersion: http.keda.sh/v1alpha1
metadata:
  name: podinfo
spec:
  host: example.com
  # target number of pending requests per replica used by the autoscaler
  targetPendingRequests: 100
  scaleTargetRef:
    deployment: podinfo
    service: podinfo
    port: 80
  replicas:
    min: 0
    max: 10

--------------------------------------------------------------------------------
/03-demo/02-ingress.yaml:
--------------------------------------------------------------------------------
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: podinfo
  annotations:
    # rewrite the Host header so the KEDA interceptor can match it
    nginx.ingress.kubernetes.io/upstream-vhost: example.com
spec:
  ingressClassName: nginx
  rules:
    - http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                # all traffic flows through the KEDA HTTP interceptor
                name: keda-add-ons-http-interceptor-proxy
                port:
                  number: 8080

--------------------------------------------------------------------------------
/03-demo/01-podinfo.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
spec:
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      labels:
        app: podinfo
    spec:
      initContainers:
        - name: init-myservice
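          # artificial 10s delay to simulate a slow start-up and make
          # cold starts from zero visible in the demo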
          image: busybox:1.28
          command: ["sh", "-c", "sleep 10"]
      containers:
        - name: podinfo
          image: stefanprodan/podinfo
          ports:
            - containerPort: 9898
---
apiVersion: v1
kind: Service
metadata:
  name: podinfo
spec:
  ports:
    - port: 80
      targetPort: 9898
  selector:
    app: podinfo

--------------------------------------------------------------------------------
/modules/cluster/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "1.28.0"
    }
  }
}

variable "name" {
  type = string
}

variable "region" {
  type = string
}

resource "linode_lke_cluster" "this" {
  label       = var.name
  k8s_version = "1.23"
  region      = var.region

  pool {
    type  = "g6-standard-2"
    count = 1

    autoscaler {
      min = 1
      max = 10
    }
  }

  # Prevent the count field from overriding autoscaler-created nodes
  lifecycle {
    ignore_changes = [
      pool.0.count
    ]
  }
}

output "kubeconfig" {
  value     = base64decode(linode_lke_cluster.this.kubeconfig)
  sensitive = true
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Scaling apps to zero with Kubernetes and KEDA

This project creates a Kubernetes cluster that scales apps to zero with KEDA and its HTTP add-on.

![Scaling Kubernetes deployment to zero](assets/scale-to-zero-preview.gif)

## Getting started

You need to create a Linode token to access the API:

```bash
linode-cli profile token-create
export LINODE_TOKEN=
```

```bash
# Create the cluster
terraform -chdir=01-clusters init
terraform -chdir=01-clusters apply -auto-approve

# Install KEDA & Nginx
terraform -chdir=02-keda init
terraform -chdir=02-keda apply -auto-approve

# Tidy up: remove all Kubernetes resources first
kubectl delete -f 03-demo

# Then destroy the infrastructure
terraform -chdir=02-keda destroy -auto-approve
terraform -chdir=01-clusters destroy -auto-approve
```

## Demo

Make sure that kubectl is configured with the kubeconfig file generated in the first step:

```bash
export KUBECONFIG="${PWD}/kubeconfig"
```

Then execute:

```bash
kubectl apply -f 03-demo/01-podinfo.yaml
kubectl apply -f 03-demo/02-ingress.yaml
kubectl apply -f 03-demo/03-scaled-object.yaml
kubectl apply -f 03-demo/04-locust.yaml
```

## Dashboard

Open the dashboard and enter the IP address of the Ingress load balancer.
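You can retrieve the IP from the Terraform output of the second step:

```bash
terraform -chdir=02-keda output lb_ip
```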

The dashboard is served by kubectl proxy, which also lets the page talk to the Kubernetes API:

```bash
kubectl proxy --www=./dashboard
```

Then browse to http://localhost:8001/static/.

--------------------------------------------------------------------------------
/modules/keda/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.12.1"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "2.6.0"
    }
  }
}

provider "kubernetes" {
  config_path = var.kubeconfig_path
}

provider "helm" {
  kubernetes {
    config_path = var.kubeconfig_path
  }
}

variable "kubeconfig_path" {
  type = string
}

locals {
  keda_namespace  = "default"
  nginx_namespace = "ingress-nginx"
}

resource "helm_release" "keda" {
  name      = "keda"
  chart     = "https://kedacore.github.io/charts/keda-2.7.2.tgz"
  namespace = local.keda_namespace
}

# The HTTP add-on must be installed after KEDA itself
resource "helm_release" "http_addon" {
  name      = "http-addon"
  chart     = "https://kedacore.github.io/charts/keda-add-ons-http-0.3.0.tgz"
  namespace = local.keda_namespace
  depends_on = [
    helm_release.keda
  ]
}

resource "helm_release" "nginx" {
  name             = "nginx"
  chart            = "https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.2.1/ingress-nginx-4.2.1.tgz"
  namespace        = local.nginx_namespace
  create_namespace = true
}

# Read back the LoadBalancer Service created by the nginx chart
data "kubernetes_service" "nginx_lb" {
  metadata {
    name      = "nginx-ingress-nginx-controller"
    namespace = local.nginx_namespace
  }
  depends_on = [
    helm_release.nginx
  ]
}

output "lb_ip" {
  value = data.kubernetes_service.nginx_lb.status.0.load_balancer.0.ingress.0.ip
}

--------------------------------------------------------------------------------
/03-demo/04-locust.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: locust-script
data:
  locustfile.py: |-
    from locust import HttpUser, task, between
    class QuickstartUser(HttpUser):
        wait_time = between(0.7, 1.3)
        @task
        def hello_world(self):
            self.client.get("/", headers={"Host": "example.com"})
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: locust
spec:
  selector:
    matchLabels:
      app: locust-primary
  template:
    metadata:
      labels:
        app: locust-primary
    spec:
      containers:
        - name: locust
          image: locustio/locust
          args: ["--master"]
          ports:
            - containerPort: 5557
              name: comm
            - containerPort: 5558
              name: comm-plus-1
            - containerPort: 8089
              name: web-ui
          volumeMounts:
            - mountPath: /home/locust
              name: locust-script
      volumes:
        - name: locust-script
          configMap:
            name: locust-script
---
apiVersion: v1
kind: Service
metadata:
  name: locust
spec:
  ports:
    - port: 5557
      name: communication
    - port: 5558
      name: communication-plus-1
    - port: 80
      targetPort: 8089
      name: web-ui
  selector:
    app: locust-primary
  type: LoadBalancer
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: locust
spec:
  selector:
    matchLabels:
      app: locust-worker
  template:
    metadata:
      labels:
        app: locust-worker
    spec:
      containers:
        - name: locust
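          # each worker connects back to the master through the "locust" Service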
          image: locustio/locust
          args: ["--worker", "--master-host=locust"]
          volumeMounts:
            - mountPath: /home/locust
              name: locust-script
      volumes:
        - name: locust-script
          configMap:
            name: locust-script

--------------------------------------------------------------------------------
/dashboard/index.html:
--------------------------------------------------------------------------------
<!-- NOTE: the markup of this file was lost during extraction. The surviving
     fragments show a page titled "K8bit" with an inline stylesheet, a #start
     screen and a #root screen containing the #content, #size, #go and #reset
     elements that app.js expects, plus script tags loading the js-confetti
     library and app.js. -->

--------------------------------------------------------------------------------
/dashboard/app.js:
--------------------------------------------------------------------------------
const app = App();
const jsConfetti = new JSConfetti();
let lastResourceVersion;

// List all pods once, then watch for changes from the last seen resourceVersion.
fetch("/api/v1/pods")
  .then((response) => response.json())
  .then((response) => {
    const pods = response.items;
    lastResourceVersion = response.metadata.resourceVersion;
    pods.forEach((pod) => {
      const podId = `${pod.metadata.namespace}-${pod.metadata.name}`;
      app.upsert(podId, pod);
    });
  })
  .then(() => streamUpdates());

function streamUpdates() {
  fetch(`/api/v1/pods?watch=1&resourceVersion=${lastResourceVersion}`)
    .then((response) => {
      const stream = response.body.getReader();
      const utf8Decoder = new TextDecoder("utf-8");
      let buffer = "";

      return stream.read().then(function processText({ done, value }) {
        if (done) {
          console.log("Request terminated");
          return;
        }
        buffer += utf8Decoder.decode(value);
        buffer = onNewLine(buffer, (chunk) => {
          if (chunk.trim().length === 0) {
            return;
          }
          try {
            const event = JSON.parse(chunk);
            const pod = event.object;
            console.log("PROCESSING EVENT: ", event, pod);
            const podId = `${pod.metadata.namespace}-${pod.metadata.name}`;
            switch (event.type) {
              case "ADDED": {
                if (pod.status.phase === "Running") {
                  app.upsert(podId, pod);
                }
                break;
              }
              case "DELETED": {
                app.remove(podId);
                break;
              }
              case "MODIFIED": {
                if (pod.status.phase === "Running") {
                  app.upsert(podId, pod);
                }
                break;
              }
              default:
                break;
            }
            lastResourceVersion = event.object.metadata.resourceVersion;
          } catch (error) {
            console.log("Error while parsing", chunk, "\n", error);
          }
        });
        return stream.read().then(processText);
      });
    })
    .catch(() => {
      console.log("Error! Retrying in 5 seconds...");
      setTimeout(() => streamUpdates(), 5000);
    });

  // Split the buffer on newlines, invoking fn once per complete line.
  function onNewLine(buffer, fn) {
    const newLineIndex = buffer.indexOf("\n");
    if (newLineIndex === -1) {
      return buffer;
    }
    const chunk = buffer.slice(0, newLineIndex);
    const newBuffer = buffer.slice(newLineIndex + 1);
    fn(chunk);
    return onNewLine(newBuffer, fn);
  }
}

function App() {
  let queueSize = 0;
  const allPods = new Map();
  const content = document.querySelector("#content");
  const queue = document.querySelector("#size");

  function render() {
    const pods = Array.from(allPods.values()).filter((it) =>
      /podinfo|keda|nginx/.test(it.name)
    );
    if (pods.length === 0) {
      return;
    }
    const podsByNode = groupBy(pods, (it) => it.nodeName);
    const nodeTemplates = Object.keys(podsByNode).map((nodeName) => {
      const pods = podsByNode[nodeName].sort((a, b) =>
        a.name.localeCompare(b.name)
      );
      return [
        // NOTE: the original HTML strings here were lost during extraction;
        // the markup below is a minimal reconstruction of the per-node card.
        "<li>",
        `<p>${nodeName}</p>`,
        renderNode(pods),
        "</li>",
      ].join("");
    });

    content.innerHTML = `<ul>${nodeTemplates.join("")}</ul>`;
    queue.innerHTML = `${queueSize}`;

    // The original SVG drawing of the node and its pods was also lost in
    // extraction; fall back to a plain list of pod names.
    function renderNode(pods) {
      return [
        "<ul>",
        ...pods.map((pod) => `<li>${pod.name}</li>`),
        "</ul>",
      ].join("");
    }
  }

  return {
    upsert(podId, pod) {
      if (!pod.spec.nodeName) {
        return;
      }
      allPods.set(podId, {
        name: pod.metadata.name,
        namespace: pod.metadata.namespace,
        nodeName: pod.spec.nodeName,
      });
      render();
    },
    updateQueueSize(size) {
      queueSize = size;
      render();
    },
    remove(podId) {
      allPods.delete(podId);
      render();
    },
  };
}

function groupBy(arr, groupByKeyFn) {
  return arr.reduce((acc, c) => {
    const key = groupByKeyFn(c);
    if (!(key in acc)) {
      acc[key] = [];
    }
    acc[key].push(c);
    return acc;
  }, {});
}

// Poll the KEDA HTTP interceptor admin endpoint (through kubectl proxy)
// for the number of pending requests per host.
setInterval(() => {
  fetch(
    "/api/v1/namespaces/default/services/keda-add-ons-http-interceptor-admin:9090/proxy/queue"
  )
    .then((response) => response.json())
    .then((response) => {
      app.updateQueueSize(response["example.com"]);
    });
}, 300);

// "start" and "root" below are implicit globals resolved from element ids in
// index.html. Fire a cache-busted request at the IP typed into the dashboard
// to wake the app up from zero.
document.querySelector("#go")?.addEventListener("click", (e) => {
  jsConfetti.addConfetti({
    emojis: ["🌈", "⚡️", "💥", "✨", "💫", "🌸"],
    emojiSize: 200,
    confettiNumber: 20,
  });
  const url = start.querySelector("input").value;
  fetch(
    url.startsWith("http://")
      ? `${url}?${Date.now()}`
      : `http://${url}?${Date.now()}`,
    {
      mode: "no-cors",
      cache: "no-cache",
    }
  );
});

// Scale the podinfo deployment back to zero replicas.
document.querySelector("#reset")?.addEventListener("click", (e) => {
  fetch("/apis/apps/v1/namespaces/default/deployments/podinfo", {
    method: "PATCH",
    body: JSON.stringify({ spec: { replicas: 0 } }),
    headers: {
      "Content-Type": "application/strategic-merge-patch+json",
    },
  }).then((response) => response.json());
});

start.querySelector("#start button")?.addEventListener(
  "click",
  () => {
    start.classList.add("dn");
    root.classList.remove("dn");
  },
  { once: true }
);
--------------------------------------------------------------------------------