├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml └── workflows │ ├── release_tags.yml │ └── test_build.yml ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── assets └── varnish.repo ├── build ├── ci │ └── .goreleaser.yml └── package │ └── docker │ ├── Dockerfile │ └── GoReleaser.Dockerfile ├── chart ├── .helmignore ├── Chart.yaml ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── configmap.yaml │ ├── deployment.yaml │ ├── hpa.yaml │ ├── ingress.yaml │ ├── pdb.yaml │ ├── rbac.yaml │ ├── secret.yaml │ ├── service.yaml │ ├── serviceaccount.yaml │ ├── servicemonitor.yaml │ └── statefulset.yaml └── values.yaml ├── cmd └── kube-httpcache │ ├── internal │ └── flags.go │ └── main.go ├── deploy └── kubernetes │ └── rbac.yaml ├── go.mod ├── go.sum ├── pkg ├── controller │ ├── run.go │ ├── types.go │ ├── wait.go │ └── watch.go ├── signaller │ ├── run.go │ └── types.go └── watcher │ ├── endpoints.go │ ├── endpoints_watch.go │ ├── template_watch.go │ ├── template_watch_poll.go │ └── types.go └── test ├── test-values.yaml └── test.yaml /.dockerignore: -------------------------------------------------------------------------------- 1 | test/ 2 | build/ 3 | deploy/ 4 | LICENSE 5 | .travis.yml 6 | .gitignore 7 | .goreleaser.yml 8 | README.md 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Environment:** 24 | - Kubernetes version: [e.g. 1.18] 25 | - kube-httpcache version: [e.g. v0.2.2] 26 | 27 | **Configuration** 28 | If applicable, add your VCL configuration file and other relevant configuration settings 29 | 30 | **Additional context** 31 | Add any other context about the problem here. 32 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /.github/workflows/release_tags.yml: -------------------------------------------------------------------------------- 1 | name: Release Tags 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | build: 10 | name: Build and release stable image 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v2 15 | with: 16 | fetch-depth: 0 17 | - name: Set up QEMU 18 | uses: docker/setup-qemu-action@master 19 | with: 20 | platforms: all 21 | - name: Set up Go 22 | uses: actions/setup-go@v2 23 | with: 24 | go-version: "1.20" 25 | - name: Docker login 26 | run: docker login -u "${{ secrets.QUAY_IO_USER }}" -p "${{ secrets.QUAY_IO_TOKEN }}" quay.io 27 | - name: Run GoReleaser 28 | uses: goreleaser/goreleaser-action@v2 29 | with: 30 | version: "0.181.1" 31 | args: release --rm-dist -f build/ci/.goreleaser.yml 32 | env: 33 | GITHUB_TOKEN: ${{ secrets.RELEASE_USER_TOKEN }} 34 | - name: Run chart version bump 35 | uses: mittwald/bump-app-version-action@v1 36 | with: 37 | mode: 'publish' 38 | chartYaml: './chart/Chart.yaml' 39 | env: 40 | GITHUB_TOKEN: "${{ secrets.RELEASE_USER_TOKEN }}" 41 | HELM_REPO_PASSWORD: "${{ secrets.HELM_REPO_PASSWORD }}" 42 | -------------------------------------------------------------------------------- /.github/workflows/test_build.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'master' 7 | pull_request: 8 | 9 | jobs: 10 | verify_helm: 11 | name: Verify Helm chart 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | helm: [ '3.1.2' ] 16 | steps: 17 | - uses: actions/checkout@v2 18 | 19 | - name: Set up Helm 20 | run: | 21 | wget https://get.helm.sh/helm-v${{ matrix.helm }}-linux-amd64.tar.gz -O /tmp/helm.tar.gz 22 | tar xzf /tmp/helm.tar.gz -C /tmp --strip-components=1 23 | chmod +x /tmp/helm 24 | - name: Test template rendering 25 | run: /tmp/helm template ./chart/. 
26 | 27 | - name: Lint chart 28 | run: /tmp/helm lint ./chart/ 29 | 30 | build: 31 | name: Build 32 | runs-on: ubuntu-latest 33 | steps: 34 | - name: Checkout 35 | uses: actions/checkout@v2 36 | with: 37 | fetch-depth: 0 38 | - name: Set up QEMU 39 | uses: docker/setup-qemu-action@master 40 | with: 41 | platforms: all 42 | - name: Set up Go 43 | uses: actions/setup-go@v2 44 | with: 45 | go-version: "1.20" 46 | - name: Run GoReleaser 47 | uses: goreleaser/goreleaser-action@v2 48 | with: 49 | version: "0.181.1" 50 | args: release -f build/ci/.goreleaser.yml --snapshot --skip-publish --rm-dist 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | /vendor 3 | /kube-httpcache 4 | dist 5 | *.vcl 6 | *.vcl.tmpl 7 | *.env 8 | *.docker-compose.override.yml -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution guide 2 | 3 | ## Deployment on a local cluster 4 | 5 | This guide explains how to build the kube-httpcache Docker image locally and test it in a local KinD[^1] cluster. 6 | 7 | 1. Build image and load into kind: 8 | 9 | ``` 10 | $ docker build -t quay.io/mittwald/kube-httpcache:dev -f build/package/docker/Dockerfile . 11 | $ kind load docker-image quay.io/mittwald/kube-httpcache:dev 12 | ``` 13 | 14 | 2. Deploy an example backend workload: 15 | 16 | ``` 17 | $ kubectl apply -f test/test.yaml 18 | ``` 19 | 20 | 3. Deploy Helm chart with example configuration: 21 | 22 | ``` 23 | $ helm upgrade --install -f ./test/test-values.yaml kube-httpcache ./chart 24 | ``` 25 | 26 | 4. Port-forward to the cache: 27 | 28 | ``` 29 | $ kubectl port-forward svc/kube-httpcache 8080:80 30 | ``` 31 | 32 | [^1]: https://kind.sigs.k8s.io 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Mittwald CM Service GmbH & Co. KG 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Varnish on Kubernetes 2 | 3 | ![GitHub Workflow Status](https://img.shields.io/github/workflow/status/mittwald/kube-httpcache/Test) 4 | 5 | This repository contains a controller that allows you to operate a [Varnish cache](https://varnish-cache.org/) on Kubernetes. 6 | 7 | --- 8 | :warning: **COMPATIBILITY NOTICE**: As of version v0.3, the image name of this project was renamed from `quay.io/spaces/kube-httpcache` to `quay.io/mittwald/kube-httpcache`. The old image will remain available (for the time being), but only the new image name will receive any updates. **Please remember to adjust the image name when upgrading**. 9 | 10 | --- 11 | 12 | ## Table of Contents 13 | 14 | 15 | 16 | 17 | 18 | - [How it works](#how-it-works) 19 | - [High-Availability mode](#high-availability-mode) 20 | - [Getting started](#getting-started) 21 | - [Create a VCL template](#create-a-vcl-template) 22 | - [Create a Secret](#create-a-secret) 23 | - [[Optional] Configure RBAC roles](#optional-configure-rbac-roles) 24 | - [Deploy Varnish](#deploy-varnish) 25 | - [Logging](#logging) 26 | - [Detailed how-tos](#detailed-how-tos) 27 | - [Using built in signaller component](#using-built-in-signaller-component) 28 | - [Proxying to external services](#proxying-to-external-services) 29 | - [Helm Chart installation](#helm-chart-installation) 30 | - [Developer notes](#developer-notes) 31 | - [Build the Docker image locally](#build-the-docker-image-locally) 32 | 33 | 34 | 35 | ## How it works 36 | 37 | This controller is not intended to be a replacement for a regular [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress/). Instead, it is intended to be used between your regular Ingress controller and your application's service. 38 | 39 | ``` 40 | ┌─────────┐ ┌─────────┐ ┌─────────────┐ 41 | | Ingress | ----> | Varnish | ----> | Application | 42 | └─────────┘ └─────────┘ └─────────────┘ 43 | ``` 44 | 45 | The Varnish controller needs the following prerequisites to run: 46 | 47 | - A [Go-template](https://golang.org/pkg/text/template/) that will be used to generate a [VCL](https://varnish-cache.org/docs/trunk/users-guide/vcl.html) configuration file 48 | - An application [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/) that will be used as the backend for the Varnish controller 49 | - A Varnish [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/) that will be used as the frontend for the Varnish controller 50 | - If RBAC is enabled in your cluster, you'll need a ServiceAccount with a role that grants `WATCH` access to the `endpoints` resource in the respective namespace 51 | 52 | After starting, the Varnish controller will watch the configured Varnish service's endpoints and application service's endpoints; on startup and whenever these change, it will use the supplied VCL template to generate a new Varnish configuration and load this configuration at runtime. 53 | 54 | The controller does not ship with any preconfigured VCL; the upstream connection and advanced features like load balancing are possible, but need to be configured in the VCL template you supply. 55 | 56 | ## High-Availability mode 57 | 58 | kube-httpcache can run in high-availability mode using multiple Varnish and application pods.
59 | 60 | ``` 61 | ┌─────────┐ 62 | │ Ingress │ 63 | └────┬────┘ 64 | | 65 | ┌────┴────┐ 66 | │ Service │ 67 | └───┬┬────┘ 68 | ┌───┘└───┐ 69 | ┌────────────┴──┐ ┌──┴────────────┐ 70 | │ Varnish 1 ├──┤ Varnish 2 │ 71 | │ Signaller 1 ├──┤ Signaller 2 │ 72 | └─────────┬┬────┘ └────┬┬─────────┘ 73 | │└─────┌──────┘│ 74 | │┌─────┘└─────┐│ 75 | ┌─────────┴┴────┐ ┌────┴┴─────────┐ 76 | │ Application 1 │ | Application 2 │ 77 | └───────────────┘ └───────────────┘ 78 | ``` 79 | 80 | The Signaller component supports broadcasting PURGE and BAN requests to all Varnish nodes. 81 | 82 | ## Getting started 83 | 84 | ### Create a VCL template 85 | 86 |
87 | 88 | :warning: **NOTE**: The current implementation (supplying a VCL template as `ConfigMap`) may still be subject to change. Future implementations might for example use a Kubernetes Custom Resource for the entire configuration set. 89 | 90 |
91 | 92 | Start by creating a `ConfigMap` that contains a VCL template: 93 | 94 | ```yaml 95 | apiVersion: v1 96 | kind: ConfigMap 97 | metadata: 98 | name: vcl-template 99 | data: 100 | default.vcl.tmpl: | 101 | vcl 4.0; 102 | 103 | import std; 104 | import directors; 105 | 106 | // ".Frontends" is a slice that contains all known Varnish instances 107 | // (as selected by the service specified by -frontend-service). 108 | // The backend name needs to be the Pod name, since this value is compared 109 | // to the server identity ("server.identity" [1]) later. 110 | // 111 | // [1]: https://varnish-cache.org/docs/6.4/reference/vcl.html#local-server-remote-and-client 112 | {{ range .Frontends }} 113 | backend {{ .Name }} { 114 | .host = "{{ .Host }}"; 115 | .port = "{{ .Port }}"; 116 | } 117 | {{- end }} 118 | 119 | backend fe-primary { 120 | .host = "{{ .PrimaryFrontend.Host }}"; 121 | .port = "{{ .PrimaryFrontend.Port }}"; 122 | } 123 | 124 | {{ range .Backends }} 125 | backend be-{{ .Name }} { 126 | .host = "{{ .Host }}"; 127 | .port = "{{ .Port }}"; 128 | } 129 | {{- end }} 130 | 131 | backend be-primary { 132 | .host = "{{ .PrimaryBackend.Host }}"; 133 | .port = "{{ .PrimaryBackend.Port }}"; 134 | } 135 | 136 | acl purgers { 137 | "127.0.0.1"; 138 | "localhost"; 139 | "::1"; 140 | {{- range .Frontends }} 141 | "{{ .Host }}"; 142 | {{- end }} 143 | {{- range .Backends }} 144 | "{{ .Host }}"; 145 | {{- end }} 146 | } 147 | 148 | sub vcl_init { 149 | new cluster = directors.hash(); 150 | 151 | {{ range .Frontends -}} 152 | cluster.add_backend({{ .Name }}, 1); 153 | {{ end }} 154 | 155 | new lb = directors.round_robin(); 156 | 157 | {{ range .Backends -}} 158 | lb.add_backend(be-{{ .Name }}); 159 | {{ end }} 160 | } 161 | 162 | sub vcl_recv 163 | { 164 | # Set backend hint for non cachable objects. 165 | set req.backend_hint = lb.backend(); 166 | 167 | # ... 168 | 169 | # Routing logic. Pass a request to an appropriate Varnish node. 170 | # See https://info.varnish-software.com/blog/creating-self-routing-varnish-cluster for more info. 171 | unset req.http.x-cache; 172 | set req.backend_hint = cluster.backend(req.url); 173 | set req.http.x-shard = req.backend_hint; 174 | if (req.http.x-shard != server.identity) { 175 | return(pass); 176 | } 177 | set req.backend_hint = lb.backend(); 178 | 179 | # ... 180 | 181 | return(hash); 182 | } 183 | 184 | # ... 185 | ``` 186 | 187 | Environment variables can be used from the template. `{{ .Env.ENVVAR }}` is replaced with the 188 | environment variable value. This can be used to set for example the Host-header for the external 189 | service. 190 | 191 | ### Create a Secret 192 | 193 | Create a `Secret` object that contains the secret for the Varnish administration port: 194 | 195 | ``` 196 | $ kubectl create secret generic varnish-secret --from-literal=secret=$(head -c32 /dev/urandom | base64) 197 | ``` 198 | 199 | ### [Optional] Configure RBAC roles 200 | 201 | If RBAC is enabled in your cluster, you will need to create a `ServiceAccount` with a respective `Role`. 202 | 203 | ``` 204 | $ kubectl create serviceaccount kube-httpcache 205 | $ kubectl apply -f https://raw.githubusercontent.com/mittwald/kube-httpcache/master/deploy/kubernetes/rbac.yaml 206 | $ kubectl create rolebinding kube-httpcache --clusterrole=kube-httpcache --serviceaccount=kube-httpcache 207 | ``` 208 | 209 | ### Deploy Varnish 210 | 211 | 1. 
Create a `StatefulSet` for the Varnish controller: 212 | 213 | ```yaml 214 | apiVersion: apps/v1 215 | kind: StatefulSet 216 | metadata: 217 | name: cache-statefulset 218 | labels: 219 | app: cache 220 | spec: 221 | serviceName: cache-service 222 | replicas: 2 223 | updateStrategy: 224 | type: RollingUpdate 225 | selector: 226 | matchLabels: 227 | app: cache 228 | template: 229 | metadata: 230 | labels: 231 | app: cache 232 | spec: 233 | containers: 234 | - name: cache 235 | image: quay.io/mittwald/kube-httpcache:stable 236 | imagePullPolicy: Always 237 | args: 238 | - -admin-addr=0.0.0.0 239 | - -admin-port=6083 240 | - -signaller-enable 241 | - -signaller-port=8090 242 | - -frontend-watch 243 | - -frontend-namespace=$(NAMESPACE) 244 | - -frontend-service=frontend-service 245 | - -frontend-port=8080 246 | - -backend-watch 247 | - -backend-namespace=$(NAMESPACE) 248 | - -backend-service=backend-service 249 | - -varnish-secret-file=/etc/varnish/k8s-secret/secret 250 | - -varnish-vcl-template=/etc/varnish/tmpl/default.vcl.tmpl 251 | - -varnish-storage=malloc,128M 252 | env: 253 | - name: NAMESPACE 254 | valueFrom: 255 | fieldRef: 256 | fieldPath: metadata.namespace 257 | volumeMounts: 258 | - name: template 259 | mountPath: /etc/varnish/tmpl 260 | - name: secret 261 | mountPath: /etc/varnish/k8s-secret 262 | ports: 263 | - containerPort: 8080 264 | name: http 265 | - containerPort: 8090 266 | name: signaller 267 | serviceAccountName: kube-httpcache # when using RBAC 268 | restartPolicy: Always 269 | volumes: 270 | - name: template 271 | configMap: 272 | name: vcl-template 273 | - name: secret 274 | secret: 275 | secretName: varnish-secret 276 | ``` 277 | 278 | **NOTE**: Using a `StatefulSet` is particularly important when using a stateful, self-routed Varnish cluster. Otherwise, you could also use a `Deployment` resource, instead. 279 | 280 | 2. Create a service for the Varnish controller: 281 | 282 | ```yaml 283 | apiVersion: v1 284 | kind: Service 285 | metadata: 286 | name: cache-service 287 | labels: 288 | app: cache 289 | spec: 290 | ports: 291 | - name: "http" 292 | port: 80 293 | targetPort: http 294 | - name: "signaller" 295 | port: 8090 296 | targetPort: signaller 297 | selector: 298 | app: cache 299 | ``` 300 | 301 | 3. Create an `Ingress` to forward requests to cache service. Typically, you should only need an Ingress for the Services `http` port, and not for the `signaller` port (if for some reason you do, make sure to implement proper access controls) 302 | 303 | ### Logging 304 | Logging uses [glog](https://github.com/golang/glog). 305 | Detailed logging e.g. for troubleshooting can be activated by passing command line parameter `-v7` (where 7 is requested logging level). 306 | 307 | ## Detailed how-tos 308 | 309 | ### Using built in signaller component 310 | 311 | The signaller component is responsible for broadcasting HTTP requests to all nodes of a Varnish cluster. This is useful in HA cluster setups, when `BAN` or `PURGE` requests should be broadcast across the entire cluster. 
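The signaller accepts plain HTTP, so invalidation can be triggered either manually (see the `curl` commands below) or programmatically from application code. The following is a minimal Go sketch and is not part of kube-httpcache itself; the `cache-service` host, the signaller port `8090`, and the `X-Url` header follow the examples in this README and must match your own Service and VCL:

```go
package main

import (
	"fmt"
	"net/http"
)

// banPath asks the kube-httpcache signaller to broadcast a BAN for the given
// URL path to every Varnish pod behind the service. The host, port and X-Url
// header are assumptions taken from the examples in this README.
func banPath(path string) error {
	req, err := http.NewRequest("BAN", "http://cache-service:8090", nil)
	if err != nil {
		return err
	}
	req.Header.Set("X-Url", path)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("signaller returned HTTP %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := banPath("/path"); err != nil {
		fmt.Println("cache invalidation failed:", err)
	}
}
```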
312 | 313 | To broadcast a `BAN` or `PURGE` request to all Varnish endpoints, run one of the following commands, respectively: 314 | 315 | $ curl -H "X-Url: /path" -X BAN http://cache-service:8090 316 | $ curl -H "X-Host: www.example.com" -X PURGE http://cache-service:8090/path 317 | 318 | When running from outside the cluster, you can use `kubectl port-forward` to forward the signaller port to your local machine (and then send your requests to `http://localhost:8090`): 319 | 320 | $ kubectl port-forward service/cache-service 8090:8090 321 | 322 | **NOTE:** Specific headers for `PURGE`/`BAN` requests depend on your Varnish configuration. E.g. `X-Host` header is set for convenience, because signaller is listening on other URL than Varnish. However, you need to support such headers in your VCL. 323 | 324 | ```vcl 325 | sub vcl_recv { 326 | # ... 327 | 328 | # Purge logic 329 | if (req.method == "PURGE") { 330 | if (client.ip !~ purgers) { 331 | return (synth(403, "Not allowed.")); 332 | } 333 | if (req.http.X-Host) { 334 | set req.http.host = req.http.X-Host; 335 | } 336 | return (purge); 337 | } 338 | 339 | # Ban logic 340 | if (req.method == "BAN") { 341 | if (client.ip !~ purgers) { 342 | return (synth(403, "Not allowed.")); 343 | } 344 | if (req.http.Cache-Tags) { 345 | ban("obj.http.Cache-Tags ~ " + req.http.Cache-Tags); 346 | return (synth(200, "Ban added " + req.http.host)); 347 | } 348 | if (req.http.X-Url) { 349 | ban("obj.http.X-Url == " + req.http.X-Url); 350 | return (synth(200, "Ban added " + req.http.host)); 351 | } 352 | return (synth(403, "Cache-Tags or X-Url header missing.")); 353 | } 354 | 355 | # ... 356 | } 357 | ``` 358 | 359 | ### Proxying to external services 360 | 361 |
362 | 363 | **NOTE**: Native support for `ExternalName` services is a requested feature. Have a look at [#39](https://github.com/mittwald/kube-httpcache/issues/39) if you're willing to help out. 364 | 365 |
366 | 367 | In some cases, you might want to cache content from a cluster-external resource. In this case, create a new Kubernetes service of type `ExternalName` for your backend: 368 | 369 | ```yaml 370 | apiVersion: v1 371 | kind: Service 372 | metadata: 373 | name: external-service 374 | namespace: default 375 | spec: 376 | type: ExternalName 377 | externalName: external-service.example 378 | ``` 379 | 380 | In your VCL template, you can then simply use this service as static backend (since there are no dynamic endpoints, you do not need to iterate over `.Backends` in your VCL template): 381 | 382 | ```yaml 383 | kind: ConfigMap 384 | apiVersion: v1 385 | metadata: # [...] 386 | data: 387 | default.vcl.tmpl: | 388 | vcl 4.0; 389 | 390 | {{ range .Frontends }} 391 | backend {{ .Name }} { 392 | .host = "{{ .Host }}"; 393 | .port = "{{ .Port }}"; 394 | } 395 | {{- end }} 396 | 397 | backend backend { 398 | .host = "external-service.svc"; 399 | } 400 | 401 | // ... 402 | ``` 403 | 404 | When starting kube-httpcache, remember to set the `--backend-watch=false` flag to disable watching the (non-existent) backend endpoints. 405 | 406 | ## Helm Chart installation 407 | 408 | You can use the [Helm chart](chart/) to rollout an instance of kube-httpcache: 409 | 410 | ``` 411 | $ helm repo add mittwald https://helm.mittwald.de 412 | $ helm install -f your-values.yaml kube-httpcache mittwald/kube-httpcache 413 | ``` 414 | 415 | For possible values, have a look at the comments in the provided [`values.yaml` file](./chart/values.yaml). Take special note that you'll most likely have to overwrite the `vclTemplate` value with your own VCL configuration file. 416 | 417 | Ensure your defined backend services have a port named `http`: 418 | 419 | ``` 420 | apiVersion: v1 421 | kind: Service 422 | metadata: 423 | name: backend-service 424 | spec: 425 | ports: 426 | - name: http 427 | port: 80 428 | protocol: TCP 429 | targetPort: 8080 430 | type: ClusterIP 431 | ``` 432 | 433 | An ingress points to the kube-httpcache service which cached 434 | your backend service: 435 | 436 | ``` 437 | apiVersion: networking.k8s.io/v1 438 | kind: Ingress 439 | metadata: 440 | name: example-ingress 441 | spec: 442 | rules: 443 | - host: www.example.com 444 | http: 445 | paths: 446 | - backend: 447 | service: 448 | name: kube-httpcache 449 | port: 450 | number: 80 451 | path: / 452 | pathType: Prefix 453 | ``` 454 | 455 | Look at the `vclTemplate` property in [chart/values.yaml](chart/values.yaml) to define 456 | your own Varnish cluster rules or load with `extraVolume` an extra file 457 | as initContainer if your ruleset is really big. 458 | 459 | ## Developer notes 460 | 461 | ### Build the Docker image locally 462 | 463 | A Dockerfile for building the container image yourself is located in `build/package/docker`. Invoke `docker build` as follows: 464 | 465 | ``` 466 | $ docker build -t $IMAGE_NAME -f build/package/docker/Dockerfile . 467 | ``` 468 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | In case you have discovered a vulnerability, please reach out privately via email to opensource@mittwald.de instead of opening an issue. 
6 | -------------------------------------------------------------------------------- /assets/varnish.repo: -------------------------------------------------------------------------------- 1 | [varnishcache_varnish60] 2 | name=varnishcache_varnish60 3 | baseurl=https://packagecloud.io/varnishcache/varnish60/el/7/$basearch 4 | repo_gpgcheck=1 5 | gpgcheck=0 6 | enabled=1 7 | gpgkey=https://packagecloud.io/varnishcache/varnish60/gpgkey 8 | sslverify=1 9 | sslcacert=/etc/pki/tls/certs/ca-bundle.crt 10 | metadata_expire=300 11 | -------------------------------------------------------------------------------- /build/ci/.goreleaser.yml: -------------------------------------------------------------------------------- 1 | project_name: kube-httpcache 2 | before: 3 | hooks: 4 | - go mod download 5 | - go vet ./... 6 | - go test ./... 7 | builds: 8 | - env: 9 | - CGO_ENABLED=0 10 | - GO111MODULE=on 11 | binary: kube-httpcache 12 | main: ./cmd/kube-httpcache/main.go 13 | goos: 14 | - linux 15 | goarch: 16 | - amd64 17 | - arm64 18 | checksum: 19 | name_template: 'checksums.txt' 20 | snapshot: 21 | name_template: "{{ .Tag }}-next" 22 | changelog: 23 | sort: asc 24 | filters: 25 | exclude: 26 | - '^docs:' 27 | - '^test:' 28 | dockers: 29 | - 30 | image_templates: 31 | - quay.io/mittwald/kube-httpcache:latest-amd64 32 | - quay.io/mittwald/kube-httpcache:v{{ .Major }}-amd64 33 | - quay.io/mittwald/kube-httpcache:v{{ .Major }}.{{ .Minor }}-amd64 34 | - quay.io/mittwald/kube-httpcache:{{ .Tag }}-amd64 35 | - quay.io/mittwald/kube-httpcache:stable-amd64 36 | use: buildx 37 | dockerfile: build/package/docker/GoReleaser.Dockerfile 38 | build_flag_templates: 39 | - "--platform=linux/amd64" 40 | - "--build-arg=ARCH=amd64/" 41 | ids: 42 | - kube-httpcache 43 | goos: linux 44 | goarch: amd64 45 | goarm: '' 46 | - 47 | image_templates: 48 | - quay.io/mittwald/kube-httpcache:latest-arm64 49 | - quay.io/mittwald/kube-httpcache:v{{ .Major }}-arm64 50 | - quay.io/mittwald/kube-httpcache:v{{ .Major }}.{{ .Minor }}-arm64 51 | - quay.io/mittwald/kube-httpcache:{{ .Tag }}-arm64 52 | - quay.io/mittwald/kube-httpcache:stable-arm64 53 | use: buildx 54 | dockerfile: build/package/docker/GoReleaser.Dockerfile 55 | build_flag_templates: 56 | - "--platform=linux/arm64/v8" 57 | - "--build-arg=ARCH=arm64v8/" 58 | ids: 59 | - kube-httpcache 60 | goos: linux 61 | goarch: arm64 62 | goarm: '' 63 | docker_manifests: 64 | - name_template: quay.io/mittwald/kube-httpcache:latest 65 | image_templates: 66 | - quay.io/mittwald/kube-httpcache:latest-amd64 67 | - quay.io/mittwald/kube-httpcache:latest-arm64 68 | - name_template: quay.io/mittwald/kube-httpcache:stable 69 | image_templates: 70 | - quay.io/mittwald/kube-httpcache:stable-amd64 71 | - quay.io/mittwald/kube-httpcache:stable-arm64 72 | - name_template: quay.io/mittwald/kube-httpcache:v{{ .Major }} 73 | image_templates: 74 | - quay.io/mittwald/kube-httpcache:v{{ .Major }}-amd64 75 | - quay.io/mittwald/kube-httpcache:v{{ .Major }}-arm64 76 | - name_template: quay.io/mittwald/kube-httpcache:v{{ .Major }}.{{ .Minor }} 77 | image_templates: 78 | - quay.io/mittwald/kube-httpcache:v{{ .Major }}.{{ .Minor }}-amd64 79 | - quay.io/mittwald/kube-httpcache:v{{ .Major }}.{{ .Minor }}-arm64 80 | - name_template: quay.io/mittwald/kube-httpcache:{{ .Tag }} 81 | image_templates: 82 | - quay.io/mittwald/kube-httpcache:{{ .Tag }}-amd64 83 | - quay.io/mittwald/kube-httpcache:{{ .Tag }}-arm64 -------------------------------------------------------------------------------- 
/build/package/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.20-bullseye AS builder 2 | 3 | WORKDIR /workspace 4 | COPY . . 5 | RUN CGO_ENABLED=0 GOOS=linux \ 6 | go build \ 7 | -installsuffix cgo \ 8 | -o kube-httpcache \ 9 | -a cmd/kube-httpcache/main.go 10 | 11 | FROM golang:1.20-bullseye AS builder-exporter 12 | ENV EXPORTER_VERSION=1.6.1 13 | 14 | WORKDIR /workspace 15 | RUN apt-get update \ 16 | && apt-get install -y \ 17 | git 18 | RUN git clone https://github.com/jonnenauha/prometheus_varnish_exporter.git --depth 1 --branch ${EXPORTER_VERSION} 19 | WORKDIR /workspace/prometheus_varnish_exporter 20 | RUN go build 21 | 22 | 23 | FROM debian:bullseye-slim AS final 24 | 25 | LABEL MAINTAINER="Martin Helmich " 26 | 27 | WORKDIR / 28 | 29 | RUN apt-get -qq update && apt-get -qq upgrade && apt-get -qq install curl && \ 30 | curl -s https://packagecloud.io/install/repositories/varnishcache/varnish73/script.deb.sh | bash && \ 31 | apt-get -qq update && apt-get -qq install varnish && \ 32 | apt-get -qq purge curl gnupg && \ 33 | apt-get -qq autoremove && apt-get -qq autoclean && \ 34 | rm -rf /var/cache/* 35 | 36 | RUN mkdir /exporter && chown varnish /exporter 37 | 38 | COPY --from=builder /workspace/kube-httpcache . 39 | COPY --from=builder-exporter /workspace/prometheus_varnish_exporter/prometheus_varnish_exporter /exporter/ 40 | 41 | ENTRYPOINT [ "/kube-httpcache" ] 42 | -------------------------------------------------------------------------------- /build/package/docker/GoReleaser.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ARCH= 2 | FROM ${ARCH}debian:bullseye-slim 3 | 4 | ENV EXPORTER_VERSION=1.6.1 5 | LABEL MAINTAINER="Martin Helmich " 6 | 7 | WORKDIR / 8 | 9 | RUN apt-get -qq update && apt-get -qq upgrade && apt-get -qq install curl && \ 10 | curl -s https://packagecloud.io/install/repositories/varnishcache/varnish73/script.deb.sh | bash && \ 11 | apt-get -qq update && apt-get -qq install varnish && \ 12 | apt-get -qq purge curl gnupg && \ 13 | apt-get -qq autoremove && apt-get -qq autoclean && \ 14 | rm -rf /var/cache/* 15 | 16 | RUN mkdir /exporter && \ 17 | chown varnish /exporter 18 | 19 | ADD --chown=varnish https://github.com/jonnenauha/prometheus_varnish_exporter/releases/download/${EXPORTER_VERSION}/prometheus_varnish_exporter-${EXPORTER_VERSION}.linux-amd64.tar.gz /tmp 20 | 21 | RUN cd /exporter && \ 22 | tar -xzf /tmp/prometheus_varnish_exporter-${EXPORTER_VERSION}.linux-amd64.tar.gz && \ 23 | ln -sf /exporter/prometheus_varnish_exporter-${EXPORTER_VERSION}.linux-amd64/prometheus_varnish_exporter prometheus_varnish_exporter 24 | 25 | COPY kube-httpcache . 26 | 27 | ENTRYPOINT [ "/kube-httpcache" ] 28 | -------------------------------------------------------------------------------- /chart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kube-httpcache 3 | description: Varnish on Kubernetes Helm Chart 4 | version: 0.8.1 5 | appVersion: v0.8.1 6 | home: https://varnish-cache.org 7 | icon: https://varnish-cache.org/_static/varnish-bunny.png 8 | sources: 9 | - https://github.com/mittwald/kube-httpcache 10 | -------------------------------------------------------------------------------- /chart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Ensure your backend-service has a port name 'http' and create an ingress 2 | points to the '{{ include "kube-httpcache.fullname" . }}' service. Enjoy your Varnish cache! 3 | 4 | {{- if and (.Values.rbac.enabled) (.Values.cache.backendServiceNamespace) }} 5 | {{- if (ne .Values.cache.backendServiceNamespace .Release.Namespace) }} 6 | 7 | ATTENTION REQUIRED: 8 | Your backend service is configured as {{ .Values.cache.backendServiceNamespace }}/{{ .Values.cache.backendService}} and is in a different namespace 9 | than your Helm release ({{ .Release.Namespace}}). To be able to watch the endpoints of this service, 10 | the '{{ include "kube-httpcache.serviceAccountName" . }}' service account will need to be granted WATCH access 11 | to the "endpoints" resource in the namespace '{{ .Values.cache.backendServiceNamespace }}' using RBAC. 12 | {{- end }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /chart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "kube-httpcache.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "kube-httpcache.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "kube-httpcache.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "kube-httpcache.labels" -}} 37 | helm.sh/chart: {{ include "kube-httpcache.chart" . }} 38 | {{ include "kube-httpcache.selectorLabels" . 
}} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "kube-httpcache.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "kube-httpcache.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "kube-httpcache.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.enabled }} 58 | {{- default (include "kube-httpcache.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /chart/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.configmap.enabled -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ include "kube-httpcache.fullname" . }} 6 | data: 7 | default.vcl.tmpl: | 8 | {{ .Values.vclTemplate | indent 4}} 9 | {{- end }} 10 | -------------------------------------------------------------------------------- /chart/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.useStatefulset.enabled -}} 2 | --- 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: {{ include "kube-httpcache.fullname" . }} 7 | labels: 8 | {{- include "kube-httpcache.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | spec: 14 | {{- if not .Values.autoscaling.enabled }} 15 | replicas: {{ .Values.replicaCount }} 16 | {{- end }} 17 | selector: 18 | matchLabels: 19 | {{- include "kube-httpcache.selectorLabels" . | nindent 6 }} 20 | template: 21 | metadata: 22 | {{- with .Values.podAnnotations }} 23 | annotations: 24 | {{- toYaml . | nindent 8 }} 25 | {{- end }} 26 | labels: 27 | {{- include "kube-httpcache.selectorLabels" . | nindent 8 }} 28 | {{- with .Values.podLabels }} 29 | {{- toYaml . | nindent 8 }} 30 | {{- end }} 31 | spec: 32 | {{- with .Values.imagePullSecrets }} 33 | imagePullSecrets: 34 | {{- toYaml . | nindent 8 }} 35 | {{- end }} 36 | serviceAccountName: {{ include "kube-httpcache.serviceAccountName" . }} 37 | securityContext: 38 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 39 | {{- with .Values.topologySpreadConstraints }} 40 | topologySpreadConstraints: 41 | {{- toYaml . | nindent 8 }} 42 | {{- end }} 43 | {{- if .Values.initContainers }} 44 | initContainers: 45 | {{- with .Values.initContainers }} 46 | {{- tpl . 
$ | nindent 8 }} 47 | {{- end }} 48 | {{- end }} 49 | containers: 50 | - name: {{ .Chart.Name }} 51 | securityContext: 52 | {{- toYaml .Values.securityContext | nindent 12 }} 53 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 54 | imagePullPolicy: {{ .Values.image.pullPolicy }} 55 | {{- if .Values.livenessProbe }} 56 | livenessProbe: 57 | {{- toYaml .Values.livenessProbe | nindent 12 }} 58 | {{- end }} 59 | {{- if .Values.readinessProbe }} 60 | readinessProbe: 61 | {{- toYaml .Values.readinessProbe | nindent 12 }} 62 | {{- end }} 63 | args: 64 | - -admin-addr=0.0.0.0 65 | - -admin-port=6083 66 | - -signaller-enable 67 | - -signaller-port=8090 68 | - -frontend-port={{ .Values.service.target }} 69 | {{- if .Values.cache.frontendWatch }} 70 | - -frontend-watch 71 | {{- else }} 72 | - -frontend-watch=false 73 | {{- end }} 74 | - -frontend-namespace={{ "$(NAMESPACE)" }} 75 | - -frontend-service={{ .Values.cache.frontendService | default (include "kube-httpcache.fullname" .) }} 76 | {{- if .Values.cache.backendWatch }} 77 | - -backend-watch 78 | {{- else }} 79 | - -backend-watch=false 80 | {{- end }} 81 | - -backend-service={{ tpl .Values.cache.backendService . }} 82 | - -backend-namespace={{ .Values.cache.backendServiceNamespace | default "$(NAMESPACE)" }} 83 | - -varnish-secret-file=/etc/varnish/k8s-secret/secret 84 | - -varnish-vcl-template=/etc/varnish/tmpl/default.vcl.tmpl 85 | - -varnish-storage={{ .Values.cache.varnishStorage }},{{ .Values.cache.storageSize }} 86 | {{- if .Values.configmap.enabled }} 87 | {{/* When loading the VCL template from a ConfigMap, inotify is not reliable */}} 88 | - -varnish-vcl-template-poll 89 | {{- end }} 90 | {{- if .Values.cache.varnishTransientStorage }} 91 | - -varnish-transient-storage={{ .Values.cache.varnishTransientStorage }},{{ .Values.cache.transientStorageSize }} 92 | {{- end }} 93 | {{- if .Values.cacheExtraArgs }} 94 | {{- with .Values.cacheExtraArgs }} 95 | {{- tpl . $ | trim | nindent 10 }} 96 | {{- end }} 97 | {{- end }} 98 | env: 99 | - name: NAMESPACE 100 | valueFrom: 101 | fieldRef: 102 | fieldPath: metadata.namespace 103 | {{- with .Values.extraEnvVars }} 104 | {{- toYaml . 
| nindent 10 }} 105 | {{- end }} 106 | {{- if .Values.extraEnvFromConfig }} 107 | envFrom: 108 | {{- toYaml .Values.extraEnvFromConfig | nindent 12 }} 109 | {{- end }} 110 | {{- if .Values.lifecycle }} 111 | lifecycle: 112 | {{- toYaml .Values.lifecycle | nindent 12 }} 113 | {{- end }} 114 | volumeMounts: 115 | - name: template 116 | mountPath: /etc/varnish/tmpl 117 | - name: secret 118 | mountPath: /etc/varnish/k8s-secret 119 | - name: var 120 | mountPath: /var/lib/varnish 121 | {{- if .Values.extraMounts }} 122 | {{- toYaml .Values.extraMounts | nindent 10 }} 123 | {{- end }} 124 | {{- if .Values.resources }} 125 | resources: 126 | {{- toYaml .Values.resources | nindent 12 }} 127 | {{- end }} 128 | {{- if .Values.exporter.enabled }} 129 | - name: exporter 130 | securityContext: 131 | {{- toYaml .Values.exporter.securityContext | nindent 12 }} 132 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 133 | imagePullPolicy: {{ .Values.image.pullPolicy }} 134 | command: 135 | - /exporter/prometheus_varnish_exporter 136 | args: 137 | - -no-exit 138 | env: 139 | - name: VSM_NOPID 140 | value: "1" 141 | ports: 142 | - name: metrics 143 | containerPort: 9131 144 | volumeMounts: 145 | - name: var 146 | mountPath: /var/lib/varnish 147 | {{- if .Values.exporter.livenessProbe }} 148 | livenessProbe: 149 | {{- toYaml .Values.exporter.livenessProbe | nindent 12 }} 150 | {{- end }} 151 | {{- if .Values.exporter.readinessProbe }} 152 | readinessProbe: 153 | {{- toYaml .Values.exporter.readinessProbe | nindent 12 }} 154 | {{- end }} 155 | {{- if .Values.exporter.resources }} 156 | resources: 157 | {{- toYaml .Values.exporter.resources | nindent 12 }} 158 | {{- end }} 159 | {{- end }} 160 | {{- if .Values.extraContainers }} 161 | {{- toYaml .Values.extraContainers | nindent 8 }} 162 | {{- end }} 163 | volumes: 164 | {{- if .Values.configmap.enabled }} 165 | - name: template 166 | configMap: 167 | name: {{ include "kube-httpcache.fullname" . }} 168 | {{- end }} 169 | - name: secret 170 | secret: 171 | secretName: {{ .Values.cache.existingSecret | default (include "kube-httpcache.fullname" .) }} 172 | - name: var 173 | emptyDir: {} 174 | {{- if .Values.extraVolumes -}} 175 | {{- toYaml .Values.extraVolumes | nindent 6 }} 176 | {{- end }} 177 | {{- with .Values.nodeSelector }} 178 | nodeSelector: 179 | {{- toYaml . | nindent 8 }} 180 | {{- end }} 181 | {{- with .Values.affinity }} 182 | affinity: 183 | {{- toYaml . | nindent 8 }} 184 | {{- end }} 185 | {{- with .Values.tolerations }} 186 | tolerations: 187 | {{- toYaml . | nindent 8 }} 188 | {{- end }} 189 | {{- if .Values.terminationGracePeriodSeconds }} 190 | terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} 191 | {{- end }} 192 | {{- end }} 193 | -------------------------------------------------------------------------------- /chart/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.useStatefulset.enabled -}} 2 | {{- if .Values.autoscaling.enabled }} 3 | {{- if .Capabilities.APIVersions.Has "autoscaling/v2" }} 4 | apiVersion: autoscaling/v2 5 | {{- else if .Capabilities.APIVersions.Has "autoscaling/v2beta2" }} 6 | apiVersion: autoscaling/v2beta2 7 | {{- else }} 8 | apiVersion: autoscaling/v2beta1 9 | {{- end }} 10 | kind: HorizontalPodAutoscaler 11 | metadata: 12 | name: {{ include "kube-httpcache.fullname" . }} 13 | labels: 14 | {{- include "kube-httpcache.labels" . 
| nindent 4 }} 15 | spec: 16 | scaleTargetRef: 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | name: {{ include "kube-httpcache.fullname" . }} 20 | minReplicas: {{ .Values.autoscaling.minReplicas }} 21 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 22 | metrics: 23 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} 24 | - type: Resource 25 | resource: 26 | name: cpu 27 | {{- if or (.Capabilities.APIVersions.Has "autoscaling/v2") (.Capabilities.APIVersions.Has "autoscaling/v2beta2") }} 28 | target: 29 | type: Utilization 30 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 31 | {{- else }} 32 | targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 33 | {{- end }} 34 | {{- end }} 35 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} 36 | - type: Resource 37 | resource: 38 | name: memory 39 | {{- if or (.Capabilities.APIVersions.Has "autoscaling/v2") (.Capabilities.APIVersions.Has "autoscaling/v2beta2") }} 40 | target: 41 | type: Utilization 42 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 43 | {{- else }} 44 | targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 45 | {{- end }} 46 | {{- end }} 47 | {{- end }} 48 | {{- end }} 49 | -------------------------------------------------------------------------------- /chart/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} 3 | {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} 4 | {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} 5 | {{- end }} 6 | {{- end }} 7 | {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} 8 | apiVersion: networking.k8s.io/v1 9 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} 10 | apiVersion: networking.k8s.io/v1beta1 11 | {{- else -}} 12 | apiVersion: extensions/v1beta1 13 | {{- end }} 14 | kind: Ingress 15 | metadata: 16 | name: {{ include "kube-httpcache.fullname" . }} 17 | labels: 18 | {{- include "kube-httpcache.labels" . | nindent 4 }} 19 | {{- with .Values.ingress.annotations }} 20 | annotations: 21 | {{- toYaml . | nindent 4 }} 22 | {{- end }} 23 | spec: 24 | {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} 25 | ingressClassName: {{ .Values.ingress.className }} 26 | {{- end }} 27 | {{- if .Values.ingress.tls }} 28 | tls: 29 | {{- range .Values.ingress.tls }} 30 | - hosts: 31 | {{- range .hosts }} 32 | - {{ . }} 33 | {{- end }} 34 | secretName: {{ .secretName }} 35 | {{- end }} 36 | {{- end }} 37 | rules: 38 | {{- range .Values.ingress.hosts }} 39 | - host: {{ .host }} 40 | http: 41 | paths: {{ .paths | toYaml | nindent 10 }} 42 | {{- end }} 43 | {{- end }} 44 | -------------------------------------------------------------------------------- /chart/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: {{ include "kube-httpcache.fullname" . }} 5 | labels: 6 | {{- include "kube-httpcache.labels" . | nindent 4 }} 7 | spec: 8 | maxUnavailable: 1 9 | selector: 10 | matchLabels: 11 | {{- include "kube-httpcache.selectorLabels" . 
| nindent 6 }} 12 | -------------------------------------------------------------------------------- /chart/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.enabled -}} 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | name: {{ include "kube-httpcache.fullname" . }} 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - endpoints 12 | - pods 13 | verbs: 14 | - watch 15 | - get 16 | {{- if .Values.podSecurityPolicy.enabled -}} 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - endpoints 21 | - pods 22 | verbs: 23 | - watch 24 | - get 25 | - apiGroups: 26 | - extensions 27 | resourceNames: 28 | - {{ .Values.podSecurityPolicy.name }} 29 | resources: 30 | - podsecuritypolicies 31 | verbs: 32 | - use 33 | {{- end }} 34 | --- 35 | apiVersion: rbac.authorization.k8s.io/v1 36 | kind: RoleBinding 37 | metadata: 38 | labels: 39 | {{- include "kube-httpcache.labels" . | nindent 4 }} 40 | name: {{ include "kube-httpcache.fullname" . }} 41 | roleRef: 42 | apiGroup: rbac.authorization.k8s.io 43 | kind: Role 44 | name: {{ include "kube-httpcache.fullname" . }} 45 | subjects: 46 | - kind: ServiceAccount 47 | name: {{ include "kube-httpcache.serviceAccountName" . }} 48 | {{- end }} 49 | -------------------------------------------------------------------------------- /chart/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.cache.existingSecret -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ include "kube-httpcache.fullname" . }} 6 | labels: 7 | {{- include "kube-httpcache.labels" . | nindent 4 }} 8 | type: Opaque 9 | data: 10 | secret: {{ .Values.cache.secret | default (randAlphaNum 32) | b64enc | quote }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /chart/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "kube-httpcache.fullname" . }} 5 | labels: 6 | {{- include "kube-httpcache.labels" . | nindent 4 }} 7 | {{- with .Values.service.annotations }} 8 | annotations: 9 | {{- toYaml . | nindent 4 }} 10 | {{- end }} 11 | spec: 12 | type: {{ .Values.service.type }} 13 | ports: 14 | - name: http 15 | port: {{ .Values.service.port }} 16 | targetPort: {{ .Values.service.target }} 17 | protocol: TCP 18 | - name: "signaller" 19 | port: 8090 20 | targetPort: 8090 21 | {{- if .Values.exporter.enabled }} 22 | - name: metrics 23 | port: 9131 24 | targetPort: 9131 25 | {{- end }} 26 | selector: 27 | {{- include "kube-httpcache.selectorLabels" . | nindent 4 }} 28 | -------------------------------------------------------------------------------- /chart/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.enabled -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "kube-httpcache.serviceAccountName" . }} 6 | labels: 7 | {{- include "kube-httpcache.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . 
| nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /chart/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ include "kube-httpcache.fullname" . }} 6 | labels: 7 | {{- include "kube-httpcache.labels" . | nindent 4 }} 8 | {{- if .Values.serviceMonitor.additionalLabels }} 9 | {{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }} 10 | {{- end }} 11 | spec: 12 | selector: 13 | matchLabels: 14 | {{- include "kube-httpcache.selectorLabels" . | nindent 6 }} 15 | endpoints: 16 | - port: metrics 17 | path: /metrics 18 | {{- if .Values.serviceMonitor.interval }} 19 | interval: {{ .Values.serviceMonitor.interval }} 20 | {{- end }} 21 | {{- if .Values.serviceMonitor.scrapeTimeout }} 22 | scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} 23 | {{- end }} 24 | {{- if .Values.serviceMonitor.scrapeSignaller }} 25 | - port: signaller 26 | path: /metrics 27 | {{- if .Values.serviceMonitor.interval }} 28 | interval: {{ .Values.serviceMonitor.interval }} 29 | {{- end }} 30 | {{- if .Values.serviceMonitor.scrapeTimeout }} 31 | scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} 32 | {{- end }} 33 | {{- end }} 34 | {{- end}} 35 | -------------------------------------------------------------------------------- /chart/templates/statefulset.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.useStatefulset.enabled -}} 2 | --- 3 | apiVersion: apps/v1 4 | kind: StatefulSet 5 | metadata: 6 | name: {{ include "kube-httpcache.fullname" . }} 7 | labels: 8 | {{- include "kube-httpcache.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | spec: 14 | serviceName: {{ include "kube-httpcache.fullname" . }} 15 | {{- if not .Values.autoscaling.enabled }} 16 | replicas: {{ .Values.replicaCount }} 17 | {{- end }} 18 | selector: 19 | matchLabels: 20 | {{- include "kube-httpcache.selectorLabels" . | nindent 6 }} 21 | template: 22 | metadata: 23 | {{- with .Values.podAnnotations }} 24 | annotations: 25 | {{- toYaml . | nindent 8 }} 26 | {{- end }} 27 | labels: 28 | {{- include "kube-httpcache.selectorLabels" . | nindent 8 }} 29 | {{- with .Values.podLabels }} 30 | {{- toYaml . | nindent 8 }} 31 | {{- end }} 32 | spec: 33 | {{- with .Values.imagePullSecrets }} 34 | imagePullSecrets: 35 | {{- toYaml . | nindent 8 }} 36 | {{- end }} 37 | serviceAccountName: {{ include "kube-httpcache.serviceAccountName" . }} 38 | securityContext: 39 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 40 | {{- with .Values.topologySpreadConstraints }} 41 | topologySpreadConstraints: 42 | {{- toYaml . | nindent 8 }} 43 | {{- end }} 44 | {{- if .Values.initContainers }} 45 | initContainers: 46 | {{- with .Values.initContainers }} 47 | {{- tpl . 
$ | nindent 8 }} 48 | {{- end }} 49 | {{- end }} 50 | containers: 51 | - name: {{ .Chart.Name }} 52 | securityContext: 53 | {{- toYaml .Values.securityContext | nindent 12 }} 54 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 55 | imagePullPolicy: {{ .Values.image.pullPolicy }} 56 | args: 57 | - -admin-addr=0.0.0.0 58 | - -admin-port=6083 59 | - -signaller-enable 60 | - -signaller-port=8090 61 | - -frontend-port={{ .Values.service.target }} 62 | {{- if .Values.cache.frontendWatch }} 63 | - -frontend-watch 64 | {{- else }} 65 | - -frontend-watch=false 66 | {{- end }} 67 | - -frontend-namespace={{ "$(NAMESPACE)" }} 68 | - -frontend-service={{ .Values.cache.frontendService | default (include "kube-httpcache.fullname" .) }} 69 | {{- if .Values.cache.backendWatch }} 70 | - -backend-watch 71 | {{- else }} 72 | - -backend-watch=false 73 | {{- end }} 74 | - -backend-namespace={{ .Values.cache.backendServiceNamespace | default "$(NAMESPACE)" }} 75 | - -backend-service={{ tpl .Values.cache.backendService . }} 76 | - -varnish-secret-file=/etc/varnish/k8s-secret/secret 77 | - -varnish-vcl-template=/etc/varnish/tmpl/default.vcl.tmpl 78 | - -varnish-storage={{ .Values.cache.varnishStorage }},{{ .Values.cache.storageSize }} 79 | {{- if .Values.configmap.enabled }} 80 | {{/* When loading the VCL template from a ConfigMap, inotify is not reliable */}} 81 | - -varnish-vcl-template-poll 82 | {{- end }} 83 | {{- if .Values.cache.varnishTransientStorage }} 84 | - -varnish-transient-storage={{ .Values.cache.varnishTransientStorage }},{{ .Values.cache.transientStorageSize }} 85 | {{- end }} 86 | {{- if .Values.cacheExtraArgs }} 87 | {{- with .Values.cacheExtraArgs }} 88 | {{- tpl . $ | trim | nindent 10 }} 89 | {{- end }} 90 | {{- end }} 91 | env: 92 | - name: NAMESPACE 93 | valueFrom: 94 | fieldRef: 95 | fieldPath: metadata.namespace 96 | {{- with .Values.extraEnvVars }} 97 | {{- toYaml . 
| nindent 10 }} 98 | {{- end }} 99 | {{- if .Values.extraEnvFromConfig }} 100 | envFrom: 101 | {{- toYaml .Values.extraEnvFromConfig | nindent 12 }} 102 | {{- end }} 103 | {{- if .Values.lifecycle }} 104 | lifecycle: 105 | {{- toYaml .Values.lifecycle | nindent 12 }} 106 | {{- end }} 107 | volumeMounts: 108 | - name: template 109 | mountPath: /etc/varnish/tmpl 110 | - name: secret 111 | mountPath: /etc/varnish/k8s-secret 112 | - name: var 113 | mountPath: /var/lib/varnish 114 | {{- if .Values.extraMounts }} 115 | {{- toYaml .Values.extraMounts | nindent 10 }} 116 | {{- end }} 117 | {{- if .Values.livenessProbe }} 118 | livenessProbe: 119 | {{- toYaml .Values.livenessProbe | nindent 12 }} 120 | {{- end }} 121 | {{- if .Values.readinessProbe }} 122 | readinessProbe: 123 | {{- toYaml .Values.readinessProbe | nindent 12 }} 124 | {{- end }} 125 | {{- if .Values.resources }} 126 | resources: 127 | {{- toYaml .Values.resources | nindent 12 }} 128 | {{- end }} 129 | {{- if .Values.exporter.enabled }} 130 | - name: exporter 131 | securityContext: 132 | {{- toYaml .Values.exporter.securityContext | nindent 12 }} 133 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 134 | imagePullPolicy: {{ .Values.image.pullPolicy }} 135 | command: 136 | - /exporter/prometheus_varnish_exporter 137 | args: 138 | - -no-exit 139 | env: 140 | - name: VSM_NOPID 141 | value: "1" 142 | ports: 143 | - name: metrics 144 | containerPort: 9131 145 | volumeMounts: 146 | - name: var 147 | mountPath: /var/lib/varnish 148 | {{- if .Values.exporter.livenessProbe }} 149 | livenessProbe: 150 | {{- toYaml .Values.exporter.livenessProbe | nindent 12 }} 151 | {{- end }} 152 | {{- if .Values.exporter.readinessProbe }} 153 | readinessProbe: 154 | {{- toYaml .Values.exporter.readinessProbe | nindent 12 }} 155 | {{- end }} 156 | {{- if .Values.exporter.resources }} 157 | resources: 158 | {{- toYaml .Values.exporter.resources | nindent 12 }} 159 | {{- end }} 160 | {{- end }} 161 | {{- if .Values.extraContainers }} 162 | {{- toYaml .Values.extraContainers | nindent 8 }} 163 | {{- end }} 164 | volumes: 165 | {{- if .Values.configmap.enabled }} 166 | - name: template 167 | configMap: 168 | name: {{ include "kube-httpcache.fullname" . }} 169 | {{- end }} 170 | - name: secret 171 | secret: 172 | secretName: {{ .Values.cache.existingSecret | default (include "kube-httpcache.fullname" .) }} 173 | - name: var 174 | emptyDir: {} 175 | {{- if .Values.extraVolumes }} 176 | {{- toYaml .Values.extraVolumes | nindent 6 }} 177 | {{- end }} 178 | {{- with .Values.nodeSelector }} 179 | nodeSelector: 180 | {{- toYaml . | nindent 8 }} 181 | {{- end }} 182 | {{- with .Values.affinity }} 183 | affinity: 184 | {{- toYaml . | nindent 8 }} 185 | {{- end }} 186 | {{- with .Values.tolerations }} 187 | tolerations: 188 | {{- toYaml . | nindent 8 }} 189 | {{- end }} 190 | {{- if .Values.terminationGracePeriodSeconds }} 191 | terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} 192 | {{- end }} 193 | {{- end }} 194 | -------------------------------------------------------------------------------- /chart/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for kube-httpcache. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
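# Most installations will at least want to override cache.backendService (the service
# to be cached) and vclTemplate (the VCL used by Varnish); see the comments on the
# individual values below and the project README for details.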
4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: quay.io/mittwald/kube-httpcache 9 | pullPolicy: IfNotPresent 10 | tag: "stable" 11 | 12 | imagePullSecrets: [] 13 | nameOverride: "" 14 | fullnameOverride: "" 15 | 16 | # Enable StatefulSet (Deployment is default) 17 | useStatefulset: 18 | enabled: true 19 | 20 | # Enable configMap for Varnish Template File (see below vclTemplate) 21 | # OR use extravolume with name "template" if the file is too big 22 | configmap: 23 | enabled: true 24 | 25 | # kube-httpcache specific configuration 26 | cache: 27 | # name of frontend service 28 | # frontendService: kube-httpcache-headless 29 | # name of backend service (this value is evaluated as a template) 30 | backendService: backend-service 31 | # name of backend service namespace 32 | # backendServiceNamespace: backend-service-namespace 33 | # watching for frontend changes is true by default 34 | frontendWatch: true 35 | # watching for backend changes is true by default 36 | backendWatch: true 37 | # Varnish storage backend type (https://varnish-cache.org/docs/trunk/users-guide/storage-backends.html) 38 | varnishStorage: malloc # default,malloc,umem,file... 39 | # Varnish storage backend size 40 | storageSize: 128M # K(ibibytes), M(ebibytes), G(ibibytes), T(ebibytes) ... unlimited 41 | # Varnish transient storage backend type (https://varnish-cache.org/docs/trunk/users-guide/storage-backends.html) 42 | #varnishTransientStorage: malloc 43 | # Varnish transient storage backend size 44 | #transientStorageSize: 128M # K(ibibytes), M(ebibytes), G(ibibytes), T(ebibytes) ... unlimited 45 | # Secret for Varnish admin credentials 46 | #secret: "12345678" 47 | # Read admin credentials from user provided secret 48 | #existingSecret: kubecache-secret 49 | 50 | cacheExtraArgs: {} 51 | # cacheExtraArgs: | 52 | # - -v=8 53 | # - -varnish-additional-parameters=vcc_allow_inline_c=on 54 | 55 | serviceAccount: 56 | # Specifies whether a service account should be created 57 | enabled: true 58 | # Annotations to add to the service account 59 | annotations: {} 60 | # The name of the service account to use. 61 | # If not set and create is true, a name is generated using the fullname template 62 | name: "" 63 | 64 | rbac: 65 | enabled: true 66 | 67 | # create a prometheus operator ServiceMonitor 68 | serviceMonitor: 69 | enabled: false 70 | 71 | additionalLabels: {} 72 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 73 | interval: 10s 74 | ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. 
75 | scrapeTimeout: "" 76 | 77 | # Set this to also scrape the signaller metrics in addition to the varnish metrics 78 | scrapeSignaller: false 79 | 80 | podSecurityPolicy: 81 | enabled: false 82 | # name: unrestricted-psp 83 | 84 | annotations: {} 85 | podAnnotations: {} 86 | podLabels: {} 87 | 88 | podSecurityContext: {} 89 | # fsGroup: 2000 90 | 91 | securityContext: {} 92 | # capabilities: 93 | # drop: 94 | # - ALL 95 | # readOnlyRootFilesystem: true 96 | # runAsNonRoot: true 97 | # runAsUser: 1000 98 | 99 | lifecycle: {} 100 | # preStop: 101 | # exec: 102 | # command: 103 | # - /bin/sh 104 | # - -c 105 | # - touch /etc/varnish/fail_probes; sleep 25 106 | 107 | topologySpreadConstraints: {} 108 | # - topologyKey: topology.kubernetes.io/zone 109 | # maxSkew: 1 110 | # whenUnsatisfiable: ScheduleAnyway 111 | # labelSelector: 112 | # matchLabels: 113 | # app.kubernetes.io/name: kube-httpcache 114 | # - topologyKey: kubernetes.io/hostname 115 | # maxSkew: 1 116 | # whenUnsatisfiable: ScheduleAnyway 117 | # labelSelector: 118 | # matchLabels: 119 | # app.kubernetes.io/name: kube-httpcache 120 | 121 | initContainers: {} 122 | # initContainers: | 123 | # - args: 124 | # - -c 125 | # - | 126 | # echo "Copying external varnish template from..." 127 | # command: 128 | # - sh 129 | # image: busybox:latest 130 | # imagePullPolicy: IfNotPresent 131 | # name: varnishtemplate 132 | # resources: {} 133 | # terminationMessagePath: /dev/termination-log 134 | # terminationMessagePolicy: File 135 | # volumeMounts: 136 | # - name: template 137 | # mountPath: /etc/varnish/tmpl 138 | 139 | extraContainers: [] 140 | # - name: my-sidecar 141 | # image: myapp/my-sidecar 142 | # command: 143 | # - my-sidecar-command 144 | 145 | extraVolumes: {} 146 | # extraVolumes: 147 | # - emptyDir: {} 148 | # name: template 149 | 150 | extraMounts: {} 151 | # extraMounts: 152 | # - name: geoip 153 | # mountPath: /var/lib/geoip 154 | 155 | extraEnvVars: {} 156 | #extraEnvVars: 157 | # - name: foo 158 | # value: bar 159 | 160 | 161 | extraEnvFromConfig: {} 162 | #extraEnvFromConfig: 163 | # - configMapRef: 164 | # name: my-configmap-name 165 | # - secretRef: 166 | # name: my-secret-name 167 | 168 | 169 | exporter: 170 | enabled: false 171 | securityContext: {} 172 | # capabilities: 173 | # drop: 174 | # - ALL 175 | # readOnlyRootFilesystem: true 176 | # runAsNonRoot: true 177 | # runAsUser: 1000 178 | resources: {} 179 | livenessProbe: {} 180 | # livenessProbe: 181 | # httpGet: 182 | # path: / 183 | # port: 6083 184 | readinessProbe: {} 185 | 186 | service: 187 | type: ClusterIP 188 | port: 80 189 | target: 8080 190 | # annotations: {} 191 | 192 | ingress: 193 | enabled: false 194 | annotations: {} 195 | # kubernetes.io/tls-acme: "true" 196 | className: nginx 197 | hosts: [] 198 | # hosts: 199 | # - host: www.example.com 200 | # paths: 201 | # - path: / 202 | # pathType: Prefix 203 | # backend: 204 | # service: 205 | # name: kube-httpcache 206 | # port: 207 | # number: 80 208 | # - path: /backend 209 | # backend: 210 | # name: backend-service 211 | # port: 212 | # number: 8080 213 | # - host: www2.example.com 214 | # paths: 215 | # - path: / 216 | # pathType: Prefix 217 | # backend: 218 | # name: kube-httpcache 219 | # port: 220 | # number: 80 221 | tls: [] 222 | # - secretName: chart-example-tls 223 | # hosts: 224 | # - chart-example.local 225 | 226 | resources: {} 227 | # We usually recommend not to specify default resources and to leave this as a conscious 228 | # choice for the user. 
This also increases chances charts run on environments with little 229 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 230 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 231 | # limits: 232 | # cpu: 100m 233 | # memory: 128Mi 234 | # requests: 235 | # cpu: 100m 236 | # memory: 128Mi 237 | 238 | autoscaling: 239 | enabled: false 240 | minReplicas: 1 241 | maxReplicas: 100 242 | targetCPUUtilizationPercentage: 80 243 | # targetMemoryUtilizationPercentage: 80 244 | 245 | nodeSelector: {} 246 | 247 | tolerations: [] 248 | 249 | #terminationGracePeriodSeconds: 60 250 | 251 | affinity: {} 252 | 253 | livenessProbe: {} 254 | # livenessProbe: 255 | # httpGet: 256 | # path: / 257 | # port: 6083 258 | readinessProbe: {} 259 | 260 | vclTemplate: | 261 | vcl 4.0; 262 | 263 | import std; 264 | import directors; 265 | 266 | // ".Frontends" is a slice that contains all known Varnish instances 267 | // (as selected by the service specified by -frontend-service). 268 | // The backend name needs to be the Pod name, since this value is compared 269 | // to the server identity ("server.identity" [1]) later. 270 | // 271 | // [1]: https://varnish-cache.org/docs/6.4/reference/vcl.html#local-server-remote-and-client 272 | {{ range .Frontends }} 273 | backend {{ .Name }} { 274 | .host = "{{ .Host }}"; 275 | .port = "{{ .Port }}"; 276 | } 277 | {{- end }} 278 | 279 | {{ range .Backends }} 280 | backend be-{{ .Name }} { 281 | .host = "{{ .Host }}"; 282 | .port = "{{ .Port }}"; 283 | } 284 | {{- end }} 285 | 286 | sub vcl_init { 287 | new cluster = directors.hash(); 288 | 289 | {{ range .Frontends -}} 290 | cluster.add_backend({{ .Name }}, 1); 291 | {{ end }} 292 | 293 | new lb = directors.round_robin(); 294 | 295 | {{ range .Backends -}} 296 | lb.add_backend(be-{{ .Name }}); 297 | {{ end }} 298 | } 299 | 300 | sub vcl_recv 301 | { 302 | # Set backend hint for non cachable objects. 303 | set req.backend_hint = lb.backend(); 304 | 305 | # ... 306 | 307 | # Routing logic. Pass a request to an appropriate Varnish node. 308 | # See https://info.varnish-software.com/blog/creating-self-routing-varnish-cluster for more info. 309 | unset req.http.x-cache; 310 | set req.backend_hint = cluster.backend(req.url); 311 | set req.http.x-shard = req.backend_hint; 312 | if (req.http.x-shard != server.identity) { 313 | return(pass); 314 | } 315 | set req.backend_hint = lb.backend(); 316 | 317 | # ... 
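    # Reaching this point means this node owns the shard for req.url
    # (x-shard equals server.identity): the request is looked up in the
    # local cache, and misses are fetched from the application backends
    # through the round-robin director "lb" configured in vcl_init.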
318 | 319 | return(hash); 320 | } 321 | -------------------------------------------------------------------------------- /cmd/kube-httpcache/internal/flags.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "flag" 5 | "time" 6 | 7 | "github.com/golang/glog" 8 | ) 9 | 10 | type KubeHTTPProxyFlags struct { 11 | Kubernetes struct { 12 | Config string 13 | RetryBackoffString string 14 | RetryBackoff time.Duration 15 | } 16 | Frontend struct { 17 | Address string 18 | Port int 19 | Watch bool 20 | Namespace string 21 | Service string 22 | PortName string 23 | } 24 | Backend struct { 25 | Watch bool 26 | Namespace string 27 | Service string 28 | Port string 29 | PortName string 30 | } 31 | Signaller struct { 32 | Enable bool 33 | Address string 34 | Port int 35 | WorkersCount int 36 | MaxRetries int 37 | RetryBackoffString string 38 | RetryBackoff time.Duration 39 | QueueLength int 40 | MaxConnsPerHost int 41 | MaxIdleConns int 42 | MaxIdleConnsPerHost int 43 | UpstreamRequestTimeoutString string 44 | UpstreamRequestTimeout time.Duration 45 | } 46 | Admin struct { 47 | Address string 48 | Port int 49 | } 50 | Varnish struct { 51 | SecretFile string 52 | Storage string 53 | TransientStorage string 54 | AdditionalParameters string 55 | VCLTemplate string 56 | VCLTemplatePoll bool 57 | WorkingDir string 58 | } 59 | Readiness struct { 60 | Enable bool 61 | Address string 62 | } 63 | } 64 | 65 | func (f *KubeHTTPProxyFlags) Parse() error { 66 | var err error 67 | 68 | flag.StringVar(&f.Kubernetes.Config, "kubeconfig", "", "kubeconfig file") 69 | flag.StringVar(&f.Kubernetes.RetryBackoffString, "retry-backoff", "30s", "backoff for Kubernetes API reconnection attempts") 70 | 71 | flag.StringVar(&f.Frontend.Address, "frontend-addr", "0.0.0.0", "TCP address to listen on") 72 | flag.IntVar(&f.Frontend.Port, "frontend-port", 8080, "TCP port to listen on") 73 | 74 | flag.BoolVar(&f.Frontend.Watch, "frontend-watch", false, "watch for Kubernetes frontend updates") 75 | flag.StringVar(&f.Frontend.Namespace, "frontend-namespace", "", "name of Kubernetes frontend namespace") 76 | flag.StringVar(&f.Frontend.Service, "frontend-service", "", "name of Kubernetes frontend service") 77 | flag.StringVar(&f.Frontend.PortName, "frontend-portname", "http", "name of frontend port") 78 | 79 | flag.BoolVar(&f.Backend.Watch, "backend-watch", true, "watch for Kubernetes backend updates") 80 | flag.StringVar(&f.Backend.Namespace, "backend-namespace", "", "name of Kubernetes backend namespace") 81 | flag.StringVar(&f.Backend.Service, "backend-service", "", "name of Kubernetes backend service") 82 | flag.StringVar(&f.Backend.Port, "backend-port", "", "deprecated: name of backend port") 83 | flag.StringVar(&f.Backend.PortName, "backend-portname", "http", "name of backend port") 84 | 85 | flag.BoolVar(&f.Signaller.Enable, "signaller-enable", false, "enable signaller functionality for broadcasting PURGE and BAN requests") 86 | flag.StringVar(&f.Signaller.Address, "signaller-addr", "0.0.0.0", "TCP address for the signaller") 87 | flag.IntVar(&f.Signaller.Port, "signaller-port", 8090, "TCP port for the signaller") 88 | flag.IntVar(&f.Signaller.WorkersCount, "signaller-workers", 1, "number of workers to process requests") 89 | flag.IntVar(&f.Signaller.MaxRetries, "signaller-retries", 5, "maximum number of attempts for signalling request") 90 | flag.StringVar(&f.Signaller.RetryBackoffString, "signaller-backoff", "30s", "backoff for signalling request attempts") 
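	// A queue length of 0 (the default) makes the signal queue an unbuffered
	// channel; negative values are caught by NewSignaller and fall back to 0.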
91 | flag.IntVar(&f.Signaller.QueueLength, "signaller-queue-length", 0, "length of signaller's processing queue") 92 | flag.IntVar(&f.Signaller.MaxConnsPerHost, "signaller-max-conns-per-host", -1, 93 | "set http.Transport.MaxConnsPerHost in signaller http-client, available then upstream connection reuse is enabled") 94 | flag.IntVar(&f.Signaller.MaxIdleConns, "signaller-max-idle-conns", -1, 95 | "set http.Transport.MaxIdleConns in signaller http-client, available then upstream connection reuse is enabled") 96 | flag.IntVar(&f.Signaller.MaxIdleConnsPerHost, "signaller-max-idle-conns-per-host", -1, 97 | "set http.Transport.MaxIdleConnsPerHost in signaller http-client, available then upstream connection reuse is enabled") 98 | flag.StringVar(&f.Signaller.UpstreamRequestTimeoutString, "signaller-request-timeout", "", "timeout for an outgoing signaller request") 99 | 100 | flag.StringVar(&f.Admin.Address, "admin-addr", "127.0.0.1", "TCP address for the Varnish admin") 101 | flag.IntVar(&f.Admin.Port, "admin-port", 6082, "TCP port for the Varnish admin") 102 | 103 | flag.StringVar(&f.Varnish.SecretFile, "varnish-secret-file", "/etc/varnish/secret", "Varnish secret file") 104 | flag.StringVar(&f.Varnish.Storage, "varnish-storage", "file,/tmp/varnish-data,1G", "varnish storage config") 105 | flag.StringVar(&f.Varnish.TransientStorage, "varnish-transient-storage", "malloc,128m", "varnish transient storage config") 106 | flag.StringVar(&f.Varnish.VCLTemplate, "varnish-vcl-template", "/etc/varnish/default.vcl.tmpl", "VCL template file") 107 | flag.StringVar(&f.Varnish.AdditionalParameters, "varnish-additional-parameters", "", "Additional Varnish start parameters (-p, seperated by comma), like 'ban_dups=on,cli_timeout=30'") 108 | flag.BoolVar(&f.Varnish.VCLTemplatePoll, "varnish-vcl-template-poll", false, "poll for file changes instead of using inotify (useful on some network filesystems)") 109 | flag.StringVar(&f.Varnish.WorkingDir, "varnish-working-dir", "", "varnish working directory (-n)") 110 | 111 | // present for BC only; no effect until #36 [1] has resolved 112 | // [1]: https://github.com/mittwald/kube-httpcache/issues/36 113 | flag.BoolVar(&f.Readiness.Enable, "readiness-enable", true, "enable readiness probe") 114 | flag.StringVar(&f.Readiness.Address, "readiness-addr", "0.0.0.0:9102", "address for the readiness probe to listen on") 115 | 116 | flag.Parse() 117 | 118 | if len(f.Backend.Port) > 0 { 119 | f.Backend.PortName = f.Backend.Port 120 | glog.Warningf("-backend-port flag has been deprecated in favor of -backend-portname and will be removed in future versions") 121 | } 122 | 123 | f.Kubernetes.RetryBackoff, err = time.ParseDuration(f.Kubernetes.RetryBackoffString) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | f.Signaller.RetryBackoff, err = time.ParseDuration(f.Signaller.RetryBackoffString) 129 | if err != nil { 130 | return err 131 | } 132 | 133 | if f.Signaller.UpstreamRequestTimeoutString != "" { 134 | f.Signaller.UpstreamRequestTimeout, err = time.ParseDuration(f.Signaller.UpstreamRequestTimeoutString) 135 | if err != nil { 136 | return err 137 | } 138 | } 139 | 140 | return nil 141 | } 142 | -------------------------------------------------------------------------------- /cmd/kube-httpcache/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "os" 7 | "os/signal" 8 | "syscall" 9 | 10 | "github.com/golang/glog" 11 | 
"github.com/mittwald/kube-httpcache/cmd/kube-httpcache/internal" 12 | "github.com/mittwald/kube-httpcache/pkg/controller" 13 | "github.com/mittwald/kube-httpcache/pkg/signaller" 14 | "github.com/mittwald/kube-httpcache/pkg/watcher" 15 | "k8s.io/client-go/kubernetes" 16 | "k8s.io/client-go/rest" 17 | "k8s.io/client-go/tools/clientcmd" 18 | ) 19 | 20 | var opts internal.KubeHTTPProxyFlags 21 | 22 | func init() { 23 | flag.Set("logtostderr", "true") 24 | } 25 | 26 | func main() { 27 | if err := opts.Parse(); err != nil { 28 | panic(err) 29 | } 30 | 31 | glog.Infof("running kube-httpcache with following options: %+v", opts) 32 | 33 | var config *rest.Config 34 | var err error 35 | var client kubernetes.Interface 36 | 37 | if opts.Kubernetes.Config == "" { 38 | glog.Infof("using in-cluster configuration") 39 | config, err = rest.InClusterConfig() 40 | } else { 41 | glog.Infof("using configuration from '%s'", opts.Kubernetes.Config) 42 | config, err = clientcmd.BuildConfigFromFlags("", opts.Kubernetes.Config) 43 | } 44 | 45 | if err != nil { 46 | panic(err) 47 | } 48 | 49 | client = kubernetes.NewForConfigOrDie(config) 50 | ctx, cancel := context.WithCancel(context.Background()) 51 | 52 | var frontendUpdates chan *watcher.EndpointConfig 53 | var frontendErrors chan error 54 | if opts.Frontend.Watch { 55 | frontendWatcher := watcher.NewEndpointWatcher( 56 | client, 57 | opts.Frontend.Namespace, 58 | opts.Frontend.Service, 59 | opts.Frontend.PortName, 60 | opts.Kubernetes.RetryBackoff, 61 | ) 62 | frontendUpdates, frontendErrors = frontendWatcher.Run(ctx) 63 | } 64 | 65 | var backendUpdates chan *watcher.EndpointConfig 66 | var backendErrors chan error 67 | if opts.Backend.Watch { 68 | backendWatcher := watcher.NewEndpointWatcher( 69 | client, 70 | opts.Backend.Namespace, 71 | opts.Backend.Service, 72 | opts.Backend.PortName, 73 | opts.Kubernetes.RetryBackoff, 74 | ) 75 | backendUpdates, backendErrors = backendWatcher.Run(ctx) 76 | } 77 | 78 | templateWatcher := watcher.MustNewTemplateWatcher(opts.Varnish.VCLTemplate, opts.Varnish.VCLTemplatePoll) 79 | templateUpdates, templateErrors := templateWatcher.Run() 80 | 81 | var varnishSignaller *signaller.Signaller 82 | var varnishSignallerErrors chan error 83 | if opts.Signaller.Enable { 84 | varnishSignaller = signaller.NewSignaller( 85 | opts.Signaller.Address, 86 | opts.Signaller.Port, 87 | opts.Signaller.WorkersCount, 88 | opts.Signaller.MaxRetries, 89 | opts.Signaller.RetryBackoff, 90 | opts.Signaller.QueueLength, 91 | opts.Signaller.MaxConnsPerHost, 92 | opts.Signaller.MaxIdleConns, 93 | opts.Signaller.MaxIdleConnsPerHost, 94 | opts.Signaller.UpstreamRequestTimeout, 95 | ) 96 | varnishSignallerErrors = varnishSignaller.GetErrors() 97 | 98 | go func() { 99 | err = varnishSignaller.Run() 100 | if err != nil { 101 | panic(err) 102 | } 103 | }() 104 | } 105 | 106 | go func() { 107 | for { 108 | select { 109 | case err := <-frontendErrors: 110 | glog.Errorf("error while watching frontends: %s", err.Error()) 111 | case err := <-backendErrors: 112 | glog.Errorf("error while watching backends: %s", err.Error()) 113 | case err := <-templateErrors: 114 | glog.Errorf("error while watching template changes: %s", err.Error()) 115 | case err := <-varnishSignallerErrors: 116 | glog.Errorf("error while running varnish signaller: %s", err.Error()) 117 | } 118 | } 119 | }() 120 | 121 | varnishController, err := controller.NewVarnishController( 122 | opts.Varnish.SecretFile, 123 | opts.Varnish.Storage, 124 | opts.Varnish.TransientStorage, 125 | 
opts.Varnish.AdditionalParameters, 126 | opts.Varnish.WorkingDir, 127 | opts.Frontend.Address, 128 | opts.Frontend.Port, 129 | opts.Admin.Address, 130 | opts.Admin.Port, 131 | frontendUpdates, 132 | backendUpdates, 133 | templateUpdates, 134 | varnishSignaller, 135 | opts.Varnish.VCLTemplate, 136 | ) 137 | if err != nil { 138 | panic(err) 139 | } 140 | 141 | signals := make(chan os.Signal, 1) 142 | 143 | signal.Notify(signals, syscall.SIGINT) 144 | signal.Notify(signals, syscall.SIGTERM) 145 | 146 | go func() { 147 | s := <-signals 148 | 149 | glog.Infof("received signal %s", s) 150 | cancel() 151 | }() 152 | 153 | err = varnishController.Run(ctx) 154 | if err != nil { 155 | panic(err) 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /deploy/kubernetes/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: kube-httpcache 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - endpoints 10 | - pods 11 | verbs: 12 | - watch 13 | - get -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/mittwald/kube-httpcache 2 | 3 | go 1.20 4 | 5 | require ( 6 | github.com/fsnotify/fsnotify v1.7.0 7 | github.com/golang/glog v1.2.1 8 | github.com/martin-helmich/go-varnish-client v0.3.1 9 | github.com/prometheus/client_golang v1.19.1 10 | k8s.io/api v0.29.4 11 | k8s.io/apimachinery v0.29.4 12 | k8s.io/client-go v0.29.4 13 | ) 14 | 15 | require ( 16 | github.com/beorn7/perks v1.0.1 // indirect 17 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 18 | github.com/davecgh/go-spew v1.1.1 // indirect 19 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 20 | github.com/go-logr/logr v1.3.0 // indirect 21 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 22 | github.com/go-openapi/jsonreference v0.20.2 // indirect 23 | github.com/go-openapi/swag v0.22.3 // indirect 24 | github.com/gogo/protobuf v1.3.2 // indirect 25 | github.com/golang/protobuf v1.5.4 // indirect 26 | github.com/google/gnostic-models v0.6.8 // indirect 27 | github.com/google/gofuzz v1.2.0 // indirect 28 | github.com/google/uuid v1.3.0 // indirect 29 | github.com/imdario/mergo v0.3.6 // indirect 30 | github.com/josharian/intern v1.0.0 // indirect 31 | github.com/json-iterator/go v1.1.12 // indirect 32 | github.com/mailru/easyjson v0.7.7 // indirect 33 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 34 | github.com/modern-go/reflect2 v1.0.2 // indirect 35 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 36 | github.com/prometheus/client_model v0.5.0 // indirect 37 | github.com/prometheus/common v0.48.0 // indirect 38 | github.com/prometheus/procfs v0.12.0 // indirect 39 | github.com/spf13/pflag v1.0.5 // indirect 40 | golang.org/x/net v0.23.0 // indirect 41 | golang.org/x/oauth2 v0.16.0 // indirect 42 | golang.org/x/sys v0.18.0 // indirect 43 | golang.org/x/term v0.18.0 // indirect 44 | golang.org/x/text v0.14.0 // indirect 45 | golang.org/x/time v0.3.0 // indirect 46 | google.golang.org/appengine v1.6.7 // indirect 47 | google.golang.org/protobuf v1.33.0 // indirect 48 | gopkg.in/inf.v0 v0.9.1 // indirect 49 | gopkg.in/yaml.v2 v2.4.0 // indirect 50 | gopkg.in/yaml.v3 v3.0.1 // indirect 51 | k8s.io/klog/v2 v2.110.1 // indirect 52 | k8s.io/kube-openapi 
v0.0.0-20231010175941-2dd684a91f00 // indirect 53 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect 54 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 55 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 56 | sigs.k8s.io/yaml v1.3.0 // indirect 57 | ) 58 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 2 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 3 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 4 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 5 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 6 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 8 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= 10 | github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 11 | github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 12 | github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= 13 | github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= 14 | github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 15 | github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= 16 | github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= 17 | github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= 18 | github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= 19 | github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= 20 | github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= 21 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= 22 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 23 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 24 | github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= 25 | github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= 26 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 27 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 28 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 29 | github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= 30 | github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= 31 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 32 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 33 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 34 | github.com/google/gofuzz 
v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= 35 | github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 36 | github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= 37 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 38 | github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 39 | github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= 40 | github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= 41 | github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 42 | github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 43 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 44 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 45 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 46 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 47 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 48 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 49 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 50 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 51 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 52 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 53 | github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= 54 | github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 55 | github.com/martin-helmich/go-varnish-client v0.3.1 h1:d3HL/wNRQ/dKm3sA/PgjbyHjX18QSDAIkVpKRgrQCj0= 56 | github.com/martin-helmich/go-varnish-client v0.3.1/go.mod h1:TSeYD0BPAFHLpYrCH2sYcF6T14yh4QlyuYsMwLBqqes= 57 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 58 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 59 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 60 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 61 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 62 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 63 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 64 | github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU= 65 | github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= 66 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 67 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 68 | github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= 69 | github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= 70 | github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= 71 | 
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= 72 | github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= 73 | github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= 74 | github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= 75 | github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= 76 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 77 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 78 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 79 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 80 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 81 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 82 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 83 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 84 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 85 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 86 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 87 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 88 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 89 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 90 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 91 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 92 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 93 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 94 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 95 | golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= 96 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 97 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 98 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 99 | golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= 100 | golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= 101 | golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= 102 | golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= 103 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 104 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 105 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 106 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 107 | golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 108 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 109 | golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= 110 | golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 111 | golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= 112 | golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= 113 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 114 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 115 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 116 | golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= 117 | golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 118 | golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= 119 | golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 120 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 121 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 122 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 123 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 124 | golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= 125 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 126 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 127 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 128 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 129 | google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= 130 | google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 131 | google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= 132 | google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= 133 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 134 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 135 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 136 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 137 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 138 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 139 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 140 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 141 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 142 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 143 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 144 | k8s.io/api v0.29.4 
h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= 145 | k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= 146 | k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= 147 | k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= 148 | k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= 149 | k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= 150 | k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= 151 | k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= 152 | k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= 153 | k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= 154 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= 155 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 156 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= 157 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= 158 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= 159 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= 160 | sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= 161 | sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= 162 | -------------------------------------------------------------------------------- /pkg/controller/run.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "os/exec" 8 | "strings" 9 | 10 | "github.com/golang/glog" 11 | "github.com/mittwald/kube-httpcache/pkg/watcher" 12 | ) 13 | 14 | func (v *VarnishController) Run(ctx context.Context) error { 15 | glog.Infof("waiting for initial configuration before starting Varnish") 16 | 17 | v.frontend = watcher.NewEndpointConfig() 18 | if v.frontendUpdates != nil { 19 | v.frontend = <-v.frontendUpdates 20 | if v.varnishSignaller != nil { 21 | v.varnishSignaller.SetEndpoints(v.frontend) 22 | } 23 | } 24 | 25 | v.backend = watcher.NewEndpointConfig() 26 | if v.backendUpdates != nil { 27 | v.backend = <-v.backendUpdates 28 | } 29 | 30 | target, err := os.Create(v.configFile) 31 | if err != nil { 32 | return err 33 | } 34 | 35 | glog.Infof("creating initial VCL config") 36 | err = v.renderVCL(target, v.frontend.Endpoints, v.frontend.Primary, v.backend.Endpoints, v.backend.Primary) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | cmd, errChan := v.startVarnish(ctx) 42 | 43 | if err := v.waitForAdminPort(ctx); err != nil { 44 | return err 45 | } 46 | 47 | watchErrors := make(chan error) 48 | go v.watchConfigUpdates(ctx, cmd, watchErrors) 49 | 50 | go func() { 51 | for err := range watchErrors { 52 | if err != nil { 53 | glog.Warningf("error while watching for updates: %s", err.Error()) 54 | } 55 | } 56 | }() 57 | 58 | return <-errChan 59 | } 60 | 61 | func (v *VarnishController) startVarnish(ctx context.Context) (*exec.Cmd, <-chan error) { 62 | c := exec.CommandContext( 63 | ctx, 64 | "varnishd", 65 | v.generateArgs()..., 66 | ) 67 | 68 | c.Dir = "/" 69 | c.Stdout = os.Stdout 70 | c.Stderr = os.Stderr 71 | 72 | r := make(chan 
error) 73 | 74 | go func() { 75 | err := c.Run() 76 | r <- err 77 | }() 78 | 79 | return c, r 80 | } 81 | 82 | func (v *VarnishController) generateArgs() []string { 83 | args := []string{ 84 | "-F", 85 | "-f", v.configFile, 86 | "-S", v.SecretFile, 87 | "-s", fmt.Sprintf("Cache=%s", v.Storage), 88 | "-s", fmt.Sprintf("Transient=%s", v.TransientStorage), 89 | "-a", fmt.Sprintf("%s:%d", v.FrontendAddr, v.FrontendPort), 90 | "-T", fmt.Sprintf("%s:%d", v.AdminAddr, v.AdminPort), 91 | } 92 | 93 | if v.AdditionalParameters != "" { 94 | for _, val := range strings.Split(v.AdditionalParameters, ",") { 95 | args = append(args, "-p") 96 | args = append(args, val) 97 | } 98 | } 99 | 100 | if v.WorkingDir != "" { 101 | args = append(args, "-n", v.WorkingDir) 102 | } 103 | 104 | return args 105 | } 106 | -------------------------------------------------------------------------------- /pkg/controller/types.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "github.com/golang/glog" 5 | "io" 6 | "os" 7 | "strings" 8 | "text/template" 9 | 10 | "github.com/mittwald/kube-httpcache/pkg/signaller" 11 | "github.com/mittwald/kube-httpcache/pkg/watcher" 12 | ) 13 | 14 | type TemplateData struct { 15 | Frontends watcher.EndpointList 16 | PrimaryFrontend *watcher.Endpoint 17 | Backends watcher.EndpointList 18 | PrimaryBackend *watcher.Endpoint 19 | Env map[string]string 20 | } 21 | 22 | type VarnishController struct { 23 | SecretFile string 24 | Storage string 25 | TransientStorage string 26 | AdditionalParameters string 27 | WorkingDir string 28 | FrontendAddr string 29 | FrontendPort int 30 | AdminAddr string 31 | AdminPort int 32 | 33 | vclTemplate *template.Template 34 | // md5 hash of unparsed template 35 | vclTemplateHash string 36 | vclTemplateUpdates chan []byte 37 | frontendUpdates chan *watcher.EndpointConfig 38 | frontend *watcher.EndpointConfig 39 | backendUpdates chan *watcher.EndpointConfig 40 | backend *watcher.EndpointConfig 41 | varnishSignaller *signaller.Signaller 42 | configFile string 43 | localAdminAddr string 44 | currentVCLName string 45 | } 46 | 47 | func NewVarnishController( 48 | secretFile string, 49 | storage string, 50 | transientStorage string, 51 | additionalParameter string, 52 | workingDir string, 53 | frontendAddr string, 54 | frontendPort int, 55 | adminAddr string, 56 | adminPort int, 57 | frontendUpdates chan *watcher.EndpointConfig, 58 | backendUpdates chan *watcher.EndpointConfig, 59 | templateUpdates chan []byte, 60 | varnishSignaller *signaller.Signaller, 61 | vclTemplateFile string, 62 | ) (*VarnishController, error) { 63 | contents, err := os.ReadFile(vclTemplateFile) 64 | if err != nil { 65 | return nil, err 66 | } 67 | 68 | v := VarnishController{ 69 | SecretFile: secretFile, 70 | Storage: storage, 71 | TransientStorage: transientStorage, 72 | AdditionalParameters: additionalParameter, 73 | WorkingDir: workingDir, 74 | FrontendAddr: frontendAddr, 75 | FrontendPort: frontendPort, 76 | AdminAddr: adminAddr, 77 | AdminPort: adminPort, 78 | vclTemplateUpdates: templateUpdates, 79 | frontendUpdates: frontendUpdates, 80 | backendUpdates: backendUpdates, 81 | varnishSignaller: varnishSignaller, 82 | configFile: "/tmp/vcl", 83 | } 84 | err = v.setTemplate(contents) 85 | if err != nil { 86 | return nil, err 87 | } 88 | 89 | return &v, nil 90 | } 91 | 92 | func getEnvironment() map[string]string { 93 | items := make(map[string]string) 94 | for _, e := range os.Environ() { 95 | pair := strings.SplitN(e, 
"=", 2) 96 | items[pair[0]] = pair[1] 97 | } 98 | return items 99 | } 100 | 101 | func (v *VarnishController) renderVCL(target io.Writer, frontendList watcher.EndpointList, primaryFrontend *watcher.Endpoint, backendList watcher.EndpointList, primaryBackend *watcher.Endpoint) error { 102 | glog.V(6).Infof("rendering VCL (source md5sum: %s, Frontends:%v, PrimaryFrontend:%v, Backends:%v, PrimaryBackend:%v)", 103 | v.vclTemplateHash, frontendList, primaryFrontend, backendList, primaryBackend) 104 | 105 | err := v.vclTemplate.Execute(target, &TemplateData{ 106 | Frontends: frontendList, 107 | PrimaryFrontend: primaryFrontend, 108 | Backends: backendList, 109 | PrimaryBackend: primaryBackend, 110 | Env: getEnvironment(), 111 | }) 112 | 113 | return err 114 | } 115 | -------------------------------------------------------------------------------- /pkg/controller/wait.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/golang/glog" 7 | "github.com/martin-helmich/go-varnish-client" 8 | "time" 9 | ) 10 | 11 | func (v *VarnishController) waitForAdminPort(ctx context.Context) error { 12 | glog.Infof("probing admin port until it is available") 13 | addr := fmt.Sprintf("127.0.0.1:%d", v.AdminPort) 14 | 15 | t := time.NewTicker(time.Second) 16 | defer t.Stop() 17 | 18 | for { 19 | select { 20 | case <-t.C: 21 | _, err := varnishclient.DialTCP(ctx, addr) 22 | if err == nil { 23 | glog.Infof("admin port is available") 24 | return nil 25 | } 26 | 27 | glog.V(6).Infof("admin port is not available yet. waiting") 28 | case <-ctx.Done(): 29 | return ctx.Err() 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /pkg/controller/watch.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/md5" 7 | "encoding/hex" 8 | "fmt" 9 | "os" 10 | "os/exec" 11 | "sort" 12 | "strconv" 13 | "strings" 14 | "text/template" 15 | "time" 16 | 17 | "github.com/golang/glog" 18 | varnishclient "github.com/martin-helmich/go-varnish-client" 19 | ) 20 | 21 | func (v *VarnishController) watchConfigUpdates(ctx context.Context, c *exec.Cmd, errors chan<- error) { 22 | for { 23 | select { 24 | case tmplContents := <-v.vclTemplateUpdates: 25 | glog.Infof("VCL template has been updated") 26 | 27 | err := v.setTemplate(tmplContents) 28 | if err != nil { 29 | errors <- err 30 | continue 31 | } 32 | errors <- v.rebuildConfig(ctx) 33 | 34 | case newConfig := <-v.frontendUpdates: 35 | glog.Infof("received new frontend configuration: %+v", newConfig) 36 | 37 | v.frontend = newConfig 38 | 39 | if v.varnishSignaller != nil { 40 | v.varnishSignaller.SetEndpoints(v.frontend) 41 | } 42 | 43 | errors <- v.rebuildConfig(ctx) 44 | 45 | case newConfig := <-v.backendUpdates: 46 | glog.Infof("received new backend configuration: %+v", newConfig) 47 | 48 | v.backend = newConfig 49 | 50 | errors <- v.rebuildConfig(ctx) 51 | 52 | case <-ctx.Done(): 53 | errors <- ctx.Err() 54 | return 55 | } 56 | } 57 | } 58 | 59 | func (v *VarnishController) setTemplate(tmplContents []byte) error { 60 | parsedTemplate, err := template.New("vcl").Parse(string(tmplContents)) 61 | if err != nil { 62 | return err 63 | } 64 | 65 | v.vclTemplate = parsedTemplate 66 | hash := md5.Sum(tmplContents) 67 | hashStr := hex.EncodeToString(hash[:]) 68 | v.vclTemplateHash = hashStr 69 | 70 | return nil 71 | } 72 | 73 | func 
(v *VarnishController) rebuildConfig(ctx context.Context) error { 74 | buf := new(bytes.Buffer) 75 | 76 | err := v.renderVCL(buf, v.frontend.Endpoints, v.frontend.Primary, v.backend.Endpoints, v.backend.Primary) 77 | if err != nil { 78 | return err 79 | } 80 | 81 | vcl := buf.Bytes() 82 | glog.V(8).Infof("new VCL: %s", string(vcl)) 83 | 84 | client, err := varnishclient.DialTCP(ctx, fmt.Sprintf("127.0.0.1:%d", v.AdminPort)) 85 | if err != nil { 86 | return err 87 | } 88 | 89 | secret, err := os.ReadFile(v.SecretFile) 90 | if err != nil { 91 | return err 92 | } 93 | 94 | err = client.Authenticate(ctx, secret) 95 | if err != nil { 96 | return err 97 | } 98 | 99 | maxVclParam, err := client.GetParameter(ctx, "max_vcl") 100 | if err != nil { 101 | return err 102 | } 103 | 104 | maxVcl, err := strconv.Atoi(maxVclParam.Value) 105 | if err != nil { 106 | return err 107 | } 108 | 109 | loadedVcl, err := client.ListVCL(ctx) 110 | if err != nil { 111 | return err 112 | } 113 | 114 | availableVcl := make([]varnishclient.VCLConfig, 0) 115 | 116 | for i := range loadedVcl { 117 | if loadedVcl[i].Status == varnishclient.VCLAvailable { 118 | availableVcl = append(availableVcl, loadedVcl[i]) 119 | } 120 | } 121 | 122 | if len(loadedVcl) >= maxVcl { 123 | // we're abusing the fact that "boot" < "reload" 124 | sort.Slice(availableVcl, func(i, j int) bool { 125 | return availableVcl[i].Name < availableVcl[j].Name 126 | }) 127 | 128 | for i := 0; i < len(loadedVcl)-maxVcl+1; i++ { 129 | glog.V(6).Infof("discarding VCL: %s", availableVcl[i].Name) 130 | 131 | err = client.DiscardVCL(ctx, availableVcl[i].Name) 132 | if err != nil { 133 | return err 134 | } 135 | } 136 | } 137 | 138 | configname := strings.ReplaceAll(time.Now().Format("reload_20060102_150405.00000"), ".", "_") 139 | 140 | glog.V(6).Infof("about to create new VCL: %s", string(configname)) 141 | err = client.DefineInlineVCL(ctx, configname, vcl, varnishclient.VCLStateAuto) 142 | if err != nil { 143 | return err 144 | } 145 | 146 | err = client.UseVCL(ctx, configname) 147 | if err != nil { 148 | return err 149 | } 150 | glog.V(6).Infof("activated new VCL: %s", string(configname)) 151 | 152 | if v.currentVCLName == "" { 153 | v.currentVCLName = "boot" 154 | } 155 | 156 | if err := client.SetVCLState(ctx, v.currentVCLName, varnishclient.VCLStateCold); err != nil { 157 | glog.V(1).Infof("error while changing state of VCL %s: %s", v.currentVCLName, err) 158 | } 159 | glog.V(6).Infof("deactivated old VCL: %s", string(v.currentVCLName)) 160 | 161 | v.currentVCLName = configname 162 | 163 | return nil 164 | } 165 | -------------------------------------------------------------------------------- /pkg/signaller/run.go: -------------------------------------------------------------------------------- 1 | package signaller 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "strconv" 9 | "time" 10 | 11 | "github.com/golang/glog" 12 | "github.com/mittwald/kube-httpcache/pkg/watcher" 13 | 14 | "github.com/prometheus/client_golang/prometheus" 15 | "github.com/prometheus/client_golang/prometheus/promauto" 16 | "github.com/prometheus/client_golang/prometheus/promhttp" 17 | ) 18 | 19 | var ( 20 | signallerRequestsTotal = promauto.NewCounter(prometheus.CounterOpts{ 21 | Name: "kube_httpcache_signaller_requests_total", 22 | Help: "The total number of incoming requests to Signaller", 23 | }) 24 | 25 | signallerErrorsTotal = promauto.NewCounter(prometheus.CounterOpts{ 26 | Name: "kube_httpcache_signaller_errors_total", 27 | Help: "The total number of 
errors for incomming requests to Signaller", 28 | }) 29 | 30 | signallerResponseTime = promauto.NewSummary(prometheus.SummaryOpts{ 31 | Name: "kube_httpcache_signaller_durations_seconds", 32 | Help: "The Signaller response time", 33 | Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 34 | }) 35 | 36 | signallerUpstreamErrorsTotal = promauto.NewCounter(prometheus.CounterOpts{ 37 | Name: "kube_httpcache_signaller_upstream_errors_total", 38 | Help: "The total number of errors for outgoing requests to upstreams", 39 | }) 40 | 41 | signallerQueueLength = promauto.NewGauge(prometheus.GaugeOpts{ 42 | Name: "kube_httpcache_signaller_queue_length", 43 | Help: "The length of signaller queue", 44 | }) 45 | 46 | signallerQueueLatency = promauto.NewSummary(prometheus.SummaryOpts{ 47 | Name: "kube_httpcache_signaller_queue_latency", 48 | Help: "The Signaller queue latency", 49 | Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 50 | }) 51 | ) 52 | 53 | func (b *Signaller) Run() error { 54 | server := &http.Server{ 55 | Addr: b.Address + ":" + strconv.Itoa(b.Port), 56 | } 57 | 58 | for i := 0; i < b.WorkersCount; i++ { 59 | go b.ProcessSignalQueue() 60 | } 61 | 62 | http.Handle("/metrics", promhttp.Handler()) 63 | http.HandleFunc("/", b.Serve) 64 | 65 | return server.ListenAndServe() 66 | } 67 | 68 | func (b *Signaller) Serve(w http.ResponseWriter, r *http.Request) { 69 | t := time.Now() 70 | 71 | body, err := io.ReadAll(r.Body) 72 | if err != nil { 73 | b.errors <- err 74 | http.Error(w, err.Error(), http.StatusInternalServerError) 75 | 76 | signallerErrorsTotal.Inc() 77 | return 78 | } 79 | 80 | glog.V(5).Infof("received a signal request: %+v", r) 81 | signallerRequestsTotal.Inc() 82 | 83 | b.mutex.RLock() 84 | endpoints := make([]watcher.Endpoint, len(b.endpoints.Endpoints)) 85 | copy(endpoints, b.endpoints.Endpoints) 86 | b.mutex.RUnlock() 87 | 88 | for _, endpoint := range endpoints { 89 | url := fmt.Sprintf("%s://%s:%s%s", b.EndpointScheme, endpoint.Host, endpoint.Port, r.RequestURI) 90 | request, err := http.NewRequest(r.Method, url, bytes.NewReader(body)) 91 | if err != nil { 92 | b.errors <- err 93 | } 94 | request.Header = r.Header.Clone() 95 | request.Host = r.Host 96 | request.Header.Set("X-Forwarded-For", r.RemoteAddr) 97 | signallerQueueLength.Set(float64(len(b.signalQueue))) 98 | tt := time.Now() 99 | 100 | b.signalQueue <- Signal{request, 0} 101 | 102 | signallerQueueLatency.Observe(time.Since(tt).Seconds()) 103 | } 104 | 105 | fmt.Fprintf(w, "Signal request is being broadcasted.") 106 | signallerResponseTime.Observe(time.Since(t).Seconds()) 107 | } 108 | 109 | func (b *Signaller) ProcessSignalQueue() { 110 | client := &http.Client{} 111 | transport := http.DefaultTransport.(*http.Transport).Clone() 112 | 113 | if b.MaxConnsPerHost != -1 { 114 | transport.MaxConnsPerHost = b.MaxConnsPerHost 115 | } 116 | 117 | if b.MaxIdleConns != -1 { 118 | transport.MaxIdleConns = b.MaxIdleConns 119 | } 120 | 121 | if b.MaxIdleConnsPerHost != -1 { 122 | transport.MaxIdleConnsPerHost = b.MaxIdleConnsPerHost 123 | } 124 | 125 | client.Transport = transport 126 | 127 | if b.UpstreamRequestTimeout != 0 { 128 | client.Timeout = b.UpstreamRequestTimeout 129 | } 130 | 131 | for signal := range b.signalQueue { 132 | response, err := client.Do(signal.Request) 133 | if err != nil { 134 | glog.Errorf("signal broadcast error: %v", err.Error()) 135 | signallerUpstreamErrorsTotal.Inc() 136 | b.Retry(signal) 137 | } else if response.StatusCode >= 400 && response.StatusCode <= 599 { 
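			// 4xx/5xx responses are treated like transport errors: the upstream
			// error counter is incremented and the signal is re-queued via Retry,
			// which waits RetryBackoff between attempts and stops once MaxRetries
			// attempts have been made.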
138 | glog.Warningf("signal broadcast error: unusual status code from %s: %v", response.Request.URL.Host, response.Status) 139 | signallerUpstreamErrorsTotal.Inc() 140 | b.Retry(signal) 141 | } else { 142 | glog.V(5).Infof("received a signal response from %s: %+v", response.Request.URL.Host, response) 143 | } 144 | 145 | if response != nil { 146 | if _, err := io.Copy(io.Discard, response.Body); err != nil { 147 | glog.Error("error on discarding response body for connection reuse:", err) 148 | } 149 | 150 | if err := response.Body.Close(); err != nil { 151 | glog.Error("error on closing response body:", err) 152 | } 153 | } 154 | } 155 | } 156 | 157 | func (b *Signaller) Retry(signal Signal) { 158 | signal.Attempt++ 159 | if signal.Attempt < b.MaxRetries { 160 | go func() { 161 | glog.Infof("retrying in %v", b.RetryBackoff) 162 | time.Sleep(b.RetryBackoff) 163 | b.signalQueue <- signal 164 | }() 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /pkg/signaller/types.go: -------------------------------------------------------------------------------- 1 | package signaller 2 | 3 | import ( 4 | "net/http" 5 | "sync" 6 | "time" 7 | 8 | "github.com/golang/glog" 9 | "github.com/mittwald/kube-httpcache/pkg/watcher" 10 | ) 11 | 12 | type Signal struct { 13 | Request *http.Request 14 | Attempt int 15 | } 16 | 17 | type Signaller struct { 18 | Address string 19 | Port int 20 | WorkersCount int 21 | MaxRetries int 22 | RetryBackoff time.Duration 23 | MaxConnsPerHost int 24 | MaxIdleConns int 25 | MaxIdleConnsPerHost int 26 | UpstreamRequestTimeout time.Duration 27 | EndpointScheme string 28 | endpoints *watcher.EndpointConfig 29 | signalQueue chan Signal 30 | errors chan error 31 | mutex sync.RWMutex 32 | } 33 | 34 | func NewSignaller( 35 | address string, 36 | port int, 37 | workersCount int, 38 | maxRetries int, 39 | retryBackoff time.Duration, 40 | queueLength int, 41 | maxConnsPerHost int, 42 | maxIdleConns int, 43 | maxIdleConnsPerHost int, 44 | upstreamRequestTimeout time.Duration, 45 | ) *Signaller { 46 | if queueLength < 0 { 47 | queueLength = 0 48 | glog.Warningf("signaller processing queue cannot have a negative length, falling back to default value: %d", queueLength) 49 | } 50 | 51 | return &Signaller{ 52 | Address: address, 53 | Port: port, 54 | WorkersCount: workersCount, 55 | MaxRetries: maxRetries, 56 | RetryBackoff: retryBackoff, 57 | MaxConnsPerHost: maxConnsPerHost, 58 | MaxIdleConns: maxIdleConns, 59 | MaxIdleConnsPerHost: maxIdleConnsPerHost, 60 | UpstreamRequestTimeout: upstreamRequestTimeout, 61 | EndpointScheme: "http", 62 | endpoints: watcher.NewEndpointConfig(), 63 | signalQueue: make(chan Signal, queueLength), 64 | errors: make(chan error), 65 | } 66 | } 67 | 68 | func (b *Signaller) GetErrors() chan error { 69 | return b.errors 70 | } 71 | 72 | func (b *Signaller) SetEndpoints(e *watcher.EndpointConfig) { 73 | b.mutex.Lock() 74 | defer b.mutex.Unlock() 75 | 76 | b.endpoints = e 77 | } 78 | -------------------------------------------------------------------------------- /pkg/watcher/endpoints.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | v1 "k8s.io/api/core/v1" 8 | ) 9 | 10 | type EndpointProbe struct { 11 | URL string 12 | Interval int 13 | Timeout int 14 | Window int 15 | Threshold int 16 | } 17 | 18 | type Endpoint struct { 19 | Name string 20 | Host string 21 | Port string 22 | Probe *EndpointProbe 23 | } 24 | 25 
--------------------------------------------------------------------------------
/pkg/watcher/endpoints.go:
--------------------------------------------------------------------------------
1 | package watcher
2 | 
3 | import (
4 |     "fmt"
5 |     "strconv"
6 | 
7 |     v1 "k8s.io/api/core/v1"
8 | )
9 | 
10 | type EndpointProbe struct {
11 |     URL       string
12 |     Interval  int
13 |     Timeout   int
14 |     Window    int
15 |     Threshold int
16 | }
17 | 
18 | type Endpoint struct {
19 |     Name  string
20 |     Host  string
21 |     Port  string
22 |     Probe *EndpointProbe
23 | }
24 | 
25 | type EndpointList []Endpoint
26 | 
27 | func (l EndpointList) EqualsEndpoints(ep v1.EndpointSubset) bool {
28 |     if len(l) != len(ep.Addresses) {
29 |         return false
30 |     }
31 | 
32 |     matchingAddresses := map[string]bool{}
33 |     for i := range l {
34 |         matchingAddresses[l[i].Host] = true
35 |     }
36 | 
37 |     for i := range ep.Addresses {
38 |         h := ep.Addresses[i].IP
39 |         _, ok := matchingAddresses[h]
40 |         if !ok {
41 |             return false
42 |         }
43 |     }
44 | 
45 |     return true
46 | }
47 | 
48 | func (l EndpointList) Contains(b *Endpoint) bool {
49 |     if b == nil {
50 |         return false
51 |     }
52 | 
53 |     for i := range l {
54 |         if l[i].Host == b.Host && l[i].Port == b.Port {
55 |             return true
56 |         }
57 |     }
58 | 
59 |     return false
60 | }
61 | 
62 | func EndpointListFromSubset(ep v1.EndpointSubset, portName string) (EndpointList, error) {
63 |     var port int32
64 | 
65 |     l := make(EndpointList, len(ep.Addresses))
66 | 
67 |     for i := range ep.Ports {
68 |         if ep.Ports[i].Name == portName {
69 |             port = ep.Ports[i].Port
70 |         }
71 |     }
72 | 
73 |     if port == 0 {
74 |         return nil, fmt.Errorf("port '%s' not found in endpoint list", portName)
75 |     }
76 | 
77 |     for i := range ep.Addresses {
78 |         a := &ep.Addresses[i]
79 | 
80 |         if a.TargetRef != nil {
81 |             l[i].Name = a.TargetRef.Name
82 |         }
83 | 
84 |         l[i].Host = a.IP
85 |         l[i].Port = strconv.Itoa(int(port))
86 |     }
87 | 
88 |     return l, nil
89 | }
90 | 
91 | // EndpointSubsetIndex returns the index of EndpointSubset for which port.Name
92 | // matches provided portName
93 | func EndpointSubsetIndex(es []v1.EndpointSubset, portName string) (int, error) {
94 |     for i, set := range es {
95 |         for _, port := range set.Ports {
96 |             if port.Name == portName {
97 |                 return i, nil
98 |             }
99 |         }
100 |     }
101 | 
102 |     return -1, fmt.Errorf("port %q is not found in endpoint subsets", portName)
103 | }
104 | 
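A short, self-contained sketch of the mapping implemented above; the addresses and the port name "http" are made-up example values:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"

        "github.com/mittwald/kube-httpcache/pkg/watcher"
    )

    func main() {
        // a hypothetical subset as the Endpoints API might report it
        subset := v1.EndpointSubset{
            Addresses: []v1.EndpointAddress{
                {IP: "10.42.0.11"},
                {IP: "10.42.0.12"},
            },
            Ports: []v1.EndpointPort{
                {Name: "http", Port: 8080},
            },
        }

        // every address is combined with the port that matches the requested port name
        list, err := watcher.EndpointListFromSubset(subset, "http")
        if err != nil {
            panic(err)
        }

        for _, e := range list {
            fmt.Printf("%s:%s\n", e.Host, e.Port) // 10.42.0.11:8080, 10.42.0.12:8080
        }
    }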
--------------------------------------------------------------------------------
/pkg/watcher/endpoints_watch.go:
--------------------------------------------------------------------------------
1 | package watcher
2 | 
3 | import (
4 |     "context"
5 |     "time"
6 | 
7 |     "github.com/golang/glog"
8 |     v1 "k8s.io/api/core/v1"
9 |     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 |     "k8s.io/apimachinery/pkg/fields"
11 |     "k8s.io/apimachinery/pkg/watch"
12 | )
13 | 
14 | func (v *EndpointWatcher) Run(ctx context.Context) (chan *EndpointConfig, chan error) {
15 |     updates := make(chan *EndpointConfig)
16 |     errors := make(chan error)
17 | 
18 |     go v.watch(ctx, updates, errors)
19 | 
20 |     return updates, errors
21 | }
22 | 
23 | func (v *EndpointWatcher) watch(ctx context.Context, updates chan *EndpointConfig, errors chan error) {
24 |     for {
25 |         w, err := v.client.CoreV1().Endpoints(v.namespace).Watch(ctx, metav1.ListOptions{
26 |             FieldSelector: fields.OneTermEqualSelector("metadata.name", v.serviceName).String(),
27 |         })
28 | 
29 |         if err != nil {
30 |             glog.Errorf("error while establishing watch: %s", err.Error())
31 |             glog.Infof("retrying after %s", v.retryBackoff.String())
32 | 
33 |             time.Sleep(v.retryBackoff)
34 |             continue
35 |         }
36 | 
37 |         c := w.ResultChan()
38 |         for ev := range c {
39 |             if ev.Type == watch.Error {
40 |                 glog.Warningf("error while watching: %+v", ev.Object)
41 |                 continue
42 |             }
43 | 
44 |             if ev.Type != watch.Added && ev.Type != watch.Modified {
45 |                 continue
46 |             }
47 | 
48 |             endpoint := ev.Object.(*v1.Endpoints)
49 | 
50 |             if len(endpoint.Subsets) == 0 {
51 |                 glog.Warningf("service '%s' has no endpoints", v.serviceName)
52 | 
53 |                 v.endpointConfig = NewEndpointConfig()
54 | 
55 |                 continue
56 |             }
57 | 
58 |             endpointSubsetIndex, err := EndpointSubsetIndex(endpoint.Subsets, v.portName)
59 |             if err != nil {
60 |                 glog.Warning(err)
61 |                 v.endpointConfig = NewEndpointConfig()
62 | 
63 |                 continue
64 |             }
65 | 
66 |             if len(endpoint.Subsets[endpointSubsetIndex].Addresses) == 0 {
67 |                 glog.Warningf("service '%s' has no endpoints", v.serviceName)
68 |                 v.endpointConfig = NewEndpointConfig()
69 | 
70 |                 continue
71 |             }
72 | 
73 |             if v.endpointConfig.Endpoints.EqualsEndpoints(endpoint.Subsets[endpointSubsetIndex]) {
74 |                 glog.Infof("endpoints did not change")
75 |                 continue
76 |             }
77 | 
78 |             var addresses []v1.EndpointAddress
79 |             for _, a := range endpoint.Subsets[endpointSubsetIndex].Addresses {
80 |                 puid := string(a.TargetRef.UID)
81 | 
82 |                 po, err := v.client.CoreV1().Pods(v.namespace).Get(ctx, a.TargetRef.Name, metav1.GetOptions{})
83 | 
84 |                 if err != nil {
85 |                     glog.Errorf("error while locating endpoint: %s", err.Error())
86 |                     continue
87 |                 }
88 | 
89 |                 if len(po.Status.Conditions) > 0 && po.Status.Conditions[0].Status != v1.ConditionTrue {
90 |                     glog.Infof("skipping endpoint (not healthy): %s", puid)
91 |                     continue
92 |                 }
93 | 
94 |                 addresses = append(addresses, a)
95 |             }
96 | 
97 |             if len(addresses) == 0 {
98 |                 glog.Warningf("service '%s' has no endpoint that is ready", v.serviceName)
99 |                 v.endpointConfig = NewEndpointConfig()
100 |                 continue
101 |             }
102 | 
103 |             endpoint.Subsets[endpointSubsetIndex].Addresses = addresses
104 | 
105 |             newConfig := NewEndpointConfig()
106 | 
107 |             newBackendList, err := EndpointListFromSubset(endpoint.Subsets[endpointSubsetIndex], v.portName)
108 |             if err != nil {
109 |                 glog.Errorf("error while building backend list: %s", err.Error())
110 |                 continue
111 |             }
112 | 
113 |             if v.endpointConfig.Primary != nil && newBackendList.Contains(v.endpointConfig.Primary) {
114 |                 newConfig.Primary = v.endpointConfig.Primary
115 |             } else {
116 |                 newConfig.Primary = &newBackendList[0]
117 |             }
118 | 
119 |             newConfig.Endpoints = newBackendList
120 | 
121 |             v.endpointConfig = newConfig
122 |             updates <- newConfig
123 |         }
124 | 
125 |         glog.V(5).Info("watch has ended. starting new watch")
starting new watch") 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /pkg/watcher/template_watch.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "github.com/fsnotify/fsnotify" 5 | "github.com/golang/glog" 6 | "os" 7 | ) 8 | 9 | func (t *fsnotifyTemplateWatcher) Run() (chan []byte, chan error) { 10 | updates := make(chan []byte) 11 | errors := make(chan error) 12 | 13 | go t.watch(updates, errors) 14 | 15 | return updates, errors 16 | } 17 | 18 | func (t *fsnotifyTemplateWatcher) watch(updates chan []byte, errors chan error) { 19 | for ev := range t.watcher.Events { 20 | if ev.Op&(fsnotify.Write|fsnotify.Create) > 0 { 21 | glog.V(6).Infof("observed %s event on %s", ev.String(), ev.Name) 22 | 23 | content, err := os.ReadFile(t.filename) 24 | if err != nil { 25 | glog.Warningf("error while reading file %s: %s", t.filename, err.Error()) 26 | 27 | errors <- err 28 | continue 29 | } 30 | 31 | updates <- content 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /pkg/watcher/template_watch_poll.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "crypto/md5" 5 | "encoding/hex" 6 | "github.com/golang/glog" 7 | "os" 8 | "time" 9 | ) 10 | 11 | const ( 12 | // how often to check for template file changes 13 | POLL_INTERVAL = 15 * time.Second 14 | // how often print template info for troubleshooting 15 | TIMESTAMP_DISPLAY_INTERVAL = 1 * time.Hour 16 | ) 17 | 18 | func (t *pollingTemplateWatcher) Run() (chan []byte, chan error) { 19 | updates := make(chan []byte) 20 | errors := make(chan error) 21 | 22 | go t.watch(updates, errors) 23 | 24 | return updates, errors 25 | } 26 | 27 | func (t *pollingTemplateWatcher) watch(updates chan []byte, errors chan error) { 28 | stat, err := os.Stat(t.filename) 29 | if err != nil { 30 | errors <- err 31 | } 32 | 33 | t.lastObservedTimestamp = stat.ModTime() 34 | glog.V(6).Infof("observed modification time on %s (%s)", t.filename, t.lastObservedTimestamp.String()) 35 | 36 | var i uint64 = 0 37 | logTemplateInfoCount := uint64(TIMESTAMP_DISPLAY_INTERVAL / POLL_INTERVAL) 38 | for { 39 | time.Sleep(POLL_INTERVAL) 40 | 41 | stat, err := os.Stat(t.filename) 42 | if err != nil { 43 | errors <- err 44 | continue 45 | } 46 | 47 | modtime := stat.ModTime() 48 | i++ 49 | if glog.V(6) && (i%logTemplateInfoCount == 0) { 50 | logTemplateInfo(t.filename, modtime, errors) 51 | } 52 | 53 | if modtime != t.lastObservedTimestamp { 54 | glog.V(6).Infof("observed new modification time on %s (%s)", t.filename, modtime.String()) 55 | 56 | t.lastObservedTimestamp = modtime 57 | 58 | content, err := os.ReadFile(t.filename) 59 | if err != nil { 60 | glog.Warningf("error while reading file %s: %s", t.filename, err.Error()) 61 | 62 | errors <- err 63 | continue 64 | } 65 | 66 | updates <- content 67 | } 68 | } 69 | } 70 | 71 | // print template info to assist troubleshooting 72 | func logTemplateInfo(filename string, modtime time.Time, errors chan error) { 73 | content, err := os.ReadFile(filename) 74 | if err != nil { 75 | glog.Warningf("error while reading file %s: %s", filename, err.Error()) 76 | errors <- err 77 | return 78 | } 79 | 80 | hash := md5.Sum(content) 81 | hashStr := hex.EncodeToString(hash[:]) 82 | glog.Infof("current template modification time: %s, md5sum: %s", modtime.String(), hashStr) 83 | } 84 | 
--------------------------------------------------------------------------------
/pkg/watcher/types.go:
--------------------------------------------------------------------------------
1 | package watcher
2 | 
3 | import (
4 |     "time"
5 | 
6 |     "github.com/fsnotify/fsnotify"
7 | 
8 |     "k8s.io/client-go/kubernetes"
9 | )
10 | 
11 | type EndpointConfig struct {
12 |     Endpoints EndpointList
13 |     Primary   *Endpoint
14 | }
15 | 
16 | func NewEndpointConfig() *EndpointConfig {
17 |     return &EndpointConfig{
18 |         Endpoints: []Endpoint{},
19 |         Primary:   nil,
20 |     }
21 | }
22 | 
23 | type EndpointWatcher struct {
24 |     client      kubernetes.Interface
25 |     namespace   string
26 |     serviceName string
27 |     portName    string
28 | 
29 |     endpointConfig *EndpointConfig
30 |     retryBackoff   time.Duration
31 | }
32 | 
33 | func NewEndpointWatcher(client kubernetes.Interface, namespace, serviceName, portName string, retryBackoff time.Duration) *EndpointWatcher {
34 |     return &EndpointWatcher{
35 |         client:         client,
36 |         namespace:      namespace,
37 |         serviceName:    serviceName,
38 |         portName:       portName,
39 |         endpointConfig: NewEndpointConfig(),
40 |         retryBackoff:   retryBackoff,
41 |     }
42 | }
43 | 
44 | type fsnotifyTemplateWatcher struct {
45 |     filename string
46 |     watcher  *fsnotify.Watcher
47 | }
48 | 
49 | type pollingTemplateWatcher struct {
50 |     filename              string
51 |     lastObservedTimestamp time.Time
52 | }
53 | 
54 | type TemplateWatcher interface {
55 |     Run() (chan []byte, chan error)
56 | }
57 | 
58 | func MustNewTemplateWatcher(filename string, polling bool) TemplateWatcher {
59 |     w, err := NewTemplateWatcher(filename, polling)
60 |     if err != nil {
61 |         panic(err)
62 |     }
63 | 
64 |     return w
65 | }
66 | 
67 | func NewTemplateWatcher(filename string, polling bool) (TemplateWatcher, error) {
68 |     if polling {
69 |         return &pollingTemplateWatcher{
70 |             filename: filename,
71 |         }, nil
72 |     }
73 | 
74 |     watcher, err := fsnotify.NewWatcher()
75 |     if err != nil {
76 |         return nil, err
77 |     }
78 | 
79 |     err = watcher.Add(filename)
80 |     if err != nil {
81 |         return nil, err
82 |     }
83 | 
84 |     return &fsnotifyTemplateWatcher{
85 |         filename: filename,
86 |         watcher:  watcher,
87 |     }, nil
88 | }
89 | 
--------------------------------------------------------------------------------
/test/test-values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 |   repository: quay.io/mittwald/kube-httpcache
3 |   pullPolicy: Never
4 |   tag: "dev"
5 | 
6 | cache:
7 |   backendService: test-backend
8 | 
9 | vclTemplate: |
10 |   vcl 4.0;
11 | 
12 |   import std;
13 |   import directors;
14 | 
15 |   {{ range .Backends }}
16 |   backend be-{{ .Name }} {
17 |     .host = "{{ .Host }}";
18 |     .port = "{{ .Port }}";
19 |   }
20 |   {{- end }}
21 | 
22 |   sub vcl_init {
23 |     new lb = directors.round_robin();
24 | 
25 |     {{ range .Backends -}}
26 |     lb.add_backend(be-{{ .Name }});
27 |     {{ end }}
28 |   }
29 | 
30 |   sub vcl_recv
31 |   {
32 |     set req.backend_hint = lb.backend();
33 |   }
34 | 
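The vclTemplate above is a Go text/template that is rendered against the currently watched endpoints. With two hypothetical backend pods (pod names and IPs are made up for illustration), the generated VCL would look roughly like this:

    vcl 4.0;

    import std;
    import directors;

    backend be-test-backend-5f7c9-abcde {
      .host = "10.42.0.11";
      .port = "80";
    }

    backend be-test-backend-5f7c9-fghij {
      .host = "10.42.0.12";
      .port = "80";
    }

    sub vcl_init {
      new lb = directors.round_robin();

      lb.add_backend(be-test-backend-5f7c9-abcde);
      lb.add_backend(be-test-backend-5f7c9-fghij);
    }

    sub vcl_recv
    {
      set req.backend_hint = lb.backend();
    }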
--------------------------------------------------------------------------------
/test/test.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: test-backend
5 |   labels:
6 |     app: test-backend
7 | spec:
8 |   replicas: 4
9 |   selector:
10 |     matchLabels:
11 |       app: test-backend
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: test-backend
16 |     spec:
17 |       containers:
18 |         - name: app
19 |           image: ealen/echo-server
20 |           ports:
21 |             - containerPort: 80
22 |               name: http
23 | ---
24 | apiVersion: v1
25 | kind: Service
26 | metadata:
27 |   name: test-backend
28 | spec:
29 |   selector:
30 |     app: test-backend
31 |   clusterIP: None
32 |   ports:
33 |     - port: 80
34 |       targetPort: http
35 |       name: http
36 | 
--------------------------------------------------------------------------------
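A rough local workflow for these test manifests; the release name is an illustrative assumption, and pullPolicy: Never with tag: "dev" in the values above imply that the image has been built on the node beforehand:

    # deploy the echo-server test backend and its headless service
    kubectl apply -f test/test.yaml

    # install the chart against that backend using the test values
    helm install kube-httpcache ./chart -f test/test-values.yaml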