├── .github
│   └── workflows
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .releaserc
├── Dockerfile
├── LICENSE
├── Makefile
├── PROJECT
├── README.md
├── api
│   └── v1
│       ├── groupversion_info.go
│       ├── rest_types.go
│       ├── template_types.go
│       └── zz_generated.deepcopy.go
├── config
│   ├── base
│   │   ├── deploy.yml
│   │   └── kustomization.yaml
│   ├── certmanager
│   │   ├── certificate.yaml
│   │   ├── kustomization.yaml
│   │   └── kustomizeconfig.yaml
│   ├── crd
│   │   ├── bases
│   │   │   ├── templating.flanksource.com_rests.yaml
│   │   │   └── templating.flanksource.com_templates.yaml
│   │   ├── kustomization.yaml
│   │   ├── kustomizeconfig.yaml
│   │   └── patches
│   │       ├── cainjection_in_templates.yaml
│   │       └── webhook_in_templates.yaml
│   ├── default
│   │   ├── kustomization.yaml
│   │   ├── manager_webhook_patch.yaml
│   │   └── webhookcainjection_patch.yaml
│   ├── deploy
│   │   ├── crd.yml
│   │   └── operator.yml
│   ├── manager
│   │   ├── kustomization.yaml
│   │   └── manager.yaml
│   ├── prometheus
│   │   ├── kustomization.yaml
│   │   └── monitor.yaml
│   ├── rbac
│   │   ├── auth_proxy_client_clusterrole.yaml
│   │   ├── auth_proxy_role.yaml
│   │   ├── auth_proxy_role_binding.yaml
│   │   ├── auth_proxy_service.yaml
│   │   ├── kustomization.yaml
│   │   ├── leader_election_role.yaml
│   │   ├── leader_election_role_binding.yaml
│   │   ├── role.yaml
│   │   ├── role_binding.yaml
│   │   ├── service_account.yaml
│   │   ├── template_editor_role.yaml
│   │   └── template_viewer_role.yaml
│   ├── samples
│   │   ├── example_ingress.yaml
│   │   └── templating.flanksource.com_v1_template.yaml
│   └── webhook
│       ├── kustomization.yaml
│       ├── kustomizeconfig.yaml
│       ├── manifests.yaml
│       └── service.yaml
├── controllers
│   ├── client.go
│   ├── crd_controller.go
│   ├── rest_controller.go
│   ├── suite_test.go
│   └── template_controller.go
├── docs
│   ├── template-operator-intro-part-1.md
│   └── template-operator-intro-part-2.md
├── example-public-apis.yaml
├── examples
│   ├── awx-operator.yaml
│   ├── for-each-test.yml
│   ├── for-each.yml
│   ├── git-repository.yaml
│   ├── namespace-request-a.yml
│   ├── namespacerequest.yml
│   ├── postgres-operator.yml
│   ├── postgresqldb.yml
│   ├── rest.yml
│   ├── static-secret.yaml
│   ├── tutorial-crd.yaml
│   └── when.yaml
├── go.mod
├── go.sum
├── hack
│   └── boilerplate.go.txt
├── k8s
│   ├── patches.go
│   ├── patches_test.go
│   ├── rest_manager.go
│   ├── schema_cache.go
│   ├── schema_manager.go
│   ├── schema_manager_test.go
│   ├── suite_test.go
│   ├── template_manager.go
│   ├── template_manager_test.go
│   └── watcher.go
├── main.go
└── test
    ├── config.yaml
    ├── e2e.go
    ├── e2e.sh
    ├── fixtures
    │   ├── awx-operator.yml
    │   ├── copy-to-namespace.yml
    │   ├── depends-on.yaml
    │   ├── git-repository.yaml
    │   └── mockserver.yml
    └── patch1.yaml
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Create Release
2 | on:
3 | push:
4 | branches:
5 | - main
6 | - master
7 |
8 | jobs:
9 | semantic-release:
10 | runs-on: ubuntu-latest
11 | outputs:
12 | release-version: ${{ steps.semantic.outputs.release-version }}
13 | new-release-published: ${{ steps.semantic.outputs.new-release-published }}
14 | steps:
15 | - uses: actions/checkout@v2
16 | - uses: codfish/semantic-release-action@v1
17 | id: semantic
18 | env:
19 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
20 |
21 | operator:
22 | needs: semantic-release
23 | runs-on: ubuntu-latest
24 | if: needs.semantic-release.outputs.new-release-published == 'true'
25 | env:
26 | VERSION: v${{ needs.semantic-release.outputs.release-version }}
27 | steps:
28 | - uses: actions/checkout@v2
29 | - name: Build operator
30 | working-directory: ./config/default/
31 | run: |
32 | kustomize edit set image flanksource/template-operator:v${{ needs.semantic-release.outputs.release-version }}
33 | kustomize build . > operator.yml
34 | - name: Upload binaries to release
35 | uses: svenstaro/upload-release-action@v2
36 | with:
37 | repo_token: ${{ secrets.GITHUB_TOKEN }}
38 | file: ./config/default/operator.yml
39 | tag: v${{ needs.semantic-release.outputs.release-version }}
40 | asset_name: operator.yml
41 | overwrite: true
42 |
43 | docker:
44 | needs: semantic-release
45 | runs-on: ubuntu-latest
46 | if: needs.semantic-release.outputs.new-release-published == 'true'
47 | steps:
48 | - uses: actions/checkout@v2
49 | - name: Publish to Registry
50 | uses: elgohr/Publish-Docker-Github-Action@v5
51 | env:
52 |           VERSION: v${{ needs.semantic-release.outputs.release-version }}
53 | with:
54 | name: flanksource/template-operator
55 | username: ${{ secrets.DOCKER_USERNAME }}
56 | password: ${{ secrets.DOCKER_PASSWORD }}
57 | snapshot: true
58 | tags: "latest,v${{ needs.semantic-release.outputs.release-version }}"
59 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | on: [push]
2 | name: Test
3 | jobs:
4 | test:
5 | strategy:
6 | matrix:
7 | go-version: [1.19.x]
8 | platform: [ubuntu-latest]
9 | k8s:
10 | - v1.18.6
11 | - v1.20.7
12 | runs-on: ${{ matrix.platform }}
13 | steps:
14 | - name: Install Go
15 | uses: actions/setup-go@v2
16 | with:
17 | go-version: ${{ matrix.go-version }}
18 | - name: Checkout code
19 | uses: actions/checkout@v2
20 | - name: Test
21 | env:
22 | KUBERNETES_VERSION: ${{matrix.k8s}}
23 | run: ./test/e2e.sh
24 | - name: Export logs
25 | if: always()
26 | run: kind export logs ./logs --name kind-kind
27 | - name: Upload logs
28 | if: always()
29 | uses: actions/upload-artifact@v2
30 | with:
31 | name: log
32 | path: ./logs
33 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | bin/
2 | cover.out
3 | .env
4 | /.bin
5 | /.certs
6 | /karina*
7 |
8 | # IntelliJ project files
9 | .idea/
10 | *.iml
--------------------------------------------------------------------------------
/.releaserc:
--------------------------------------------------------------------------------
1 | plugins:
2 | - - "@semantic-release/commit-analyzer"
3 | - releaseRules:
4 | - { type: doc, scope: README, release: patch }
5 | - { type: fix, release: patch }
6 | - { type: chore, release: patch }
7 | - { type: refactor, release: patch }
8 | - { type: feat, release: minor }
9 | - { type: ci, release: patch }
10 | - { type: style, release: patch }
11 | parserOpts:
12 | noteKeywords:
13 | - MAJOR RELEASE
14 | - "@semantic-release/release-notes-generator"
15 | - - "@semantic-release/github"
16 | - assets:
17 | - path: ./config/default/operator.yml
18 | name: operator.yml
19 | branches:
20 | - main
21 | - master
22 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build the manager binary
2 | FROM golang:1.19 as builder
3 |
4 | WORKDIR /workspace
5 | # Copy the Go Modules manifests
6 | COPY go.mod go.mod
7 | COPY go.sum go.sum
8 | # cache deps before building and copying source so that we don't need to re-download as much
9 | # and so that source changes don't invalidate our downloaded layer
10 | RUN go mod download
11 |
12 | # Copy the go source
13 | COPY main.go main.go
14 | COPY api/ api/
15 | COPY controllers/ controllers/
16 | COPY k8s/ k8s/
17 |
18 | # Build
19 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go
20 |
21 | # Use distroless as minimal base image to package the manager binary
22 | # Refer to https://github.com/GoogleContainerTools/distroless for more details
23 | FROM gcr.io/distroless/static:nonroot
24 | WORKDIR /
25 | COPY --from=builder /workspace/manager .
26 | USER nonroot:nonroot
27 |
28 | ENTRYPOINT ["/manager"]
29 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ifeq ($(VERSION),)
5 | VERSION_TAG=$(shell git describe --abbrev=0 --tags --exact-match 2>/dev/null || echo dev)
6 | else
7 | VERSION_TAG=$(VERSION)
8 | endif
9 |
10 | # Image URL to use all building/pushing image targets
11 | IMG ?= flanksource/template-operator:${VERSION_TAG}
12 | # Options passed to controller-gen when generating CRD manifests
13 | CRD_OPTIONS ?= "crd:trivialVersions=false"
14 |
15 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
16 | ifeq (,$(shell go env GOBIN))
17 | GOBIN=$(shell go env GOPATH)/bin
18 | else
19 | GOBIN=$(shell go env GOBIN)
20 | endif
21 |
22 | all: manager
23 |
24 | # Run tests
25 | test: generate fmt vet manifests
26 | go test ./... -coverprofile cover.out
27 |
28 | # Build manager binary
29 | # manager: generate fmt vet
30 | manager:
31 | go build -o bin/manager main.go
32 |
33 | .PHONY: linux
34 | linux:
35 | GOOS=linux go build -o bin/manager main.go
36 |
37 | # Run against the configured Kubernetes cluster in ~/.kube/config
38 | run: generate fmt vet manifests
39 | go run ./main.go
40 |
41 | # Install CRDs into a cluster
42 | install: manifests
43 | kustomize build config/crd | kubectl apply -f -
44 |
45 | # Uninstall CRDs from a cluster
46 | uninstall: manifests
47 | kustomize build config/crd | kubectl delete -f -
48 |
49 | # Deploy controller in the configured Kubernetes cluster in ~/.kube/config
50 | deploy: manifests
51 | cd config/manager && kustomize edit set image controller=${IMG}
52 | kustomize build config/default | kubectl apply -f -
53 |
54 | # Generate manifests e.g. CRD, RBAC etc.
55 | manifests: controller-gen .bin/yq
56 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
57 | $(YQ) eval -I2 -i '.spec.versions.0.schema.openAPIV3Schema.properties.spec.properties.resources.items.x-kubernetes-preserve-unknown-fields = true' config/crd/bases/templating.flanksource.com_templates.yaml
58 |
59 | static: manifests
60 | mkdir -p config/deploy
61 | cd config/manager && kustomize edit set image controller=${IMG}
62 | kustomize build config/crd > config/deploy/crd.yml
63 | kustomize build config/default > config/deploy/operator.yml
64 | kustomize build config/base > config/base/deploy.yml
65 |
66 | # Run go fmt against code
67 | fmt:
68 | go fmt ./...
69 |
70 | # Run go vet against code
71 | vet:
72 | go vet ./...
73 |
74 | # Generate code
75 | generate: controller-gen
76 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
77 |
78 | # Build the docker image
79 | docker-build:
80 | docker build . -t ${IMG}
81 |
82 | # Push the docker image
83 | docker-push:
84 | docker push ${IMG}
85 |
86 | # find or download controller-gen
87 | # download controller-gen if necessary
88 | controller-gen:
89 | ifeq (, $(shell which controller-gen))
90 | @{ \
91 | set -e ;\
92 | CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
93 | cd $$CONTROLLER_GEN_TMP_DIR ;\
94 | go mod init tmp ;\
95 | go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.0 ;\
96 | rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
97 | }
98 | CONTROLLER_GEN=$(GOBIN)/controller-gen
99 | else
100 | CONTROLLER_GEN=$(shell which controller-gen)
101 | endif
102 |
103 | OS = $(shell uname -s | tr '[:upper:]' '[:lower:]')
104 | ARCH = $(shell uname -m | sed 's/x86_64/amd64/')
105 |
106 | .bin/yq:
107 | mkdir -p .bin
108 | curl -sSLo .bin/yq https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_$(OS)_$(ARCH) && chmod +x .bin/yq
109 | YQ = $(realpath ./.bin/yq)
110 |
--------------------------------------------------------------------------------
/PROJECT:
--------------------------------------------------------------------------------
1 | domain: flanksource.com
2 | repo: github.com/flanksource/template-operator
3 | resources:
4 | - group: templating.flanksource.com
5 | kind: Template
6 | version: v1
7 | version: "2"
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Template Operator
2 |
3 |
4 | **Simple, reconciliation-based runtime templating**
5 |
6 |
7 | The Template Operator is for platform engineers who need an easy and reliable way to create, copy, and update Kubernetes resources.
8 |
9 | ## Design principles
10 |
11 | - **100% YAML** – `Templates` are valid YAML and IDE validation and autocomplete of k8s resources works as normal.
12 | - **Simple** – Easy to use and quick to get started.
13 | - **Reconciliation based** – Changes are applied quickly and resiliently (unlike webhooks) at runtime.
14 |
15 | ## Further reading
16 |
17 | This README replicates much of the content from [Simple, reconciliation-based runtime templating](/docs/template-operator-intro-part-1.md).
18 |
19 | For further examples, see part 2 in the series: [Powering up with Custom Resource Definitions (CRDs)](/docs/template-operator-intro-part-2.md).
20 |
21 | ### Alternatives
22 |
23 | There are alternative templating systems in use by the k8s community. Each has valid use cases, and noting their downsides for runtime templating is not intended as an indictment: all are excellent choices under the right conditions.
24 |
25 |
26 | | Alternative | Downside for templating |
27 | | ------------------------ | :------------------------------------------------------- |
28 | | [crossplane][crossplane] | Complex due to design for infrastructure composition |
29 | | [kyverno][kyverno]       | Webhook based<br>Designed as a policy engine              |
30 | | [helm][helm]             | Not 100% YAML<br>Not reconciliation based (build time)    |
31 |
32 |
33 | ## Installation
34 |
35 | API documentation available [here](https://pkg.go.dev/github.com/flanksource/template-operator/api/v1).
36 |
37 | ### Prerequisites
38 |
39 | This guide assumes you have either a [kind cluster](https://kind.sigs.k8s.io/docs/user/quick-start/) or [minikube cluster](https://minikube.sigs.k8s.io/docs/start/) running, or have some other way of interacting with a cluster via [kubectl](https://kubernetes.io/docs/tasks/tools/).
40 |
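For example, a throwaway local cluster for this guide can be created with kind (the cluster name below is arbitrary; omit `--name` to use kind's default):

```bash
# Create a disposable local cluster for trying out the operator
kind create cluster --name template-operator-demo
```
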
41 | ### Install
42 |
43 | ```bash
44 | export VERSION=0.4.0
45 | # For the latest release version: https://github.com/flanksource/template-operator/releases
46 |
47 | # Apply the operator
48 | kubectl apply -f https://github.com/flanksource/template-operator/releases/download/v${VERSION}/operator.yml
49 | ```
50 |
51 | Run `kubectl get pods -A` and you should see something similar to the following in your terminal output:
52 |
53 | ```bash
54 | NAMESPACE NAME READY
55 | template-operator template-operator-controller-manager-6bd8c5ff58-sz8q6 2/2
56 | ```
57 |
58 | ### Following the logs
59 |
60 | To follow the manager logs, open a new terminal and run:
61 |
62 | ```bash
63 | kubectl logs -f --since 10m -n template-operator deploy/template-operator-controller-manager
64 | -c manager
65 | ```
66 |
67 | These logs are where reconciliation successes and errors show up – and the best place to look when debugging.
68 |
69 | ## Use case: Creating resources per namespace
70 |
71 | > *As a platform engineer, I need to quickly provision Namespaces for application teams so that they are able to spin up environments quickly.*
72 |
73 | As organisations grow, platform teams are often tasked with creating `Namespaces` for continuous integration or for development.
74 |
75 | To configure a `Namespace`, platform teams may need to commit or apply many boilerplate objects.
76 |
77 | For this example, suppose you need a set of `Roles` and `RoleBindings` to be deployed automatically for each new `Namespace`.
78 |
79 | ### Step 1: Adding a namespace and a template
80 |
81 | Add a `Namespace`. You might add this after applying the `Template`, but it's helpful to see that the Template Operator doesn't care when objects are applied – a feature of the reconciliation-based approach. Note the label – this tags the `Namespace` as one that should produce `RoleBindings`.
82 |
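A minimal sketch of what such a labeled `Namespace` could look like (the namespace name and label key here are illustrative only – use whatever selector your `Template` matches on):

```yaml
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: team-a-dev                      # illustrative name
  labels:
    auto-deploy-rolebindings: "true"    # illustrative label for the Template to select on
EOF
```
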
200 | > *As a platform engineer, I need to automatically copy appropriate Secrets to newly created Namespaces so that application teams have access to the Secrets they need by default.*
201 |
202 | Suppose you have a `Namespace` containing `Secrets` you want to copy to every development `Namespace`.
203 |
204 | ### Step 1: Add secrets and namespace
205 |
206 | Apply the following manifests to set up the `Namespace` with the `Secrets`.
207 |
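For instance, the source `Namespace` and a `Secret` to be copied might look something like this (all names and values are purely illustrative):

```yaml
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: shared-secrets            # illustrative source namespace
---
apiVersion: v1
kind: Secret
metadata:
  name: registry-credentials      # illustrative secret to copy to other namespaces
  namespace: shared-secrets
stringData:
  username: example
  password: example
EOF
```
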
--------------------------------------------------------------------------------
/controllers/crd_controller.go:
--------------------------------------------------------------------------------
66 | if resourceVersion > r.ResourceVersion {
67 | log.V(2).Info("Newer resourceVersion detected, resetting cache")
68 | if err := r.resetCache(); err != nil {
69 | return reconcile.Result{}, err
70 | }
71 | r.ResourceVersion = resourceVersion
72 | }
73 | return reconcile.Result{}, nil
74 | }
75 |
76 | func (r *CRDReconciler) reconcileV1beta1(ctx context.Context, req ctrl.Request, log logr.Logger) (ctrl.Result, error) {
77 | crd := &apiv1beta1.CustomResourceDefinition{}
78 | if err := r.ControllerClient.Get(ctx, req.NamespacedName, crd); err != nil {
79 | return reconcile.Result{}, err
80 | }
81 | resourceVersion, err := strconv.Atoi(crd.ResourceVersion)
82 | if err != nil {
83 | return reconcile.Result{}, err
84 | }
85 |
86 | if resourceVersion > r.ResourceVersion {
87 | log.V(2).Info("Newer resourceVersion detected, resetting cache")
88 | if err := r.resetCache(); err != nil {
89 | return reconcile.Result{}, err
90 | }
91 | r.ResourceVersion = resourceVersion
92 | }
93 | return reconcile.Result{}, nil
94 | }
95 |
96 | func (r *CRDReconciler) resetCache() error {
97 | if err := r.Cache.ExpireSchema(); err != nil {
98 | return err
99 | }
100 | return nil
101 | }
102 |
103 | func (r *CRDReconciler) SetupWithManager(mgr ctrl.Manager) error {
104 | r.ControllerClient = mgr.GetClient()
105 | r.Events = mgr.GetEventRecorderFor("template-operator")
106 | c, err := controller.New("crd-monitor", mgr, controller.Options{Reconciler: r})
107 | if err != nil {
108 | return err
109 | }
110 | config, err := controllercliconfig.GetConfig()
111 | if err != nil {
112 | return err
113 | }
114 | r.Discovery, err = discovery.NewDiscoveryClientForConfig(config)
115 | if err != nil {
116 | return err
117 | }
118 | v1, err := r.HasKind(CRDV1Group, CRDV1Version)
119 | if err != nil {
120 | return err
121 | }
122 | if v1 {
123 | return c.Watch(source.Kind(mgr.GetCache(), &apiv1.CustomResourceDefinition{}), &handler.EnqueueRequestForObject{})
124 | }
125 | return c.Watch(source.Kind(mgr.GetCache(), &apiv1beta1.CustomResourceDefinition{}), &handler.EnqueueRequestForObject{})
126 | }
127 |
--------------------------------------------------------------------------------
/controllers/rest_controller.go:
--------------------------------------------------------------------------------
1 | /*
2 |
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controllers
18 |
19 | import (
20 | "context"
21 | "encoding/json"
22 | "reflect"
23 | "strings"
24 | "time"
25 |
26 | "github.com/flanksource/commons/utils"
27 | templatev1 "github.com/flanksource/template-operator/api/v1"
28 | "github.com/flanksource/template-operator/k8s"
29 | "github.com/pkg/errors"
30 | "github.com/prometheus/client_golang/prometheus"
31 | kerrors "k8s.io/apimachinery/pkg/api/errors"
32 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
33 | "k8s.io/apimachinery/pkg/types"
34 | "k8s.io/apimachinery/pkg/util/wait"
35 | ctrl "sigs.k8s.io/controller-runtime"
36 | "sigs.k8s.io/controller-runtime/pkg/metrics"
37 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
38 | )
39 |
40 | const (
41 | objectModifiedError = "the object has been modified; please apply your changes to the latest version and try again"
42 | )
43 |
44 | var (
45 | RESTDeleteFinalizer = "termination.flanksource.com/protect"
46 | )
47 |
48 | var (
49 | restCount = prometheus.NewGaugeVec(
50 | prometheus.GaugeOpts{
51 | Name: "template_operator_rest_count",
52 | Help: "Total rest runs count",
53 | },
54 | []string{"rest"},
55 | )
56 | restSuccess = prometheus.NewGaugeVec(
57 | prometheus.GaugeOpts{
58 | Name: "template_operator_rest_success",
59 | Help: "Total successful rest runs count",
60 | },
61 | []string{"rest"},
62 | )
63 | restFailed = prometheus.NewGaugeVec(
64 | prometheus.GaugeOpts{
65 | Name: "template_operator_rest_failed",
66 | Help: "Total failed rest runs count",
67 | },
68 | []string{"rest"},
69 | )
70 | )
71 |
72 | func init() {
73 | metrics.Registry.MustRegister(restCount, restSuccess, restFailed)
74 | }
75 |
76 | // RESTReconciler reconciles a REST object
77 | type RESTReconciler struct {
78 | Client
79 | }
80 |
81 | // +kubebuilder:rbac:groups="*",resources="*",verbs="*"
82 |
83 | func (r *RESTReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
84 | log := r.Log.WithValues("rest", req.NamespacedName, "requestID", utils.RandomString(10))
85 | name := req.NamespacedName.String()
86 |
87 | log.V(2).Info("Started reconciling")
88 |
89 | rest := &templatev1.REST{}
90 | if err := r.ControllerClient.Get(ctx, req.NamespacedName, rest); err != nil {
91 | if kerrors.IsNotFound(err) {
92 | log.Error(err, "rest not found")
93 | return reconcile.Result{}, nil
94 | }
95 | log.Error(err, "failed to get rest")
96 | incRESTFailed(name)
97 | return reconcile.Result{}, err
98 | }
99 |
100 | if rest.Status == nil {
101 | rest.Status = map[string]string{}
102 | }
103 | oldStatus := cloneMap(rest.Status)
104 |
105 | // If the TemplateManager is about to fetch a new schema, make the kommons client refresh its REST mapper too, so both work from the same information.
106 | if r.Cache.SchemaHasExpired() {
107 | r.KommonsClient.ResetRestMapper()
108 | }
109 | tm, err := k8s.NewRESTManager(r.KommonsClient, log)
110 | if err != nil {
111 | incRESTFailed(name)
112 | return reconcile.Result{}, err
113 | }
114 |
115 | hasFinalizer := false
116 | for _, finalizer := range rest.ObjectMeta.Finalizers {
117 | if finalizer == RESTDeleteFinalizer {
118 | hasFinalizer = true
119 | }
120 | }
121 |
122 | if rest.ObjectMeta.DeletionTimestamp != nil {
123 | log.V(2).Info("Object marked as deleted")
124 | if err = tm.Delete(ctx, rest); err != nil {
125 | return reconcile.Result{}, err
126 | }
127 | if err := r.removeFinalizers(rest); err != nil {
128 | return ctrl.Result{}, err
129 | }
130 | return ctrl.Result{}, nil
131 | }
132 |
133 | if !hasFinalizer {
134 | log.V(2).Info("Setting finalizer")
135 | rest.ObjectMeta.Finalizers = append(rest.ObjectMeta.Finalizers, RESTDeleteFinalizer)
136 | if err := r.ControllerClient.Update(ctx, rest); err != nil {
137 | log.Error(err, "failed to add finalizer to object")
138 | return ctrl.Result{}, err
139 | }
140 | log.V(2).Info("Finalizer set, exiting reconcile")
141 |
142 | return ctrl.Result{}, nil
143 | }
144 |
145 | statusUpdates, err := tm.Update(ctx, rest)
146 | if err != nil {
147 | log.Error(err, "failed to update REST")
148 | incRESTFailed(name)
149 | return reconcile.Result{}, err
150 | }
151 |
152 | if err := r.updateStatus(ctx, rest, statusUpdates, oldStatus); err != nil {
153 | return reconcile.Result{}, err
154 | }
155 |
156 | incRESTSuccess(name)
157 | log.V(2).Info("Finished reconciling", "generation", rest.ObjectMeta.Generation)
158 | return ctrl.Result{}, nil
159 | }
160 |
161 | func (r *RESTReconciler) updateStatus(ctx context.Context, rest *templatev1.REST, statusUpdates, oldStatus map[string]string) error {
162 | backoff := wait.Backoff{
163 | Duration: 50 * time.Millisecond,
164 | Factor: 1.5,
165 | Jitter: 2,
166 | Steps: 10,
167 | Cap: 5 * time.Second,
168 | }
169 | var err error
170 |
171 | r.addStatusUpdates(rest, statusUpdates)
172 |
173 | if reflect.DeepEqual(rest.Status, oldStatus) {
174 | r.Log.V(2).Info("REST status did not change, skipping")
175 | return nil
176 | }
177 |
178 | setRestStatus(rest)
179 |
180 | js, _ := json.Marshal(rest.Status)
181 | js2, _ := json.Marshal(oldStatus)
182 | r.Log.V(2).Info("Checking:", "status", string(js), "oldStatus", string(js2))
183 |
184 | for backoff.Steps > 0 {
185 | js, marshalErr := json.Marshal(statusUpdates)
186 | r.Log.V(2).Info("Updating status: setting", "statusUpdates", string(js), "err", marshalErr)
187 | if err = r.ControllerClient.Status().Update(ctx, rest); err == nil {
188 | return nil
189 | }
190 | sleepDuration := backoff.Step()
191 | r.Log.Info("update status failed, sleeping", "duration", sleepDuration, "err", err)
192 | time.Sleep(sleepDuration)
193 | if strings.Contains(err.Error(), objectModifiedError) {
194 | if err := r.ControllerClient.Get(context.Background(), types.NamespacedName{Name: rest.Name, Namespace: rest.Namespace}, rest); err != nil {
195 | return errors.Wrap(err, "failed to refetch object")
196 | }
197 | r.addStatusUpdates(rest, statusUpdates)
198 | if reflect.DeepEqual(rest.Status, oldStatus) {
199 | return nil
200 | }
201 | setRestStatus(rest)
202 | }
203 | }
204 |
205 | return err
206 | }
207 |
208 | func (r *RESTReconciler) removeFinalizers(rest *templatev1.REST) error {
209 | backoff := wait.Backoff{
210 | Duration: 50 * time.Millisecond,
211 | Factor: 1.5,
212 | Jitter: 2,
213 | Steps: 10,
214 | Cap: 5 * time.Second,
215 | }
216 | var err error
217 |
218 | rest.ObjectMeta.Finalizers = r.removeFinalizer(rest)
219 |
220 | for backoff.Steps > 0 {
221 | if err = r.ControllerClient.Update(context.Background(), rest); err == nil {
222 | return nil
223 | }
224 | sleepDuration := backoff.Step()
225 | r.Log.Info("remove finalizers failed, sleeping", "duration", sleepDuration, "err", err)
226 | time.Sleep(sleepDuration)
227 | if strings.Contains(err.Error(), objectModifiedError) {
228 | if err := r.ControllerClient.Get(context.Background(), types.NamespacedName{Name: rest.Name, Namespace: rest.Namespace}, rest); err != nil {
229 | return errors.Wrap(err, "failed to refetch object")
230 | }
231 | rest.ObjectMeta.Finalizers = r.removeFinalizer(rest)
232 | }
233 | }
234 |
235 | return err
236 | }
237 |
238 | func (r *RESTReconciler) removeFinalizer(rest *templatev1.REST) []string {
239 | finalizers := []string{}
240 | for _, finalizer := range rest.ObjectMeta.Finalizers {
241 | if finalizer != RESTDeleteFinalizer {
242 | finalizers = append(finalizers, finalizer)
243 | }
244 | }
245 | return finalizers
246 | }
247 |
248 | func (r *RESTReconciler) addStatusUpdates(rest *templatev1.REST, statusUpdates map[string]string) {
249 | if rest.Status == nil {
250 | rest.Status = map[string]string{}
251 | }
252 | for k, v := range statusUpdates {
253 | rest.Status[k] = v
254 | }
255 | }
256 |
257 | func (r *RESTReconciler) SetupWithManager(mgr ctrl.Manager) error {
258 | r.ControllerClient = mgr.GetClient()
259 | r.Events = mgr.GetEventRecorderFor("template-operator")
260 |
261 | return ctrl.NewControllerManagedBy(mgr).
262 | For(&templatev1.REST{}).
263 | Complete(r)
264 | }
265 |
266 | func incRESTSuccess(name string) {
267 | restCount.WithLabelValues(name).Inc()
268 | restSuccess.WithLabelValues(name).Inc()
269 | }
270 |
271 | func incRESTFailed(name string) {
272 | restCount.WithLabelValues(name).Inc()
273 | restFailed.WithLabelValues(name).Inc()
274 | }
275 |
276 | func setRestStatus(rest *templatev1.REST) {
277 | rest.Status["lastUpdated"] = metav1.Now().String()
278 | }
279 |
280 | func cloneMap(m map[string]string) map[string]string {
281 | x := map[string]string{}
282 | for k, v := range m {
283 | x[k] = v
284 | }
285 | return x
286 | }
287 |
--------------------------------------------------------------------------------
/controllers/suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 |
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controllers
18 |
19 | import (
20 | "path/filepath"
21 | "testing"
22 |
23 | . "github.com/onsi/ginkgo"
24 | . "github.com/onsi/gomega"
25 | "k8s.io/client-go/kubernetes/scheme"
26 | "k8s.io/client-go/rest"
27 | "sigs.k8s.io/controller-runtime/pkg/client"
28 | "sigs.k8s.io/controller-runtime/pkg/envtest"
29 | logf "sigs.k8s.io/controller-runtime/pkg/log"
30 | "sigs.k8s.io/controller-runtime/pkg/log/zap"
31 |
32 | templatingflanksourcecomv1 "github.com/flanksource/template-operator/api/v1"
33 | // +kubebuilder:scaffold:imports
34 | )
35 |
36 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to
37 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
38 |
39 | var cfg *rest.Config
40 | var k8sClient client.Client
41 | var testEnv *envtest.Environment
42 |
43 | func TestAPIs(t *testing.T) {
44 | RegisterFailHandler(Fail)
45 |
46 | RunSpecs(t, "Controller Suite")
47 | }
48 |
49 | var _ = BeforeSuite(func(done Done) {
50 | logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
51 |
52 | By("bootstrapping test environment")
53 | testEnv = &envtest.Environment{
54 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
55 | }
56 |
57 | var err error
58 | cfg, err = testEnv.Start()
59 | Expect(err).ToNot(HaveOccurred())
60 | Expect(cfg).ToNot(BeNil())
61 |
62 | err = templatingflanksourcecomv1.AddToScheme(scheme.Scheme)
63 | Expect(err).NotTo(HaveOccurred())
64 |
65 | // +kubebuilder:scaffold:scheme
66 |
67 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
68 | Expect(err).ToNot(HaveOccurred())
69 | Expect(k8sClient).ToNot(BeNil())
70 |
71 | close(done)
72 | }, 60)
73 |
74 | var _ = AfterSuite(func() {
75 | By("tearing down the test environment")
76 | err := testEnv.Stop()
77 | Expect(err).ToNot(HaveOccurred())
78 | })
79 |
--------------------------------------------------------------------------------
/controllers/template_controller.go:
--------------------------------------------------------------------------------
1 | /*
2 |
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controllers
18 |
19 | import (
20 | "context"
21 |
22 | templatev1 "github.com/flanksource/template-operator/api/v1"
23 | "github.com/flanksource/template-operator/k8s"
24 | "github.com/prometheus/client_golang/prometheus"
25 | v1 "k8s.io/api/core/v1"
26 | kerrors "k8s.io/apimachinery/pkg/api/errors"
27 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
28 | "k8s.io/apimachinery/pkg/types"
29 | ctrl "sigs.k8s.io/controller-runtime"
30 | "sigs.k8s.io/controller-runtime/pkg/metrics"
31 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
32 | )
33 |
34 | var (
35 | templateCount = prometheus.NewGaugeVec(
36 | prometheus.GaugeOpts{
37 | Name: "template_operator_template_count",
38 | Help: "Total template runs count",
39 | },
40 | []string{"template"},
41 | )
42 | templateSuccess = prometheus.NewGaugeVec(
43 | prometheus.GaugeOpts{
44 | Name: "template_operator_template_success",
45 | Help: "Total successful template runs count",
46 | },
47 | []string{"template"},
48 | )
49 | templateFailed = prometheus.NewGaugeVec(
50 | prometheus.GaugeOpts{
51 | Name: "template_operator_template_failed",
52 | Help: "Total failed template runs count",
53 | },
54 | []string{"template"},
55 | )
56 | )
57 |
58 | func init() {
59 | metrics.Registry.MustRegister(templateCount, templateSuccess, templateFailed)
60 | }
61 |
62 | // TemplateReconciler reconciles a Template object
63 | type TemplateReconciler struct {
64 | Client
65 | }
66 |
67 | // +kubebuilder:rbac:groups="*",resources="*",verbs="*"
68 |
69 | func (r *TemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
70 | log := r.Log.WithValues("template", req.NamespacedName)
71 | name := req.NamespacedName.String()
72 |
73 | template := &templatev1.Template{}
74 | if err := r.ControllerClient.Get(ctx, req.NamespacedName, template); err != nil {
75 | if kerrors.IsNotFound(err) {
76 | log.Error(err, "template not found")
77 | return reconcile.Result{}, nil
78 | }
79 | log.Error(err, "failed to get template")
80 | incFailed(name)
81 | return reconcile.Result{}, err
82 | }
83 | // If the TemplateManager is about to fetch a new schema, make the kommons client refresh its REST mapper too, so both work from the same information.
84 | if r.Cache.SchemaHasExpired() {
85 | r.KommonsClient.ResetRestMapper()
86 | }
87 | tm, err := k8s.NewTemplateManager(r.KommonsClient, log, r.Cache, r.Events, r.Watcher)
88 | if err != nil {
89 | incFailed(name)
90 | return reconcile.Result{}, err
91 | }
92 | result, err := tm.Run(ctx, template, r.reconcileObject(req.NamespacedName))
93 | if err != nil {
94 | incFailed(name)
95 | return reconcile.Result{}, err
96 | }
97 | incSuccess(name)
98 | return result, nil
99 | }
100 |
101 | func (r *TemplateReconciler) SetupWithManager(mgr ctrl.Manager) error {
102 | r.ControllerClient = mgr.GetClient()
103 | r.Events = mgr.GetEventRecorderFor("template-operator")
104 |
105 | return ctrl.NewControllerManagedBy(mgr).
106 | For(&templatev1.Template{}).
107 | Complete(r)
108 | }
109 |
110 | func (r *TemplateReconciler) reconcileObject(namespacedName types.NamespacedName) k8s.CallbackFunc {
111 | return func(obj unstructured.Unstructured) error {
112 | ctx := context.Background()
113 | log := r.Log.WithValues("template", namespacedName)
114 | name := namespacedName.String()
115 | template := &templatev1.Template{}
116 | if err := r.ControllerClient.Get(ctx, namespacedName, template); err != nil {
117 | if kerrors.IsNotFound(err) {
118 | log.Error(err, "template not found")
119 | return err
120 | }
121 | log.Error(err, "failed to get template")
122 | incFailed(name)
123 | return err
124 | }
125 |
126 | // If the TemplateManager is about to fetch a new schema, make the kommons client refresh its REST mapper too, so both work from the same information.
127 | if r.Cache.SchemaHasExpired() {
128 | r.KommonsClient.ResetRestMapper()
129 | }
130 | tm, err := k8s.NewTemplateManager(r.KommonsClient, log, r.Cache, r.Events, r.Watcher)
131 | if err != nil {
132 | log.Error(err, "failed to create template manager")
133 | incFailed(name)
134 | return err
135 | }
136 |
137 | namespaces, err := tm.GetSourceNamespaces(ctx, template)
138 | if err != nil {
139 | log.Error(err, "failed to get source namespaces")
140 | incFailed(name)
141 | return err
142 | }
143 | if len(namespaces) != 1 || namespaces[0] != v1.NamespaceAll {
144 | found := false
145 | for _, n := range namespaces {
146 | if n == obj.GetNamespace() {
147 | found = true
148 | break
149 | }
150 | }
151 | if !found {
152 | log.V(2).Info("namespace not found in template source namespaces", "namespace", obj.GetNamespace(), "namespaces", namespaces)
153 | return nil
154 | }
155 | }
156 |
157 | _, err = tm.HandleSource(ctx, template, obj)
158 | if err != nil {
159 | incFailed(name)
160 | return err
161 | }
162 | incSuccess(name)
163 |
164 | return nil
165 | }
166 | }
167 |
168 | func incSuccess(name string) {
169 | templateCount.WithLabelValues(name).Inc()
170 | templateSuccess.WithLabelValues(name).Inc()
171 | }
172 |
173 | func incFailed(name string) {
174 | templateCount.WithLabelValues(name).Inc()
175 | templateFailed.WithLabelValues(name).Inc()
176 | }
177 |
--------------------------------------------------------------------------------
/docs/template-operator-intro-part-1.md:
--------------------------------------------------------------------------------
1 | ---
2 | Author: Saul Nachman & Moshe Immerman
3 | Last updated: 22/07/2021
4 | ---
5 |
6 |
7 | *This is part 1 of a series demonstrating the Template Operator's capabilities, starting in this post with [Creating resources per namespace](#use-case-creating-resources-per-namespace) and [Copying secrets between namespaces](#use-case-copying-secrets-between-namespaces).*
8 |
9 |
10 | # Template Operator
11 |
12 |
13 | **Simple, reconciliation-based runtime templating**
14 |
15 |
16 | The Template Operator is for platform engineers who need an easy and reliable way to create, copy, and update Kubernetes resources.
17 |
18 | ## Design principles
19 |
20 | - **100% YAML** – `Templates` are valid YAML and IDE validation and autocomplete of k8s resources works as normal.
21 | - **Simple** – Easy to use and quick to get started.
22 | - **Reconciliation based** – Changes are applied quickly and resiliently (unlike webhooks) at runtime.
23 |
24 | ### Alternatives
25 |
26 | There are alternative templating systems in use by the k8s community. Each has valid use cases, and noting their downsides for runtime templating is not intended as an indictment: all are excellent choices under the right conditions.
27 |
28 |
29 | | Alternative | Downside for templating |
30 | | ------------------------ | :------------------------------------------------------- |
31 | | [crossplane][crossplane] | Complex due to design for infrastructure composition |
32 | | [kyverno][kyverno]       | Webhook based<br>Designed as a policy engine              |
33 | | [helm][helm]             | Not 100% YAML<br>Not reconciliation based (build time)    |
34 |
35 |
36 | [crossplane]: https://crossplane.io/ "Crossplane"
37 | [kyverno]: https://kyverno.io/ "Kyverno"
38 | [helm]: https://helm.sh/ "Helm"
39 |
40 | ## Installation
41 |
42 | API documentation available [here](https://pkg.go.dev/github.com/flanksource/template-operator/api/v1).
43 |
44 | ### Prerequisites
45 |
46 | This guide assumes you have either a [kind cluster](https://kind.sigs.k8s.io/docs/user/quick-start/) or [minikube cluster](https://minikube.sigs.k8s.io/docs/start/) running, or have some other way of interacting with a cluster via [kubectl](https://kubernetes.io/docs/tasks/tools/).
47 |
48 | ### Install
49 |
50 | ```bash
51 | export VERSION=0.4.0
52 | # For the latest release version: https://github.com/flanksource/template-operator/releases
53 |
54 | # Apply the operator
55 | kubectl apply -f https://github.com/flanksource/template-operator/releases/download/v${VERSION}/operator.yml
56 | ```
57 |
58 | Run `kubectl get pods -A` and you should see something similar to the following in your terminal output:
59 |
60 | ```bash
61 | NAMESPACE NAME READY
62 | template-operator template-operator-controller-manager-6bd8c5ff58-sz8q6 2/2
63 | ```
64 |
65 | ### Following the logs
66 |
67 | To follow the manager logs, open a new terminal and, substituting the name of your controller-manager pod, run:
68 |
69 | ```bash
70 | kubectl logs -f --since 10m -n template-operator \
71 |   template-operator-controller-manager-6bd8c5ff58-sz8q6 -c manager
72 | ```
73 |
74 | These logs are where reconciliation successes and errors show up – and the best place to look when debugging.
75 |
76 | ## Use case: Creating resources per namespace
77 |
78 | > *As a platform engineer, I need to quickly provision Namespaces for application teams so that they are able to spin up environments quickly.*
79 |
80 | As organisations grow, platform teams are often tasked with creating `Namespaces` for continuous integration or for development.
81 |
82 | To configure a `Namespace`, platform teams may need to commit or apply many boilerplate objects.
83 |
84 | For this example, suppose you need a set of `Roles` and `RoleBindings` to be deployed automatically for each new `Namespace`.
85 |
86 | ### Step 1: Adding a namespace and a template
87 |
88 | Add a `Namespace`. You might add this after applying the `Template`, but it's helpful to see that the Template Operator doesn't care when objects are applied – a feature of the reconciliation-based approach. Note the label – this tags the `Namespace` as one that should produce `RoleBindings`.
89 |
90 | ```yaml
91 | cat <
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
206 | // github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible
207 | // github.com/go-openapi/spec => github.com/go-openapi/spec v0.19.3
208 | // google.golang.org/grpc => google.golang.org/grpc v1.29.1
209 | // k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.15
210 | k8s.io/client-go => k8s.io/client-go v0.27.2
211 | )
212 |
--------------------------------------------------------------------------------
/hack/boilerplate.go.txt:
--------------------------------------------------------------------------------
1 | /*
2 |
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
--------------------------------------------------------------------------------
/k8s/patches.go:
--------------------------------------------------------------------------------
1 | package k8s
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "path/filepath"
7 | osruntime "runtime"
8 | "strings"
9 | "text/template"
10 |
11 | "github.com/flanksource/kommons/ktemplate"
12 | "github.com/go-logr/logr"
13 | "github.com/pkg/errors"
14 | fyaml "gopkg.in/flanksource/yaml.v3"
15 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
16 | "k8s.io/apimachinery/pkg/runtime/schema"
17 | "k8s.io/client-go/kubernetes"
18 | "sigs.k8s.io/kustomize/api/krusty"
19 | "sigs.k8s.io/kustomize/kyaml/filesys"
20 | "sigs.k8s.io/kustomize/pkg/gvk"
21 | "sigs.k8s.io/kustomize/pkg/patch"
22 | "sigs.k8s.io/kustomize/pkg/types"
23 | "sigs.k8s.io/yaml"
24 | )
25 |
26 | type PatchType string
27 |
28 | var (
29 | PatchTypeYaml PatchType = "yaml"
30 | PatchTypeJSON PatchType = "json"
31 | )
32 |
33 | type PatchApplier struct {
34 | Clientset *kubernetes.Clientset
35 | Log logr.Logger
36 | FuncMap template.FuncMap
37 | SchemaManager *SchemaManager
38 | }
39 |
40 | func NewPatchApplier(clientset *kubernetes.Clientset, schemaManager *SchemaManager, log logr.Logger) (*PatchApplier, error) {
41 | p := &PatchApplier{
42 | Clientset: clientset,
43 | Log: log,
44 | SchemaManager: schemaManager,
45 | }
46 |
47 | functions := ktemplate.NewFunctions(clientset)
48 | p.FuncMap = functions.FuncMap()
49 | return p, nil
50 | }
51 |
52 | func (p *PatchApplier) Apply(resource *unstructured.Unstructured, patchStr string, patchType PatchType) (*unstructured.Unstructured, error) {
53 | // fmt.Printf("Template patch:\n%s\n====\n", patchStr)
54 | t, err := template.New("patch").Funcs(p.FuncMap).Parse(patchStr)
55 | if err != nil {
56 | return nil, errors.Wrap(err, "failed to create template from patch")
57 | }
58 |
59 | var tpl bytes.Buffer
60 | var data = map[string]interface{}{
61 | "source": resource.Object,
62 | }
63 | if err := t.Execute(&tpl, data); err != nil {
64 | return nil, errors.Wrap(err, "failed to execute template")
65 | }
66 |
67 | // create an in memory fs to use for the kustomization
68 | memFS := filesys.MakeFsInMemory()
69 |
70 | fakeDir := "/"
71 | // for Windows we need this to be a drive because kustomize uses filepath.Abs()
72 | // which will add a drive letter if there is none. which drive letter is
73 | // unimportant as the path is on the fake filesystem anyhow
74 | if osruntime.GOOS == "windows" {
75 | fakeDir = `C:\`
76 | }
77 |
78 | // writes the resource to a file in the temp file system
79 | b, err := yaml.Marshal(resource.Object)
80 | if err != nil {
81 | return nil, errors.Wrap(err, "failed to marshal resource object")
82 | }
83 | name := "resource.yaml"
84 | memFS.WriteFile(filepath.Join(fakeDir, name), b) // nolint: errcheck
85 |
86 | kustomizationFile := &types.Kustomization{Resources: []string{name}}
87 |
88 | version := resource.GetAPIVersion()
89 | parts := strings.Split(version, "/")
90 | var apiVersion, apiGroup string
91 | if len(parts) == 1 {
92 | apiGroup = ""
93 | apiVersion = parts[0]
94 | } else {
95 | apiGroup = parts[0]
96 | apiVersion = parts[1]
97 | }
98 | groupVersionKind := schema.GroupVersionKind{Group: apiGroup, Version: apiVersion, Kind: resource.GetKind()}
99 |
100 | if patchType == PatchTypeYaml {
101 | finalPatch := map[string]interface{}{}
102 | templateBytes := tpl.Bytes()
103 | if err := fyaml.Unmarshal(templateBytes, &finalPatch); err != nil {
104 | return nil, errors.Wrap(err, "failed to unmarshal template yaml")
105 | }
106 | patchObject := &unstructured.Unstructured{Object: finalPatch}
107 | if patchObject.GetName() == "" {
108 | patchObject.SetName(resource.GetName())
109 | }
110 | if patchObject.GetNamespace() == "" {
111 | patchObject.SetNamespace(resource.GetNamespace())
112 | }
113 |
114 | if err := p.SchemaManager.DuckType(groupVersionKind, patchObject); err != nil {
115 | p.Log.Error(err, "failed to duck type object")
116 | }
117 |
118 | // writes strategic merge patches to files in the temp file system
119 | kustomizationFile.PatchesStrategicMerge = []patch.StrategicMerge{}
120 | b, err = yaml.Marshal(patchObject.Object)
121 | if err != nil {
122 | return nil, errors.Wrap(err, "failed to marshal patch object")
123 | }
124 |
125 | name = fmt.Sprintf("patch-0.yaml")
126 | memFS.WriteFile(filepath.Join(fakeDir, name), b) // nolint: errcheck
127 | kustomizationFile.PatchesStrategicMerge = []patch.StrategicMerge{patch.StrategicMerge(name)}
128 |
129 | } else if patchType == PatchTypeJSON {
130 | name = fmt.Sprintf("patch-0.json")
131 | templateBytes := tpl.Bytes()
132 | memFS.WriteFile(filepath.Join(fakeDir, name), templateBytes) // nolint: errcheck
133 | // writes json patches to files in the temp file system
134 |
135 | kustomizationFile.PatchesJson6902 = []patch.Json6902{
136 | {
137 | Target: &patch.Target{
138 | Gvk: gvk.Gvk{
139 | Group: apiGroup,
140 | Version: apiVersion,
141 | Kind: resource.GetKind(),
142 | },
143 | Name: resource.GetName(),
144 | Namespace: resource.GetNamespace(),
145 | },
146 | Path: name,
147 | },
148 | }
149 |
150 | } else {
151 | return nil, errors.Errorf("invalid patch type %s", patchType)
152 | }
153 |
154 | // writes the kustomization file to the temp file system
155 | kbytes, err := yaml.Marshal(kustomizationFile)
156 | if err != nil {
157 | return nil, errors.Wrap(err, "failed to marshal kustomization file")
158 | }
159 | memFS.WriteFile(filepath.Join(fakeDir, "kustomization.yaml"), kbytes) // nolint: errcheck
160 |
161 | // Finally kustomize the target resource
162 | out, err := krusty.MakeKustomizer(krusty.MakeDefaultOptions()).Run(memFS, fakeDir)
163 | if err != nil {
164 | return nil, errors.Wrap(err, "failed to run kustomize build")
165 | }
166 |
167 | for _, r := range out.Resources() {
168 | if b, err := r.AsYAML(); err == nil {
169 | if err := yaml.Unmarshal(b, &resource); err != nil {
170 | return nil, errors.Wrap(err, "failed to unmarshal kustomize output into resource "+string(b))
171 | }
172 | }
173 | }
174 |
175 | return resource, nil
176 | }
177 |
178 | var annotationsBlacklist = []string{
179 | "metadata.annotations.serving.knative.dev/creator",
180 | "metadata.annotations.serving.knative.dev/lastModifier",
181 | }
182 |
183 | func stripAnnotations(obj *unstructured.Unstructured) {
184 | annotations := obj.GetAnnotations()
185 | for _, a := range annotationsBlacklist {
186 | delete(annotations, a)
187 | }
188 | obj.SetAnnotations(annotations)
189 | }
190 |
--------------------------------------------------------------------------------
/k8s/patches_test.go:
--------------------------------------------------------------------------------
1 | package k8s_test
2 |
3 | import (
4 | "strings"
5 |
6 | "github.com/flanksource/template-operator/k8s"
7 | . "github.com/onsi/ginkgo"
8 | . "github.com/onsi/gomega"
9 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
10 | ctrl "sigs.k8s.io/controller-runtime"
11 | "sigs.k8s.io/yaml"
12 | )
13 |
14 | var _ = Describe("Patches", func() {
15 | It("Merges json patch Ingress", func() {
16 | resource := &unstructured.Unstructured{
17 | Object: map[string]interface{}{
18 | "kind": "Ingress",
19 | "apiVersion": "extensions/v1beta1",
20 | "metadata": map[string]interface{}{
21 | "name": "podinfo",
22 | "namespace": "example",
23 | },
24 | "spec": map[string]interface{}{
25 | "rules": []map[string]interface{}{
26 | {
27 | "host": "pod-info",
28 | "http": map[string]interface{}{
29 | "paths": []map[string]interface{}{
30 | {
31 | "backend": map[string]interface{}{
32 | "serviceName": "podinfo",
33 | "servicePort": 9898,
34 | },
35 | },
36 | },
37 | },
38 | },
39 | },
40 | "tls": []map[string]interface{}{
41 | {
42 | "hosts": []string{
43 | "pod-info",
44 | },
45 | "secretName": "podinfo-tls",
46 | },
47 | },
48 | },
49 | },
50 | }
51 |
52 | patch := `
53 | [
54 | {
55 | "op": "replace",
56 | "path": "/spec/rules/0/host",
57 | "value": "{{ jsonPath .source "spec.rules.0.host" }}.{{- kget "cm/quack/quack-config" "data.domain" -}}"
58 | },
59 | {
60 | "op": "replace",
61 | "path": "/spec/tls/0/hosts/0",
62 | "value": "{{ jsonPath .source "spec.tls.0.hosts.0" }}.{{- kget "cm/quack/quack-config" "data.domain" -}}"
63 | }
64 | ]
65 | `
66 | log := ctrl.Log.WithName("test")
67 | patchApplier, err := k8s.NewPatchApplier(clientset(), newSchemaManager(), log)
68 | Expect(err).ToNot(HaveOccurred())
69 | patchApplier.FuncMap["kget"] = func(path, jsonPath string) string {
70 | return "1.2.3.4.nip.io"
71 | }
72 |
73 | newResource, err := patchApplier.Apply(resource, patch, k8s.PatchTypeJSON)
74 | Expect(err).To(BeNil())
75 |
76 | specYaml, err := yaml.Marshal(newResource.Object)
77 | Expect(err).To(BeNil())
78 |
79 | foundYaml := strings.TrimSpace(string(specYaml))
80 |
81 | expectedYaml := strings.TrimSpace(`
82 | apiVersion: extensions/v1beta1
83 | kind: Ingress
84 | metadata:
85 | name: podinfo
86 | namespace: example
87 | spec:
88 | rules:
89 | - host: pod-info.1.2.3.4.nip.io
90 | http:
91 | paths:
92 | - backend:
93 | serviceName: podinfo
94 | servicePort: 9898
95 | tls:
96 | - hosts:
97 | - pod-info.1.2.3.4.nip.io
98 | secretName: podinfo-tls
99 | `)
100 | // fmt.Printf("Found:\n%s\n", foundYaml)
101 | // fmt.Printf("Expected:\n%s\n", expectedYaml)
102 | Expect(foundYaml).To(Equal(expectedYaml))
103 | })
104 |
105 | It("Merges json patch Service", func() {
106 | resource := &unstructured.Unstructured{
107 | Object: map[string]interface{}{
108 | "kind": "Service",
109 | "apiVersion": "v1",
110 | "metadata": map[string]interface{}{
111 | "name": "podinfo",
112 | "namespace": "example",
113 | },
114 | "spec": map[string]interface{}{
115 | "ports": []interface{}{
116 | map[string]interface{}{
117 | "protocol": "TCP",
118 | "port": "80",
119 | "targetPort": "9376",
120 | },
121 | },
122 | },
123 | },
124 | }
125 |
126 | patch := `
127 | [
128 | {
129 | "op": "replace",
130 | "path": "/spec/ports/0/port",
131 | "value": 443
132 | }
133 | ]
134 | `
135 | log := ctrl.Log.WithName("test")
136 | patchApplier, err := k8s.NewPatchApplier(clientset(), newSchemaManager(), log)
137 | Expect(err).ToNot(HaveOccurred())
138 | patchApplier.FuncMap["kget"] = func(path, jsonPath string) string {
139 | return "1.2.3.4.nip.io"
140 | }
141 |
142 | newResource, err := patchApplier.Apply(resource, patch, k8s.PatchTypeJSON)
143 | Expect(err).ToNot(HaveOccurred())
144 |
145 | specYaml, err := yaml.Marshal(newResource.Object)
146 | Expect(err).ToNot(HaveOccurred())
147 |
148 | foundYaml := strings.TrimSpace(string(specYaml))
149 |
150 | expectedYaml := strings.TrimSpace(`
151 | apiVersion: v1
152 | kind: Service
153 | metadata:
154 | name: podinfo
155 | namespace: example
156 | spec:
157 | ports:
158 | - port: 443
159 | protocol: TCP
160 | targetPort: "9376"
161 | `)
162 | // fmt.Printf("Found:\n%s\n", foundYaml)
163 | // fmt.Printf("Expected:\n%s\n", expectedYaml)
164 | Expect(foundYaml).To(Equal(expectedYaml))
165 | })
166 |
167 | It("Merges annotations and labels", func() {
168 | resource := &unstructured.Unstructured{
169 | Object: map[string]interface{}{
170 | "kind": "Ingress",
171 | "apiVersion": "extensions/v1beta1",
172 | "metadata": map[string]interface{}{
173 | "name": "podinfo",
174 | "namespace": "example",
175 | "annotations": map[string]interface{}{
176 | "annotation1.example.com": "value1",
177 | "annotation2.example.com": "value2",
178 | },
179 | "labels": map[string]interface{}{
180 | "label1": "value1",
181 | "label2": "value2",
182 | },
183 | },
184 | "spec": map[string]interface{}{},
185 | },
186 | }
187 |
188 | patch := `
189 | apiVersion: extensions/v1beta1
190 | kind: Ingress
191 | metadata:
192 | labels:
193 | label2: value22
194 | label3: value33
195 | annotations:
196 | annotation2.example.com: value22
197 | annotation3.example.com: foo.{{- kget "cm/quack/quack-config" "data.domain" -}}
198 | `
199 |
200 | log := ctrl.Log.WithName("test")
201 | patchApplier, err := k8s.NewPatchApplier(clientset(), newSchemaManager(), log)
202 | Expect(err).ToNot(HaveOccurred())
203 | patchApplier.FuncMap["kget"] = func(path, jsonPath string) string {
204 | return "1.2.3.4.nip.io"
205 | }
206 |
207 | newResource, err := patchApplier.Apply(resource, patch, k8s.PatchTypeYaml)
208 | Expect(err).ToNot(HaveOccurred())
209 |
210 | specYaml, err := yaml.Marshal(newResource.Object)
211 | Expect(err).ToNot(HaveOccurred())
212 | foundYaml := strings.TrimSpace(string(specYaml))
213 |
214 | expectedYaml := strings.TrimSpace(`
215 | apiVersion: extensions/v1beta1
216 | kind: Ingress
217 | metadata:
218 | annotations:
219 | annotation1.example.com: value1
220 | annotation2.example.com: value22
221 | annotation3.example.com: foo.1.2.3.4.nip.io
222 | labels:
223 | label1: value1
224 | label2: value22
225 | label3: value33
226 | name: podinfo
227 | namespace: example
228 | spec: {}
229 | `)
230 | // fmt.Printf("Found:\n%s\n", foundYaml)
231 | // fmt.Printf("Expected:\n%s\n", expectedYaml)
232 | Expect(foundYaml).To(Equal(expectedYaml))
233 | })
234 |
235 | It("Encodes as json", func() {
236 | resource := &unstructured.Unstructured{
237 | Object: map[string]interface{}{
238 | "kind": "postgresql",
239 | "apiVersion": "acid.zalan.do/v1",
240 | "metadata": map[string]interface{}{
241 | "name": "test",
242 | "namespace": "example",
243 | },
244 | "spec": map[string]interface{}{
245 | "replicas": 1,
246 | },
247 | },
248 | }
249 |
250 | patch := `
251 | apiVersion: acid.zalan.do/v1
252 | kind: postgresql
253 | spec:
254 | postgresql:
255 | parameters: "{{ kget "postgresqldb/postgres-operator/test" "spec.parameters" }}"
256 | `
257 |
258 | log := ctrl.Log.WithName("test")
259 | patchApplier, err := k8s.NewPatchApplier(clientset(), newSchemaManager(), log)
260 | Expect(err).ToNot(HaveOccurred())
261 | patchApplier.FuncMap["kget"] = func(path, jsonPath string) string {
262 | str := "{\"max_connections\":\"1024\",\"shared_buffers\":\"4759MB\",\"work_mem\":\"475MB\",\"maintenance_work_mem\":\"634M\"}"
263 | return strings.ReplaceAll(str, "\"", "\\\"")
264 | }
265 |
266 | newResource, err := patchApplier.Apply(resource, patch, k8s.PatchTypeYaml)
267 | Expect(err).ToNot(HaveOccurred())
268 |
269 | specYaml, err := yaml.Marshal(newResource.Object)
270 | Expect(err).ToNot(HaveOccurred())
271 | foundYaml := strings.TrimSpace(string(specYaml))
272 |
273 | expectedYaml := strings.TrimSpace(`
274 | apiVersion: acid.zalan.do/v1
275 | kind: postgresql
276 | metadata:
277 | name: test
278 | namespace: example
279 | spec:
280 | postgresql:
281 | parameters:
282 | maintenance_work_mem: 634M
283 | max_connections: "1024"
284 | shared_buffers: 4759MB
285 | work_mem: 475MB
286 | replicas: 1
287 | `)
288 | Expect(foundYaml).To(Equal(expectedYaml))
289 | })
290 | })
291 |
--------------------------------------------------------------------------------
/k8s/rest_manager.go:
--------------------------------------------------------------------------------
1 | package k8s
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/base64"
7 | "encoding/json"
9 | "io/ioutil"
10 | "net/http"
11 | "strconv"
12 | "strings"
13 | "text/template"
14 |
15 | "github.com/flanksource/kommons"
16 | "github.com/flanksource/kommons/ktemplate"
17 | templatev1 "github.com/flanksource/template-operator/api/v1"
18 | "github.com/go-logr/logr"
19 | "github.com/pkg/errors"
20 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
21 | "k8s.io/client-go/kubernetes"
22 | )
23 |
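// RESTManager executes the HTTP calls declared by REST resources, templating
// their URLs, bodies and status fields.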
24 | type RESTManager struct {
25 | Client *kommons.Client
26 | kubernetes.Interface
27 | Log logr.Logger
28 | FuncMap template.FuncMap
29 | }
30 |
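// NewRESTManager builds a RESTManager backed by the given kommons client, with
// the ktemplate function map available to all templated fields.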
31 | func NewRESTManager(c *kommons.Client, log logr.Logger) (*RESTManager, error) {
32 | clientset, err := c.GetClientset()
if err != nil {
	return nil, errors.Wrap(err, "failed to get clientset")
}
33 |
34 | functions := ktemplate.NewFunctions(clientset)
35 |
36 | tm := &RESTManager{
37 | Client: c,
38 | Interface: clientset,
39 | Log: log,
40 | FuncMap: functions.FuncMap(),
41 | }
42 | return tm, nil
43 | }
44 |
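// Update performs the HTTP call described by rest.Spec.Update and returns the
// status fields to write back, including observedGeneration. It is a no-op if
// the current generation has already been observed.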
45 | func (r *RESTManager) Update(ctx context.Context, rest *templatev1.REST) (map[string]string, error) {
46 | if sameGeneration(rest) {
47 | return nil, nil
48 | }
49 |
50 | url := rest.Spec.Update.URL
51 | method := rest.Spec.Update.Method
52 | body := rest.Spec.Update.Body
53 |
54 | resp, err := r.doRequest(ctx, rest, url, method, body)
55 | if err != nil {
56 | return nil, errors.Wrap(err, "failed to send request")
57 | }
58 |
59 | respBody := map[string]interface{}{}
60 | if err := json.Unmarshal(resp, &respBody); err != nil {
61 | r.Log.Info("failed to unmarshal response body", "error", err)
62 | }
63 |
64 | statusUpdates := map[string]string{}
65 |
66 | if rest.Spec.Update.Status != nil {
67 | for k, v := range rest.Spec.Update.Status {
68 | value, err := r.templateStatus(rest, respBody, v)
69 | if err != nil {
70 | return nil, errors.Wrapf(err, "failed to template status field %s", k)
71 | }
72 | statusUpdates[k] = value
73 | }
74 | }
75 |
76 | statusUpdates["observedGeneration"] = strconv.FormatInt(rest.ObjectMeta.Generation, 10)
77 |
78 | return statusUpdates, nil
79 | }
80 |
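// Delete performs the HTTP call described by rest.Spec.Remove.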
81 | func (r *RESTManager) Delete(ctx context.Context, rest *templatev1.REST) error {
82 | url := rest.Spec.Remove.URL
83 | method := rest.Spec.Remove.Method
84 | body := rest.Spec.Remove.Body
85 |
86 | _, err := r.doRequest(ctx, rest, url, method, body)
87 | if err != nil {
88 | return errors.Wrap(err, "failed to send request")
89 | }
90 |
91 | r.Log.V(3).Info("delete request sent")
92 | return nil
93 | }
94 |
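// doRequest templates the URL and body, falls back to rest.Spec.URL when the
// templated URL is empty, applies headers and optional basic auth, and returns
// the response body; any non-2xx status is treated as an error.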
95 | func (r *RESTManager) doRequest(ctx context.Context, rest *templatev1.REST, url, method, body string) ([]byte, error) {
96 | newBody, err := r.templateField(rest, body)
97 | if err != nil {
98 | return nil, errors.Wrap(err, "failed to template body")
99 | }
100 |
101 | newURL, err := r.templateField(rest, url)
102 | if err != nil {
103 | return nil, errors.Wrap(err, "failed to template url")
104 | }
105 | if newURL == "" {
106 | if rest.Spec.URL == "" {
107 | return nil, errors.New("url cannot be empty")
108 | }
109 | newURL = rest.Spec.URL
110 | }
111 |
112 | client := &http.Client{}
113 |
114 | // set the HTTP method, url, and request body
115 | req, err := http.NewRequest(method, newURL, bytes.NewBuffer([]byte(newBody)))
116 | if err != nil {
117 | return nil, errors.Wrap(err, "failed to create request")
118 | }
119 |
120 | if rest.Spec.Headers != nil {
121 | for k, v := range rest.Spec.Headers {
122 | req.Header.Set(k, v)
123 | }
124 | }
125 |
126 | if rest.Spec.Auth != nil {
127 | basicAuth, err := getRestAuthorization(r.Client, rest.Spec.Auth)
128 | if err != nil {
129 | return nil, errors.Wrap(err, "failed to generate basic auth")
130 | }
131 | req.Header.Set("Authorization", basicAuth)
132 | }
133 |
134 | r.Log.V(3).Info("Sending Request:", "url", newURL, "method", method, "body", newBody)
135 |
136 | resp, err := client.Do(req)
137 | if err != nil {
138 | return nil, errors.Wrap(err, "http request failed")
139 | }
140 | defer resp.Body.Close()
141 |
142 | r.Log.V(3).Info("Response:", "statusCode", resp.StatusCode)
143 |
144 | bodyBytes, err := ioutil.ReadAll(resp.Body)
145 | if err != nil {
146 | return nil, errors.Wrap(err, "failed to read response body")
147 | }
148 |
149 | if resp.StatusCode < 200 || resp.StatusCode > 299 {
150 | return nil, errors.Errorf("expected response status 2xx, received status=%d body=%s", resp.StatusCode, string(bodyBytes))
151 | }
152 |
153 | return bodyBytes, nil
154 | }
155 |
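// templateField renders a single field as a Go template, using the REST object
// (converted to unstructured data) as the template context.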
156 | func (r *RESTManager) templateField(rest *templatev1.REST, field string) (string, error) {
157 | t, err := template.New("patch").Option("missingkey=zero").Funcs(r.FuncMap).Parse(field)
158 | // suppress/ignore the error if it contains "map has no entry for key", as missingkey=zero currently doesn't work on map[string]interface{}
159 | // workaround for: https://github.com/golang/go/issues/24963
160 | if err != nil && !strings.Contains(err.Error(), "map has no entry for key") {
161 | return "", errors.Wrap(err, "failed to create template from field")
162 | }
163 |
164 | var tpl bytes.Buffer
165 | unstructuredData, err := kommons.ToUnstructured(&unstructured.Unstructured{}, rest)
166 | if err != nil {
167 | return "", errors.Wrap(err, "failed to convert rest to unstructured")
168 | }
169 | data := unstructuredData.Object
170 |
171 | if data["status"] == nil {
172 | data["status"] = map[string]interface{}{}
173 | }
174 |
175 | if err := t.Execute(&tpl, data); err != nil {
176 | return "", errors.Wrap(err, "failed to execute template")
177 | }
178 |
179 | return tpl.String(), nil
180 | }
181 |
182 | func (r *RESTManager) templateStatus(rest *templatev1.REST, response map[string]interface{}, field string) (string, error) {
183 | t, err := template.New("patch").Option("missingkey=zero").Funcs(r.FuncMap).Parse(field)
184 | // suppress/ignore the error if it contains "map has no entry for key", as missingkey=zero currently doesn't work on map[string]interface{}
185 | // workaround for: https://github.com/golang/go/issues/24963
186 | if err != nil && !strings.Contains(err.Error(), "map has no entry for key") {
187 | return "", errors.Wrap(err, "failed to create template from field")
188 | }
189 |
190 | var tpl bytes.Buffer
191 |
192 | unstructuredData, err := kommons.ToUnstructured(&unstructured.Unstructured{}, rest)
193 | if err != nil {
194 | return "", errors.Wrap(err, "failed to convert rest to unstructured")
195 | }
196 | data := unstructuredData.Object
197 | data["response"] = response
198 |
199 | if err := t.Execute(&tpl, data); err != nil {
200 | return "", errors.Wrap(err, "failed to execute template")
201 | }
202 |
203 | return tpl.String(), nil
204 | }
205 |
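// sameGeneration reports whether status.observedGeneration matches the object's
// current generation, i.e. whether the latest spec has already been processed.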
206 | func sameGeneration(rest *templatev1.REST) bool {
207 | if rest.Status == nil {
208 | return false
209 | }
210 |
211 | observedGeneration := rest.Status["observedGeneration"]
212 |
213 | if observedGeneration == "" {
214 | return false
215 | }
216 |
217 | gen, err := strconv.ParseInt(observedGeneration, 10, 64)
218 | if err != nil {
219 | return false
220 | }
221 |
222 | return gen == rest.ObjectMeta.Generation
223 | }
224 |
225 | func getRestAuthorization(client *kommons.Client, auth *templatev1.RESTAuth) (string, error) {
226 | _, username, err := client.GetEnvValue(kommons.EnvVar{Name: "username", ValueFrom: &auth.Username}, auth.Namespace)
227 | if err != nil {
228 | return "", errors.Wrap(err, "failed to get username value")
229 | }
230 | _, password, err := client.GetEnvValue(kommons.EnvVar{Name: "password", ValueFrom: &auth.Password}, auth.Namespace)
231 | if err != nil {
232 | return "", errors.Wrap(err, "failed to get password value")
233 | }
234 |
235 | basicAuth := "Basic " + base64.StdEncoding.EncodeToString([]byte(username+":"+password))
236 | return basicAuth, nil
237 | }
238 |
--------------------------------------------------------------------------------
/k8s/schema_cache.go:
--------------------------------------------------------------------------------
1 | package k8s
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "sync"
8 | "time"
9 |
10 | "github.com/go-logr/logr"
11 | "github.com/go-openapi/spec"
12 | lru "github.com/hashicorp/golang-lru"
13 | "github.com/pkg/errors"
14 | extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
15 | extapi "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
16 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
17 | "k8s.io/apimachinery/pkg/runtime/schema"
18 | "k8s.io/client-go/kubernetes"
19 | )
20 |
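// SchemaCache caches the cluster's OpenAPI schema, preferred API resources and
// CRDs for a fixed expiry window, plus an LRU cache of converted CRD schemas.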
21 | type SchemaCache struct {
22 | clientset *kubernetes.Clientset
23 | expire time.Duration
24 | lock *sync.Mutex
25 | crdClient extapi.ApiextensionsV1Interface
26 |
27 | resources []*metav1.APIResourceList
28 | resourcesExpireTimestamp time.Time
29 |
30 | schema *spec.Swagger
31 | schemaExpireTimestamp time.Time
32 |
33 | crds []extv1.CustomResourceDefinition
34 | crdsExpireTimestamp time.Time
35 |
36 | schemaUnmarshalCache *lru.Cache
37 |
38 | log logr.Logger
39 | }
40 |
41 | func NewSchemaCache(clientset *kubernetes.Clientset, crdClient extapi.ApiextensionsV1Interface, expire time.Duration, log logr.Logger) *SchemaCache {
42 | schemaUnmarshalCache, _ := lru.New(100)
43 |
44 | sc := &SchemaCache{
45 | clientset: clientset,
46 | crdClient: crdClient,
47 | expire: expire,
48 | lock: &sync.Mutex{},
49 | log: log,
50 | schemaUnmarshalCache: schemaUnmarshalCache,
51 |
52 | resources: nil,
53 | }
54 | return sc
55 | }
56 |
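// ExpireSchema marks the cached OpenAPI schema and CRD list as stale so that
// the next fetch reloads them from the API server.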
57 | func (sc *SchemaCache) ExpireSchema() error {
58 | sc.lock.Lock()
59 | defer sc.lock.Unlock()
60 | if sc.schemaExpireTimestamp.After(time.Now()) {
61 | sc.schemaExpireTimestamp = time.Now()
62 | }
63 | if sc.crdsExpireTimestamp.After(time.Now()) {
64 | sc.crdsExpireTimestamp = time.Now()
65 | }
66 | return nil
67 | }
68 |
69 | func (sc *SchemaCache) ExpireResources() error {
70 | sc.lock.Lock()
71 | defer sc.lock.Unlock()
72 | if sc.resourcesExpireTimestamp.After(time.Now()) {
73 | sc.resourcesExpireTimestamp = time.Now()
74 | }
75 | return nil
76 | }
77 |
78 | func (sc *SchemaCache) SchemaHasExpired() bool {
79 | return sc.schemaExpireTimestamp.Before(time.Now())
80 | }
81 |
82 | func (sc *SchemaCache) ResourceHasExpired() bool {
83 | return sc.resourcesExpireTimestamp.Before(time.Now())
84 | }
85 |
86 | func (sc *SchemaCache) FetchSchema() (*spec.Swagger, error) {
87 | sc.lock.Lock()
88 | defer sc.lock.Unlock()
89 |
90 | if sc.schema == nil || time.Now().After(sc.schemaExpireTimestamp) {
91 | sc.log.V(3).Info("before fetch schema")
92 | if err := sc.fetchAndSetSchema(); err != nil {
93 | return nil, errors.Wrap(err, "failed to refetch API schema")
94 | }
95 | sc.log.V(3).Info("after fetch schema")
96 | }
97 |
98 | return sc.schema, nil
99 | }
100 |
101 | func (sc *SchemaCache) FetchResources() ([]*metav1.APIResourceList, error) {
102 | sc.lock.Lock()
103 | defer sc.lock.Unlock()
104 |
105 | if sc.resources == nil || time.Now().After(sc.resourcesExpireTimestamp) {
106 | sc.log.V(3).Info("before fetch resources")
107 | if err := sc.fetchAndSetResources(); err != nil {
108 | return nil, errors.Wrap(err, "failed to refetch API resources")
109 | }
110 | sc.log.V(3).Info("after fetch resources")
111 | }
112 | return sc.resources, nil
113 | }
114 |
115 | func (sc *SchemaCache) FetchCRD() ([]extv1.CustomResourceDefinition, error) {
116 | sc.lock.Lock()
117 | defer sc.lock.Unlock()
118 |
119 | if sc.crds == nil || time.Now().After(sc.crdsExpireTimestamp) {
120 | sc.log.V(3).Info("before fetch crds")
121 | crds, err := sc.crdClient.CustomResourceDefinitions().List(context.Background(), metav1.ListOptions{})
122 | if err != nil {
123 | return nil, errors.Wrap(err, "failed to list customresourcedefinitions")
124 | }
125 | sc.crds = crds.Items
126 | sc.crdsExpireTimestamp = time.Now().Add(sc.expire)
127 | sc.log.V(3).Info("after fetch crds")
128 | }
129 |
130 | return sc.crds, nil
131 | }
132 |
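// CachedConvertSchema converts a CRD version's OpenAPI v3 schema into a
// spec.Schema, memoising the result in an LRU cache keyed by group/version/kind.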
133 | func (sc *SchemaCache) CachedConvertSchema(gvk schema.GroupVersionKind, crd extv1.CustomResourceDefinitionVersion) (*spec.Schema, error) {
134 | key := fmt.Sprintf("group=%s;version=%s;kind=%s", gvk.Group, gvk.Version, gvk.Kind)
135 |
136 | sc.lock.Lock()
137 | defer sc.lock.Unlock()
138 |
139 | schemaI, found := sc.schemaUnmarshalCache.Get(key)
140 | if found {
141 | schema, ok := schemaI.(*spec.Schema)
142 | if ok {
143 | return schema, nil
144 | }
145 | sc.log.Info("failed to fetch schema from lru cache")
146 | }
147 |
148 | schemaBytes, err := json.Marshal(crd.Schema.OpenAPIV3Schema)
149 | if err != nil {
150 | return nil, errors.Wrap(err, "failed to encode crd schema to json")
151 | }
152 |
153 | schema := &spec.Schema{}
154 | if err := json.Unmarshal(schemaBytes, schema); err != nil {
155 | return nil, errors.Wrap(err, "failed to decode json into spec.Schema")
156 | }
157 |
158 | sc.schemaUnmarshalCache.Add(key, schema)
159 | return schema, nil
160 | }
161 |
162 | func (sc *SchemaCache) fetchAndSetSchema() error {
163 | bs, err := sc.clientset.RESTClient().Get().AbsPath("openapi", "v2").DoRaw(context.TODO())
164 | if err != nil {
165 | return errors.Wrap(err, "failed to fetch schema from server")
166 | }
167 | s := &spec.Swagger{}
168 |
169 | if err := json.Unmarshal(bs, &s); err != nil {
170 | return errors.Wrap(err, "failed to unmarshal openapi")
171 | }
172 |
173 | sc.schema = s
174 | sc.schemaExpireTimestamp = time.Now().Add(sc.expire)
175 |
176 | return nil
177 | }
178 |
179 | func (sc *SchemaCache) fetchAndSetResources() error {
180 | serverResources, err := sc.clientset.ServerPreferredResources()
181 | if err != nil {
182 | return errors.Wrap(err, "failed to list server resources")
183 | }
184 | sc.resources = serverResources
185 | sc.resourcesExpireTimestamp = time.Now().Add(sc.expire)
186 | return nil
187 | }
188 |
--------------------------------------------------------------------------------
/k8s/suite_test.go:
--------------------------------------------------------------------------------
1 | package k8s_test
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | . "github.com/onsi/ginkgo"
8 | . "github.com/onsi/gomega"
9 | "k8s.io/apimachinery/pkg/runtime"
10 | )
11 |
12 | func TestK8s(t *testing.T) {
13 | RegisterFailHandler(Fail)
14 | RunSpecs(t, "K8S Suite")
15 | }
16 |
17 | type TestEventRecorder struct{}
18 |
19 | func (r *TestEventRecorder) Event(object runtime.Object, eventtype, reason, message string) {
20 | r.event(object, eventtype, reason, message, map[string]string{})
21 | }
22 |
23 | // Eventf is just like Event, but with Sprintf for the message field.
24 | func (r *TestEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
25 | msg := fmt.Sprintf(messageFmt, args...)
26 | r.event(object, eventtype, reason, msg, map[string]string{})
27 | }
28 |
29 | // AnnotatedEventf is just like eventf, but with annotations attached
30 | func (r *TestEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
31 | msg := fmt.Sprintf(messageFmt, args...)
32 | r.event(object, eventtype, reason, msg, annotations)
33 | }
34 |
35 | func (r *TestEventRecorder) event(object runtime.Object, eventtype, reason, message string, annotations map[string]string) {
36 | fmt.Printf("Received event type=%s reason=%s message='%s' on object %v\n", eventtype, reason, message, object)
37 | }
38 |
--------------------------------------------------------------------------------
/k8s/template_manager_test.go:
--------------------------------------------------------------------------------
1 | package k8s_test
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | "time"
7 |
8 | "github.com/flanksource/template-operator/k8s"
9 | . "github.com/onsi/ginkgo"
10 | . "github.com/onsi/gomega"
11 | ctrl "sigs.k8s.io/controller-runtime"
12 | "sigs.k8s.io/yaml"
13 | )
14 |
15 | var testLog = ctrl.Log.WithName("test")
16 |
17 | var _ = Describe("TemplateManager", func() {
18 | Describe("Template", func() {
19 | It("converts PostgresqlDb to zalando Postgresql", func() {
20 |
21 | db := map[string]interface{}{
22 | "apiVersion": "db.flanksource.com/v1",
23 | "kind": "PostgresqlDB",
24 | "metadata": map[string]interface{}{
25 | "name": "test1",
26 | "namespace": "postgres-operator",
27 | },
28 | "spec": map[string]interface{}{
29 | "replicas": 2,
30 | "parameters": map[string]interface{}{
31 | "max_connections": "1024",
32 | "shared_buffers": "4759MB",
33 | "work_mem": "475MB",
34 | "maintenance_work_mem": "634MB",
35 | },
36 | },
37 | }
38 |
39 | template := `
40 | apiVersion: acid.zalan.do/v1
41 | kind: postgresql
42 | metadata:
43 | name: postgres-{{.metadata.name}}
44 | namespace: postgres-operator
45 | spec:
46 | numberOfInstances: "{{ .spec.replicas }}"
47 | clone: null
48 | postgresql:
49 | parameters: "{{ .spec.parameters | data.ToJSON }}"
50 | synchronous_mode: false
51 | `
52 | templateJSON, err := yaml.YAMLToJSON([]byte(template))
53 | Expect(err).ToNot(HaveOccurred())
54 |
55 | expectedYaml := `
56 | apiVersion: acid.zalan.do/v1
57 | kind: postgresql
58 | metadata:
59 | name: postgres-test1
60 | namespace: postgres-operator
61 | spec:
62 | clone: null
63 | numberOfInstances: 2
64 | postgresql:
65 | parameters:
66 | maintenance_work_mem: 634MB
67 | max_connections: "1024"
68 | shared_buffers: 4759MB
69 | work_mem: 475MB
70 | synchronous_mode: false
71 | `
72 |
73 | eventsRecorder := &TestEventRecorder{}
74 | cache := k8s.NewSchemaCache(clientset(), crdClient(), 5*time.Minute, testLog)
75 | templateManager, err := k8s.NewTemplateManager(kommonsClient(), testLog, cache, eventsRecorder, &k8s.NullWatcher{})
76 | Expect(err).ToNot(HaveOccurred())
77 |
78 | result, err := templateManager.Template(templateJSON, db)
79 | Expect(err).ToNot(HaveOccurred())
80 |
81 | yml := string(result)
82 |
83 | fmt.Printf("Expected:\n%v\n=======Actual:\n%v\n==========", expectedYaml, yml)
84 | Expect(strings.TrimSpace(yml)).To(Equal(strings.TrimSpace(expectedYaml)))
85 | })
86 | })
87 | })
88 |
--------------------------------------------------------------------------------
/k8s/watcher.go:
--------------------------------------------------------------------------------
1 | package k8s
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "sync"
8 | "time"
9 |
10 | "github.com/flanksource/commons/logger"
11 | "github.com/flanksource/kommons"
12 | templatev1 "github.com/flanksource/template-operator/api/v1"
13 | "github.com/go-logr/logr"
14 | "github.com/pkg/errors"
15 | v1 "k8s.io/api/core/v1"
16 | "k8s.io/apimachinery/pkg/api/meta"
17 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
18 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
19 | "k8s.io/apimachinery/pkg/runtime"
20 | "k8s.io/apimachinery/pkg/watch"
21 | "k8s.io/client-go/dynamic"
22 | "k8s.io/client-go/informers"
23 | "k8s.io/client-go/kubernetes"
24 | "k8s.io/client-go/tools/cache"
25 | )
26 |
27 | type CallbackFunc func(unstructured.Unstructured) error
28 |
29 | type WatcherInterface interface {
30 | Watch(exampleObject *unstructured.Unstructured, template *templatev1.Template, cb CallbackFunc) error
31 | }
32 |
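// NullWatcher is a no-op WatcherInterface implementation, used where
// informer-based watches are not needed (for example in tests).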
33 | type NullWatcher struct{}
34 |
35 | func (w *NullWatcher) Watch(exampleObject *unstructured.Unstructured, template *templatev1.Template, cb CallbackFunc) error {
36 | return nil
37 | }
38 |
39 | type Watcher struct {
40 | clientset *kubernetes.Clientset
41 | client *kommons.Client
42 | mtx *sync.Mutex
43 | cache map[string]bool
44 | log logr.Logger
45 | }
46 |
47 | func NewWatcher(client *kommons.Client, log logr.Logger) (WatcherInterface, error) {
48 | clientset, err := client.GetClientset()
49 | if err != nil {
50 | return nil, errors.Wrap(err, "failed to get clientset")
51 | }
52 |
53 | watcher := &Watcher{
54 | clientset: clientset,
55 | client: client,
56 | mtx: &sync.Mutex{},
57 | cache: map[string]bool{},
58 | log: log,
59 | }
60 |
61 | return watcher, nil
62 | }
63 |
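// Watch starts a shared informer for the kind described by exampleObject,
// filtered by the template's label and field selectors, and invokes cb for
// every add or update event. A cache keyed by kind and template name ensures
// only one informer is started per combination.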
64 | func (w *Watcher) Watch(exampleObject *unstructured.Unstructured, template *templatev1.Template, cb CallbackFunc) error {
65 | cacheKey := getCacheKey(exampleObject, template)
66 | w.mtx.Lock()
67 | defer w.mtx.Unlock()
68 | if w.cache[cacheKey] {
69 | return nil
70 | }
71 | w.cache[cacheKey] = true
72 |
73 | logger.Debugf("Deploying new watcher for object=%s", exampleObject.GetObjectKind().GroupVersionKind().Kind)
74 |
75 | factory := informers.NewSharedInformerFactory(w.clientset, 0)
76 |
77 | di, err := w.getDynamicClient(exampleObject)
78 | if err != nil {
79 | return errors.Wrap(err, "failed to get dynamic client")
80 | }
81 |
82 | labelSelector, err := labelSelectorToString(template.Spec.Source.LabelSelector)
83 | if err != nil {
84 | return errors.Wrap(err, "failed to get label selector")
85 | }
86 |
87 | listOptions := metav1.ListOptions{
88 | LabelSelector: labelSelector,
89 | FieldSelector: template.Spec.Source.FieldSelector,
90 | }
91 |
92 | informer := factory.InformerFor(exampleObject, func(i kubernetes.Interface, d time.Duration) cache.SharedIndexInformer {
93 | c := cache.NewSharedIndexInformer(
94 | &cache.ListWatch{
95 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
96 | options.FieldSelector = listOptions.FieldSelector
97 | options.LabelSelector = listOptions.LabelSelector
98 | return di.List(context.TODO(), options)
99 | },
100 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
101 | options.FieldSelector = listOptions.FieldSelector
102 | options.LabelSelector = listOptions.LabelSelector
103 | return di.Watch(context.TODO(), options)
104 | },
105 | },
106 | exampleObject,
107 | d,
108 | cache.Indexers{},
109 | )
110 | return c
111 | })
112 |
113 | informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
114 | AddFunc: func(obj interface{}) {
115 | w.log.V(2).Info("Received callback for object added:", "object", obj)
116 | w.onUpdate(obj, cb)
117 | },
118 | UpdateFunc: func(oldObj interface{}, obj interface{}) {
119 | w.log.V(2).Info("Received callback for object updated:", "object", obj)
120 | w.onUpdate(obj, cb)
121 | },
122 | // deletions are only logged; no template callback is invoked
123 | DeleteFunc: func(obj interface{}) {
124 | w.log.V(2).Info("Received callback for object deleted:", "object", obj)
125 | },
126 | })
127 |
128 | stopper := make(chan struct{})
129 | go informer.Run(stopper)
130 |
131 | return nil
132 | }
133 |
134 | func (w *Watcher) onUpdate(obj interface{}, cb CallbackFunc) {
135 | js, err := json.Marshal(obj)
136 | if err != nil {
137 | w.log.Error(err, "failed to marshal object for update")
138 | return
139 | }
140 | unstr := &unstructured.Unstructured{}
141 | if err := json.Unmarshal(js, &unstr.Object); err != nil {
142 | w.log.Error(err, "failed to unmarshal into unstructured for update")
143 | return
144 | }
145 |
146 | if err := cb(*unstr); err != nil {
147 | w.log.Error(err, "failed to run callback")
148 | }
149 | }
150 |
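// getDynamicClient returns a dynamic client for the object's resource, scoped
// to all namespaces unless the resource is cluster-scoped.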
151 | func (w *Watcher) getDynamicClient(obj *unstructured.Unstructured) (dynamic.ResourceInterface, error) {
152 | dynamicClient, err := w.client.GetDynamicClient()
153 | if err != nil {
154 | return nil, errors.Wrap(err, "failed to get dynamic client")
155 | }
156 |
157 | mapping, err := w.client.WaitForRestMapping(obj, 2*time.Minute)
158 | if err != nil {
159 | return nil, err
160 | }
161 |
162 | if mapping.Scope == meta.RESTScopeRoot {
163 | return dynamicClient.Resource(mapping.Resource), nil
164 | }
165 | return dynamicClient.Resource(mapping.Resource).Namespace(v1.NamespaceAll), nil
166 | }
167 |
168 | func getCacheKey(obj runtime.Object, template *templatev1.Template) string {
169 | kind := obj.GetObjectKind().GroupVersionKind().Kind
170 | return fmt.Sprintf("kind=%s;template=%s", kind, template.Name)
171 | }
172 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | /*
2 |
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package main
18 |
19 | import (
20 | "flag"
21 | "os"
22 | "time"
23 |
24 | apiv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
25 |
26 | "github.com/flanksource/commons/logger"
27 | "github.com/flanksource/kommons"
28 | templatingflanksourcecomv1 "github.com/flanksource/template-operator/api/v1"
29 | "github.com/flanksource/template-operator/controllers"
30 | "github.com/flanksource/template-operator/k8s"
31 | zaplogfmt "github.com/sykesm/zap-logfmt"
32 | uzap "go.uber.org/zap"
33 | "go.uber.org/zap/zapcore"
34 | "gopkg.in/yaml.v2"
35 | apiv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
36 | extapi "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
37 | "k8s.io/apimachinery/pkg/runtime"
38 | clientgoscheme "k8s.io/client-go/kubernetes/scheme"
39 | _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
40 | ctrl "sigs.k8s.io/controller-runtime"
41 | "sigs.k8s.io/controller-runtime/pkg/log/zap"
42 | // +kubebuilder:scaffold:imports
43 | )
44 |
45 | var (
46 | scheme = runtime.NewScheme()
47 | setupLog = ctrl.Log.WithName("setup")
48 | )
49 |
50 | func init() {
51 | _ = clientgoscheme.AddToScheme(scheme)
52 |
53 | _ = templatingflanksourcecomv1.AddToScheme(scheme)
54 | _ = apiv1.AddToScheme(scheme)
55 | _ = apiv1beta1.AddToScheme(scheme)
56 | // +kubebuilder:scaffold:scheme
57 |
58 | yaml.FutureLineWrap()
59 | }
60 |
61 | func setupLogger(opts zap.Options) {
62 | configLog := uzap.NewProductionEncoderConfig()
63 | configLog.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
64 | encoder.AppendString(ts.UTC().Format(time.RFC3339Nano))
65 | }
66 | logfmtEncoder := zaplogfmt.NewEncoder(configLog)
67 |
68 | logger := zap.New(zap.UseFlagOptions(&opts), zap.Encoder(logfmtEncoder))
69 | ctrl.SetLogger(logger)
70 | }
71 |
72 | func main() {
73 | var metricsAddr string
74 | var enableLeaderElection bool
75 | var syncPeriod, expire time.Duration
76 | flag.DurationVar(&syncPeriod, "sync-period", 5*time.Minute, "The time duration to run a full reconcile")
77 | flag.DurationVar(&expire, "expire", 15*time.Minute, "The time duration to expire API resources cache")
78 | flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
79 | flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
80 | "Enable leader election for controller manager. "+
81 | "Enabling this will ensure there is only one active controller manager.")
82 |
83 | opts := zap.Options{}
84 | opts.BindFlags(flag.CommandLine)
85 | flag.Parse()
86 | setupLogger(opts)
87 |
88 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
89 | Scheme: scheme,
90 | MetricsBindAddress: metricsAddr,
91 | Port: 9443,
92 | SyncPeriod: &syncPeriod,
93 | LeaderElection: enableLeaderElection,
94 | LeaderElectionID: "ba344e13.flanksource.com",
95 | })
96 | if err != nil {
97 | setupLog.Error(err, "unable to start manager")
98 | os.Exit(1)
99 | }
100 |
101 | client := kommons.NewClient(mgr.GetConfig(), logger.StandardLogger())
102 | clientset, err := client.GetClientset()
103 | if err != nil {
104 | setupLog.Error(err, "failed to get clientset")
105 | os.Exit(1)
106 | }
107 | restConfig, err := client.GetRESTConfig()
108 | if err != nil {
109 | setupLog.Error(err, "failed to get rest config")
110 | os.Exit(1)
111 | }
112 | crdClient, err := extapi.NewForConfig(restConfig)
113 | if err != nil {
114 | setupLog.Error(err, "failed to get crd client")
115 | os.Exit(1)
116 | }
117 | schemaCache := k8s.NewSchemaCache(clientset, crdClient, expire, ctrl.Log.WithName("schema-cache"))
118 |
119 | watcher, err := k8s.NewWatcher(client, ctrl.Log.WithName("watcher"))
120 | if err != nil {
121 | setupLog.Error(err, "failed to setup watcher")
122 | os.Exit(1)
123 | }
124 |
125 | if err = (&controllers.TemplateReconciler{
126 | Client: controllers.Client{
127 | KommonsClient: client,
128 | Cache: schemaCache,
129 | Log: ctrl.Log.WithName("controllers").WithName("Template"),
130 | Scheme: mgr.GetScheme(),
131 | Watcher: watcher,
132 | },
133 | }).SetupWithManager(mgr); err != nil {
134 | setupLog.Error(err, "unable to create controller", "controller", "Template")
135 | os.Exit(1)
136 | }
137 | // CRDReconciler shares a SchemaCache with TemplateReconciler and resets it when CRD changes are reported, so that TemplateReconciler picks up the updated schemas
138 | if err = (&controllers.CRDReconciler{
139 | Client: controllers.Client{
140 | KommonsClient: client,
141 | Cache: schemaCache,
142 | Log: ctrl.Log.WithName("controllers").WithName("CRD"),
143 | Scheme: mgr.GetScheme(),
144 | },
145 | ResourceVersion: 0,
146 | }).SetupWithManager(mgr); err != nil {
147 | setupLog.Error(err, "unable to create controller", "controller", "CRD")
148 | os.Exit(1)
149 | }
150 | if err = (&controllers.RESTReconciler{
151 | Client: controllers.Client{
152 | KommonsClient: client,
153 | Cache: schemaCache,
154 | Log: ctrl.Log.WithName("controllers").WithName("REST"),
155 | Scheme: mgr.GetScheme(),
156 | Watcher: watcher,
157 | },
158 | }).SetupWithManager(mgr); err != nil {
159 | setupLog.Error(err, "unable to create controller", "controller", "REST")
160 | os.Exit(1)
161 | }
162 | // +kubebuilder:scaffold:builder
163 |
164 | setupLog.Info("starting manager")
165 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
166 | setupLog.Error(err, "problem running manager")
167 | os.Exit(1)
168 | }
169 | }
170 |
--------------------------------------------------------------------------------
/test/config.yaml:
--------------------------------------------------------------------------------
1 | name: kind-kind
2 | patches:
3 | - ./patch1.yaml
4 | domain: 127.0.0.1.nip.io
5 | dex:
6 | disabled: true
7 | ldap:
8 | disabled: true
9 | kubernetes:
10 | version: !!env KUBERNETES_VERSION
11 | kubeletExtraArgs:
12 | node-labels: "ingress-ready=true"
13 | authorization-mode: "AlwaysAllow"
14 | containerRuntime: containerd
15 | versions:
16 | sonobuoy: 0.16.4
17 | ketall: v1.3.0
18 | apacheds: 0.7.0
19 | podSubnet: 100.200.0.0/16
20 | serviceSubnet: 100.100.0.0/16
21 | calico:
22 | ipip: Never
23 | vxlan: Never
24 | version: v3.8.2
25 | s3:
26 | endpoint: http://minio.minio.svc:9000
27 | access_key: minio
28 | secret_key: minio123
29 | region: us-east1
30 | usePathStyle: true
31 | skipTLSVerify: true
32 | minio:
33 | version: RELEASE.2020-09-02T18-19-50Z
34 | access_key: minio
35 | secret_key: minio123
36 | replicas: 1
37 | ca:
38 | cert: ../.certs/root-ca.crt
39 | privateKey: ../.certs/root-ca.key
40 | password: foobar
41 | ingressCA:
42 | cert: ../.certs/ingress-ca.crt
43 | privateKey: ../.certs/ingress-ca.key
44 | password: foobar
45 | monitoring:
46 | disabled: false
47 | templateOperator:
48 | disabled: true
49 | canaryChecker:
50 | disabled: true
51 | postgresOperator:
52 | version: v1.6.2
53 | defaultBackupBucket: cicd-pg-backup
54 | backupPassword: password123456
55 | defaultBackupRetention:
56 | keepLast: 5
57 | keepHourly: 2
58 | keepDaily: 1
59 | platformOperator:
60 | version: v0.7.0
61 | enableClusterResourceQuota: true
62 | whitelistedPodAnnotations:
63 | # used by filebeat
64 | - com.flanksource.infra.logs/enabled
65 | - co.elastic.logs/enabled
66 | flux:
67 | enabled: true
68 | test:
69 | exclude:
70 | - configmap-reloader
71 | - dex
72 | - audit
73 | - encryption
--------------------------------------------------------------------------------
/test/e2e.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export KARINA_VERSION=v0.50.1
6 | export KARINA="./karina -c test/config.yaml"
7 | export KUBECONFIG=~/.kube/config
8 | export DOCKER_API_VERSION=1.39
9 |
10 | if [[ "$OSTYPE" == "linux-gnu" ]]; then
11 | wget -q https://github.com/flanksource/karina/releases/download/$KARINA_VERSION/karina
12 | chmod +x karina
13 | elif [[ "$OSTYPE" == "darwin"* ]]; then
14 | wget -q https://github.com/flanksource/karina/releases/download/$KARINA_VERSION/karina_osx
15 | cp karina_osx karina
16 | chmod +x karina
17 | else
18 | echo "OS $OSTYPE not supported"
19 | exit 1
20 | fi
21 |
22 | mkdir -p .bin
23 |
24 | KUSTOMIZE=./.bin/kustomize
25 | if [ ! -f "$KUSTOMIZE" ]; then
26 | curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
27 | mv kustomize .bin
28 | fi
29 | export PATH=$(pwd)/.bin:$PATH
30 |
31 | $KARINA ca generate --name root-ca --cert-path .certs/root-ca.crt --private-key-path .certs/root-ca.key --password foobar --expiry 1
32 | $KARINA ca generate --name ingress-ca --cert-path .certs/ingress-ca.crt --private-key-path .certs/ingress-ca.key --password foobar --expiry 1
33 | $KARINA provision kind-cluster -vvvvv
34 |
35 | $KARINA deploy bootstrap
36 | $KARINA deploy postgres-operator
37 | $KARINA deploy flux
38 | export IMG=flanksource/template-operator:v1
39 | make docker-build
40 | kind load docker-image $IMG --name kind-kind
41 |
42 | make deploy
43 |
44 | kubectl apply -f examples/postgres-operator.yml
45 | kubectl apply -f examples/namespacerequest.yml
46 | kubectl apply -f examples/for-each.yml
47 | kubectl apply -f examples/when.yaml
48 | kubectl apply -f test/fixtures/awx-operator.yml
49 | kubectl apply -f test/fixtures/depends-on.yaml
50 | kubectl apply -f test/fixtures/mockserver.yml
51 | kubectl apply -f test/fixtures/git-repository.yaml
52 |
53 | go run test/e2e.go
54 |
55 | go test ./k8s
56 |
--------------------------------------------------------------------------------
/test/fixtures/copy-to-namespace.yml:
--------------------------------------------------------------------------------
1 | apiVersion: templating.flanksource.com/v1
2 | kind: Template
3 | metadata:
4 | name: copy-secret-e2e
5 | spec:
6 | source:
7 | apiVersion: v1
8 | kind: Secret
9 | namespaceSelector:
10 | matchLabels:
11 | e2e-namespace-role: "copy-to-namespace-source"
12 | labelSelector:
13 | matchLabels:
14 | e2e-test: "copy-to-namespace"
15 | copyToNamespaces:
16 | namespaces:
17 | - template-operator-e2e-dest-1
18 | - template-operator-e2e-dest-2
--------------------------------------------------------------------------------
/test/fixtures/depends-on.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: depends.apps.flanksource.com
6 | spec:
7 | group: apps.flanksource.com
8 | versions:
9 | - name: v1
10 | served: true
11 | storage: true
12 | schema:
13 | openAPIV3Schema:
14 | type: object
15 | properties:
16 | spec:
17 | type: object
18 | properties:
19 | replicas:
20 | type: integer
21 | image:
22 | type: string
23 | type:
24 | type: string
25 | status:
26 | type: string
27 | status:
28 | type: object
29 | properties:
30 | conditions:
31 | type: array
32 | items:
33 | type: object
34 | properties:
35 | type:
36 | type: string
37 | status:
38 | type: string
39 | scope: Namespaced
40 | names:
41 | plural: depends
42 | singular: depend
43 | kind: Depend
44 |
45 | ---
46 | apiVersion: apiextensions.k8s.io/v1
47 | kind: CustomResourceDefinition
48 | metadata:
49 | name: samples.apps.flanksource.com
50 | spec:
51 | group: apps.flanksource.com
52 | versions:
53 | - name: v1
54 | served: true
55 | storage: true
56 | schema:
57 | openAPIV3Schema:
58 | type: object
59 | properties:
60 | spec:
61 | type: object
62 | status:
63 | type: object
64 | properties:
65 | conditions:
66 | type: array
67 | items:
68 | type: object
69 | properties:
70 | type:
71 | type: string
72 | status:
73 | type: string
74 | scope: Namespaced
75 | names:
76 | plural: samples
77 | singular: sample
78 | kind: Sample
79 | ---
80 |
81 | apiVersion: templating.flanksource.com/v1
82 | kind: Template
83 | metadata:
84 | name: depend-example
85 | spec:
86 | source:
87 | apiVersion: apps.flanksource.com/v1
88 | kind: Depend
89 | resources:
90 | - id: test
91 | apiVersion: apps.flanksource.com/v1
92 | kind: Sample
93 | metadata:
94 | name: "{{.metadata.name}}"
95 | namespace: "{{.metadata.namespace}}"
96 | spec: {}
97 | status:
98 | conditions:
99 | - type: '{{.spec.type | default "NotReady"}}'
100 | status: '{{.spec.status | default "False"}}'
101 | # will not be created as the dependent object never becomes ready
102 | - depends: ["test"]
103 | apiVersion: apps/v1
104 | kind: Deployment
105 | metadata:
106 | name: "{{.metadata.name}}"
107 | namespace: "{{.metadata.namespace}}"
108 | labels:
109 | app: "{{.metadata.name}}"
110 | spec:
111 | replicas: "{{.spec.replicas | default 1}}"
112 | selector:
113 | matchLabels:
114 | app: "{{.metadata.name}}"
115 | template:
116 | metadata:
117 | labels:
118 | app: "{{.metadata.name}}"
119 | spec:
120 | containers:
121 | - name: web
122 | image: "{{.spec.image}}"
123 | ports:
124 | - containerPort: 80
125 | # will be created as it does not depend on any other object
126 | - id: secret
127 | apiVersion: v1
128 | kind: Secret
129 | metadata:
130 | name: "{{.metadata.name}}"
131 | namespace: "{{.metadata.namespace}}"
132 | data:
133 | some-key: c29tZS12YWx1ZQ==
134 | type: Opaque
--------------------------------------------------------------------------------
/test/fixtures/git-repository.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: source.toolkit.fluxcd.io/v1beta1
2 | kind: GitRepository
3 | metadata:
4 | name: template-operator-dashboards
5 | namespace: default
6 | spec:
7 | interval: 5m
8 | url: https://github.com/flanksource/template-operator-e2e-test
9 | ref:
10 | branch: master
--------------------------------------------------------------------------------
/test/fixtures/mockserver.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: mockserver
5 | ---
6 | apiVersion: apps/v1
7 | kind: Deployment
8 | metadata:
9 | name: mockserver
10 | namespace: mockserver
11 | labels:
12 | app: mockserver
13 | spec:
14 | replicas: 1
15 | selector:
16 | matchLabels:
17 | app: mockserver
18 | template:
19 | metadata:
20 | labels:
21 | app: mockserver
22 | spec:
23 | containers:
24 | - name: mockserver
25 | image: mockserver/mockserver:latest
26 | ports:
27 | - containerPort: 1080
28 | ---
29 | apiVersion: v1
30 | kind: Service
31 | metadata:
32 | name: mockserver
33 | namespace: mockserver
34 | spec:
35 | type: NodePort
36 | selector:
37 | app: mockserver
38 | ports:
39 | - port: 80
40 | targetPort: 1080
41 | ---
42 | apiVersion: networking.k8s.io/v1beta1
43 | kind: Ingress
44 | metadata:
45 | name: mockserver
46 | namespace: mockserver
47 | annotations:
48 | kubernetes.io/tls-acme: "true"
49 | spec:
50 | tls:
51 | - secretName: mockserver-tls
52 | hosts:
53 | - mockserver.127.0.0.1.nip.io
54 | rules:
55 | - host: mockserver.127.0.0.1.nip.io
56 | http:
57 | paths:
58 | - backend:
59 | serviceName: mockserver
60 | servicePort: 80
61 |
--------------------------------------------------------------------------------
/test/patch1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: minio
5 | namespace: minio
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: minio
11 | image: minio/minio:RELEASE.2020-03-06T22-23-56Z
12 | ---
13 | apiVersion: apps/v1
14 | kind: StatefulSet
15 | metadata:
16 | name: vault
17 | namespace: vault
18 | spec:
19 | replicas: 1
20 | ---
21 | apiVersion: apps/v1
22 | kind: Deployment
23 | metadata:
24 | name: dex
25 | namespace: dex
26 | spec:
27 | replicas: 1
28 | template:
29 | spec:
30 | resources:
31 | requests:
32 | cpu: 10m
33 | ---
34 | apiVersion: monitoring.coreos.com/v1
35 | kind: Prometheus
36 | metadata:
37 | name: k8s
38 | namespace: monitoring
39 | spec:
40 | replicas: 1
41 | storage:
42 | emptyDir:
43 | sizeLimit: 10Gi
44 | volumeClaimTemplate: {}
45 | resources:
46 | requests:
47 | memory: 100Mi
48 | cpu: 10m
49 | retention: 10h
50 | ---
51 | apiVersion: monitoring.coreos.com/v1
52 | kind: Alertmanager
53 | metadata:
54 | name: main
55 | namespace: monitoring
56 | spec:
57 | replicas: 1
58 | resources:
59 | requests:
60 | cpu: 10m
61 | ---
62 | apiVersion: integreatly.org/v1alpha1
63 | kind: Grafana
64 | metadata:
65 | name: grafana
66 | namespace: monitoring
67 | spec:
68 | resources:
69 | requests:
70 | cpu: 10m
71 |
72 | ---
73 | kind: DaemonSet
74 | apiVersion: apps/v1
75 | metadata:
76 | name: calico-node
77 | namespace: kube-system
78 | spec:
79 | template:
80 | spec:
81 | containers:
82 | - name: calico-node
83 | resources:
84 | requests:
85 | cpu: 10m
86 | ---
87 | apiVersion: apps/v1
88 | kind: Deployment
89 | metadata:
90 | name: kube-state-metrics
91 | namespace: monitoring
92 | spec:
93 | template:
94 | spec:
95 | containers:
96 | - name: kube-state-metrics
97 | resources:
98 | requests:
99 | cpu: 10m
100 |
101 | ---
102 | apiVersion: apps/v1
103 | kind: Deployment
104 | metadata:
105 | # Disable reload/all in tests
106 | annotations:
107 | $patch: delete
108 | name: quack
109 | namespace: quack
110 | spec:
111 | replicas: 1
112 | template:
113 | metadata:
114 | annotations:
115 | $patch: delete
116 | spec:
117 | containers:
118 | - name: quack
119 | resources:
120 | requests:
121 | cpu: 10m
122 | memory: 10Mi
123 |
124 | ---
125 | apiVersion: apps/v1
126 | kind: Deployment
127 | metadata:
128 | # Disable reload/all in tests
129 | annotations: null
130 | name: platform-operator
131 | namespace: template-operator
132 | spec:
133 | replicas: 1
134 | template:
135 | metadata:
136 | annotations:
137 | $patch: delete
138 | spec:
139 | containers:
140 | - name: manager
141 | resources:
142 | requests:
143 | cpu: 10m
144 | ---
145 | apiVersion: apps/v1
146 | kind: Deployment
147 | metadata:
148 | # Disable reload/all in tests
149 | annotations: null
150 | name: canary-checker
151 | namespace: template-operator
152 | spec:
153 | template:
154 | metadata:
155 | annotations:
156 | $patch: delete
157 | spec:
158 | containers:
159 | - name: canary-checker
160 | resources:
161 | requests:
162 | cpu: 10m
163 | ---
164 | apiVersion: apps/v1
165 | kind: Deployment
166 | metadata:
167 | name: cert-manager-webhook
168 | namespace: cert-manager
169 | spec:
170 | replicas: 1
--------------------------------------------------------------------------------