├── assets
│   ├── policy_as_code_war.png
│   └── minikube.svg
├── opa-gatekeeper
│   ├── valid-namespace.yaml
│   ├── invalid-namespace.yaml
│   ├── k8srequiredlabels-constraint.yaml
│   ├── k8srequiredlabels-constraint-template.yaml
│   └── deploy.yaml
├── kyverno
│   ├── validating
│   │   ├── valid-deployment.yaml
│   │   ├── requirelabels-clusterpolicy.yaml
│   │   └── invalid-deployment.yaml
│   ├── mutating
│   │   ├── deployment.yaml
│   │   └── set-image-pull-policy-clusterpolicy.yaml
│   └── generating
│       └── sync-secret-clusterpolicy.yaml
└── README.md
/assets/policy_as_code_war.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developer-guy/policy-as-code-war/HEAD/assets/policy_as_code_war.png
--------------------------------------------------------------------------------
/opa-gatekeeper/valid-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: valid-namespace
5 | labels:
6 | gatekeeper: test
7 |
--------------------------------------------------------------------------------
/opa-gatekeeper/invalid-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | creationTimestamp: null
5 | name: invalid-namespace
6 | spec: {}
7 | status: {}
8 |
--------------------------------------------------------------------------------
/opa-gatekeeper/k8srequiredlabels-constraint.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: constraints.gatekeeper.sh/v1beta1
2 | kind: K8sRequiredLabels
3 | metadata:
4 | name: ns-must-have-gk
5 | spec:
6 | match:
7 | kinds:
8 | - apiGroups: [""]
9 | kinds: ["Namespace"]
10 | parameters:
11 | labels: ["gatekeeper"]
--------------------------------------------------------------------------------
/kyverno/validating/valid-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | app.kubernetes.io/name: nginx
7 | name: nginx
8 | spec:
9 | containers:
10 | - image: nginx
11 | name: nginx
12 | resources: {}
13 | dnsPolicy: ClusterFirst
14 | restartPolicy: Always
15 | status: {}
16 |
--------------------------------------------------------------------------------
/kyverno/mutating/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | app.kubernetes.io/name: nginx
7 | name: nginx
8 | spec:
9 | containers:
10 | - image: nginx:latest
11 | name: nginx
12 | imagePullPolicy: "IfNotPresent"
13 | resources: {}
14 | dnsPolicy: ClusterFirst
15 | restartPolicy: Always
16 | status: {}
17 |
--------------------------------------------------------------------------------
/kyverno/validating/requirelabels-clusterpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: require-labels
5 | spec:
6 | validationFailureAction: enforce
7 | rules:
8 | - name: check-for-labels
9 | match:
10 | resources:
11 | kinds:
12 | - Pod
13 | validate:
14 | message: "label `app.kubernetes.io/name` is required"
15 | pattern:
16 | metadata:
17 | labels:
18 |           app.kubernetes.io/name: "?*" # "?*" matches any non-empty string, i.e. the label must exist and have a value
--------------------------------------------------------------------------------
/kyverno/validating/invalid-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | app: nginx
7 | name: nginx
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: nginx
13 | strategy: {}
14 | template:
15 | metadata:
16 | creationTimestamp: null
17 | labels:
18 | app: nginx
19 | spec:
20 | containers:
21 | - image: nginx
22 | name: nginx
23 | resources: {}
24 | status: {}
25 |
--------------------------------------------------------------------------------
/kyverno/generating/sync-secret-clusterpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: sync-secret
5 | spec:
6 | rules:
7 | - name: sync-secret
8 | match:
9 | resources:
10 | kinds:
11 | - Namespace
12 | selector:
13 | matchLabels:
14 | mycorp-rollout: "true"
15 | generate:
16 | kind: Secret
17 | name: corp-secret
18 | namespace: "{{request.object.metadata.name}}"
19 |       synchronize: true
20 | clone:
21 | namespace: default
22 | name: corp-secret
--------------------------------------------------------------------------------
/kyverno/mutating/set-image-pull-policy-clusterpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: set-image-pull-policy
5 | spec:
6 | rules:
7 | - name: set-image-pull-policy
8 | match:
9 | resources:
10 | kinds:
11 | - Pod
12 | mutate:
13 | overlay:
14 | spec:
15 | containers:
16 | # match images which end with :latest
17 | - (image): "*:latest"
18 |       # set the imagePullPolicy to "Always"
19 | imagePullPolicy: "Always"
20 |
--------------------------------------------------------------------------------
/opa-gatekeeper/k8srequiredlabels-constraint-template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: templates.gatekeeper.sh/v1beta1
2 | kind: ConstraintTemplate
3 | metadata:
4 | name: k8srequiredlabels
5 | spec:
6 | crd:
7 | spec:
8 | names:
9 | kind: K8sRequiredLabels
10 | validation:
11 | # Schema for the `parameters` field
12 | openAPIV3Schema:
13 | properties:
14 | labels:
15 | type: array
16 |             items: {type: string}
17 | targets:
18 | - target: admission.k8s.gatekeeper.sh
19 | rego: |
20 | package k8srequiredlabels
21 |
22 | violation[{"msg": msg, "details": {"missing_labels": missing}}] {
23 | provided := {label | input.review.object.metadata.labels[label]}
24 | required := {label | label := input.parameters.labels[_]}
25 | missing := required - provided
26 | count(missing) > 0
27 | msg := sprintf("you must provide labels: %v", [missing])
28 | }
--------------------------------------------------------------------------------
/assets/minikube.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | # Introduction
4 | In this guide, we are going to demonstrate what OPA Gatekeeper and Kyverno are, what the differences between them are, and how to set up and use them in a Kubernetes cluster through a hands-on demo.
5 |
6 | So, if you are interested in any of these topics, keep reading; there are lots of good details in the following sections 💪.
7 |
8 | Let's start by defining what the Policy-as-Code concept is.
9 |
10 |
11 |
12 |
13 | - 🧰 [Prerequisites](#prerequisites)
14 | - 🛡️ [What is Policy-as-Code?](#what-is-policy-as-code)
15 | - [What is OPA Gatekeeper?](#what-is-opa-gatekeeper)
16 | - [What is Kyverno?](#what-is-kyverno)
17 | - 🎭 [What are the differences between OPA Gatekeeper and Kyverno?](#what-are-the-differences-between-opa-gatekeeper-and-kyverno)
18 | - 🧑‍💻 [Hands On](#hands-on)
19 | - 👀 [References](#references)
20 |
21 |
22 |
23 | # Prerequisites
24 |
25 | * minikube v1.17.1
26 | * kubectl v1.20.2
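
If you want to double-check the versions installed on your machine (optional; newer versions should work as well):
```bash
$ minikube version
$ kubectl version --client
```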
27 |
28 | # What is Policy-as-Code?
29 | Similar to the concept of `Infrastructure-as-Code (IaC)` and the benefits you get from codifying your infrastructure setup using the software development practices, `Policy-as-Code (PaC)` is the codification of your policies.
30 |
31 | PaC is the idea of writing code in a high-level language to manage and automate policies. By representing policies as code in text files, proven software development best practices can be adopted such as version control, automated testing, and automated deployment.
32 |
33 | The policies you want to enforce come from your organization’s established guidelines or agreed-upon conventions, and best practices within the industry. It could also be derived from tribal knowledge that has accumulated over the years within your operations and development teams.
34 |
35 | PaC is very general, so it can be applied to any environment where you want to manage and enforce policies, but if you want to apply it to the Kubernetes world, two tools have become very important: OPA Gatekeeper and Kyverno.
36 |
37 | Let's continue with the description of these tools.
38 |
39 | # What is OPA Gatekeeper?
40 | Before moving on to the description of OPA Gatekeeper, we should first explain what OPA (Open Policy Agent) is.
41 |
42 | [OPA](https://github.com/open-policy-agent/opa) is an open-source, general-purpose policy engine that can be used to enforce policies on various types of software systems like microservices, CI/CD pipelines, gateways, Kubernetes, etc. OPA was developed by Styra and is currently a part of the CNCF.
43 |
44 | [OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is a policy controller for Kubernetes. More technically, it is a customizable Kubernetes admission webhook that helps enforce policies and strengthen governance.
45 |
46 | The important thing to notice is that OPA itself is not tied to Kubernetes alone; OPA Gatekeeper, on the other hand, is built specifically for OPA's Kubernetes admission control use case.
47 |
48 | # What is Kyverno?
49 | [Kyverno](https://github.com/kyverno/kyverno/) is a policy engine designed for Kubernetes. With Kyverno, policies are managed as Kubernetes resources, and no new language is required to write them. This allows using familiar tools such as kubectl, git, and kustomize to manage policies. Kyverno policies can validate, mutate, and generate Kubernetes resources. The Kyverno CLI can be used to test policies and validate resources as part of a CI/CD pipeline. Kyverno is open source and is also a CNCF Sandbox project.
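
For example, here is a minimal sketch of such a CI check, assuming the `kyverno` CLI is installed and using the policy and resource files from this repository:
```bash
# evaluate the require-labels policy against a manifest offline, with no
# cluster involved; the CLI prints pass/fail results per rule
$ kyverno apply kyverno/validating/requirelabels-clusterpolicy.yaml \
    --resource kyverno/validating/valid-deployment.yaml
```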
50 |
51 | # What are the differences between OPA Gatekeeper and Kyverno?
52 | Let's summarize the differences in a table.
53 |
54 | | Features/Capabilities | Gatekeeper | Kyverno |
55 | |--------------------------------------------- |------------ |--------- |
56 | | Validation | ✓ | ✓ |
57 | | Mutation | ✓* | ✓ |
58 | | Generation | X | ✓ |
59 | | Policy as native resources | ✓ | ✓ |
60 | | Metrics exposed | ✓ | ✓ |
61 | | OpenAPI validation schema (kubectl explain) | X | ✓ |
62 | | High Availability | ✓ | ✓ |
63 | | API object lookup | ✓ | ✓* |
64 | | CLI with test ability | ✓** | ✓ |
65 | | Policy audit ability | ✓ | ✓ |
66 |
67 | `* Alpha status`
68 | `** Separate CLI`
69 |
70 | > Credit: https://neonmirrors.net/post/2021-02/kubernetes-policy-comparison-opa-gatekeeper-vs-kyverno/
71 |
72 | In my opinion, the best advantages of Kyverno are that there is no need to learn another policy language, and its OpenAPI validation schema support, which we can use via the `kubectl explain` command. On the other hand, OPA Gatekeeper has lots of tools developed around the Rego language that help us write and test our policies, such as [conftest](https://github.com/instrumenta/conftest) and [konstraint](https://github.com/plexsystems/konstraint), and this is a big plus in my opinion. These are the tools that we can use to implement a `Policy-as-Code Pipeline`. Another advantage of OPA Gatekeeper is that there are lots of libraries with ready-to-use policies, such as [gatekeeper-library](https://github.com/open-policy-agent/gatekeeper-library), [konstraint-examples](https://github.com/plexsystems/konstraint/tree/main/examples) and [raspbernetes-policies](https://github.com/raspbernetes/k8s-security-policies/tree/master/policies).
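
As a minimal sketch of one such pipeline step (assuming `conftest` is installed and your Rego policies live in a hypothetical local `policy/` directory):
```bash
# run the Rego policies in ./policy against a manifest before it ever
# reaches the cluster; a non-zero exit code fails the CI job
$ conftest test kyverno/validating/invalid-deployment.yaml --policy policy/
```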
73 |
74 | # Hands On
75 | I created two separate folders for the OPA Gatekeeper and Kyverno resources. We are going to start with the OPA Gatekeeper project first.
76 |
77 | There are various ways to install OPA Gatekeeper, but in this section we are going to use a [plain YAML manifest](./opa-gatekeeper/deploy.yaml). Before installing it, we need to start a local Kubernetes cluster using `minikube`. We are going to use two different [Minikube profiles](https://minikube.sigs.k8s.io/docs/commands/profile/), one for OPA Gatekeeper and one for Kyverno, which will result in two separate Kubernetes clusters.
78 | ```bash
79 | $ minikube start -p opa-gatekeeper
80 | 😄 [opa-gatekeeper] minikube v1.17.1 on Darwin 10.15.7
81 | ✨ Using the hyperkit driver based on user configuration
82 | 👍 Starting control plane node opa-gatekeeper in cluster opa-gatekeeper
83 | 🔥 Creating hyperkit VM (CPUs=3, Memory=8192MB, Disk=20000MB) ...
84 | 🌐 Found network options:
85 | ▪ no_proxy=127.0.0.1,localhost
86 | 🐳 Preparing Kubernetes v1.20.2 on Docker 20.10.2 ...
87 | ▪ env NO_PROXY=127.0.0.1,localhost
88 | ▪ Generating certificates and keys ...
89 | ▪ Booting up control plane ...
90 | ▪ Configuring RBAC rules ...
91 | 🔎 Verifying Kubernetes components...
92 | 🌟 Enabled addons: storage-provisioner, default-storageclass
93 | 🏄 Done! kubectl is now configured to use "opa-gatekeeper" cluster and "default" namespace by default
94 | ```
95 |
96 | Let's apply the manifest.
97 | ```bash
98 | $ kubectl apply -f opa-gatekeeper/deploy.yaml
99 | namespace/gatekeeper-system created
100 | Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition
101 | customresourcedefinition.apiextensions.k8s.io/configs.config.gatekeeper.sh created
102 | customresourcedefinition.apiextensions.k8s.io/constraintpodstatuses.status.gatekeeper.sh created
103 | customresourcedefinition.apiextensions.k8s.io/constrainttemplatepodstatuses.status.gatekeeper.sh created
104 | customresourcedefinition.apiextensions.k8s.io/constrainttemplates.templates.gatekeeper.sh created
105 | serviceaccount/gatekeeper-admin created
106 | podsecuritypolicy.policy/gatekeeper-admin created
107 | role.rbac.authorization.k8s.io/gatekeeper-manager-role created
108 | clusterrole.rbac.authorization.k8s.io/gatekeeper-manager-role created
109 | rolebinding.rbac.authorization.k8s.io/gatekeeper-manager-rolebinding created
110 | clusterrolebinding.rbac.authorization.k8s.io/gatekeeper-manager-rolebinding created
111 | secret/gatekeeper-webhook-server-cert created
112 | service/gatekeeper-webhook-service created
113 | deployment.apps/gatekeeper-audit created
114 | deployment.apps/gatekeeper-controller-manager created
115 | Warning: admissionregistration.k8s.io/v1beta1 ValidatingWebhookConfiguration is deprecated in v1.16+, unavailable in v1.22+; use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration
116 | validatingwebhookconfiguration.admissionregistration.k8s.io/gatekeeper-validating-webhook-configuration created
117 | ```
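
Before applying any policies, it is worth waiting for the Gatekeeper pods to become ready (optional; otherwise the first request to the not-yet-ready webhook may fail):
```bash
# block until the audit and webhook pods report Ready
$ kubectl -n gatekeeper-system wait pod --all --for=condition=Ready --timeout=120s
```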
118 |
119 | You should notice that a bunch of CRDs were created. The key one is `ConstraintTemplate`, which lets us define and enforce policies: it describes both the Rego that enforces the constraint and the schema of the constraint.
120 |
121 | In this section, we are going to enforce a policy that validates required labels on resources: if the required label exists, we'll approve the request; if not, we'll reject it.
122 |
123 | Let's look at the `ConstraintTemplate` that we are going to apply.
124 | ```yaml
125 | apiVersion: templates.gatekeeper.sh/v1beta1
126 | kind: ConstraintTemplate
127 | metadata:
128 | name: k8srequiredlabels
129 | spec:
130 | crd:
131 | spec:
132 | names:
133 | kind: K8sRequiredLabels
134 | validation:
135 | # Schema for the `parameters` field
136 | openAPIV3Schema:
137 | properties:
138 | labels:
139 | type: array
140 |             items: {type: string}
141 | targets:
142 | - target: admission.k8s.gatekeeper.sh
143 | rego: |
144 | package k8srequiredlabels
145 |
146 | violation[{"msg": msg, "details": {"missing_labels": missing}}] {
147 | provided := {label | input.review.object.metadata.labels[label]}
148 | required := {label | label := input.parameters.labels[_]}
149 | missing := required - provided
150 | count(missing) > 0
151 | msg := sprintf("you must provide labels: %v", [missing])
152 | }
153 | ```
154 |
155 | You should notice that the policy we define in the Rego language is placed under the `.targets[].rego` section. Once we apply this to the cluster, a `K8sRequiredLabels` custom resource definition is created, and by using this CR we'll define our policy context, meaning which resources we want to apply the policy to.
156 |
157 | Let's apply it.
158 | ```bash
159 | $ kubectl apply -f opa-gatekeeper/k8srequiredlabels-constraint-template.yaml
160 | constrainttemplate.templates.gatekeeper.sh/k8srequiredlabels created
161 |
162 | $ kubectl get customresourcedefinitions.apiextensions.k8s.io
164 | NAME CREATED AT
165 | configs.config.gatekeeper.sh 2021-02-25T09:06:10Z
166 | constraintpodstatuses.status.gatekeeper.sh 2021-02-25T09:06:10Z
167 | constrainttemplatepodstatuses.status.gatekeeper.sh 2021-02-25T09:06:10Z
168 | constrainttemplates.templates.gatekeeper.sh 2021-02-25T09:06:10Z
169 | k8srequiredlabels.constraints.gatekeeper.sh 2021-02-25T09:19:39Z
170 | ```
171 |
172 | As you can see, the `k8srequiredlabels` CRD was created. Let's define a `K8sRequiredLabels` constraint and apply it too.
173 | ```yaml
174 | apiVersion: constraints.gatekeeper.sh/v1beta1
175 | kind: K8sRequiredLabels
176 | metadata:
177 | name: ns-must-have-gk
178 | spec:
179 | match:
180 | kinds:
181 | - apiGroups: [""]
182 | kinds: ["Namespace"]
183 | parameters:
184 | labels: ["gatekeeper"]
185 | ```
186 |
187 | You should notice that we'll enforce the policy on `Namespace` resources, and the label that we require on each Namespace is `gatekeeper`.
188 | ```bash
189 | $ kubectl apply -f opa-gatekeeper/k8srequiredlabels-constraint.yaml
190 | k8srequiredlabels.constraints.gatekeeper.sh/ns-must-have-gk created
191 | ```
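
You can inspect the constraint at any time; the audit controller also periodically records existing violations in its status (output omitted here, it will vary per cluster):
```bash
# list all constraints of this kind and check their enforcement status
$ kubectl get k8srequiredlabels
```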
192 |
193 | Let's test it by creating an invalid namespace, then a valid one.
194 | ```bash
195 | $ kubectl apply -f opa-gatekeeper/invalid-namespace.yaml
197 | Error from server ([denied by ns-must-have-gk] you must provide labels: {"gatekeeper"}): error when creating "opa-gatekeeper/invalid-namespace.yaml": admission webhook "validation.gatekeeper.sh" denied the request: [denied by ns-must-have-gk] you must provide labels: {"gatekeeper"}
198 | ```
199 |
200 | ```bash
201 | $ kubectl apply -f opa-gatekeeper/valid-namespace.yaml
203 | namespace/valid-namespace created
204 | ```
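
We can double-check that the namespace carries the required label:
```bash
# the gatekeeper=test label from valid-namespace.yaml should show up here
$ kubectl get namespace valid-namespace --show-labels
```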
205 |
206 | Tadaaaa, it worked 🎉🎉🎉🎉
207 |
208 | Let's move on to Kyverno. Again, there are various ways to install it on Kubernetes; in this case, we are going to use Helm. As we said, we'll start up another Minikube cluster with a different profile.
209 | Let's start with it.
210 | ```bash
211 | $ minikube start -p kyverno
212 | 😄 [kyverno] minikube v1.17.1 on Darwin 10.15.7
213 | ✨ Using the hyperkit driver based on user configuration
214 | 👍 Starting control plane node kyverno in cluster kyverno
215 | 🔥 Creating hyperkit VM (CPUs=3, Memory=8192MB, Disk=20000MB) ...
216 | 🌐 Found network options:
217 | ▪ no_proxy=127.0.0.1,localhost
218 | 🐳 Preparing Kubernetes v1.20.2 on Docker 20.10.2 ...
219 | ▪ env NO_PROXY=127.0.0.1,localhost
220 | ▪ Generating certificates and keys ...
221 | ▪ Booting up control plane ...
222 | ▪ Configuring RBAC rules ...
223 | 🔎 Verifying Kubernetes components...
224 | 🌟 Enabled addons: storage-provisioner, default-storageclass
225 | 🏄 Done! kubectl is now configured to use "kyverno" cluster and "default" namespace by default
226 |
227 | $ minikube profile list
228 | |----------------|-----------|---------|---------------|------|---------|---------|-------|
229 | | Profile | VM Driver | Runtime | IP | Port | Version | Status | Nodes |
230 | |----------------|-----------|---------|---------------|------|---------|---------|-------|
231 | | kyverno | hyperkit | docker | 192.168.64.17 | 8443 | v1.20.2 | Running | 1 |
232 | | minikube | hyperkit | docker | 192.168.64.15 | 8443 | v1.20.2 | Stopped | 1 |
233 | | opa-gatekeeper | hyperkit | docker | 192.168.64.16 | 8443 | v1.20.2 | Running | 1 |
234 | |----------------|-----------|---------|---------------|------|---------|---------|-------|
235 | ```
236 |
237 | Let's install it by using Helm.
238 | ```bash
239 | $ helm repo add kyverno https://kyverno.github.io/kyverno/
240 | "kyverno" has been added to your repositories
241 |
242 | $ helm repo update
243 | Hang tight while we grab the latest from your chart repositories...
244 | ...Successfully got an update from the "kyverno" chart repository
245 | ...Successfully got an update from the "nats" chart repository
246 | ...Successfully got an update from the "falcosecurity" chart repository
247 | ...Successfully got an update from the "openfaas" chart repository
248 | ...Successfully got an update from the "stable" chart repository
249 | Update Complete. ⎈Happy Helming!⎈
250 |
251 | $ helm install kyverno --namespace kyverno kyverno/kyverno --create-namespace
252 | NAME: kyverno
253 | LAST DEPLOYED: Thu Feb 25 13:16:21 2021
254 | NAMESPACE: kyverno
255 | STATUS: deployed
256 | REVISION: 1
257 | TEST SUITE: None
258 | NOTES:
259 | Thank you for installing kyverno 😀
260 |
261 | Your release is named kyverno.
262 |
263 | We have installed the "default" profile of Pod Security Standards and set them in audit mode.
264 |
265 | Visit https://kyverno.io/policies/ to find more sample policies.
266 | ```
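
Before moving on, make sure the Kyverno pod is ready, since its admission webhooks only start answering once it is (optional sanity check):
```bash
# block until the kyverno pod reports Ready
$ kubectl -n kyverno wait pod --all --for=condition=Ready --timeout=120s
```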
267 |
268 | Let's look at the Custom Resource Definitions list.
269 | ```bash
270 | $ kubectl get customresourcedefinitions.apiextensions.k8s.io
272 | NAME CREATED AT
273 | clusterpolicies.kyverno.io 2021-02-25T10:16:16Z
274 | clusterpolicyreports.wgpolicyk8s.io 2021-02-25T10:16:16Z
275 | clusterreportchangerequests.kyverno.io 2021-02-25T10:16:16Z
276 | generaterequests.kyverno.io 2021-02-25T10:16:16Z
277 | policies.kyverno.io 2021-02-25T10:16:16Z
278 | policyreports.wgpolicyk8s.io 2021-02-25T10:16:16Z
279 | reportchangerequests.kyverno.io 2021-02-25T10:16:16Z
280 | ```
281 |
282 | We can also use the `kubectl explain` command to easily get information about these resources via their OpenAPI schemas.
283 | ```bash
284 | $ kubectl explain policies
285 | KIND: Policy
286 | VERSION: kyverno.io/v1
287 |
288 | DESCRIPTION:
289 | Policy declares validation, mutation, and generation behaviors for matching
290 | resources. See: https://kyverno.io/docs/writing-policies/ for more
291 | information.
292 |
293 | FIELDS:
294 | apiVersion
295 | APIVersion defines the versioned schema of this representation of an
296 | object. Servers should convert recognized schemas to the latest internal
297 | value, and may reject unrecognized values. More info:
298 | https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
299 |
300 | kind
301 | Kind is a string value representing the REST resource this object
302 | represents. Servers may infer this from the endpoint the client submits
303 | requests to. Cannot be updated. In CamelCase. More info:
304 | https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
305 |
306 | metadata