85 | ```
86 |
87 |
88 |
89 | ---
90 |
91 |
92 |
93 | [:arrow_left: 00-VerifyCluster](../00-VerifyCluster) ||
94 | [02-Deployments-Imperative :arrow_right:](../02-Deployments-Imperative)
95 |
96 |
97 | ---
98 |
99 |
100 | ©CodeWizard LTD
101 |
102 |
103 | 
104 |
105 |
106 |
--------------------------------------------------------------------------------
/Labs/03-Deployments-Declarative/nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment # We use a deployment and not pod !!!!
3 | metadata:
4 | name: nginx # Deployment name
5 | namespace: codewizard
6 | labels:
7 | app: nginx # Deployment label
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels: # Labels for the replica selector
12 | app: nginx
13 | template:
14 | metadata:
15 | labels:
16 | app: nginx # Labels for the replica selector
        version: "1.17" # Specify a specific version if required
18 | spec:
19 | containers:
20 | - name: nginx # The name of the pod
21 | image: nginx:1.17 # The image which we will deploy
22 | ports:
23 | - containerPort: 80
24 |
--------------------------------------------------------------------------------
/Labs/06-DataStore/resources/Dockerfile:
--------------------------------------------------------------------------------
# Base Image
FROM node

# Exposed port - the same port is defined in server.js
EXPOSE 5000

# The "configuration" which we pass in at runtime.
# key=value form is used; the space-separated `ENV key value` form is legacy.
ENV LANGUAGE=Hebrew
ENV TOKEN=Hard-To-Guess

# Copy the server into the container
COPY server.js .

# Start the server.
# Exec (JSON array) form makes node PID 1, so it receives SIGTERM/SIGINT
# directly instead of being wrapped in a /bin/sh -c shell.
ENTRYPOINT ["node", "server.js"]
--------------------------------------------------------------------------------
/Labs/06-DataStore/resources/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | # This resource is of type Secret
3 | kind: Secret
4 | metadata:
5 | # The name of the secret
6 | name: token
7 | # The content of the secret is defined inside the data object
8 | data:
9 | # The key is :TOKEN
10 | # The value is: `SGFyZC1Uby1HdWVzczM=` which is encoded in base64
  # The plain text value is: "Hard-To-Guess3"
12 |
13 | TOKEN: SGFyZC1Uby1HdWVzczM=
14 | type: Opaque
15 |
--------------------------------------------------------------------------------
/Labs/06-DataStore/resources/server.js:
--------------------------------------------------------------------------------
// Runtime configuration is injected through environment variables
// (see the ENV instructions in the accompanying Dockerfile).
const language = process.env.LANGUAGE;
const token = process.env.TOKEN;

const http = require("http");

// Minimal HTTP server that echoes the injected configuration values
// back to every request.
const server = http.createServer((request, response) => {
  response.write(`Language: ${language}\n`);
  response.write(`Token : ${token}\n`);
  response.end(`\n`);
});

// Default to port 5000 when PORT is not supplied
server.listen(process.env.PORT || 5000);
--------------------------------------------------------------------------------
/Labs/06-DataStore/resources/variables-from-secrets.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: codewizard-secrets
  namespace: codewizard
spec:
  replicas: 1
  selector:
    matchLabels:
      name: codewizard-secrets
  template:
    metadata:
      labels:
        name: codewizard-secrets
    spec:
      containers:
        # This container reads its ENV values from a ConfigMap and a Secret
        - name: secrets
          image: nirgeier/k8s-secrets-sample
          imagePullPolicy: Always
          ports:
            - containerPort: 5000
          env:
            - name: LANGUAGE
              valueFrom:
                configMapKeyRef: # This value will be read from the config map
                  name: language # The name of the ConfigMap
                  key: LANGUAGE # The key in the config map
            - name: TOKEN
              valueFrom:
                secretKeyRef: # This value will be read from the secret
                  name: token # The name of the secret
                  key: TOKEN # The key in the secret
          resources:
            limits:
              cpu: "500m"
              memory: "256Mi"
---
apiVersion: v1
kind: Service
metadata:
  name: codewizard-secrets
  namespace: codewizard
spec:
  selector:
    # Must match the Pod template labels above (the pods are labeled
    # `name: codewizard-secrets`, not `app: ...`); otherwise the Service
    # selects no pods and has no endpoints.
    name: codewizard-secrets
  ports:
    - protocol: TCP
      port: 5000
--------------------------------------------------------------------------------
/Labs/06-DataStore/resources/variables-from-yaml.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: codewizard-secrets
  namespace: codewizard
spec:
  replicas: 1
  selector:
    matchLabels:
      name: codewizard-secrets
  template:
    metadata:
      labels:
        name: codewizard-secrets
    spec:
      containers:
        # This container uses plain ENV values defined inline in this manifest
        - name: secrets
          image: nirgeier/k8s-secrets-sample
          imagePullPolicy: Always
          ports:
            - containerPort: 5000
          env:
            - name: LANGUAGE
              value: Hebrew
            - name: TOKEN
              value: Hard-To-Guess2
          resources:
            limits:
              cpu: "500m"
              memory: "256Mi"
---
apiVersion: v1
kind: Service
metadata:
  name: codewizard-secrets
  namespace: codewizard
spec:
  selector:
    # Must match the Pod template labels above (the pods are labeled
    # `name: codewizard-secrets`, not `app: ...`); otherwise the Service
    # selects no pods and has no endpoints.
    name: codewizard-secrets
  ports:
    - protocol: TCP
      port: 5000
--------------------------------------------------------------------------------
/Labs/07-nginx-Ingress/resources/certificate.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDmzCCAoOgAwIBAgIUWlCuTkTKFJ5Mx80Cg1NdgcrMeZowDQYJKoZIhvcNAQEL
3 | BQAwXTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
4 | GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEWMBQGA1UEAwwNaW5ncmVzcy5sb2Nh
5 | bDAeFw0xOTEyMjExOTQzMDlaFw0yMDEyMjAxOTQzMDlaMF0xCzAJBgNVBAYTAkFV
6 | MRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRz
7 | IFB0eSBMdGQxFjAUBgNVBAMMDWluZ3Jlc3MubG9jYWwwggEiMA0GCSqGSIb3DQEB
8 | AQUAA4IBDwAwggEKAoIBAQC8FakvkG4zKIa9xuRrNLAPk2FspF8h5oLC3JuHff00
9 | SkRYJoWGynEdLW31RTVN7pvFg2Z3Y+RpXxpnesDQmkjqKEvjfYX1gVwgRCrpESg1
10 | yRNm3jc7e0RRuwxJglH3gktYDV7+i3v44Tda4K1YTvLvgqOvNSULUAC+m22gzZr9
11 | gHFPmTyTiTCOVJTVmdVqBgmYtZ0q5PE/Pa9+wtYv4tA8bS4dO5BOYYfuN7vXgvkW
12 | kVXR5lZJFz9aSQ3xzj9veERSeHc8m0bNlAU2AicwH0mOpOtuN4KulWgcvqrCQXT5
13 | 7Fxwx7AeiZkrimyBDThPzLzWMikDgojiEJ+HCHI0chgrAgMBAAGjUzBRMB0GA1Ud
14 | DgQWBBT1Lwz4mFcGWiGfQZQR2Z8sgDrFTTAfBgNVHSMEGDAWgBT1Lwz4mFcGWiGf
15 | QZQR2Z8sgDrFTTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAL
16 | gWcvCxe4QwZnrkyJ8eWKgaHn63sub7hR4mT+EgPeja70not0BORzKjIDr3yZSKJU
17 | Kbuj6Wbkq8wsfeD1HahLNuq0dQE808llFLkHv0NbFY7rRG4XV72Rr03L2rtfzZBX
18 | 2N+QG88j6pZQMqYSRlcN4scuJqs9Ug2pmzGOBweZCsg72HJCUjD/FXLWBlY9eNKk
19 | /5tHihqQll42xyUFRrVnuu+1HVaFCRdt6Bct8k2D3I1GzJrz0Se4iHJsq/Izvf7v
20 | InboDQkr6vq1olE8RJBraQ7Fngay7vo12rXCKNt56PIiqbQ8WvQQZfAY7yMOhqAM
21 | ooH8YynHIXY13xyom9EH
22 | -----END CERTIFICATE-----
23 |
--------------------------------------------------------------------------------
/Labs/07-nginx-Ingress/resources/certificate.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC8FakvkG4zKIa9
3 | xuRrNLAPk2FspF8h5oLC3JuHff00SkRYJoWGynEdLW31RTVN7pvFg2Z3Y+RpXxpn
4 | esDQmkjqKEvjfYX1gVwgRCrpESg1yRNm3jc7e0RRuwxJglH3gktYDV7+i3v44Tda
5 | 4K1YTvLvgqOvNSULUAC+m22gzZr9gHFPmTyTiTCOVJTVmdVqBgmYtZ0q5PE/Pa9+
6 | wtYv4tA8bS4dO5BOYYfuN7vXgvkWkVXR5lZJFz9aSQ3xzj9veERSeHc8m0bNlAU2
7 | AicwH0mOpOtuN4KulWgcvqrCQXT57Fxwx7AeiZkrimyBDThPzLzWMikDgojiEJ+H
8 | CHI0chgrAgMBAAECggEARwY2+UslEhR/rTJqF0GyKm+RHqGDex28yzDbWnLtJs3U
9 | uSTyz0+rH0WEfFZCJsev8woHq5YBLvlG00S7gwp/9kx5O9Kuv2K2E0kqmxBrisP/
10 | m5zWZpPJ3MMxhKC9qyV8pieGc8Dgc784VAz76JkHjAJdJVCASKFRZqjy4QJDQO6V
11 | QwTOgSyLBr1osoJmksDFcUXgoAjFEoKIeYnvXbm939ZgaeM3NHaMt2om62tr8uJY
12 | bU2C2Ehvx79bKG0Fmw8u47y2oZrkYQ4hO6/lRs6wJUSuLODCxMlwCaUh4IoXl04m
13 | /Qjp2ARSHkx5ISkpaYryyc9Y/R5dmNmKoZrs+NVfwQKBgQDzGyFh9LaH6lHTyvtc
14 | jocFndZ9r0LWRGGQlN/eKlJvVB+CgMj7RWuC1dq8Bg9N1SeEHWaJVv/+y+GtJEZZ
15 | 56DnKW1UDTqkmynoTab5slve0pXNCrHw0Pc+C7OIze0dIcCE2MuZrAvWs8QaEwbw
16 | ieADyO9lYkfE4YgpaW+ShSxcMwKBgQDGD3Sm9hjVC7J9BLYg608ZTzSBcbA0Gm9E
17 | phYEz44wkAsiJ1qyQCPP58HFC+S3ukCILoAMHCMrCcyZCVw5B1t19NRplbqGhCZQ
18 | R3hr3CUT4nLNRgfpR4qAbhaj8aPo8Huig+r4G7N8g3UlZQWiEkL9mxRPZZILDf1Q
19 | zWRsHvdcKQKBgF1Ri8XzTuHrc4+uOkD0QSZJyV0jmq9vPlhmnWzFqDEuBI5u6zdx
20 | FWz6tGU6mkNRUELpmkOcDtZ64t04sHywalZx05LRJTKskTCoJjFxYsys323+7gE3
21 | 5cB+c2NPUPa+zwzvv2/01/KJvPwZU6+f7UrmpeawDEaqID9tRrPixP17AoGAHc3Z
22 | mf5Sgky+UT3SQmXmg0J9/jSjdVO9BrGPgq3REdG7Oyp85XHtca3IZOSDSHqIl3WX
23 | 4zqguCtDVIwqCpLm2ns7M6BKb0+XjGEU5/Y6xiE/cVBmhF41o1ntokIMjlMR58S+
24 | KRPSEJyflj77eAYTeqJJjiEUtwEl63Dc+cA3LPECgYEAzF6BByLBLbyQgto3sDXn
25 | k9Osh021CBk9i4+BB+YOKwPou9Vpaf1bp8oPbF2pfJSBOSKIRgavxClkZfXeV2Px
26 | 6KfS3+8N8FQhBxv5pnngqfuYnV08nb2CCi1Eb+2p0+fOF4TrU/z/9+07B7C2cxwh
27 | Y7yHWsVYHzmZ+InbYh8m2Nc=
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/Labs/07-nginx-Ingress/resources/certificate.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN DH PARAMETERS-----
2 | MIIBCAKCAQEAzrS+fSz9I9gw2+I5mKenJYXhzV0AJLRdCOVTpYjJKIHMxvvbsfsl
3 | ZH686WkxD1v62yDLbYMmWk8UPQMEx4dA980XnImt2gti7df0XHA0R2aZarvDyg30
4 | k8JMQNBDn8sZJ9QIHgQ/KGvH5Fn5nIjMJ/YtnWxssZAXLq+6azCjzKabKkGYvUmA
5 | TE0g0xwH32Cn1cJlV5pPq2YriGTSLu/HKIV+gyP528F8eXz5IYiOFuV4/hk/UxjX
6 | IlhGiD69dxGaCCxh3lqcDasOxLCwFfWU2XudH6q8Iztifwmgtcuw48kkDL/hTrUQ
7 | +LgrAraFtm20a+Tu6/ZFeASuMX2XRDiVkwIBAg==
8 | -----END DH PARAMETERS-----
9 |
--------------------------------------------------------------------------------
/Labs/07-nginx-Ingress/resources/ingress-nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: default-http-backend
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: default-http-backend
10 | template:
11 | metadata:
12 | labels:
13 | app: default-http-backend
14 | spec:
15 | terminationGracePeriodSeconds: 60
16 | containers:
17 | - name: default-http-backend
          # Any image is permissible as long as:
19 | # 1. It serves a 404 page at /
20 | # 2. It serves 200 on a /healthz endpoint
21 | image: gcr.io/google_containers/defaultbackend:1.0
22 | livenessProbe:
23 | httpGet:
24 | path: /healthz
25 | port: 8080
26 | scheme: HTTP
27 | initialDelaySeconds: 30
28 | timeoutSeconds: 5
29 | ports:
30 | - containerPort: 8080
31 | resources:
32 | limits:
33 | cpu: 10m
34 | memory: 20Mi
35 | requests:
36 | cpu: 10m
37 | memory: 20Mi
--------------------------------------------------------------------------------
/Labs/07-nginx-Ingress/resources/ingress-nginx-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: default-http-backend
5 | spec:
6 | selector:
7 | app: default-http-backend
8 | ports:
9 | - protocol: TCP
10 | port: 80
11 | targetPort: 8080
12 | type: NodePort
--------------------------------------------------------------------------------
/Labs/07-nginx-Ingress/resources/ingress.yaml:
--------------------------------------------------------------------------------
# Migrated from extensions/v1beta1 (removed in Kubernetes 1.22)
# to the stable networking.k8s.io/v1 Ingress API.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-first-ingress
  annotations:
    nginx.org/ssl-services: "my-service"
spec:
  # Replaces the deprecated `kubernetes.io/ingress.class` annotation
  ingressClassName: nginx
  tls:
    - hosts:
        - myapp.local
      secretName: tls-certificate
  rules:
    - host: myapp.local
      http:
        paths:
          - path: /
            # pathType is required in networking.k8s.io/v1
            pathType: Prefix
            backend:
              # v1 replaces serviceName/servicePort with a service object
              service:
                name: ingress-pods
                port:
                  number: 5000
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/01-commonAnnotation/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ### FileName: kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | # This will add annotation under every metadata entry
6 | # ex: main metadata, spec.metadata etc
7 | commonAnnotations:
8 | author: nirgeier@gmail.com
9 | version: v1
10 | bases:
11 | - ../_base
12 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/01-commonAnnotation/output.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | author: nirgeier@gmail.com
6 | version: v1
7 | labels:
8 | app: postgres
9 | name: postgres
10 | namespace: codewizard
11 | spec:
12 | ports:
13 | - port: 5432
14 | selector:
15 | app: postgres
16 | type: NodePort
17 | ---
18 | apiVersion: apps/v1
19 | kind: Deployment
20 | metadata:
21 | annotations:
22 | author: nirgeier@gmail.com
23 | version: v1
24 | name: myapp
25 | namespace: codewizard
26 | spec:
27 | selector:
28 | matchLabels:
29 | app: myapp
30 | template:
31 | metadata:
32 | annotations:
33 | author: nirgeier@gmail.com
34 | version: v1
35 | labels:
36 | app: myapp
37 | spec:
38 | containers:
39 | - image: __image__
40 | name: myapp
41 | - image: nginx
42 | name: nginx
43 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/02-commonLabels/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | # This will add annotation under every metadata entry
5 | # ex: main metadata, spec.metadata etc
6 | commonLabels:
7 | author: nirgeier@gmail.com
8 | env: qa
9 | version: v1
10 | bases:
11 | - ../_base
12 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/02-commonLabels/output.yaml:
--------------------------------------------------------------------------------
# Regenerated output: the kustomization declares `env: qa`,
# so the rendered labels must show `env: qa` (the previous output
# showed a stale `env: codeWizard-cluster` value).
apiVersion: v1
kind: Service
metadata:
  labels:
    app: postgres
    author: nirgeier@gmail.com
    env: qa
    version: v1
  name: postgres
  namespace: codewizard
spec:
  ports:
    - port: 5432
  selector:
    app: postgres
    author: nirgeier@gmail.com
    env: qa
    version: v1
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    author: nirgeier@gmail.com
    env: qa
    version: v1
  name: myapp
  namespace: codewizard
spec:
  selector:
    matchLabels:
      app: myapp
      author: nirgeier@gmail.com
      env: qa
      version: v1
  template:
    metadata:
      labels:
        app: myapp
        author: nirgeier@gmail.com
        env: qa
        version: v1
    spec:
      containers:
        - image: __image__
          name: myapp
        - image: nginx
          name: nginx
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/ConfigMap/01-FromEnv/.env:
--------------------------------------------------------------------------------
1 | key1=value1
2 | env=qa
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/ConfigMap/01-FromEnv/ConfigMap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | env: qa
4 | key1: value1
5 | kind: ConfigMap
6 | metadata:
7 | name: configMapFromEnv-c9655hf97k
8 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/ConfigMap/01-FromEnv/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml for ConfigMap
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | configMapGenerator:
6 | # Generate config file from env file
7 | - name: configMapFromEnv
8 | env: .env
9 | #
10 | # Optional flag to mark if we want hashed suffix or not
11 | #
12 | # generatorOptions:
13 | # disableNameSuffixHash: true
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/ConfigMap/02-FromFile/.env:
--------------------------------------------------------------------------------
1 | key1=value1
2 | env=qa
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/ConfigMap/02-FromFile/ConfigMap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | .env: |-
4 | key1=value1
5 | env=qa
6 | kind: ConfigMap
7 | metadata:
8 | name: configFromFile-456kgtfd6m
9 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/ConfigMap/02-FromFile/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml for ConfigMap
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | # Generate config file from file
6 | configMapGenerator:
7 | - name: configFromFile
8 | files:
9 | - .env
10 | #
11 | # Optional flag to mark if we want hashed suffix or not
12 | #
13 | #generatorOptions:
14 | # disableNameSuffixHash: true
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/ConfigMap/03-FromLiteral/ConfigMap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | Key1: value1
4 | Key2: value2
5 | kind: ConfigMap
6 | metadata:
7 | name: configFromLiterals-h777b4gdf5
8 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/ConfigMap/03-FromLiteral/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml for ConfigMap
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | configMapGenerator:
6 | # Generate config file from key value pairs file
7 | # Generate configMap from direct input
8 | - name: configFromLiterals
9 | literals:
10 | - Key1=value1
11 | - Key2=value2
12 | #
13 | # Optional flag to mark if we want hashed suffix or not
14 | #
15 | #generatorOptions:
16 | # disableNameSuffixHash: true
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/Secret/.env:
--------------------------------------------------------------------------------
1 | NODE_ENV=dev
2 | REGION=weu
3 | APP_ENV=development
4 | LOG_DEBUG=true
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/Secret/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml for ConfigMap
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | secretGenerator:
6 | # Generate secret from env file
7 | - name: secretMapFromFile
8 | env: .env
9 | type: Opaque
10 | generatorOptions:
11 | disableNameSuffixHash: true
12 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/03-generators/Secret/output.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/Labs/08-Kustomization/samples/03-generators/Secret/output.yaml
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/04-images/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | bases:
6 | - ../_base
7 |
8 | images:
9 | # The image as its defined in the Deployment file
10 | - name: __image__
11 | # The new name to set
12 | newName: my-registry/my-image
13 | # optional: image tag
14 | newTag: v3
15 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/04-images/output.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: postgres
6 | name: postgres
7 | namespace: codewizard
8 | spec:
9 | ports:
10 | - port: 5432
11 | selector:
12 | app: postgres
13 | type: NodePort
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: myapp
19 | namespace: codewizard
20 | spec:
21 | selector:
22 | matchLabels:
23 | app: myapp
24 | template:
25 | metadata:
26 | labels:
27 | app: myapp
28 | spec:
29 | containers:
30 | - image: my-registry/my-image:v3
31 | name: myapp
32 | - image: nginx
33 | name: nginx
34 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/05-Namespace/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | # Add the desired namespace to all resources
6 | namespace: kustomize-namespace
7 |
8 | bases:
9 | - ../_base
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/05-Namespace/output.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: postgres
6 | name: postgres
7 | namespace: kustomize-namespace
8 | spec:
9 | ports:
10 | - port: 5432
11 | selector:
12 | app: postgres
13 | type: NodePort
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: myapp
19 | namespace: kustomize-namespace
20 | spec:
21 | selector:
22 | matchLabels:
23 | app: myapp
24 | template:
25 | metadata:
26 | labels:
27 | app: myapp
28 | spec:
29 | containers:
30 | - image: __image__
31 | name: myapp
32 | - image: nginx
33 | name: nginx
34 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/06-Prefix-Suffix/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | # Add the desired Prefix to all resources
6 | namePrefix: prefix-codeWizard-
7 | nameSuffix: -suffix-codeWizard
8 |
9 | bases:
10 | - ../_base
11 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/06-Prefix-Suffix/output.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: postgres
6 | name: prefix-codeWizard-postgres-suffix-codeWizard
7 | namespace: codewizard
8 | spec:
9 | ports:
10 | - port: 5432
11 | selector:
12 | app: postgres
13 | type: NodePort
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: prefix-codeWizard-myapp-suffix-codeWizard
19 | namespace: codewizard
20 | spec:
21 | selector:
22 | matchLabels:
23 | app: myapp
24 | template:
25 | metadata:
26 | labels:
27 | app: myapp
28 | spec:
29 | containers:
30 | - image: __image__
31 | name: myapp
32 | - image: nginx
33 | name: nginx
34 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/07-replicas/deployment.yaml:
--------------------------------------------------------------------------------
1 | # deployment.yaml
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: deployment
6 | spec:
7 | replicas: 5
8 | selector:
9 | name: deployment
10 | template:
11 | containers:
12 | - name: container
13 | image: registry/conatiner:latest
14 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/07-replicas/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | replicas:
6 | - name: deployment
7 | count: 10
8 |
9 | resources:
10 | - deployment.yaml
11 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/07-replicas/output.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: deployment
5 | spec:
6 | replicas: 10
7 | selector:
8 | name: deployment
9 | template:
10 | containers:
11 | - image: registry/conatiner:latest
12 | name: container
13 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-add-update/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | bases:
6 | - ../../_base
7 |
8 | patchesStrategicMerge:
9 | - patch-memory.yaml
10 | - patch-replicas.yaml
11 | - patch-service.yaml
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-add-update/output.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: postgres
6 | name: postgres
7 | namespace: codewizard
8 | spec:
9 | ports:
10 | - port: 15432
11 | - port: 5432
12 | selector:
13 | app: postgres
14 | type: NodePort
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: myapp
20 | namespace: codewizard
21 | spec:
22 | replicas: 3
23 | selector:
24 | matchLabels:
25 | app: myapp
26 | template:
27 | metadata:
28 | labels:
29 | app: myapp
30 | spec:
31 | containers:
32 | - name: patch-name
33 | resources:
34 | limits:
35 | memory: 512Mi
36 | - image: __image__
37 | name: myapp
38 | - image: nginx
39 | name: nginx
40 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-add-update/patch-memory.yaml:
--------------------------------------------------------------------------------
1 | # File: patch-memory.yaml
2 | # Patch limits.memory
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | # Set the desired deployment to patch
6 | metadata:
7 | name: myapp
8 | spec:
  # Patch the memory limit
10 | template:
11 | spec:
12 | containers:
13 | - name: patch-name
14 | resources:
15 | limits:
16 | memory: 512Mi
17 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-add-update/patch-replicas.yaml:
--------------------------------------------------------------------------------
1 | # File: patch-replicas.yaml
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | # Set the desired deployment to patch
5 | metadata:
6 | name: myapp
7 | spec:
8 | # This is the patch for this demo
9 | replicas: 3
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-add-update/patch-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: postgres
5 | spec:
6 | # The default port for postgres
7 | ports:
8 | # Add additional port to the ports list
9 | - port: 15432
10 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-delete/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | bases:
6 | - ../../_base
7 |
8 | patchesStrategicMerge:
9 | - patch-delete.yaml
10 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-delete/output.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: postgres
6 | name: postgres
7 | namespace: codewizard
8 | spec:
9 | ports:
10 | - port: 5432
11 | selector:
12 | app: postgres
13 | type: NodePort
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: myapp
19 | namespace: codewizard
20 | spec:
21 | selector:
22 | matchLabels:
23 | app: myapp
24 | template:
25 | metadata:
26 | labels:
27 | app: myapp
28 | spec:
29 | containers:
30 | - image: nginx
31 | name: nginx
32 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-delete/patch-delete.yaml:
--------------------------------------------------------------------------------
1 | # patch-delete.yaml
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: myapp
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | # Remove this section, in this demo it will remove the
11 | # image with the `name: myapp`
12 | - $patch: delete
13 | name: myapp
14 | image: __image__
15 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-replace/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | bases:
6 | - ../../_base
7 |
8 | patchesStrategicMerge:
9 | - patch-replace-ports.yaml
10 | - patch-replace-image.yaml
11 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-replace/output.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: postgres
6 | name: postgres
7 | namespace: postgres
8 | spec:
9 | ports:
10 | - port: 80
11 | selector:
12 | app: postgres
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 | name: myapp
18 | namespace: codewizard
19 | spec:
20 | selector:
21 | matchLabels:
22 | app: myapp
23 | template:
24 | metadata:
25 | labels:
26 | app: myapp
27 | spec:
28 | containers:
29 | - args:
30 | - one
31 | - two
32 | image: nginx:latest
33 | name: myapp
34 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-replace/patch-replace-image.yaml:
--------------------------------------------------------------------------------
1 | # patch-replace.yaml
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: myapp
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | # Remove this section, in this demo it will remove the
11 | # image with the `name: myapp`
12 | - $patch: replace
13 | - name: myapp
14 | image: nginx:latest
15 | args:
16 | - one
17 | - two
18 |
19 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/08-Patches/patch-replace/patch-replace-ports.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: postgres
5 | namespace: postgres
6 | labels:
7 | app: postgres
8 | spec:
9 | selector:
10 | app: postgres
11 | $patch: replace
12 | # Replace the current ports with port 80
13 | ports:
14 | - port: 80
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/09-Add-Files/add-files.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Register every file in the ./_base folder as a resource in the
# kustomization.yaml of the current working directory.

# Loop over the resources folder
for filePath in "_base"/*
do
  # Add the yaml file to the kustomization file.
  # The path is quoted so file names containing spaces are passed intact.
  kustomize edit add resource "$filePath"
done
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/_base/deployment.yaml:
--------------------------------------------------------------------------------
1 | # This is the base file for all the demos in this folder
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: myapp
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: myapp
10 | template:
11 | metadata:
12 | labels:
13 | app: myapp
14 | spec:
15 | containers:
16 | - name: myapp
17 | image: __image__
18 | - name: nginx
19 | image: nginx
20 |
21 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/_base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | namespace: codewizard
4 |
5 | resources:
6 | - deployment.yaml
7 | - service.yaml
8 |
--------------------------------------------------------------------------------
/Labs/08-Kustomization/samples/_base/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: postgres
5 | labels:
6 | app: postgres
7 | spec:
8 | selector:
9 | app: postgres
10 | # Service of type NodePort
11 | type: NodePort
12 | # The default port for postgres
13 | ports:
14 | - port: 5432
15 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: codewizard
5 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/base/ConfigMap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: postgres-config
5 | labels:
6 | app: postgres
7 | data:
  # The following names are the ones defined in the official postgres docs
9 |
10 | # The name of the database we will use in this demo
11 | POSTGRES_DB: codewizard
12 | # the user name for this demo
13 | POSTGRES_USER: codewizard
14 | # The password for this demo
15 | POSTGRES_PASSWORD: admin123
16 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/base/PersistentVolumeClaim.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: postgres-pv-claim
5 | labels:
6 | app: postgres
7 | spec:
8 | # in this demo we use GCP so we are using the 'standard' StorageClass
9 | # We can of course define our own StorageClass resource
10 | storageClassName: standard
11 |
12 | ### Access Modes
13 | # The access modes are:
  # ReadWriteOnce (RWO) - The volume can be mounted as read-write by a single node
  # ReadWriteMany (RWX) - The volume can be mounted as read-write by multiple nodes
  # ReadOnlyMany (ROX) - The volume can be mounted as read-only by multiple nodes
17 | accessModes:
18 | - ReadWriteMany
19 | resources:
20 | requests:
21 | storage: 1Gi
22 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/base/Service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: postgres
5 | labels:
6 | app: postgres
7 | spec:
8 | selector:
9 | app: postgres
  # Service of type NodePort
11 | type: NodePort
  # The default port for postgres
13 | ports:
14 | - port: 5432
15 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/base/StatefulSet.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: postgres
5 | spec:
6 | replicas: 1
7 | # StatefulSet must contain a serviceName
8 | serviceName: postgres
9 | selector:
10 | matchLabels:
11 | app: postgres # has to match .spec.template.metadata.labels
12 | template:
13 | metadata:
14 | labels:
15 | app: postgres # has to match .spec.selector.matchLabels
16 | spec:
17 | containers:
18 | - name: postgres
19 | image: postgres:10.4
20 | imagePullPolicy: "IfNotPresent"
21 | # The default DB port
22 | ports:
23 | - containerPort: 5432
        # Load the required configuration env values from the ConfigMap
25 | envFrom:
26 | - configMapRef:
27 | name: postgres-config
28 | # Use volume for storage
29 | volumeMounts:
30 | - mountPath: /var/lib/postgresql/data
31 | name: postgredb
32 | # We can use PersistentVolume or PersistentVolumeClaim.
      # In this sample we are using PersistentVolumeClaim
34 | volumes:
35 | - name: postgredb
36 | persistentVolumeClaim:
            # Reference to a pre-defined PVC
38 | claimName: postgres-pv-claim
39 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/base/Storage.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolume
2 | apiVersion: v1
3 | metadata:
4 | name: postgres-pv-volume
5 | labels:
6 | type: local
7 | app: postgres
8 | spec:
9 | storageClassName: manual
10 | #persistentVolumeReclaimPolicy: Retain
11 | capacity:
12 | storage: 5Gi
13 | accessModes:
14 | - ReadWriteMany
15 | hostPath:
16 | path: "/mnt/data"
17 | ---
18 | kind: PersistentVolumeClaim
19 | apiVersion: v1
20 | metadata:
21 | name: postgres-pv-claim
22 | namespace: codewizard
23 | labels:
24 | app: postgres
25 | spec:
26 | storageClassName: standard
27 | accessModes:
28 | - ReadWriteMany
29 | resources:
30 | requests:
31 | storage: 1Gi
32 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | # Set the default namespace for all the resources
4 | namespace: codewizard
5 |
6 | # The files to be processed
7 | # Kustomization will re-order the kinds if required
8 | resources:
9 | - ConfigMap.yaml
10 | - Service.yaml
11 | - PersistentVolumeClaim.yaml
12 | - StatefulSet.yaml
13 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | # Set the default namespace for all the resources
4 | namespace: codewizard
5 |
6 | bases:
7 | - base/
8 | resources:
9 | - Namespace.yaml
10 |
--------------------------------------------------------------------------------
/Labs/09-StatefulSet/PostgreSQL/testDB.sh:
--------------------------------------------------------------------------------
#!/bin/bash
### Test to see if the StatefulSet "saves" the state of the pods:
### every run inserts one row, so the reported count should keep
### growing across pod restarts if the data is really persisted.

# Programmatically get the control-plane node IP and the service NodePort
export CLUSTER_IP=$(kubectl get nodes \
    --selector=node-role.kubernetes.io/control-plane \
    -o jsonpath='{$.items[*].status.addresses[?(@.type=="InternalIP")].address}')

export NODE_PORT=$(kubectl get \
    services postgres \
    -o jsonpath="{.spec.ports[0].nodePort}" \
    -n codewizard)

# Read the database name and credentials from the same ConfigMap the
# StatefulSet consumes, so the test always matches the deployment
export POSTGRES_DB=$(kubectl get \
    configmap postgres-config \
    -o jsonpath='{.data.POSTGRES_DB}' \
    -n codewizard)

export POSTGRES_USER=$(kubectl get \
    configmap postgres-config \
    -o jsonpath='{.data.POSTGRES_USER}' \
    -n codewizard)

# PGPASSWORD is picked up automatically by psql
export PGPASSWORD=$(kubectl get \
    configmap postgres-config \
    -o jsonpath='{.data.POSTGRES_PASSWORD}' \
    -n codewizard)

# Echo check to see if we have all the required variables.
# The pattern is quoted so the shell does not glob-expand it against
# files in the current directory (the original `grep POST*` could).
printenv | grep "POSTGRES"

# Connect to postgres and create table if required.
# Once the table exists - add a row into the table.
# All variables are quoted to survive empty/whitespace values.
psql \
    -U "${POSTGRES_USER}" \
    -h "${CLUSTER_IP}" \
    -d "${POSTGRES_DB}" \
    -p "${NODE_PORT}" \
    -c "CREATE TABLE IF NOT EXISTS stateful (str VARCHAR); INSERT INTO stateful values (1); SELECT count(*) FROM stateful"
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/.env:
--------------------------------------------------------------------------------
1 | #
2 | # Demo using .env file for docker-compose
3 | #
4 |
5 | # The image which we wish to build
6 | PROXY_IMAGE_NAME=docker.io/nirgeier/istio-proxy-sample
7 | SERVER_IMAGE_NAME=docker.io/nirgeier/istio-web-server-sample
8 |
9 | # Listen on the desired ports
10 | PROXY_PORT=5050
11 | SERVER_PORT=6060
12 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/AuthorizationPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: security.istio.io/v1beta1
2 | kind: AuthorizationPolicy
3 | metadata:
4 | name: authorization-policy
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: webserver
9 | rules:
10 | - from:
11 | - source:
12 | principals: ["cluster.local/ns/default/sa/sleep"]
13 | to:
14 | - operation:
15 | methods: ["GET"]
16 | when:
17 | - key: request.headers[version]
18 | values: ["v1", "v2"]
19 |
20 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: codewizard
5 | labels:
6 | istio-injection: enabled
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/kiali-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | meta.helm.sh/release-name: kiali-server
6 | meta.helm.sh/release-namespace: istio-system
7 | labels:
8 | app: kiali
9 | app.kubernetes.io/instance: kiali
10 | app.kubernetes.io/managed-by: Helm
11 | app.kubernetes.io/name: kiali
12 | app.kubernetes.io/part-of: kiali
13 | name: kiali
14 | namespace: istio-system
15 | spec:
16 | ipFamilyPolicy: SingleStack
17 | ports:
18 | - name: http
19 | port: 20001
20 | protocol: TCP
21 | targetPort: 20001
22 | - name: http-metrics
23 | port: 9090
24 | protocol: TCP
25 | targetPort: 9090
26 | selector:
27 | app.kubernetes.io/instance: kiali
28 | app.kubernetes.io/name: kiali
29 | sessionAffinity: None
30 | type: ClusterIP
31 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # kustomization.yaml
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: codewizard
5 |
6 | resources:
7 | #- AuthorizationPolicy.yaml
8 | - kiali-service.yaml
9 | - Namespace.yaml
10 | - proxy-deployment.yaml
11 | - proxy-service.yaml
12 | - web-server1-deployment.yaml
13 | - web-server1-service.yaml
14 | - web-server2-deployment.yaml
15 | - web-server2-service.yaml
16 | - web-server2-v2-service.yaml
17 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/proxy-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: proxy-server
5 | spec:
6 | progressDeadlineSeconds: 60
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: proxy-server
11 | template:
12 | metadata:
13 | labels:
14 | app: proxy-server
15 | version: v1
16 | spec:
17 | containers:
18 | - name: proxy-server
19 | image: docker.io/nirgeier/istio-proxy-sample
20 | imagePullPolicy: Always
21 | ports:
22 | - name: web
23 | protocol: TCP
24 | containerPort: 5050
25 | env:
26 | - name: PROXY_URL_TO_SERVE
27 | value: http://webserver
28 | resources:
29 | limits:
30 | cpu: 100m
31 | requests:
32 | cpu: 100m
33 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/proxy-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: proxy-service
5 | spec:
6 | ports:
  # We use the http- name prefix as Istio requires for protocol detection
8 | - name: http-web
9 | port: 80
10 | protocol: TCP
11 | targetPort: 5050
12 | selector:
13 | app: proxy-server
14 | type: LoadBalancer
15 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/web-server1-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: webserver
6 | version: v1
7 | name: webserverv1
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: webserver
12 | version: v1
13 | strategy:
14 | type: Recreate
15 | template:
16 | metadata:
17 | labels:
18 | app: webserver
19 | version: v1
20 | spec:
21 | containers:
22 | - env:
23 | - name: SERVER_NAME
24 | value: WebServerV1
25 | image: docker.io/nirgeier/istio-web-server-sample
26 | imagePullPolicy: Always
27 | name: simpleserver
28 | ports:
29 | - containerPort: 5050
30 | name: web
31 | protocol: TCP
32 | resources:
33 | limits:
34 | cpu: 100m
35 | requests:
36 | cpu: 100m
37 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/web-server1-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: webserverv1
5 | spec:
6 | ports:
7 | - name: http-web
8 | port: 80
9 | protocol: TCP
10 | targetPort: 5050
11 | selector:
12 | app: webserver
13 | version: v1
14 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/web-server2-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: webserver
6 | version: v2
7 | name: webserverv2
8 | spec:
9 | replicas: 3
10 | selector:
11 | matchLabels:
12 | app: webserver
13 | version: v2
14 | strategy:
15 | type: Recreate
16 | template:
17 | metadata:
18 | labels:
19 | app: webserver
20 | version: v2
21 | spec:
22 | affinity: {}
23 | containers:
24 | - env:
25 | - name: SERVER_NAME
26 | value: WebServerV2
27 | image: docker.io/nirgeier/istio-web-server-sample
28 | imagePullPolicy: Always
29 | name: simpleserver
30 | ports:
31 | - name: web
32 | protocol: TCP
33 | containerPort: 5050
34 | resources:
35 | limits:
36 | cpu: 100m
37 | requests:
38 | cpu: 100m
39 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/web-server2-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: webserver
5 | spec:
6 | ports:
7 | - name: http-web
8 | port: 80
9 | protocol: TCP
10 | targetPort: 5050
11 | selector:
12 | app: webserver
13 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/K8S/web-server2-v2-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: webserverv2
5 | spec:
6 | ports:
7 | - name: http-web
8 | port: 80
9 | protocol: TCP
10 | targetPort: 5050
11 | selector:
12 | app: webserver
13 | version: v2
14 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/buildImages.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Build the demo images defined in docker-compose.yaml and push them to
# Docker Hub. Requires docker-compose and valid Docker Hub credentials
# (the login below prompts for the password interactively).

# Login to docker hub in order to push the images
docker login -u nirgeier

# Build and push the images (push only runs if the build succeeded)
docker-compose build && docker-compose push
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | proxy:
4 | build: istio-proxy
5 | image: ${PROXY_IMAGE_NAME}
6 | ports:
7 | - ${PROXY_PORT}:${PROXY_PORT}
8 | environment:
9 | port: ${PROXY_PORT}
10 | web-server:
11 | build: istio-web-server
12 | image: ${SERVER_IMAGE_NAME}
13 | ports:
14 | - ${SERVER_PORT}:${SERVER_PORT}
15 | environment:
16 | port: ${SERVER_PORT}
17 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/istio-proxy/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine
2 |
3 | # Update
4 | RUN apk add --update nodejs npm
5 |
6 | # Change to the desired folder
7 | WORKDIR /src
8 |
9 | # Copy the desired files
10 | COPY . .
11 |
12 | # Install app dependencies
13 | RUN npm i
14 |
15 | # Start the server
16 | CMD ["node", "proxy-server.js"]
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/istio-proxy/Utils.js:
--------------------------------------------------------------------------------
1 | const
2 | ip = require('ip'),
3 | os = require('os'),
4 | dns = require('dns'),
5 | cp = require('child_process'),
6 | fs = require('fs-extra'),
7 | promisify = require('util').promisify,
8 | lookup = promisify(dns.lookup),
9 | exec = promisify(cp.exec);
10 |
11 | // https://stackoverflow.com/a/37015387
12 | function isInDocker() {
13 | const platform = os.platform();
14 | // Assume this module is running in linux containers
15 | if (platform === "darwin" || platform === "win32") return false;
16 | const file = fs.readFileSync("/proc/self/cgroup", "utf-8");
17 | return file.indexOf("/docker") !== -1;
18 | };
19 |
20 | function getIP() {
21 | return new Promise((resolve, reject) => {
22 | if (!isInDocker()) {
23 | return resolve(ip.address());
24 | }
25 | lookup(
26 | "gateway.docker.internal", {
27 | family: 4,
28 | hints: dns.ADDRCONFIG | dns.V4MAPPED,
29 | }).then(info => {
30 | return resolve(info.address);
31 | });
32 |
33 | });
34 | };
35 |
36 | module.exports = {
37 | getIP
38 | }
39 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/istio-proxy/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "istio-proxy-sample-server",
3 | "author": "Nir Geier",
4 | "version": "1.0.0",
5 | "description": "Sample istio app with a dummy proxy server",
6 | "main": "proxy-server.js",
7 | "scripts": {
8 | "start": "node proxy-server.js"
9 | },
10 | "keywords": [],
11 | "license": "ISC",
12 | "dependencies": {
13 | "fs-extra": "^10.0.0",
14 | "ip": "^1.1.5",
15 | "request": "^2.88.2"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/istio-proxy/proxy-server.js:
--------------------------------------------------------------------------------
// content of index.js
const
  ip = require('ip'),
  os = require('os'),
  http = require('http'),
  request = require('request'),
  port = process.env.port || 5050,
  proxyUrl = process.env.PROXY_URL_TO_SERVE || "https://raw.githubusercontent.com/nirgeier/KubernetesLabs/master/Labs/10-Istio/01-demo-services/mock-data/external-mock1.txt";

/**
 * Handle an incoming request by forwarding its path to `proxyUrl` and
 * replying with the upstream body (or error) plus the round-trip time.
 * @param {http.IncomingMessage} req - incoming client request
 * @param {http.ServerResponse} res - response written back to the client
 */
function requestHandler(req, res) {

  const
    path = req.url,
    start = Date.now();

  // "/" maps to the upstream root; any other path is appended as-is
  request(proxyUrl + (path === "/" ? "" : path),
    (err, response, body) => {
      const duration = Date.now() - start;
      res.end(`Proxying reply: ${err ? err.toString() : body} - Took ${duration} milliseconds${os.EOL}`);
    });
}

/**
 * Create the server
 */
http
  .createServer(requestHandler)
  .listen(port, (err) => {
    if (err) {
      return console.log('Error while trying to create server', err);
    }
    console.log(`Proxy Server is listening on http://${ip.address()}:${port}`);
  });
39 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/istio-web-server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine
2 |
3 | # Update
4 | RUN apk add --update nodejs npm
5 |
6 | # Change to the desired folder
7 | WORKDIR /src
8 |
9 | # Copy the desired files
10 | COPY . .
11 |
12 | # Install app dependencies
13 | RUN npm i
14 |
15 | # Start the server
16 | CMD ["node", "web-server.js"]
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/istio-web-server/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "istio-proxy-sample-server",
3 | "author": "Nir Geier",
4 | "version": "1.0.0",
  "description": "Sample istio app with a dummy web server",
6 | "main": "web-server.js",
7 | "scripts": {
8 | "start": "node web-server.js"
9 | },
10 | "keywords": [],
11 | "license": "ISC",
12 | "dependencies": {
13 | "fs-extra": "^10.0.0",
14 | "ip": "^1.1.5",
15 | "request": "^2.88.2"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/istio-web-server/web-server.js:
--------------------------------------------------------------------------------
const
  ip = require('ip'),
  os = require('os'),
  http = require('http'),
  port = process.env.port || 5050,
  content = process.env.SERVER_NAME || "Hello world";

/**
 * Reply with this server's name, the requested URL, the host name and
 * the status code. The "/failsometimes" path deliberately fails about
 * one request in three with HTTP 500, so Istio fault/retry behaviour
 * can be demonstrated.
 * @param {http.IncomingMessage} request
 * @param {http.ServerResponse} response
 */
const requestHandler = (request, response) => {

  if (request.url === "/failsometimes") {
    // Math.floor(Math.random() * 3) is 0 roughly a third of the time
    if (Math.floor(Math.random() * 3) === 0) {
      response.statusCode = 500;
    }
  }

  response.end(`WebServer reply: ${content} requested from ${request.url} on ${os.hostname()} with code ${response.statusCode}`);
};

http.createServer(requestHandler)
  .listen(port, (err) => {
    if (err) {
      return console.log('Error while trying to create server', err);
    }

    console.log(`Web Server is listening on http://${ip.address()}:${port}`);
  });
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/mock-data/external-mock1.txt:
--------------------------------------------------------------------------------
1 | This is the content of external-mock1.txt
--------------------------------------------------------------------------------
/Labs/10-Istio/01-demo-services/mock-data/external-mock2.txt:
--------------------------------------------------------------------------------
1 | This is the content of external-mock2.txt
--------------------------------------------------------------------------------
/Labs/10-Istio/02-network-fault/K8S/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
# We assume that since we are building upon demo 01 - we already have the namespace
5 | namespace: codewizard
6 |
7 | resources:
8 | - resources/web-server1-network-faults.yaml
9 | #- resources/web-server1-network-faults.yaml
10 |
--------------------------------------------------------------------------------
/Labs/10-Istio/02-network-fault/K8S/resources/web-server1-VirtualService.yaml:
--------------------------------------------------------------------------------
1 | #
2 | # Add network fault to 50% of the traffic
3 | # We should get error like this one:
4 | # >> Proxying reply: fault filter abort - Took 3 milliseconds
5 | #
6 | apiVersion: networking.istio.io/v1beta1
7 | kind: VirtualService
8 | metadata:
9 | name: webserver
10 | namespace: codewizard
11 | spec:
12 | # hostname of a request that this VirtualService resource will match
13 | hosts:
14 | - webserver
15 | # rules to direct any HTTP traffic that matches the above hostname
16 | http:
17 | - route:
18 | - destination:
19 | host: webserverv1
--------------------------------------------------------------------------------
/Labs/10-Istio/02-network-fault/K8S/resources/web-server1-network-faults.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: VirtualService
3 | metadata:
4 | name: webserver
5 | spec:
6 | hosts:
7 | - webserver
8 | http:
9 | - route:
10 | - destination:
11 | host: webserverv1
12 | fault:
13 | abort:
14 | percentage:
15 | value: 50
16 | httpStatus: 400
--------------------------------------------------------------------------------
/Labs/10-Istio/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Step 1: Install Istio using istioctl
4 | echo "Installing Istio..."
5 | curl -L https://istio.io/downloadIstio | sh -
6 | cd istio-*
7 | export PATH=$PWD/bin:$PATH
8 | istioctl install --set profile=demo -y
9 |
10 | # Step 2: Install Kiali using Helm
11 | echo "Installing Kiali..."
12 | helm repo add kiali https://kiali.org/helm-charts
13 | helm repo update
14 | helm install kiali-server kiali/kiali-server --namespace istio-system --set auth.strategy="anonymous"
15 | helm install \
16 | --namespace kiali-operator \
17 | --create-namespace \
18 | kiali-operator \
19 | kiali/kiali-operator
20 |
21 |
22 | # Step 3: Enable Istio sidecar injection for all namespaces
23 | echo "Enabling Istio sidecar injection for default namespace..."
24 | kubectl label namespace default istio-injection=enabled
25 | kubectl label namespace codewizard istio-injection=enabled
26 | kubectl label namespace monitoring istio-injection=enabled
27 |
28 |
29 | # Step 4: Deploy the Bookinfo demo application
30 | echo "Deploying Bookinfo sample app..."
31 | kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
32 |
33 | # Step 5: Expose the Bookinfo app through Istio gateway
34 | echo "Exposing Bookinfo app through Istio gateway..."
35 | kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
36 |
37 | # Step 6: Apply VirtualService to route traffic to v2 of ratings
38 | echo "Creating VirtualService for ratings..."
39 | cat <
30 |
31 | ---
32 |
33 |
38 |
39 | ---
40 |
41 |
42 | ©CodeWizard LTD
43 |
44 |
45 | 
46 |
47 |
--------------------------------------------------------------------------------
/Labs/11-CRD-Custom-Resource-Definition/resources/crd-object.yaml:
--------------------------------------------------------------------------------
# The apiVersion is taken from the CRD definition
2 | # ./
3 | apiVersion: "codewizard.co.il/v1"
4 | kind: "CodeWizardCRD"
5 | metadata:
6 | name: "codewizard-object"
7 | spec:
8 | crdSpec: "--" # String
9 | image: "--" # String
10 | replicas: 3 # Integer
11 |
--------------------------------------------------------------------------------
/Labs/11-CRD-Custom-Resource-Definition/resources/crd.yaml:
--------------------------------------------------------------------------------
1 | # The required apiVersion for the CRD
2 | apiVersion: apiextensions.k8s.io/v1
3 |
4 | # The Kind is: 'CustomResourceDefinition'
5 | kind: CustomResourceDefinition
6 | metadata:
  # name must match the spec fields below, and be in the form: <plural>.<group>
8 | # In this sample we define the name & group:
9 | # Refer to the `spec.names` below
10 | name: custom-crd.codewizard.co.il
11 | spec:
12 | # The CRD can be applied to either Namespaced or Cluster
  # In this case we set it to Namespaced
14 | scope: Namespaced
15 |
  # group name to use for REST API: /apis/<group>/<version>
17 | # same group as defined under `metadata.name`
18 | group: codewizard.co.il
19 |
20 | names:
    # plural name to be used in the URL: /apis/<group>/<version>/<plural>
22 | plural: custom-crd
23 | # singular name to be used as an alias on the CLI and for display
24 | singular: cwcrd
25 | # kind is normally the CamelCased singular type. Your resource manifests use this.
26 | kind: CodeWizardCRD
27 | # shortNames allow shorter string to match your resource on the CLI
28 | shortNames:
29 | - cwcrd
30 |
31 | # list of versions supported by this CustomResourceDefinition
32 | versions:
33 | - name: v1
34 | # Each version can be enabled/disabled by Served flag.
35 | served: true
36 | # One and only one version must be marked as the storage version.
37 | storage: true
38 | schema:
39 | openAPIV3Schema:
40 | required: ["spec"]
41 | type: object
          # The properties which can be defined under the `spec`
43 | properties:
44 | spec:
45 | type: object
46 | # The properties which can be defined and their type
47 | properties:
48 | crdSpec:
49 | type: string
50 | image:
51 | type: string
52 | replicas:
53 | type: integer
54 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: wp-demo
5 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | # K8S Hands-on
4 | 
5 |
6 | ---
7 | # WordPress, MySQL, PVC
8 |
9 | - In This tutorial you will deploy a WordPress site and a MySQL database.
10 | - You will use `PersistentVolumes` and `PersistentVolumeClaims` as storage.
11 |
12 | ---
13 |
14 | ## Walkthrough
15 | - Patch `minikube` so we can use `Service: LoadBalancer`
16 | ```sh
# Source:
18 | # https://github.com/knative/serving/blob/b31d96e03bfa1752031d0bc4ae2a3a00744d6cd5/docs/creating-a-kubernetes-cluster.md#loadbalancer-support-in-minikube
19 | sudo ip route add \
20 | $(cat ~/.minikube/profiles/minikube/config.json | \
21 | jq -r ".KubernetesConfig.ServiceCIDR") \
22 | via $(minikube ip)
23 |
24 | kubectl run minikube-lb-patch \
25 | --replicas=1 \
26 | --image=elsonrodriguez/minikube-lb-patch:0.1 \--namespace=kube-system
27 | ```
28 | - Create the desired Namespace
29 | - Create the MySQL resources
30 | - Create `Service`
31 | - Create `PersistentVolumeClaims`
32 | - Create `Deployment`
33 | - Create password file
34 | - Create the WordPress resources
35 | - Create `Service`
36 | - Create `PersistentVolumeClaims`
37 | - Create `Deployment`
38 | - Create a `kustomization.yaml` with
39 | - Secret generator
40 | - MySQL resources
41 | - WordPress resources
42 | - Deploy the stack
43 | - Port forward from the host to the application
44 | - We use a port forward so we will be able to test and verify if the WordPress is actually running
45 | ```sh
46 | kubectl port-forward service/wordpress 8080:8089 -n wp-demo
47 | ```
48 |
49 |
50 |
51 | ---
52 |
53 |
58 |
59 | ---
60 |
61 |
62 | ©CodeWizard LTD
63 |
64 |
65 | 
66 |
67 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/all.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: wp-demo
5 | ---
6 | apiVersion: v1
7 | data:
8 | mysql-password.txt: bm90MkhhcmQyR3Vlc3M=
9 | kind: Secret
10 | metadata:
11 | name: mysql-pass-8ttc4k2t5f
12 | namespace: wp-demo
13 | type: Opaque
14 | ---
15 | apiVersion: v1
16 | kind: Service
17 | metadata:
18 | labels:
19 | app: wordpress
20 | tier: mysql
21 | name: mysql
22 | namespace: wp-demo
23 | spec:
24 | clusterIP: None
25 | ports:
26 | - port: 3306
27 | selector:
28 | app: wordpress
29 | tier: mysql
30 | ---
31 | apiVersion: v1
32 | kind: Service
33 | metadata:
34 |   labels:
35 |     app: wordpress
36 |   name: wordpress
37 |   namespace: wp-demo
38 | spec:
39 |   ports:
40 |   - port: 8089
41 |     # The WordPress container listens on 80 (see the Deployment's
42 |     # containerPort); without this, targetPort defaults to 8089 and
43 |     # traffic never reaches the pods
44 |     targetPort: 80
45 |   selector:
46 |     app: wordpress
47 |     tier: frontend
48 |   type: LoadBalancer
45 | ---
46 | apiVersion: apps/v1
47 | kind: Deployment
48 | metadata:
49 |   labels:
50 |     app: wordpress
51 |     tier: mysql
52 |   name: mysql
53 |   namespace: wp-demo
54 | spec:
55 |   selector:
56 |     matchLabels:
57 |       app: wordpress
58 |       tier: mysql
59 |   strategy:
60 |     type: Recreate
61 |   template:
62 |     metadata:
63 |       labels:
64 |         app: wordpress
65 |         tier: mysql
66 |     spec:
67 |       containers:
68 |       - env:
69 |         - name: MYSQL_ROOT_PASSWORD
70 |           valueFrom:
71 |             secretKeyRef:
72 |               key: mysql-password.txt
73 |               name: mysql-pass-8ttc4k2t5f
74 |         image: mysql:5.6
75 |         name: mysql
76 |         ports:
77 |         - containerPort: 3306
78 |           name: mysql
79 |         volumeMounts:
80 |         - mountPath: /var/lib/mysql
81 |           name: mysql-persistent-storage
82 |       volumes:
83 |       - name: mysql-persistent-storage
84 |         persistentVolumeClaim:
85 |           # Must match the PVC metadata.name ("mysql-pvc" below);
86 |           # "mysql-wordpress" is only a label value — no PVC with that
87 |           # name exists, so the pod would stay Pending
88 |           claimName: mysql-pvc
86 | ---
87 | apiVersion: apps/v1
88 | kind: Deployment
89 | metadata:
90 | labels:
91 | app: wordpress
92 | name: wordpress
93 | namespace: wp-demo
94 | spec:
95 | selector:
96 | matchLabels:
97 | app: wordpress
98 | tier: frontend
99 | strategy:
100 | type: Recreate
101 | template:
102 | metadata:
103 | labels:
104 | app: wordpress
105 | tier: frontend
106 | spec:
107 | containers:
108 | - env:
109 | - name: WORDPRESS_DB_HOST
110 | value: mysql
111 | - name: WORDPRESS_DB_PASSWORD
112 | valueFrom:
113 | secretKeyRef:
114 | key: mysql-password.txt
115 | name: mysql-pass-8ttc4k2t5f
116 | image: wordpress:4.8-apache
117 | name: wordpress
118 | ports:
119 | - containerPort: 80
120 | name: wordpress
121 | volumeMounts:
122 | - mountPath: /var/www/html
123 | name: wordpress-persistent-storage
124 | volumes:
125 | - name: wordpress-persistent-storage
126 | persistentVolumeClaim:
127 | claimName: wp-pv-claim
128 | ---
129 | apiVersion: v1
130 | kind: PersistentVolumeClaim
131 | metadata:
132 | labels:
133 | app: mysql-wordpress
134 | tier: mysql
135 | name: mysql-pvc
136 | namespace: wp-demo
137 | spec:
138 | accessModes:
139 | - ReadWriteOnce
140 | resources:
141 | requests:
142 | storage: 20Gi
143 | ---
144 | apiVersion: v1
145 | kind: PersistentVolumeClaim
146 | metadata:
147 | labels:
148 | app: wordpress
149 | name: wp-pv-claim
150 | namespace: wp-demo
151 | spec:
152 | accessModes:
153 | - ReadWriteOnce
154 | resources:
155 | requests:
156 | storage: 20Gi
157 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | # Set the default namespace for all the resources
4 | namespace: wp-demo
5 |
6 | # The files to be processed
7 | # Kustomization will re-order the kinds if required
8 | # NOTE: kustomize rejects absolute paths — these must be relative to
9 | # this kustomization.yaml — and the directory on disk is "WordPress"
10 | # (capital P), not "Wordpress"
11 | bases:
12 |   - ./resources/MySQL
13 |   - ./resources/WordPress
14 |
15 | resources:
16 |   - Namespace.yaml
17 |
18 | # kubectl expose rc \
19 | #   example \
20 | #   --port=8765 \
21 | #   --target-port=9376 \
22 | #   --name=example-service \
23 | #   --type=LoadBalancer
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/original/all.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: wordpress-mysql
5 | labels:
6 | app: wordpress
7 | spec:
8 | ports:
9 | - port: 3306
10 | selector:
11 | app: wordpress
12 | tier: mysql
13 | clusterIP: None
14 | ---
15 | apiVersion: v1
16 | kind: PersistentVolumeClaim
17 | metadata:
18 | name: mysql-pv-claim
19 | labels:
20 | app: wordpress
21 | spec:
22 | accessModes:
23 | - ReadWriteOnce
24 | resources:
25 | requests:
26 | storage: 20Gi
27 | ---
28 | apiVersion: apps/v1
29 | kind: Deployment
30 | metadata:
31 | name: wordpress-mysql
32 | labels:
33 | app: wordpress
34 | spec:
35 | selector:
36 | matchLabels:
37 | app: wordpress
38 | tier: mysql
39 | strategy:
40 | type: Recreate
41 | template:
42 | metadata:
43 | labels:
44 | app: wordpress
45 | tier: mysql
46 | spec:
47 | containers:
48 | - image: mysql:5.6
49 | name: mysql
50 | env:
51 | - name: MYSQL_ROOT_PASSWORD
52 | valueFrom:
53 | secretKeyRef:
54 | name: mysql-pass
55 | key: password
56 | ports:
57 | - containerPort: 3306
58 | name: mysql
59 | volumeMounts:
60 | - name: mysql-persistent-storage
61 | mountPath: /var/lib/mysql
62 | volumes:
63 | - name: mysql-persistent-storage
64 | persistentVolumeClaim:
65 | claimName: mysql-pv-claim
66 | ---
67 | apiVersion: v1
68 | kind: Service
69 | metadata:
70 | name: wordpress
71 | labels:
72 | app: wordpress
73 | spec:
74 | ports:
75 | - port: 80
76 | selector:
77 | app: wordpress
78 | tier: frontend
79 | type: LoadBalancer
80 | ---
81 | apiVersion: v1
82 | kind: PersistentVolumeClaim
83 | metadata:
84 | name: wp-pv-claim
85 | labels:
86 | app: wordpress
87 | spec:
88 | accessModes:
89 | - ReadWriteOnce
90 | resources:
91 | requests:
92 | storage: 20Gi
93 | ---
94 | apiVersion: apps/v1
95 | kind: Deployment
96 | metadata:
97 | name: wordpress
98 | labels:
99 | app: wordpress
100 | spec:
101 | selector:
102 | matchLabels:
103 | app: wordpress
104 | tier: frontend
105 | strategy:
106 | type: Recreate
107 | template:
108 | metadata:
109 | labels:
110 | app: wordpress
111 | tier: frontend
112 | spec:
113 | containers:
114 | - image: wordpress:4.8-apache
115 | name: wordpress
116 | env:
117 | - name: WORDPRESS_DB_HOST
118 | value: wordpress-mysql
119 | - name: WORDPRESS_DB_PASSWORD
120 | valueFrom:
121 | secretKeyRef:
122 | name: mysql-pass
123 | key: password
124 | ports:
125 | - containerPort: 80
126 | name: wordpress
127 | volumeMounts:
128 | - name: wordpress-persistent-storage
129 | mountPath: /var/www/html
130 | volumes:
131 | - name: wordpress-persistent-storage
132 | persistentVolumeClaim:
133 | claimName: wp-pv-claim
134 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/original/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | secretGenerator:
5 | - name: mysql-pass
6 | literals:
7 | - password=YOUR_PASSWORD
8 |
9 | resources:
10 | - all.yaml
11 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/original/wp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | password: WU9VUl9QQVNTV09SRA==
4 | kind: Secret
5 | metadata:
6 | name: mysql-pass-c57bb4t7mf
7 | type: Opaque
8 | ---
9 | apiVersion: v1
10 | kind: Service
11 | metadata:
12 | labels:
13 | app: wordpress
14 | name: wordpress-mysql
15 | spec:
16 | clusterIP: None
17 | ports:
18 | - port: 3306
19 | selector:
20 | app: wordpress
21 | tier: mysql
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | labels:
27 | app: wordpress
28 | name: wordpress
29 | spec:
30 | ports:
31 | - port: 80
32 | selector:
33 | app: wordpress
34 | tier: frontend
35 | type: LoadBalancer
36 | ---
37 | apiVersion: apps/v1
38 | kind: Deployment
39 | metadata:
40 | labels:
41 | app: wordpress
42 | name: wordpress-mysql
43 | spec:
44 | selector:
45 | matchLabels:
46 | app: wordpress
47 | tier: mysql
48 | strategy:
49 | type: Recreate
50 | template:
51 | metadata:
52 | labels:
53 | app: wordpress
54 | tier: mysql
55 | spec:
56 | containers:
57 | - env:
58 | - name: MYSQL_ROOT_PASSWORD
59 | valueFrom:
60 | secretKeyRef:
61 | key: password
62 | name: mysql-pass-c57bb4t7mf
63 | image: mysql:5.6
64 | name: mysql
65 | ports:
66 | - containerPort: 3306
67 | name: mysql
68 | volumeMounts:
69 | - mountPath: /var/lib/mysql
70 | name: mysql-persistent-storage
71 | volumes:
72 | - name: mysql-persistent-storage
73 | persistentVolumeClaim:
74 | claimName: mysql-pv-claim
75 | ---
76 | apiVersion: apps/v1
77 | kind: Deployment
78 | metadata:
79 | labels:
80 | app: wordpress
81 | name: wordpress
82 | spec:
83 | selector:
84 | matchLabels:
85 | app: wordpress
86 | tier: frontend
87 | strategy:
88 | type: Recreate
89 | template:
90 | metadata:
91 | labels:
92 | app: wordpress
93 | tier: frontend
94 | spec:
95 | containers:
96 | - env:
97 | - name: WORDPRESS_DB_HOST
98 | value: wordpress-mysql
99 | - name: WORDPRESS_DB_PASSWORD
100 | valueFrom:
101 | secretKeyRef:
102 | key: password
103 | name: mysql-pass-c57bb4t7mf
104 | image: wordpress:4.8-apache
105 | name: wordpress
106 | ports:
107 | - containerPort: 80
108 | name: wordpress
109 | volumeMounts:
110 | - mountPath: /var/www/html
111 | name: wordpress-persistent-storage
112 | volumes:
113 | - name: wordpress-persistent-storage
114 | persistentVolumeClaim:
115 | claimName: wp-pv-claim
116 | ---
117 | apiVersion: v1
118 | kind: PersistentVolumeClaim
119 | metadata:
120 | labels:
121 | app: wordpress
122 | name: mysql-pv-claim
123 | spec:
124 | accessModes:
125 | - ReadWriteOnce
126 | resources:
127 | requests:
128 | storage: 20Gi
129 | ---
130 | apiVersion: v1
131 | kind: PersistentVolumeClaim
132 | metadata:
133 | labels:
134 | app: wordpress
135 | name: wp-pv-claim
136 | spec:
137 | accessModes:
138 | - ReadWriteOnce
139 | resources:
140 | requests:
141 | storage: 20Gi
142 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/MySQL/01-mysql-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: mysql
5 | labels:
6 | app: wordpress
7 | tier: mysql
8 | spec:
9 | ports:
10 | - port: 3306
11 | selector:
12 | app: wordpress
13 | tier: mysql
14 | clusterIP: None
15 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/MySQL/02-mysql-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: mysql-pvc
5 | labels:
6 | app: mysql-wordpress
7 | tier: mysql
8 | spec:
9 | accessModes:
10 | - ReadWriteOnce
11 | resources:
12 | requests:
13 | storage: 20Gi
14 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/MySQL/03-mysql-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: mysql
5 |   labels:
6 |     app: wordpress
7 |     tier: mysql
8 | spec:
9 |   selector:
10 |     matchLabels:
11 |       app: wordpress
12 |       tier: mysql
13 |   strategy:
14 |     type: Recreate
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app: wordpress
19 |         tier: mysql
20 |     spec:
21 |       containers:
22 |       - image: mysql:5.6
23 |         name: mysql
24 |         env:
25 |         - name: MYSQL_ROOT_PASSWORD
26 |           valueFrom:
27 |             secretKeyRef:
28 |               name: mysql-pass
29 |               key: mysql-password.txt
30 |         ports:
31 |         - containerPort: 3306
32 |           name: mysql
33 |         volumeMounts:
34 |         - name: mysql-persistent-storage
35 |           mountPath: /var/lib/mysql
36 |       volumes:
37 |       - name: mysql-persistent-storage
38 |         persistentVolumeClaim:
39 |           # Must match the PVC metadata.name in 02-mysql-pvc.yaml
40 |           # ("mysql-pvc"); "mysql-wordpress" is only a label value there
41 |           claimName: mysql-pvc
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/MySQL/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | # Generate the DB password from the given file
5 | secretGenerator:
6 | - name: mysql-pass
7 | behavior: create
8 | files:
9 | - mysql-password.txt
10 |
11 | # The files to be processed
12 | # Kustomization will re-order the kinds if required
13 | resources:
14 | - 01-mysql-service.yaml
15 | - 02-mysql-pvc.yaml
16 | - 03-mysql-deployment.yaml
17 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/MySQL/mysql-password.txt:
--------------------------------------------------------------------------------
1 | not2Hard2Guess
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/WordPress/01-wordpress-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: wordpress
5 |   labels:
6 |     app: wordpress
7 | spec:
8 |   ports:
9 |   - port: 8089
10 |     # The WordPress container listens on 80 (containerPort in
11 |     # 03-wordpress-deployment.yaml); without this, targetPort defaults
12 |     # to 8089 and traffic never reaches the pods
13 |     targetPort: 80
14 |   selector:
15 |     app: wordpress
16 |     tier: frontend
17 |   type: LoadBalancer
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/WordPress/02-wordpress-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: wp-pv-claim
5 | labels:
6 | app: wordpress
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | resources:
11 | requests:
12 | storage: 20Gi
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/WordPress/03-wordpress-deployment.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: wordpress
6 | labels:
7 | app: wordpress
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: wordpress
12 | tier: frontend
13 | strategy:
14 | type: Recreate
15 | template:
16 | metadata:
17 | labels:
18 | app: wordpress
19 | tier: frontend
20 | spec:
21 | containers:
22 | - image: wordpress:4.8-apache
23 | name: wordpress
24 | env:
25 | - name: WORDPRESS_DB_HOST
26 | value: mysql
27 | - name: WORDPRESS_DB_PASSWORD
28 | valueFrom:
29 | secretKeyRef:
30 | name: mysql-pass
31 | key: mysql-password.txt
32 | ports:
33 | - containerPort: 80
34 | name: wordpress
35 | volumeMounts:
36 | - name: wordpress-persistent-storage
37 | mountPath: /var/www/html
38 | volumes:
39 | - name: wordpress-persistent-storage
40 | persistentVolumeClaim:
41 | claimName: wp-pv-claim
42 |
--------------------------------------------------------------------------------
/Labs/12-Wordpress-MySQL-PVC/resources/WordPress/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | # The files to be processed
5 | # Kustomization will re-order the kinds if required
6 | resources:
7 | - 01-wordpress-service.yaml
8 | - 02-wordpress-pvc.yaml
9 | - 03-wordpress-deployment.yaml
10 |
11 |
12 |
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo-0.1.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/Labs/13-HelmChart/codewizard-helm-demo-0.1.0.tgz
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | .DS_Store
3 | # Common VCS dirs
4 | .git/
5 | .gitignore
6 | .bzr/
7 | .bzrignore
8 | .hg/
9 | .hgignore
10 | .svn/
11 |
12 | # Common backup files
13 | *.swp
14 | *.bak
15 | *.tmp
16 | *.orig
17 | *~
18 |
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 | .history
25 |
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: codewizard-helm-demo
3 | description: A Helm chart demo for Kubernetes
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer.
11 | # They're included as a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline.
13 | #
14 | # Note:
15 | # Library charts do not define any templates and therefore cannot be deployed.
16 | type: application
17 |
18 | # This is the chart version.
19 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
20 | version: 0.1.0
21 |
22 | # This is the version number of the application being deployed.
23 | # In this demo we deploy nginx so we get the version from DockerHub
24 | # https://hub.docker.com/_/nginx
25 | appVersion: 1.19.7
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo/templates/ConfigMap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: {{ include "webserver.fullname" . }}-config
5 | namespace: codewizard
6 | data:
7 | nginx.conf: |-
8 | server {
9 | listen 80;
10 | server_name localhost;
11 |
12 | location / {
13 | return 200 '{{ .Values.nginx.conf.message }}\n';
14 | add_header Content-Type text/plain;
15 | }
16 |
17 | location /release/name {
18 | return 200 '{{ .Release.Name }}\n';
19 | add_header Content-Type text/plain;
20 | }
21 |
22 | location /release/revision {
23 | return 200 '{{ .Release.Revision }}\n';
24 | add_header Content-Type text/plain;
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo/templates/Deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "webserver.fullname" . }}
5 | namespace: codewizard
6 | labels:
7 | {{- include "webserver.labels" . | nindent 4 }}
8 | spec:
9 | {{- if not .Values.autoscaling.enabled }}
10 | replicas: {{ .Values.replicaCount }}
11 | {{- end }}
12 | selector:
13 | matchLabels:
14 | {{- include "webserver.selectorLabels" . | nindent 6 }}
15 | template:
16 | metadata:
17 | annotations:
18 | checksum/config: {{ include (print $.Template.BasePath "/ConfigMap.yaml") . | sha256sum }}
19 | labels:
20 | {{- include "webserver.selectorLabels" . | nindent 8 }}
21 | spec:
22 | containers:
23 | - name: {{ .Chart.Name }}
24 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
25 | imagePullPolicy: {{ .Values.image.pullPolicy }}
26 | volumeMounts:
27 | - name: nginx-config
28 | mountPath: /etc/nginx/conf.d/default.conf
29 | subPath: nginx.conf
30 | ports:
31 | - name: http
32 | containerPort: 80
33 | protocol: TCP
34 | volumes:
35 | - name: nginx-config
36 | configMap:
37 | name: {{ include "webserver.fullname" . }}-config
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo/templates/Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: codewizard
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo/templates/Service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "webserver.fullname" . }}
5 | namespace: codewizard
6 | labels:
7 | {{- include "webserver.labels" . | nindent 4 }}
8 | spec:
9 | type: {{ .Values.service.type }}
10 | ports:
11 | - port: {{ .Values.service.port }}
12 | targetPort: http
13 | protocol: TCP
14 | name: http
15 | selector:
16 | {{- include "webserver.selectorLabels" . | nindent 4 }}
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Note:
4 | We truncate at 63 chars where required,
5 | because some Kubernetes name fields are
6 | limited to this length (by the DNS naming spec).
7 | */}}
8 |
9 | {{/* ............ Templates section ............ */}}
10 |
11 | {{/* Define the name of this demo webserver */}}
12 | {{- define "webserver.name" -}}
13 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
14 | {{- end }}
15 |
16 | {{/*
17 | Create a default fully qualified app name.
18 | If release name contains chart name it will be used as a full name.
19 | */}}
20 | {{- define "webserver.fullname" -}}
21 | {{- if .Values.fullnameOverride }}
22 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
23 | {{- else }}
24 | {{- $name := default .Chart.Name .Values.nameOverride }}
25 | {{- if contains $name .Release.Name }}
26 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
27 | {{- else }}
28 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
29 | {{- end }}
30 | {{- end }}
31 | {{- end }}
32 |
33 | {{/*
34 | Create chart name and version as used by the chart label.
35 | */}}
36 | {{- define "webserver.chart" -}}
37 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
38 | {{- end }}
39 |
40 | {{/*
41 | Common labels
42 | */}}
43 | {{- define "webserver.labels" -}}
44 | helm.sh/chart: {{ include "webserver.chart" . }}
45 | {{ include "webserver.selectorLabels" . }}
46 | {{- if .Chart.AppVersion }}
47 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
48 | {{- end }}
49 | app.kubernetes.io/managed-by: {{ .Release.Service }}
50 | {{- end }}
51 |
52 | {{/*
53 | Selector labels
54 | */}}
55 | {{- define "webserver.selectorLabels" -}}
56 | app.kubernetes.io/name: {{ include "webserver.name" . }}
57 | app.kubernetes.io/instance: {{ .Release.Name }}
58 | {{- end }}
--------------------------------------------------------------------------------
/Labs/13-HelmChart/codewizard-helm-demo/values.yaml:
--------------------------------------------------------------------------------
1 | replicaCount: 2
2 |
3 | image:
4 | repository: nginx
5 | pullPolicy: IfNotPresent
6 | tag: ""
7 |
8 | service:
9 | type: ClusterIP
10 | port: 80
11 |
12 | autoscaling:
13 | enabled: false
14 |
15 | nginx:
16 | conf:
17 | message: "CodeWizard Helm Demo"
18 |
--------------------------------------------------------------------------------
/Labs/13-HelmChart/install.sh:
--------------------------------------------------------------------------------
1 | # Remove the old chart if it already exists
2 | helm uninstall codewizard-helm-demo
3 | sleep 10
4 |
5 | # Pack the Helm chart in the desired folder
6 | helm package codewizard-helm-demo
7 |
8 | # Install the chart and view the output
9 | helm install codewizard-helm-demo codewizard-helm-demo-0.1.0.tgz
10 |
11 | sleep 10
12 | # Verify that the chart installed
13 | kubectl get all -n codewizard
14 |
15 | # Check the response from the chart.
16 | # Note: the original "2&>/dev/null" passed a literal "2" argument to
17 | # kubectl and redirected only stdout; ">/dev/null 2>&1" silences both.
18 | kubectl delete pod busybox --force --grace-period=0 >/dev/null 2>&1
19 | kubectl run busybox \
20 |     --image=busybox \
21 |     --rm \
22 |     -it \
23 |     --restart=Never \
24 |     -- /bin/sh -c "wget -qO- http://codewizard-helm-demo.codewizard.svc.cluster.local"
25 |
--------------------------------------------------------------------------------
/Labs/14-Logging/fluentd-configuration/ConfigMap.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: fluentd-conf
5 | namespace: kube-system
6 | data:
7 | kubernetes.conf: "
8 |
9 | @type stdout
10 |
11 |
12 | @type null
13 |
14 |
15 |
16 | @type file
17 | path /var/log/fluent/docker.log
18 | time_slice_format %Y%m%d
19 | time_slice_wait 10m
20 | time_format %Y%m%dT%H%M%S%z
21 | compress gzip
22 | utc
23 |
24 |
25 |
26 | @type tail
27 | @id in_tail_container_logs
28 | path /var/log/containers/*.log
29 | pos_file /var/log/fluentd-containers.log.pos
30 | tag kubernetes.*
31 | read_from_head true
32 | <% if is_v1 %>
33 |
34 | @type json
35 | time_format %Y-%m-%dT%H:%M:%S.%NZ
36 |
37 | <% else %>
38 | format json
39 | time_format %Y-%m-%dT%H:%M:%S.%NZ
40 | <% end %>
41 |
42 | "
43 |
--------------------------------------------------------------------------------
/Labs/14-Logging/fluentd-configuration/kubernetes.conf:
--------------------------------------------------------------------------------
1 |
2 | @type stdout
3 |
4 |
5 | @type null
6 |
7 |
8 | @type file
9 | path /var/log/fluent/docker.log
10 | time_slice_format %Y%m%d
11 | time_slice_wait 10m
12 | time_format %Y%m%dT%H%M%S%z
13 | compress gzip
14 | utc
15 |
16 |
17 | @type tail
18 | @id in_tail_container_logs
19 | path /var/log/containers/*.log
20 | pos_file /var/log/fluentd-containers.log.pos
21 | tag kubernetes.*
22 | read_from_head true
23 | <% if is_v1 %>
24 |
25 | @type json
26 | time_format %Y-%m-%dT%H:%M:%S.%NZ
27 |
28 | <% else %>
29 | format json
30 | time_format %Y-%m-%dT%H:%M:%S.%NZ
31 | <% end %>
32 |
--------------------------------------------------------------------------------
/Labs/14-Logging/resources/ClusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: fluentd
5 | namespace: kube-system
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - pods
11 | - namespaces
12 | verbs:
13 | - get
14 | - list
15 | - watch
16 |
--------------------------------------------------------------------------------
/Labs/14-Logging/resources/ClusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: fluentd
5 | namespace: kube-system
6 | roleRef:
7 | kind: ClusterRole
8 | name: fluentd
9 | apiGroup: rbac.authorization.k8s.io
10 | subjects:
11 | - kind: ServiceAccount
12 | name: fluentd
13 | namespace: kube-system
14 |
--------------------------------------------------------------------------------
/Labs/14-Logging/resources/DaemonSet.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: fluentd-azureblob
5 | namespace: kube-system
6 | labels:
7 | k8s-app: fluentd-logging
8 | version: v1
9 | spec:
10 | selector:
11 | matchLabels:
12 | k8s-app: fluentd-logging
13 | version: v1
14 | template:
15 | metadata:
16 | labels:
17 | k8s-app: fluentd-logging
18 | version: v1
19 | spec:
20 | serviceAccount: fluentd
21 | serviceAccountName: fluentd
22 | tolerations:
23 | - key: node-role.kubernetes.io/control-plane
24 | effect: NoSchedule
25 | containers:
26 | - name: fluentd-azureblob
27 | image: fluent/fluentd-kubernetes-daemonset:v1-debian-azureblob
28 | imagePullPolicy: Always
29 | env:
30 | - name: AZUREBLOB_ACCOUNT_NAME
31 | value: ""
32 | # Use AZUREBLOB_ACCOUNT_KEY for access key authorization, AZUREBLOB_SAS_TOKEN for shared access signature authorization,
33 | # AZUREBLOB_CONNECTION_STRING to use the full connection string generated in the Azure Portal or neither to use Managed Service Identity.
34 | - name: AZUREBLOB_ACCOUNT_KEY
35 | value: ""
36 | - name: AZUREBLOB_CONNECTION_STRING
37 | value: ""
38 | - name: AZUREBLOB_SAS_TOKEN
39 | value: ""
40 | - name: AZUREBLOB_CONTAINER
41 | value: ""
42 | - name: AZUREBLOB_LOG_PATH
43 | value: ""
44 | resources:
45 | limits:
46 | memory: 200Mi
47 | requests:
48 | cpu: 100m
49 | memory: 200Mi
50 | volumeMounts:
51 | - name: varlog
52 | mountPath: /var/log
53 | - name: varlibdockercontainers
54 | mountPath: /var/lib/docker/containers
55 | readOnly: true
56 | terminationGracePeriodSeconds: 30
57 | volumes:
58 | - name: varlog
59 | hostPath:
60 | path: /var/log
61 | - name: varlibdockercontainers
62 | hostPath:
63 | path: /var/lib/docker/containers
64 |
--------------------------------------------------------------------------------
/Labs/14-Logging/resources/ServiceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: fluentd
5 | namespace: kube-system
6 |
--------------------------------------------------------------------------------
/Labs/14-Logging/resources/kustomization.yaml:
--------------------------------------------------------------------------------
1 | namespace: kube-system
2 |
3 | resources:
4 | - ServiceAccount.yaml
5 | - ClusterRole.yaml
6 | - ClusterRoleBinding.yaml
7 | - DaemonSet.yaml
8 |
--------------------------------------------------------------------------------
/Labs/14-Logging/runMe.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Build the resources folder
4 | kubectl kustomize resources/ > logger.yaml && kubectl delete -f logger.yaml
5 | kubectl kustomize resources/ > logger.yaml && kubectl apply -f logger.yaml
6 |
--------------------------------------------------------------------------------
/Labs/15-Prometheus-Grafana/archive/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node
2 |
3 | WORKDIR /app
4 |
5 | # Copy the content of the application
6 | COPY . .
7 |
8 | # Install requirements
9 | RUN npm install
10 |
11 | # Run the server
12 | CMD node .
--------------------------------------------------------------------------------
/Labs/15-Prometheus-Grafana/archive/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | nodejs-application:
4 | build:
5 | context: ./app
6 | container_name: nodejs-app
7 | image: nodejs-application
8 | ports:
9 | - "5000:5000"
10 | prometheus:
11 | container_name: prometheus-svc
12 | image: prom/prometheus
13 | ports:
14 | - "9091:9090"
15 | command: --config.file=/etc/prometheus/prometheus.yaml
16 | volumes:
17 | - ./prometheus.yaml:/etc/prometheus/prometheus.yaml
18 | grafana:
19 | image: grafana/grafana
20 | ports:
21 | - "3000:3000"
22 | environment:
23 | - GF_AUTH_BASIC_ENABLED=false
24 | - GF_AUTH_ANONYMOUS_ENABLED=true
25 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
26 | grafana-dashboards:
27 | image: alpine
28 | depends_on:
29 | - grafana
30 | volumes:
31 | - ./grafana-data:/grafana
32 | command: >
33 | /bin/sh -c "
34 | apk add --no-cache curl
35 | echo 'waiting for grafana'
36 | sleep 5s
37 | cd /grafana
38 | curl --request POST http://grafana:3000/api/datasources --header 'Content-Type: application/json' -d @datasources.json
39 | curl --request POST http://grafana:3000/api/dashboards/db --header 'Content-Type: application/json' -d @dashboard.json"
40 |
--------------------------------------------------------------------------------
/Labs/15-Prometheus-Grafana/archive/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "prometheus-grafana",
3 | "version": "0.0.1",
4 | "description": "15-Prometheus-grafana K8S demo app",
5 | "main": "server.js",
6 | "author": "Nir Geier nirgeier@gmail.com",
7 | "license": "ISC",
8 | "dependencies": {
9 | "express": "^4.17.1",
10 | "prom-client": "^13.1.0"
11 | }
12 | }
--------------------------------------------------------------------------------
/Labs/15-Prometheus-Grafana/archive/prometheus.yaml:
--------------------------------------------------------------------------------
global:
  scrape_interval: 5s
  evaluation_interval: 30s
scrape_configs:
  - job_name: k8s-prometheus-grafana-demo
    honor_labels: true
    static_configs:
      # Scrape the node.js demo app. The hostname must match a resolvable
      # name on the compose network - "nodejs-app" is the container_name of
      # the app service (the previous target "app" matched nothing).
      - targets: ["nodejs-app:5000"]
9 |
--------------------------------------------------------------------------------
/Labs/15-Prometheus-Grafana/archive/server.js:
--------------------------------------------------------------------------------
'use strict';

// Express server
const express = require('express');
const app = express();

// Prometheus client for node.js
const client = require('prom-client');

// Prometheus metric (Counter) - count number of requests
const counter = new client.Counter({
  name: 'node_request_operations_total',
  help: 'The total number of processed requests',
});

// Prometheus metric (Histogram) - duration of requests in seconds
const histogram = new client.Histogram({
  name: 'node_request_duration_seconds',
  help: 'Histogram for the duration in seconds.',
  buckets: [1, 2, 5, 6, 10],
});

const PORT = 5000;
// Bind to all interfaces so the port published by Docker is reachable -
// 127.0.0.1 would only accept connections from inside the container.
const HOST = '0.0.0.0';

// Collect the default Node.js metrics (CPU, memory, event loop, ...).
// Since prom-client v12 the defaults are collected at scrape time, so the
// old `timeout` option is no longer supported and has been dropped.
client.collectDefaultMetrics();

app.get('/', (req, res) => {
  // Simulate a sleep
  const start = new Date();
  const simulateTime = 1000;

  setTimeout(() => {
    // execution time simulated with setTimeout function
    const end = new Date() - start;
    histogram.observe(end / 1000); // convert to seconds
  }, simulateTime);

  // Increment the counter on every new request
  counter.inc();

  // Send reply to the user
  res.send('Hello world\n');
});

// Metrics endpoint for the collector.
// prom-client v13 (pinned in package.json) made register.metrics() async,
// so it must be awaited - otherwise "[object Promise]" is sent to the
// scraper instead of the metrics text.
app.get('/metrics', async (req, res) => {
  res.set('Content-Type', client.register.contentType);
  res.end(await client.register.metrics());
});

// Start the server
app.listen(PORT, HOST, () => {
  console.log(`Server is running on http://${HOST}:${PORT}`);
});
59 |
--------------------------------------------------------------------------------
/Labs/15-Prometheus-Grafana/demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

#set -x

export CLUSTER_NAME=prometheus-cluster
export PROMETHEUS_NS=prometheus-stack

# Install kind if not already installed
# eval "$(/opt/homebrew/bin/brew shellenv)"
# brew install kind derailed/k9s/k9s

## Add helm charts
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update

# Create the demo folder
rm -rf demo
mkdir -p demo
cd demo || exit 1

# Create the cluster.yaml
# cat << EOF > cluster.yaml
# ###
# ### Auto Generated file from the script.
# ### Do not edit !!!
# ###
# ###
# apiVersion: kind.x-k8s.io/v1alpha4
# kind: Cluster
# nodes:
#   - role: control-plane
#     kubeadmConfigPatches:
#       - |
#         kind: InitConfiguration
#         nodeRegistration:
#           kubeletExtraArgs:
#             #
#             # node-labels:
#             # only allow the ingress controller to run on a
#             # specific node(s) matching the label selector
#             #
#             node-labels: "ingress-ready=true"
#     #
#     # extraPortMappings:
#     # allow the local host to make requests to the
#     # Ingress controller over ports 80/443
#     #
#     extraPortMappings:
#       - containerPort: 80
#         hostPort: 8080
#         protocol: TCP
#       - containerPort: 443
#         hostPort: 6443
#         protocol: TCP
#   - role: worker
#   - role: worker
# EOF

# # Delete old cluster
# kind delete \
#     cluster \
#     --name $CLUSTER_NAME

# # Start the cluster
# kind create \
#     cluster \
#     --name $CLUSTER_NAME \
#     --config ./cluster.yaml

# # Wait for nodes
# kubectl wait node \
#     --all \
#     --for condition=ready \
#     --timeout=600s

# Verify that the cluster is running
kubectl get nodes -o wide
kind get clusters

# Install prometheus - recreate the namespace for a clean run.
# --ignore-not-found keeps the delete from failing on the first run.
kubectl delete ns $PROMETHEUS_NS --ignore-not-found
kubectl create ns $PROMETHEUS_NS

# Switch to the new namespace as the default namespace
kubectl config \
    set-context --current \
    --namespace=$PROMETHEUS_NS

###
### Install prometheus
###

### Install prometheus-stack
helm install \
    prometheus-stack \
    prometheus-community/kube-prometheus-stack

### Check the installation
kubectl get \
    pods -l "release=prometheus-stack" \
    -n $PROMETHEUS_NS

# Bound the wait so the script cannot hang forever
kubectl wait \
    pod -l "release=prometheus-stack" \
    --for=condition=ready \
    --timeout=300s \
    -n $PROMETHEUS_NS

## Open the Grafana ports
kubectl port-forward \
    svc/$PROMETHEUS_NS-grafana \
    -n $PROMETHEUS_NS \
    3000:80 &

kubectl port-forward \
    svc/$PROMETHEUS_NS-kube-prom-prometheus \
    -n $PROMETHEUS_NS \
    9090:9090 &

# Extract the Grafana admin credentials from the generated secret
export GRAFANA_USER_NAME=$(kubectl get secret \
    prometheus-stack-grafana \
    -o jsonpath='{.data.admin-user}' \
    | base64 -d)

export GRAFANA_PASSWORD=$(kubectl get secret \
    prometheus-stack-grafana \
    -o jsonpath='{.data.admin-password}' \
    | base64 -d)

echo ''
echo ''
echo 'User : ' $GRAFANA_USER_NAME
echo 'Password: ' $GRAFANA_PASSWORD
echo ''
echo ''
136 |
137 |
--------------------------------------------------------------------------------
/Labs/15-Prometheus-Grafana/demo/cluster.yaml:
--------------------------------------------------------------------------------
1 | ###
2 | ### Auto Generated file from the script.
3 | ### Do not edit !!!
4 | ###
5 | ###
6 | apiVersion: kind.x-k8s.io/v1alpha4
7 | kind: Cluster
8 | nodes:
9 | - role: control-plane
10 | kubeadmConfigPatches:
11 | - |
12 | kind: InitConfiguration
13 | nodeRegistration:
14 | kubeletExtraArgs:
15 | #
16 | # node-labels:
17 | # only allow the ingress controller to run on a
18 | # specific node(s) matching the label selector
19 | #
20 | node-labels: "ingress-ready=true"
21 | #
22 | # extraPortMappings:
23 | # allow the local host to make requests to the
24 | # Ingress controller over ports 80/443
25 | #
26 | extraPortMappings:
27 | - containerPort: 80
28 | hostPort: 8080
29 | protocol: TCP
30 | - containerPort: 443
31 | hostPort: 6443
32 | protocol: TCP
33 | - role: worker
34 | - role: worker
35 |
--------------------------------------------------------------------------------
/Labs/15-Prometheus-Grafana/install.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Step 1: Add Helm Repositories
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update

# Step 2: Install Prometheus Stack
helm install prometheus prometheus-community/kube-prometheus-stack --namespace monitoring --create-namespace

# Step 3: Install Grafana
helm install grafana grafana/grafana --namespace monitoring

# Step 4: Wait for Prometheus and Grafana to be deployed
echo "Waiting for Prometheus and Grafana to be deployed..."
# NOTE: "prometheus-operated" is a *Service* created by the operator - the
# Prometheus server itself runs as a StatefulSet, so
# `rollout status deployment/prometheus-operated` can never succeed.
# Wait on the Prometheus pods by their standard label instead.
kubectl wait pods -l app.kubernetes.io/name=prometheus -n monitoring --for=condition=Ready --timeout=300s
kubectl rollout status deployment/grafana -n monitoring

# Step 5: Get Grafana Admin Password
GRAFANA_PASSWORD=$(kubectl get secret --namespace monitoring grafana -o jsonpath='{.data.admin-password}' | base64 --decode)

# Step 6: Port-forward Grafana
echo "Grafana is ready. Port forwarding to localhost:3000..."
kubectl port-forward --namespace monitoring service/grafana 3000:80 &

# Step 7: Output Grafana credentials
echo "Grafana URL: http://localhost:3000"
echo "Username: admin"
echo "Password: $GRAFANA_PASSWORD"

# Optional: Print instructions for manual steps
echo "To configure Grafana and Prometheus, please follow the instructions provided in the guide."

kubectl port-forward --namespace monitoring svc/prometheus-operated 9090:9090 &
35 |
--------------------------------------------------------------------------------
/Labs/16-Affinity-Taint-Tolleration/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | # K8S Hands-on
4 | 
5 |
6 | ---
--------------------------------------------------------------------------------
/Labs/17-PodDisruptionBudgets-PDB/resources/50MB-ram.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: busybox
spec:
  replicas: 3
  selector:
    matchLabels:
      app: busybox
  template:
    metadata:
      labels:
        app: busybox
    spec:
      containers:
        - name: busybox
          image: busybox
          # busybox exits immediately when started without a command, which
          # leaves the Deployment in CrashLoopBackOff. Keep the container
          # alive so the memory/CPU requests actually take effect.
          command: ["sh", "-c", "while true; do sleep 3600; done"]
          resources:
            requests:
              memory: "50Mi"
              cpu: "250m"
            limits:
              memory: "128Mi"
              cpu: "500m"
25 |
--------------------------------------------------------------------------------
/Labs/17-PodDisruptionBudgets-PDB/resources/Deployment.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: codewizard
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
          resources:
            limits:
              memory: 256Mi
              # "500m" = half a CPU core. The original "500ms" is not a
              # valid Kubernetes CPU quantity and is rejected by the API.
              cpu: 500m
27 |
--------------------------------------------------------------------------------
/Labs/17-PodDisruptionBudgets-PDB/resources/PodDisruptionBudget.yaml:
--------------------------------------------------------------------------------
# policy/v1beta1 was removed in Kubernetes 1.25 - use the stable policy/v1 API
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: nginx-pdb
spec:
  minAvailable: 1 # <--- This will ensure that at least 1 pod stays available
  selector:
    matchLabels:
      app: nginx
--------------------------------------------------------------------------------
/Labs/17-PodDisruptionBudgets-PDB/startMinikube.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # For more details about Feature Gates read:
4 | # https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-stages
5 | #
6 | # For more details about eviction-signals
7 | # https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/#eviction-signals
8 |
9 | minikube start \
10 | --extra-config=kubelet.eviction-hard="memory.available<480M" \
11 | --extra-config=kubelet.eviction-pressure-transition-period="30s" \
12 | --extra-config=kubelet.feature-gates="ExperimentalCriticalPodAnnotation=true"
13 |
14 | kubectl describe node minikube | grep MemoryPressure
--------------------------------------------------------------------------------
/Labs/18-ArgoCD/ArgoCD.sh:
--------------------------------------------------------------------------------
1 | # Define the desired namespace
2 | NAMESPACE=argocd
3 |
4 | # start the minikube cluster
5 | # minikube start
6 |
7 | # Download ArgoCD CLI
8 | VERSION=$(curl --silent "https://api.github.com/repos/argoproj/argo-cd/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/')
9 | sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-linux-amd64
10 | sudo chmod +x /usr/local/bin/argocd
11 |
12 | # Deploy ArgoCD - create namespace
13 | kubectl create namespace $NAMESPACE
14 |
15 | # Set the default namesapce
16 | kubectl config set-context --current --namespace=$NAMESPACE
17 |
18 | # Change the argocd-server service type to LoadBalancer:
19 | #kubectl patch svc argocd-server -n $NAMESPACE -p '{"spec": {"type": "NodePort"}}'
20 |
21 | # Set the new desired Deployment
22 | cat << EOF > kustomization.yaml
23 | apiVersion: kustomize.config.k8s.io/v1beta1
24 | kind: Kustomization
25 | namespace: argocd
26 | resources:
27 | - https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
28 |
29 | patchesStrategicMerge:
30 | - patch-replace.yaml
31 | EOF
32 |
33 | # Set the desired patch
34 | cat << EOF > patch-replace.yaml
35 | apiVersion: apps/v1
36 | kind: Deployment
37 | metadata:
38 | name: argocd-server
39 | spec:
40 | selector:
41 | matchLabels:
42 | app.kubernetes.io/name: argocd-server
43 | template:
44 | spec:
45 | containers:
46 | - name: argocd-server
47 | command:
48 | - argocd-server
49 | - --insecure
50 | - --staticassets
51 | - /shared/app
52 | EOF
53 |
54 | kubectl kustomize . | kubectl apply -f -
55 | sleep 30
56 |
57 | echo '---------------------------------------------------------------'
58 | echo 'User : admin'
59 | echo 'Password: ' $(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
60 | echo '---------------------------------------------------------------'
61 |
62 |
63 | kubectl port-forward svc/argocd-server -n argocd 8085:80
64 |
--------------------------------------------------------------------------------
/Labs/18-ArgoCD/install.sh:
--------------------------------------------------------------------------------
1 |
2 |
3 | kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo
4 |
--------------------------------------------------------------------------------
/Labs/18-ArgoCD/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | namespace: argocd
4 | resources:
5 | - https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
6 |
7 | patchesStrategicMerge:
8 | - patch-replace.yaml
9 |
--------------------------------------------------------------------------------
/Labs/18-ArgoCD/patch-replace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: argocd-server
5 | spec:
6 | selector:
7 | matchLabels:
8 | app.kubernetes.io/name: argocd-server
9 | template:
10 | spec:
11 | containers:
12 | - name: argocd-server
13 | command:
14 | - argocd-server
15 | - --insecure
16 | - --staticassets
17 | - /shared/app
18 |
--------------------------------------------------------------------------------
/Labs/19-CustomScheduler/codeWizardScheduler.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Author: Nir Geier

# `set -o pipefail`
# When executing the sequence of commands connected to the pipe,
# as long as any one command returns a non-zero value,
# the entire pipe returns a non-zero value,
# even if the last command returns 0.
#
# In other words, the chain of commands will fail if any of the commands fail

set -eo pipefail

# `set -x`
# Shell mode, where all executed commands are printed to the terminal.
# Comment this out if you don't wish to view the full log
set -x

# Start minikube if required
source ../../scripts/startMinikube.sh

# Deploy our demo pods
kubectl kustomize ./resources | kubectl apply -f -

# Start the API and listen on port 8081
kubectl proxy --port=8081 &

# Syntax:
#   ${parameter:-word}
#   If parameter is unset or null, the expansion of word is substituted.
#   Otherwise, the value of parameter is substituted.

# You can set those parameters outside of this script
# export CLUSTER_URL=
CLUSTER_URL="${CLUSTER_URL:-127.0.0.1:8081}"
CUSTOM_SCHEDULER="${CUSTOM_SCHEDULER:-codeWizardScheduler}"

# Scheduler should always run
while true; do

    # Get a list of all our pods in pending state
    for POD in $(kubectl get pods \
        --server "${CLUSTER_URL}" \
        --output jsonpath='{.items..metadata.name}' \
        --field-selector=status.phase==Pending); do

        # Get the desired schedulerName if the pod has defined any schedulerName
        CUSTOM_SCHEDULER_NAME=$(kubectl get pod "${POD}" \
            --output jsonpath='{.spec.schedulerName}')

        # Check if the desired schedulerName is our custom one.
        # If it's a match, this is where our custom scheduler will "jump in"
        if [ "${CUSTOM_SCHEDULER_NAME}" == "${CUSTOM_SCHEDULER}" ]; then

            # Get the pod namespace
            NAMESPACE=$(kubectl get pod "${POD}" \
                --output jsonpath='{.metadata.namespace}')

            # Get an array of all the nodes
            NODES=($(kubectl get nodes \
                --server "${CLUSTER_URL}" \
                --output jsonpath='{.items..metadata.name}'))

            # Store the length of our NODES array
            NODES_LENGTH=${#NODES[@]}

            # Randomly select a node from the array.
            # $((RANDOM % NODES_LENGTH)) is the remainder of a random number
            # divided by the number of nodes; with a single node this is
            # always index 0. ($(( )) replaces the deprecated $[ ] syntax.)
            NODE=${NODES[$((RANDOM % NODES_LENGTH))]}

            # Bind the current pod to the node selected above.
            # The "binding" is done using an API call to pods/.../binding
            curl \
                --request POST \
                --silent \
                --fail \
                --header "Content-Type:application/json" \
                --data '{"apiVersion":"v1",
                    "kind": "Binding",
                    "metadata": {
                        "name": "'${POD}'"
                    },
                    "target": {
                        "apiVersion": "v1",
                        "kind": "Node",
                        "name": "'${NODE}'"
                    }
                }' \
                http://${CLUSTER_URL}/api/v1/namespaces/${NAMESPACE}/pods/${POD}/binding/ >/dev/null \
                && echo "${POD} was assigned to ${NODE}" \
                || echo "Failed to assign ${POD} to ${NODE}"
        fi
    done

    # Current scheduling round done - sleep and wake up for the next iteration
    echo "Scheduler is going to sleep"

    sleep 15s
done
--------------------------------------------------------------------------------
/Labs/19-CustomScheduler/resources/Deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx
5 | spec:
6 | replicas: 3
7 | selector:
8 | matchLabels:
9 | app: nginx
10 | template:
11 | metadata:
12 | labels:
13 | app: nginx
14 | spec:
15 | # This is the import part of this file.
16 | # Here we define our custom scheduler
17 | schedulerName: codeWizardScheduler
18 | containers:
19 | - name: nginx
20 | image: nginx
21 | resources:
22 | limits:
23 | memory: "64Mi"
24 | cpu: "250m"
25 | ports:
26 | - containerPort: 80
--------------------------------------------------------------------------------
/Labs/19-CustomScheduler/resources/Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: codewizard
--------------------------------------------------------------------------------
/Labs/19-CustomScheduler/resources/_KubeSchedulerConfiguration.yaml:
--------------------------------------------------------------------------------
1 | ###
2 | # Sample KubeSchedulerConfiguration
3 | ###
4 | #
5 | # You can configure `kube-scheduler` to run more than one profile.
6 | # Each profile has an associated scheduler name and can have a different
7 | # set of plugins configured in its extension points.
8 |
9 | # With the following sample configuration,
10 | # the scheduler will run with two profiles:
11 | # - default plugins
12 | # - all scoring plugins disabled.
13 |
14 | apiVersion: kubescheduler.config.k8s.io/v1beta1
15 | kind: KubeSchedulerConfiguration
16 | profiles:
17 | - schedulerName: default-scheduler
18 | - schedulerName: no-scoring-scheduler
19 | plugins:
20 | preScore:
21 | disabled:
22 | - name: '*'
23 | score:
24 | disabled:
25 | - name: '*'
--------------------------------------------------------------------------------
/Labs/19-CustomScheduler/resources/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | # Set the default namespace for all the resources
4 | namespace: codewizard
5 |
6 | resources:
7 | - Namespace.yaml
8 | - Deployment.yaml
9 |
--------------------------------------------------------------------------------
/Labs/20-CronJob/K8S/ClusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | # This name will be used in `subjects.name`
5 | name: cronjob-rbac
6 | # This name will be used in `subjects.namespace`
7 | namespace: test
8 | subjects:
9 | - kind: ServiceAccount
10 | # Same value as upper's `metadata.name`
11 | name: default
12 | # Same value as upper's `metadata.namespace`
13 | namespace: test
14 | roleRef:
15 | kind: ClusterRole
16 | name: cluster-admin
17 | apiGroup: rbac.authorization.k8s.io
--------------------------------------------------------------------------------
/Labs/20-CronJob/K8S/CronJob.yaml:
--------------------------------------------------------------------------------
# batch/v1beta1 was removed in Kubernetes 1.25 - use the stable batch/v1 API
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cron-test
  namespace: test
spec:
  schedule: '*/1 * * * *'
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - command:
                - /bin/sh
                - '-c'
                # A folded block scalar (>-) already joins these lines with
                # spaces, so only the `&&` separators are needed. The shell
                # line-continuation backslashes of the original were passed
                # to the shell as literal `\ ` arguments.
                - >-
                  apk update
                  && apk add curl
                  && curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
                  && chmod +x ./kubectl
                  && ./kubectl get pods -A
              image: alpine
              imagePullPolicy: IfNotPresent
              name: alpine-cronjob
          dnsPolicy: ClusterFirst
          restartPolicy: OnFailure
--------------------------------------------------------------------------------
/Labs/20-CronJob/K8S/Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: codewizard
--------------------------------------------------------------------------------
/Labs/20-CronJob/K8S/ServiceAccount.yaml:
--------------------------------------------------------------------------------
1 | # NOTE:
2 | # The service account `default:default` already exists in k8s cluster when the cluster is created.
3 | # In this sample we will create a default user under our namespace
4 | apiVersion: v1
5 | kind: ServiceAccount
6 | metadata:
7 | name: default
8 | namespace: test
9 |
--------------------------------------------------------------------------------
/Labs/20-CronJob/K8S/kustomization.yaml:
--------------------------------------------------------------------------------
1 | namespace: codewizard
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | - ClusterRoleBinding.yaml
6 | - CronJob.yaml
7 | - Namespace.yaml
8 | - ServiceAccount.yaml
9 |
--------------------------------------------------------------------------------
/Labs/20-CronJob/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | # K8S Hands-on
4 | 
5 |
6 | ---
7 |
# CronJob Lab
9 |
10 | ### Pre-Requirements
11 | - K8S cluster - Setting up minikube cluster instruction
12 |
13 | [](https://console.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https://github.com/nirgeier/KubernetesLabs)
14 | **CTRL + click to open in new window**
15 |
16 | ---
17 |
18 | # CronJobs
19 |
--------------------------------------------------------------------------------
/Labs/21-Auditing/demo.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Debug mode
set -x

# Stop minikube if it's running and delete previous data
minikube stop

# Set the minikube home directory
export MINIKUBE_HOME=~/.minikube

# The AuditPolicy file
AUDIT_POLICY_FILE=$MINIKUBE_HOME/files/etc/ssl/certs/Audit-Policy.yaml

# Create the desired folder(s)
mkdir -p resources
mkdir -p logs

# Check to see if we have a pre-defined Audit Policy file.
# (Use POSIX `[ ]` - `[[ ]]` is a bashism and this script runs under /bin/sh)
if [ ! -f $AUDIT_POLICY_FILE ];
then
    # Create the policy file if it does not exist.
    # `cat << EOF > file` writes the heredoc into the file - the original
    # `cat < $AUDIT_POLICY_FILE` tried to *read* the missing file and left
    # the EOF terminator dangling.
    mkdir -p "$(dirname $AUDIT_POLICY_FILE)"
    cat << EOF > $AUDIT_POLICY_FILE
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  - level: Metadata
EOF
fi;

# Start minikube with the AuditPolicy
minikube start \
    --extra-config=apiserver.audit-policy-file=$AUDIT_POLICY_FILE \
    --extra-config=apiserver.audit-log-path=${PWD}/logs/audit.log \
    --extra-config=kubelet.cgroup-driver=systemd \
    --alsologtostderr \
    -v=8

# Test the audit policy.
# Namespace names must be lowercase RFC-1123 labels, so the original
# "TestAudit" would have been rejected by the API server.
kubectl create ns test-audit

# Print out the Audit log
kubectl logs kube-apiserver-minikube -n kube-system | grep audit.k8s.io/v1
--------------------------------------------------------------------------------
/Labs/21-Auditing/resources/Audit-Policy.yaml:
--------------------------------------------------------------------------------
1 | # Log all requests at the Metadata level....
2 | apiVersion: audit.k8s.io/v1
3 | kind: Policy
4 | rules:
5 | - level: Metadata
6 |
--------------------------------------------------------------------------------
/Labs/21-KubeAPI/Dockerfile:
--------------------------------------------------------------------------------
FROM alpine

# Update and install dependencies
RUN apk add --update nodejs npm curl

# Copy the endpoint script
COPY api_query.sh .

# Set the execution bit on the script only
# (the stray trailing "." of the original also chmod-ed the directory)
RUN chmod +x api_query.sh
--------------------------------------------------------------------------------
/Labs/21-KubeAPI/README.md:
--------------------------------------------------------------------------------
1 | ## 
2 |
3 | # K8S Hands-on
4 |
5 | 
6 |
7 | ### Verify pre-requirements
8 |
- **`kubectl`** - short for *Kube Control* - is the CLI for the Kubernetes cluster and is required in order to be able to run the labs.
10 | - In order to install `kubectl` and if required creating a local cluster, please refer to [Kubernetes - Install Tools](https://kubernetes.io/docs/tasks/tools/)
11 |
12 |
13 |
14 | ---
15 | ## Lab Highlights:
16 | - [01. Build the docker image](#01-Build-the-docker-image)
17 | - [01.01. The script which will be used for query K8S API](#0101-The-script-which-will-be-used-for-query-K8S-API)
18 | - [01.02. Build the docker image](#0102-Build-the-docker-image)
19 | - [02. Deploy the Pod to K8S](#02-Deploy-the-Pod-to-K8S)
20 | - [02.01. Run kustomization to deploy](#0201-Run-kustomization-to-deploy)
21 | - [02.02. Query the K8S API](#0202-Query-the-K8S-API)
22 |
23 | ---
24 |
25 |
26 |
27 | ### 01. Build the docker image
28 |
29 | - In order to demonstrate the APi query we will build a custom docker image.
30 | - You can use the pre-build image and skip this step
31 |
32 | ### 01.01. The script which will be used for query K8S API
33 |
34 | - In order to be able to access K8S api from within a pod we will be using the following script:
35 | - `api_query.sh`
36 |
37 | ```sh
38 | #!/bin/sh
39 |
40 | #################################
41 | ## Access the internal K8S API ##
42 | #################################
43 | # Point to the internal API server hostname
44 | API_SERVER_URL=https://kubernetes.default.svc
45 |
46 | # Path to ServiceAccount token
47 | # The service account is mapped by the K8S Api server in the pods
48 | SERVICE_ACCOUNT_FOLDER=/var/run/secrets/kubernetes.io/serviceaccount
49 |
50 | # Read this Pod's namespace if required
51 | # NAMESPACE=$(cat ${SERVICE_ACCOUNT_FOLDER}/namespace)
52 |
53 | # Read the ServiceAccount bearer token
54 | TOKEN=$(cat ${SERVICE_ACCOUNT_FOLDER}/token)
55 |
56 | # Reference the internal certificate authority (CA)
57 | CACERT=${SERVICE_ACCOUNT_FOLDER}/ca.crt
58 |
59 | # Explore the API with TOKEN and the Certificate
60 | curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X GET ${API_SERVER_URL}/api
61 | ```
62 |
63 | ### 01.02. Build the docker image
64 |
65 | - For the pod image we will use the following Dockerfile
66 | - `Dockerfile`
67 |
68 | ```Dockerfile
69 | FROM alpine
70 |
71 | # Update and install dependencies
72 | RUN apk add --update nodejs npm curl
73 |
74 | # Copy the endpoint script
75 | COPY api_query.sh .
76 |
77 | # Set the execution bit
78 | RUN chmod +x api_query.sh .
79 | ```
80 |
81 | ### 02. Deploy the Pod to K8S
82 |
83 | - Once the image is ready we can deploy the image as pod to the cluster
84 | - The required resources are under the k8s folder
85 |
86 | ### 02.01. Run kustomization to deploy
87 |
88 | - Deploy to the cluster
89 |
90 | ```sh
91 | # Remove old content if any
92 | kubectl kustomize k8s | kubectl delete -f -
93 |
94 | # Deploy the content
95 | kubectl kustomize k8s | kubectl apply -f -
96 | ```
97 |
98 | ### 02.02. Query the K8S API
99 |
100 | - Run the following script to verify that the connection to the API is working
101 |
102 | ```sh
103 | # Get the deployment pod name
104 | POD_NAME=$(kubectl get pod -A -l app=monitor-app -o jsonpath="{.items[0].metadata.name}")
105 |
# Print out the logs to verify that the pod is connected to the API
kubectl exec -it -n codewizard $POD_NAME -- sh ./api_query.sh
108 | ```
109 |
110 |
111 |
112 | ---
113 |
114 |
119 |
120 | ---
121 |
122 |
123 | ©CodeWizard LTD
124 |
125 |
126 | 
127 |
128 |
--------------------------------------------------------------------------------
/Labs/21-KubeAPI/api_query.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | #################################
4 | ## Access the internal K8S API ##
5 | #################################
6 | # Point to the internal API server hostname
7 | APISERVER=https://kubernetes.default.svc
8 |
9 | # Path to ServiceAccount token
10 | # The service account is mapped by the K8S Api server in the pods
11 | SERVICE_ACCOUNT_FOLDER=/var/run/secrets/kubernetes.io/serviceaccount
12 |
13 | # Read this Pod's namespace if required
14 | # NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace)
15 |
16 | # Read the ServiceAccount bearer token
17 | TOKEN=$(cat ${SERVICE_ACCOUNT_FOLDER}/token)
18 |
19 | # Reference the internal certificate authority (CA)
20 | CACERT=${SERVICE_ACCOUNT_FOLDER}/ca.crt
21 |
22 | # Explore the API with TOKEN
23 | curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X GET ${APISERVER}/api
--------------------------------------------------------------------------------
/Labs/21-KubeAPI/k8s/Deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: monitor-app
5 | spec:
6 |   selector:
7 |     matchLabels:
8 |       app: monitor-app # Must match the pod template labels below
9 |   template:
10 |     metadata:
11 |       labels:
12 |         app: monitor-app
13 |     spec:
14 |       containers:
15 |         - name: monitor-app
16 |           image: nirgeier/monitor-app # Image built and pushed by script.sh in this lab
17 |           args:
18 |             - sleep # Keep the container alive (24h) so the lab can `kubectl exec` into it
19 |             - "86400"
20 |           resources:
21 |             limits:
22 |               memory: "128Mi"
23 |               cpu: "500m"
24 |           ports:
25 |             - containerPort: 8080
26 | 
--------------------------------------------------------------------------------
/Labs/21-KubeAPI/k8s/Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: codewizard
--------------------------------------------------------------------------------
/Labs/21-KubeAPI/k8s/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | 
4 | namespace: codewizard # Namespace applied to every resource listed below
5 | 
6 | commonLabels: # Label added to all resources and selectors. NOTE(review): deprecated in kustomize v5 in favor of `labels` - confirm tool version
7 |   app: monitor-app
8 | 
9 | resources:
10 |   - Namespace.yaml
11 |   - Deployment.yaml
--------------------------------------------------------------------------------
/Labs/21-KubeAPI/script.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 | 
3 | # Build the monitor-app image
4 | docker build -t nirgeier/monitor-app .
5 | 
6 | # Push the image to Docker Hub
7 | docker push nirgeier/monitor-app
8 | 
9 | # Re-deploy: remove any previous resources, then apply the kustomized manifests
10 | kubectl kustomize k8s | kubectl delete -f -
11 | kubectl kustomize k8s | kubectl apply -f -
12 | 
13 | # Get the deployment pod name
14 | POD_NAME=$(kubectl get pod -A -l app=monitor-app -o jsonpath="{.items[0].metadata.name}")
15 | 
16 | # Run the query script inside the pod to verify that it is connected to the API
17 | kubectl exec -it -n codewizard $POD_NAME sh ./api_query.sh
--------------------------------------------------------------------------------
/Labs/22-Rancher/01-pre-requirements.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ### Install k3d & other tools on MacOS
4 | # brew install k3d kubectl helm
5 |
6 | # In case you are not using mac -
7 | curl -sfL https://get.k3s.io | sh -
8 |
9 | # Install cmctl
10 | # cmctl is a CLI tool that can help you to manage cert-manager resources inside your cluster.
11 | # https://cert-manager.io/docs/usage/cmctl/
12 | OS=$(go env GOOS);
13 | ARCH=$(go env GOARCH);
14 |
15 | ## Create a folder for the installation
16 | mkdir -p cmctl
17 | cd cmctl
18 | ## Download cmctl
19 | ### -> cmctl is a CLI tool that can help you to manage cert-manager resources inside your cluster.
20 | curl -sSL -o cmctl.tar.gz https://github.com/cert-manager/cert-manager/releases/download/v1.8.0/cmctl-$OS-$ARCH.tar.gz
21 | # Extract the gzipped tar archive
22 | tar xzf cmctl.tar.gz
23 | # Add it to the path
24 | sudo mv cmctl /usr/local/bin
25 |
26 | # Delete the installation folder
27 | cd ..
28 | rm -rf cmctl
29 |
30 | ### Install k3d - Will be used later on for Rancher
31 | wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
32 |
33 | ### Add the required helm charts
34 | helm repo add rancher https://releases.rancher.com/server-charts/latest
35 | helm repo add jetstack https://charts.jetstack.io
36 |
37 | # Update the charts repository
38 | helm repo update
39 |
--------------------------------------------------------------------------------
/Labs/22-Rancher/02-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -x
4 | ### Use this (( for tee the output to a file))
5 | ((
6 |
7 | ### Variables
8 | RANCHER_HOST=rancher.k3d.localhost
9 | CLUSTER_NAME=rancher-cluster
10 | CERT_MANAGER_RELEASE=v1.8.0
11 | API_PORT=6555
12 | SSL_PORT=6443
13 |
14 | ### Clear previous content
15 | # docker stop $(docker ps -aq)
16 | # docker rm $(docker ps -aq)
17 |
18 | ### Remove all docker leftovers (containers, network etc)
19 | docker system prune -f
20 |
21 | kubectl delete namespace cattle-system
22 | kubectl delete namespace cert-manager
23 |
24 | ### Remove old cluster in case there are some leftovers
25 | k3d cluster \
26 | delete \
27 | $CLUSTER_NAME
28 |
29 | ### Create a k3d cluster. Use the loadbalancer provided by k3d
30 | k3d cluster \
31 | create \
32 | --wait \
33 | $CLUSTER_NAME \
34 | --servers 1 \
35 | --agents 3 \
36 | --api-port $API_PORT \
37 | --kubeconfig-switch-context \
38 | --port $SSL_PORT:443@loadbalancer
39 | # --k3s-arg "--disable=traefik@server:*" \
40 | # --k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' \
41 | # --k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*' \
42 | # --k3s-arg '--kube-apiserver-arg=feature-gates=EphemeralContainers=false@server:*'
43 |
44 | ### Verify the installation
45 | kubectl cluster-info
46 | k3d cluster list
47 |
48 | ### Add the k3s to the kubeconfig
49 | k3d kubeconfig merge \
50 | $CLUSTER_NAME \
51 | --kubeconfig-switch-context
52 |
53 | ### Create the namespace(s) for Rancher & cert-manager
54 | #kubectl create namespace cattle-system
55 | #kubectl create namespace cert-manager
56 |
57 | ### Install Cert-manager
58 | helm install \
59 | --wait \
60 | --create-namespace \
61 | --set installCRDs=true \
62 | --namespace cert-manager \
63 | --set prometheus.enabled=true \
64 | --version $CERT_MANAGER_RELEASE \
65 | cert-manager jetstack/cert-manager
66 |
67 | ### Verify cert-manager installation
68 | kubectl rollout \
69 | status \
70 | deploy/cert-manager \
71 | --namespace cert-manager
72 |
73 | ### Install rancher
74 | helm install \
75 | --wait \
76 | --create-namespace \
77 | rancher rancher/rancher \
78 | --namespace cattle-system \
79 | --set hostname=$RANCHER_HOST
80 |
81 | ### Verify rancher installation (the helm release above creates a deployment named "rancher" in cattle-system)
82 | kubectl rollout status \
83 |   deploy/rancher \
84 |   -n cattle-system
85 |
86 | ### Check that the cert-manager API is ready
87 | ### We expect to see the following message: 'The cert-manager API is ready'
88 | cmctl check api
89 |
90 | ### Open browser at: https://rancher.k3d.localhost
91 | ######
92 | ###### Important, once on this page type: thisisunsafe
93 | ######
94 |
95 | ### Get the rancher password
96 | kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{"\n"}}'
97 |
98 | ### Verify the cluster nodes
99 | kubectl get nodes
100 |
101 | ### Get the pods status in the background
102 | kubectl get pods -A --watch &
103 |
104 | ) 2>&1 ) | tee install.txt
105 |
--------------------------------------------------------------------------------
/Labs/22-Rancher/03-rancher-airgap.sh:
--------------------------------------------------------------------------------
1 |
2 | ###
3 | ### rke2 release
4 | RKE2_RELEASE="https://github.com/rancher/rke2/releases/download/v1.30.3-rc4%2Brke2r1"
5 |
6 |
7 | # # Setup network
8 | # ip link add dummy0 type dummy
9 | # ip link set dummy0 up
10 | # ip addr add 203.0.113.254/31 dev dummy0
11 | # ip route add default via 203.0.113.255 dev dummy0 metric 1000
12 |
13 | # Install helm
14 | wget https://get.helm.sh/helm-v3.15.3-linux-amd64.tar.gz
15 | tar -zxvf helm-v3.15.3-linux-amd64.tar.gz
16 | mv linux-amd64/helm /usr/local/bin/helm
17 |
18 | # Download the rke2 binary
19 | wget $RKE2_RELEASE/rke2.linux-amd64
20 | chmod +x rke2.linux-amd64
21 | mv rke2.linux-amd64 /usr/local/bin/rke2
22 |
23 | # Get kubectl
24 | curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
25 | chmod +x kubectl
26 | mv kubectl /usr/local/bin/kubectl
27 |
28 | # Download the rke2 installation files
29 | mkdir ~/rke2-artifacts && cd ~/rke2-artifacts/
30 | curl -OLs $RKE2_RELEASE/rke2-images.linux-amd64.tar.zst
31 | curl -OLs $RKE2_RELEASE/rke2.linux-amd64.tar.gz
32 | curl -OLs $RKE2_RELEASE/sha256sum-amd64.txt
33 | curl -sfL https://get.rke2.io --output install.sh
34 |
35 | INSTALL_RKE2_ARTIFACT_PATH=~/rke2-artifacts sh install.sh
36 | # systemctl enable rke2-server.service
37 | # systemctl start rke2-server.service
38 | rke2 server
39 |
40 | # Set the kubeconfig
41 | mkdir -p ~/.kube
42 | ln -s /etc/rancher/rke2/rke2.yaml ~/.kube/config
43 |
44 | # Install k9s
45 | wget https://github.com/derailed/k9s/releases/download/v0.32.5/k9s_Linux_amd64.tar.gz
46 | gunzip k9s_Linux_amd64.tar.gz
47 | tar -xvf k9s_Linux_amd64.tar
48 | chmod +x k9s
49 | mv k9s /usr/local/bin/k9s
50 |
51 | # Set the kubeconfig
52 | mkdir -p ~/.kube/
53 | cp /etc/rancher/rke2/rke2.yaml ~/.kube/config
54 |
55 | # Check the server status
56 | kubectl get pods -A
57 |
58 | # Add the rancher helm repository
59 | helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
60 |
61 | # Download the helm
62 | helm fetch rancher-latest/rancher
63 | helm repo add jetstack https://charts.jetstack.io
64 | helm repo update
65 | helm fetch jetstack/cert-manager
66 |
67 | curl -L -o cert-manager-crd.yaml https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.crds.yaml
68 | kubectl create ns cert-manager
69 | kubectl apply -n cert-manager -f cert-manager-crd.yaml
70 | helm --debug install cert-manager --create-namespace -n cert-manager cert-manager-v1.15.2.tgz
71 |
72 |
73 |
74 | # Set up the registry for rancher
75 | # cat << "EOF" > /etc/rancher/rke2/registries.yaml
76 | # mirrors:
77 | # docker.io:
78 | # endpoint:
79 | # - "https://globalrepo.pe.jfrog.io/remote-docker-hub"
80 | # EOF
81 |
82 | # docker pull quay.io/jetstack/cert-manager-ctl
83 | # docker pull quay.io/jetstack/cert-manager-acmesolver
84 | # docker pull quay.io/jetstack/cert-manager-cainjector
85 | # docker pull quay.io/jetstack/cert-manager-webhook
86 |
87 | # helm repo add jetstack https://charts.jetstack.io
88 |
89 |
--------------------------------------------------------------------------------
/Labs/23-MetricServer/kubelet-config-1.23.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: kubelet-config-1.23
5 | namespace: kube-system
6 | data:
7 | kubelet: |
8 | apiVersion: kubelet.config.k8s.io/v1beta1
9 | authentication:
10 | anonymous:
11 | enabled: false
12 | webhook:
13 | cacheTTL: 0s
14 | enabled: true
15 | x509:
16 | clientCAFile: /var/lib/minikube/certs/ca.crt
17 | authorization:
18 | mode: Webhook
19 | webhook:
20 | cacheAuthorizedTTL: 0s
21 | cacheUnauthorizedTTL: 0s
22 | cgroupDriver: systemd
23 | clusterDNS:
24 | - 10.96.0.10
25 | clusterDomain: cluster.local
26 | cpuManagerReconcilePeriod: 0s
27 | evictionHard:
28 | imagefs.available: 0%
29 | nodefs.available: 0%
30 | nodefs.inodesFree: 0%
31 | evictionPressureTransitionPeriod: 0s
32 | failSwapOn: false
33 | fileCheckFrequency: 0s
34 | healthzBindAddress: 127.0.0.1
35 | healthzPort: 10248
36 | httpCheckFrequency: 0s
37 | imageGCHighThresholdPercent: 100
38 | imageMinimumGCAge: 0s
39 | serverTLSBootstrap: true
40 | kind: KubeletConfiguration
41 | logging:
42 | flushFrequency: 0
43 | options:
44 | json:
45 | infoBufferSize: "0"
46 | verbosity: 0
47 | memorySwap: {}
48 | nodeStatusReportFrequency: 0s
49 | nodeStatusUpdateFrequency: 0s
50 | rotateCertificates: true
51 | runtimeRequestTimeout: 0s
52 | shutdownGracePeriod: 0s
53 | shutdownGracePeriodCriticalPods: 0s
54 | staticPodPath: /etc/kubernetes/manifests
55 | streamingConnectionIdleTimeout: 0s
56 | syncFrequency: 0s
57 | volumeStatsAggPeriod: 0s
58 |
--------------------------------------------------------------------------------
/Labs/23-MetricServer/runMe.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Start minikube
4 | minikube start
5 |
6 | # Download the metric-server resources
7 | wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
8 |
9 | # Apply metric-server
10 | # We know that it will not work under minikube so we will need to fix it
11 | kubectl apply -f components.yaml
12 |
13 | # Check if the metrics-server is working
14 | # We expect to get the following error
15 | ### >>> Error from server (ServiceUnavailable): the server is currently unable to handle the request (get nodes.metrics.k8s.io)
16 | kubectl top nodes
17 |
18 | kubectl get deployment metrics-server -n kube-system
19 | # NAME READY UP-TO-DATE AVAILABLE AGE
20 | # metrics-server 0/1 1 0 71s
21 |
22 | # View the error
23 | ## We should see error like this:
24 | ## "Failed to scrape node" err="Get \"https://192.168.49.2:10250/metrics/resource\":
25 | ## x509: cannot validate certificate for 192.168.49.2 because it doesn't contain any IP SANs" node="minikube"
26 | kubectl logs -n kube-system deploy/metrics-server
27 |
28 | ###
29 | ### Fixing the error
30 | ###
31 | # We need to fix the tls before we can install the metric-server
32 |
33 | # Get the kubelet configuration
34 | KUBELET_CONFIG=$(kubectl get configmap -n kube-system --no-headers -o custom-columns=":metadata.name" | grep kubelet-config)
35 | kubectl edit configmap $KUBELET_CONFIG -n kube-system
36 |
37 | ## Add to the following configuration under the `kubelet` ConfigMap
38 | serverTLSBootstrap: true
39 |
40 | # We also need to fix the metric server and add the following line under the metric-server Deploymet
41 |
42 | # Edit the deploymnet and add the required lines under the spec
43 | ###
44 | ### vi components.yaml (~line 140)
45 | ###
46 | ### spec:
47 | ### containers:
48 | ### - args
49 | - --kubelet-insecure-tls
50 |
51 | # Stop and start minikube
52 | minikube stop && minikube start
53 |
54 | # Uninstall and re-install the metrics-server
55 | kubectl delete -f components.yaml
56 | kubectl apply -f components.yaml
57 |
58 | # Verify that now the metric server is working
59 | kubectl top nodes
60 | kubectl top pods -A
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Binaries for programs and plugins
3 | *.exe
4 | *.exe~
5 | *.dll
6 | *.so
7 | *.dylib
8 | bin
9 |
10 | # editor and IDE paraphernalia
11 | .idea
12 | *.swp
13 | *.swo
14 | *~
15 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/Dockerfile:
--------------------------------------------------------------------------------
1 | # Helm-based operator image - no binary is built here; we layer the watch config and helm charts onto the upstream helm-operator base
2 | FROM quay.io/operator-framework/helm-operator:v1.23.0
3 | 
4 | ENV HOME=/opt/helm
5 | COPY watches.yaml ${HOME}/watches.yaml
6 | COPY helm-charts ${HOME}/helm-charts
7 | WORKDIR ${HOME}
8 | 
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/PROJECT:
--------------------------------------------------------------------------------
1 | domain: codewizard.co.il
2 | layout:
3 | - helm.sdk.operatorframework.io/v1
4 | plugins:
5 | manifests.sdk.operatorframework.io/v2: {}
6 | scorecard.sdk.operatorframework.io/v2: {}
7 | projectName: nginx-operator
8 | resources:
9 | - api:
10 | crdVersion: v1
11 | namespaced: true
12 | domain: codewizard.co.il
13 | group: demo
14 | kind: Nginx
15 | version: v1alpha1
16 | version: "3"
17 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/crd/bases/demo.codewizard.co.il_nginxes.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: nginxes.demo.codewizard.co.il
6 | spec:
7 | group: demo.codewizard.co.il
8 | names:
9 | kind: Nginx
10 | listKind: NginxList
11 | plural: nginxes
12 | singular: nginx
13 | scope: Namespaced
14 | versions:
15 | - name: v1alpha1
16 | schema:
17 | openAPIV3Schema:
18 | description: Nginx is the Schema for the nginxes API
19 | properties:
20 | apiVersion:
21 | description: 'APIVersion defines the versioned schema of this representation
22 | of an object. Servers should convert recognized schemas to the latest
23 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
24 | type: string
25 | kind:
26 | description: 'Kind is a string value representing the REST resource this
27 | object represents. Servers may infer this from the endpoint the client
28 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
29 | type: string
30 | metadata:
31 | type: object
32 | spec:
33 | description: Spec defines the desired state of Nginx
34 | type: object
35 | x-kubernetes-preserve-unknown-fields: true
36 | status:
37 | description: Status defines the observed state of Nginx
38 | type: object
39 | x-kubernetes-preserve-unknown-fields: true
40 | type: object
41 | served: true
42 | storage: true
43 | subresources:
44 | status: {}
45 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/crd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # This kustomization.yaml is not intended to be run by itself,
2 | # since it depends on service name and namespace that are out of this kustomize package.
3 | # It should be run by config/default
4 | resources:
5 | - bases/demo.codewizard.co.il_nginxes.yaml
6 | #+kubebuilder:scaffold:crdkustomizeresource
7 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # Adds namespace to all resources.
2 | namespace: nginx-operator-system
3 |
4 | # Value of this field is prepended to the
5 | # names of all resources, e.g. a deployment named
6 | # "wordpress" becomes "alices-wordpress".
7 | # Note that it should also match with the prefix (text before '-') of the namespace
8 | # field above.
9 | namePrefix: nginx-operator-
10 |
11 | # Labels to add to all resources and selectors.
12 | #labels:
13 | #- includeSelectors: true
14 | # pairs:
15 | # someName: someValue
16 |
17 | resources:
18 | - ../crd
19 | - ../rbac
20 | - ../manager
21 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
22 | #- ../prometheus
23 |
24 | patchesStrategicMerge:
25 | # Protect the /metrics endpoint by putting it behind auth.
26 | # If you want your controller-manager to expose the /metrics
27 | # endpoint w/o any authn/z, please comment the following line.
28 | - manager_auth_proxy_patch.yaml
29 |
30 | # Mount the controller config file for loading manager configurations
31 | # through a ComponentConfig type
32 | #- manager_config_patch.yaml
33 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/default/manager_auth_proxy_patch.yaml:
--------------------------------------------------------------------------------
1 | # This patch inject a sidecar container which is a HTTP proxy for the
2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 | name: controller-manager
7 | namespace: system
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: kube-rbac-proxy
13 | securityContext:
14 | allowPrivilegeEscalation: false
15 | capabilities:
16 | drop:
17 | - "ALL"
18 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
19 | args:
20 | - "--secure-listen-address=0.0.0.0:8443"
21 | - "--upstream=http://127.0.0.1:8080/"
22 | - "--logtostderr=true"
23 | - "--v=0"
24 | ports:
25 | - containerPort: 8443
26 | protocol: TCP
27 | name: https
28 | resources:
29 | limits:
30 | cpu: 500m
31 | memory: 128Mi
32 | requests:
33 | cpu: 5m
34 | memory: 64Mi
35 | - name: manager
36 | args:
37 | - "--health-probe-bind-address=:8081"
38 | - "--metrics-bind-address=127.0.0.1:8080"
39 | - "--leader-elect"
40 | - "--leader-election-id=nginx-operator"
41 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/default/manager_config_patch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: controller-manager
5 | namespace: system
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: manager
11 | args:
12 | - "--config=controller_manager_config.yaml"
13 | volumeMounts:
14 | - name: manager-config
15 | mountPath: /controller_manager_config.yaml
16 | subPath: controller_manager_config.yaml
17 | volumes:
18 | - name: manager-config
19 | configMap:
20 | name: manager-config
21 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/manager/controller_manager_config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
2 | kind: ControllerManagerConfig
3 | health:
4 | healthProbeBindAddress: :8081
5 | metrics:
6 | bindAddress: 127.0.0.1:8080
7 |
8 | leaderElection:
9 | leaderElect: true
10 | resourceName: 811c9dc5.codewizard.co.il
11 | # leaderElectionReleaseOnCancel defines if the leader should step down volume
12 | # when the Manager ends. This requires the binary to immediately end when the
13 | # Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
14 | # speeds up voluntary leader transitions as the new leader don't have to wait
15 | # LeaseDuration time first.
16 | # In the default scaffold provided, the program ends immediately after
17 | # the manager stops, so would be fine to enable this option. However,
18 | # if you are doing or is intended to do any operation such as perform cleanups
19 | # after the manager stops then its usage might be unsafe.
20 | # leaderElectionReleaseOnCancel: true
21 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/manager/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - manager.yaml
3 |
4 | generatorOptions:
5 | disableNameSuffixHash: true
6 |
7 | configMapGenerator:
8 | - files:
9 | - controller_manager_config.yaml
10 | name: manager-config
11 | apiVersion: kustomize.config.k8s.io/v1beta1
12 | kind: Kustomization
13 | images:
14 | - name: controller
15 | newName: nirgeier/helm_operator
16 | newTag: latest
17 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/manager/manager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | control-plane: controller-manager
6 | name: system
7 | ---
8 | apiVersion: apps/v1
9 | kind: Deployment
10 | metadata:
11 | name: controller-manager
12 | namespace: system
13 | labels:
14 | control-plane: controller-manager
15 | spec:
16 | selector:
17 | matchLabels:
18 | control-plane: controller-manager
19 | replicas: 1
20 | template:
21 | metadata:
22 | annotations:
23 | kubectl.kubernetes.io/default-container: manager
24 | labels:
25 | control-plane: controller-manager
26 | spec:
27 | securityContext:
28 | runAsNonRoot: true
29 | # TODO(user): For common cases that do not require escalating privileges
30 | # it is recommended to ensure that all your Pods/Containers are restrictive.
31 | # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
32 | # Please uncomment the following code if your project does NOT have to work on old Kubernetes
33 | # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
34 | # seccompProfile:
35 | # type: RuntimeDefault
36 | containers:
37 |       - args:
38 |         - --leader-elect
39 |         - --leader-election-id=nginx-operator
40 |         image: nirgeier/helm_operator # was "nirgeier:helm_operator" - an invalid ref (image "nirgeier", tag "helm_operator"); repo/name matches the kustomization newName
41 | name: manager
42 | securityContext:
43 | allowPrivilegeEscalation: false
44 | capabilities:
45 | drop:
46 | - "ALL"
47 | livenessProbe:
48 | httpGet:
49 | path: /healthz
50 | port: 8081
51 | initialDelaySeconds: 15
52 | periodSeconds: 20
53 | readinessProbe:
54 | httpGet:
55 | path: /readyz
56 | port: 8081
57 | initialDelaySeconds: 5
58 | periodSeconds: 10
59 | # TODO(user): Configure the resources accordingly based on the project requirements.
60 | # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
61 | resources:
62 | limits:
63 | cpu: 500m
64 | memory: 128Mi
65 | requests:
66 | cpu: 10m
67 | memory: 64Mi
68 | serviceAccountName: controller-manager
69 | terminationGracePeriodSeconds: 10
70 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/manifests/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # These resources constitute the fully configured set of manifests
2 | # used to generate the 'manifests/' directory in a bundle.
3 | resources:
4 | - bases/nginx-operator.clusterserviceversion.yaml
5 | - ../default
6 | - ../samples
7 | - ../scorecard
8 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/prometheus/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - monitor.yaml
3 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/prometheus/monitor.yaml:
--------------------------------------------------------------------------------
1 |
2 | # Prometheus Monitor Service (Metrics)
3 | apiVersion: monitoring.coreos.com/v1
4 | kind: ServiceMonitor
5 | metadata:
6 | labels:
7 | control-plane: controller-manager
8 | name: controller-manager-metrics-monitor
9 | namespace: system
10 | spec:
11 | endpoints:
12 | - path: /metrics
13 | port: https
14 | scheme: https
15 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
16 | tlsConfig:
17 | insecureSkipVerify: true
18 | selector:
19 | matchLabels:
20 | control-plane: controller-manager
21 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/auth_proxy_client_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: metrics-reader
5 | rules:
6 | - nonResourceURLs:
7 | - "/metrics"
8 | verbs:
9 | - get
10 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/auth_proxy_role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: proxy-role
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
18 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/auth_proxy_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: proxy-rolebinding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: proxy-role
9 | subjects:
10 | - kind: ServiceAccount
11 | name: controller-manager
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/auth_proxy_service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | control-plane: controller-manager
6 | name: controller-manager-metrics-service
7 | namespace: system
8 | spec:
9 | ports:
10 | - name: https
11 | port: 8443
12 | protocol: TCP
13 | targetPort: https
14 | selector:
15 | control-plane: controller-manager
16 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | # All RBAC will be applied under this service account in
3 | # the deployment namespace. You may comment out this resource
4 | # if your manager will use a service account that exists at
5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding
6 | # subjects if changing service account names.
7 | - service_account.yaml
8 | - role.yaml
9 | - role_binding.yaml
10 | - leader_election_role.yaml
11 | - leader_election_role_binding.yaml
12 | # Comment the following 4 lines if you want to disable
13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy)
14 | # which protects your /metrics endpoint.
15 | - auth_proxy_service.yaml
16 | - auth_proxy_role.yaml
17 | - auth_proxy_role_binding.yaml
18 | - auth_proxy_client_clusterrole.yaml
19 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/leader_election_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions to do leader election.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | name: leader-election-role
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - configmaps
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - create
16 | - update
17 | - patch
18 | - delete
19 | - apiGroups:
20 | - coordination.k8s.io
21 | resources:
22 | - leases
23 | verbs:
24 | - get
25 | - list
26 | - watch
27 | - create
28 | - update
29 | - patch
30 | - delete
31 | - apiGroups:
32 | - ""
33 | resources:
34 | - events
35 | verbs:
36 | - create
37 | - patch
38 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/leader_election_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: leader-election-rolebinding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: Role
8 | name: leader-election-role
9 | subjects:
10 | - kind: ServiceAccount
11 | name: controller-manager
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/nginx_editor_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to edit nginxes.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: nginx-editor-role
6 | rules:
7 | - apiGroups:
8 | - demo.codewizard.co.il
9 | resources:
10 | - nginxes
11 | verbs:
12 | - create
13 | - delete
14 | - get
15 | - list
16 | - patch
17 | - update
18 | - watch
19 | - apiGroups:
20 | - demo.codewizard.co.il
21 | resources:
22 | - nginxes/status
23 | verbs:
24 | - get
25 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/nginx_viewer_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to view nginxes.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: nginx-viewer-role
6 | rules:
7 | - apiGroups:
8 | - demo.codewizard.co.il
9 | resources:
10 | - nginxes
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - apiGroups:
16 | - demo.codewizard.co.il
17 | resources:
18 | - nginxes/status
19 | verbs:
20 | - get
21 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: manager-role
5 | rules:
6 | ##
7 | ## Base operator rules
8 | ##
9 | # We need to get namespaces so the operator can read namespaces to ensure they exist
10 | - apiGroups:
11 | - ""
12 | resources:
13 | - namespaces
14 | verbs:
15 | - get
16 | # We need to manage Helm release secrets
17 | - apiGroups:
18 | - ""
19 | resources:
20 | - secrets
21 | verbs:
22 | - "*"
23 | # We need to create events on CRs about things happening during reconciliation
24 | - apiGroups:
25 | - ""
26 | resources:
27 | - events
28 | verbs:
29 | - create
30 |
31 | ##
32 | ## Rules for demo.codewizard.co.il/v1alpha1, Kind: Nginx
33 | ##
34 | - apiGroups:
35 | - demo.codewizard.co.il
36 | resources:
37 | - nginxes
38 | - nginxes/status
39 | - nginxes/finalizers
40 | verbs:
41 | - create
42 | - delete
43 | - get
44 | - list
45 | - patch
46 | - update
47 | - watch
48 | - apiGroups:
49 | - ""
50 | resources:
51 | - pods
52 | - services
53 | - services/finalizers
54 | - endpoints
55 | - persistentvolumeclaims
56 | - events
57 | - configmaps
58 | - secrets
59 | verbs:
60 | - create
61 | - delete
62 | - get
63 | - list
64 | - patch
65 | - update
66 | - watch
67 | - apiGroups:
68 | - apps
69 | resources:
70 | - deployments
71 | - daemonsets
72 | - replicasets
73 | - statefulsets
74 | verbs:
75 | - create
76 | - delete
77 | - get
78 | - list
79 | - patch
80 | - update
81 | - watch
82 |
83 | #+kubebuilder:scaffold:rules
84 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: manager-rolebinding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: manager-role
9 | subjects:
10 | - kind: ServiceAccount
11 | name: controller-manager
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/rbac/service_account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: controller-manager
5 | namespace: system
6 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/samples/demo_v1alpha1_nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: demo.codewizard.co.il/v1alpha1
2 | kind: Nginx
3 | metadata:
4 | name: nginx-sample
5 | spec:
6 | # Default values copied from /helm-charts/nginx/values.yaml
7 | affinity: {}
8 | autoscaling:
9 | enabled: false
10 | maxReplicas: 100
11 | minReplicas: 1
12 | targetCPUUtilizationPercentage: 80
13 | fullnameOverride: ""
14 | image:
15 | pullPolicy: IfNotPresent
16 | repository: nginx
17 | tag: ""
18 | imagePullSecrets: []
19 | ingress:
20 | annotations: {}
21 | className: ""
22 | enabled: false
23 | hosts:
24 | - host: chart-example.local
25 | paths:
26 | - path: /
27 | pathType: ImplementationSpecific
28 | tls: []
29 | nameOverride: ""
30 | nodeSelector: {}
31 | podAnnotations: {}
32 | podSecurityContext: {}
33 | replicaCount: 3 # <----- Our desired value
34 | resources: {}
35 | securityContext: {}
36 | service:
37 | port: 8888 # <----- Our desired value
38 | type: ClusterIP
39 | serviceAccount:
40 | annotations: {}
41 | create: true
42 | name: ""
43 | tolerations: []
44 |
45 |
46 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/samples/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ## Append samples you want in your CSV to this file as resources ##
2 | resources:
3 | - demo_v1alpha1_nginx.yaml
4 | #+kubebuilder:scaffold:manifestskustomizesamples
5 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/scorecard/bases/config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: scorecard.operatorframework.io/v1alpha3
2 | kind: Configuration
3 | metadata:
4 | name: config
5 | stages:
6 | - parallel: true
7 | tests: []
8 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/scorecard/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - bases/config.yaml
3 | patchesJson6902:
4 | - path: patches/basic.config.yaml
5 | target:
6 | group: scorecard.operatorframework.io
7 | version: v1alpha3
8 | kind: Configuration
9 | name: config
10 | - path: patches/olm.config.yaml
11 | target:
12 | group: scorecard.operatorframework.io
13 | version: v1alpha3
14 | kind: Configuration
15 | name: config
16 | #+kubebuilder:scaffold:patchesJson6902
17 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/scorecard/patches/basic.config.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: /stages/0/tests/-
3 | value:
4 | entrypoint:
5 | - scorecard-test
6 | - basic-check-spec
7 | image: quay.io/operator-framework/scorecard-test:v1.23.0
8 | labels:
9 | suite: basic
10 | test: basic-check-spec-test
11 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/config/scorecard/patches/olm.config.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: /stages/0/tests/-
3 | value:
4 | entrypoint:
5 | - scorecard-test
6 | - olm-bundle-validation
7 | image: quay.io/operator-framework/scorecard-test:v1.23.0
8 | labels:
9 | suite: olm
10 | test: olm-bundle-validation-test
11 | - op: add
12 | path: /stages/0/tests/-
13 | value:
14 | entrypoint:
15 | - scorecard-test
16 | - olm-crds-have-validation
17 | image: quay.io/operator-framework/scorecard-test:v1.23.0
18 | labels:
19 | suite: olm
20 | test: olm-crds-have-validation-test
21 | - op: add
22 | path: /stages/0/tests/-
23 | value:
24 | entrypoint:
25 | - scorecard-test
26 | - olm-crds-have-resources
27 | image: quay.io/operator-framework/scorecard-test:v1.23.0
28 | labels:
29 | suite: olm
30 | test: olm-crds-have-resources-test
31 | - op: add
32 | path: /stages/0/tests/-
33 | value:
34 | entrypoint:
35 | - scorecard-test
36 | - olm-spec-descriptors
37 | image: quay.io/operator-framework/scorecard-test:v1.23.0
38 | labels:
39 | suite: olm
40 | test: olm-spec-descriptors-test
41 | - op: add
42 | path: /stages/0/tests/-
43 | value:
44 | entrypoint:
45 | - scorecard-test
46 | - olm-status-descriptors
47 | image: quay.io/operator-framework/scorecard-test:v1.23.0
48 | labels:
49 | suite: olm
50 | test: olm-status-descriptors-test
51 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | appVersion: 1.16.0
3 | description: A Helm chart for Kubernetes
4 | name: nginx
5 | type: application
6 | version: 0.1.0
7 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | 1. Get the application URL by running these commands:
2 | {{- if .Values.ingress.enabled }}
3 | {{- range $host := .Values.ingress.hosts }}
4 | {{- range .paths }}
5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
6 | {{- end }}
7 | {{- end }}
8 | {{- else if contains "NodePort" .Values.service.type }}
9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nginx.fullname" . }})
10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
11 | echo http://$NODE_IP:$NODE_PORT
12 | {{- else if contains "LoadBalancer" .Values.service.type }}
13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available.
14 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nginx.fullname" . }}'
15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nginx.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
16 | echo http://$SERVICE_IP:{{ .Values.service.port }}
17 | {{- else if contains "ClusterIP" .Values.service.type }}
18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "nginx.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
19 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
20 | echo "Visit http://127.0.0.1:8080 to use your application"
21 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
22 | {{- end }}
23 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "nginx.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "nginx.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "nginx.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "nginx.labels" -}}
37 | helm.sh/chart: {{ include "nginx.chart" . }}
38 | {{ include "nginx.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "nginx.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "nginx.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Create the name of the service account to use
55 | */}}
56 | {{- define "nginx.serviceAccountName" -}}
57 | {{- if .Values.serviceAccount.create }}
58 | {{- default (include "nginx.fullname" .) .Values.serviceAccount.name }}
59 | {{- else }}
60 | {{- default "default" .Values.serviceAccount.name }}
61 | {{- end }}
62 | {{- end }}
63 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "nginx.fullname" . }}
5 | labels:
6 | {{- include "nginx.labels" . | nindent 4 }}
7 | spec:
8 | {{- if not .Values.autoscaling.enabled }}
9 | replicas: {{ .Values.replicaCount }}
10 | {{- end }}
11 | selector:
12 | matchLabels:
13 | {{- include "nginx.selectorLabels" . | nindent 6 }}
14 | template:
15 | metadata:
16 | {{- with .Values.podAnnotations }}
17 | annotations:
18 | {{- toYaml . | nindent 8 }}
19 | {{- end }}
20 | labels:
21 | {{- include "nginx.selectorLabels" . | nindent 8 }}
22 | spec:
23 | {{- with .Values.imagePullSecrets }}
24 | imagePullSecrets:
25 | {{- toYaml . | nindent 8 }}
26 | {{- end }}
27 | serviceAccountName: {{ include "nginx.serviceAccountName" . }}
28 | securityContext:
29 | {{- toYaml .Values.podSecurityContext | nindent 8 }}
30 | containers:
31 | - name: {{ .Chart.Name }}
32 | securityContext:
33 | {{- toYaml .Values.securityContext | nindent 12 }}
34 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
35 | imagePullPolicy: {{ .Values.image.pullPolicy }}
36 | ports:
37 | - name: http
38 | containerPort: 80
39 | protocol: TCP
40 | livenessProbe:
41 | httpGet:
42 | path: /
43 | port: http
44 | readinessProbe:
45 | httpGet:
46 | path: /
47 | port: http
48 | resources:
49 | {{- toYaml .Values.resources | nindent 12 }}
50 | {{- with .Values.nodeSelector }}
51 | nodeSelector:
52 | {{- toYaml . | nindent 8 }}
53 | {{- end }}
54 | {{- with .Values.affinity }}
55 | affinity:
56 | {{- toYaml . | nindent 8 }}
57 | {{- end }}
58 | {{- with .Values.tolerations }}
59 | tolerations:
60 | {{- toYaml . | nindent 8 }}
61 | {{- end }}
62 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/templates/hpa.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.autoscaling.enabled }}
2 | apiVersion: autoscaling/v2beta1
3 | kind: HorizontalPodAutoscaler
4 | metadata:
5 | name: {{ include "nginx.fullname" . }}
6 | labels:
7 | {{- include "nginx.labels" . | nindent 4 }}
8 | spec:
9 | scaleTargetRef:
10 | apiVersion: apps/v1
11 | kind: Deployment
12 | name: {{ include "nginx.fullname" . }}
13 | minReplicas: {{ .Values.autoscaling.minReplicas }}
14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }}
15 | metrics:
16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
17 | - type: Resource
18 | resource:
19 | name: cpu
20 | targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
21 | {{- end }}
22 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
23 | - type: Resource
24 | resource:
25 | name: memory
26 | targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
27 | {{- end }}
28 | {{- end }}
29 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.ingress.enabled -}}
2 | {{- $fullName := include "nginx.fullname" . -}}
3 | {{- $svcPort := .Values.service.port -}}
4 | {{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
5 | {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
6 | {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
7 | {{- end }}
8 | {{- end }}
9 | {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
10 | apiVersion: networking.k8s.io/v1
11 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
12 | apiVersion: networking.k8s.io/v1beta1
13 | {{- else -}}
14 | apiVersion: extensions/v1beta1
15 | {{- end }}
16 | kind: Ingress
17 | metadata:
18 | name: {{ $fullName }}
19 | labels:
20 | {{- include "nginx.labels" . | nindent 4 }}
21 | {{- with .Values.ingress.annotations }}
22 | annotations:
23 | {{- toYaml . | nindent 4 }}
24 | {{- end }}
25 | spec:
26 | {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
27 | ingressClassName: {{ .Values.ingress.className }}
28 | {{- end }}
29 | {{- if .Values.ingress.tls }}
30 | tls:
31 | {{- range .Values.ingress.tls }}
32 | - hosts:
33 | {{- range .hosts }}
34 | - {{ . | quote }}
35 | {{- end }}
36 | secretName: {{ .secretName }}
37 | {{- end }}
38 | {{- end }}
39 | rules:
40 | {{- range .Values.ingress.hosts }}
41 | - host: {{ .host | quote }}
42 | http:
43 | paths:
44 | {{- range .paths }}
45 | - path: {{ .path }}
46 | {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
47 | pathType: {{ .pathType }}
48 | {{- end }}
49 | backend:
50 | {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
51 | service:
52 | name: {{ $fullName }}
53 | port:
54 | number: {{ $svcPort }}
55 | {{- else }}
56 | serviceName: {{ $fullName }}
57 | servicePort: {{ $svcPort }}
58 | {{- end }}
59 | {{- end }}
60 | {{- end }}
61 | {{- end }}
62 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "nginx.fullname" . }}
5 | labels:
6 | {{- include "nginx.labels" . | nindent 4 }}
7 | spec:
8 | type: {{ .Values.service.type }}
9 | ports:
10 | - port: {{ .Values.service.port }}
11 | targetPort: http
12 | protocol: TCP
13 | name: http
14 | selector:
15 | {{- include "nginx.selectorLabels" . | nindent 4 }}
16 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "nginx.serviceAccountName" . }}
6 | labels:
7 | {{- include "nginx.labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/templates/tests/test-connection.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ include "nginx.fullname" . }}-test-connection"
5 | labels:
6 | {{- include "nginx.labels" . | nindent 4 }}
7 | annotations:
8 | "helm.sh/hook": test
9 | spec:
10 | containers:
11 | - name: wget
12 | image: busybox
13 | command: ['wget']
14 | args: ['{{ include "nginx.fullname" . }}:{{ .Values.service.port }}']
15 | restartPolicy: Never
16 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/helm-charts/nginx/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for nginx.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | replicaCount: 1
6 |
7 | image:
8 | repository: nginx
9 | pullPolicy: IfNotPresent
10 | # Overrides the image tag whose default is the chart appVersion.
11 | tag: ""
12 |
13 | imagePullSecrets: []
14 | nameOverride: ""
15 | fullnameOverride: ""
16 |
17 | serviceAccount:
18 | # Specifies whether a service account should be created
19 | create: true
20 | # Annotations to add to the service account
21 | annotations: {}
22 | # The name of the service account to use.
23 | # If not set and create is true, a name is generated using the fullname template
24 | name: ""
25 |
26 | podAnnotations: {}
27 |
28 | podSecurityContext: {}
29 | # fsGroup: 2000
30 |
31 | securityContext: {}
32 | # capabilities:
33 | # drop:
34 | # - ALL
35 | # readOnlyRootFilesystem: true
36 | # runAsNonRoot: true
37 | # runAsUser: 1000
38 |
39 | service:
40 | type: ClusterIP
41 | port: 80
42 |
43 | ingress:
44 | enabled: false
45 | className: ""
46 | annotations: {}
47 | # kubernetes.io/ingress.class: nginx
48 | # kubernetes.io/tls-acme: "true"
49 | hosts:
50 | - host: chart-example.local
51 | paths:
52 | - path: /
53 | pathType: ImplementationSpecific
54 | tls: []
55 | # - secretName: chart-example-tls
56 | # hosts:
57 | # - chart-example.local
58 |
59 | resources: {}
60 | # We usually recommend not to specify default resources and to leave this as a conscious
61 | # choice for the user. This also increases chances charts run on environments with little
62 | # resources, such as Minikube. If you do want to specify resources, uncomment the following
63 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
64 | # limits:
65 | # cpu: 100m
66 | # memory: 128Mi
67 | # requests:
68 | # cpu: 100m
69 | # memory: 128Mi
70 |
71 | autoscaling:
72 | enabled: false
73 | minReplicas: 1
74 | maxReplicas: 100
75 | targetCPUUtilizationPercentage: 80
76 | # targetMemoryUtilizationPercentage: 80
77 |
78 | nodeSelector: {}
79 |
80 | tolerations: []
81 |
82 | affinity: {}
83 |
--------------------------------------------------------------------------------
/Labs/24-HelmOperator/nginx-operator/watches.yaml:
--------------------------------------------------------------------------------
1 | # Use the 'create api' subcommand to add watches to this file.
2 | - group: demo.codewizard.co.il
3 | version: v1alpha1
4 | kind: Nginx
5 | chart: helm-charts/nginx
6 | #+kubebuilder:scaffold:watch
7 |
--------------------------------------------------------------------------------
/Labs/25-kubebuilder/runMe.sh:
--------------------------------------------------------------------------------
1 | kubebuilder init \
2 | --domain my.domain \
3 | --repo my.domain/guestbook \
4 | --plugins=kustomize/v2-alpha
5 |
--------------------------------------------------------------------------------
/Labs/26-k9s/runMe.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
 3 | # Install k9s
4 | curl -sS https://webinstall.dev/k9s | bash
5 |
6 |
 7 | # Shortcuts
8 |
9 | # List all api-resources
10 | CTRL + A
11 |
12 | # switch resource type
13 | :
14 |
15 | # Filters:
16 | /
17 |
--------------------------------------------------------------------------------
/Labs/27-krew/runMe.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | (
3 | set -x; cd "$(mktemp -d)" &&
4 | OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
5 | ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
6 | KREW="krew-${OS}_${ARCH}" &&
7 | curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
8 | tar zxvf "${KREW}.tar.gz" &&
9 | ./"${KREW}" install krew
10 | )
11 |
12 | export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"
13 |
14 | kubectl krew update
15 | kubectl krew install \
16 | access-matrix \
17 | blame \
18 | count \
19 | debug-shell \
20 | get-all \
21 | ingress-rule \
22 | minio \
23 | modify-secret \
24 | node-admin \
25 | node-shell \
26 | pod-inspect \
27 | resource-capacity \
28 | sshd \
29 | view-cert \
30 | view-secret \
31 | view-utilization
32 |
33 |
--------------------------------------------------------------------------------
/Labs/28-kubeapps/runMe.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install the helm package
4 | helm repo add bitnami https://charts.bitnami.com/bitnami
5 | helm repo update
6 | helm install -n kubeapps --create-namespace kubeapps bitnami/kubeapps
7 |
8 | # Create the namespace
9 | kubectl create ns codewizard
10 |
11 | # Create the required service account
12 | kubectl create \
13 | serviceaccount kubeapps-operator \
14 | -n codewizard
15 |
16 | # Create the required role binding
17 | kubectl create \
18 | clusterrolebinding kubeapps-operator \
19 | --serviceaccount=codewizard:kubeapps-operator \
20 | --clusterrole=cluster-admin
21 |
22 | # Apply the secret
23 | # It will generate the token for us
24 | cat < =
46 |
47 | # Example adding label to node
48 | kubectl label node1 isProd=false
49 |
50 | # Example adding label to node
51 | kubectl label node1 isProd=false --overwrite
52 |
53 | # remove a label
54 | kubectl label node1 isProd- # The [-] sign will delete the label
55 | ```
56 |
57 | # Set default namespace
58 |
59 | ```sh
60 | kubectl config \
61 | set-context $(kubectl config current-context) \
62 | --namespace=codewizard
63 | ```
64 |
65 | # Get Unused ConfigMaps
66 |
67 | ```sh
68 | volumesCM=$( kubectl get pods -o jsonpath='{.items[*].spec.volumes[*].configMap.name}' | xargs -n1)
69 | volumesProjectedCM=$( kubectl get pods -o jsonpath='{.items[*].spec.volumes[*].projected.sources[*].configMap.name}' | xargs -n1)
70 | envCM=$( kubectl get pods -o jsonpath='{.items[*].spec.containers[*].env[*].valueFrom.configMapKeyRef.name}' | xargs -n1)
71 | envFromCM=$( kubectl get pods -o jsonpath='{.items[*].spec.containers[*].envFrom[*].configMapRef.name}' | xargs -n1)
72 |
73 | diff \
74 | <(echo "$volumesCM\n$volumesProjectedCM\n$envCM\n$envFromCM" | sort | uniq) \
75 | <(kubectl get configmaps -o jsonpath='{.items[*].metadata.name}' | xargs -n1 | sort | uniq)
76 | ```
77 |
--------------------------------------------------------------------------------
/resources/devops_cycle.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/resources/devops_cycle.jpg
--------------------------------------------------------------------------------
/resources/k8s-istio-gcp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/resources/k8s-istio-gcp.png
--------------------------------------------------------------------------------
/resources/k8s-logos.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/resources/k8s-logos.png
--------------------------------------------------------------------------------
/resources/lab.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/resources/lab.jpg
--------------------------------------------------------------------------------
/resources/next.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/resources/next.png
--------------------------------------------------------------------------------
/resources/prev.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/resources/prev.png
--------------------------------------------------------------------------------
/resources/statefulSet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nirgeier/KubernetesLabs/5664aaf88d505aed45a504f0f3a52359998511c4/resources/statefulSet.png
--------------------------------------------------------------------------------
/runMe.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 |
3 | npm --prefix ./TocBuilder start ../
--------------------------------------------------------------------------------
/scripts/kustomization/buildKustomization.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Set Verbose mode
4 | set -x
5 |
6 | # Check to see if we have the latest version of kustomize
7 | if [ ! -f ./kustomize ]; then
 8 |   # Install latest version of Kustomize
9 | curl -sv "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
10 | fi
11 |
12 | # Save the kustomization path
13 | KUSTOMIZATION_PATH=$(pwd)/kustomize
14 |
15 | # Read the desired variable from the CLI
16 | RESOURCES_PATH=$1
17 |
18 | # Set the base path in case we did not supply one
19 | : ${RESOURCES_PATH:="/K8S/*"}
20 |
21 | # Set the desired output file
22 | KUSTOMIZATION_TARGET_FILE=$2
23 |
24 | # Set the base path in case we did not supply one
25 | : ${KUSTOMIZATION_TARGET_FILE:="kustomization.yaml"}
26 |
27 | # Verify that the file exist or create a new one
28 | touch $RESOURCES_PATH/$KUSTOMIZATION_TARGET_FILE
29 |
30 | # Switch to the desired kustomization folder
31 | cd $RESOURCES_PATH
32 |
33 | # Loop over the resources folder
34 | for filePath in *
35 | do
36 | # Add the yaml file to the kustomization file
37 | $KUSTOMIZATION_PATH edit add resource $filePath
38 | done
39 |
40 | # Add the desired namespace
41 | $KUSTOMIZATION_PATH edit set namespace codewizard
42 |
43 | # Format the output file
44 | $KUSTOMIZATION_PATH cfg fmt $KUSTOMIZATION_TARGET_FILE
45 |
46 | # print the full structure
47 | $KUSTOMIZATION_PATH cfg tree --all
48 |
49 | # Set the desired namespace
50 | cat $KUSTOMIZATION_TARGET_FILE
51 |
52 | # disable verbose mode
53 | set +x
--------------------------------------------------------------------------------
/scripts/setup-kind-cluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export CLUSTER_NAME=codewizard-demo-cluster
4 | export DEMO_NS=codewizard
5 |
6 | # Install kind if not already installed
7 | eval "$(/opt/homebrew/bin/brew shellenv)"
8 | arch -arm64 brew install kind derailed/k9s/k9s
9 |
10 | # Delete old cluster if exists
11 | kind delete \
12 | cluster \
13 | --name $CLUSTER_NAME
14 |
15 | # Create the new cluster
16 | cat << EOF | \
17 | kind create \
18 | cluster \
19 | --name $CLUSTER_NAME \
20 | --config=- \
21 | ###
22 | ### Auto Generated file.
23 | ### Do not edit !!!
24 | ###
25 | ###
26 | apiVersion: kind.x-k8s.io/v1alpha4
27 | kind: Cluster
28 | nodes:
29 | - role: control-plane
30 | kubeadmConfigPatches:
31 | - |
32 | kind: InitConfiguration
33 | nodeRegistration:
34 | kubeletExtraArgs:
35 | #
36 | # node-labels:
37 | # only allow the ingress controller to run on a
38 | # specific node(s) matching the label selector
39 | #
40 | node-labels: "ingress-ready=true"
41 | #
42 | # extraPortMappings:
43 | # allow the local host to make requests to the
44 | # Ingress controller over ports 80/443
45 | #
46 | extraPortMappings:
47 | - containerPort: 80
48 | hostPort: 80
49 | protocol: TCP
50 | - containerPort: 443
51 | hostPort: 443
52 | protocol: TCP
53 | - role: worker
54 | - role: worker
55 | EOF
56 |
57 | # Wait for nodes
58 | kubectl wait node \
59 | --all \
60 | --for condition=ready \
61 | --timeout=600s
62 |
63 | # Verify that the cluster is running
64 | kubectl get nodes -o wide
65 | kind get clusters
66 |
67 | # Create namespaces
68 | kubectl delete ns $DEMO_NS
69 | kubectl create ns $DEMO_NS
70 |
71 | # Switch to the new namespace as default namespace
72 | kubectl config \
73 | set-context $(kubectl config current-context) \
74 | --namespace=$DEMO_NS
75 |
--------------------------------------------------------------------------------
/scripts/setupVScode.sh:
--------------------------------------------------------------------------------
1 | # Download latest VS code
2 | curl -fsSL https://code-server.dev/install.sh | sh
3 |
4 | # Start the VScode in your browser
5 | code-server &
6 |
7 | # Print out the password so we can use it
8 | cat ~/.config/code-server/config.yaml | grep password:
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/scripts/startMinikube.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script will cehck to see if minikube is started
4 | # and if not it will start it
5 |
6 | set -x
7 |
8 | # Extrat the current stauts of minikube
9 | MINIKUBE_STATUS=$(minikube status)
10 |
11 | # The pattern which we look in order to start minikube
12 | MINIKUBE_STOPPED_PATTERN="Stopped|not found"
13 |
14 | # Get latest minkube verison
15 | MINIKUBE_VERSION=$(curl -sL https://api.github.com/repos/kubernetes/minikube/releases/latest | jq -r ".tag_name")
16 |
17 | # Check to see if minikube is already installed or not
18 | if [[ ! -f /usr/local/bin/minikube ]];
19 | then
20 | # Download minikube
21 | echo "Installing minikube..."
22 | curl -Lo minikube https://storage.googleapis.com/minikube/releases/$MINIKUBE_VERSION/minikube-linux-amd64
23 |
24 | # Set the execution bit
25 | chmod +x minikube
26 |
27 | # move monikube to the path
28 | sudo cp minikube /usr/local/bin/
29 |
30 | fi
31 |
32 | # Check to see if minikube is runnig or not
33 | if [[ $MINIKUBE_STATUS =~ $MINIKUBE_STOPPED_PATTERN ]];
34 | then
35 |
36 | # On local minkube you can set the cpu and memory to max
37 | # $ minikube start --memory max --cpu=max
38 |
39 | # start minikube since its stopped
40 | minikube start
41 |
42 | # Start the API server
43 | kubectl proxy --port=8081 &
44 | fi
--------------------------------------------------------------------------------
/updateIndexes.sh:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Function to display usage information
4 | usage() {
5 | echo "Usage: $0 "
6 | echo " The starting index for renaming directories (00 to 99)."
7 | exit 1
8 | }
9 |
10 | # Check if a starting index is provided
11 | if [ -z "$1" ]; then
12 | usage
13 | fi
14 |
15 | # Get the starting index from the first argument
16 | start_index=$1
17 |
18 | # Validate the starting index
19 | if ! [[ $start_index =~ ^[0-9]{2}$ ]]; then
20 | echo "Error: Starting index must be a two-digit number (00 to 99)."
21 | exit 1
22 | fi
23 |
24 | # Initialize an array to hold the changes
25 | declare -a changes
26 |
27 | # Initialize a counter for the new index
28 | new_index=$((10#$start_index + 1))
29 |
30 | # Loop through all directories matching the pattern
31 | for dir in [0-9][0-9]-*; do
32 | # Extract the numeric part and the rest of the name
33 | num=${dir%%-*}
34 | rest=${dir#*-}
35 |
36 | # Only rename if the numeric part is greater than or equal to the starting index
37 | if (( 10#$num >= 10#$start_index )); then
38 | # Form the new directory name with the new index
39 | new_num=$(printf "%02d" $new_index)
40 | new_dir="${new_num}-${rest}"
41 |
42 | # Add the change to the array if the new name is different
43 | if [[ "$dir" != "$new_dir" ]]; then
44 | changes+=("$dir -> $new_dir")
45 | fi
46 |
47 | # Increment the new index and wrap around if it exceeds 99
48 | new_index=$(( (new_index + 1) % 100 ))
49 | fi
50 | done
51 |
52 | # Display the list of changes
53 | echo "The following changes will be made:"
54 | for change in "${changes[@]}"; do
55 | echo "$change"
56 | done
57 |
58 | # Ask for confirmation
59 | echo "Do you want to proceed with these changes? (y/n)"
60 | read -r response
61 | if [[ "$response" == "y" ]]; then
62 | # Apply the changes
63 | new_index=$((10#$start_index + 1))
64 | for dir in [0-9][0-9]-*; do
65 | num=${dir%%-*}
66 | rest=${dir#*-}
67 | if (( 10#$num >= 10#$start_index )); then
68 | new_num=$(printf "%02d" $new_index)
69 | new_dir="${new_num}-${rest}"
70 | if [[ "$dir" != "$new_dir" ]]; then
71 | mv "$dir" "$new_dir"
72 | fi
73 | new_index=$(( (new_index + 1) % 100 ))
74 | fi
75 | done
76 | echo "Changes applied."
77 | else
78 | echo "No changes made."
79 | fi
80 |
--------------------------------------------------------------------------------