├── .gitignore
├── .gitmodules
├── 00-cluster
├── 01-namespaces
│ ├── 01-sample-ns.yaml
│ ├── 02-resource-quota.yaml
│ ├── 03-kuard-pod.yaml
│ ├── 04-kuard-pod.yaml
│ ├── 05-nginx-deployment.yaml
│ ├── 06-limit-range.yaml
│ └── Readme.md
└── Readme.md
├── 01-pods
├── 02-pod.yaml
├── 03-pods-health.yaml
├── 04-pods-resources.yaml
├── 05-pod-volumes.yaml
├── 06-nodeantiaffinity.yaml
├── 07-nodeselector.yaml
├── 08-pod-gitvolume.yaml
├── 09-pod-init-containers.yaml
├── 10-initcontainer-services.yaml
├── 11-pod-presets.yaml
├── 12-poststart.yaml
├── 13-prestop-graceperiod.yaml
├── 14-qos-pod.yaml
├── 15-qos-pod-2.yaml
├── 16-qos-pod-3.yaml
├── 17-qos-pod-4.yaml
├── Readme.md
└── qosClass.md
├── 02-deployments
├── 01-strategy
│ ├── 01-basic-deployment.yaml
│ ├── 01-deployment.yaml
│ └── 02-deployment-recreate.yaml
├── 02-rollout
│ ├── Readme.md
│ └── nginx-deployment.yaml
├── 03-bluegreen
│ ├── Readme.md
│ ├── app-v1.yaml
│ └── app-v2.yaml
└── 04-canary
│ ├── 01-deployment.yaml
│ ├── 02-deployment.yaml
│ └── Readme.md
├── 03-labels-annotations
├── 01-nginx-deployment.yaml
├── 02-pod.yaml
└── 03-frontend-svc.yaml
├── 04-services
├── 01-nginx-deployment.yaml
├── 02-pod.yaml
├── 03-frontend-svc.yaml
├── 04-frontend-svc.yaml
└── 05-frontend-svc.yaml
├── 05-configmapsandsecrets
├── 01-configmaps
│ ├── 01-configmap-sample.yaml
│ ├── 02-configmap-sample.yaml
│ ├── 03-javaapp-pod.yaml
│ ├── 04-javaapp-pod.yaml
│ ├── Readme.md
│ ├── config.txt
│ └── log4j.xml
└── 02-secrets
│ ├── 01-secret-sample.yaml
│ ├── 02-secret-sample.yaml
│ ├── 03-secret-sample.yaml
│ ├── 04-secret-sample.yaml
│ ├── Readme.md
│ └── userpass
├── 06-storage
├── 01-ephemeral-storage
│ ├── 01-emptydir-pod.yaml
│ ├── 02-emptydir-pod.yaml
│ ├── 03-emptydir-pod.yaml
│ ├── 04-emptydir-pod.yaml
│ ├── 05-downward-api.yaml
│ ├── 06-secret-pod.yaml
│ ├── Readme.md
│ └── mysql.conn
├── 02-intro
│ ├── 01-pv
│ │ ├── 01-nfs-pv.yaml
│ │ └── 02-nfs-pvc.yaml
│ ├── 02-selectors
│ │ ├── 01-nfs-pv.yaml
│ │ └── 02-nfs-pvc.yaml
│ └── 03-storageclass
│ │ ├── Readme.md
│ │ ├── azure-redundant-02.yaml
│ │ └── azure-redundant-05.yaml
├── 03-storage
│ ├── 01-networked-storage
│ │ ├── 01-nfs
│ │ │ ├── 01-nfs-pod.yaml
│ │ │ └── Readme.md
│ │ ├── 02-glusterfs
│ │ │ ├── 01-hekiti
│ │ │ │ ├── hekiti-replicated.json
│ │ │ │ ├── hekiti.json
│ │ │ │ └── topology.json
│ │ │ ├── 02-sample
│ │ │ │ ├── 01-deployment.yaml
│ │ │ │ ├── 02-glusrer-pvc.yaml
│ │ │ │ └── 03-gluster-storageclass.yaml
│ │ │ └── Readme.md
│ │ └── 03-template
│ │ │ ├── 01-redis-template.yaml
│ │ │ ├── 02-redis-template.yaml
│ │ │ ├── 03-redis-template.yaml
│ │ │ └── Readme.md
│ ├── 02-block-storage
│ │ ├── 01-azurefile
│ │ │ └── Readme.md
│ │ └── 02-netapp
│ │ │ └── Readme.md
│ └── Readme.md
├── 04-dbench
│ ├── Dockerfile
│ ├── LICENSE
│ ├── README.md
│ ├── dbench.yaml
│ └── docker-entrypoint.sh
└── Readme.md
├── 07-workloads
├── 01-daemonset
│ └── fluentd-daemonset-syslog.yaml
├── 02-job
│ ├── 01-job.yaml
│ ├── 02-job.yaml
│ ├── 03-cronjob.yaml
│ ├── 04-cronjob.yaml
│ └── Readme.md
└── Readme.md
├── 09-ingress
├── 01-nginx-ingress-controller
│ ├── Readme.md
│ ├── http-ingress.yaml
│ ├── ingress-tls.yaml
│ ├── values.yaml
│ ├── web-v1-fixed.yaml
│ ├── web-v1-svc.yaml
│ ├── web-v2-fixed.yaml
│ └── web-v2-svc.yaml
├── 02-traefik-ingress-controller
│ ├── Readme.md
│ ├── http-ingress.yaml
│ ├── values.yaml
│ ├── web-v1-fixed.yaml
│ ├── web-v1-svc.yaml
│ ├── web-v2-fixed.yaml
│ └── web-v2-svc.yaml
├── 03-contour-ingress-controller
│ ├── Readme.md
│ ├── contour-monitoring-svc.yaml
│ ├── controur-servicemonitor.yaml
│ ├── http-ingress.yaml
│ ├── values.yaml
│ ├── web-v1-fixed.yaml
│ ├── web-v1-svc.yaml
│ ├── web-v2-fixed.yaml
│ └── web-v2-svc.yaml
└── README.md
├── 10-monitoring
├── Readme.md
├── aks-kubelet-service-monitor.yaml
└── values.yaml
├── 11-security
└── Readme.md
├── 12-advanced-k8s
├── 01-scheduling
│ ├── 01-nodeaffinity.yaml
│ ├── 02-podaffinity.yaml
│ ├── 03-tolerated-pod.yaml
│ ├── 04-single-pod.yaml
│ └── Readme.md
└── Readme.md
└── ReadMe.md
/.gitignore:
--------------------------------------------------------------------------------
1 | **/*.tls
2 | **/*.crt
3 | **/*.key
4 | **/.vscode
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "08-helm"]
2 | path = 08-helm
3 | url = https://github.com/msdevengers/helm2-workshop.git
4 |
--------------------------------------------------------------------------------
/00-cluster/01-namespaces/01-sample-ns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | creationTimestamp: null
5 | name: samplens
6 | spec: {}
7 | status: {}
8 |
--------------------------------------------------------------------------------
/00-cluster/01-namespaces/02-resource-quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: mem-cpu-demo
5 | spec:
6 | hard:
7 | requests.cpu: "1"
8 | requests.memory: 1Gi
9 | limits.cpu: "2"
10 | limits.memory: 2Gi
--------------------------------------------------------------------------------
/00-cluster/01-namespaces/03-kuard-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | run: kuard
6 | name: kuard
7 | internalVersion: 0.0.2
8 | name: kuard
9 | spec:
10 | containers:
11 | - image: gcr.io/kuar-demo/kuard-amd64:1
12 | imagePullPolicy: IfNotPresent
13 | name: kuard
14 | ports:
15 | - containerPort: 8080
16 | name: http
17 | protocol: TCP
18 | resources: {}
19 | dnsPolicy: ClusterFirst
20 | restartPolicy: Never
21 |
--------------------------------------------------------------------------------
/00-cluster/01-namespaces/04-kuard-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | run: kuard
6 | name: kuard
7 | internalVersion: 0.0.1
8 | name: kuard
9 | spec:
10 | containers:
11 | - image: gcr.io/kuar-demo/kuard-amd64:1
12 | imagePullPolicy: IfNotPresent
13 | name: kuard
14 | resources:
15 | requests:
16 | memory: "512Mi"
17 | cpu: "1"
18 | limits:
19 | memory: "512Mi"
20 | cpu: "1"
21 | ports:
22 | - containerPort: 8080
23 | name: http
24 | protocol: TCP
25 | dnsPolicy: ClusterFirst
26 | restartPolicy: OnFailure
27 |
--------------------------------------------------------------------------------
/00-cluster/01-namespaces/05-nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: nginx
7 | name: nginx
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | run: nginx
13 | strategy: {}
14 | template:
15 | metadata:
16 | creationTimestamp: null
17 | labels:
18 | run: nginx
19 | spec:
20 | containers:
21 | - image: nginx:latest
22 | name: nginx
23 | resources:
24 | requests:
25 | memory: "512Mi"
26 | cpu: "1"
27 | limits:
28 | memory: "512Mi"
29 | cpu: "1"
30 |
--------------------------------------------------------------------------------
/00-cluster/01-namespaces/06-limit-range.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "v1"
2 | kind: "LimitRange"
3 | metadata:
4 | name: "core-resource-limits"
5 | spec:
6 | limits:
7 | - type: "Pod"
8 | max:
9 | cpu: "2"
10 | memory: "1Gi"
11 | min:
12 | cpu: "200m"
13 | memory: "6Mi"
14 | - type: "Container"
15 | max:
16 | cpu: "2"
17 | memory: "1Gi"
18 | min:
19 | cpu: "100m"
20 | memory: "4Mi"
21 | default:
22 | cpu: "300m"
23 | memory: "200Mi"
24 | defaultRequest:
25 | cpu: "200m"
26 | memory: "100Mi"
27 | maxLimitRequestRatio:
28 | cpu: "10"
29 |
--------------------------------------------------------------------------------
/00-cluster/01-namespaces/Readme.md:
--------------------------------------------------------------------------------
1 | #### Todo Komutlari yaz
--------------------------------------------------------------------------------
/00-cluster/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | kubectl cluster-info
3 | kubectl get componentstatuses
4 | kubectl api-resources
5 | kubectl api-versions
6 | kubectl get nodes
7 | kubectl get nodes -o wide
8 | ```
--------------------------------------------------------------------------------
/01-pods/02-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | run: kuard
6 | name: kuard
7 | internalVersion: 0.0.2
8 | name: kuard
9 | spec:
10 | containers:
11 | - image: gcr.io/kuar-demo/kuard-amd64:1
12 | imagePullPolicy: IfNotPresent
13 | name: kuard
14 | ports:
15 | - containerPort: 8080
16 | name: http
17 | protocol: TCP
18 | resources: {}
19 | dnsPolicy: ClusterFirst
20 | restartPolicy: Never
21 |
--------------------------------------------------------------------------------
/01-pods/03-pods-health.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | run: kuard
6 | name: kuard
7 | internalVersion: 0.0.1
8 | name: kuard
9 | spec:
10 | containers:
11 | - image: gcr.io/kuar-demo/kuard-amd64:1
12 | imagePullPolicy: IfNotPresent
13 | name: kuard
14 | livenessProbe:
15 | httpGet:
16 | path: /healthy
17 | port: 8080
18 | initialDelaySeconds: 5
19 | timeoutSeconds: 1
20 | periodSeconds: 10
21 | failureThreshold: 3
22 | readinessProbe:
23 | httpGet:
24 | path: /ready
25 | port: 8080
26 | timeoutSeconds: 1
27 | periodSeconds: 10
28 | initialDelaySeconds: 30
29 | ports:
30 | - containerPort: 8080
31 | name: http
32 | protocol: TCP
33 | resources: {}
34 | dnsPolicy: ClusterFirst
35 | restartPolicy: OnFailure
36 |
--------------------------------------------------------------------------------
/01-pods/04-pods-resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | run: kuard
6 | name: kuard
7 | internalVersion: 0.0.1
8 | name: kuard
9 | spec:
10 | containers:
11 | - image: gcr.io/kuar-demo/kuard-amd64:1
12 | imagePullPolicy: IfNotPresent
13 | name: kuard
14 | resources:
15 | requests:
16 | memory: "1024Mi"
17 | cpu: "500m"
18 | limits:
19 | memory: "1024Mi"
20 | cpu: "500m"
21 | ports:
22 | - containerPort: 8080
23 | name: http
24 | protocol: TCP
25 | dnsPolicy: ClusterFirst
26 | restartPolicy: OnFailure
27 |
--------------------------------------------------------------------------------
/01-pods/05-pod-volumes.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | run: kuard
6 | name: kuard
7 | internalVersion: 0.0.1
8 | name: kuard
9 | spec:
10 | volumes:
11 | - name: "kuard-data"
12 | hostPath:
13 | path: /etc
14 | containers:
15 | - image: gcr.io/kuar-demo/kuard-amd64:1
16 | imagePullPolicy: IfNotPresent
17 | name: kuard
18 | resources:
19 | requests:
20 | cpu: "500m"
21 | memory: "128Mi"
22 | limits:
23 | cpu: "1000m"
24 | memory: "1024Mi"
25 | volumeMounts:
26 | - mountPath: "/data"
27 | name: "kuard-data"
28 | ports:
29 | - containerPort: 8080
30 | name: http
31 | protocol: TCP
32 | resources: {}
33 | dnsPolicy: ClusterFirst
34 | restartPolicy: OnFailure
--------------------------------------------------------------------------------
/01-pods/06-nodeantiaffinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: redis-cache
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: store
9 | replicas: 7
10 | template:
11 | metadata:
12 | labels:
13 | app: store
14 | spec:
15 | affinity:
16 | podAntiAffinity:
17 | requiredDuringSchedulingIgnoredDuringExecution:
18 | - labelSelector:
19 | matchExpressions:
20 | - key: app
21 | operator: In
22 | values:
23 | - store
24 | topologyKey: "kubernetes.io/hostname"
25 | containers:
26 | - name: redis-server
27 | image: redis:3.2-alpine
28 |
--------------------------------------------------------------------------------
/01-pods/07-nodeselector.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | labels:
6 | env: test
7 | spec:
8 | containers:
9 | - name: nginx
10 | image: nginx
11 | imagePullPolicy: IfNotPresent
12 | nodeSelector:
13 | cpu: kotu
--------------------------------------------------------------------------------
/01-pods/08-pod-gitvolume.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: gitrepo-volume-pod
5 | spec:
6 | containers:
7 | - image: nginx:alpine
8 | name: web-server
9 | volumeMounts:
10 | - name: html
11 | mountPath: /usr/share/nginx/html
12 | readOnly: true
13 | ports:
14 | - containerPort: 80
15 | protocol: TCP
16 | volumes:
17 | - name: html
18 | gitRepo: # NOTE: gitRepo volumes are deprecated (since Kubernetes 1.11); prefer an initContainer that clones into an emptyDir
19 | repository: https://github.com/luksa/kubia-website-example.git
20 | revision: master
21 | directory: . # clone directly into the volume root rather than into a repo-named subdirectory
--------------------------------------------------------------------------------
/01-pods/09-pod-init-containers.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: myapp-pod
5 | labels:
6 | app: myapp
7 | spec:
8 | containers:
9 | - name: myapp-container
10 | image: busybox:1.28
11 | command: ['sh', '-c', 'echo The app is running! && sleep 3600']
12 | initContainers:
13 | - name: init-myservice
14 | image: busybox:1.28
15 | command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
16 | - name: init-mydb
17 | image: busybox:1.28
18 | command: ['sh', '-c', 'until nslookup mydb; do echo waiting for mydb; sleep 2; done;']
--------------------------------------------------------------------------------
/01-pods/10-initcontainer-services.yaml:
--------------------------------------------------------------------------------
1 |
2 | ---
3 | apiVersion: v1
4 | kind: Service
5 | metadata:
6 | name: myservice
7 | spec:
8 | ports:
9 | - protocol: TCP
10 | port: 80
11 | targetPort: 9376
12 | ---
13 | apiVersion: v1
14 | kind: Service
15 | metadata:
16 | name: mydb
17 | spec:
18 | ports:
19 | - protocol: TCP
20 | port: 80
21 | targetPort: 9377
--------------------------------------------------------------------------------
/01-pods/11-pod-presets.yaml:
--------------------------------------------------------------------------------
1 | kind: PodPreset
2 | apiVersion: settings.k8s.io/v1alpha1 # alpha API: requires the PodPreset feature gate; removed entirely in Kubernetes 1.20
3 | metadata:
4 | name: preset-enabled
5 | spec:
6 | selector:
7 | matchLabels:
8 | role: example
9 | env:
10 | - name: IS_PRESET_ENABLED
11 | value: "yes" # quoted so YAML does not coerce it to a boolean
12 |
--------------------------------------------------------------------------------
/01-pods/12-poststart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: client
5 | spec:
6 | containers:
7 | - image: nginx
8 | name: client
9 | lifecycle:
10 | postStart:
11 | exec:
12 | command:
13 | - sh
14 | - -c
15 | - sleep 10 && exit 1
16 |
--------------------------------------------------------------------------------
/01-pods/13-prestop-graceperiod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: lifecycle-demo
5 | spec:
6 | containers:
7 | - name: lifecycle-demo-container
8 | image: nginx
9 | lifecycle:
10 | postStart:
11 | exec:
12 | command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
13 | preStop:
14 | exec:
15 | command: ["/bin/sh","-c","nginx -s quit; while killall -0 nginx; do sleep 1; done"]
16 | terminationGracePeriodSeconds: 60
17 |
18 |
--------------------------------------------------------------------------------
/01-pods/14-qos-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: qos-demo
5 | namespace: qos-example
6 | spec:
7 | containers:
8 | - name: qos-demo-ctr
9 | image: nginx
10 | resources:
11 | limits:
12 | memory: "200Mi"
13 | cpu: "700m"
14 | requests:
15 | memory: "200Mi"
16 | cpu: "700m"
--------------------------------------------------------------------------------
/01-pods/15-qos-pod-2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: qos-demo-2
5 | namespace: qos-example
6 | spec:
7 | containers:
8 | - name: qos-demo-2-ctr
9 | image: nginx
10 | resources:
11 | limits:
12 | memory: "200Mi"
13 | requests:
14 | memory: "100Mi"
--------------------------------------------------------------------------------
/01-pods/16-qos-pod-3.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: qos-demo-3
5 | namespace: qos-example
6 | spec:
7 | containers:
8 | - name: qos-demo-3-ctr
9 | image: nginx
--------------------------------------------------------------------------------
/01-pods/17-qos-pod-4.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: qos-demo-4
5 | namespace: qos-example
6 | spec:
7 | containers:
8 |
9 | - name: qos-demo-4-ctr-1
10 | image: nginx
11 | resources:
12 | requests:
13 | memory: "200Mi"
14 |
15 | - name: qos-demo-4-ctr-2
16 | image: redis
--------------------------------------------------------------------------------
/01-pods/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | alias k=kubectl
3 | kubectl apply -f 02-pod.yaml
4 | kubectl delete -f 02-pod.yaml
5 |
6 | kubectl port-forward kuard 8080:8080
7 |
8 | kubectl apply -f 04-pods-resources.yaml
9 | kubectl delete -f 04-pods-resources.yaml
10 |
11 | kubectl apply -f 07-nodeselector.yaml
12 | kubectl describe pod nginx
13 | kubectl label node xxx cpu=kotu
14 | kubectl get pods -w
15 | kubectl delete -f 07-nodeselector.yaml
16 | ```
17 |
18 | ```bash
19 | kubectl run nginx --image=nginx --restart=Never --command -it -- env
20 | kubectl run nginx --image=nginx --restart=Never --command --dry-run=true -o yaml -- env > command-pod.yaml
21 | kubectl apply -f command-pod.yaml
22 | kubectl run nginx --image=nginx --restart=Never --command --dry-run=true -o yaml
23 | k run nginx --image=nginx --port=80 --dry-run=true -o yaml
24 | kubectl set image pod/nginx nginx=nginx:1.7.9
25 | kubectl get pods -o=jsonpath='{.items[*].spec.containers[*].image}{"\n"}'
26 | ```
27 |
28 |
29 |
30 |
31 |
32 | #### Todo
33 |
34 | - Aşağıdaki konularda çalışabilmek için öncelikle kendi reponuza bu repoyu fork edip daha sonra pull request ile merge isteği yollayabilirsiniz. Aşağıdaki linkten inceleyebilirsiniz
35 | - https://www.youtube.com/watch?v=G1I3HF4YWEw
36 | - Quality of Service for Pods
37 | - Aşağıdaki özelliğin tanıtılması için qosClass.md içine bir yazı eklenilmesi.
38 | - qosClass özelliğinin tanıtılması.
39 | - https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
40 | - Projected Volume
41 | - Aşağıdaki özelliğin tanıtılması için projectedvolume.md içine bir yazı eklenilmesi.
42 | - https://kubernetes.io/docs/tasks/configure-pod-container/configure-projected-volume-storage/
43 | - ImagePull Policy
44 | - Aşağıdaki özelliğin tanıtılması için images.md içine bir yazı eklenilmesi.
45 | - ImagePullPolicy özelliğinin tanıtılması
46 | - https://kubernetes.io/docs/concepts/containers/images/
47 |
48 | - kompose
49 | - Aşağıdaki özelliğin tanıtılması için kompose.md içine bir yazı eklenilmesi.
50 | - Docker-compose dosyalarının otomatik kubernetes yamllarına dönüştürülmesi
51 | - https://kompose.io/
52 | - terminationGracePeriodSeconds
53 | - Aşağıdaki özelliğin tanıtılması için signals.md içine bir yazı eklenilmesi.
54 | - terminationGracePeriodSeconds özelliği nedir neden kullanılır. Containerlara yollanan SIGTERM ve SIGKILL sinyallari ile ilgisi nedir?
55 | - https://pracucci.com/graceful-shutdown-of-kubernetes-pods.html
56 | - https://tasdikrahman.me/2019/04/24/handling-singals-for-applications-in-kubernetes-docker/
57 | - termination yönetimi
58 | - Aşağıdaki iki özelliğin tanıtılması için termination.md içine bir yazı eklenilmesi.
59 | - terminationMessagePath: /dev/termination-log
60 | - terminationMessagePolicy: File
61 |
62 |
--------------------------------------------------------------------------------
/01-pods/qosClass.md:
--------------------------------------------------------------------------------
1 | Quality of Service for Pods
2 | Kubernetes, Pod'ları uygun şartlar altında schedule etmek veya silmek için Quality of Service (QoS) sınıflarını kullanır.
3 | Kubernetes yeni bir Pod objesi yarattıktan sonra aşağıdaki QoS sınıflarından bir tanesini ilgili Pod'a atar.
4 |
5 | • Guaranteed
6 | • Burstable
7 | • BestEffort
8 |
9 | Bu çalışmamızda yapacağımız örnekleri uygulayacağımız yeni bir namespace oluşturalım.
10 |
11 | $ kubectl create namespace qos-example
12 |
13 | Guaranteed
14 | Guaranteed tipinde QoS sınıfı atanmış bir pod oluşturalım.
15 |
16 | Bu poddaki her bir container için eşit memory limit ve memory request değerleri belirlenmelidir.
17 | Aynı zamanda, CPU limit ve CPU request değerlerinin eşit olarak belirlenmesi gerekmektedir.
18 |
19 | Örnek bir Pod Definiton yaml dosyası için dosyalardaki 14-qos-pod.yaml adlı dosyayı inceleyebiliriz.
20 | Bu dosyada tek bir container tanımlaması yapılmış Memory için 200Mi, CPU için ise 700m değerleri belirlenmiştir. Guaranteed tipinde bir QoS sınıfı ataması istediğimiz için request ve limit değerleri eşit olmalıdır.
21 |
22 | Bu definition yaml dosyasını kullanarak pod'u oluşturalım.
23 |
24 | $ kubectl apply -f 14-qos-pod.yaml --namespace qos-example
25 |
26 | Pod oluştuktan sonra -o yaml parametresi ile birlikte pod'un qosClass değerini kontrol edelim.
27 |
28 | $ kubectl get pods qos-demo -o yaml --namespace qos-example
29 |
30 | spec:
31 | containers:
32 | ...
33 | resources:
34 | limits:
35 | cpu: 700m
36 | memory: 200Mi
37 | requests:
38 | cpu: 700m
39 | memory: 200Mi
40 | ...
41 | qosClass: Guaranteed
42 |
43 | Görüleceği gibi pod'umuz Guaranteed sınıfında oluşmuş durumdadır.
44 |
45 | Şimdi bu podu silebiliriz.
46 |
47 | $ kubectl delete pod qos-demo --namespace qos-example
48 |
49 | Burstable
50 | Burstable tipinde QoS sınıfı atanmış bir pod oluşturalım.
51 |
52 | Eğer Pod Guaranteed sınıfı kriterlerini sağlamıyorsa ve Pod'un içindeki en az bir container'ın CPU veya memory request değeri mevcut ise Burstable tipinde bir pod oluşturulur.
53 |
54 | Burstable tipinde QoS sınıfına sahip bir Pod oluşturmak için örnek bir pod definition yaml dosyası oluşturalım: 15-qos-pod-2.yaml
55 |
56 | Bu dosyayı kullanarak Pod'u schedule edelim ve yeniden output'u inceleyelim
57 |
58 | $ kubectl apply -f 15-qos-pod-2.yaml --namespace qos-example
59 | $ kubectl get pods qos-demo-2 -o yaml --namespace qos-example
60 |
61 | spec:
62 | containers:
63 | - image: nginx
64 | imagePullPolicy: Always
65 | name: qos-demo-2-ctr
66 | resources:
67 | limits:
68 | memory: 200Mi
69 | requests:
70 | memory: 100Mi
71 | ...
72 | qosClass: Burstable
73 |
74 | Burada gördüğümüz gibi pod'umuz Burstable tipinde bir QoS sınıfına sahiptir.
75 |
76 | Şimdi bu podu silebiliriz.
77 |
78 | $ kubectl delete pod qos-demo-2 --namespace qos-example
79 |
80 | BestEffort
81 | BestEffort tipinde QoS sınıfı atanmış bir pod oluşturalım.
82 |
83 | Pod'un içinde yer alan containerların herhangi bir CPU veya Memory request, limitlerinin tanımlı olmasına gerek olmadığı durumlarda Best effort tipinde QoS sınıfına sahip Podlar oluşur.
84 |
85 | BestEffort tipinde QoS sınıfına sahip bir Pod oluşturmak için örnek bir pod definition yaml dosyası oluşturalım: 16-qos-pod-3.yaml
86 |
87 | Bu dosyayı kullanarak Pod'u schedule edelim ve yeniden output'u inceleyelim
88 |
89 | $ kubectl apply -f 16-qos-pod-3.yaml --namespace qos-example
90 | $ kubectl get pods qos-demo-3 -o yaml --namespace qos-example
91 |
92 | spec:
93 | containers:
94 | ...
95 | resources: {}
96 | ...
97 | qosClass: BestEffort
98 |
99 | Görüldüğü gibi pod'umuz BestEffort tipinde bir QoS sınıfına sahiptir.
100 |
101 | Şimdi bu podu silebiliriz.
102 |
103 | $ kubectl delete pod qos-demo-3 --namespace qos-example
104 |
105 | Example
106 | Şimdi ise bir Pod tanımı yapalım. Bu pod tanımında iki farklı container bulunduralım ve nginx image'nın çalışacağı container için memory request tanımı yapalım. Fakat redis image'nın çalışacağı container için herhangi bir tanımda bulunmuyoruz. Dosyamızın adı: 17-qos-pod-4.yaml
107 |
108 | Bu dosyayı kullanarak Pod'u schedule edelim ve yeniden output'u inceleyelim
109 |
110 | $ kubectl apply -f 17-qos-pod-4.yaml --namespace qos-example
111 | $ kubectl get pods qos-demo-4 -o yaml --namespace qos-example
112 |
113 | Bu Pod, Guaranteed sınıfının gerekliliklerini karşılamıyor fakat Burstable sınıfının gerekliliğini bir container'da Memory request tanımı olduğu için sağlıyor.
114 |
115 | spec:
116 | containers:
117 | ...
118 | name: qos-demo-4-ctr-1
119 | resources:
120 | requests:
121 | memory: 200Mi
122 | ...
123 | name: qos-demo-4-ctr-2
124 | resources: {}
125 | ...
126 | qosClass: Burstable
127 |
128 | Output'ta da göreceğimiz gibi Pod'un QoS sınıfı Burstable olarak ayarlanmış durumdadır.
129 |
130 |
131 |
--------------------------------------------------------------------------------
/02-deployments/01-strategy/01-basic-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: nginx
7 | name: nginx
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | run: nginx
13 | strategy: {}
14 | template:
15 | metadata:
16 | creationTimestamp: null
17 | labels:
18 | run: nginx
19 | spec:
20 | containers:
21 | - image: nginx
22 | name: nginx
23 | resources: {}
24 | status: {}
25 |
--------------------------------------------------------------------------------
/02-deployments/01-strategy/01-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1 # extensions/v1beta1 Deployment was deprecated and removed in Kubernetes 1.16
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: guestbook
6 | tier: frontend
7 | name: frontend
8 | namespace: default
9 | spec:
10 | progressDeadlineSeconds: 600
11 | replicas: 10
12 | revisionHistoryLimit: 10
13 | selector:
14 | matchExpressions:
15 | - key: tier
16 | operator: In
17 | values:
18 | - frontend
19 | strategy:
20 | rollingUpdate:
21 | maxSurge: 100%
22 | maxUnavailable: 25%
23 | type: RollingUpdate
24 | template:
25 | metadata:
26 | labels:
27 | app: guestbook
28 | tier: frontend
29 | spec:
30 | containers:
31 | - env:
32 | - name: GET_HOSTS_FROM
33 | value: dns
34 | image: gcr.io/google_samples/gb-frontend:v2
35 | imagePullPolicy: IfNotPresent
36 | name: php-redis
37 | ports:
38 | - containerPort: 80
39 | protocol: TCP
40 | resources:
41 | requests:
42 | cpu: 100m
43 | memory: 100Mi
44 | terminationMessagePath: /dev/termination-log
45 | terminationMessagePolicy: File
46 | dnsPolicy: ClusterFirst
47 | restartPolicy: Always
48 | schedulerName: default-scheduler
49 | securityContext: {}
50 | terminationGracePeriodSeconds: 30
--------------------------------------------------------------------------------
/02-deployments/01-strategy/02-deployment-recreate.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1 # extensions/v1beta1 Deployment was deprecated and removed in Kubernetes 1.16
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: guestbook
6 | tier: frontend
7 | name: frontend
8 | namespace: default
9 | spec:
10 | progressDeadlineSeconds: 600
11 | replicas: 10
12 | revisionHistoryLimit: 10
13 | selector:
14 | matchExpressions:
15 | - key: tier
16 | operator: In
17 | values:
18 | - frontend
19 | strategy:
20 | type: Recreate
21 | template:
22 | metadata:
23 | labels:
24 | app: guestbook
25 | tier: frontend
26 | spec:
27 | containers:
28 | - env:
29 | - name: GET_HOSTS_FROM
30 | value: dns
31 | image: gcr.io/google_samples/gb-frontend:v2
32 | imagePullPolicy: IfNotPresent
33 | name: php-redis
34 | ports:
35 | - containerPort: 80
36 | protocol: TCP
37 | resources:
38 | requests:
39 | cpu: 100m
40 | memory: 100Mi
41 | terminationMessagePath: /dev/termination-log
42 | terminationMessagePolicy: File
43 | dnsPolicy: ClusterFirst
44 | restartPolicy: Always
45 | schedulerName: default-scheduler
46 | securityContext: {}
47 | terminationGracePeriodSeconds: 30
--------------------------------------------------------------------------------
/02-deployments/02-rollout/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | kubectl apply -f nginx-deployment.yaml
3 |
4 | #watch the status of deployment
5 | kubectl rollout status deployment.v1.apps/nginx-deployment
6 | ```
7 | ### Deployment History
8 | ```bash
9 | kubectl set image deployment/nginx-deployment nginx=nginx:1.9.1 --record
10 | #or
11 | kubectl edit deployment nginx-deployment
12 |
13 | kubectl rollout status deployment.v1.apps/nginx-deployment
14 | kubectl get deployments
15 | kubectl get rs
16 | kubectl describe deployment nginx-deployment
17 | ```
18 |
19 | ### Rolling Back a Deployment
20 | ```bash
21 | kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.91 --record=true
22 | kubectl rollout status deployment.v1.apps/nginx-deployment
23 | kubectl get rs
24 | kubectl get pods
25 | kubectl describe deployment nginx-deployment
26 | # details of the revision
27 | kubectl rollout history deployment.v1.apps/nginx-deployment
28 | kubectl rollout history deployment.v1.apps/nginx-deployment --revision=2
29 | # rollout undo deployment
30 | kubectl rollout undo deployment.v1.apps/nginx-deployment
31 | kubectl rollout undo deployment.v1.apps/nginx-deployment --to-revision=2
32 | ```
33 |
34 | ### Pause Deployment
35 | ```bash
36 | kubectl rollout pause deployment nginx-deployment
37 | kubectl set image deployment nginx-deployment nginx=nginx:1.7.9
38 | kubectl rollout history deployment nginx-deployment
39 | kubectl get rs
40 | kubectl set resources deployment nginx-deployment -c=nginx --limits=memory=256Mi
41 | kubectl rollout resume deployment nginx-deployment
42 | ```
43 |
--------------------------------------------------------------------------------
/02-deployments/02-rollout/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | labels:
6 | app: nginx
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: nginx
12 | template:
13 | metadata:
14 | labels:
15 | app: nginx
16 | spec:
17 | containers:
18 | - name: nginx
19 | image: nginx:1.7.9
20 | ports:
21 | - containerPort: 80
22 | initContainers:
23 | - name: init-myservice
24 | image: busybox:1.28
25 | command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
26 | - name: init-mydb
27 | image: busybox:1.28
28 | command: ['sh', '-c', 'until nslookup mydb; do echo waiting for mydb; sleep 2; done;']
29 |
30 |
31 |
32 |
--------------------------------------------------------------------------------
/02-deployments/03-bluegreen/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | kubectl create ns bluegreen
3 | kubectl apply -f app-v1.yaml
4 | kubectl run --restart=Never --image=raesene/alpine-nettools nettools
5 | kubectl exec -it nettools -- /bin/sh
6 | while true; do curl http://my-app.bluegreen; done
7 |
8 | kubectl apply -f app-v2.yaml
9 | kubectl get pods -n bluegreen
10 | kubectl patch service my-app -n bluegreen -p '{"spec":{"selector":{"version":"v2.0.0"}}}'
11 |
12 | kubectl delete ns bluegreen
13 | kubectl delete pod nettools
14 |
15 | ```
--------------------------------------------------------------------------------
/02-deployments/03-bluegreen/app-v1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-app
5 | namespace: bluegreen
6 | labels:
7 | app: my-app
8 | spec:
9 | type: ClusterIP
10 | ports:
11 | - name: http
12 | port: 80
13 | targetPort: http
14 |
15 | # Note here that we match both the app and the version
16 | selector:
17 | app: my-app
18 | version: v1.0.0
19 | ---
20 | apiVersion: apps/v1
21 | kind: Deployment
22 | metadata:
23 | name: my-app-v1
24 | namespace: bluegreen
25 | labels:
26 | app: my-app
27 | spec:
28 | replicas: 3
29 | selector:
30 | matchLabels:
31 | app: my-app
32 | version: v1.0.0
33 | template:
34 | metadata:
35 | labels:
36 | app: my-app
37 | version: v1.0.0
38 | annotations:
39 | prometheus.io/scrape: "true"
40 | prometheus.io/port: "9101"
41 | spec:
42 | containers:
43 | - name: my-app
44 | image: containersol/k8s-deployment-strategies
45 | ports:
46 | - name: http
47 | containerPort: 8080
48 | - name: probe
49 | containerPort: 8086
50 | env:
51 | - name: VERSION
52 | value: v1.0.0
53 | livenessProbe:
54 | httpGet:
55 | path: /live
56 | port: probe
57 | initialDelaySeconds: 5
58 | periodSeconds: 5
59 | readinessProbe:
60 | httpGet:
61 | path: /ready
62 | port: probe
63 | periodSeconds: 5
--------------------------------------------------------------------------------
/02-deployments/03-bluegreen/app-v2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-app-v2
5 | namespace: bluegreen
6 | labels:
7 | app: my-app
8 | spec:
9 | replicas: 3
10 | selector:
11 | matchLabels:
12 | app: my-app
13 | version: v2.0.0
14 | template:
15 | metadata:
16 | labels:
17 | app: my-app
18 | version: v2.0.0
19 | annotations:
20 | prometheus.io/scrape: "true"
21 | prometheus.io/port: "9101"
22 | spec:
23 | containers:
24 | - name: my-app
25 | image: containersol/k8s-deployment-strategies
26 | ports:
27 | - name: http
28 | containerPort: 8080
29 | - name: probe
30 | containerPort: 8086
31 | env:
32 | - name: VERSION
33 | value: v2.0.0
34 | livenessProbe:
35 | httpGet:
36 | path: /live
37 | port: probe
38 | initialDelaySeconds: 5
39 | periodSeconds: 5
40 | readinessProbe:
41 | httpGet:
42 | path: /ready
43 | port: probe
44 | periodSeconds: 5
--------------------------------------------------------------------------------
/02-deployments/04-canary/01-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: http
11 | port: 80
12 | targetPort: http
13 |
14 | # Note here that we match both the app and the version
15 | selector:
16 | app: my-app
17 | ---
18 | apiVersion: apps/v1
19 | kind: Deployment
20 | metadata:
21 | name: my-app-v1
22 | labels:
23 | app: my-app
24 | version: 0.0.1
25 | spec:
26 | replicas: 3
27 | selector:
28 | matchLabels:
29 | app: my-app
30 | version: 0.0.1
31 | template:
32 | metadata:
33 | labels:
34 | app: my-app
35 | version: 0.0.1
36 | annotations:
37 | prometheus.io/scrape: "true"
38 | prometheus.io/port: "9101"
39 | spec:
40 | containers:
41 | - name: my-app
42 | image: containersol/k8s-deployment-strategies
43 | ports:
44 | - name: http
45 | containerPort: 8080
46 | - name: probe
47 | containerPort: 8086
48 | env:
49 | - name: VERSION
50 | value: v1.0.0
51 | livenessProbe:
52 | httpGet:
53 | path: /live
54 | port: probe
55 | initialDelaySeconds: 5
56 | periodSeconds: 5
57 | readinessProbe:
58 | httpGet:
59 | path: /ready
60 | port: probe
61 | periodSeconds: 5
--------------------------------------------------------------------------------
/02-deployments/04-canary/02-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-app-v2
5 | labels:
6 | app: my-app
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: my-app
12 | version: 0.0.2
13 | template:
14 | metadata:
15 | labels:
16 | app: my-app
17 | version: 0.0.2
18 | annotations:
19 | prometheus.io/scrape: "true"
20 | prometheus.io/port: "9101"
21 | spec:
22 | containers:
23 | - name: my-app
24 | image: containersol/k8s-deployment-strategies
25 | ports:
26 | - name: http
27 | containerPort: 8080
28 | - name: probe
29 | containerPort: 8086
30 | env:
31 | - name: VERSION
32 | value: v2.0.0
33 | livenessProbe:
34 | httpGet:
35 | path: /live
36 | port: probe
37 | initialDelaySeconds: 5
38 | periodSeconds: 5
39 | readinessProbe:
40 | httpGet:
41 | path: /ready
42 | port: probe
43 | periodSeconds: 5
--------------------------------------------------------------------------------
/02-deployments/04-canary/Readme.md:
--------------------------------------------------------------------------------
1 | #### Spinnaker v1 Legacy Style Canary Deployment
2 |
3 | ```bash
4 | kubectl apply -f 01-deployment.yaml
5 | kubectl exec -it nettools -- /bin/sh
6 | while true; do curl --connect-timeout 1 -m 1 http://my-app; done
7 | kubectl apply -f 02-deployment.yaml
8 | ```
9 |
10 | #### Cleanup
11 | ```bash
12 | kubectl delete -f 01-deployment.yaml
13 | kubectl delete -f 02-deployment.yaml
14 | ```
--------------------------------------------------------------------------------
/03-labels-annotations/01-nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | # Guestbook frontend Deployment.
2 | # apps/v1: extensions/v1beta1 was deprecated and removed in Kubernetes 1.16.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 |   labels:
7 |     app: guestbook
8 |     tier: frontend
9 |   name: frontend
10 | spec:
11 |   progressDeadlineSeconds: 600
12 |   replicas: 10
13 |   revisionHistoryLimit: 10
14 |   selector:
15 |     matchExpressions:
16 |     - key: tier
17 |       operator: In
18 |       values:
19 |       - frontend
20 |   strategy:
21 |     rollingUpdate:
22 |       maxSurge: 100%
23 |       maxUnavailable: 25%
24 |     type: RollingUpdate
25 |   template:
26 |     metadata:
27 |       labels:
28 |         app: guestbook
29 |         tier: frontend
30 |     spec:
31 |       containers:
32 |       - env:
33 |         - name: GET_HOSTS_FROM
34 |           value: dns
35 |         image: gcr.io/google_samples/gb-frontend:v2
36 |         imagePullPolicy: IfNotPresent
37 |         name: php-redis
38 |         ports:
39 |         - containerPort: 80
40 |           protocol: TCP
41 |         resources:
42 |           requests:
43 |             cpu: 100m
44 |             memory: 100Mi
45 |         terminationMessagePath: /dev/termination-log
46 |         terminationMessagePolicy: File
47 |       dnsPolicy: ClusterFirst
48 |       restartPolicy: Always
49 |       schedulerName: default-scheduler
50 |       securityContext: {}
51 |       terminationGracePeriodSeconds: 30
/03-labels-annotations/02-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | app: guestbook
6 | tier: frontend
7 | name: kuard
8 | spec:
9 | containers:
10 | - image: gcr.io/kuar-demo/kuard-amd64:1
11 | imagePullPolicy: IfNotPresent
12 | name: kuard
13 | ports:
14 | - containerPort: 8080
15 | name: http
16 | protocol: TCP
17 | dnsPolicy: ClusterFirst
18 | restartPolicy: Never
19 |
--------------------------------------------------------------------------------
/03-labels-annotations/03-frontend-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: guestbook
6 | tier: frontend
7 | name: frontend
8 | spec:
9 | ports:
10 | - port: 80
11 | protocol: TCP
12 | targetPort: 80
13 | selector:
14 | app: guestbook
15 | tier: frontend
16 | sessionAffinity: None
17 | type: NodePort
18 |
--------------------------------------------------------------------------------
/04-services/01-nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | # Guestbook frontend Deployment.
2 | # apps/v1: extensions/v1beta1 was deprecated and removed in Kubernetes 1.16.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 |   labels:
7 |     app: guestbook
8 |     tier: frontend
9 |   name: frontend
10 | spec:
11 |   progressDeadlineSeconds: 600
12 |   replicas: 10
13 |   revisionHistoryLimit: 10
14 |   selector:
15 |     matchExpressions:
16 |     - key: tier
17 |       operator: In
18 |       values:
19 |       - frontend
20 |   strategy:
21 |     rollingUpdate:
22 |       maxSurge: 100%
23 |       maxUnavailable: 25%
24 |     type: RollingUpdate
25 |   template:
26 |     metadata:
27 |       labels:
28 |         app: guestbook
29 |         tier: frontend
30 |     spec:
31 |       containers:
32 |       - env:
33 |         - name: GET_HOSTS_FROM
34 |           value: dns
35 |         image: gcr.io/google_samples/gb-frontend:v2
36 |         imagePullPolicy: IfNotPresent
37 |         name: php-redis
38 |         ports:
39 |         - containerPort: 80
40 |           protocol: TCP
41 |         resources:
42 |           requests:
43 |             cpu: 100m
44 |             memory: 100Mi
45 |         terminationMessagePath: /dev/termination-log
46 |         terminationMessagePolicy: File
47 |       dnsPolicy: ClusterFirst
48 |       restartPolicy: Always
49 |       schedulerName: default-scheduler
50 |       securityContext: {}
51 |       terminationGracePeriodSeconds: 30
--------------------------------------------------------------------------------
/04-services/02-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | app: guestbook
6 | tier: frontend
7 | name: kuard
8 | spec:
9 | containers:
10 | - image: gcr.io/kuar-demo/kuard-amd64:1
11 | imagePullPolicy: IfNotPresent
12 | name: kuard
13 | ports:
14 | - containerPort: 8080
15 | name: http
16 | protocol: TCP
17 | dnsPolicy: ClusterFirst
18 | restartPolicy: Never
19 |
--------------------------------------------------------------------------------
/04-services/03-frontend-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: guestbook
6 | tier: frontend
7 | name: frontend
8 | spec:
9 | ports:
10 | - port: 80
11 | protocol: TCP
12 | targetPort: 80
13 | selector:
14 | app: guestbook
15 | tier: frontend
16 | sessionAffinity: None
17 | type: ClusterIP
18 |
--------------------------------------------------------------------------------
/04-services/04-frontend-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: guestbook
6 | tier: frontend
7 | name: frontend
8 | spec:
9 | ports:
10 | - port: 80
11 | protocol: TCP
12 | targetPort: 80
13 | selector:
14 | app: guestbook
15 | tier: frontend
16 | sessionAffinity: None
17 | type: NodePort
18 |
19 |
--------------------------------------------------------------------------------
/04-services/05-frontend-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: guestbook
6 | tier: frontend
7 | name: frontend
8 | spec:
9 | ports:
10 | - port: 80
11 | protocol: TCP
12 | targetPort: 80
13 | selector:
14 | app: guestbook
15 | tier: frontend
16 | sessionAffinity: None
17 | type: LoadBalancer
18 |
--------------------------------------------------------------------------------
/05-configmapsandsecrets/01-configmaps/01-configmap-sample.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: simpleconfig
5 | data:
6 | foo: bar
7 | hello: world
8 | ---
9 | apiVersion: v1
10 | kind: Pod
11 | metadata:
12 | name: pod2
13 | spec:
14 | containers:
15 | - name: nginx
16 | image: nginx
17 | env:
18 | - name: FOO_ENV_VAR
19 | valueFrom:
20 | configMapKeyRef:
21 | name: simpleconfig
22 | key: foo
23 | - name: HELLO_ENV_VAR
24 | valueFrom:
25 | configMapKeyRef:
26 | name: simpleconfig
27 | key: hello
--------------------------------------------------------------------------------
/05-configmapsandsecrets/01-configmaps/02-configmap-sample.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: nginx
7 | name: nginx
8 | spec:
9 | containers:
10 | - image: nginx
11 | imagePullPolicy: IfNotPresent
12 | name: nginx
13 | env:
14 | - name: option # name of the env variable
15 | valueFrom:
16 | configMapKeyRef:
17 | name: simple-config # name of config map
18 | key: name # name of the entity in config map
19 | dnsPolicy: ClusterFirst
20 | restartPolicy: Never
21 | status: {}
--------------------------------------------------------------------------------
/05-configmapsandsecrets/01-configmaps/03-javaapp-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: javaapp
5 | spec:
6 | containers:
7 | - name: nginx
8 | image: nginx
9 | volumeMounts:
10 | - mountPath: /var/configuration
11 | name: log4j-volume
12 | volumes:
13 | - name: log4j-volume
14 | configMap:
15 | name: log4j-config
--------------------------------------------------------------------------------
/05-configmapsandsecrets/01-configmaps/04-javaapp-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: javaapp
7 | name: javaapp
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | run: javaapp
13 | strategy: {}
14 | template:
15 | metadata:
16 | creationTimestamp: null
17 | labels:
18 | run: javaapp
19 | annotations:
20 | configmap.reloader.stakater.com/reload: "log4j-config"
21 | spec:
22 | containers:
23 | - name: nginx
24 | image: nginx
25 | volumeMounts:
26 | - mountPath: /var/configuration
27 | name: log4j-volume
28 |
29 | volumes:
30 | - name: log4j-volume
31 | configMap:
32 | name: log4j-config
--------------------------------------------------------------------------------
/05-configmapsandsecrets/01-configmaps/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | kubectl create configmap simple-config --from-literal=name=msdevengers --from-literal=source=github
3 | kubectl get configmap simple-config -o yaml
4 | kubectl describe configmap simple-config
5 | kubectl delete configmap simple-config
6 | ```
7 |
8 | ```bash
9 | cat config.txt
10 | kubectl create configmap simple-config --from-file=config.txt
11 | kubectl get configmap simple-config -o yaml
12 | kubectl describe configmap simple-config
13 | kubectl delete configmap simple-config
14 | ```
15 | ```bash
16 | kubectl create configmap simple-config --from-env-file=config.txt
17 | kubectl get configmap simple-config -o yaml
18 | kubectl describe configmap simple-config
19 | kubectl delete configmap simple-config
20 | ```
21 |
22 | ```bash
23 | kubectl create configmap simple-config --from-file=config=config.txt
24 | kubectl get cm -o yaml simple-config
25 | kubectl delete configmap simple-config
26 | ```
27 | ```bash
28 | kubectl create cm simple-config --from-literal=name=msdevengers
29 | kubectl apply -f 02-configmap-sample.yaml
30 | ```
31 |
32 | ```bash
33 | kubectl delete configmap simple-config
34 | kubectl delete pod nginx
35 | ```
36 |
37 |
38 | ```bash
39 | kubectl create configmap log4j-config --from-file=log4j.xml
40 | kubectl apply -f 03-javaapp-pod.yaml
41 | kubectl exec -it javaapp -- ls -lart /var/configuration
42 | ```
43 |
44 | ```yaml
45 | apiVersion: v1
46 | kind: Pod
47 | metadata:
48 | name: javaapp
49 | spec:
50 | containers:
51 | - name: nginx
52 | image: nginx
53 | volumeMounts:
54 | - mountPath: /var/configuration
55 | name: log4j-volume
56 | volumes:
57 | - name: log4j-volume
58 | configMap:
59 | name: log4j-config
60 | ```
61 |
62 | ```bash
63 | kubectl delete -f 03-javaapp-pod.yaml
64 | kubectl delete configmap log4j-config
65 | ```
66 |
67 | ```bash
68 | ## BINGO
69 | kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
70 | kubectl create configmap log4j-config --from-file=log4j.xml
71 | kubectl apply -f 04-javaapp-pod.yaml
72 | ```
73 | ```yaml
74 | apiVersion: apps/v1
75 | kind: Deployment
76 | metadata:
77 | creationTimestamp: null
78 | labels:
79 | run: javaapp
80 | name: javaapp
81 | annotations:
82 | configmap.reloader.stakater.com/reload: "log4j-config"
83 | ```
84 | ```bash
85 | kubectl edit configmap log4j-config
86 | ```
87 |
88 | ```xml
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 | ```
97 |
98 | ```bash
99 | watch -n5 kubectl get pods
100 | kubectl delete configmap log4j-config
101 | kubectl delete deployment javaapp
102 | kubectl delete -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
103 | ```
104 |
105 | References
106 | - https://github.com/stakater/Reloader
107 | - https://azure.microsoft.com/en-us/resources/kubernetes-up-and-running/
108 | - https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/
--------------------------------------------------------------------------------
/05-configmapsandsecrets/01-configmaps/config.txt:
--------------------------------------------------------------------------------
1 | name=msdevengers
2 | source=github
--------------------------------------------------------------------------------
/05-configmapsandsecrets/01-configmaps/log4j.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/05-configmapsandsecrets/02-secrets/01-secret-sample.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod2
5 | spec:
6 | containers:
7 | - name: nginx
8 | image: nginx
9 | env:
10 | - name: FOO_USER_NAME
11 | valueFrom:
12 | secretKeyRef:
13 | key: username
14 | name: dev-db-secret
15 |
16 | - name: FOO_PASS
17 | valueFrom:
18 | secretKeyRef:
19 | key: password
20 | name: dev-db-secret
--------------------------------------------------------------------------------
/05-configmapsandsecrets/02-secrets/02-secret-sample.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod2
5 | spec:
6 | containers:
7 | - name: nginx
8 | image: nginx
9 | volumeMounts:
10 | - name: top-secret
11 | mountPath: /var/configuration/
12 | volumes:
13 | - name: top-secret
14 | secret:
15 | secretName: user-pass
--------------------------------------------------------------------------------
/05-configmapsandsecrets/02-secrets/03-secret-sample.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: private-reg
5 | spec:
6 | containers:
7 | - name: private-reg-container
8 | image: nginx
9 | imagePullSecrets:
10 | - name: repository-secret
--------------------------------------------------------------------------------
/05-configmapsandsecrets/02-secrets/04-secret-sample.yaml:
--------------------------------------------------------------------------------
1 | # nginx Deployment mounting a TLS secret as a volume.
2 | # apps/v1: extensions/v1beta1 was deprecated and removed in Kubernetes 1.16.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 |   name: nginx
7 | spec:
8 |   replicas: 1
9 |   # apps/v1 requires an explicit selector; it must match the template labels.
10 |   selector:
11 |     matchLabels:
12 |       app: nginx
13 |   template:
14 |     metadata:
15 |       labels:
16 |         app: nginx
17 |     spec:
18 |       containers:
19 |       - image: nginx
20 |         name: nginx
21 |         volumeMounts:
22 |         - name: tls
23 |           mountPath: /usr/src/nginx/tls
24 |       volumes:
25 |       - name: tls
26 |         secret:
27 |           secretName: jenkins-tls-secret
--------------------------------------------------------------------------------
/05-configmapsandsecrets/02-secrets/Readme.md:
--------------------------------------------------------------------------------
1 | #### Generic Secrets
2 | Not for Production patterns. Instead of using secrets try to use Vault Systems
3 |
4 | ```bash
5 | kubectl create secret generic dev-db-secret --from-literal=username=devuser --from-literal=password='123456'
6 | kubectl get secret dev-db-secret -o yaml
7 | kubectl apply -f 01-secret-sample.yaml
8 | kubectl get pods
9 | kubectl exec -it pod2 -- env | grep PASS
10 | kubectl delete secret dev-db-secret
11 | kubectl delete -f 01-secret-sample.yaml
12 | ```
13 |
14 | ```bash
15 | kubectl create secret generic user-pass --from-file=userpass
16 | kubectl get secret
17 | kubectl get secret user-pass -o yaml
18 | kubectl apply -f 02-secret-sample.yaml
19 | kubectl exec -it pod2 -- cat /var/configuration/userpass
20 | ```
21 |
22 | ```yaml
23 | apiVersion: v1
24 | data:
25 | userpass: dXNlcm5hbWU9bXNkZXZlbmdlcnMKcGFzc3dvcmQ9MTIzNDU2
26 | kind: Secret
27 | metadata:
28 | creationTimestamp: "2019-12-21T17:58:10Z"
29 | name: user-pass
30 | namespace: default
31 | resourceVersion: "560828"
32 | selfLink: /api/v1/namespaces/default/secrets/user-pass
33 | uid: 94086743-8180-4cce-997d-143bb5875953
34 | type: Opaque
35 | ```
36 | ```bash
37 | kubectl get secrets/user-pass --template={{.data.userpass}} | base64 --decode
38 | ```
39 | CleanUp
40 | ```bash
41 | kubectl delete -f 02-secret-sample.yaml
42 | kubectl delete secret user-pass
43 | ```
44 |
45 |
46 | #### Image Secrets
47 | ```bash
48 | kubectl create secret docker-registry repository-secret \
49 | --docker-server=docker.io --docker-username=devengers \
50 | --docker-password=123456 --docker-email=devengers@dev.com
51 | kubectl apply -f 03-secret-sample.yaml
52 | ```
53 |
54 | ```yaml
55 | apiVersion: v1
56 | kind: Pod
57 | metadata:
58 | name: pod2
59 | spec:
60 | containers:
61 | - name: nginx
62 | image: nginx
63 | imagePullSecrets:
64 | - name: repository-secret
65 | ```
66 | Cleanup
67 | ```bash
68 | kubectl delete -f 03-secret-sample.yaml
69 | kubectl delete secret repository-secret
70 | ```
71 |
72 | #### TLS Secrets
73 | ```bash
74 | openssl req -newkey rsa:2048 -nodes -keyout jenkins.key -x509 -days 365 -out jenkins.crt
75 | kubectl create secret tls jenkins-tls-secret --key=jenkins.key --cert=jenkins.crt
76 | kubectl get secret
77 | ```
78 |
79 | ```
80 | NAME TYPE DATA AGE
81 | default-token-6p7c5 kubernetes.io/service-account-token 3 3d21h
82 | dev-db-secret Opaque 2 25m
83 | jenkins-tls-secret kubernetes.io/tls 2 7s
84 | reloader-reloader-token-rdd45 kubernetes.io/service-account-token 3 58m
85 | user-pass Opaque 1 37m
86 | ```
87 |
88 | ```bash
89 | kubectl get pods
90 | ```
91 | ```
92 | NAME READY STATUS RESTARTS AGE
93 | nginx-7476885559-w9bp9 1/1 Running 0 2m7s
94 | pod2 1/1 Running 0 24m
95 | reloader-reloader-79784f86c7-kzpml 1/1 Running 0 62m
96 | ```
97 |
98 | ```bash
99 | kubectl exec -it nginx-7476885559-w9bp9 -- ls /usr/src/nginx/tls
100 | ```
--------------------------------------------------------------------------------
/05-configmapsandsecrets/02-secrets/userpass:
--------------------------------------------------------------------------------
1 | username=msdevengers
2 | password=123456
--------------------------------------------------------------------------------
/06-storage/01-ephemeral-storage/01-emptydir-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: test-pd
5 | spec:
6 | containers:
7 | - image: nginx:1.7.9
8 | name: test-container
9 | volumeMounts:
10 | - mountPath: /usr/share/nginx/html
11 | name: cache-volume
12 | initContainers:
13 | - name: git-code-downlaod
14 | image: alpine/git
15 | command: ['sh', '-c', 'git clone https://github.com/wlsf82/helloworld.git; mv * /cache ']
16 | volumeMounts:
17 | - mountPath: /cache
18 | name: cache-volume
19 | volumes:
20 | - name: cache-volume
21 | emptyDir: {}
--------------------------------------------------------------------------------
/06-storage/01-ephemeral-storage/02-emptydir-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: test-pd
5 | spec:
6 | containers:
7 | - image: nginx:1.7.9
8 | name: test-container
9 | volumeMounts:
10 | - mountPath: /usr/share/nginx/html
11 | name: cache-volume
12 | - name: git-code-downlaod
13 | image: alpine/git
14 | command: ['sh', '-c', 'while true; do rm -rdf /cache/*; git clone https://github.com/wlsf82/helloworld.git; mv -f helloworld/ /cache; sleep 30; done; ']
15 | volumeMounts:
16 | - mountPath: /cache
17 | name: cache-volume
18 | volumes:
19 | - name: cache-volume
20 | emptyDir: {}
--------------------------------------------------------------------------------
/06-storage/01-ephemeral-storage/03-emptydir-pod.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: test-pd
6 | spec:
7 | containers:
8 | - image: nginx:1.7.9
9 | name: test-container
10 | volumeMounts:
11 | - mountPath: /usr/share/nginx/html
12 | name: cache-volume
13 | initContainers:
14 | - name: template-changer
15 | image: alpine/git
16 | command: ['sh', '-c', 'cp /conf/* /cache; chmod +x /cache/*']
17 | volumeMounts:
18 | - mountPath: /cache
19 | name: cache-volume
20 | - mountPath: /conf
21 | name: conf
22 |
23 | volumes:
24 | - name: cache-volume
25 | emptyDir: {}
26 | - name: conf
27 | configMap:
28 | name: mysql-conn
--------------------------------------------------------------------------------
/06-storage/01-ephemeral-storage/04-emptydir-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: test-pd
5 | spec:
6 | containers:
7 | - image: nginx:1.7.9
8 | name: test-container
9 | volumeMounts:
10 | - mountPath: /cache
11 | name: cache-volume
12 | volumes:
13 | - name: cache-volume
14 | emptyDir:
15 | medium: Memory
16 | sizeLimit: "1Gi"
--------------------------------------------------------------------------------
/06-storage/01-ephemeral-storage/05-downward-api.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-pd
5 | labels:
6 | zone: us-est-coast
7 | cluster: test-cluster1
8 | rack: rack-22
9 | annotations:
10 | build: two
11 | builder: john-doe
12 | spec:
13 | containers:
14 | - name: nginx-pd
15 | image: nginx:1.7.9
16 | volumeMounts:
17 | - name: podinfo
18 | mountPath: /etc/podinfo
19 | readOnly: false
20 | volumes:
21 | - name: podinfo
22 | downwardAPI:
23 | items:
24 | - path: "labels"
25 | fieldRef:
26 | fieldPath: metadata.labels
27 | - path: "annotations"
28 | fieldRef:
29 | fieldPath: metadata.annotations
--------------------------------------------------------------------------------
/06-storage/01-ephemeral-storage/06-secret-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: mysql-client
5 | spec:
6 | containers:
7 | - name: mysql-client
8 | image: nginx
9 | volumeMounts:
10 | - name: connection
11 | mountPath: "/etc/connection"
12 | readOnly: true
13 | volumes:
14 | - name: connection
15 | secret:
16 | secretName: mysqlconnection
--------------------------------------------------------------------------------
/06-storage/01-ephemeral-storage/Readme.md:
--------------------------------------------------------------------------------
1 |
2 | ### EmptyDir
3 | ```bash
4 | kubectl apply -f 01-emptydir-pod.yaml
5 | kubectl port-forward test-pd 8080:80
6 | curl -XGET http://localhost:8080/helloworld/helloworld.html
7 | kubectl delete -f 01-emptydir-pod.yaml
8 | ```
9 |
10 | ### Bad Practice
11 | ```bash
12 | kubectl apply -f 02-emptydir-pod.yaml
13 | kubectl port-forward test-pd 8080:80
14 | curl -XGET http://localhost:8080/helloworld/helloworld.html
15 | kubectl delete -f 02-emptydir-pod.yaml
16 | ```
17 |
18 | ### configmap readonly
19 | ```bash
20 | kubectl create configmap mysql-conn --from-file mysql.conn
21 | kubectl apply -f 03-emptydir-pod.yaml
22 | kubectl exec -it test-pd -- /bin/sh
23 | cd /usr/share/nginx/html
24 | ls -lart
25 | # find executable rights
26 | kubectl delete configmap mysql-conn
27 | kubectl delete -f 03-emptydir-pod.yaml
28 | ```
29 |
30 | ### Secret Volume
31 | ```bash
32 | echo mysql://root:topsecretpassword@remote_mysql:3306/pamir > mysql.conn
33 | kubectl create secret generic mysqlconnection --from-file=mysql.conn
34 | kubectl apply -f 06-secret-pod.yaml
35 | kubectl exec -it mysql-client -- /bin/sh
36 | ls /etc/connection
37 | cat /etc/connection/mysql.conn
38 | ```
--------------------------------------------------------------------------------
/06-storage/01-ephemeral-storage/mysql.conn:
--------------------------------------------------------------------------------
1 | mysql://root:topsecretpassword@remote_mysql:3306/pamir
--------------------------------------------------------------------------------
/06-storage/02-intro/01-pv/01-nfs-pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv-nfs-data
5 | spec:
6 | accessModes:
7 | - ReadWriteMany
8 | capacity:
9 | storage: 10Gi
10 | persistentVolumeReclaimPolicy: Retain
11 | nfs:
12 | server: 10.240.0.11
13 | path: /mnt/sharedfolder
14 |
15 |
--------------------------------------------------------------------------------
/06-storage/02-intro/01-pv/02-nfs-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: pvc-nfs-data
5 | annotations:
6 | volume.beta.kubernetes.io/storage-class: ""
7 | spec:
8 | accessModes:
9 | - ReadWriteMany
10 | resources:
11 | requests:
12 | storage: 10Gi
--------------------------------------------------------------------------------
/06-storage/02-intro/02-selectors/01-nfs-pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv-nfs-data
5 | labels:
6 | volume-type: nfs
7 | type: custom
8 | spec:
9 | accessModes:
10 | - ReadWriteMany
11 | capacity:
12 | storage: 10Gi
13 | persistentVolumeReclaimPolicy: Retain
14 | nfs:
15 | server: 10.240.0.11
16 | path: /mnt/sharedfolder
17 |
18 |
--------------------------------------------------------------------------------
/06-storage/02-intro/02-selectors/02-nfs-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: pvc-nfs-data
5 | annotations:
6 | volume.beta.kubernetes.io/storage-class: ""
7 | spec:
8 | accessModes:
9 | - ReadWriteMany
10 | resources:
11 | requests:
12 | storage: 10Gi
13 | selector:
14 | matchLabels:
15 | volume-type: nfs
16 | type: custom
--------------------------------------------------------------------------------
/06-storage/02-intro/03-storageclass/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | az provider register --namespace Microsoft.NetApp --wait
3 | az aks show --resource-group myResourceGroup --name myAKSCluster --query nodeResourceGroup -o tsv
4 | az netappfiles account create \
5 | --resource-group MC_myResourceGroup_myAKSCluster_eastus \
6 | --location eastus \
7 | --account-name myaccount1
8 |
9 | az netappfiles pool create \
10 | --resource-group MC_myResourceGroup_myAKSCluster_eastus \
11 | --location eastus \
12 | --account-name myaccount1 \
13 | --pool-name mypool1 \
14 | --size 4 \
15 | --service-level Premium
16 |
17 |
18 | RESOURCE_GROUP=MC_myResourceGroup_myAKSCluster_eastus
19 | VNET_NAME=$(az network vnet list --resource-group $RESOURCE_GROUP --query [].name -o tsv)
20 | VNET_ID=$(az network vnet show --resource-group $RESOURCE_GROUP --name $VNET_NAME --query "id" -o tsv)
21 | SUBNET_NAME=MyNetAppSubnet
22 | az network vnet subnet create \
23 | --resource-group $RESOURCE_GROUP \
24 | --vnet-name $VNET_NAME \
25 | --name $SUBNET_NAME \
26 | --delegations "Microsoft.NetApp/volumes" \
27 | --address-prefixes 10.0.0.0/28
28 |
29 | RESOURCE_GROUP=MC_myResourceGroup_myAKSCluster_eastus
30 | LOCATION=eastus
31 | ANF_ACCOUNT_NAME=myaccount1
32 | POOL_NAME=mypool1
33 | SERVICE_LEVEL=Premium
34 | VNET_NAME=$(az network vnet list --resource-group $RESOURCE_GROUP --query [].name -o tsv)
35 | VNET_ID=$(az network vnet show --resource-group $RESOURCE_GROUP --name $VNET_NAME --query "id" -o tsv)
36 | SUBNET_NAME=MyNetAppSubnet
37 | SUBNET_ID=$(az network vnet subnet show --resource-group $RESOURCE_GROUP --vnet-name $VNET_NAME --name $SUBNET_NAME --query "id" -o tsv)
38 | VOLUME_SIZE_GiB=100 # 100 GiB
39 | UNIQUE_FILE_PATH="myfilepath2" # Please note that creation token needs to be unique within all ANF Accounts
40 |
41 | az netappfiles volume create \
42 | --resource-group $RESOURCE_GROUP \
43 | --location $LOCATION \
44 | --account-name $ANF_ACCOUNT_NAME \
45 | --pool-name $POOL_NAME \
46 | --name "myvol1" \
47 | --service-level $SERVICE_LEVEL \
48 | --vnet $VNET_ID \
49 | --subnet $SUBNET_ID \
50 | --usage-threshold $VOLUME_SIZE_GiB \
51 | --creation-token $UNIQUE_FILE_PATH \
52 | --protocol-types "NFSv3"
53 |
54 | az netappfiles volume show --resource-group $RESOURCE_GROUP --account-name $ANF_ACCOUNT_NAME --pool-name $POOL_NAME --volume-name "myvol1"
55 |
56 | ```
57 |
58 | ```yaml
59 | ---
60 | apiVersion: v1
61 | kind: PersistentVolume
62 | metadata:
63 | name: pv-nfs
64 | spec:
65 | capacity:
66 | storage: 100Gi
67 | accessModes:
68 | - ReadWriteMany
69 | nfs:
70 | server: 10.0.0.4
71 | path: /myfilepath2
72 | ```
--------------------------------------------------------------------------------
/06-storage/02-intro/03-storageclass/azure-redundant-02.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | labels:
5 | kubernetes.io/cluster-service: "true"
6 | name: managed-ultra-lrs-02
7 | parameters:
8 | cachingmode: ReadOnly
9 | kind: Managed
10 | storageaccounttype: UltraSSD_LRS
11 | provisioner: kubernetes.io/azure-disk
12 | reclaimPolicy: Retain
13 | volumeBindingMode: Immediate
14 |
--------------------------------------------------------------------------------
/06-storage/02-intro/03-storageclass/azure-redundant-05.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | labels:
5 | kubernetes.io/cluster-service: "true"
6 | name: managed-ultra-lrs-05
7 | parameters:
8 | cachingmode: ReadOnly
9 | kind: Managed
10 | storageaccounttype: UltraSSD_LRS
11 | DiskIOPSReadWrite: "160000"
12 | DiskMBpsReadWrite: "2000"
13 | provisioner: kubernetes.io/azure-disk
14 | reclaimPolicy: Retain
15 | volumeBindingMode: Immediate
16 |
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/01-nfs/01-nfs-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: nfsdata
5 | spec:
6 | capacity:
7 | storage: 10Gi
8 | accessModes:
9 | - ReadWriteMany
10 | nfs:
11 | server: 10.240.0.11
12 | path: "/mnt/sharedfolder"
13 |
14 | ---
15 | kind: PersistentVolumeClaim
16 | apiVersion: v1
17 | metadata:
18 | name: nfsdata
19 | spec:
20 | accessModes:
21 | - ReadWriteMany
22 | storageClassName: ""
23 | resources:
24 | requests:
25 | storage: 10Gi
26 | ---
27 |
28 | apiVersion: apps/v1
29 | kind: Deployment
30 | metadata:
31 | name: test-pd
32 | labels:
33 | app: test-pd
34 | spec:
35 | replicas: 1
36 | selector:
37 | matchLabels:
38 | app: test-pd
39 | template:
40 | metadata:
41 | labels:
42 | app: test-pd
43 | spec:
44 | containers:
45 | - image: nginx:1.7.9
46 | name: test-container
47 | volumeMounts:
48 | - mountPath: /cache
49 | name: cache-volume
50 | volumes:
51 | - name: cache-volume
52 | persistentVolumeClaim:
53 | claimName: nfsdata
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/01-nfs/Readme.md:
--------------------------------------------------------------------------------
1 | #### NFS
2 | #### centos
3 | ```bash
4 | yum -y install nfs-utils
5 | mkdir /nfsroot
6 | vim /etc/exports
7 | ```
8 |
9 | #### ubuntu
10 | ```bash
11 | sudo apt-get update
12 | sudo apt install nfs-kernel-server
13 | sudo mkdir -p /mnt/sharedfolder
14 | sudo chown nobody:nogroup /mnt/sharedfolder
15 | sudo chmod 777 /mnt/sharedfolder
16 | sudo vim /etc/exports
17 | /mnt/sharedfolder 192.168.5.0/24(rw,sync,no_subtree_check)
18 | sudo exportfs -a
19 | sudo systemctl restart nfs-kernel-server
20 | ```
21 |
22 | Add the line below
23 | ```
24 | /nfsroot 192.168.5.0/24(ro,no_root_squash,no_subtree_check)
25 | ```
26 | ```bash
27 | exportfs -r
28 | /etc/init.d/nfs start
29 | showmount -e
30 | ```
31 |
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/02-glusterfs/01-hekiti/hekiti-replicated.json:
--------------------------------------------------------------------------------
1 | { "clusters": [
2 | {
3 | "nodes": [
4 | {
5 | "node": {
6 | "hostnames": {
7 | "manage": [
8 | ""
9 | ],
10 | "storage": [
11 | ""
12 | ]
13 | },
14 | "zone": 1
15 | },
16 | "devices": [
17 | "/dev/sdb"
18 | ]
19 | },
20 | {
21 | "node": {
22 | "hostnames": {
23 | "manage": [
24 | ""
25 | ],
26 | "storage": [
27 | ""
28 | ]
29 | },
30 | "zone": 1
31 | },
32 | "devices": [
33 | "/dev/sdb"
34 | ]
35 | }
36 | ]
37 | }
38 | ]
39 | }
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/02-glusterfs/01-hekiti/hekiti.json:
--------------------------------------------------------------------------------
1 | {
2 | "_port_comment": "Heketi Server Port Number",
3 | "port": "8080",
4 | "_use_auth": "Enable JWT authorization. Please enable for deployment",
5 | "use_auth": false,
6 | "_jwt": "Private keys for access",
7 | "jwt": {
8 | "_admin": "Admin has access to all APIs",
9 | "admin": {
10 | "key": "My Secret"
11 | },
12 | "_user": "User only has access to /volumes endpoint",
13 | "user": {
14 | "key": "My Secret"
15 | }
16 | },
17 | "_glusterfs_comment": "GlusterFS Configuration",
18 | "glusterfs": {
19 | "_executor_comment": "Execute plugin. Possible choices: mock, ssh",
20 | "executor": "ssh",
21 | "_sshexec_comment": "SSH username and private key file information",
22 | "sshexec": {
23 | "keyfile": "/etc/heketi/heketi_key",
24 | "user": "root",
25 | "port": "22",
26 | "fstab": "/etc/fstab"
27 | },
28 | "_db_comment": "Database file name",
29 | "db": "/var/lib/heketi/heketi.db"
30 | }
31 | }
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/02-glusterfs/01-hekiti/topology.json:
--------------------------------------------------------------------------------
1 | {
2 | "clusters": [
3 | {
4 | "nodes": [
5 | {
6 | "node": {
7 | "hostnames": {
8 | "manage": [
9 | "hostname or ip"
10 | ],
11 | "storage": [
12 | "hostname or ip"
13 | ]
14 | },
15 | "zone": 1
16 | },
17 | "devices": [
18 | "/dev/sdb1"
19 | ]
20 | }
21 | ]
22 | }
23 | ]
24 | }
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/02-glusterfs/02-sample/01-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-pod1
5 | labels:
6 | name: nginx-pod1
7 | spec:
8 | containers:
9 | - name: nginx-pod1
10 | image: nginx:1.7.9
11 | ports:
12 | - name: web
13 | containerPort: 80
14 | volumeMounts:
15 | - name: gluster-vol1
16 | mountPath: /usr/share/nginx/html
17 | volumes:
18 | - name: gluster-vol1
19 | persistentVolumeClaim:
20 | claimName: gluster1
21 |
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/02-glusterfs/02-sample/02-glusrer-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: gluster1
5 | annotations:
6 | volume.beta.kubernetes.io/storage-class: slow
7 | spec:
8 | accessModes:
9 | - ReadWriteMany
10 | resources:
11 | requests:
12 | storage: 5Gi
13 |
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/02-glusterfs/02-sample/03-gluster-storageclass.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: slow
5 | provisioner: kubernetes.io/glusterfs
6 | parameters:
7 | resturl: "http://192.168.102.9:8080"
8 | restauthenabled: "false"
9 | gidMin: "40000"
10 | gidMax: "50000"
11 | volumetype: "none"
12 |
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/02-glusterfs/Readme.md:
--------------------------------------------------------------------------------
1 | # kubernetes-glusterfs
2 | Glusterfs installation and Kubernetes StorageClass configuration
3 |
4 | ## Glusterfs Installation Steps
5 |
6 | ### Ubuntu
7 |
8 | - Create an Ubuntu 16.04 VM.
9 |
10 | - Attach a blank disk to VM.
11 |
12 | - Run the commands below to install glusterfs on Ubuntu.
13 |
14 | ```
15 | sudo apt-get -y update
16 | sudo apt-get -y install thin-provisioning-tools
17 | sudo apt-get -y install software-properties-common
18 | sudo apt-get -y install glusterfs-server
19 | sudo service glusterfs-server start
20 | ```
21 |
22 | - Run the command below to add disk device. (Optional)
23 |
24 | ```
25 | sudo fdisk /dev/sdb
26 |
27 | n for new partition
28 | p for primary partition
29 | 1 for partition number
30 | first and last sectors can be default, just press enter
31 | w for saving changes
32 | ```
33 |
34 | - Run "sudo fdisk -l" and see new /dev/sdb1 device. (Optional)
35 |
36 | ### CentOS
37 |
38 | - Create a CentOS 7 VM.
39 |
40 | - Attach a blank disk to VM.
41 |
42 | - Run the commands below to install glusterfs on CentOS.
43 |
44 | ```
45 | yum -y install centos-release-gluster
46 | yum -y install glusterfs-server
47 | systemctl enable glusterd
48 | systemctl start glusterd
49 | ```
50 |
51 | - Run the command below to add disk device. (Optional)
52 |
53 | ```
54 | fdisk /dev/sdb
55 |
56 | n for new partition
57 | p for primary partition
58 | 1 for partition number
59 | first and last sectors can be default, just press enter
60 | w for saving changes
61 | ```
62 |
63 | - Run "fdisk -l" and see new /dev/sdb1 device. (Optional)
64 |
65 | ## Heketi Installation Steps
66 |
67 | ### Ubuntu Installation Steps
68 |
69 | - Run the commands below to install heketi on Ubuntu.
70 |
71 | ```
72 | wget https://github.com/heketi/heketi/releases/download/v8.0.0/heketi-v8.0.0.linux.amd64.tar.gz
73 | tar -xvzf heketi-v8.0.0.linux.amd64.tar.gz
74 | cd heketi
75 | sudo mv heketi heketi-cli /usr/local/bin
76 | sudo groupadd -r -g 515 heketi
77 | sudo useradd -r -c "Heketi user" -d /var/lib/heketi -s /bin/false -m -u 515 -g heketi heketi
78 | sudo mkdir -p /var/lib/heketi && sudo chown -R heketi:heketi /var/lib/heketi
79 | sudo mkdir -p /var/log/heketi && sudo chown -R heketi:heketi /var/log/heketi
80 | sudo mkdir -p /etc/heketi
81 | ```
82 |
83 | - Run the commands below to configure root SSH authentication for heketi.
84 |
85 | ```
86 | sudo su
87 | ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''
88 | chown heketi:heketi /etc/heketi/heketi_key*
89 | cat /etc/heketi/heketi_key.pub >> /root/.ssh/authorized_keys
90 | ```
91 |
92 | - Switch back to ubuntu user and update heketi config. See heketi/heketi.json file.
93 |
94 | - Create a topology json file. See heketi/topology.json file.
95 |
96 | - Change glusterfs-server service name to glusterd and start glusterd, heketi with commands below.
97 |
98 | ```
99 | sudo mv /etc/init.d/glusterfs-server /etc/init.d/glusterd
100 | sudo systemctl daemon-reload
101 | sudo heketi --config=heketi.json
102 | ```
103 |
104 | - In another session run:
105 |
106 | ```
107 | heketi-cli topology load --json=topology.json
108 | ```
109 |
110 | ### CentOS Installation Steps
111 |
112 | - Run the commands below to install heketi on CentOS.
113 |
114 | ```
115 | yum -y install heketi
116 | yum -y install heketi-client
117 | mkdir -p /var/lib/heketi && chown -R heketi:heketi /var/lib/heketi
118 | mkdir -p /var/log/heketi && chown -R heketi:heketi /var/log/heketi
119 | mkdir -p /etc/heketi
120 | ```
121 |
122 | - Run the commands below to configure root SSH authentication for heketi.
123 |
124 | ```
125 | ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''
126 | chown heketi:heketi /etc/heketi/heketi_key*
127 | cat /etc/heketi/heketi_key.pub >> /root/.ssh/authorized_keys
128 | ```
129 |
130 | - Create a heketi config file. See heketi/heketi.json file.
131 |
132 | - Create a topology json file. See heketi/topology.json file.
133 |
134 | - In another session run:
135 |
136 | ```
137 | heketi-cli topology load --json=topology.json
138 | ```
139 |
140 | ## Kubernetes Configuration
141 |
142 | - Install glusterfs-client on Kubernetes worker nodes.
143 |
144 | Ubuntu:
145 | ```
146 | sudo apt-get -y install glusterfs-client
147 | ```
148 |
149 | CentOS:
150 | ```
151 | yum -y install glusterfs glusterfs-fuse
152 | ```
153 |
154 | - Run the commands below to create storage class and persistentvolumeclaim.
155 |
156 | ```
157 | kubectl apply -f kube/storageclass.yml
158 | kubectl apply -f kube/pvc.yml
159 | ```
160 |
161 | - See the example kube/nginx-glusterfs.yml file.
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/03-template/01-redis-template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | labels:
5 | app: redis-master
6 | name: redis-master
7 | namespace: redis
8 | spec:
9 | podManagementPolicy: OrderedReady
10 | replicas: 5
11 | revisionHistoryLimit: 10
12 | selector:
13 | matchLabels:
14 | app: redis-master
15 | serviceName: redis-master
16 | template:
17 | metadata:
18 | creationTimestamp: null
19 | labels:
20 | app: redis-master
21 | spec:
22 | containers:
23 | - args:
24 | - /opt/redis.conf
25 | command:
26 | - /opt/bin/k8s-redis-ha-server
27 | env:
28 | - name: SERVICE
29 | value: redis-master
30 | - name: SERVICE_PORT
31 | value: redis-master
32 | - name: SENTINEL
33 | value: redis-sentinel
34 | - name: SENTINEL_PORT
35 | value: redis-sentinel
36 | image: redis:4.0.11
37 | imagePullPolicy: IfNotPresent
38 | name: redis-master
39 | ports:
40 | - containerPort: 6379
41 | name: redis-master
42 | protocol: TCP
43 | readinessProbe:
44 | exec:
45 | command:
46 | - redis-cli
47 | - info
48 | - server
49 | failureThreshold: 3
50 | periodSeconds: 10
51 | successThreshold: 1
52 | timeoutSeconds: 1
53 | livenessProbe:
54 | exec:
55 | command:
56 | - redis-cli
57 | - info
58 | - server
59 | failureThreshold: 3
60 | periodSeconds: 10
61 | successThreshold: 1
62 | timeoutSeconds: 1
63 | resources:
64 | limits:
65 | cpu: "1"
66 | memory: 512Mi
67 | requests:
68 | cpu: "1"
69 | memory: 512Mi
70 | terminationMessagePath: /dev/termination-log
71 | terminationMessagePolicy: File
72 | volumeMounts:
73 | - mountPath: /data
74 | name: redis-master-volume
75 | - mountPath: /opt
76 | name: opt
77 | dnsPolicy: ClusterFirst
78 | initContainers:
79 | - command:
80 | - /bin/sh
81 | - -c
82 | - sysctl -w net.core.somaxconn=10000
83 | image: busybox
84 | imagePullPolicy: Always
85 | name: init-sysctl
86 | resources: {}
87 | securityContext:
88 | privileged: true
89 | terminationMessagePath: /dev/termination-log
90 | terminationMessagePolicy: File
91 |
92 | - image: rifatx/redis-replication:master-latest
93 | imagePullPolicy: Always
94 | name: redis-ha-master
95 | resources: {}
96 | terminationMessagePath: /dev/termination-log
97 | terminationMessagePolicy: File
98 | volumeMounts:
99 | - mountPath: /opt
100 | name: opt
101 |
102 |
103 | restartPolicy: Always
104 | schedulerName: default-scheduler
105 | securityContext: {}
106 | terminationGracePeriodSeconds: 30
107 |
108 | volumes:
109 | - emptyDir: {}
110 | name: opt
111 | volumeClaimTemplates:
112 | - metadata:
113 | annotations:
114 | volume.beta.kubernetes.io/storage-class: managed-premium
115 | name: redis-master-volume
116 | spec:
117 | storageClassName: managed-premium
118 | accessModes:
119 | - ReadWriteOnce
120 | resources:
121 | requests:
122 | storage: 100Gi
123 |
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/03-template/02-redis-template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | labels:
5 | app: redis-master
6 | name: redis-master
7 | namespace: redis
8 | spec:
9 | podManagementPolicy: OrderedReady
10 | replicas: 3
11 | revisionHistoryLimit: 10
12 | selector:
13 | matchLabels:
14 | app: redis-master
15 | serviceName: redis-master
16 | template:
17 | metadata:
18 | creationTimestamp: null
19 | labels:
20 | app: redis-master
21 | spec:
22 | affinity:
23 | podAntiAffinity:
24 | preferredDuringSchedulingIgnoredDuringExecution:
25 | - podAffinityTerm:
26 | labelSelector:
27 | matchExpressions:
28 | - key: app
29 | operator: In
30 | values:
31 | - redis-master
32 | topologyKey: kubernetes.io/hostname
33 | weight: 100
34 | containers:
35 | - args:
36 | - /opt/redis.conf
37 | command:
38 | - /opt/bin/k8s-redis-ha-server
39 | env:
40 | - name: SERVICE
41 | value: redis-master
42 | - name: SERVICE_PORT
43 | value: redis-master
44 | - name: SENTINEL
45 | value: redis-sentinel
46 | - name: SENTINEL_PORT
47 | value: redis-sentinel
48 | image: redis:4.0.11
49 | imagePullPolicy: IfNotPresent
50 | name: redis-master
51 | ports:
52 | - containerPort: 6379
53 | name: redis-master
54 | protocol: TCP
55 | readinessProbe:
56 | exec:
57 | command:
58 | - redis-cli
59 | - info
60 | - server
61 | failureThreshold: 3
62 | periodSeconds: 10
63 | successThreshold: 1
64 | timeoutSeconds: 1
65 | livenessProbe:
66 | exec:
67 | command:
68 | - redis-cli
69 | - info
70 | - server
71 | failureThreshold: 3
72 | periodSeconds: 10
73 | successThreshold: 1
74 | timeoutSeconds: 1
75 | resources:
76 | limits:
77 | cpu: "1"
78 | memory: 512Mi
79 | requests:
80 | cpu: "1"
81 | memory: 512Mi
82 | terminationMessagePath: /dev/termination-log
83 | terminationMessagePolicy: File
84 | volumeMounts:
85 | - mountPath: /data
86 | name: redis-master-data
87 | - mountPath: /opt
88 | name: opt
89 | dnsPolicy: ClusterFirst
90 | initContainers:
91 | - command:
92 | - /bin/sh
93 | - -c
94 | - sysctl -w net.core.somaxconn=10000
95 | image: busybox
96 | imagePullPolicy: Always
97 | name: init-sysctl
98 | resources: {}
99 | securityContext:
100 | privileged: true
101 | terminationMessagePath: /dev/termination-log
102 | terminationMessagePolicy: File
103 |
104 | - image: rifatx/redis-replication:master-latest
105 | imagePullPolicy: Always
106 | name: redis-ha-master
107 | resources: {}
108 | terminationMessagePath: /dev/termination-log
109 | terminationMessagePolicy: File
110 | volumeMounts:
111 | - mountPath: /opt
112 | name: opt
113 |
114 |
115 | - command:
116 | - /bin/sh
117 | - -c
118 | - cp -rf /data/* /data2/
119 | image: busybox
120 | imagePullPolicy: Always
121 | name: db-copy
122 | resources: {}
123 | securityContext:
124 | privileged: true
125 | terminationMessagePath: /dev/termination-log
126 | terminationMessagePolicy: File
127 | # imagePullPolicy: Always  # removed: duplicate key, already set at line 120
128 | volumeMounts:
129 | - mountPath: /data
130 | name: redis-master-volume
131 | - mountPath: /data2
132 | name: redis-master-data
133 | restartPolicy: Always
134 | schedulerName: default-scheduler
135 | securityContext: {}
136 | terminationGracePeriodSeconds: 30
137 |
138 | volumes:
139 | - emptyDir: {}
140 | name: opt
141 | volumeClaimTemplates:
142 | - metadata:
143 | annotations:
144 | volume.beta.kubernetes.io/storage-class: standard
145 | name: redis-master-volume
146 | spec:
147 | storageClassName: standard
148 | accessModes:
149 | - ReadWriteOnce
150 | resources:
151 | requests:
152 | storage: 10Gi
153 | - metadata:
154 | annotations:
155 | volume.beta.kubernetes.io/storage-class: ssd
156 | name: redis-master-data
157 | spec:
158 | storageClassName: ssd
159 | accessModes:
160 | - ReadWriteOnce
161 | resources:
162 | requests:
163 | storage: 10Gi
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/03-template/03-redis-template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | labels:
5 | app: redis-master
6 | name: redis-master
7 | namespace: redis
8 | spec:
9 | podManagementPolicy: OrderedReady
10 | replicas: 3
11 | revisionHistoryLimit: 10
12 | selector:
13 | matchLabels:
14 | app: redis-master
15 | serviceName: redis-master
16 | template:
17 | metadata:
18 | creationTimestamp: null
19 | labels:
20 | app: redis-master
21 | spec:
22 | affinity:
23 | podAntiAffinity:
24 | preferredDuringSchedulingIgnoredDuringExecution:
25 | - podAffinityTerm:
26 | labelSelector:
27 | matchExpressions:
28 | - key: app
29 | operator: In
30 | values:
31 | - redis-master
32 | topologyKey: kubernetes.io/hostname
33 | weight: 100
34 | containers:
35 | - args:
36 | - /opt/redis.conf
37 | command:
38 | - /opt/bin/k8s-redis-ha-server
39 | env:
40 | - name: SERVICE
41 | value: redis-master
42 | - name: SERVICE_PORT
43 | value: redis-master
44 | - name: SENTINEL
45 | value: redis-sentinel
46 | - name: SENTINEL_PORT
47 | value: redis-sentinel
48 | image: redis:4.0.11
49 | imagePullPolicy: IfNotPresent
50 | name: redis-master
51 | ports:
52 | - containerPort: 6379
53 | name: redis-master
54 | protocol: TCP
55 | readinessProbe:
56 | exec:
57 | command:
58 | - redis-cli
59 | - info
60 | - server
61 | failureThreshold: 3
62 | periodSeconds: 10
63 | successThreshold: 1
64 | timeoutSeconds: 1
65 | livenessProbe:
66 | exec:
67 | command:
68 | - redis-cli
69 | - info
70 | - server
71 | failureThreshold: 3
72 | periodSeconds: 10
73 | successThreshold: 1
74 | timeoutSeconds: 1
75 | resources:
76 | limits:
77 | cpu: "1"
78 | memory: 512Mi
79 | requests:
80 | cpu: "1"
81 | memory: 512Mi
82 | terminationMessagePath: /dev/termination-log
83 | terminationMessagePolicy: File
84 | volumeMounts:
85 | - mountPath: /data
86 | name: redis-master-data
87 | - mountPath: /opt
88 | name: opt
89 | dnsPolicy: ClusterFirst
90 | initContainers:
91 | - command:
92 | - /bin/sh
93 | - -c
94 | - sysctl -w net.core.somaxconn=10000
95 | image: busybox
96 | imagePullPolicy: Always
97 | name: init-sysctl
98 | resources: {}
99 | securityContext:
100 | privileged: true
101 | terminationMessagePath: /dev/termination-log
102 | terminationMessagePolicy: File
103 |
104 | - image: rifatx/redis-replication:master-latest
105 | imagePullPolicy: Always
106 | name: redis-ha-master
107 | resources: {}
108 | terminationMessagePath: /dev/termination-log
109 | terminationMessagePolicy: File
110 | volumeMounts:
111 | - mountPath: /opt
112 | name: opt
113 | restartPolicy: Always
114 | schedulerName: default-scheduler
115 | securityContext: {}
116 | terminationGracePeriodSeconds: 30
117 |
118 | volumes:
119 | - emptyDir: {}
120 | name: opt
121 | volumeClaimTemplates:
122 | - metadata:
123 | annotations:
124 | volume.beta.kubernetes.io/storage-class: ssd
125 | name: redis-master-data
126 | spec:
127 | storageClassName: ssd
128 | accessModes:
129 | - ReadWriteOnce
130 | resources:
131 | requests:
132 | storage: 10Gi
--------------------------------------------------------------------------------
/06-storage/03-storage/01-networked-storage/03-template/Readme.md:
--------------------------------------------------------------------------------
1 | ### wcgw
2 | ```bash
3 | kubectl create ns redis
4 | kubectl apply -f 01-redis-template.yaml
5 | kubectl get pods -n redis
6 | kubectl get pvc -n redis
7 | kubectl get pv
8 | kubectl exec -it redis-master-0 -n redis -- /bin/sh
9 | redis-cli
10 | set name pamir
11 | 127.0.0.1:6379> get name
12 | "pamir"
13 | exit
14 | exit
15 | kubectl delete -f 01-redis-template.yaml
16 | kubectl apply -f 02-redis-template.yaml
17 | kubectl exec -it redis-master-0 -n redis -- /bin/sh
18 | # redis-cli
19 | 127.0.0.1:6379> get name
20 | "pamir"
21 | 127.0.0.1:6379> exit
22 | # exit
23 | kubectl delete -f 02-redis-template.yaml
24 | kubectl apply -f 03-redis-template.yaml
25 | ```
--------------------------------------------------------------------------------
/06-storage/03-storage/02-block-storage/01-azurefile/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | kubectl get storageclass
3 | ```
4 | ```yaml
5 | kind: StorageClass
6 | apiVersion: storage.k8s.io/v1
7 | metadata:
8 | name: azurefile
9 | provisioner: kubernetes.io/azure-file
10 | mountOptions:
11 | - dir_mode=0777
12 | - file_mode=0777
13 | - uid=1000
14 | - gid=1000
15 | - mfsymlinks
16 | - nobrl
17 | - cache=none
18 | parameters:
19 | skuName: Standard_LRS
20 | ```
21 |
22 |
23 | ```bash
24 | kubectl apply -f azure-file-sc.yaml
25 | ```
26 |
27 | ```yaml
28 | apiVersion: v1
29 | kind: PersistentVolumeClaim
30 | metadata:
31 | name: azurefile
32 | spec:
33 | accessModes:
34 | - ReadWriteMany
35 | storageClassName: azurefile
36 | resources:
37 | requests:
38 | storage: 5Gi
39 | ```
40 |
41 | ```bash
42 | kubectl apply -f azure-file-pvc.yaml
43 | ```
44 |
45 | ```yaml
46 | kind: Pod
47 | apiVersion: v1
48 | metadata:
49 | name: mypod
50 | spec:
51 | containers:
52 | - name: mypod
53 | image: nginx:1.15.5
54 | resources:
55 | requests:
56 | cpu: 100m
57 | memory: 128Mi
58 | limits:
59 | cpu: 250m
60 | memory: 256Mi
61 | volumeMounts:
62 | - mountPath: "/mnt/azure"
63 | name: volume
64 | volumes:
65 | - name: volume
66 | persistentVolumeClaim:
67 | claimName: azurefile
68 | ```
69 |
70 | ```bash
71 | kubectl apply -f azure-pvc-files.yaml
72 | ```
--------------------------------------------------------------------------------
/06-storage/03-storage/02-block-storage/02-netapp/Readme.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devlerazureda/kubernetes-essentials/9ba1e24a0cd45be98d3b6794b8690bfae6ed3f93/06-storage/03-storage/02-block-storage/02-netapp/Readme.md
--------------------------------------------------------------------------------
/06-storage/03-storage/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | az provider register --namespace Microsoft.NetApp --wait
3 | az aks show --resource-group myResourceGroup --name myAKSCluster --query nodeResourceGroup -o tsv
4 | az netappfiles account create \
5 | --resource-group MC_myResourceGroup_myAKSCluster_eastus \
6 | --location eastus \
7 | --account-name myaccount1
8 |
9 | az netappfiles pool create \
10 | --resource-group MC_myResourceGroup_myAKSCluster_eastus \
11 | --location eastus \
12 | --account-name myaccount1 \
13 | --pool-name mypool1 \
14 | --size 4 \
15 | --service-level Premium
16 |
17 |
18 | RESOURCE_GROUP=MC_myResourceGroup_myAKSCluster_eastus
19 | VNET_NAME=$(az network vnet list --resource-group $RESOURCE_GROUP --query [].name -o tsv)
20 | VNET_ID=$(az network vnet show --resource-group $RESOURCE_GROUP --name $VNET_NAME --query "id" -o tsv)
21 | SUBNET_NAME=MyNetAppSubnet
22 | az network vnet subnet create \
23 | --resource-group $RESOURCE_GROUP \
24 | --vnet-name $VNET_NAME \
25 | --name $SUBNET_NAME \
26 | --delegations "Microsoft.NetApp/volumes" \
27 | --address-prefixes 10.0.0.0/28
28 |
29 | RESOURCE_GROUP=MC_myResourceGroup_myAKSCluster_eastus
30 | LOCATION=eastus
31 | ANF_ACCOUNT_NAME=myaccount1
32 | POOL_NAME=mypool1
33 | SERVICE_LEVEL=Premium
34 | VNET_NAME=$(az network vnet list --resource-group $RESOURCE_GROUP --query [].name -o tsv)
35 | VNET_ID=$(az network vnet show --resource-group $RESOURCE_GROUP --name $VNET_NAME --query "id" -o tsv)
36 | SUBNET_NAME=MyNetAppSubnet
37 | SUBNET_ID=$(az network vnet subnet show --resource-group $RESOURCE_GROUP --vnet-name $VNET_NAME --name $SUBNET_NAME --query "id" -o tsv)
38 | VOLUME_SIZE_GiB=100 # 100 GiB
39 | UNIQUE_FILE_PATH="myfilepath2" # Please note that creation token needs to be unique within all ANF Accounts
40 |
41 | az netappfiles volume create \
42 | --resource-group $RESOURCE_GROUP \
43 | --location $LOCATION \
44 | --account-name $ANF_ACCOUNT_NAME \
45 | --pool-name $POOL_NAME \
46 | --name "myvol1" \
47 | --service-level $SERVICE_LEVEL \
48 | --vnet $VNET_ID \
49 | --subnet $SUBNET_ID \
50 | --usage-threshold $VOLUME_SIZE_GiB \
51 | --creation-token $UNIQUE_FILE_PATH \
52 | --protocol-types "NFSv3"
53 |
54 | az netappfiles volume show --resource-group $RESOURCE_GROUP --account-name $ANF_ACCOUNT_NAME --pool-name $POOL_NAME --volume-name "myvol1"
55 |
56 | ```
57 |
58 | ```yaml
59 | ---
60 | apiVersion: v1
61 | kind: PersistentVolume
62 | metadata:
63 | name: pv-nfs
64 | spec:
65 | capacity:
66 | storage: 100Gi
67 | accessModes:
68 | - ReadWriteMany
69 | nfs:
70 | server: 10.0.0.4
71 | path: /myfilepath2
72 | ```
--------------------------------------------------------------------------------
/06-storage/04-dbench/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM dmonakhov/alpine-fio
2 |
3 | LABEL maintainer="Lee Liu"
4 |
5 | VOLUME /tmp
6 | WORKDIR /tmp
7 | COPY ./docker-entrypoint.sh /
8 | ENTRYPOINT ["/docker-entrypoint.sh"]
9 | CMD ["fio"]
10 |
--------------------------------------------------------------------------------
/06-storage/04-dbench/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 LogDNA
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/06-storage/04-dbench/README.md:
--------------------------------------------------------------------------------
1 | # dbench
2 | Benchmark Kubernetes persistent disk volumes with `fio`: Read/write IOPS, bandwidth MB/s and latency.
3 |
4 | # Usage
5 |
6 | 1. Download [dbench.yaml](https://raw.githubusercontent.com/logdna/dbench/master/dbench.yaml) and edit the `storageClassName` to match your Kubernetes provider's Storage Class `kubectl get storageclasses`
7 | 2. Deploy Dbench using: `kubectl apply -f dbench.yaml`
8 | 3. Once deployed, the Dbench Job will:
9 | * provision a Persistent Volume of `1000Gi` (default) using `storageClassName: ssd` (default)
10 | * run a series of `fio` tests on the newly provisioned disk
11 | * currently there are 9 tests, 15s per test - total runtime is ~2.5 minutes
12 | 4. Follow benchmarking progress using: `kubectl logs -f job/dbench` (empty output means the Job not yet created, or `storageClassName` is invalid, see Troubleshooting below)
13 | 5. At the end of all tests, you'll see a summary that looks similar to this:
14 | ```
15 | ==================
16 | = Dbench Summary =
17 | ==================
18 | Random Read/Write IOPS: 75.7k/59.7k. BW: 523MiB/s / 500MiB/s
19 | Average Latency (usec) Read/Write: 183.07/76.91
20 | Sequential Read/Write: 536MiB/s / 512MiB/s
21 | Mixed Random Read/Write IOPS: 43.1k/14.4k
22 | ```
23 | 6. Once the tests are finished, clean up using: `kubectl delete -f dbench.yaml` and that should deprovision the persistent disk and delete it to minimize storage billing.
24 |
25 | ## Notes / Troubleshooting
26 |
27 | * If the Persistent Volume Claim is stuck on Pending, it's likely you didn't specify a valid Storage Class. Double check using `kubectl get storageclasses`. Also check that the volume size of `1000Gi` (default) is available for provisioning.
28 | * It can take some time for a Persistent Volume to be Bound and the Kubernetes Dashboard UI will show the Dbench Job as red until the volume is finished provisioning.
29 | * It's useful to test multiple disk sizes as most cloud providers price IOPS per GB provisioned. So a `4000Gi` volume will perform better than a `1000Gi` volume. Just edit the yaml, `kubectl delete -f dbench.yaml` and run `kubectl apply -f dbench.yaml` again after deprovision/delete completes.
* A list of all `fio` tests is in [docker-entrypoint.sh](https://github.com/logdna/dbench/blob/master/docker-entrypoint.sh).
31 |
32 | ## Contributors
33 |
34 | * Lee Liu (LogDNA)
35 | * [Alexis Turpin](https://github.com/alexis-turpin)
36 |
37 | ## License
38 |
39 | * MIT
40 |
--------------------------------------------------------------------------------
/06-storage/04-dbench/dbench.yaml:
--------------------------------------------------------------------------------
# Persistent Volume Claim that the dbench Job below benchmarks.
# Edit storageClassName (and the requested size) to match the provisioner
# available in your cluster: `kubectl get storageclasses`.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: dbench-pv-claim
spec:
  # Azure premium managed disks; alternatives for other providers below.
  storageClassName: managed-premium
  # storageClassName: gp2
  # storageClassName: local-storage
  # storageClassName: ibmc-block-bronze
  # storageClassName: ibmc-block-silver
  # storageClassName: ibmc-block-gold
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      # Most cloud providers scale IOPS with provisioned GB, so size
      # directly affects the benchmark results.
      storage: 1000Gi
---
# One-shot Job that mounts the claim at /data and runs the fio test
# battery from docker-entrypoint.sh against it.
apiVersion: batch/v1
kind: Job
metadata:
  name: dbench
spec:
  template:
    spec:
      containers:
      - name: dbench
        image: ndrpnt/dbench:1.0.0
        imagePullPolicy: Always
        env:
          # Directory the entrypoint benchmarks; must match the volumeMount.
          - name: DBENCH_MOUNTPOINT
            value: /data
          # Optional knobs understood by docker-entrypoint.sh:
          # - name: DBENCH_QUICK
          #   value: "yes"
          # - name: FIO_SIZE
          #   value: 1G
          # - name: FIO_OFFSET_INCREMENT
          #   value: 256M
          # - name: FIO_DIRECT
          #   value: "0"
        volumeMounts:
        - name: dbench-pv
          mountPath: /data
      # Benchmark pods are one-shot; rely on backoffLimit for retries.
      restartPolicy: Never
      volumes:
      - name: dbench-pv
        persistentVolumeClaim:
          claimName: dbench-pv-claim
  backoffLimit: 4
49 |
--------------------------------------------------------------------------------
/06-storage/04-dbench/docker-entrypoint.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
# dbench entrypoint. When invoked with the single argument "fio" (the
# image's default CMD) it runs a battery of fio benchmarks against
# $DBENCH_MOUNTPOINT and prints a summary; any other command line is
# exec'd unchanged.
#
# Fixes over the original:
#  - `==` inside `[ ]` is a bashism and is not guaranteed under
#    `#!/usr/bin/env sh`; POSIX `=` / `-z` are used instead.
#  - unquoted expansions (`[ -z $VAR ]`, `rm $DBENCH_MOUNTPOINT/...`)
#    misbehave when values contain whitespace; they are now quoted or
#    replaced with ${VAR:-default} expansion.
set -e

# Tunables, all overridable from the environment (see dbench.yaml).
DBENCH_MOUNTPOINT=${DBENCH_MOUNTPOINT:-/tmp}        # directory holding the fio test file
FIO_SIZE=${FIO_SIZE:-2G}                            # size of the fio test file
FIO_OFFSET_INCREMENT=${FIO_OFFSET_INCREMENT:-500M}  # per-thread offset for sequential tests
FIO_DIRECT=${FIO_DIRECT:-1}                         # 1 = O_DIRECT, bypass the page cache

echo "Working dir: $DBENCH_MOUNTPOINT"
echo

if [ "$1" = 'fio' ]; then

  echo Testing Read IOPS...
  READ_IOPS=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --gtod_reduce=1 --name=read_iops --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=4K --iodepth=64 --size="$FIO_SIZE" --readwrite=randread --time_based --ramp_time=2s --runtime=15s)
  echo "$READ_IOPS"
  READ_IOPS_VAL=$(echo "$READ_IOPS"|grep -E 'read ?:'|grep -Eoi 'IOPS=[0-9k.]+'|cut -d'=' -f2)
  echo
  echo

  echo Testing Write IOPS...
  WRITE_IOPS=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --gtod_reduce=1 --name=write_iops --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=4K --iodepth=64 --size="$FIO_SIZE" --readwrite=randwrite --time_based --ramp_time=2s --runtime=15s)
  echo "$WRITE_IOPS"
  WRITE_IOPS_VAL=$(echo "$WRITE_IOPS"|grep -E 'write:'|grep -Eoi 'IOPS=[0-9k.]+'|cut -d'=' -f2)
  echo
  echo

  echo Testing Read Bandwidth...
  READ_BW=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --gtod_reduce=1 --name=read_bw --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=128K --iodepth=64 --size="$FIO_SIZE" --readwrite=randread --time_based --ramp_time=2s --runtime=15s)
  echo "$READ_BW"
  READ_BW_VAL=$(echo "$READ_BW"|grep -E 'read ?:'|grep -Eoi 'BW=[0-9GMKiBs/.]+'|cut -d'=' -f2)
  echo
  echo

  echo Testing Write Bandwidth...
  WRITE_BW=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --gtod_reduce=1 --name=write_bw --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=128K --iodepth=64 --size="$FIO_SIZE" --readwrite=randwrite --time_based --ramp_time=2s --runtime=15s)
  echo "$WRITE_BW"
  WRITE_BW_VAL=$(echo "$WRITE_BW"|grep -E 'write:'|grep -Eoi 'BW=[0-9GMKiBs/.]+'|cut -d'=' -f2)
  echo
  echo

  # The latency / sequential / mixed tests are skipped in quick mode
  # (DBENCH_QUICK set to anything other than "no").
  if [ -z "$DBENCH_QUICK" ] || [ "$DBENCH_QUICK" = "no" ]; then
    echo Testing Read Latency...
    READ_LATENCY=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --name=read_latency --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=4K --iodepth=4 --size="$FIO_SIZE" --readwrite=randread --time_based --ramp_time=2s --runtime=15s)
    echo "$READ_LATENCY"
    READ_LATENCY_VAL=$(echo "$READ_LATENCY"|grep ' lat.*avg'|grep -Eoi 'avg=[0-9.]+'|cut -d'=' -f2)
    echo
    echo

    echo Testing Write Latency...
    WRITE_LATENCY=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --name=write_latency --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=4K --iodepth=4 --size="$FIO_SIZE" --readwrite=randwrite --time_based --ramp_time=2s --runtime=15s)
    echo "$WRITE_LATENCY"
    WRITE_LATENCY_VAL=$(echo "$WRITE_LATENCY"|grep ' lat.*avg'|grep -Eoi 'avg=[0-9.]+'|cut -d'=' -f2)
    echo
    echo

    echo Testing Read Sequential Speed...
    READ_SEQ=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --gtod_reduce=1 --name=read_seq --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=1M --iodepth=16 --size="$FIO_SIZE" --readwrite=read --time_based --ramp_time=2s --runtime=15s --thread --numjobs=4 --offset_increment="$FIO_OFFSET_INCREMENT")
    echo "$READ_SEQ"
    READ_SEQ_VAL=$(echo "$READ_SEQ"|grep -E 'READ:'|grep -Eoi '(aggrb|bw)=[0-9GMKiBs/.]+'|cut -d'=' -f2)
    echo
    echo

    echo Testing Write Sequential Speed...
    WRITE_SEQ=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --gtod_reduce=1 --name=write_seq --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=1M --iodepth=16 --size="$FIO_SIZE" --readwrite=write --time_based --ramp_time=2s --runtime=15s --thread --numjobs=4 --offset_increment="$FIO_OFFSET_INCREMENT")
    echo "$WRITE_SEQ"
    WRITE_SEQ_VAL=$(echo "$WRITE_SEQ"|grep -E 'WRITE:'|grep -Eoi '(aggrb|bw)=[0-9GMKiBs/.]+'|cut -d'=' -f2)
    echo
    echo

    echo Testing Read/Write Mixed...
    RW_MIX=$(fio --randrepeat=0 --verify=0 --ioengine=libaio --direct="$FIO_DIRECT" --gtod_reduce=1 --name=rw_mix --filename="$DBENCH_MOUNTPOINT/fiotest" --bs=4k --iodepth=64 --size="$FIO_SIZE" --readwrite=randrw --rwmixread=75 --time_based --ramp_time=2s --runtime=15s)
    echo "$RW_MIX"
    RW_MIX_R_IOPS=$(echo "$RW_MIX"|grep -E 'read ?:'|grep -Eoi 'IOPS=[0-9k.]+'|cut -d'=' -f2)
    RW_MIX_W_IOPS=$(echo "$RW_MIX"|grep -E 'write:'|grep -Eoi 'IOPS=[0-9k.]+'|cut -d'=' -f2)
    echo
    echo
  fi

  echo All tests complete.
  echo
  echo ==================
  echo = Dbench Summary =
  echo ==================
  echo "Random Read/Write IOPS: $READ_IOPS_VAL/$WRITE_IOPS_VAL. BW: $READ_BW_VAL / $WRITE_BW_VAL"
  if [ -z "$DBENCH_QUICK" ] || [ "$DBENCH_QUICK" = "no" ]; then
    echo "Average Latency (usec) Read/Write: $READ_LATENCY_VAL/$WRITE_LATENCY_VAL"
    echo "Sequential Read/Write: $READ_SEQ_VAL / $WRITE_SEQ_VAL"
    echo "Mixed Random Read/Write IOPS: $RW_MIX_R_IOPS/$RW_MIX_W_IOPS"
  fi

  # Clean up the test file so a re-run starts fresh.
  rm "$DBENCH_MOUNTPOINT/fiotest"
  exit 0
fi

exec "$@"
108 |
--------------------------------------------------------------------------------
/06-storage/Readme.md:
--------------------------------------------------------------------------------
1 | Topics
2 | ======
3 | Remote Storage
4 | --------
5 | - NFS
6 | - GlusterFS
7 | - Ceph File
8 | - VMWare Photon FD
9 | - Azure File Storage
10 | - Azure Data Disk
11 | - GCE Persistent Disk
12 | Ephemeral Storage
13 | ----
14 | - EmptyDir
15 | - Kubernetes API
16 | - Secret
17 | - ConfigMap
 - DownwardAPI
19 |
20 | Links
21 | ----
22 | - How volumes and storage work in Kubernetes - https://www.youtube.com/watch?v=inJ7YJ-jt8I
23 | - Kubernetes Storage Lingo 101 - https://www.youtube.com/watch?v=uSxlgK1bCuA
24 | - Kubernetes Secrets and ConfigMaps - https://www.youtube.com/watch?v=GoITFljdJdo
--------------------------------------------------------------------------------
/07-workloads/01-daemonset/fluentd-daemonset-syslog.yaml:
--------------------------------------------------------------------------------
---
# ServiceAccount the fluentd DaemonSet pods run as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
    version: v1

---
# fluentd needs read-only access to pod and namespace metadata to enrich
# log records. rbac.authorization.k8s.io/v1beta1 was removed in
# Kubernetes 1.22; the stable v1 API is used instead.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  # ClusterRoles are cluster-scoped; the original `namespace: kube-system`
  # field was ignored by the API server and has been dropped.
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: kube-system

---
# DaemonSet: one fluentd pod per eligible node, tailing host logs and
# forwarding them to a syslog endpoint.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
    version: v1
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-logging
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
    spec:
      # `serviceAccount` is the deprecated alias of `serviceAccountName`;
      # only the canonical field is kept.
      serviceAccountName: fluentd
      tolerations:
      # Allow scheduling onto control-plane (master) nodes as well.
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:v1-debian-syslog
        env:
          # Destination syslog endpoint for forwarded logs.
          - name: SYSLOG_HOST
            value: "sysloghost"
          - name: SYSLOG_PORT
            value: "514"
          - name: SYSLOG_PROTOCOL
            value: "udp"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      # NOTE(review): restricts the DaemonSet to nodes labeled cpu=kotu;
      # confirm this label exists in the target cluster.
      nodeSelector:
        cpu: kotu
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
98 |
--------------------------------------------------------------------------------
/07-workloads/02-job/01-job.yaml:
--------------------------------------------------------------------------------
# Minimal batch Job: runs a single pod to completion.
apiVersion: batch/v1
kind: Job
metadata:
  name: kubernetes-job-example
  labels:
    # Shared label so related example Jobs can be listed/deleted together.
    jobgroup: jobexample
spec:
  template:
    metadata:
      name: kubejob
      labels:
        jobgroup: jobexample
    spec:
      containers:
      - name: c
        image: devopscube/kubernetes-job-demo:latest
        # Argument passed to the demo image; presumably a work-duration or
        # iteration count — confirm against the image's docs.
        args: ["100"]
      # OnFailure: restart the container in place on error instead of
      # immediately failing the Job.
      restartPolicy: OnFailure
19 |
--------------------------------------------------------------------------------
/07-workloads/02-job/02-job.yaml:
--------------------------------------------------------------------------------
# Parallel batch Job: up to 5 pods run concurrently, and the whole Job is
# killed if it has not completed within activeDeadlineSeconds.
# NOTE(review): shares metadata.name with 01-job.yaml — applying both in the
# same namespace will conflict; rename one if they must coexist.
apiVersion: batch/v1
kind: Job
metadata:
  name: kubernetes-job-example
  labels:
    jobgroup: jobexample
spec:
  # Run up to 5 pods at the same time.
  parallelism: 5
  # Hard wall-clock limit for the whole Job, in seconds.
  activeDeadlineSeconds: 100
  template:
    metadata:
      name: kubejob
      labels:
        jobgroup: jobexample
    spec:
      containers:
      - name: c
        image: devopscube/kubernetes-job-demo:latest
        # Argument passed to the demo image; presumably a work-duration or
        # iteration count — confirm against the image's docs.
        args: ["100"]
      restartPolicy: OnFailure
21 |
--------------------------------------------------------------------------------
/07-workloads/02-job/03-cronjob.yaml:
--------------------------------------------------------------------------------
# CronJob that prints the date and a greeting every minute.
# batch/v1beta1 CronJob was removed in Kubernetes 1.25; batch/v1 is the
# stable API (available since 1.21).
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  # Run once per minute.
  schedule: "*/1 * * * *"
  # Replace: if the previous run is still active, cancel it and start a
  # new one instead of running both.
  concurrencyPolicy: Replace
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
--------------------------------------------------------------------------------
/07-workloads/02-job/04-cronjob.yaml:
--------------------------------------------------------------------------------
# CronJob demonstrating parallelism: every scheduled run is a Job of 3
# concurrent pods, each sleeping 100s (longer than the 1-minute schedule,
# so concurrencyPolicy matters).
# batch/v1beta1 CronJob was removed in Kubernetes 1.25; batch/v1 is the
# stable API (available since 1.21).
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  # Replace: a still-running Job from the previous tick is cancelled and
  # replaced by the new one.
  concurrencyPolicy: Replace
  jobTemplate:
    spec:
      # JobSpec.parallelism: each scheduled Job runs 3 pods concurrently.
      parallelism: 3
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - sleep 100
          restartPolicy: OnFailure
21 |
--------------------------------------------------------------------------------
/07-workloads/02-job/Readme.md:
--------------------------------------------------------------------------------
1 | ```
2 | Add Pod AntiAffinity to 02 Job Yaml
3 | https://stackoverflow.com/questions/56224595/deployment-fails-on-anti-affinity-rule-with-topologykey-kubernetes-io-hostname
CronJob Parallelism
5 | https://www.alibabacloud.com/blog/kubernetes-cronjobs---part-2-parallelism_595022
6 |
7 | ```
8 |
--------------------------------------------------------------------------------
/07-workloads/Readme.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devlerazureda/kubernetes-essentials/9ba1e24a0cd45be98d3b6794b8690bfae6ed3f93/07-workloads/Readme.md
--------------------------------------------------------------------------------
/09-ingress/01-nginx-ingress-controller/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | helm install --name nginx-ingress stable/nginx-ingress -f values.yaml --namespace nginx-ingress
3 | ```
4 |
5 | ```bash
6 | kubens nginx-ingress
7 | #kubectl get pods -n nginx-ingress
8 | kgpo
9 | ```
10 | ```
11 | NAME READY STATUS RESTARTS AGE
12 | nginx-ingress-controller-96725 1/1 Running 0 67m
13 | nginx-ingress-controller-cwv5r 1/1 Running 0 68m
14 | nginx-ingress-controller-fg92m 1/1 Running 0 69m
15 | nginx-ingress-default-backend-576b86996d-hb49n 1/1 Running 0 3d12h
16 | ```
17 | ```bash
18 | #kubectl get services -n nginx-ingress
19 | kgsvc
20 | ```
21 |
22 | ```
23 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
24 | nginx-ingress-controller LoadBalancer 10.0.234.248 51.105.101.142 80:30745/TCP,443:31815/TCP 7d19h
25 | nginx-ingress-controller-metrics ClusterIP 10.0.172.96 9913/TCP 146m
26 | nginx-ingress-default-backend ClusterIP 10.0.194.52 80/TCP 7d19h
27 | ```
28 | ```bash
29 | kubectl create ns sampleapp
30 | kubens sampleapp
31 | kubectl apply -f web-v1-fixed.yaml
32 | kubectl apply -f web-v2-fixed.yaml
33 | kubectl apply -f web-v1-svc.yaml
34 | kubectl apply -f web-v2-svc.yaml
35 | kubectl get pods
36 | ```
37 | ```
38 | NAME READY STATUS RESTARTS AGE
39 | web-v1-845d5c6978-6sgrl 1/1 Running 0 4h50m
40 | web-v2-fb7cb48f5-kkzkc 1/1 Running 0 4h51m
41 |
42 | ```
43 |
44 | ```bash
45 | kubectl get service -n sampleapp
46 | ```
47 |
48 | ```
49 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
50 | web-v1 NodePort 10.0.171.226 8080:30060/TCP 4h51m
51 | web-v2 NodePort 10.0.156.60 8080:30225/TCP 4h51m
52 | ```
53 | Ingress Definition
54 | ```
55 | apiVersion: extensions/v1beta1
56 | kind: Ingress
57 | metadata:
58 | name: web-ingress
59 | annotations:
60 | kubernetes.io/ingress.class: nginx
61 | nginx.ingress.kubernetes.io/ssl-redirect: "false"
62 | nginx.ingress.kubernetes.io/rewrite-target: /$2
63 | spec:
64 | rules:
65 | - http:
66 | paths:
67 | - path: /v2/(.*)
68 | backend:
69 | serviceName: web-v2
70 | servicePort: 8080
71 | - path: /v1/(.*)
72 | backend:
73 | serviceName: web-v1
74 | servicePort: 8080
75 | ```
76 | ```bash
77 | kubectl apply -f http-ingress.yaml
78 | curl -XGET $(kubectl get svc nginx-ingress-controller -n nginx-ingress -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')/v1/
79 |
80 | ```
81 | ```
82 | Hello, world!
83 | Version: 1.0.0
84 | Hostname: web-v1-845d5c6978-xrgpv
85 | ```
86 | ```bash
87 | curl -XGET $(kubectl get svc nginx-ingress-controller -n nginx-ingress -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')/v2/
88 | ```
89 | ```
90 | Hello, world!
91 | Version: 2.0.0
92 | Hostname: web-v2-fb7cb48f5-kkzkc
93 | ```
94 |
95 | ```bash
96 | openssl genrsa -out ca.key 2048
97 | openssl req -x509 -new -nodes -key ca.key -subj \
98 | "/CN=$(kubectl get svc -n nginx-ingress nginx-ingress-controller \
99 | -o jsonpath="{.status.loadBalancer.ingress[0].ip}")" -days 10000 -out ca.crt
100 |
101 | kubectl create secret tls web-tls --key=ca.key --cert=ca.crt -n nginx-ingress
102 |
103 | ```
104 |
105 | ```yaml
106 | ## Additional command line arguments to pass to nginx-ingress-controller
107 | ## E.g. to specify the default SSL certificate you can use
108 | ## extraArgs:
109 | ## default-ssl-certificate: "/"
110 | extraArgs:
111 | default-ssl-certificate: "nginx-ingress/web-tls"
112 |
113 | ```
114 |
115 | ```bash
116 | helm upgrade nginx-ingress stable/nginx-ingress -f values.yaml
117 | #wait 30 seconds
118 | openssl s_client -showcerts -connect 51.105.101.142:443
119 | ```
120 | ```
121 | CONNECTED(00000003)
122 | depth=0 CN = 51.105.101.142
123 | verify error:num=18:self signed certificate
124 | verify return:1
125 | depth=0 CN = 51.105.101.142
126 | verify return:1
127 | ---
128 | Certificate chain
129 | 0 s:/CN=51.105.101.142
130 | i:/CN=51.105.101.142
131 |
132 | ```
133 | Cleanup
134 | ```bash
135 | cd ..
136 | kubectl delete namespace sampleapp
137 | ```
--------------------------------------------------------------------------------
/09-ingress/01-nginx-ingress-controller/http-ingress.yaml:
--------------------------------------------------------------------------------
# Path-based fan-out to the web-v1 / web-v2 services.
# extensions/v1beta1 Ingress was removed in Kubernetes 1.22; this is the
# equivalent manifest in the stable networking.k8s.io/v1 API.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    # Rewrites the matched path using the second capture group.
    # NOTE(review): the paths below define only one capture group, so $2
    # expands empty and every request is rewritten to "/" — confirm this
    # is the intended behavior for the demo app.
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  rules:
  - http:
      paths:
      # ImplementationSpecific lets the nginx controller interpret the
      # path as a regular expression (required for the capture groups).
      - path: /v2/(.*)
        pathType: ImplementationSpecific
        backend:
          service:
            name: web-v2
            port:
              number: 8080
      - path: /v1/(.*)
        pathType: ImplementationSpecific
        backend:
          service:
            name: web-v1
            port:
              number: 8080
21 |
--------------------------------------------------------------------------------
/09-ingress/01-nginx-ingress-controller/ingress-tls.yaml:
--------------------------------------------------------------------------------
# Same fan-out as http-ingress.yaml, plus TLS termination using the
# web-tls secret (controller default certificate applies to hosts not
# listed here).
# extensions/v1beta1 Ingress was removed in Kubernetes 1.22; this is the
# equivalent manifest in the stable networking.k8s.io/v1 API.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    # Rewrites the matched path using the second capture group.
    # NOTE(review): the paths below define only one capture group, so $2
    # expands empty and every request is rewritten to "/" — confirm this
    # is the intended behavior for the demo app.
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  rules:
  - http:
      paths:
      # ImplementationSpecific lets the nginx controller interpret the
      # path as a regular expression (required for the capture groups).
      - path: /v2/(.*)
        pathType: ImplementationSpecific
        backend:
          service:
            name: web-v2
            port:
              number: 8080
      - path: /v1/(.*)
        pathType: ImplementationSpecific
        backend:
          service:
            name: web-v1
            port:
              number: 8080
  tls:
  - secretName: web-tls
23 |
--------------------------------------------------------------------------------
/09-ingress/01-nginx-ingress-controller/values.yaml:
--------------------------------------------------------------------------------
1 | ## nginx configuration
2 | ## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md
3 | ##
4 | controller:
5 | name: controller
6 | image:
7 | repository: quay.io/kubernetes-ingress-controller/nginx-ingress-controller
8 | tag: "0.26.1"
9 | pullPolicy: IfNotPresent
10 | # www-data -> uid 33
11 | runAsUser: 33
12 | allowPrivilegeEscalation: true
13 |
14 | # Configures the ports the nginx-controller listens on
15 | containerPort:
16 | http: 80
17 | https: 443
18 |
19 | # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
20 | config: {}
21 |
22 | # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers
23 | proxySetHeaders: {}
24 |
25 | # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
26 | addHeaders: {}
27 |
28 | # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
29 | # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
30 | # is merged
31 | hostNetwork: false
32 |
33 | # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
34 | # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
35 | # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
36 | dnsPolicy: ClusterFirst
37 |
38 | # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
39 | # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply
40 | reportNodeInternalIp: false
41 |
42 | ## Use host ports 80 and 443
43 | daemonset:
44 | useHostPort: false
45 |
46 | hostPorts:
47 | http: 80
48 | https: 443
49 |
50 | ## Required only if defaultBackend.enabled = false
51 | ## Must be /
52 | ##
53 | defaultBackendService: ""
54 |
55 | ## Election ID to use for status update
56 | ##
57 | electionID: ingress-controller-leader
58 |
59 | ## Name of the ingress class to route through this controller
60 | ##
61 | ingressClass: nginx
62 |
63 | # labels to add to the pod container metadata
64 | podLabels: {}
65 | # key: value
66 |
67 | ## Security Context policies for controller pods
68 | ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
69 | ## notes on enabling and using sysctls
70 | ##
71 | podSecurityContext: {}
72 |
73 | ## Allows customization of the external service
74 | ## the ingress will be bound to via DNS
75 | publishService:
76 | enabled: false
77 | ## Allows overriding of the publish service to bind to
78 | ## Must be /
79 | ##
80 | pathOverride: ""
81 |
82 | ## Limit the scope of the controller
83 | ##
84 | scope:
85 | enabled: false
86 | namespace: "" # defaults to .Release.Namespace
87 |
88 | ## Allows customization of the configmap / nginx-configmap namespace
89 | ##
90 | configMapNamespace: "" # defaults to .Release.Namespace
91 |
92 | ## Allows customization of the tcp-services-configmap namespace
93 | ##
94 | tcp:
95 | configMapNamespace: "" # defaults to .Release.Namespace
96 |
97 | ## Allows customization of the udp-services-configmap namespace
98 | ##
99 | udp:
100 | configMapNamespace: "" # defaults to .Release.Namespace
101 |
102 | ## Additional command line arguments to pass to nginx-ingress-controller
103 | ## E.g. to specify the default SSL certificate you can use
104 | ## extraArgs:
105 | ## default-ssl-certificate: "/"
106 | extraArgs:
107 | default-ssl-certificate: "nginx-ingress/web-tls"
108 |
109 | ## Additional environment variables to set
110 | extraEnvs: []
111 | # extraEnvs:
112 | # - name: FOO
113 | # valueFrom:
114 | # secretKeyRef:
115 | # key: FOO
116 | # name: secret-resource
117 |
118 | ## DaemonSet or Deployment
119 | ##
120 | kind: DaemonSet
121 |
122 | # The update strategy to apply to the Deployment or DaemonSet
123 | ##
124 | updateStrategy: {}
125 | # rollingUpdate:
126 | # maxUnavailable: 1
127 | # type: RollingUpdate
128 |
129 | # minReadySeconds to avoid killing pods before we are ready
130 | ##
131 | minReadySeconds: 0
132 |
133 |
134 | ## Node tolerations for server scheduling to nodes with taints
135 | ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
136 | ##
137 | tolerations: []
138 | # - key: "key"
139 | # operator: "Equal|Exists"
140 | # value: "value"
141 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
142 |
143 | ## Affinity and anti-affinity
144 | ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
145 | ##
146 | affinity: {}
147 | # # An example of preferred pod anti-affinity, weight is in the range 1-100
148 | # podAntiAffinity:
149 | # preferredDuringSchedulingIgnoredDuringExecution:
150 | # - weight: 100
151 | # podAffinityTerm:
152 | # labelSelector:
153 | # matchExpressions:
154 | # - key: app
155 | # operator: In
156 | # values:
157 | # - nginx-ingress
158 | # topologyKey: kubernetes.io/hostname
159 |
160 | # # An example of required pod anti-affinity
161 | # podAntiAffinity:
162 | # requiredDuringSchedulingIgnoredDuringExecution:
163 | # - labelSelector:
164 | # matchExpressions:
165 | # - key: app
166 | # operator: In
167 | # values:
168 | # - nginx-ingress
169 | # topologyKey: "kubernetes.io/hostname"
170 |
171 | ## terminationGracePeriodSeconds
172 | ##
173 | terminationGracePeriodSeconds: 60
174 |
175 | ## Node labels for controller pod assignment
176 | ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
177 | ##
178 | nodeSelector: {}
179 |
180 | ## Liveness and readiness probe values
181 | ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
182 | ##
183 | livenessProbe:
184 | failureThreshold: 3
185 | initialDelaySeconds: 10
186 | periodSeconds: 10
187 | successThreshold: 1
188 | timeoutSeconds: 1
189 | port: 10254
190 | readinessProbe:
191 | failureThreshold: 3
192 | initialDelaySeconds: 10
193 | periodSeconds: 10
194 | successThreshold: 1
195 | timeoutSeconds: 1
196 | port: 10254
197 |
198 | ## Annotations to be added to controller pods
199 | ##
200 | podAnnotations: {}
201 |
202 | replicaCount: 1
203 |
204 | minAvailable: 1
205 |
206 | resources:
207 | limits:
208 | # cpu: 100m
209 | memory: 1024Mi
210 | requests:
211 | cpu: 100m
212 | memory: 1024Mi
213 |
214 | autoscaling:
215 | enabled: false
216 | minReplicas: 1
217 | maxReplicas: 11
218 | targetCPUUtilizationPercentage: 50
219 | targetMemoryUtilizationPercentage: 50
220 |
221 | ## Override NGINX template
222 | customTemplate:
223 | configMapName: ""
224 | configMapKey: ""
225 |
226 | service:
227 | enabled: true
228 |
229 | annotations: {}
230 | labels: {}
231 | ## Deprecated, instead simply do not provide a clusterIP value
232 | omitClusterIP: false
233 | # clusterIP: ""
234 |
235 | ## List of IP addresses at which the controller services are available
236 | ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
237 | ##
238 | externalIPs: []
239 |
240 | loadBalancerIP: ""
241 | loadBalancerSourceRanges: []
242 |
243 | enableHttp: true
244 | enableHttps: true
245 |
246 | ## Set external traffic policy to: "Local" to preserve source IP on
247 | ## providers supporting it
248 | ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
249 | externalTrafficPolicy: ""
250 |
251 | healthCheckNodePort: 0
252 |
253 | ports:
254 | http: 80
255 | https: 443
256 |
257 | targetPorts:
258 | http: http
259 | https: https
260 |
261 | type: LoadBalancer
262 |
263 | # type: NodePort
264 | # nodePorts:
265 | # http: 32080
266 | # https: 32443
267 | # tcp:
268 | # 8080: 32808
269 | nodePorts:
270 | http: ""
271 | https: ""
272 | tcp: {}
273 | udp: {}
274 |
275 | extraContainers: []
276 | ## Additional containers to be added to the controller pod.
277 | ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
278 | # - name: my-sidecar
279 | # image: nginx:latest
280 | # - name: lemonldap-ng-controller
281 | # image: lemonldapng/lemonldap-ng-controller:0.2.0
282 | # args:
283 | # - /lemonldap-ng-controller
284 | # - --alsologtostderr
285 | # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
286 | # env:
287 | # - name: POD_NAME
288 | # valueFrom:
289 | # fieldRef:
290 | # fieldPath: metadata.name
291 | # - name: POD_NAMESPACE
292 | # valueFrom:
293 | # fieldRef:
294 | # fieldPath: metadata.namespace
295 | # volumeMounts:
296 | # - name: copy-portal-skins
297 | # mountPath: /srv/var/lib/lemonldap-ng/portal/skins
298 |
299 | extraVolumeMounts: []
300 | ## Additional volumeMounts to the controller main container.
301 | # - name: copy-portal-skins
302 | # mountPath: /var/lib/lemonldap-ng/portal/skins
303 |
304 | extraVolumes: []
305 | ## Additional volumes to the controller pod.
306 | # - name: copy-portal-skins
307 | # emptyDir: {}
308 |
309 | extraInitContainers: []
310 | ## Containers, which are run before the app containers are started.
311 | # - name: init-myservice
312 | # image: busybox
313 | # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
314 |
315 | admissionWebhooks:
316 | enabled: false
317 | failurePolicy: Fail
318 | port: 8443
319 |
320 | service:
321 | annotations: {}
322 | ## Deprecated, instead simply do not provide a clusterIP value
323 | omitClusterIP: false
324 | # clusterIP: ""
325 | externalIPs: []
326 | loadBalancerIP: ""
327 | loadBalancerSourceRanges: []
328 | servicePort: 443
329 | type: ClusterIP
330 |
331 | patch:
332 | enabled: true
333 | image:
334 | repository: jettech/kube-webhook-certgen
335 | tag: v1.0.0
336 | pullPolicy: IfNotPresent
337 | ## Provide a priority class name to the webhook patching job
338 | ##
339 | priorityClassName: ""
340 | podAnnotations: {}
341 | nodeSelector: {}
342 |
343 | metrics:
344 | port: 10254
345 | # if this port is changed, change healthz-port: in extraArgs: accordingly
346 | enabled: true
347 |
348 | service:
349 | annotations: {}
350 | # prometheus.io/scrape: "true"
351 | # prometheus.io/port: "10254"
352 |
353 | ## Deprecated, instead simply do not provide a clusterIP value
354 | omitClusterIP: false
355 | # clusterIP: ""
356 |
357 | ## List of IP addresses at which the stats-exporter service is available
358 | ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
359 | ##
360 | externalIPs: []
361 |
362 | loadBalancerIP: ""
363 | loadBalancerSourceRanges: []
364 | servicePort: 9913
365 | type: ClusterIP
366 |
367 | serviceMonitor:
368 | enabled: true
369 | additionalLabels:
370 | name: nginx-ingress
371 | release: prometheus
372 | namespace: ""
373 | namespaceSelector: {}
374 | # Default: scrape .Release.Namespace only
375 | # To scrape all, use the following:
376 | # namespaceSelector:
377 | # any: true
378 | scrapeInterval: 30s
379 | # honorLabels: true
380 |
381 | prometheusRule:
382 | enabled: false
383 | additionalLabels: {}
384 | namespace: ""
385 | rules: []
386 | # # These are just example rules; please adapt them to your needs
387 | # - alert: TooMany500s
388 | # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
389 | # for: 1m
390 | # labels:
391 | # severity: critical
392 | # annotations:
393 | # description: Too many 5XXs
394 | #     summary: More than 5% of all requests returned a 5XX response; this requires your attention
395 | # - alert: TooMany400s
396 | # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
397 | # for: 1m
398 | # labels:
399 | # severity: critical
400 | # annotations:
401 | # description: Too many 4XXs
402 | #     summary: More than 5% of all requests returned a 4XX response; this requires your attention
403 |
404 |
405 | lifecycle: {}
406 |
407 | priorityClassName: ""
408 |
409 | ## Rollback limit
410 | ##
411 | revisionHistoryLimit: 10
412 |
413 | ## Default 404 backend
414 | ##
415 | defaultBackend:
416 |
417 | ## If false, controller.defaultBackendService must be provided
418 | ##
419 | enabled: true
420 |
421 | name: default-backend
422 | image:
423 | repository: k8s.gcr.io/defaultbackend-amd64
424 | tag: "1.5"
425 | pullPolicy: IfNotPresent
426 | # nobody user -> uid 65534
427 | runAsUser: 65534
428 |
429 | extraArgs: {}
430 |
431 | serviceAccount:
432 | create: true
433 | name:
434 | ## Additional environment variables to set for defaultBackend pods
435 | extraEnvs: []
436 |
437 | port: 8080
438 |
439 | ## Readiness and liveness probes for default backend
440 | ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
441 | ##
442 | livenessProbe:
443 | failureThreshold: 3
444 | initialDelaySeconds: 30
445 | periodSeconds: 10
446 | successThreshold: 1
447 | timeoutSeconds: 5
448 | readinessProbe:
449 | failureThreshold: 6
450 | initialDelaySeconds: 0
451 | periodSeconds: 5
452 | successThreshold: 1
453 | timeoutSeconds: 5
454 |
455 | ## Node tolerations for server scheduling to nodes with taints
456 | ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
457 | ##
458 | tolerations: []
459 | # - key: "key"
460 | # operator: "Equal|Exists"
461 | # value: "value"
462 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
463 |
464 | affinity: {}
465 |
466 | ## Security Context policies for controller pods
467 | ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
468 | ## notes on enabling and using sysctls
469 | ##
470 | podSecurityContext: {}
471 |
472 | # labels to add to the pod container metadata
473 | podLabels: {}
474 | # key: value
475 |
476 | ## Node labels for default backend pod assignment
477 | ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
478 | ##
479 | nodeSelector: {}
480 |
481 | ## Annotations to be added to default backend pods
482 | ##
483 | podAnnotations: {}
484 |
485 | replicaCount: 1
486 |
487 | minAvailable: 1
488 |
489 | resources: {}
490 | # limits:
491 | # cpu: 10m
492 | # memory: 20Mi
493 | # requests:
494 | # cpu: 10m
495 | # memory: 20Mi
496 |
497 | service:
498 | annotations: {}
499 | ## Deprecated, instead simply do not provide a clusterIP value
500 | omitClusterIP: false
501 | # clusterIP: ""
502 |
503 | ## List of IP addresses at which the default backend service is available
504 | ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
505 | ##
506 | externalIPs: []
507 |
508 | loadBalancerIP: ""
509 | loadBalancerSourceRanges: []
510 | servicePort: 80
511 | type: ClusterIP
512 |
513 | priorityClassName: ""
514 |
515 | ## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
516 | rbac:
517 | create: true
518 |
519 | # If true, create & use Pod Security Policy resources
520 | # https://kubernetes.io/docs/concepts/policy/pod-security-policy/
521 | podSecurityPolicy:
522 | enabled: false
523 |
524 | serviceAccount:
525 | create: true
526 | name:
527 |
528 | ## Optional array of imagePullSecrets containing private registry credentials
529 | ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
530 | imagePullSecrets: []
531 | # - name: secretName
532 |
533 | # TCP service key:value pairs
534 | # Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
535 | ##
536 | tcp: {}
537 | # 8080: "default/example-tcp-svc:9000"
538 |
539 | # UDP service key:value pairs
540 | # Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
541 | ##
542 | udp: {}
543 | # 53: "kube-system/kube-dns:53"
544 |
--------------------------------------------------------------------------------
/09-ingress/01-nginx-ingress-controller/web-v1-fixed.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: web-v1
7 | name: web-v1
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | run: web-v1
13 | strategy: {}
14 | template:
15 | metadata:
16 | creationTimestamp: null
17 | labels:
18 | run: web-v1
19 | spec:
20 | containers:
21 | - image: gcr.io/google-samples/hello-app:1.0
22 | name: web-v1
23 | ports:
24 | - containerPort: 8080
25 | livenessProbe:
26 | httpGet:
27 | path: /
28 | port: 8080
29 | initialDelaySeconds: 5
30 | timeoutSeconds: 1
31 | periodSeconds: 10
32 | failureThreshold: 3
33 | readinessProbe:
34 | httpGet:
35 | path: /
36 | port: 8080
37 | timeoutSeconds: 1
38 | periodSeconds: 10
39 | initialDelaySeconds: 30
40 | status: {}
41 |
--------------------------------------------------------------------------------
/09-ingress/01-nginx-ingress-controller/web-v1-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: web-v1
7 | name: web-v1
8 | spec:
9 | ports:
10 | - port: 8080
11 | protocol: TCP
12 | targetPort: 8080
13 | selector:
14 | run: web-v1
15 | type: NodePort
16 | status:
17 | loadBalancer: {}
18 |
--------------------------------------------------------------------------------
/09-ingress/01-nginx-ingress-controller/web-v2-fixed.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: web-v2
7 | name: web-v2
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | run: web-v2
13 | strategy: {}
14 | template:
15 | metadata:
16 | creationTimestamp: null
17 | labels:
18 | run: web-v2
19 | spec:
20 | containers:
21 | - image: gcr.io/google-samples/hello-app:2.0
22 | name: web-v2
23 | ports:
24 | - containerPort: 8080
25 | livenessProbe:
26 | httpGet:
27 | path: /
28 | port: 8080
29 | initialDelaySeconds: 5
30 | timeoutSeconds: 1
31 | periodSeconds: 10
32 | failureThreshold: 3
33 | readinessProbe:
34 | httpGet:
35 | path: /
36 | port: 8080
37 | timeoutSeconds: 1
38 | periodSeconds: 10
39 | initialDelaySeconds: 30
40 | status: {}
41 |
--------------------------------------------------------------------------------
/09-ingress/01-nginx-ingress-controller/web-v2-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: web-v2
7 | name: web-v2
8 | spec:
9 | ports:
10 | - port: 8080
11 | protocol: TCP
12 | targetPort: 8080
13 | selector:
14 | run: web-v2
15 | type: NodePort
16 | status:
17 | loadBalancer: {}
18 |
--------------------------------------------------------------------------------
/09-ingress/02-traefik-ingress-controller/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | helm install --name traefik stable/traefik --namespace traefik -f values.yaml
3 | kubectl get pods -n traefik
4 | kubectl get svc -n traefik
5 | ```
6 |
7 | ```
8 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
9 | traefik LoadBalancer 10.0.201.17 104.45.75.216 80:30466/TCP,443:32393/TCP 9m29s
10 | traefik-dashboard ClusterIP 10.0.24.2 80/TCP 9m29s
11 | traefik-prometheus ClusterIP 10.0.240.111 9100/TCP 9m29s
12 | ```
13 |
14 |
15 | ```bash
16 | kubectl create ns sampleapp
17 | kubens sampleapp
18 | kubectl apply -f web-v1-fixed.yaml
19 | kubectl apply -f web-v2-fixed.yaml
20 | kubectl apply -f web-v1-svc.yaml
21 | kubectl apply -f web-v2-svc.yaml
22 | kubectl get pods
23 | ```
24 | ```
25 | NAME READY STATUS RESTARTS AGE
26 | web-v1-845d5c6978-6sgrl 1/1 Running 0 4h50m
27 | web-v2-fb7cb48f5-kkzkc 1/1 Running 0 4h51m
28 |
29 | ```
30 |
31 | ```bash
32 | kubectl get service -n sampleapp
33 | ```
34 |
35 | ```
36 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
37 | web-v1 NodePort 10.0.171.226 8080:30060/TCP 4h51m
38 | web-v2 NodePort 10.0.156.60 8080:30225/TCP 4h51m
39 | ```
40 |
41 | Ingress Definition
42 | ```
43 | apiVersion: extensions/v1beta1
44 | kind: Ingress
45 | metadata:
46 | name: web-ingress
47 | annotations:
48 | kubernetes.io/ingress.class: traefik
49 | spec:
50 | rules:
51 | - http:
52 | paths:
53 | - path: /v2/
54 | backend:
55 | serviceName: web-v2
56 | servicePort: 8080
57 | - path: /v1/
58 | backend:
59 | serviceName: web-v1
60 | servicePort: 8080
61 | ```
62 |
63 | HTTPS Traffic (self-signed TLS certificate setup)
64 |
65 | ```bash
66 | openssl genrsa -out ca.key 2048
67 | openssl req -x509 -new -nodes -key ca.key -subj \
68 | "/CN=$(kubectl get svc -n traefik traefik \
69 | -o jsonpath="{.status.loadBalancer.ingress[0].ip}")" -days 10000 -out ca.crt
70 |
71 | kubectl create secret tls web-tls --key=ca.key --cert=ca.crt -n traefik
72 |
73 | ```
74 | Cleanup
75 | ```bash
76 | kubectl delete ns sampleapp
77 | ```
78 |
--------------------------------------------------------------------------------
/09-ingress/02-traefik-ingress-controller/http-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: web-ingress
5 | annotations:
6 | kubernetes.io/ingress.class: traefik
7 | #traefik.frontend.rule.type: PathPrefixStrip
8 | spec:
9 | rules:
10 | - http:
11 | paths:
12 | - path: /v2
13 | backend:
14 | serviceName: web-v2
15 | servicePort: http
16 | - path: /v1
17 | backend:
18 | serviceName: web-v1
19 | servicePort: http
20 |
--------------------------------------------------------------------------------
/09-ingress/02-traefik-ingress-controller/values.yaml:
--------------------------------------------------------------------------------
1 | ## Default values for Traefik
2 | image: traefik
3 | imageTag: 1.7.20
4 |
5 | ## Optionally specify an array of imagePullSecrets.
6 | ## Secrets must be manually created in the namespace.
7 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
8 | # imagePullSecrets:
9 | # - "regsecret"
10 |
11 |
12 | testFramework:
13 | enabled: false
14 | image: "dduportal/bats"
15 | tag: "0.4.0"
16 |
17 | ## can switch the service type to NodePort if required
18 | serviceType: LoadBalancer
19 | # loadBalancerIP: ""
20 | # loadBalancerSourceRanges: []
21 | whiteListSourceRange: []
22 | externalTrafficPolicy: Cluster
23 | replicas: 1
24 | # startupArguments:
25 | # - "--ping"
26 | # - "--ping.entrypoint=http"
27 |
28 | # /ping health-check entry point.
29 | # pingEntryPoint: http
30 |
31 | podDisruptionBudget: {}
32 | # maxUnavailable: 1
33 | # minAvailable: 2
34 |
35 | # priorityClassName: ""
36 |
37 | # rootCAs: []
38 |
39 | resources: {}
40 |
41 | debug:
42 | enabled: false
43 |
44 | # logLevel: error
45 |
46 | # maxIdleConnsPerHost: 200
47 |
48 | deploymentStrategy: {}
49 | # rollingUpdate:
50 | # maxSurge: 1
51 | # maxUnavailable: 0
52 | # type: RollingUpdate
53 |
54 | securityContext: {}
55 | useNonPriviledgedPorts: false
56 | env: {}
57 |
58 | nodeSelector: {}
59 | # key: value
60 | affinity: {}
61 | # key: value
62 | tolerations: []
63 | # - key: "key"
64 | # operator: "Equal|Exists"
65 | # value: "value"
66 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
67 | ## Kubernetes ingress filters
68 | # kubernetes:
69 | # endpoint:
70 | # namespaces:
71 | # - default
72 | # labelSelector:
73 | # ingressClass:
74 | # ingressEndpoint:
75 | # hostname: "localhost"
76 | # ip: "127.0.0.1"
77 | # publishedService: "namespace/servicename"
78 | # useDefaultPublishedService: false
79 |
80 | fileBackend: ""
81 | # as in same traefik.toml
82 | #
83 | # [backends]
84 | # [backends.backend1]
85 | # # ...
86 | # [backends.backend2]
87 | # # ...
88 | # [frontends]
89 | # [frontends.frontend1]
90 | # # ...
91 | # [frontends.frontend2]
92 | #
93 | # or separated file from configFiles
94 | # filename = "/configs/rules.toml"
95 |
96 | proxyProtocol:
97 | enabled: false
98 | # trustedIPs is required when enabled
99 | trustedIPs: []
100 | # - 10.0.0.0/8
101 | forwardedHeaders:
102 | enabled: false
103 | # trustedIPs is required when enabled
104 | trustedIPs: []
105 | # - 10.0.0.0/8
106 |
107 | ## Add arbitrary ConfigMaps to deployment
108 | ## Will be mounted to /configs/, i.e. myconfig.json would
109 | ## be mounted to /configs/myconfig.json.
110 | configFiles: {}
111 | # myconfig.json: |
112 | # filecontents...
113 |
114 | ## Add arbitrary Secrets to deployment
115 | ## Will be mounted to /secrets/, i.e. file.name would
116 | ## be mounted to /secrets/mysecret.txt.
117 | ## The contents will be base64 encoded when added
118 | secretFiles: {}
119 | # mysecret.txt: |
120 | # filecontents...
121 |
122 | ssl:
123 | enabled: true
124 | enforced: false
125 | permanentRedirect: false
126 | upstream: false
127 | insecureSkipVerify: false
128 | generateTLS: false
129 | # defaultCN: "example.com"
130 | # or *.example.com
131 | defaultSANList: []
132 | # - example.com
133 | # - test1.example.com
134 | defaultIPList: []
135 | # - 1.2.3.4
136 | # cipherSuites: []
137 | # https://docs.traefik.io/configuration/entrypoints/#specify-minimum-tls-version
138 | # tlsMinVersion: VersionTLS12
139 | # https://docs.traefik.io/configuration/entrypoints/#strict-sni-checking
140 | # sniStrict: false
141 | defaultCert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVtekNDQTRPZ0F3SUJBZ0lKQUpBR1FsTW1DMGt5TUEwR0NTcUdTSWIzRFFFQkJRVUFNSUdQTVFzd0NRWUQKVlFRR0V3SlZVekVSTUE4R0ExVUVDQk1JUTI5c2IzSmhaRzh4RURBT0JnTlZCQWNUQjBKdmRXeGtaWEl4RkRBUwpCZ05WQkFvVEMwVjRZVzF3YkdWRGIzSndNUXN3Q1FZRFZRUUxFd0pKVkRFV01CUUdBMVVFQXhRTktpNWxlR0Z0CmNHeGxMbU52YlRFZ01CNEdDU3FHU0liM0RRRUpBUllSWVdSdGFXNUFaWGhoYlhCc1pTNWpiMjB3SGhjTk1UWXgKTURJME1qRXdPVFV5V2hjTk1UY3hNREkwTWpFd09UVXlXakNCanpFTE1Ba0dBMVVFQmhNQ1ZWTXhFVEFQQmdOVgpCQWdUQ0VOdmJHOXlZV1J2TVJBd0RnWURWUVFIRXdkQ2IzVnNaR1Z5TVJRd0VnWURWUVFLRXd0RmVHRnRjR3hsClEyOXljREVMTUFrR0ExVUVDeE1DU1ZReEZqQVVCZ05WQkFNVURTb3VaWGhoYlhCc1pTNWpiMjB4SURBZUJna3EKaGtpRzl3MEJDUUVXRVdGa2JXbHVRR1Y0WVcxd2JHVXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQwpBUThBTUlJQkNnS0NBUUVBdHVKOW13dzlCYXA2SDROdUhYTFB6d1NVZFppNGJyYTFkN1ZiRUJaWWZDSStZNjRDCjJ1dThwdTNhVTVzYXVNYkQ5N2pRYW95VzZHOThPUHJlV284b3lmbmRJY3RFcmxueGpxelUyVVRWN3FEVHk0bkEKNU9aZW9SZUxmZXFSeGxsSjE0VmlhNVFkZ3l3R0xoRTlqZy9jN2U0WUp6bmg5S1dZMnFjVnhEdUdEM2llaHNEbgphTnpWNFdGOWNJZm1zOHp3UHZPTk5MZnNBbXc3dUhUKzNiSzEzSUloeDI3ZmV2cXVWcENzNDFQNnBzdStWTG4yCjVIRHk0MXRoQkN3T0wrTithbGJ0ZktTcXM3TEFzM25RTjFsdHpITHZ5MGE1RGhkakpUd2tQclQrVXhwb0tCOUgKNFpZazErRUR0N09QbGh5bzM3NDFRaE4vSkNZK2RKbkFMQnNValFJREFRQUJvNEgzTUlIME1CMEdBMVVkRGdRVwpCQlJwZVc1dFhMdHh3TXJvQXM5d2RNbTUzVVVJTERDQnhBWURWUjBqQklHOE1JRzVnQlJwZVc1dFhMdHh3TXJvCkFzOXdkTW01M1VVSUxLR0JsYVNCa2pDQmp6RUxNQWtHQTFVRUJoTUNWVk14RVRBUEJnTlZCQWdUQ0VOdmJHOXkKWVdSdk1SQXdEZ1lEVlFRSEV3ZENiM1ZzWkdWeU1SUXdFZ1lEVlFRS0V3dEZlR0Z0Y0d4bFEyOXljREVMTUFrRwpBMVVFQ3hNQ1NWUXhGakFVQmdOVkJBTVVEU291WlhoaGJYQnNaUzVqYjIweElEQWVCZ2txaGtpRzl3MEJDUUVXCkVXRmtiV2x1UUdWNFlXMXdiR1V1WTI5dGdna0FrQVpDVXlZTFNUSXdEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3EKaGtpRzl3MEJBUVVGQUFPQ0FRRUFjR1hNZms4TlpzQit0OUtCemwxRmw2eUlqRWtqSE8wUFZVbEVjU0QyQjRiNwpQeG5NT2pkbWdQcmF1SGI5dW5YRWFMN3p5QXFhRDZ0YlhXVTZSeENBbWdMYWpWSk5aSE93NDVOMGhyRGtXZ0I4CkV2WnRRNTZhbW13QzFxSWhBaUE2MzkwRDNDc2V4N2dMNm5KbzdrYnIxWVdVRzN6SXZveGR6OFlEclpOZVdLTEQKcFJ2V2VuMGxNYnBqSVJQNFhac25DNDVDOWdWW
GRoM0xSZTErd3lRcTZoOVFQaWxveG1ENk5wRTlpbVRPbjJBNQovYkozVktJekFNdWRlVTZrcHlZbEpCemRHMXVhSFRqUU9Xb3NHaXdlQ0tWVVhGNlV0aXNWZGRyeFF0aDZFTnlXCnZJRnFhWng4NCtEbFNDYzkzeWZrL0dsQnQrU0tHNDZ6RUhNQjlocVBiQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
142 | defaultKey: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdHVKOW13dzlCYXA2SDROdUhYTFB6d1NVZFppNGJyYTFkN1ZiRUJaWWZDSStZNjRDCjJ1dThwdTNhVTVzYXVNYkQ5N2pRYW95VzZHOThPUHJlV284b3lmbmRJY3RFcmxueGpxelUyVVRWN3FEVHk0bkEKNU9aZW9SZUxmZXFSeGxsSjE0VmlhNVFkZ3l3R0xoRTlqZy9jN2U0WUp6bmg5S1dZMnFjVnhEdUdEM2llaHNEbgphTnpWNFdGOWNJZm1zOHp3UHZPTk5MZnNBbXc3dUhUKzNiSzEzSUloeDI3ZmV2cXVWcENzNDFQNnBzdStWTG4yCjVIRHk0MXRoQkN3T0wrTithbGJ0ZktTcXM3TEFzM25RTjFsdHpITHZ5MGE1RGhkakpUd2tQclQrVXhwb0tCOUgKNFpZazErRUR0N09QbGh5bzM3NDFRaE4vSkNZK2RKbkFMQnNValFJREFRQUJBb0lCQUhrTHhka0dxNmtCWWQxVAp6MkU4YWFENnhneGpyY2JSdGFCcTc3L2hHbVhuQUdaWGVWcE81MG1SYW8wbHZ2VUgwaE0zUnZNTzVKOHBrdzNmCnRhWTQxT1dDTk1PMlYxb1MvQmZUK3Zsblh6V1hTemVQa0pXd2lIZVZMdVdEaVVMQVBHaWl4emF2RFMyUnlQRmEKeGVRdVNhdE5pTDBGeWJGMG5Zd3pST3ZoL2VSa2NKVnJRZlZudU1melFkOGgyMzZlb1UxU3B6UnhSNklubCs5UApNc1R2Wm5OQmY5d0FWcFo5c1NMMnB1V1g3SGNSMlVnem5oMDNZWUZJdGtDZndtbitEbEdva09YWHBVM282aWY5ClRIenBleHdubVJWSmFnRG85bTlQd2t4QXowOW80cXExdHJoU1g1U2p1K0xyNFJvOHg5bytXdUF1VnVwb0lHd0wKMWVseERFRUNnWUVBNzVaWGp1enNJR09PMkY5TStyYVFQcXMrRHZ2REpzQ3gyZnRudk1WWVJKcVliaGt6YnpsVQowSHBCVnk3NmE3WmF6Umxhd3RGZ3ljMlpyQThpM0F3K3J6d1pQclNJeWNieC9nUVduRzZlbFF1Y0FFVWdXODRNCkdSbXhKUGlmOGRQNUxsZXdRalFjUFJwZVoxMzlYODJreGRSSEdma1pscHlXQnFLajBTWExRSEVDZ1lFQXcybkEKbUVXdWQzZFJvam5zbnFOYjBlYXdFUFQrbzBjZ2RyaENQOTZQK1pEekNhcURUblZKV21PeWVxRlk1eVdSSEZOLwpzbEhXU2lTRUFjRXRYZys5aGlMc0RXdHVPdzhUZzYyN2VrOEh1UUtMb2tWWEFUWG1NZG9xOWRyQW9INU5hV2lECmRSY3dEU2EvamhIN3RZV1hKZDA4VkpUNlJJdU8vMVZpbDBtbEk5MENnWUVBb2lsNkhnMFNUV0hWWDNJeG9raEwKSFgrK1ExbjRYcFJ5VEg0eldydWY0TjlhYUxxNTY0QThmZGNodnFiWGJHeEN6U3RxR1E2cW1peUU1TVpoNjlxRgoyd21zZEpxeE14RnEzV2xhL0lxSzM0cTZEaHk3cUNld1hKVGRKNDc0Z3kvY0twZkRmeXZTS1RGZDBFejNvQTZLCmhqUUY0L2lNYnpxUStQREFQR0YrVHFFQ2dZQmQ1YnZncjJMMURzV1FJU3M4MHh3MDBSZDdIbTRaQVAxdGJuNk8KK0IvUWVNRC92UXBaTWV4c1hZbU9lV2Noc3FCMnJ2eW1MOEs3WDY1NnRWdGFYay9nVzNsM3ZVNTdYSFF4Q3RNUwpJMVYvcGVSNHRiN24yd0ZncFFlTm1XNkQ4QXk4Z0xiaUZhRkdRSDg5QWhFa0dTd1d5cWJKc2NoTUZZOUJ5OEtUCkZaVWZsUUtCZ0V3VzJkVUpOZEJMeXNycD
hOTE1VbGt1ZnJxbllpUTNTQUhoNFZzWkg1TXU0MW55Yi95NUUyMW4KMk55d3ltWGRlb3VJcFZjcUlVTXl0L3FKRmhIcFJNeVEyWktPR0QyWG5YaENNVlRlL0FQNDJod294Nm02QkZpQgpvemZFa2wwak5uZmREcjZrL1p2MlQ1TnFzaWxaRXJBQlZGOTBKazdtUFBIa0Q2R1ZMUUJ4Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
143 | # Basic auth to protect all the routes. Can use htpasswd to generate passwords
144 | # > htpasswd -n -b testuser testpass
145 | # > testuser:$apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11
146 | auth: {}
147 | # basic:
148 | # testuser: $apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11
149 |
150 | # a list of any extra certificate/key filenames you want included in the traefik instance. This must be used in conjunction with
151 | # the "secretFiles" parameter to include the certs on each traefik pod. the expected format is:
152 | # extraCerts:
153 | # - certFile: /secrets/cert1.crt
154 | # keyFile: /secrets/key1.key
155 | # - certFile: /secrets/cert2.crt
156 | # keyFile: /secrets/key2.key
157 |
158 | # mtls:
159 | # enabled: true
160 | # optional: false
161 | # clientCaCerts: []
162 | # # When mTLS is enabled, the set of CA certificates used to validate client TLS certificates.
163 | # # https://docs.traefik.io/configuration/entrypoints/#tls-mutual-authentication
164 | # # CA certificates should be in PEM format.
165 |
166 | kvprovider:
167 | ## If you want to run Traefik in HA mode, you will need to setup a KV Provider. Therefore you can choose one of
168 | ## * etcd
169 | ## * consul
170 | ## * boltdb
171 | ## * zookeeper
172 | ##
173 | ## ref: https://docs.traefik.io/user-guide/cluster/
174 |
175 | ## storeAcme has to be enabled to support HA Support using acme, but at least one kvprovider is needed
176 | storeAcme: false
177 | acmeStorageLocation: traefik/acme/account
178 | importAcme: false
179 |
180 | # etcd:
181 | # endpoint: etcd-service:2379
182 | # useAPIV3: false
183 | # watch: true
184 | # prefix: traefik
185 |
186 | ## Override default configuration template.
187 | ## For advanced users :)
188 | ##
189 | ## Optional
190 | # filename: consul.tmpl
191 | # username: foo
192 | # password: bar
193 | # tls:
194 | # ca: "/etc/ssl/ca.crt"
195 | # cert: "/etc/ssl/consul.crt"
196 | # key: "/etc/ssl/consul.key"
197 | # insecureSkipVerify: true
198 | #
199 | # consul:
200 | # endpoint: consul-service:8500
201 | # watch: true
202 | # prefix: traefik
203 |
204 | ## Override default configuration template.
205 | ## For advanced users :)
206 | ##
207 | ## Optional
208 | # filename: consul.tmpl
209 | # username: foo
210 | # password: bar
211 | # tls:
212 | # ca: "/etc/ssl/ca.crt"
213 | # cert: "/etc/ssl/consul.crt"
214 | # key: "/etc/ssl/consul.key"
215 | # insecureSkipVerify: true
216 |
217 | ## only relevant for etcd
218 |
219 |
220 | acme:
221 | keyType: RSA4096
222 | enabled: false
223 | email: admin@example.com
224 | onHostRule: true
225 | staging: true
226 | ## Specify a custom ACME server endpoint
227 | ## Optional
228 | # caServer: https://acme-staging-v02.api.letsencrypt.org/directory
229 | logging: false
230 | # Configure a Let's Encrypt certificate to be managed by default.
231 | # This is the only way to request wildcard certificates (works only with dns challenge).
232 | domains:
233 | enabled: false
234 | # List of sets of main and (optional) SANs to generate for
235 | # for wildcard certificates see https://docs.traefik.io/configuration/acme/#wildcard-domains
236 | domainsList:
237 | # - main: "*.example.com"
238 | # - sans:
239 | # - "example.com"
240 | # - main: "*.example2.com"
241 | # - sans:
242 | # - "test1.example2.com"
243 | # - "test2.example2.com"
244 | ## ACME challenge type: "tls-sni-01", "tls-alpn-01", "http-01" or "dns-01"
245 | ## Note that "tls-sni-01" has been DEPRECATED.
246 | challengeType: tls-alpn-01
247 | ## Configure dnsProvider to perform domain verification using dns challenge
248 | ## Applicable only if using the dns-01 challenge type
249 | delayBeforeCheck: 0
250 | resolvers: []
251 | # - 1.1.1.1:53
252 | # - 8.8.8.8:53
253 | ## Configure the endpoint used for the HTTP challenge
254 | ## Applicable only if using the http-01 challenge type
255 | httpChallenge:
256 | entrypoint: http
257 | dnsProvider:
258 | name: nil
259 | existingSecretName: ""
260 | auroradns:
261 | AURORA_USER_ID: ""
262 | AURORA_KEY: ""
263 | AURORA_ENDPOINT: ""
264 | azure:
265 | AZURE_CLIENT_ID: ""
266 | AZURE_CLIENT_SECRET: ""
267 | AZURE_SUBSCRIPTION_ID: ""
268 | AZURE_TENANT_ID: ""
269 | AZURE_RESOURCE_GROUP: ""
270 | cloudflare:
271 | CLOUDFLARE_EMAIL: ""
272 | CLOUDFLARE_API_KEY: ""
273 | digitalocean:
274 | DO_AUTH_TOKEN: ""
275 | dnsimple:
276 | DNSIMPLE_OAUTH_TOKEN: ""
277 | DNSIMPLE_BASE_URL: ""
278 | dnsmadeeasy:
279 | DNSMADEEASY_API_KEY: ""
280 | DNSMADEEASY_API_SECRET: ""
281 | DNSMADEEASY_SANDBOX: ""
282 | dnspod:
283 | DNSPOD_API_KEY: ""
284 | dreamhost:
285 | DREAMHOST_API_KEY: ""
286 | dyn:
287 | DYN_CUSTOMER_NAME: ""
288 | DYN_USER_NAME: ""
289 | DYN_PASSWORD: ""
290 | exoscale:
291 | EXOSCALE_API_KEY: ""
292 | EXOSCALE_API_SECRET: ""
293 | EXOSCALE_ENDPOINT: ""
294 | gandi:
295 | GANDI_API_KEY: ""
296 | godaddy:
297 | GODADDY_API_KEY: ""
298 | GODADDY_API_SECRET: ""
299 | gcloud:
300 | GCE_PROJECT: ""
301 | GCE_SERVICE_ACCOUNT_FILE: ""
302 | linode:
303 | LINODE_API_KEY: ""
304 | namecheap:
305 | NAMECHEAP_API_USER: ""
306 | NAMECHEAP_API_KEY: ""
307 | ns1:
308 | NS1_API_KEY: ""
309 | otc:
310 | OTC_DOMAIN_NAME: ""
311 | OTC_USER_NAME: ""
312 | OTC_PASSWORD: ""
313 | OTC_PROJECT_NAME: ""
314 | OTC_IDENTITY_ENDPOINT: ""
315 | ovh:
316 | OVH_ENDPOINT: ""
317 | OVH_APPLICATION_KEY: ""
318 | OVH_APPLICATION_SECRET: ""
319 | OVH_CONSUMER_KEY: ""
320 | pdns:
321 | PDNS_API_URL: ""
322 | rackspace:
323 | RACKSPACE_USER: ""
324 | RACKSPACE_API_KEY: ""
325 | rfc2136:
326 | RFC2136_NAMESERVER: ""
327 | RFC2136_TSIG_ALGORITHM: ""
328 | RFC2136_TSIG_KEY: ""
329 | RFC2136_TSIG_SECRET: ""
330 | RFC2136_TIMEOUT: ""
331 | route53:
332 | AWS_REGION: ""
333 | AWS_ACCESS_KEY_ID: ""
334 | AWS_SECRET_ACCESS_KEY: ""
335 | vultr:
336 | VULTR_API_KEY: ""
337 | ## Save ACME certs to a persistent volume.
338 | ## WARNING: If you do not do this and you did not have configured
339 | ## a kvprovider, you will re-request certs every time a pod (re-)starts
340 | ## and you WILL be rate limited!
341 | persistence:
342 | enabled: true
343 | annotations: {}
344 | ## acme data Persistent Volume Storage Class
345 | ## If defined, storageClassName:
346 | ## If set to "-", storageClassName: "", which disables dynamic provisioning
347 | ## If undefined (the default) or set to null, no storageClassName spec is
348 | ## set, choosing the default provisioner. (gp2 on AWS, standard on
349 | ## GKE, AWS & OpenStack)
350 | ##
351 | # storageClass: "-"
352 | accessMode: ReadWriteOnce
353 | size: 1Gi
354 | ## A manually managed Persistent Volume Claim
355 | ## Requires persistence.enabled: true
356 | ## If defined, PVC must be created manually before volume will be bound
357 | ##
358 | # existingClaim:
359 | dashboard:
360 | enabled: true
361 | domain: traefik.example.com
362 | # serviceType: ClusterIP
363 | service: {}
364 | # annotations:
365 | # key: value
366 | ingress: {}
367 | # annotations:
368 | # key: value
369 | # labels:
370 | # key: value
371 | # tls:
372 | # - hosts:
373 | # - traefik.example.com
374 | # secretName: traefik-default-cert
375 | auth: {}
376 | # basic:
377 | # username: password
378 | statistics: {}
379 | ## Number of recent errors to show in the ‘Health’ tab
380 | # recentErrors:
381 | service:
382 | # annotations:
383 | # key: value
384 | # labels:
385 | # key: value
386 | ## Further config for service of type NodePort
387 | ## Default config with empty string "" will assign a dynamic
388 | ## nodePort to http and https ports
389 | nodePorts:
390 | http: ""
391 | https: ""
392 | ## If static nodePort configuration is required it can be enabled as below
393 | ## Configure ports in allowable range (eg. 30000 - 32767 on minikube)
394 | # nodePorts:
395 | # http: 30080
396 | # https: 30443
397 | gzip:
398 | enabled: true
399 | traefikLogFormat: json
400 | accessLogs:
401 | enabled: false
402 | ## Path to the access logs file. If not provided, Traefik defaults it to stdout.
403 | # filePath: ""
404 | format: common # choices are: common, json
405 | ## for JSON logging, finer-grained control over what is logged. Fields can be
406 | ## retained or dropped, and request headers can be retained, dropped or redacted
407 | fields:
408 | # choices are keep, drop
409 | defaultMode: keep
410 | names: {}
411 | # ClientUsername: drop
412 | headers:
413 | # choices are keep, drop, redact
414 | defaultMode: keep
415 | names: {}
416 | # Authorization: redact
417 | rbac:
418 | enabled: true
419 | ## Enable the /metrics endpoint, for now only supports prometheus
420 | ## set to true to enable metric collection by prometheus
421 | metrics:
422 | prometheus:
423 | enabled: true
424 | buckets: [0.1,0.3,1.2,5]
425 | serviceMonitor:
426 | # When set true and if Prometheus Operator is installed then use a ServiceMonitor to configure scraping
427 | enabled: true
428 | # Set the namespace the ServiceMonitor should be deployed
429 | namespace: traefik
430 | # Set how frequently Prometheus should scrape
431 | interval: 30s
432 | # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
433 | labels:
434 | release: prometheus
435 | datadog:
436 | enabled: false
437 | # address: localhost:8125
438 | # pushinterval: 10s
439 | statsd:
440 | enabled: false
441 | # address: localhost:8125
442 | # pushinterval: 10s
443 | deployment:
444 | # labels to add to the deployment
445 | # labels:
446 | # key: value
447 | # annotations:
448 | # key: value
449 | # labels to add to the pod container metadata
450 | # podLabels:
451 | # key: value
452 | # podAnnotations:
453 | # key: value
454 | hostPort:
455 | httpEnabled: false
456 | httpsEnabled: false
457 | dashboardEnabled: false
458 | # httpPort: 80
459 | # httpsPort: 443
460 | # dashboardPort: 8080
461 | sendAnonymousUsage: false
462 | tracing:
463 | enabled: false
464 | serviceName: traefik
465 | # backend: choices are jaeger, zipkin, datadog
466 | # jaeger:
467 | # localAgentHostPort: "127.0.0.1:6831"
468 | # samplingServerURL: http://localhost:5778/sampling
469 | # samplingType: const
470 | # samplingParam: 1.0
471 | # zipkin:
472 | # httpEndpoint: http://localhost:9411/api/v1/spans
473 | # debug: false
474 | # sameSpan: false
475 | # id128bit: true
476 | # datadog:
477 | # localAgentHostPort: "127.0.0.1:8126"
478 | # debug: false
479 | # globalTag: ""
480 |
481 | ## Create HorizontalPodAutoscaler object.
482 | ##
483 | # autoscaling:
484 | # minReplicas: 1
485 | # maxReplicas: 10
486 | # metrics:
487 | # - type: Resource
488 | # resource:
489 | # name: cpu
490 | # targetAverageUtilization: 60
491 | # - type: Resource
492 | # resource:
493 | # name: memory
494 | # targetAverageUtilization: 60
495 |
496 | ## Timeouts
497 | ##
498 | # timeouts:
499 | # ## responding are timeouts for incoming requests to the Traefik instance
500 | # responding:
501 | # readTimeout: 0s
502 | # writeTimeout: 0s
503 | # idleTimeout: 180s
504 | # ## forwarding are timeouts for requests forwarded to the backend servers
505 | # forwarding:
506 | # dialTimeout: 30s
507 | # responseHeaderTimeout: 0s
508 |
509 | # forwardAuth:
510 | # entryPoints: ["http", "https"]
511 | # address: https://authserver.com/auth
512 | # trustForwardHeader: true
513 |
514 | # Any extra volumes to define for the pod
515 | extraVolumes: []
516 | # - name: example-name
517 | # hostPath:
518 | # path: /path/on/host
519 | # type: DirectoryOrCreate
520 |
521 | # Any extra volume mounts to define for the Traefik container
522 | extraVolumeMounts: []
523 | # - name: example-name
524 | # mountPath: /path/in/container
525 |
--------------------------------------------------------------------------------
/09-ingress/02-traefik-ingress-controller/web-v1-fixed.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | run: web-v1
7 | name: web-v1
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | run: web-v1
13 | strategy: {}
14 | template:
15 | metadata:
16 | creationTimestamp: null
17 | labels:
18 | run: web-v1
19 | spec:
20 | containers:
21 | - image: gcr.io/google-samples/hello-app:1.0
22 | name: web-v1
23 | ports:
24 | - containerPort: 8080
25 | livenessProbe:
26 | httpGet:
27 | path: /
28 | port: 8080
29 | initialDelaySeconds: 5
30 | timeoutSeconds: 1
31 | periodSeconds: 10
32 | failureThreshold: 3
33 | readinessProbe:
34 | httpGet:
35 | path: /
36 | port: 8080
37 | timeoutSeconds: 1
38 | periodSeconds: 10
39 | initialDelaySeconds: 30
40 | status: {}
41 |
--------------------------------------------------------------------------------
/09-ingress/02-traefik-ingress-controller/web-v1-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: web-v1
7 |   name: web-v1
8 | spec:
9 |   ports:
10 |   - port: 8080
11 |     protocol: TCP
12 |     targetPort: 8080  # hello-app container listens on 8080
13 |     name: http  # named port; Ingress rules can reference the Service port as "http"
14 |   selector:
15 |     run: web-v1  # selects pods from the web-v1 Deployment
16 |   type: NodePort  # node-reachable so the ingress controller can route to it
17 | status:
18 |   loadBalancer: {}
19 |
--------------------------------------------------------------------------------
/09-ingress/02-traefik-ingress-controller/web-v2-fixed.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1  # extensions/v1beta1 Deployments were removed in Kubernetes 1.16
2 | kind: Deployment
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: web-v2
7 |   name: web-v2
8 | spec:
9 |   replicas: 1
10 |   selector:
11 |     matchLabels:
12 |       run: web-v2  # must match the pod template labels below
13 |   strategy: {}
14 |   template:
15 |     metadata:
16 |       creationTimestamp: null
17 |       labels:
18 |         run: web-v2
19 |     spec:
20 |       containers:
21 |       - image: gcr.io/google-samples/hello-app:2.0
22 |         name: web-v2
23 |         ports:
24 |         - containerPort: 8080
25 |         livenessProbe:  # restart the container when GET / stops answering
26 |           httpGet:
27 |             path: /
28 |             port: 8080
29 |           initialDelaySeconds: 5
30 |           timeoutSeconds: 1
31 |           periodSeconds: 10
32 |           failureThreshold: 3
33 |         readinessProbe:  # keep the pod out of Service endpoints until GET / answers
34 |           httpGet:
35 |             path: /
36 |             port: 8080
37 |           timeoutSeconds: 1
38 |           periodSeconds: 10
39 |           initialDelaySeconds: 30
40 | status: {}
41 |
--------------------------------------------------------------------------------
/09-ingress/02-traefik-ingress-controller/web-v2-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: web-v2
7 |   name: web-v2
8 | spec:
9 |   ports:
10 |   - port: 8080
11 |     protocol: TCP
12 |     targetPort: 8080  # hello-app container listens on 8080
13 |     name: http  # named port; Ingress rules can reference the Service port as "http"
14 |   selector:
15 |     run: web-v2  # selects pods from the web-v2 Deployment
16 |   type: NodePort  # node-reachable so the ingress controller can route to it
17 | status:
18 |   loadBalancer: {}
19 |
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/Readme.md:
--------------------------------------------------------------------------------
1 | Deploy Contour Ingress
2 | ```bash
3 | helm repo add rimusz https://charts.rimusz.net
4 | helm repo update
5 | helm install --name contour-ingress rimusz/contour --namespace contour -f values.yaml
6 | kubens contour
7 | kubectl get pods
8 | ```
9 | Deploy Sample Application
10 |
11 | ```bash
12 | kubectl create ns sampleapp
13 | kubens sampleapp
14 | kubectl apply -f web-v1-fixed.yaml
15 | kubectl apply -f web-v2-fixed.yaml
16 | kubectl apply -f web-v1-svc.yaml
17 | kubectl apply -f web-v2-svc.yaml
18 | ```
19 |
20 | ```
21 | NAME READY STATUS RESTARTS AGE
22 | contour-ingress-67b588db67-qh5xt 2/2 Running 0 82s
23 | ```
24 | ```bash
25 | kubectl get svc
26 | ```
27 | ```
28 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
29 | contour-ingress LoadBalancer 10.0.169.141 104.45.66.31 80:32001/TCP,443:30033/TCP 70s
30 | ```
31 |
32 | References
33 | - https://www.youtube.com/watch?v=764YUk-wSa0
34 | - https://www.youtube.com/watch?v=O7HfkgzD7Z0
35 | - -
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/contour-monitoring-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     app: contour
6 |   name: contour-monitoring
7 |   namespace: contour
8 | spec:
9 |   ports:
10 |   - name: contour-monitoring  # metrics port 8002 (see values.yaml scrape annotations)
11 |     port: 8002
12 |     targetPort: 8002
13 |   - name: envoy-monitoring  # metrics port 8000
14 |     port: 8000
15 |     protocol: TCP
16 |     targetPort: 8000
17 |   selector:
18 |     app: contour
19 |     release: contour-ingress  # pods installed by the "contour-ingress" Helm release
20 |   type: ClusterIP  # cluster-internal only; intended for Prometheus scraping
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/controur-servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 |   labels:
5 |     prometheus: kube-prometheus
6 |     release: prometheus  # matched by the prometheus-operator installed as release "prometheus"
7 |   name: envoy
8 |   namespace: contour
9 | spec:
10 |   selector:
11 |     matchLabels:
12 |       app: contour  # scrape Services labeled app=contour (e.g. contour-monitoring)
13 |   namespaceSelector:
14 |     matchNames:
15 |     - contour
16 |   endpoints:
17 |   - targetPort: 8002  # scraped at /stats/prometheus (matches values.yaml annotations)
18 |     interval: 30s
19 |     path: /stats/prometheus
20 |   - targetPort: 8000  # scraped at the default-style /metrics path
21 |     path: /metrics
22 |     interval: 30s
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/http-ingress.yaml:
--------------------------------------------------------------------------------
1 | # Routes /v1 and /v2 to the matching hello-app Services through Contour.
2 | apiVersion: networking.k8s.io/v1  # extensions/v1beta1 Ingress was removed in Kubernetes 1.22
3 | kind: Ingress
4 | metadata:
5 |   name: web-ingress
6 |   annotations:
7 |     kubernetes.io/ingress.class: contour
8 | spec:
9 |   rules:
10 |   - http:
11 |       paths:
12 |       - path: /v2
13 |         pathType: Prefix  # pathType is mandatory in networking.k8s.io/v1
14 |         backend:
15 |           service:
16 |             name: web-v2
17 |             port:
18 |               name: http  # the named port on the web-v2 Service
19 |       - path: /v1
20 |         pathType: Prefix
21 |         backend:
22 |           service:
23 |             name: web-v1
24 |             port:
25 |               name: http  # the named port on the web-v1 Service
26 |
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for contour.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | replicaCount: 3
6 |
7 | # Contour Deployment specific annotations
8 | annotations:
9 |   prometheus.io/scrape: "true"
10 |   prometheus.io/port: "8002"
11 |   prometheus.io/path: "/stats/prometheus"
12 |
13 | controller:
14 |   image:
15 |     repository: gcr.io/heptio-images/contour
16 |     # Note that by default we use appVersion to get images tag
17 |     # tag:
18 |     pullPolicy: IfNotPresent
19 |   # Enable statsd metrics
20 |   statsd:
21 |     enabled: false
22 |     stats: {}
23 |     # address: 0.0.0.0
24 |     # port: 8002
25 |
26 | proxy:
27 |   image:
28 |     repository: docker.io/envoyproxy/envoy-alpine
29 |     tag: v1.9.1
30 |     pullPolicy: IfNotPresent
31 |
32 | service:
33 |   type: LoadBalancer
34 |   loadBalancerIP: ""
35 |   # Contour specific Service annotations
36 |   annotations:
37 |     prometheus.io/port: "8000"
38 |     prometheus.io/scrape: "true"
39 |   # This annotation puts the AWS ELB into "TCP" mode so that it does not
40 |   # do HTTP negotiation for HTTPS connections at the ELB edge.
41 |   # The downside of this is the remote IP address of all connections will
42 |   # appear to be the internal address of the ELB. See docs/proxy-proto.md
43 |   # for information about enabling the PROXY protocol on the ELB to recover
44 |   # the original remote IP address.
45 |   # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
46 |   # Due to the implementation of this feature, the source IP seen in the
47 |   # target container is not the original source IP of the client. To enable
48 |   # preservation of the client IP, the following fields can be configured in the
49 |   # service spec (supported in GCE/Google Kubernetes Engine environments)
50 |   # There are two available options: Cluster (default) and Local
51 |   # externalTrafficPolicy: "Cluster"
52 |
53 | resources: {}
54 |   # We usually recommend not to specify default resources and to leave this as a conscious
55 |   # choice for the user. This also increases chances charts run on environments with little
56 |   # resources, such as Minikube. If you do want to specify resources, uncomment the following
57 |   # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
58 |   # limits:
59 |   #   cpu: 100m
60 |   #   memory: 128Mi
61 |   # requests:
62 |   #   cpu: 100m
63 |   #   memory: 128Mi
64 |
65 | # RBAC manifests management
66 | rbac:
67 |   enabled: true
68 |
69 | nodeSelector: {}
70 |
71 | tolerations: []  # tolerations is a list in the pod spec; an empty LIST (not {}) is the correct default
72 |
73 | # Enable and set Pod Disruption Budget
74 | podDisruptionBudget:
75 |   enabled: false
76 |   maxUnavailable: 1
77 |   minAvailable: null
78 |
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/web-v1-fixed.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1  # extensions/v1beta1 Deployments were removed in Kubernetes 1.16
2 | kind: Deployment
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: web-v1
7 |   name: web-v1
8 | spec:
9 |   replicas: 1
10 |   selector:
11 |     matchLabels:
12 |       run: web-v1  # must match the pod template labels below
13 |   strategy: {}
14 |   template:
15 |     metadata:
16 |       creationTimestamp: null
17 |       labels:
18 |         run: web-v1
19 |     spec:
20 |       containers:
21 |       - image: gcr.io/google-samples/hello-app:1.0
22 |         name: web-v1
23 |         ports:
24 |         - containerPort: 8080
25 |         livenessProbe:  # restart the container when GET / stops answering
26 |           httpGet:
27 |             path: /
28 |             port: 8080
29 |           initialDelaySeconds: 5
30 |           timeoutSeconds: 1
31 |           periodSeconds: 10
32 |           failureThreshold: 3
33 |         readinessProbe:  # keep the pod out of Service endpoints until GET / answers
34 |           httpGet:
35 |             path: /
36 |             port: 8080
37 |           timeoutSeconds: 1
38 |           periodSeconds: 10
39 |           initialDelaySeconds: 30
40 | status: {}
41 |
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/web-v1-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: web-v1
7 |   name: web-v1
8 | spec:
9 |   ports:
10 |   - port: 8080
11 |     protocol: TCP
12 |     targetPort: 8080  # hello-app container listens on 8080
13 |     name: http  # named port; Ingress rules can reference the Service port as "http"
14 |   selector:
15 |     run: web-v1  # selects pods from the web-v1 Deployment
16 |   type: NodePort  # node-reachable so the ingress controller can route to it
17 | status:
18 |   loadBalancer: {}
19 |
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/web-v2-fixed.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1  # extensions/v1beta1 Deployments were removed in Kubernetes 1.16
2 | kind: Deployment
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: web-v2
7 |   name: web-v2
8 | spec:
9 |   replicas: 1
10 |   selector:
11 |     matchLabels:
12 |       run: web-v2  # must match the pod template labels below
13 |   strategy: {}
14 |   template:
15 |     metadata:
16 |       creationTimestamp: null
17 |       labels:
18 |         run: web-v2
19 |     spec:
20 |       containers:
21 |       - image: gcr.io/google-samples/hello-app:2.0
22 |         name: web-v2
23 |         ports:
24 |         - containerPort: 8080
25 |         livenessProbe:  # restart the container when GET / stops answering
26 |           httpGet:
27 |             path: /
28 |             port: 8080
29 |           initialDelaySeconds: 5
30 |           timeoutSeconds: 1
31 |           periodSeconds: 10
32 |           failureThreshold: 3
33 |         readinessProbe:  # keep the pod out of Service endpoints until GET / answers
34 |           httpGet:
35 |             path: /
36 |             port: 8080
37 |           timeoutSeconds: 1
38 |           periodSeconds: 10
39 |           initialDelaySeconds: 30
40 | status: {}
41 |
--------------------------------------------------------------------------------
/09-ingress/03-contour-ingress-controller/web-v2-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: web-v2
7 |   name: web-v2
8 | spec:
9 |   ports:
10 |   - port: 8080
11 |     protocol: TCP
12 |     targetPort: 8080  # hello-app container listens on 8080
13 |     name: http  # named port; Ingress rules can reference the Service port as "http"
14 |   selector:
15 |     run: web-v2  # selects pods from the web-v2 Deployment
16 |   type: NodePort  # node-reachable so the ingress controller can route to it
17 | status:
18 |   loadBalancer: {}
19 |
--------------------------------------------------------------------------------
/09-ingress/README.md:
--------------------------------------------------------------------------------
1 | Compare Ingress Controllers Performance with Azure DevOps Load Test
2 |
3 | Let's see which Ingress Controller performs better than the others.
4 | We will use Azure DevOps to test the performances of ingress controllers.
5 | First we need to define load test scenarios on Azure DevOps.
6 | Our Load Test Scenario is designed as instantly 1000 users in 2 minutes for all ingress types.
7 |
8 | This is sample scenario for nginx-ingress.
9 | 
10 |
11 | After that we should add 2 more scenarios for traefik-ingress and contour-ingress.
12 | Then, let's run test and examine results.
13 |
14 | Nginx Ingress Controller Load Test Summary
15 |
16 | 
17 |
18 | Nginx Ingress Controller Load Test Chart Details
19 |
20 | 
21 |
22 |
23 | Traefik Ingress Controller Load Test Summary
24 |
25 | 
26 |
27 | Traefik Ingress Controller Load Test Chart Details
28 |
29 | 
30 |
31 |
32 | Contour Ingress Controller Load Test Summary
33 |
34 | 
35 |
36 | Contour Ingress Controller Load Test Chart Details
37 |
38 | 
39 |
40 |
41 | Summary
42 | We can see the Average Response Times for each ingress controller.
43 | According to test results, it is obvious which ingress controller type has best performance for load balancing.
44 | Contour Ingress Controller has lowest average response time with 5.7 ms.
45 | Then, Traefik Ingress Controller has the second lowest average response time with 7.5 ms.
46 | Finally, Nginx Ingress Controller has the highest average response time with 11 ms.
47 |
--------------------------------------------------------------------------------
/10-monitoring/Readme.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | helm install --name prometheues stable/prometheus-operator --namespace monitoring -f values.yaml
3 | kubectl apply -f aks-kubelet-service-monitor.yaml -n monitoring
4 | ```
5 |
--------------------------------------------------------------------------------
/10-monitoring/aks-kubelet-service-monitor.yaml:
--------------------------------------------------------------------------------
1 | # ServiceMonitor that scrapes kubelet (and cAdvisor) metrics over plain HTTP.
2 | # Server-populated metadata (creationTimestamp, generation, resourceVersion,
3 | # selfLink, uid) has been removed: those fields come from a `kubectl get -o yaml`
4 | # dump and cause conflicts/rejection when the manifest is re-applied.
5 | apiVersion: monitoring.coreos.com/v1
6 | kind: ServiceMonitor
7 | metadata:
8 |   labels:
9 |     app: prometheus-operator-kubelet
10 |     chart: prometheus-operator-8.3.3
11 |     heritage: Tiller
12 |     release: prometheus
13 |   name: prometheus-prometheus-oper-kubelet
14 |   namespace: monitoring
15 | spec:
16 |   endpoints:
17 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
18 |     honorLabels: true
19 |     port: http-metrics
20 |     scheme: http  # scrape kubelet metrics over HTTP instead of HTTPS
21 |     tlsConfig:
22 |       caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
23 |       insecureSkipVerify: true
24 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
25 |     honorLabels: true
26 |     path: /metrics/cadvisor  # container-level metrics exposed by the kubelet
27 |     port: http-metrics
28 |     scheme: http
29 |     tlsConfig:
30 |       caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
31 |       insecureSkipVerify: true
32 |   jobLabel: k8s-app
33 |   namespaceSelector:
34 |     matchNames:
35 |     - kube-system
36 |   selector:
37 |     matchLabels:
38 |       k8s-app: kubelet
39 |
--------------------------------------------------------------------------------
/11-security/Readme.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devlerazureda/kubernetes-essentials/9ba1e24a0cd45be98d3b6794b8690bfae6ed3f93/11-security/Readme.md
--------------------------------------------------------------------------------
/12-advanced-k8s/01-scheduling/01-nodeaffinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: mysql
5 |   labels:
6 |     app: mysql
7 | spec:
8 |   ports:
9 |   - port: 3306
10 |     name: mysql
11 |     targetPort: 3306
12 |   selector:
13 |     app: mysql
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 |   name: mysql
19 | spec:
20 |   selector:
21 |     matchLabels:
22 |       app: mysql
23 |   template:
24 |     metadata:
25 |       labels:
26 |         app: mysql
27 |     spec:
28 |       affinity:
29 |         nodeAffinity:
30 |           requiredDuringSchedulingIgnoredDuringExecution:  # hard rule: pod stays Pending unless a node matches
31 |             nodeSelectorTerms:
32 |             - matchExpressions:
33 |               - key: disktype  # node label, e.g. `kubectl label node <name> disktype=ssd`
34 |                 operator: In
35 |                 values:
36 |                 - ssd
37 |       containers:
38 |       - image: mysql:5.6
39 |         name: mysql
40 |         env:
41 |         - name: MYSQL_ROOT_PASSWORD
42 |           value: "password"  # NOTE(review): plaintext credential in the manifest; prefer a Secret
43 |         ports:
44 |         - containerPort: 3306
45 |           name: mysql
46 |         volumeMounts:
47 |         - name: mysql-persistent-storage
48 |           mountPath: /var/lib/mysql
49 |       volumes:
50 |       - name: mysql-persistent-storage
51 |         hostPath:  # node-local path; data stays on whichever node runs the pod
52 |           path: /mnt/data
53 |
--------------------------------------------------------------------------------
/12-advanced-k8s/01-scheduling/02-podaffinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: with-pod-affinity
5 | spec:
6 |   affinity:
7 |     podAffinity:
8 |       requiredDuringSchedulingIgnoredDuringExecution:  # hard rule: co-locate with pods labeled security=S1
9 |       - labelSelector:
10 |           matchExpressions:
11 |           - key: security
12 |             operator: In
13 |             values:
14 |             - S1
15 |         topologyKey: failure-domain.beta.kubernetes.io/zone  # NOTE(review): deprecated label key; newer clusters use topology.kubernetes.io/zone — confirm cluster version
16 |   containers:
17 |   - name: with-pod-affinity
18 |     image: docker.io/ocpqe/hello-pod
19 |
--------------------------------------------------------------------------------
/12-advanced-k8s/01-scheduling/03-tolerated-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: nginx
7 |   name: nginx
8 | spec:
9 |   containers:
10 |   - image: nginx:1.7.9
11 |     name: nginx
12 |     resources: {}
13 |   dnsPolicy: ClusterFirst
14 |   restartPolicy: Never  # one-shot pod; the container is not restarted on exit
15 |   tolerations:
16 |   - key: "os"  # tolerates the os=windows:NoSchedule taint applied in the Readme walkthrough
17 |     value: "windows"
18 |     effect: "NoSchedule"
19 |   nodeSelector:
20 |     cpu: kotu  # only schedulable on nodes labeled cpu=kotu
--------------------------------------------------------------------------------
/12-advanced-k8s/01-scheduling/04-single-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   creationTimestamp: null
5 |   labels:
6 |     run: nginx
7 |   name: nginx
8 | spec:
9 |   containers:
10 |   - image: nginx:1.7.9
11 |     name: nginx
12 |     resources: {}
13 |   dnsPolicy: ClusterFirst
14 |   restartPolicy: Never  # bare pod; once evicted (e.g. by drain) it is not recreated
15 |   nodeSelector:
16 |     cpu: kotu  # only schedulable on nodes labeled cpu=kotu
--------------------------------------------------------------------------------
/12-advanced-k8s/01-scheduling/Readme.md:
--------------------------------------------------------------------------------
1 | #### Pod Affinity
2 | ```bash
3 | kubectl apply -f 02-podaffinity.yaml
4 | kubectl describe pod with-pod-affinity
5 | ```
6 |
7 | ```
8 | Events:
9 | Type Reason Age From Message
10 | ---- ------ ---- ---- -------
11 | Warning FailedScheduling 6s (x2 over 6s) default-scheduler 0/7 nodes are available: 7 node(s) didn't match pod affinity rules, 7 node(s) didn't match pod affinity/anti-affinity.
12 | Normal NotTriggerScaleUp 5s cluster-autoscaler pod didn't trigger scale-up (it wouldn't fit if a new node is added): 1 node(s) didn't match pod affinity/anti-affinity, 1 node(s) didn't match pod affinity rules
13 | ```
14 |
15 | ```bash
16 | kubectl run nginx --image=nginx --labels=security=S1 --restart=Never
17 | watch -n2 kubectl get pods
18 | ```
19 |
20 | #### Taints and Tolerations
21 | NoSchedule | PreferNoSchedule | NoExecute
22 |
23 | ```bash
24 | kubectl taint node aks-nodepool1-17949986-vmss000013 os=windows:NoSchedule
25 | kubectl label node aks-nodepool1-17949986-vmss000013 cpu=kotu
26 | kubectl apply -f 03-tolerated-pod.yaml
27 | kubectl taint node aks-nodepool1-17949986-vmss000013 os:NoSchedule-
28 | ```
29 |
30 | #### Cordon/Drain
31 | ```bash
32 | kubectl apply -f 04-single-pod.yaml
33 | kubectl cordon aks-nodepool1-17949986-vmss000013
34 | kubectl drain aks-nodepool1-17949986-vmss000013 --ignore-daemonsets
35 | kubectl delete pod nginx
36 | kubectl drain aks-nodepool1-17949986-vmss000013 --ignore-daemonsets
37 | kubectl get pods
38 | kubectl uncordon aks-nodepool1-17949986-vmss000013
39 | ```
40 |
--------------------------------------------------------------------------------
/12-advanced-k8s/Readme.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devlerazureda/kubernetes-essentials/9ba1e24a0cd45be98d3b6794b8690bfae6ed3f93/12-advanced-k8s/Readme.md
--------------------------------------------------------------------------------
/ReadMe.md:
--------------------------------------------------------------------------------
1 | ### Installations
2 | Install kubectx https://github.com/ahmetb/kubectx
3 | Install kube-ps1 https://github.com/jonmosco/kube-ps1
4 | Install azure sdk
5 | Install Kube Alias https://github.com/ahmetb/kubectl-aliases
6 |
7 | ### Readings
8 | https://azure.microsoft.com/en-us/resources/kubernetes-up-and-running/
9 |
10 | ### Workshop
11 | https://aksworkshop.io/
12 |
13 | #### Todo
14 | - PodPresets
15 | - ResourceQuota
16 | - Namespaces
17 | - Labels
18 | - Bash Completion
19 | - dnsPolicy
20 | - ClusterFirstWithHostNet
21 | - ClusterFirst
22 | - WindowsUserNames aka RunAsUserName
23 |
24 |
--------------------------------------------------------------------------------