├── README.md ├── batch-job.yml ├── ch-01-pods-containers ├── burstable-pod.yaml ├── cow-say-with-init ├── guaranteed-pod.yaml ├── myfirstpod.yml ├── pod-simple-qos-besteffort.yml ├── pod-simple-qos-burstable.yml ├── pod-simple-qos-guaranteed.yml ├── pod-simple-with-ports.yml ├── pod-with-cpu-exceed.yml ├── pod-with-cpu-limits.yml ├── pod-with-env-variable ├── pod-with-memory-exceed.yml ├── pod-with-memory-limits.yml ├── pod-with-ports.yml ├── pod-with-resource-limits.yml └── tshoot-pod-run.yaml ├── ch-02-pod-scheduling ├── metric-server.yaml ├── pod-for-anti-affinity.yml ├── pod-for-specific-node-selector.yml ├── pod-for-specific-node.yml ├── pod-priority.yaml ├── pod-with-anti-pod-affinity.yml ├── pod-with-node-affinity-cannot.yml ├── pod-with-node-affinity-multiple.yml ├── pod-with-node-affinity.yml ├── pod-with-preferred-node-affinity.yml ├── pod-with-required-node-affinity.yml └── pod-with-taint.yml ├── ch-03-deployments ├── calculate-hpa ├── declarative-deployment.yaml ├── deployment-different ├── deployment-for-autoscaler.yaml ├── deployment-hpa-with-policies ├── deployment-one.yml ├── deployment-using-affinity.yaml ├── deployment-webserver-with-service.yml ├── deployment-with-anti-pod-affinity.yaml ├── deployment-with-strategy ├── hpa-for-autoscaler-deployment.yaml ├── hpa-for-deployment-v2.yaml ├── imperative-deployment.sh ├── secure-apache ├── tshoot-deployment-svc.yaml └── tshoot-deployment.yaml ├── ch-04-services ├── pod-liveness-probe ├── pod-readiness-liveness-probes ├── pod-simple-lifecycle-events.yml ├── pod-simple-with-health-check.yml ├── pod-with-health-check.yml ├── probes ├── replica-set-one-service.yml ├── service-for-pod.yml └── service-nodeport.yml ├── ch-05-namespaces ├── limit-ranges-default-min-max.yaml ├── limit-ranges.yaml ├── namespace-cpu-limitrange.yml ├── namespace-memory-limitrange.yml ├── namespace-pod-quota.yml └── namespace-resourcequota.yml ├── ch-06-storage ├── deployment-using-pvc.yaml ├── empty-dir ├── empty-dir-multiple ├── persistent-volume-nfs.yaml ├── pod-with-volume-external.yml ├── pod-with-volume.yml ├── pvc-nfs.yaml ├── service-node-port.yaml └── storage-class-nfs ├── ch-07-rbac ├── restricted-role ├── second-kubernetes-admin ├── service-account-permissions ├── steps-for-user-authentication └── user-config-file ├── ch-10-high-value-extra ├── Dockerfile-multi-stage-build ├── Dockerfile-single-stage ├── apache2-with-non-root-user ├── backup-restore-velero-different-clusters ├── backup-using-velero ├── change-docker-registry ├── change-worker-internal-ip ├── changing-docker-root-dir ├── delete-node ├── descheduler ├── docker-change-default-ip-range ├── docker-compose-minecraft ├── docker-compose.yaml ├── dockerfile-mysql ├── entrypoint-vs-cmd ├── explore-images-using-dive ├── gui-on-server ├── helm-install ├── image-pull-secrets ├── install kubectl ├── kubectl-view-allocations ├── kubernetes-cluster-with-crio ├── kubernetes-dashboard ├── kubescape ├── kustomize ├── lens-prometheus-grafana ├── metal-lb load balancer ├── multi-master-setup ├── nginx-ingress-controller ├── patch-resources ├── priority-classes ├── private-image-registry ├── remove-stuck-namespaces ├── restore-etcd-using-etcdctl ├── script-with-config-map ├── statefulsets-app.txt ├── topology-spread.yaml ├── using visual studio code ├── using-environment-variables └── validate-kubernetes-yaml ├── ch-11-challenges-one └── readme.md ├── ch-12-challenges-two └── readme.md ├── cluster-setup.sh ├── daemonset.yml ├── k8s-master.sh ├── k8s-node.sh └── scripts ├── calico.yaml ├── 
cluster-uninstall.sh ├── common-1.28.0-with-proxy ├── common-1.28.0-without-proxy ├── common-1.28.9-without-proxy-may-2024 ├── common-with-containerd-july-2024 ├── docker-kind-install.sh ├── flannel-install-steps ├── k8s-manager-1-23-7-aws.sh ├── k8s-manager-1-24.4-00.sh ├── k8s-manager-1.26.0.sh ├── k8s-node-1-23-7-aws.sh ├── k8s-node-1-24.4-00.sh ├── k8s-node-1.26.0.sh ├── on-manager-1.28.0 ├── os-proxy-settings.sh ├── packages-kubernetes-1.30 └── proxy-configurations /README.md: -------------------------------------------------------------------------------- 1 | # kubernetes 2 | ## Additional learning material on kubernetes 3 | ### You can verify your yaml code online - https://www.yamllint.com/ 4 | 5 | -------------------------------------------------------------------------------- /batch-job.yml: -------------------------------------------------------------------------------- 1 | #batch-job.yml 2 | 3 | apiVersion: batch/v1 4 | kind: Job 5 | metadata: 6 | name: batch-job 7 | spec: 8 | # completion: 5 9 | # this will run the pod 5 times, sequentially 10 | # parallelism: 2 11 | # this will create 2 parallel jobs, two pods and run in parallel 12 | template: 13 | metadata: 14 | labels: 15 | app: batch-job 16 | spec: 17 | restartPolicy: OnFailure 18 | containers: 19 | - name: nn-batch 20 | image: lovelearnlinux/batch-job 21 | 22 | # run the job 23 | kubectl create -f batch-job.yml 24 | # get info about job 25 | kubectl get jobs 26 | # get the pod its running 27 | kubectl get po 28 | # after 2 minutes the job will finish 29 | # check it with 30 | kubectl get po -a 31 | # check the logs of the pod 32 | kubectl logs 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /ch-01-pods-containers/burstable-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: burst 5 | labels: 6 | app: apache 7 | spec: 8 | containers: 9 | - name: networknuts-app 10 | image: lovelearnlinux/webserver:v1 11 | resources: 12 | limits: 13 | memory: 250Mi 14 | cpu: 200m 15 | requests: 16 | memory: 150Mi 17 | cpu: 100m 18 | -------------------------------------------------------------------------------- /ch-01-pods-containers/cow-say-with-init: -------------------------------------------------------------------------------- 1 | kind: Pod #A 2 | apiVersion: v1 3 | metadata: #B 4 | name: nginx 5 | spec: #C 6 | restartPolicy: Always 7 | volumes: #D 8 | - name: data 9 | emptyDir: {} 10 | initContainers: 11 | - name: nginx-init #E 12 | image: docker/whalesay 13 | command: [sh, -c] 14 | args: [echo "
<pre>$(cowsay -b 'Hello Kubernetes')</pre>
" > /data/index.html] 15 | volumeMounts: 16 | - name: data 17 | mountPath: /data 18 | containers: 19 | - name: nginx #F 20 | image: nginx:1.11 21 | volumeMounts: 22 | - name: data 23 | mountPath: /usr/share/nginx/html 24 | -------------------------------------------------------------------------------- /ch-01-pods-containers/guaranteed-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: guaranteed 5 | labels: 6 | app: apache 7 | spec: 8 | containers: 9 | - name: networknuts-app 10 | image: lovelearnlinux/webserver:v1 11 | resources: 12 | limits: 13 | memory: 250Mi 14 | cpu: 200m 15 | requests: 16 | memory: 250Mi 17 | cpu: 200m 18 | -------------------------------------------------------------------------------- /ch-01-pods-containers/myfirstpod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nnappone 5 | labels: 6 | app: nnappone 7 | spec: 8 | containers: 9 | - name: networknuts-app 10 | image: lovelearnlinux/webserver:v1 11 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-simple-qos-besteffort.yml: -------------------------------------------------------------------------------- 1 | # pod-simple-qos-besteffort.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | # 15 | # Creating a simple pod. Which has apache 16 | # configured inside it. There are no cpu or 17 | # memory limits applied on pod. So kubernetes 18 | # cluster will try to give the best resources 19 | # available. But cannot guarantee any number. 20 | # 21 | # QoS = Best Effort 22 | # 23 | # create namespace learning - 24 | # kubectl create namespace learning 25 | # 26 | # run the pod 27 | # kubectl create -f pod-simple-qos-besteffort.yml 28 | # 29 | # check the pod 30 | # kubectl get pods --namespace=learning 31 | # 32 | # get details about pod 33 | # kubectl describe pod/nnappone --namespace=learning 34 | # 35 | # Check the "QoS Class" value. 36 | # 37 | # delete pod 38 | # kubectl delete pod/nnappone --namespace=learning 39 | # 40 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-simple-qos-burstable.yml: -------------------------------------------------------------------------------- 1 | # pod-simple-qos-burstable.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | resources: 15 | limits: 16 | memory: "250Mi" 17 | requests: 18 | memory: "150Mi" 19 | # 20 | # Creating a simple pod. Which has apache 21 | # configured inside it. There are memory request 22 | # and memory limits restrictions. So pod will 23 | # initially be given 150Mi of physical memory, but 24 | # it can burst upto 250Mi. If no other default limits 25 | # are applied. So kubernetes can burst memory, if 26 | # needed. 
27 | # 28 | # QoS = Burstable 29 | # 30 | # create namespace learning - 31 | # kubectl create namespace learning 32 | # 33 | # run the pod 34 | # kubectl create -f pod-simple-qos-burstable.yml 35 | # 36 | # check the pod 37 | # kubectl get pods --namespace=learning 38 | # 39 | # get details about pod 40 | # kubectl describe pod/nnappone --namespace=learning 41 | # 42 | # Check the "QoS Class" value. 43 | # 44 | # delete pod 45 | # kubectl delete pod/nnappone --namespace=learning 46 | # 47 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-simple-qos-guaranteed.yml: -------------------------------------------------------------------------------- 1 | # pod-simple-qos-burstable.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | resources: 15 | limits: 16 | memory: "250Mi" 17 | cpu: "400m" 18 | requests: 19 | memory: "250Mi" 20 | cpu: "400m" 21 | # 22 | # Creating a simple pod. Which has apache 23 | # configured inside it. There are memory & cpu 24 | # restrictions. But both are same so kubernetes 25 | # cluster will guarantee that much of 26 | # resources to the pod, before running it on any node. 27 | # 28 | # QoS = Guaranteed 29 | # 30 | # create namespace learning - 31 | # kubectl create namespace learning 32 | # 33 | # run the pod 34 | # kubectl create -f pod-simple-qos-guaranteed.yml 35 | # 36 | # check the pod 37 | # kubectl get pods --namespace=learning 38 | # 39 | # get details about pod 40 | # kubectl describe pod/nnappone --namespace=learning 41 | # 42 | # Check the "QoS Class" value. 43 | # 44 | # delete pod 45 | # kubectl delete pod/nnappone --namespace=learning 46 | # 47 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-simple-with-ports.yml: -------------------------------------------------------------------------------- 1 | # pod-simple-with-ports.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | ports: 15 | - containerPort: 80 16 | name: http 17 | protocol: TCP 18 | 19 | # 20 | # Creating a simple pod. Which has apache 21 | # configured inside it. Exposed port is 80 22 | # using TCP protocol 23 | # 24 | # create namespace learning - 25 | # kubectl create namespace learning 26 | # 27 | # run the pod 28 | # kubectl create -f pod-simple-with-ports.yml 29 | # 30 | # check the pod 31 | # kubectl get pods --namespace=learning 32 | # 33 | # get details about pod 34 | # kubectl describe pod/nnappone --namespace=learning 35 | # 36 | # Find the IP address of this pod and the node where 37 | # its running. 
Go to that node and do a : 38 | # 39 | # curl http://ip-address-of-pod 40 | # 41 | # You should the website welcome message from 42 | # Network Nuts 43 | # 44 | # delete pod 45 | # kubectl delete pod/nnappone --namespace=learning 46 | # 47 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-with-cpu-exceed.yml: -------------------------------------------------------------------------------- 1 | # pod-with-cpu-exceed.yml 2 | 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | resources: 15 | limits: 16 | cpu: "4" 17 | requests: 18 | cpu: "4" 19 | 20 | 21 | # 22 | # the pod wants 4 cpu's which is not available 23 | # on any node in the cluster. So it should fail 24 | # with "insufficient cpu" message. 25 | # 26 | # create namespace learning - 27 | # kubectl create namespace learning 28 | # 29 | # run the pod 30 | # kubectl create -f pod-with-cpu-exceed.yml 31 | # 32 | # check the pod 33 | # kubectl get pods 34 | # 35 | # get details about pod 36 | # kubectl describe pod/nnappone --namespace=learning 37 | # 38 | # delete pod 39 | # kubectl delete pod/nnappone --namespace=learning 40 | # 41 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-with-cpu-limits.yml: -------------------------------------------------------------------------------- 1 | # pod-with-cpu-limits.yml 2 | 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | resources: 15 | limits: 16 | cpu: "1" 17 | requests: 18 | cpu: "0.5" 19 | 20 | # 21 | # Pod will start with 0.5 cpu share and can 22 | # go upto 1 cpu. Well within the permissible 23 | # limits. 
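# (Added note: if the metrics-server addon is installed in the cluster, actual
#  CPU usage can be compared against these requests/limits with -
#  kubectl top pod nnappone --namespace=learning
#  This check is optional and not required for the exercise itself.)
#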
24 | # 25 | # create namespace learning - 26 | # kubectl create namespace learning 27 | # 28 | # run the pod 29 | # kubectl create -f pod-with-cpu-limits.yml 30 | # 31 | # check the pod 32 | # kubectl get pods 33 | # 34 | # get details about pod 35 | # kubectl describe pod/nnappone --namespace=learning 36 | # 37 | # delete pod 38 | # kubectl delete pod/nnappone --namespace=learning 39 | # 40 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-with-env-variable: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: dbpod 6 | spec: 7 | containers: 8 | - name: mysql 9 | image: mysql:latest 10 | ports: 11 | - containerPort: 3329 12 | env: 13 | - name: MYSQL_ROOT_PASSWORD 14 | value: "redhat" 15 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-with-memory-exceed.yml: -------------------------------------------------------------------------------- 1 | # pod-with-memory-exceed.yml 2 | 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/stress:latest 14 | resources: 15 | limits: 16 | memory: "250Mi" 17 | requests: 18 | memory: "150Mi" 19 | command: ["stress"] 20 | args: ["--vm", "1", "--vm-bytes", "275M", "--vm-hang", "1"] 21 | 22 | 23 | # 24 | # here using the stress utility and using args 25 | # we are trying to claim 275MiB of memory while 26 | # running the pod. Which should fail, as its beyond 27 | # the max limit of 250MiB 28 | # 29 | # OOM killer (Out Of Memory) will be activated and 30 | # kill the pod 31 | # 32 | # create namespace learning - 33 | # kubectl create namespace learning 34 | # 35 | # run the pod 36 | # kubectl create -f pod-with-memory-exceed.yml 37 | # 38 | # check the pod 39 | # kubectl get pods 40 | # 41 | # get details about pod 42 | # kubectl describe pod/nnappone --namespace=learning 43 | # 44 | # delete pod 45 | # kubectl delete pod/nnappone --namespace=learning 46 | # 47 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-with-memory-limits.yml: -------------------------------------------------------------------------------- 1 | # pod-with-memory-limits.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/stress:latest 14 | resources: 15 | limits: 16 | memory: "250Mi" 17 | requests: 18 | memory: "150Mi" 19 | command: ["stress"] 20 | args: ["--vm", "1", "--vm-bytes", "175M", "--vm-hang", "1"] 21 | 22 | 23 | # 24 | # here using the stress utility and using args 25 | # we are trying to claim 175MiB of memory while 26 | # running the pod. 
Which should be success, as its 27 | # within the max limit of 250MiB 28 | # 29 | # create namespace learning - 30 | # kubectl create namespace learning 31 | # 32 | # run the pod 33 | # kubectl create -f pod-with-memory-limits.yml 34 | # 35 | # check the pod 36 | # kubectl get pods 37 | # 38 | # get details about pod 39 | # kubectl describe pod/nnappone --namespace=learning 40 | # 41 | # delete pod 42 | # kubectl delete pod/nnappone --namespace=learning 43 | # 44 | # 45 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-with-ports.yml: -------------------------------------------------------------------------------- 1 | # pod-with-ports.yml 2 | apiVersion: v1 3 | 4 | kind: Pod 5 | 6 | metadata: 7 | name: nnwebserver 8 | 9 | spec: 10 | containers: 11 | - name: nnwebserver 12 | image: lovelearnlinux/webserver:v1 13 | ports: 14 | - containerPort: 80 15 | name: http 16 | protocol: TCP 17 | -------------------------------------------------------------------------------- /ch-01-pods-containers/pod-with-resource-limits.yml: -------------------------------------------------------------------------------- 1 | # pod-with-resource-limits.yml 2 | apiVersion: v1 3 | 4 | kind: Pod 5 | 6 | metadata: 7 | name: nnwebserver 8 | 9 | spec: 10 | containers: 11 | - name: nnwebserver 12 | image: lovelearnlinux/webserver:v1 13 | resources: 14 | requests: #request means minimum 15 | cpu: "500m" 16 | memory: "128Mi" 17 | limits: #limits means maximum 18 | cpu: "1000m" 19 | memory: "256Mi" 20 | ports: 21 | - containerPort: 80 22 | name: http 23 | protocol: TCP 24 | -------------------------------------------------------------------------------- /ch-01-pods-containers/tshoot-pod-run.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nnappone 5 | labels: 6 | app: nnappone 7 | spec: 8 | containers: 9 | - name: networknuts-app 10 | image: lovelearnlinux/webserver:v1 11 | resources: 12 | limits: 13 | cpu: "10" 14 | requests: 15 | cpu: "10" 16 | 17 | #pod must be in running state 18 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/metric-server.yaml: -------------------------------------------------------------------------------- 1 | ### USE THIS FOR LATEST VERSION 2 | ### kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml 3 | 4 | apiVersion: v1 5 | kind: ServiceAccount 6 | metadata: 7 | labels: 8 | k8s-app: metrics-server 9 | name: metrics-server 10 | namespace: kube-system 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRole 14 | metadata: 15 | labels: 16 | k8s-app: metrics-server 17 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 18 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 19 | rbac.authorization.k8s.io/aggregate-to-view: "true" 20 | name: system:aggregated-metrics-reader 21 | rules: 22 | - apiGroups: 23 | - metrics.k8s.io 24 | resources: 25 | - pods 26 | - nodes 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | --- 32 | apiVersion: rbac.authorization.k8s.io/v1 33 | kind: ClusterRole 34 | metadata: 35 | labels: 36 | k8s-app: metrics-server 37 | name: system:metrics-server 38 | rules: 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - nodes/metrics 43 | verbs: 44 | - get 45 | - apiGroups: 46 | - "" 47 | resources: 48 | - pods 49 | - nodes 50 | verbs: 51 | - get 52 | - list 53 | - watch 54 | --- 55 | apiVersion: 
rbac.authorization.k8s.io/v1 56 | kind: RoleBinding 57 | metadata: 58 | labels: 59 | k8s-app: metrics-server 60 | name: metrics-server-auth-reader 61 | namespace: kube-system 62 | roleRef: 63 | apiGroup: rbac.authorization.k8s.io 64 | kind: Role 65 | name: extension-apiserver-authentication-reader 66 | subjects: 67 | - kind: ServiceAccount 68 | name: metrics-server 69 | namespace: kube-system 70 | --- 71 | apiVersion: rbac.authorization.k8s.io/v1 72 | kind: ClusterRoleBinding 73 | metadata: 74 | labels: 75 | k8s-app: metrics-server 76 | name: metrics-server:system:auth-delegator 77 | roleRef: 78 | apiGroup: rbac.authorization.k8s.io 79 | kind: ClusterRole 80 | name: system:auth-delegator 81 | subjects: 82 | - kind: ServiceAccount 83 | name: metrics-server 84 | namespace: kube-system 85 | --- 86 | apiVersion: rbac.authorization.k8s.io/v1 87 | kind: ClusterRoleBinding 88 | metadata: 89 | labels: 90 | k8s-app: metrics-server 91 | name: system:metrics-server 92 | roleRef: 93 | apiGroup: rbac.authorization.k8s.io 94 | kind: ClusterRole 95 | name: system:metrics-server 96 | subjects: 97 | - kind: ServiceAccount 98 | name: metrics-server 99 | namespace: kube-system 100 | --- 101 | apiVersion: v1 102 | kind: Service 103 | metadata: 104 | labels: 105 | k8s-app: metrics-server 106 | name: metrics-server 107 | namespace: kube-system 108 | spec: 109 | ports: 110 | - name: https 111 | port: 443 112 | protocol: TCP 113 | targetPort: https 114 | selector: 115 | k8s-app: metrics-server 116 | --- 117 | apiVersion: apps/v1 118 | kind: Deployment 119 | metadata: 120 | labels: 121 | k8s-app: metrics-server 122 | name: metrics-server 123 | namespace: kube-system 124 | spec: 125 | selector: 126 | matchLabels: 127 | k8s-app: metrics-server 128 | strategy: 129 | rollingUpdate: 130 | maxUnavailable: 0 131 | template: 132 | metadata: 133 | labels: 134 | k8s-app: metrics-server 135 | spec: 136 | containers: 137 | - args: 138 | - --cert-dir=/tmp 139 | - --secure-port=4443 140 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 141 | - --kubelet-use-node-status-port 142 | - --metric-resolution=15s 143 | - --kubelet-insecure-tls 144 | image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1 145 | imagePullPolicy: IfNotPresent 146 | livenessProbe: 147 | failureThreshold: 3 148 | httpGet: 149 | path: /livez 150 | port: https 151 | scheme: HTTPS 152 | periodSeconds: 10 153 | name: metrics-server 154 | ports: 155 | - containerPort: 4443 156 | name: https 157 | protocol: TCP 158 | readinessProbe: 159 | failureThreshold: 3 160 | httpGet: 161 | path: /readyz 162 | port: https 163 | scheme: HTTPS 164 | initialDelaySeconds: 20 165 | periodSeconds: 10 166 | resources: 167 | requests: 168 | cpu: 100m 169 | memory: 200Mi 170 | securityContext: 171 | allowPrivilegeEscalation: false 172 | readOnlyRootFilesystem: true 173 | runAsNonRoot: true 174 | runAsUser: 1000 175 | volumeMounts: 176 | - mountPath: /tmp 177 | name: tmp-dir 178 | nodeSelector: 179 | kubernetes.io/os: linux 180 | priorityClassName: system-cluster-critical 181 | serviceAccountName: metrics-server 182 | volumes: 183 | - emptyDir: {} 184 | name: tmp-dir 185 | --- 186 | apiVersion: apiregistration.k8s.io/v1 187 | kind: APIService 188 | metadata: 189 | labels: 190 | k8s-app: metrics-server 191 | name: v1beta1.metrics.k8s.io 192 | spec: 193 | group: metrics.k8s.io 194 | groupPriorityMinimum: 100 195 | insecureSkipTLSVerify: true 196 | service: 197 | name: metrics-server 198 | namespace: kube-system 199 | version: v1beta1 200 | versionPriority: 100 201 | 
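#
# (Added note: after applying this manifest, the following standard kubectl
#  commands can be used to confirm that metrics-server is up and serving data;
#  they are not part of the original manifest:)
#
# kubectl -n kube-system rollout status deployment metrics-server
# kubectl get apiservices v1beta1.metrics.k8s.io
# kubectl top nodes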
-------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-for-anti-affinity.yml: -------------------------------------------------------------------------------- 1 | # pod-for-anti-affinity.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnweb 7 | namespace: learning 8 | labels: 9 | app: nnweb 10 | spec: 11 | containers: 12 | - name: nnweb-app 13 | image: lovelearnlinux/webserver:v1 14 | # 15 | # Creating a simple pod. Which has apache 16 | # configured inside it. 17 | # 18 | # create namespace learning - 19 | # kubectl create namespace learning 20 | # 21 | # run the pod 22 | # kubectl create -f pod-for-anti-affinity.yml 23 | # 24 | # check the pod 25 | # kubectl get pods --namespace=learning 26 | # 27 | # get details about pod 28 | # kubectl describe pod nnweb --namespace=learning 29 | # 30 | # Find the NODE, where its running. 31 | # 32 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-for-specific-node-selector.yml: -------------------------------------------------------------------------------- 1 | # pod-for-specific-node-selector.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | resources: 15 | limits: 16 | memory: "500Mi" 17 | requests: 18 | memory: "300Mi" 19 | nodeSelector: 20 | sku: small 21 | 22 | # 23 | # First label one/more nodes with some kind of tag 24 | # we are classifying our nodes as small, medium & 25 | # heavy and the label name is "sku" value can be 26 | # "small / medium / heavy" 27 | # 28 | # First label nodeone.example.com as small: 29 | # 30 | # kubectl label nodes nodeone.example.com sku=small 31 | # 32 | # Confirm for the labels on nodes using: 33 | # 34 | # kubectl get nodes --show-labels 35 | # 36 | # create namespace learning - 37 | # kubectl create namespace learning 38 | # 39 | # run the pod 40 | # kubectl create -f pod-for-specific-node-selector.yml 41 | # 42 | # check the pod 43 | # kubectl get pods --namespace=learning --output=wide 44 | # 45 | # get details about pod 46 | # kubectl describe pod/nnappone --namespace=learning 47 | # 48 | # check for the node where its running 49 | # 50 | # Also look for: 51 | # Node-Selectors: sku=small 52 | # 53 | # value in the kubectl describe output 54 | # 55 | # delete pod 56 | # kubectl delete pod/nnappone --namespace=learning 57 | # 58 | # delete the label from nodeone.example.com 59 | # kubectl label node nodeone.example.com sku- 60 | # 61 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-for-specific-node.yml: -------------------------------------------------------------------------------- 1 | # pod-for-specific-node.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | nodeName: nodeone.example.com #this will assign pod to specific node 12 | containers: 13 | - name: networknuts-app 14 | image: lovelearnlinux/webserver:v1 15 | resources: 16 | limits: 17 | memory: "500Mi" 18 | requests: 19 | memory: "300Mi" 20 | 21 | 22 | # 23 | # We are scheduling our pod to run on a SPECIFIC 24 | # node - nodeone.example.com using 25 | # "nodeName" setting 26 | # 27 | # create namespace learning - 28 | # kubectl create namespace learning 29 | # 30 | # run the pod 31 | # kubectl 
create -f pod-for-specific-node.yml 32 | # 33 | # check the pod 34 | # kubectl get pods 35 | # 36 | # get details about pod 37 | # kubectl describe pod/nnappone --namespace=learning 38 | # 39 | # check for the node where its running 40 | # 41 | # delete pod 42 | # kubectl delete pod/nnappone --namespace=learning 43 | # 44 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-priority.yaml: -------------------------------------------------------------------------------- 1 | # Pods can have priority. That's importance of a Pod relative to other Pods. If a Pod cannot be scheduled, 2 | # the scheduler tries to preempt (evict) lower priority Pods to make scheduling of the pending Pod possible. 3 | 4 | ## POINTS TO REMEMBER ## 5 | 6 | # Its a two step process: 7 | # 1 - create a priority class 8 | # 2 - add pod in that priority class 9 | 10 | # A PriorityClass is a non-namespaced object. Higher value, higher priority. 11 | # Value can be uptp 1,000,000,000 (one billion) 12 | 13 | # But how can we safeguard system-critical pods from preemption? 14 | # There are two default high-priority classes set by Kubernetes 15 | 16 | # system-node-critical: This class has a value of 2000001000. Pods like etcd, kube-apiserver, and Controller manager use this priority class. 17 | # system-cluster-critical: This class has a value of 2000000000. Addon Pods like coredns, calico controller, metrics server, etc use this Priority class. 18 | 19 | STEP #1 - Create a Priority Class 20 | vim mission-critical-priority-class.yaml 21 | 22 | ## file start here 23 | 24 | apiVersion: scheduling.k8s.io/v1 25 | kind: PriorityClass 26 | metadata: 27 | name: mission-critical-apps 28 | value: 1000000 29 | preemptionPolicy: Never #this will not remove(preempt) lower priority pods, pod will this priority class 30 | # will be just placed ahead of other pods in the scheduling queue. The value can 31 | # be set to "PreemptLowerPriority" which will remove lower priority pods. 32 | # NOT expected from a Gentleman :) 33 | globalDefault: false #will not change priority of existing pods 34 | description: "To be used only for mission critical applications" 35 | 36 | # file ends here 37 | 38 | STEP #2 - Configure application to use priority class. 39 | vim mission-critical-pod.yaml 40 | 41 | # file start here 42 | 43 | apiVersion: v1 44 | kind: Pod 45 | metadata: 46 | name: topgun 47 | labels: 48 | app: topgun 49 | env: prod 50 | spec: 51 | containers: 52 | - name: boxone 53 | image: lovelearnlinux/webserver:v1 54 | imagePullPolicy: IfNotPresent 55 | priorityClassName: mission-critical-apps 56 | 57 | # file ends here 58 | 59 | ###### ADDITIONAL POINTS TO REMEMBER ###### 60 | # When Pod priority is enabled, the scheduler orders pending Pods by their priority and a pending Pod is placed 61 | # ahead of other pending Pods with lower priority in the scheduling queue. As a result, the higher priority Pod may 62 | # be scheduled sooner than Pods with lower priority if its scheduling requirements are met. 63 | # 64 | # If such Pod cannot be scheduled, scheduler will continue and tries to schedule other lower priority Pods 65 | # But, if for some reason, the scheduling requirements of "priority class" pods are not met, the scheduler goes 66 | # ahead with scheduling the lower priority pods. 67 | # 68 | # The scheduler preempts (evicts) low priority pod from a node where it can schedule the higher priority pod. 69 | # The evicted pod gets a graceful default termination time of 30 seconds. 
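
# (Added note: once both objects above are created, the assigned priority can be
#  verified with standard kubectl commands; the names match the example above:)
# kubectl get priorityclass mission-critical-apps
# kubectl get pod topgun -o jsonpath='{.spec.priorityClassName}{" "}{.spec.priority}{"\n"}'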
70 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-with-anti-pod-affinity.yml: -------------------------------------------------------------------------------- 1 | # pod-with-anti-pod-affinity.yml 2 | # 3 | # 4 | # First create a pod (nnweb) using 5 | # kubectl create -f pod-for-anti-affinity.yml 6 | # 7 | # Check the node where its running. 8 | # 9 | # Now we want this pod NEVER to be deployed on 10 | # same node, where "nnweb" pod is running 11 | # 12 | apiVersion: v1 13 | kind: Pod 14 | metadata: 15 | name: nnappone 16 | namespace: learning 17 | labels: 18 | app: nnappone 19 | spec: 20 | containers: 21 | - name: networknuts-app 22 | image: lovelearnlinux/webserver:v1 23 | resources: 24 | limits: 25 | memory: "500Mi" 26 | requests: 27 | memory: "300Mi" 28 | affinity: 29 | # podAntiAffinity: 30 | podAffinity: 31 | requiredDuringSchedulingIgnoredDuringExecution: 32 | - labelSelector: 33 | matchExpressions: 34 | - key: app 35 | operator: In 36 | values: 37 | - nnweb 38 | topologyKey: "kubernetes.io/hostname" 39 | 40 | 41 | # 42 | # create namespace learning - 43 | # kubectl create namespace learning 44 | # 45 | # run the pod 46 | # kubectl create -f pod-with-anti-pod-affinity.yml 47 | # 48 | # check the pod 49 | # kubectl get pods --namespace=learning --output=wide 50 | # 51 | # get details about pod 52 | # kubectl describe pod/nnappone --namespace=learning 53 | # 54 | # get details of all the pods 55 | # kubectl get pods -o wide --namespace=learning 56 | # 57 | # check that both the pods will be running on different nodes 58 | # and nnappone pod will never be deployed on same node where 59 | # nnweb pod is running 60 | # 61 | # delete pod 62 | # kubectl delete pod nnappone --namespace=learning 63 | # kubectl delete pod nnweb --namespace=learning 64 | # 65 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-with-node-affinity-cannot.yml: -------------------------------------------------------------------------------- 1 | # pod-with-node-affinity-cannot.yml 2 | # 3 | # First assign labels on all nodes in our cluster 4 | # kubectl label node nodeone.example.com size=small 5 | # kubectl label node nodetwo.example.com size=large 6 | # 7 | # Confirm the labels 8 | # kubectl get nodes --show-labels 9 | # 10 | apiVersion: v1 11 | kind: Pod 12 | metadata: 13 | name: nnappone 14 | namespace: learning 15 | labels: 16 | app: nnappone 17 | spec: 18 | containers: 19 | - name: networknuts-app 20 | image: lovelearnlinux/webserver:v1 21 | resources: 22 | limits: 23 | memory: "500Mi" 24 | requests: 25 | memory: "300Mi" 26 | affinity: 27 | nodeAffinity: 28 | requiredDuringSchedulingIgnoredDuringExecution: 29 | nodeSelectorTerms: 30 | - matchExpressions: 31 | - key: size 32 | operator: NotIn 33 | values: 34 | - small 35 | 36 | 37 | 38 | # 39 | # Lets think we have some nodes with label small/medium/large 40 | # We want this pod, NEVER to be deployed on node having size=small 41 | # 42 | # Confirm for the labels on nodes using: 43 | # 44 | # kubectl get nodes --show-labels 45 | # 46 | # create namespace learning - 47 | # kubectl create namespace learning 48 | # 49 | # run the pod 50 | # kubectl create -f pod-with-node-affinity-cannot.yml 51 | # 52 | # check the pod 53 | # kubectl get pods --namespace=learning --output=wide 54 | # 55 | # get details about pod 56 | # kubectl describe pod/nnappone --namespace=learning 57 | # 58 | # check for the node where its running. 
It will NOT be 59 | # running on node having size=small label 60 | # 61 | # delete pod 62 | # kubectl delete pod/nnappone --namespace=learning 63 | # 64 | # delete the labels 65 | # kubectl label node nodeone.example.com size- 66 | # kubectl label node nodetwo.example.com size- 67 | # 68 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-with-node-affinity-multiple.yml: -------------------------------------------------------------------------------- 1 | # pod-with-node-affinity-multiple.yml 2 | # 3 | # First assign labels on all nodes in our cluster 4 | # kubectl label node nodeone.example.com size=small 5 | # kubectl label node nodetwo.example.com size=large 6 | # 7 | # Confirm the labels 8 | # kubectl get nodes --show-labels 9 | # 10 | apiVersion: v1 11 | kind: Pod 12 | metadata: 13 | name: nnappone 14 | namespace: learning 15 | labels: 16 | app: nnappone 17 | spec: 18 | containers: 19 | - name: networknuts-app 20 | image: lovelearnlinux/webserver:v1 21 | resources: 22 | limits: 23 | memory: "500Mi" 24 | requests: 25 | memory: "300Mi" 26 | affinity: 27 | nodeAffinity: 28 | requiredDuringSchedulingIgnoredDuringExecution: 29 | nodeSelectorTerms: 30 | - matchExpressions: 31 | - key: size 32 | operator: In 33 | values: 34 | - large 35 | - medium 36 | 37 | 38 | # 39 | # Lets think we have some nodes with label small/medium/large 40 | # We want this pod to be deployed on either pod having small 41 | # OR medium label 42 | # 43 | # Confirm for the labels on nodes using: 44 | # 45 | # kubectl get nodes --show-labels 46 | # 47 | # create namespace learning - 48 | # kubectl create namespace learning 49 | # 50 | # run the pod 51 | # kubectl create -f pod-with-node-affinity-multiple.yml 52 | # 53 | # check the pod 54 | # kubectl get pods --namespace=learning --output=wide 55 | # 56 | # get details about pod 57 | # kubectl describe pod/nnappone --namespace=learning 58 | # 59 | # check for the node where its running. 
It must be 60 | # running on node having label size=small OR size=medium 61 | # 62 | # delete pod 63 | # kubectl delete pod/nnappone --namespace=learning 64 | # 65 | # delete the labels 66 | # kubectl label node nodeone.example.com size- 67 | # kubectl label node nodetwo.example.com size- 68 | # 69 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-with-node-affinity.yml: -------------------------------------------------------------------------------- 1 | # pod-with-node-affinity.yml 2 | # 3 | # First assign labels on all nodes in our cluster 4 | # kubectl label node nodeone.example.com size=small 5 | # kubectl label node nodetwo.example.com size=large 6 | # 7 | # Confirm the labels 8 | # kubectl get nodes --show-labels 9 | # 10 | apiVersion: v1 11 | kind: Pod 12 | metadata: 13 | name: nnappone 14 | namespace: learning 15 | labels: 16 | app: nnappone 17 | spec: 18 | containers: 19 | - name: networknuts-app 20 | image: lovelearnlinux/webserver:v1 21 | resources: 22 | limits: 23 | memory: "500Mi" 24 | requests: 25 | memory: "300Mi" 26 | affinity: 27 | nodeAffinity: 28 | requiredDuringSchedulingIgnoredDuringExecution: 29 | nodeSelectorTerms: 30 | - matchExpressions: 31 | - key: size 32 | operator: In 33 | values: 34 | - small 35 | 36 | 37 | # 38 | # Confirm for the labels on nodes using: 39 | # 40 | # kubectl get nodes --show-labels 41 | # 42 | # create namespace learning - 43 | # kubectl create namespace learning 44 | # 45 | # run the pod 46 | # kubectl create -f pod-with-node-affinity.yml 47 | # 48 | # check the pod 49 | # kubectl get pods --namespace=learning --output=wide 50 | # 51 | # get details about pod 52 | # kubectl describe pod/nnappone --namespace=learning 53 | # 54 | # check for the node where its running. It must be 55 | # running on nodeone.example.com 56 | # 57 | # delete pod 58 | # kubectl delete pod/nnappone --namespace=learning 59 | # 60 | # delete the labels 61 | # kubectl label node nodeone.example.com size- 62 | # kubectl label node nodetwo.example.com size- 63 | # 64 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-with-preferred-node-affinity.yml: -------------------------------------------------------------------------------- 1 | # pod-with-preferred-node-affinity.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | affinity: 12 | nodeAffinity: 13 | preferredDuringSchedulingIgnoredDuringExecution: 14 | - weight: 1 15 | preference: 16 | matchExpressions: 17 | - key: size 18 | operator: In 19 | values: 20 | - small 21 | containers: 22 | - name: networknuts-app 23 | image: lovelearnlinux/webserver:v1 24 | resources: 25 | limits: 26 | memory: "500Mi" 27 | requests: 28 | memory: "300Mi" 29 | 30 | # 31 | # This means that the pod will PREFER to get 32 | # scheduled on a node that has a size=small label. 
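# (Added note: weight accepts values 1-100; when several preferred terms match
#  a node, the scheduler adds their weights to that node's score. A sketch with
#  two weighted preferences - the "disktype" label is only an illustration and
#  is not used elsewhere in this repo:)
#
#   preferredDuringSchedulingIgnoredDuringExecution:
#   - weight: 80
#     preference:
#       matchExpressions:
#       - key: size
#         operator: In
#         values: ["small"]
#   - weight: 20
#     preference:
#       matchExpressions:
#       - key: disktype
#         operator: In
#         values: ["ssd"]
#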
33 | # 34 | # First label one/more nodes with some kind of tag 35 | # we are classifying our nodes as small, medium & 36 | # heavy and the label name is "size" value can be 37 | # "small / medium / heavy" 38 | # 39 | # First label nodes as small / medium: 40 | # kubectl label nodes nodeone.example.com size=small 41 | # kubectl label nodes nodetwo.example.com size=medium 42 | # 43 | # Confirm for the labels on nodes using: 44 | # 45 | # kubectl get nodes --show-labels 46 | # 47 | # create namespace learning - 48 | # kubectl create namespace learning 49 | # 50 | # run the pod 51 | # kubectl create -f pod-with-preferred-node-affinity.yml 52 | # 53 | # check the pod 54 | # kubectl get pods --namespace=learning --output=wide 55 | # 56 | # get details about pod 57 | # kubectl describe pod/nnappone --namespace=learning 58 | # 59 | # check for the node where its running, it should be 60 | # PROBABLY on nodes with label "size=small" 61 | # 62 | # delete pod 63 | # kubectl delete pod/nnappone --namespace=learning 64 | # 65 | # delete the labels 66 | # kubectl label node nodeone.example.com size- 67 | # kubectl label node nodetwo.example.com size- 68 | # 69 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-with-required-node-affinity.yml: -------------------------------------------------------------------------------- 1 | # pod-with-required-node-affinity.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | affinity: 12 | nodeAffinity: 13 | requiredDuringSchedulingIgnoredDuringExecution: 14 | nodeSelectorTerms: 15 | - matchExpressions: 16 | - key: size 17 | operator: In 18 | values: 19 | - small 20 | containers: 21 | - name: networknuts-app 22 | image: lovelearnlinux/webserver:v1 23 | resources: 24 | limits: 25 | memory: "500Mi" 26 | requests: 27 | memory: "300Mi" 28 | 29 | # 30 | # This means that the pod will get scheduled ONLY 31 | # on a node that has a size=small label. 
32 | # 33 | # First label one/more nodes with some kind of tag 34 | # we are classifying our nodes as small, medium & 35 | # heavy and the label name is "size" value can be 36 | # "small / medium / heavy" 37 | # 38 | # First label nodeone.example.com as small / medium: 39 | # kubectl label nodes nodeone.example.com size=small 40 | # kubectl label nodes nodetwo.example.com size=medium 41 | # 42 | # Confirm for the labels on nodes using: 43 | # kubectl get nodes --show-labels 44 | # 45 | # create namespace learning - 46 | # kubectl create namespace learning 47 | # 48 | # run the pod 49 | # kubectl create -f pod-with-required-node-affinity.yml 50 | # 51 | # check the pod 52 | # kubectl get pods --namespace=learning --output=wide 53 | # 54 | # get details about pod 55 | # kubectl describe pod/nnappone --namespace=learning 56 | # 57 | # check for the node where its running, it should be 58 | # on any of the nodes with label "size=small" 59 | # 60 | # delete pod 61 | # kubectl delete pod/nnappone --namespace=learning 62 | # 63 | # delete the labels 64 | # kubectl label node nodeone.example.com size- 65 | # kubectl label node nodetwo.example.com size- 66 | # 67 | -------------------------------------------------------------------------------- /ch-02-pod-scheduling/pod-with-taint.yml: -------------------------------------------------------------------------------- 1 | # pod-with-taint.yml 2 | # 3 | # first add a taint (fragrance) to nodeone.example.com: 4 | # kubectl taint node nodeone.example.com color=pink:NoSchedule 5 | # 6 | # add taint for nodetwo.example.com also: 7 | # kubectl taint node nodetwo.example.com color=yellow:NoSchedule 8 | # 9 | # confirm that taint is applied on nodeone.example.com & nodetwo.example.com: 10 | # kubectl describe node nodeone.example.com 11 | # kubectl describe node nodetwo.example.com 12 | # (check for value under Taints) 13 | # 14 | apiVersion: v1 15 | 16 | kind: Pod 17 | 18 | metadata: 19 | name: nnwebserver 20 | namespace: learning 21 | 22 | spec: 23 | containers: 24 | - name: nnwebserver 25 | image: lovelearnlinux/webserver:v1 26 | resources: 27 | requests: #request means minimum 28 | cpu: "500m" 29 | memory: "128Mi" 30 | limits: #limits means maximum 31 | cpu: "1000m" 32 | memory: "256Mi" 33 | ports: 34 | - containerPort: 80 35 | name: http 36 | protocol: TCP 37 | tolerations: 38 | - key: "color" 39 | operator: "Equal" 40 | value: "pink" 41 | effect: "NoSchedule" 42 | 43 | # 44 | # Create the pod which has tolerance for color pink 45 | # 46 | # Create namespace learning - 47 | # kubectl create namespace learning 48 | # 49 | # Run the pod 50 | # kubectl create -f pod-with-taint.yml 51 | # 52 | # Check the pod 53 | # kubectl get pods --namespace=learning 54 | # (it should be running on nodeone.example.com) 55 | # (because of taint and toleration) 56 | # 57 | # Remember: taint & tolerance is only for restricting 58 | # nodes to ACCEPT only certain kind of pods. So, 59 | # our pod can also go to other nodes, which are 60 | # NEUTRAL to taint (fragrance). 
But, nodeone.example.com 61 | # will accept ONLY those pods which have pink taint (fragrance) 62 | # 63 | # Delete pod 64 | # kubectl delete pod/nnwebserver --namespace=learning 65 | # 66 | # Delete the taints from the nodes: 67 | # kubectl taint node nodeone.example.com color:NoSchedule- 68 | # kubectl taint node nodetwo.example.com color:NoSchedule- 69 | # 70 | # Describe the node master.example.com and understand 71 | # why a pod is never assigned for master.example.com 72 | # kubectl describe node master.example.com 73 | # 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | -------------------------------------------------------------------------------- /ch-03-deployments/calculate-hpa: -------------------------------------------------------------------------------- 1 | For most cases, HPA is used with the trigger based on CPU usage. In this case, a good practice to define the target is: 2 | 3 | (CPU_LIMIT - safety) / (CPU_LIMIT + growth) 4 | 5 | Where: 6 | 7 | CPU_LIMIT: CPU limit is the usage limit on the pod. In most cases, the limit is 100%, but for node-pools that have a considerable percentage of idle resource, we can increase the limit. 8 | safety: We don't want the resource to reach its limit, so we set a safety threshold. 9 | growth: Percentage of traffic growth that we expect in a few minutes (say next 5 minutes). 10 | 11 | A practical example is an application where we set the limit at 100% usage for cpu, a safety threshold of 15% with an expected traffic growth of 30% in 5 minutes: 12 | 13 | (1 - 0.15) / (1 + 0.30) = 0.66 14 | 15 | So the HPA would look something like this: 16 | 17 | apiVersion: autoscaling/v2beta2 18 | kind: HorizontalPodAutoscaler 19 | metadata: 20 | name: my-app 21 | spec: 22 | scaleTargetRef: 23 | apiVersion: apps/v1 24 | kind: Deployment 25 | name: my-app 26 | minReplicas: 1 27 | maxReplicas: 5 28 | metrics: 29 | - type: Resource 30 | resource: 31 | name: cpu 32 | target: 33 | type: Utilization 34 | averageUtilization: 66 35 | -------------------------------------------------------------------------------- /ch-03-deployments/declarative-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-declarative 5 | annotations: 6 | environment: prod 7 | organization: sales 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: nginx:latest 21 | -------------------------------------------------------------------------------- /ch-03-deployments/deployment-different: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: different 5 | labels: 6 | app: different 7 | spec: 8 | replicas: 3 # Number of pod replicas 9 | selector: 10 | matchLabels: 11 | app: different 12 | template: 13 | metadata: 14 | labels: 15 | app: different 16 | spec: 17 | containers: 18 | - name: boxone 19 | image: lovelearnlinux/different:v1 20 | ports: 21 | - containerPort: 80 22 | env: 23 | - name: NODE_NAME 24 | valueFrom: 25 | fieldRef: 26 | fieldPath: spec.nodeName 27 | --- 28 | apiVersion: v1 29 | kind: Service 30 | metadata: 31 | name: different-service 32 | spec: 33 | type: NodePort # Change to LoadBalancer if using a cloud provider 34 | ports: 35 | - port: 80 36 | targetPort: 80 37 | nodePort: 30080 # Port to expose on the node 38 | selector: 39 | 
app: different 40 | -------------------------------------------------------------------------------- /ch-03-deployments/deployment-for-autoscaler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: k8s-autoscaler 5 | spec: 6 | selector: 7 | matchLabels: 8 | run: k8s-autoscaler 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | run: k8s-autoscaler 14 | spec: 15 | containers: 16 | - name: k8s-autoscaler 17 | image: lovelearnlinux/webserver:v1 18 | ports: 19 | - containerPort: 80 20 | resources: 21 | limits: 22 | cpu: 500m 23 | memory: 256Mi 24 | requests: 25 | cpu: 200m 26 | memory: 128Mi 27 | --- 28 | apiVersion: v1 29 | kind: Service 30 | metadata: 31 | name: k8s-autoscaler 32 | labels: 33 | run: k8s-autoscaler 34 | spec: 35 | type: ClusterIP 36 | ports: 37 | - port: 80 38 | selector: 39 | run: k8s-autoscaler 40 | -------------------------------------------------------------------------------- /ch-03-deployments/deployment-hpa-with-policies: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | ports: 21 | - containerPort: 80 22 | resources: 23 | limits: 24 | cpu: 100m 25 | memory: 100Mi 26 | requests: 27 | cpu: 80m 28 | memory: 80Mi 29 | --- 30 | apiVersion: v1 31 | kind: Service 32 | metadata: 33 | name: nginx-deployment-svc 34 | spec: 35 | selector: 36 | app: nginx 37 | ports: 38 | - protocol: TCP 39 | port: 80 40 | targetPort: 80 41 | --- 42 | apiVersion: autoscaling/v2 43 | kind: HorizontalPodAutoscaler 44 | metadata: 45 | name: nginx-deployment-hpa 46 | spec: 47 | behavior: 48 | scaleDown: 49 | stabilizationWindowSeconds: 30 50 | policies: 51 | - type: Pods 52 | value: 1 53 | periodSeconds: 30 54 | scaleTargetRef: 55 | apiVersion: apps/v1 56 | kind: Deployment 57 | name: nginx-deployment 58 | minReplicas: 3 59 | maxReplicas: 10 60 | metrics: 61 | - type: Resource 62 | resource: 63 | name: cpu 64 | target: 65 | type: Utilization 66 | averageUtilization: 10 67 | -------------------------------------------------------------------------------- /ch-03-deployments/deployment-one.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nnwebserver 5 | spec: 6 | selector: 7 | matchLabels: 8 | run: nnwebserver 9 | replicas: 2 10 | strategy: 11 | type: RollingUpdate 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | template: 16 | metadata: 17 | #uncomment below two lines when you update to a new image 18 | # annotations: 19 | # kubernetes.io/change-cause: "updated to new version" 20 | labels: 21 | run: nnwebserver 22 | spec: 23 | containers: 24 | - name: nnwebserver 25 | #for checking rolling updates first try with 26 | # image: lovelearnlinux/webserver:v1 27 | #create deployment and check it, then change 28 | # image: lovelearnlinux/webserver:v2 29 | image: lovelearnlinux/webserver:v2 30 | livenessProbe: 31 | exec: 32 | command: 33 | - cat 34 | - /var/www/html/index.html 35 | initialDelaySeconds: 10 36 | timeoutSeconds: 3 37 | periodSeconds: 20 38 | failureThreshold: 3 39 | resources: 40 | requests: #request means minimum 
41 | cpu: "100m" 42 | memory: "128Mi" 43 | limits: #limits means maximum 44 | cpu: "200m" 45 | memory: "256Mi" 46 | ports: 47 | - containerPort: 80 48 | name: http 49 | protocol: TCP 50 | 51 | # IMPORTANT POINTS # 52 | #to apply - kubectl create -f deployment-one.yml 53 | #to inspect - kubectl get deployment 54 | #to describe - kubectl describe deployment nnwebserver 55 | #to inspect replicaset created by this deployment - kubectl get rs --selector=run=nnwebserver 56 | #to manually scale the deployment - kubectl scale deployment nnwebserver --replicas=3 57 | #NOW try decreasing the rs using - kubectl scale rs --replicas=1 58 | #Get the value of rs again using - kubectl get rs --selector=run-nnwebserver 59 | #NOTICE the relationship between deployment and replicaset 60 | #to get a YAML format of your deployment, in detail: 61 | # kubectl get deployment nnwebserver --export -o yaml > nnwebserver-deployment.yml 62 | # to get rollout status - kubectl rollout status deployment nnwebserver 63 | # to pause deployment - kubectl rollout pause deployment nnwebserver 64 | # to resume deployment - kubectl rollout resume deployment nnwebserver 65 | # to get deployment history - kubectl rollout history deployment nnwebserver 66 | # to delete deployment - kubectl delete deployment nnwebserver 67 | # whenever updating deployment use - kubectl apply -f deployment-one.yml 68 | # 69 | -------------------------------------------------------------------------------- /ch-03-deployments/deployment-using-affinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: gotohell 5 | spec: 6 | selector: 7 | matchLabels: 8 | run: gotohell 9 | replicas: 2 10 | strategy: 11 | type: RollingUpdate 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | template: 16 | metadata: 17 | #uncomment below two lines when you update to a new image 18 | # annotations: 19 | # kubernetes.io/change-cause: "updated to new version" 20 | labels: 21 | run: gotohell 22 | spec: 23 | affinity: 24 | podAffinity: 25 | #podAntiAffinity: 26 | requiredDuringSchedulingIgnoredDuringExecution: 27 | - topologyKey: kubernetes.io/hostname 28 | labelSelector: 29 | matchLabels: 30 | run: gotohell 31 | containers: 32 | - name: gotohell 33 | #for checking rolling updates first try with 34 | # image: lovelearnlinux/webserver:v1 35 | #create deployment and check it, then change 36 | # image: lovelearnlinux/webserver:v2 37 | image: lovelearnlinux/webserver:v2 38 | livenessProbe: 39 | exec: 40 | command: 41 | - cat 42 | - /var/www/html/index.html 43 | initialDelaySeconds: 10 44 | timeoutSeconds: 3 45 | periodSeconds: 20 46 | failureThreshold: 3 47 | ports: 48 | - containerPort: 80 49 | name: http 50 | protocol: TCP 51 | -------------------------------------------------------------------------------- /ch-03-deployments/deployment-webserver-with-service.yml: -------------------------------------------------------------------------------- 1 | # deployment-webserver-with-service.yml 2 | # we are deploying a webserver with 2 replicas 3 | # and then exposing the webserver service using 4 | # service resource 5 | # 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | name: nn-web 10 | namespace: learning 11 | spec: 12 | selector: 13 | matchLabels: 14 | run: nn-web 15 | replicas: 2 16 | template: 17 | metadata: 18 | labels: 19 | run: nn-web 20 | spec: 21 | containers: 22 | - name: nn-webserver 23 | image: lovelearnlinux/webserver:v1 24 | ports: 25 | 
- containerPort: 80 26 | 27 | 28 | # 29 | # Creating a deployment with two pods and then 30 | # exposing the webserver using service resource type. 31 | # 32 | # create namespace learning - 33 | # kubectl create namespace learning 34 | # 35 | # initialize the deployment 36 | # kubectl create -f deployment-webserver-with-service.yml 37 | # 38 | # check the deployment 39 | # kubectl get deployment --namespace=learning 40 | # 41 | # get more details about the deployment 42 | # kubectl describe deployments nn-web --namespace=learning 43 | # 44 | # get details about pods initialized by deployment 45 | # kubectl get pods --namespace=learning 46 | # 47 | # get information about all pods created by deployment nn-web 48 | # kubectl get pods -l run=nn-web -o wide --namespace=learning 49 | # 50 | # get the IP address of all pods initialized by deployment nn-web 51 | # kubectl get pods -l run=nn-web -o yaml --namespace=learning | grep podIP 52 | # 53 | # expose the deployment using service resource 54 | # kubectl expose deployment/nn-web --namespace=learning 55 | # 56 | # get information about the service (note down the Cluster IP) 57 | # kubectl get service nn-web --namespace=learning 58 | # 59 | # get detailed information about service, where: 60 | # Endpoints - are the pod's IP address 61 | # IP - is the service IP address, where we will hit (call it Cluster IP) 62 | # kubectl describe service nn-web --namespace=learning 63 | # 64 | # check endpoints (the pods which the service will hit) for the service 65 | # kubectl get ep nn-web --namespace=learning 66 | # 67 | # You can go to ANY node in the cluster and use 68 | # curl http://CLUSTER-IP 69 | # 70 | # --- You should be able to get the website contents --- 71 | # 72 | # delete the deployment 73 | # kubectl delete deployment nn-web --namespace=learning 74 | # 75 | # delete the service 76 | # kubectl delete service nn-web --namespace=learning 77 | # 78 | -------------------------------------------------------------------------------- /ch-03-deployments/deployment-with-anti-pod-affinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | affinity: 13 | podAntiAffinity: 14 | requiredDuringSchedulingIgnoredDuringExecution: <---- hard requirement not to schedule "nginx" pod if already one scheduled. 
15 | - topologyKey: kubernetes.io/hostname <---- Anti affinity scope is host 16 | labelSelector: 17 | matchLabels: 18 | app: nginx 19 | container: 20 | image: nginx:latest 21 | -------------------------------------------------------------------------------- /ch-03-deployments/deployment-with-strategy: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-deploy 5 | annotations: 6 | kubernetes.io/change-cause: "changing version to new" 7 | spec: 8 | replicas: 10 9 | selector: 10 | matchLabels: 11 | app: hello-world 12 | minReadySeconds: 10 13 | strategy: 14 | type: RollingUpdate 15 | rollingUpdate: 16 | maxUnavailable: 1 17 | maxSurge: 1 18 | template: 19 | metadata: 20 | labels: 21 | app: hello-world 22 | spec: 23 | containers: 24 | - name: hello-pod 25 | image: lovelearnlinux/webserver:v1 26 | ports: 27 | - containerPort: 80 28 | resources: 29 | limits: 30 | cpu: 100m 31 | memory: 128Mi 32 | requests: 33 | cpu: 50m 34 | memory: 100Mi 35 | readinessProbe: 36 | exec: 37 | command: 38 | - cat 39 | - /var/www/html/index.html 40 | initialDelaySeconds: 10 41 | periodSeconds: 10 42 | timeoutSeconds: 4 43 | failureThreshold: 2 44 | successThreshold: 1 45 | livenessProbe: 46 | exec: 47 | command: 48 | - cat 49 | - /var/www/html/index.html 50 | initialDelaySeconds: 10 51 | periodSeconds: 10 52 | timeoutSeconds: 4 53 | failureThreshold: 6 54 | successThreshold: 1 55 | 56 | # kubectl set image deploy/hello-deploy hello-pod=lovelearnlinux/webserver:v2 --annotation=update image from v1 to v2 57 | # kubectl annotate deployments.apps hello-deploy kubernetes.io/change-cause="image changed to nginx:latest" 58 | # kubectl rollout history deployment hello-deploy --revision 5 59 | 60 | 61 | -------------------------------------------------------------------------------- /ch-03-deployments/hpa-for-autoscaler-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: k8s-autoscaler 5 | spec: 6 | maxReplicas: 10 7 | minReplicas: 2 8 | scaleTargetRef: 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | name: k8s-autoscaler 12 | targetCPUUtilizationPercentage: 10 13 | 14 | # now put some load on deployment 15 | # while true; do curl http://service-ip-address ; done 16 | -------------------------------------------------------------------------------- /ch-03-deployments/hpa-for-deployment-v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: my-app 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: my-app 10 | minReplicas: 1 11 | maxReplicas: 5 12 | metrics: 13 | - type: Resource 14 | resource: 15 | name: cpu 16 | target: 17 | type: Utilization 18 | averageUtilization: 66 19 | -------------------------------------------------------------------------------- /ch-03-deployments/imperative-deployment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | kubectl create deployment nginx-imperative --image=nginx:latest #A 3 | kubectl scale deployment/nginx-imperative --replicas 3 #B 4 | kubectl annotate deployment/nginx-imperative environment=prod #C 5 | kubectl annotate deployment/nginx-imperative organization=sales #D 6 | 
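# The HPA manifests above (hpa-for-autoscaler-deployment.yaml and hpa-for-deployment-v2.yaml)
# can also be created imperatively. A rough sketch, assuming the deployment names used in
# those manifests and a working metrics-server:
#
# kubectl autoscale deployment k8s-autoscaler --min=2 --max=10 --cpu-percent=10
# kubectl get hpa
#
# Note: the autoscaling/v2beta2 API used in hpa-for-deployment-v2.yaml was removed in
# Kubernetes 1.26; on current clusters the same spec works under apiVersion: autoscaling/v2.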
-------------------------------------------------------------------------------- /ch-03-deployments/secure-apache: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: secure 6 | labels: 7 | app: nginx 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: lovelearnlinux/secureapache:v1 21 | ports: 22 | - containerPort: 8080 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: secure-svc 28 | spec: 29 | selector: 30 | app: nginx 31 | ports: 32 | - protocol: TCP 33 | port: 80 34 | targetPort: 8080 35 | --- 36 | apiVersion: v1 37 | kind: Service 38 | metadata: 39 | name: secure-svc-np 40 | spec: 41 | type: NodePort 42 | selector: 43 | app: nginx 44 | ports: 45 | - port: 8080 46 | # By default and for convenience, the `targetPort` is set to 47 | # the same value as the `port` field. 48 | targetPort: 8080 49 | # Optional field 50 | # By default and for convenience, the Kubernetes control plane 51 | # will allocate a port from a range (default: 30000-32767) 52 | nodePort: 30007 53 | -------------------------------------------------------------------------------- /ch-03-deployments/tshoot-deployment-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | 21 | # expose this deployment with a service name "my-nginx-web-svc", which will send the request to port 80 22 | # of the containers 23 | # you can use - kubectl expose deployment .... 
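# one possible form of that command (a sketch, adjust names as needed):
# kubectl expose deployment nginx-deployment --name=my-nginx-web-svc --port=80 --target-port=80
#
# then confirm the service and that it picked up the pod IPs as endpoints:
# kubectl get svc my-nginx-web-svc
# kubectl get endpoints my-nginx-web-svc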
24 | 25 | -------------------------------------------------------------------------------- /ch-03-deployments/tshoot-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: lovelearnlinux/webserver:v1 20 | ports: 21 | - containerPort: 80 22 | name: nginx-web-svc 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: nginx-deployment-service 28 | spec: 29 | selector: 30 | app: ngimx 31 | ports: 32 | - name: name-of-service-port 33 | protocol: TCP 34 | port: 80 35 | targetPort: nginx-web-svc 36 | 37 | 38 | #we should be able to access the application using service IP 39 | -------------------------------------------------------------------------------- /ch-04-services/pod-liveness-probe: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | test: liveness 6 | name: liveness-exec 7 | spec: 8 | containers: 9 | - name: liveness 10 | image: lovelearnlinux/webserver:v1 11 | args: 12 | - /bin/sh 13 | - -c 14 | - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600 15 | livenessProbe: 16 | exec: 17 | command: 18 | - cat 19 | - /tmp/healthy 20 | initialDelaySeconds: 5 21 | periodSeconds: 5 22 | -------------------------------------------------------------------------------- /ch-04-services/pod-readiness-liveness-probes: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: probeone 5 | labels: 6 | app: apache 7 | spec: 8 | containers: 9 | - name: boxone 10 | image: lovelearnlinux/webserver:v1 11 | ports: 12 | - containerPort: 80 13 | readinessProbe: 14 | tcpSocket: 15 | port: 80 16 | initialDelaySeconds: 5 17 | periodSeconds: 10 18 | livenessProbe: 19 | tcpSocket: 20 | port: 80 21 | initialDelaySeconds: 15 22 | periodSeconds: 20 23 | -------------------------------------------------------------------------------- /ch-04-services/pod-simple-lifecycle-events.yml: -------------------------------------------------------------------------------- 1 | # pod-simple-lifecycle-events.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | lifecycle: 15 | postStart: 16 | exec: 17 | command: ["/bin/sh", "-c", "useradd networknuts -p redhat"] 18 | preStop: 19 | exec: 20 | command: ["/bin/sh","-c","rm -rf /home/networknuts; userdel networknuts"] 21 | # 22 | # Creating a simple pod. Which has apache 23 | # configured inside it. The container inside the 24 | # pod has handlers for the postStart and preStop events. 
25 | # 26 | # postStart - postStart event is called immediately 27 | # after the Container is created 28 | # 29 | # preStop - preStop event is called immediately 30 | # before the Container is terminated 31 | # 32 | # Create namespace learning - 33 | # kubectl create namespace learning 34 | # 35 | # Run the pod 36 | # kubectl create -f pod-simple-lifecycle-events.yml 37 | # 38 | # Check the pod 39 | # kubectl get pods --namespace=learning 40 | # 41 | # Go inside the pod to check user - networknuts 42 | # kubectl exec -it nnappone --namespace=learning -- /bin/bash 43 | # 44 | # Check inside /etc/passwd and you will find user - networknuts 45 | # tail /etc/passwd 46 | # 47 | # Delete pod 48 | # kubectl delete pod/nnappone --namespace=learning 49 | # 50 | -------------------------------------------------------------------------------- /ch-04-services/pod-simple-with-health-check.yml: -------------------------------------------------------------------------------- 1 | # pod-simple-with-health-check.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | containers: 12 | - name: networknuts-app 13 | image: lovelearnlinux/webserver:v1 14 | args: 15 | - /bin/sh 16 | - -c 17 | - touch /tmp/health.txt; sleep 30; rm -rf /tmp/health.txt; sleep 35 18 | livenessProbe: 19 | exec: 20 | command: 21 | - cat 22 | - /tmp/health.txt 23 | initialDelaySeconds: 5 24 | periodSeconds: 5 25 | failureThreshold: 3 26 | resources: 27 | requests: 28 | cpu: "400m" 29 | memory: "128Mi" 30 | limits: 31 | cpu: "500m" 32 | memory: "256Mi" 33 | ports: 34 | - containerPort: 80 35 | name: http 36 | protocol: TCP 37 | 38 | # 39 | # Creating a simple pod with health checks. 40 | # "periodSeconds" - kubelet should perform a liveness probe every 5 seconds 41 | # "initialDelaySeconds" - kubelet should wait 5 seconds before performing 42 | # the first probe 43 | # 44 | # create namespace learning - 45 | # kubectl create namespace learning 46 | # 47 | # run the pod 48 | # kubectl create -f pod-simple-with-health-check.yml 49 | # 50 | # check the pod 51 | # kubectl get pods --namespace=learning 52 | # 53 | # get details about pod 54 | # kubectl describe pod/nnappone --namespace=learning 55 | # 56 | # observe that there is no pod restart within first 30s 57 | # 58 | # delete pod 59 | # kubectl delete pod/nnappone --namespace=learning 60 | # 61 | -------------------------------------------------------------------------------- /ch-04-services/pod-with-health-check.yml: -------------------------------------------------------------------------------- 1 | # pod-with-health-check.yml 2 | apiVersion: v1 3 | 4 | kind: Pod 5 | 6 | metadata: 7 | name: nnwebserver 8 | 9 | spec: 10 | containers: 11 | - name: nnwebserver 12 | image: lovelearnlinux/webserver:v1 13 | livenessProbe: 14 | exec: 15 | command: 16 | - cat 17 | - /var/www/html/index.html 18 | initialDelaySeconds: 10 19 | timeoutSeconds: 3 20 | periodSeconds: 20 21 | failureThreshold: 3 22 | resources: 23 | requests: #request means minimum 24 | cpu: "500m" 25 | memory: "128Mi" 26 | limits: #limits means maximum 27 | cpu: "1000m" 28 | memory: "256Mi" 29 | ports: 30 | - containerPort: 80 31 | name: http 32 | protocol: TCP 33 | 34 | -------------------------------------------------------------------------------- /ch-04-services/probes: -------------------------------------------------------------------------------- 1 | #### STARTUP PROBE - SUCCESS 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: 
startup-probe-demo 6 | spec: 7 | containers: 8 | - name: startup-probe-demo 9 | image: busybox:latest 10 | args: 11 | - /bin/sh 12 | - -c 13 | - sleep 300 14 | startupProbe: 15 | exec: 16 | command: 17 | - cat 18 | - /etc/hostname 19 | periodSeconds: 10 20 | failureThreshold: 3 21 | 22 | 23 | ##### 24 | check with describe pod .. it will be a success 25 | ##### 26 | 27 | #### STARTUP PROBE - FAILURE 28 | apiVersion: v1 29 | kind: Pod 30 | metadata: 31 | name: startup-probe-demo 32 | spec: 33 | containers: 34 | - name: startup-probe-demo 35 | image: busybox:latest 36 | args: 37 | - /bin/sh 38 | - -c 39 | - sleep 300 40 | startupProbe: 41 | exec: 42 | command: 43 | - cat 44 | - /etc/hostnamee 45 | periodSeconds: 10 46 | failureThreshold: 3 47 | 48 | #### 49 | check with describe pod - it will fail and container will be restarted 50 | #### 51 | 52 | #### READINESS PROBE 53 | apiVersion: apps/v1 54 | kind: Deployment 55 | metadata: 56 | name: readiness 57 | labels: 58 | app: readiness 59 | spec: 60 | replicas: 3 61 | selector: 62 | matchLabels: 63 | app: readiness 64 | template: 65 | metadata: 66 | labels: 67 | app: readiness 68 | spec: 69 | containers: 70 | - name: nginx 71 | image: lovelearnlinux/webserver:v1 72 | ports: 73 | - containerPort: 80 74 | startupProbe: 75 | exec: 76 | command: 77 | - cat 78 | - /var/www/html/index.html 79 | periodSeconds: 10 80 | failureThreshold: 3 81 | readinessProbe: 82 | exec: 83 | command: 84 | - cat 85 | - /var/www/html/index.html 86 | initialDelaySeconds: 10 87 | periodSeconds: 10 88 | timeoutSeconds: 4 89 | failureThreshold: 2 90 | successThreshold: 1 91 | 92 | ### do create a service for this deployment. 93 | ### now go inside a pod and delete index.html file 94 | ### after readiness probe fails, kubernetes 95 | ### will remove that pod's IP from service endpoint 96 | 97 | 98 | #### LIVENESS PROBE 99 | apiVersion: apps/v1 100 | kind: Deployment 101 | metadata: 102 | name: liveness 103 | labels: 104 | app: readiness 105 | spec: 106 | replicas: 3 107 | selector: 108 | matchLabels: 109 | app: readiness 110 | template: 111 | metadata: 112 | labels: 113 | app: readiness 114 | spec: 115 | containers: 116 | - name: nginx 117 | image: lovelearnlinux/webserver:v1 118 | ports: 119 | - containerPort: 80 120 | startupProbe: 121 | exec: 122 | command: 123 | - cat 124 | - /var/www/html/index.html 125 | periodSeconds: 10 126 | failureThreshold: 3 127 | readinessProbe: 128 | exec: 129 | command: 130 | - cat 131 | - /var/www/html/index.html 132 | initialDelaySeconds: 10 133 | periodSeconds: 10 134 | timeoutSeconds: 4 135 | failureThreshold: 2 136 | successThreshold: 1 137 | livenessProbe: 138 | exec: 139 | command: 140 | - cat 141 | - /var/www/html/index.html 142 | initialDelaySeconds: 10 143 | periodSeconds: 10 144 | timeoutSeconds: 4 145 | failureThreshold: 6 146 | successThreshold: 1 147 | 148 | ### do create a service for this deployment. 
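### (one way to do that, as a sketch - kubectl expose deployment liveness --port=80 --target-port=80)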
149 | ### now go inside a pod and delete index.html file 150 | ### after readiness probe fails, kubernetes 151 | ### will remove that pod's IP from service endpoint 152 | ### after liveness probe fails, kubernetes 153 | ### will restart the pod to solve the problem 154 | 155 | 156 | 157 | -------------------------------------------------------------------------------- /ch-04-services/replica-set-one-service.yml: -------------------------------------------------------------------------------- 1 | # replica-set-one-service.yml 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: nnwebserver 6 | spec: 7 | ports: 8 | - port: 8080 9 | targetPort: 80 10 | selector: 11 | app: nnwebserver 12 | -------------------------------------------------------------------------------- /ch-04-services/service-for-pod.yml: -------------------------------------------------------------------------------- 1 | # service-for-pod.yml 2 | # 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: nnappone-service 7 | spec: 8 | selector: 9 | app: nnappone 10 | ports: 11 | - protocol: TCP 12 | port: 8080 13 | targetPort: 80 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /ch-04-services/service-nodeport.yml: -------------------------------------------------------------------------------- 1 | # service-nodeport.yml 2 | 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: nnweb-svc 7 | namespace: learning 8 | labels: 9 | app: hello-nn 10 | spec: 11 | type: NodePort 12 | ports: 13 | - port: 80 14 | nodePort: 30003 15 | protocol: TCP 16 | selector: 17 | app: hello-nn 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: hello-deploy 23 | namespace: learning 24 | spec: 25 | replicas: 2 26 | selector: 27 | matchLabels: 28 | app: hello-nn 29 | minReadySeconds: 10 30 | strategy: 31 | type: RollingUpdate 32 | rollingUpdate: 33 | maxUnavailable: 1 34 | maxSurge: 1 35 | template: 36 | metadata: 37 | labels: 38 | app: hello-nn 39 | spec: 40 | containers: 41 | - name: webserver-pod 42 | image: lovelearnlinux/webserver:v1 43 | ports: 44 | - containerPort: 80 45 | 46 | # 47 | # Creating a webserver deployment having 2 replicas 48 | # and a nodeport service to access that webserver. 
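# (to confirm the nodePort the service is using - 30003 as set above - a jsonpath sketch:
#  kubectl get svc nnweb-svc --namespace=learning -o jsonpath='{.spec.ports[0].nodePort}')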
49 | # 50 | # 51 | # create namespace learning - 52 | # kubectl create namespace learning 53 | # 54 | # create deployment and service 55 | # kubectl create -f service-nodeport.yml 56 | # 57 | # check the deployment 58 | # kubectl get deploy --namespace=learning 59 | # 60 | # also check that 2 pods are running as per deployment 61 | # kubectl get pods --namespace=learning 62 | # 63 | # check the service inside learning namespace 64 | # kubectl get svc --namespace=learning 65 | # 66 | # get details about the service 67 | # kubectl describe service nnweb-svc --namespace=learning 68 | # (note down the endpoints, these are the actual pod IP's 69 | # where the request will hit) 70 | # 71 | # get details about service endpoints 72 | # kubectl get endpoints nnweb-svc --namespace=learning 73 | # 74 | # go to a node where the pod is deployed and access service 75 | # using cluster IP 76 | # curl http://:80 77 | # (because our containerPort is 80) 78 | # 79 | # we can also access the application using nodePort (30003) 80 | # with NodeIP:30003 81 | # curl http://:30003 82 | # 83 | # delete deployment and service 84 | # kubectl delete -f service-nodeport.yml 85 | # 86 | -------------------------------------------------------------------------------- /ch-05-namespaces/limit-ranges-default-min-max.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: LimitRange 3 | metadata: 4 | name: cpu-ram-min-max-test 5 | # namespace: test 6 | spec: 7 | limits: 8 | - default: 9 | cpu: 100m 10 | memory: 200Mi 11 | defaultRequest: 12 | cpu: 40m 13 | memory: 100Mi 14 | max: 15 | cpu: "200m" 16 | memory: 256Mi 17 | min: 18 | cpu: "10m" 19 | memory: 90Mi 20 | type: Container 21 | -------------------------------------------------------------------------------- /ch-05-namespaces/limit-ranges.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: LimitRange 3 | metadata: 4 | name: def-cpu-mem-limit 5 | namespace: dev 6 | spec: 7 | limits: 8 | - default: 9 | cpu: 111m 10 | memory: 99Mi 11 | defaultRequest: 12 | cpu: 101m 13 | memory: 91Mi 14 | max: 15 | cpu: 200m 16 | memory: 100Mi 17 | min: 18 | cpu: 100m 19 | memory: 90Mi 20 | type: Container 21 | 22 | # in this case if pod has no resource block 23 | # the defaultrequest and default will apply 24 | # in case pod has resource block 25 | # its resource block has be to within 26 | # min and max 27 | -------------------------------------------------------------------------------- /ch-05-namespaces/namespace-cpu-limitrange.yml: -------------------------------------------------------------------------------- 1 | # namespace-cpu-limit.yml 2 | 3 | apiVersion: v1 4 | kind: LimitRange 5 | metadata: 6 | name: cpu-limit-range 7 | namespace: learning 8 | spec: 9 | limits: 10 | - default: 11 | cpu: 1 12 | defaultRequest: 13 | cpu: 0.4 14 | type: Container 15 | 16 | # 17 | # create namespace first using - 18 | # kubectl create namespace learning 19 | # 20 | # apply it using - 21 | # kubectl apply -f namespace-cpu-limit.yml 22 | # 23 | # check limits - 24 | # kubectl get limitranges --namespace=learning 25 | # 26 | # get details of limits - 27 | # kubectl describe limitranges cpu-limit-range --namespace=learning 28 | # 29 | # delete cpu limits - 30 | # kubectl delete limitranges cpu-limit-range --namespace=learning 31 | # 32 | # delete namespace - 33 | # kubectl delete namespace learning 34 | # 35 | 
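#
# a quick way to see this LimitRange in action (a sketch, not part of the lab files):
# create a pod in the learning namespace with no resources block, for example -
#
#  apiVersion: v1
#  kind: Pod
#  metadata:
#    name: limitrange-demo
#    namespace: learning
#  spec:
#    containers:
#    - name: demo
#      image: lovelearnlinux/webserver:v1
#
# then run - kubectl describe pod limitrange-demo --namespace=learning
# the container should show a cpu request of 400m (from defaultRequest) and a
# cpu limit of 1 (from default), injected by the LimitRange admission controller
#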
-------------------------------------------------------------------------------- /ch-05-namespaces/namespace-memory-limitrange.yml: -------------------------------------------------------------------------------- 1 | # namespace-memory-limit.yml 2 | 3 | apiVersion: v1 4 | kind: LimitRange 5 | metadata: 6 | name: memory-limit-range 7 | namespace: learning 8 | spec: 9 | limits: 10 | - default: 11 | memory: 500Mi 12 | defaultRequest: 13 | memory: 250Mi 14 | type: Container 15 | 16 | # 17 | # create namespace first using - 18 | # kubectl create namespace learning 19 | # 20 | # apply it using - 21 | # kubectl apply -f namespace-memory-limit.yml 22 | # 23 | # check limits - 24 | # kubectl get limitranges --namespace=learning 25 | # 26 | # get details of limits - 27 | # kubectl describe limitranges memory-limit-range --namespace=learning 28 | # 29 | # delete the memory limits - 30 | # kubectl delete limitranges memory-limit-range --namespace=learning 31 | # 32 | # delete namespace - 33 | # kubectl delete namespace learning 34 | # 35 | -------------------------------------------------------------------------------- /ch-05-namespaces/namespace-pod-quota.yml: -------------------------------------------------------------------------------- 1 | # namespace-pod-quota.yml 2 | # 3 | apiVersion: v1 4 | kind: ResourceQuota 5 | metadata: 6 | name: pod-quota 7 | namespace: learning 8 | spec: 9 | hard: 10 | pods: "4" 11 | 12 | # 13 | # We are applying quota on number of pods 14 | # that can run inside a namespace. So if 15 | # total number of running pods increases 16 | # above 4, it will fail with quota error. 17 | # 18 | # create namespace learning - 19 | # kubectl create namespace learning 20 | # 21 | # apply the quota on number of pods 22 | # kubectl apply -f namespace-pod-quota.yml 23 | # 24 | # check the resource quota 25 | # kubectl get resourcequotas --namespace=learning 26 | # 27 | # get details about resource quota 28 | # kubectl describe resourcequotas pod-quota --namespace=learning 29 | # 30 | # delete resource quota 31 | # kubectl delete resourcequotas pod-quota --namespace=learning 32 | # 33 | 34 | -------------------------------------------------------------------------------- /ch-05-namespaces/namespace-resourcequota.yml: -------------------------------------------------------------------------------- 1 | # namespace-resourcequota.yml 2 | 3 | apiVersion: v1 4 | kind: ResourceQuota 5 | metadata: 6 | name: memory-cpu-quota 7 | namespace: learning 8 | spec: 9 | hard: 10 | requests.cpu: "1" 11 | requests.memory: 1Gi 12 | limits.cpu: "2" 13 | limits.memory: 2Gi 14 | 15 | 16 | # 17 | # create namespace first using - 18 | # kubectl create namespace learning 19 | # 20 | # apply it using - 21 | # kubectl apply -f namespace-resourcequota.yml 22 | # 23 | # check resourcequota - 24 | # kubectl get resourcequotas --namespace=learning 25 | # 26 | # get details of resourcequota - 27 | # kubectl describe resourcequotas memory-cpu-quota --namespace=learning 28 | # 29 | # delete resourcequota - 30 | # kubectl delete resourcequotas memory-cpu-quota --namespace=learning 31 | # 32 | # delete namespace - 33 | # kubectl delete namespace learning 34 | # 35 | -------------------------------------------------------------------------------- /ch-06-storage/deployment-using-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: webapp 6 | labels: 7 | app: nginx 8 | spec: 9 | replicas: 1 10 | selector: 11 | 
matchLabels: 12 | app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: lovelearnlinux/webserver:v1 21 | ports: 22 | - containerPort: 80 23 | volumeMounts: 24 | - mountPath: "/var/www/html" 25 | name: webroot 26 | volumes: 27 | - name: webroot 28 | persistentVolumeClaim: 29 | claimName: myclaim 30 | -------------------------------------------------------------------------------- /ch-06-storage/empty-dir: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: empty-pod 5 | spec: 6 | # runtimeClassName: allsecure 7 | containers: 8 | - image: lovelearnlinux/webserver:v1 9 | imagePullPolicy: IfNotPresent 10 | name: boxone 11 | volumeMounts: 12 | - mountPath: /var/www/html 13 | name: demo-volume 14 | volumes: 15 | - name: demo-volume 16 | emptyDir: {} 17 | -------------------------------------------------------------------------------- /ch-06-storage/empty-dir-multiple: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: empty-multi 5 | spec: 6 | containers: 7 | - image: nginx 8 | imagePullPolicy: IfNotPresent 9 | name: boxone 10 | volumeMounts: 11 | - mountPath: /demo 12 | name: demo-volume 13 | - name: boxtwo 14 | image: lovelearnlinux/webserver:v1 15 | volumeMounts: 16 | - mountPath: /var/www/html 17 | name: demo-volume 18 | volumes: 19 | - name: demo-volume 20 | emptyDir: {} 21 | -------------------------------------------------------------------------------- /ch-06-storage/persistent-volume-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pvone-nfs 5 | spec: 6 | capacity: 7 | storage: 5Gi 8 | volumeMode: Filesystem 9 | accessModes: 10 | - ReadWriteOnce 11 | persistentVolumeReclaimPolicy: Recycle 12 | storageClassName: slow 13 | nfs: 14 | path: /foldername 15 | server: ip-address-nfs-server 16 | -------------------------------------------------------------------------------- /ch-06-storage/pod-with-volume-external.yml: -------------------------------------------------------------------------------- 1 | # pod-with-volume-external.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | volumes: 12 | - name: "web-root" 13 | nfs: 14 | server: ansible.example.com 15 | path: "/webserver" 16 | containers: 17 | - name: networknuts-app 18 | image: lovelearnlinux/webserver:v1 19 | volumeMounts: 20 | - mountPath: "/var/www/html" 21 | name: "web-root" 22 | resources: 23 | requests: 24 | cpu: "400m" 25 | memory: "128Mi" 26 | limits: 27 | cpu: "500m" 28 | memory: "256Mi" 29 | ports: 30 | - containerPort: 80 31 | name: http 32 | protocol: TCP 33 | 34 | # 35 | # Creating a webserver, which will mount its /var/www/html 36 | # from a nfs server inside our network. So the website is 37 | # available centrally. And it doesn't matter on which node 38 | # the pod is running. Pod will always be able to access the 39 | # website pages from NFS server. 40 | # 41 | # I have created NFS server on our ansible controller, which 42 | # we had used for creating our kubernetes cluster. These are 43 | # the steps you need to perform on your ansible controller 44 | # for making it a nfs server. 45 | # ----------- 46 | # mkdir /webserver 47 | # echo "
webserver from nfs server
" >> /webserver/index.html 48 | # chmod 777 /webserver 49 | # echo "/webserver *(rw,sync)" >> /etc/exports 50 | # systemctl start nfs-server 51 | # systemctl enable nfs-server 52 | # firewall-cmd --permanent --add-service={nfs,mountd,rpc-bind} 53 | # firewall-cmd --reload 54 | # showmount -e localhost { it should show you the /webserver directory } 55 | #------------ 56 | # 57 | # create namespace learning - 58 | # kubectl create namespace learning 59 | # 60 | # run the pod 61 | # kubectl create -f pod-with-volume-external.yml 62 | # 63 | # check the pod 64 | # kubectl get pods --namespace=learning 65 | # 66 | # get details about pod 67 | # kubectl describe pod/nnappone --namespace=learning 68 | # {observe the Volumes and Mounts and node where its deployed} 69 | # 70 | # ssh onto the node where its deployed and go inside the pod 71 | # kubectl exec -it nnappone /bin/bash --namespace=learning 72 | # 73 | # give the command: df -h 74 | # 75 | # you should see that /var/www/html is mounted from ansible.example.com 76 | # 77 | # exit from the pod and come to your kubernetes master 78 | # 79 | # apply port forwarding to access the webserver: 80 | # kubectl port-forward nnappone 8080:80 --namespace=learning 81 | # 82 | # now you should be able to access the webserver with index.html 83 | # file contents residing on nfs server - ansible.example.com 84 | # run the command from kubernetes master: 85 | # 86 | # curl http://127.0.0.1:8080 87 | # 88 | # delete pod 89 | # kubectl delete pod/nnappone --namespace=learning 90 | # 91 | # the website contents are still available for any other 92 | # pod to access and use it. Confirm by going on 93 | # ansible.example.com and checking contents of /webserver 94 | # 95 | -------------------------------------------------------------------------------- /ch-06-storage/pod-with-volume.yml: -------------------------------------------------------------------------------- 1 | # pod-with-volume.yml 2 | # 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: nnappone 7 | namespace: learning 8 | labels: 9 | app: nnappone 10 | spec: 11 | volumes: 12 | - name: "web-data" 13 | hostPath: 14 | path: "/tmp/webserver" 15 | containers: 16 | - name: networknuts-app 17 | image: lovelearnlinux/webserver:v1 18 | volumeMounts: 19 | - mountPath: "/var/www/html" 20 | name: "web-data" 21 | resources: 22 | requests: 23 | cpu: "400m" 24 | memory: "128Mi" 25 | limits: 26 | cpu: "500m" 27 | memory: "256Mi" 28 | ports: 29 | - containerPort: 80 30 | name: http 31 | protocol: TCP 32 | 33 | # 34 | # Creating a webserver pod that will use host hard disk 35 | # to store its data. Data will be persistent and independent 36 | # to the lifecycle of the pod. 37 | # 38 | # The volume will be automaticatally created on the host 39 | # where the pod is assigned by the scheduler. 
40 | # 41 | # This pod definition also has memory and cpu limits 42 | # 43 | # create namespace learning - 44 | # kubectl create namespace learning 45 | # 46 | # run the pod 47 | # kubectl create -f pod-with-volume.yml 48 | # 49 | # check the pod 50 | # kubectl get pods --namespace=learning 51 | # 52 | # get details about pod 53 | # kubectl describe pod/nnappone --namespace=learning 54 | # {observe the mount location and node where its deployed} 55 | # 56 | # ssh onto the node where its deployed and go inside the pod 57 | # kubectl exec -it nnappone /bin/bash --namespace=learning 58 | # 59 | # { go inside /var/www/html/ directory and create a file } 60 | # 61 | # exit from the pod and delete the pod 62 | # 63 | # delete pod 64 | # kubectl delete pod/nnappone --namespace=learning 65 | # 66 | # now again go inside the same node where the pod was running 67 | # you will still find the file created inside /tmp/webserver/ 68 | # directory 69 | # 70 | -------------------------------------------------------------------------------- /ch-06-storage/pvc-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: myclaim 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | volumeMode: Filesystem 9 | resources: 10 | requests: 11 | storage: 4Gi 12 | storageClassName: slow 13 | -------------------------------------------------------------------------------- /ch-06-storage/service-node-port.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: website-svc 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: nginx 9 | ports: 10 | - port: 80 11 | # By default and for convenience, the `targetPort` is set to 12 | # the same value as the `port` field. 13 | targetPort: 80 14 | # Optional field 15 | # By default and for convenience, the Kubernetes control plane 16 | # will allocate a port from a range (default: 30000-32767) 17 | nodePort: 30007 18 | -------------------------------------------------------------------------------- /ch-06-storage/storage-class-nfs: -------------------------------------------------------------------------------- 1 | Storage Class with NFS 2 | 3 | 1 - configure nfs server (10.0.0.99) 4 | 5 | apt install nfs-kernel-server 6 | 7 | #mkdir --mode=777 /k8sdata 8 | 9 | mkdir /k8sdata 10 | chown nobody.nobody /k8sdata 11 | chmod g+rwxs /k8sdata 12 | 13 | echo "/k8sdata *(rw,sync,no_subtree_check)" >> /etc/exports 14 | exportfs -a 15 | exportfs -r 16 | 17 | 2 - check from manager / nodes 18 | apt update && apt install nfs-common -y 19 | showmount -e ip-address-nfs-server 20 | 21 | 3. 
add nfs provisioner 22 | 23 | # install helm - 24 | curl https://baltocdn.com/helm/signing.asc | sudo apt-key add - 25 | sudo apt-get install apt-transport-https --yes 26 | echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 27 | sudo apt-get update 28 | sudo apt-get install helm 29 | 30 | # then 31 | # reference - https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner 32 | 33 | helm repo add nfs-store https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/ 34 | helm repo list 35 | 36 | # create a ns - storagenfs 37 | kubectl create ns storagenfs 38 | 39 | helm install nfs-sc \ 40 | nfs-store/nfs-subdir-external-provisioner \ 41 | --set nfs.server=10.0.0.99 \ 42 | --set nfs.path=/k8sdata \ 43 | --set storageClass.onDelete=true -n storagenfs 44 | 45 | kubectl get storageclass 46 | 47 | #### 48 | 49 | 4. create pvc for first application 50 | 51 | --- 52 | apiVersion: v1 53 | kind: PersistentVolumeClaim 54 | metadata: 55 | name: sample-nfs-pvc 56 | spec: 57 | accessModes: 58 | - ReadWriteOnce 59 | storageClassName: nfs-client 60 | resources: 61 | requests: 62 | storage: 2Gi 63 | 64 | 65 | 5. Create service and application 66 | 67 | --- 68 | apiVersion: v1 69 | kind: Service 70 | metadata: 71 | name: nfs-nginx-svc 72 | spec: 73 | selector: 74 | app: sc-nginx 75 | ports: 76 | - protocol: TCP 77 | port: 80 78 | targetPort: 80 79 | --- 80 | apiVersion: apps/v1 81 | kind: Deployment 82 | metadata: 83 | labels: 84 | app: sc-nginx 85 | name: nfs-nginx 86 | spec: 87 | replicas: 2 88 | selector: 89 | matchLabels: 90 | app: sc-nginx 91 | template: 92 | metadata: 93 | labels: 94 | app: sc-nginx 95 | spec: 96 | volumes: 97 | - name: nfs-test 98 | persistentVolumeClaim: 99 | claimName: sample-nfs-pvc 100 | containers: 101 | - image: nginx 102 | name: nginx 103 | volumeMounts: 104 | - name: nfs-test # template.spec.volumes[].name 105 | mountPath: /usr/share/nginx/html # mount inside of container 106 | #readOnly: true # if enforcing read-only on volume 107 | ports: 108 | - containerPort: 80 109 | 110 | 111 | ======== 112 | Create another pvc and application 113 | 114 | # PVC for second application 115 | --- 116 | apiVersion: v1 117 | kind: PersistentVolumeClaim 118 | metadata: 119 | name: apache-nfs-pvc 120 | spec: 121 | accessModes: 122 | - ReadWriteOnce 123 | storageClassName: nfs-client 124 | resources: 125 | requests: 126 | storage: 1Gi 127 | 128 | 129 | # Service and Application 130 | 131 | --- 132 | apiVersion: v1 133 | kind: Service 134 | metadata: 135 | name: apache-svc 136 | spec: 137 | selector: 138 | app: apache 139 | ports: 140 | - protocol: TCP 141 | port: 80 142 | targetPort: 80 143 | --- 144 | apiVersion: apps/v1 145 | kind: Deployment 146 | metadata: 147 | labels: 148 | app: apache 149 | name: webserver 150 | spec: 151 | replicas: 2 152 | selector: 153 | matchLabels: 154 | app: apache 155 | template: 156 | metadata: 157 | labels: 158 | app: apache 159 | spec: 160 | volumes: 161 | - name: web-root 162 | persistentVolumeClaim: 163 | claimName: apache-nfs-pvc 164 | containers: 165 | - image: lovelearnlinux/webserver:v1 166 | name: apache 167 | volumeMounts: 168 | - name: web-root # template.spec.volumes[].name 169 | mountPath: /var/www/html # mount inside of container 170 | #readOnly: true # if enforcing read-only on volume 171 | ports: 172 | - containerPort: 80 173 | 174 | ### 175 | 176 | 177 | -------------------------------------------------------------------------------- /ch-07-rbac/restricted-role: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | namespace: dev 5 | name: limited-pods 6 | rules: 7 | - apiGroups: [""] 8 | # 9 | # will allow users in this role to view details of 10 | # pod named - pod-a and pod-b 11 | resources: ["pods"] 12 | resourceNames: ["pod-a", "pod-b"] 13 | verbs: ["update", "get", "list"] 14 | 15 | -------------------------------------------------------------------------------- /ch-07-rbac/second-kubernetes-admin: -------------------------------------------------------------------------------- 1 | #### CREATING SECOND KUBERNETES ADMIN #### 2 | 3 | 4 | Step #1 - New admin - alok, should generate his openssl key and csr, from his workstation 5 | 6 | openssl genrsa -out alok.key 2048 7 | openssl req -new -key alok.key -subj "/CN=alok/O=system:masters" -out alok.csr 8 | 9 | Step #2 - Send the alok.key and alok.csr to manager, using scp 10 | 11 | Step #3 - MANAGER - should approve the csr request 12 | 13 | openssl x509 -req -in alok.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -out alok.crt 14 | 15 | Step #4 - MANAGER - now send the alok.crt back to alok user (the new cluster admin) 16 | 17 | Step #5 - MANAGER add the user alok in the pre-defined clusterrole - cluster-admin 18 | 19 | kubectl describe clusterrole cluster-admin 20 | kubectl create clusterrolebinding alok-admin --clusterrole=cluster-admin --user=alok 21 | kubectl describe clusterrolebinding alok-admin 22 | 23 | STEP #6 - ALOK - will install kubectl on his machine 24 | 25 | STEP #7 - ALOK - the new admin should have ~/.kube/config file with these contents 26 | 27 | apiVersion: v1 28 | 29 | kind: Config 30 | current-context: alok@kubernetes 31 | 32 | clusters: 33 | - cluster: 34 | certificate-authority-data: 35 | server: https://:6443 36 | name: kubernetes 37 | 38 | contexts: 39 | - context: 40 | cluster: kubernetes 41 | user: alok 42 | name: alok@kubernetes 43 | 44 | users: 45 | - name: alok 46 | user: 47 | client-certificate: /home/alok/alok.crt 48 | client-key: /home/alok/alok.key 49 | 50 | -------------------------------------------------------------------------------- /ch-07-rbac/service-account-permissions: -------------------------------------------------------------------------------- 1 | ==CHECK 2 | kubectl auth can-i create pods --as=system:serviceaccount:: -n 3 | -------------------------------------------------------------------------------- /ch-07-rbac/steps-for-user-authentication: -------------------------------------------------------------------------------- 1 | Step 1 - User creating his openssl key and converting it into .csr from his workstation machine 2 | 3 | # openssl genrsa -out user.key 2048 4 | # openssl req -new -key user.key -out user.csr -subj "/CN=username" 5 | 6 | Step 2 - User submit his key and csr to CA (on manager) 7 | 8 | # scp user.key user.csr login@manager: 9 | 10 | Step 3 - Root CA will convert his .csr into .crt and send the .crt back to user 11 | 12 | # openssl x509 -req -in user.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key \ 13 | -CAcreateserial -out user.crt -days 365 14 | 15 | # scp user.crt login@workstation: 16 | 17 | Step 4 - Now user will create a .kube directory under his home and copy the config file sent by root CA 18 | 19 | Step 5 - User also needs to install kubectl on his workstation 20 | 21 | # sudo snap install kubectl --classic 22 | 23 | ####### 24 | 25 | Once this is done root CA will create 
user credentials on kubernetes manager and limit user to a namespace 26 | dev, for this example 27 | 28 | # kubectl config set-credentials user --client-certificate=user.crt --client-key=user.key 29 | # kubectl config set-context user-context --cluster=kubernetes --namespace=dev --user=user 30 | 31 | ###### 32 | 33 | Finally a role and role binding will be attached with user 34 | 35 | 36 | -------------------------------------------------------------------------------- /ch-07-rbac/user-config-file: -------------------------------------------------------------------------------- 1 | # We have a user jack created in kubernetes who is given very restricted permissions on cluster in namespace "test" 2 | # Jack will use his workstation to access / manage kubernetes cluster 3 | # Jack need to install "kubectl" on his machine - snap install kubectl --classic 4 | # Jack need to create a directory ".kube" inside his home directory 5 | # Admin will send copy of /etc/kubernetes/admin.conf to jack with these modifications (jack can also do these things by himself) 6 | # Admin need to send jack.crt back to jack 7 | 8 | @@@@@ 9 | 10 | apiVersion: v1 11 | clusters: 12 | - cluster: 13 | certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1J ... output omitted 14 | server: https://10.0.0.100:6443 15 | name: kubernetes 16 | contexts: 17 | - context: 18 | cluster: kubernetes 19 | namespace: test 20 | user: jack 21 | name: jack 22 | current-context: jack 23 | kind: Config 24 | preferences: {} 25 | users: 26 | - name: jack 27 | user: 28 | client-certificate: /home/jack/jack.crt 29 | client-key: /home/jack/jack.key 30 | 31 | @@@@@ 32 | 33 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/Dockerfile-multi-stage-build: -------------------------------------------------------------------------------- 1 | # THIS image is using React Counter App 2 | # https://github.com/networknuts/React-Counter-App 3 | 4 | # First stage - Building the application 5 | # Use node:16-a;pine image as a parent image 6 | FROM node:16-alpine AS build 7 | 8 | # Create app directory 9 | WORKDIR /usr/src/app 10 | 11 | # Copy package.json files to the working directory 12 | COPY package*.json ./ 13 | 14 | # Install app dependencies 15 | RUN npm install 16 | 17 | # Copy the source files 18 | COPY . . 19 | 20 | # Build the React app for production 21 | RUN npm run build 22 | 23 | # Second stage - Serve the application 24 | FROM nginx:alpine 25 | 26 | # Copy build files to Nginx 27 | COPY --from=build /usr/src/app/build /usr/share/nginx/html 28 | EXPOSE 80 29 | CMD ["nginx", "-g", "daemon off;"] 30 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/Dockerfile-single-stage: -------------------------------------------------------------------------------- 1 | 2 | # THIS image is using React-Counter-App application 3 | # https://github.com/networknuts/React-Counter-App 4 | 5 | # Use node:16-alpine image as a parent image 6 | FROM node:16-alpine 7 | 8 | # Create app directory 9 | WORKDIR /usr/src/app 10 | 11 | # Copy package.json files to the working directory 12 | COPY package*.json ./ 13 | 14 | # Install app dependencies 15 | RUN npm install 16 | 17 | # Copy the source files 18 | COPY . . 
19 | 20 | # Build the React app for production 21 | RUN npm run build 22 | 23 | # Expose port 3000 for serving the app 24 | EXPOSE 3000 25 | 26 | # Command to run the app 27 | CMD ["npm", "start"] 28 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/apache2-with-non-root-user: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | LABEL description="secure webserver" environment="for test/dev only" author="networknuts" 3 | RUN apt update -y && apt install apache2 -y 4 | RUN mkdir -p /var/run/apache2 /var/lock/apache2 /var/log/apache2 5 | RUN chown -R www-data:www-data /var/run/apache2 /var/lock/apache2 /var/log/apache2 /var/www/html/ 6 | 7 | COPY source/ /var/www/html/ 8 | USER www-data 9 | EXPOSE 80 10 | CMD ["apache2ctl", "-D", "FOREGROUND"] 11 | 12 | 13 | ## Using docker lint will help checking code quality - https://hadolint.github.io/hadolint/ 14 | ## Dockle - Container Image Linter for Security, Helping build the Best-Practice Docker Image - https://github.com/goodwithtech/dockle 15 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/backup-restore-velero-different-clusters: -------------------------------------------------------------------------------- 1 | Velero -- Backup from Cluster-A & Restore to Cluster-B 2 | 3 | Install the velero on both the clusters: clusterA(backup),clusterB(restore) 4 | Make sure both the cluster points to the same S3 bucket. 5 | 6 | Install Velero on both Cluster-A and Cluster-B 7 | 8 | ## both the cluster point to the same S3 bucket 9 | 10 | velero install \ 11 | --provider aws \ 12 | --plugins velero/velero-plugin-for-aws:v1.0.1 \ 13 | --bucket \ 14 | --backup-location-config region= \ 15 | --snapshot-location-config region= \ 16 | --secret-file /root/.aws/credentials 17 | 18 | Take Backup of Cluster-A 19 | 20 | velero backup create 21 | velero backup create clusterbackup1 22 | 23 | login to the ClusterB and restore the Cluster A backup on it. 24 | 25 | velero restore create --from-backup 26 | velero restore create --from-backup clusterbackup1 27 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/backup-using-velero: -------------------------------------------------------------------------------- 1 | VELERO 2 | 3 | 4 | #1 - Client / Workstation component - 5 | 6 | wget https://github.com/vmware-tanzu/velero/releases/download/v1.13.0/velero-v1.13.0-linux-amd64.tar.gz 7 | 8 | tar -xvf velero-v1.13.0-linux-amd64.tar.gz 9 | 10 | cp velero-v1.13.0-linux-amd64/velero /usr/local/bin 11 | 12 | velero --help 13 | 14 | #2 - Install AWS CLI 15 | 16 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" 17 | unzip awscliv2.zip 18 | sudo ./aws/install 19 | 20 | #3 - Configure AWS CLI using: 21 | aws configure 22 | 23 | #4 - Create S3 bucket 24 | 25 | BUCKET= 26 | REGION= 27 | 28 | aws s3api create-bucket \ 29 | --bucket $BUCKET \ 30 | --region $REGION \ 31 | --create-bucket-configuration LocationConstraint=$REGION 32 | 33 | 34 | 35 | #5 - Set permissions for velero 36 | 37 | a. Create a IaM user 38 | 39 | aws iam create-user --user-name velero 40 | 41 | b. 
Attach policy to user velero 42 | 43 | cat > velero-policy.json < 99 | 100 | #7 - Create a file ~/credentials-velero 101 | 102 | [default] 103 | aws_access_key_id= 104 | aws_secret_access_key= 105 | 106 | 107 | #8 - Install and start velero 108 | 109 | velero install \ 110 | --provider aws \ 111 | --plugins velero/velero-plugin-for-aws:v1.9.0 \ 112 | --bucket $BUCKET \ 113 | --backup-location-config region=$REGION \ 114 | --snapshot-location-config region=$REGION \ 115 | --secret-file ./credentials-velero 116 | 117 | #9 - Check logs & status of velero 118 | 119 | kubectl -n velero get all 120 | 121 | kubectl logs deployment/velero -n velero 122 | 123 | #10 - BACKUP examples 124 | 125 | -------- ONE ----------- 126 | 127 | Create a namespace "prod" and run some applications there 128 | 129 | Take backup using velero: 130 | 131 | # velero backup create prod-backup --include-namespaces prod 132 | 133 | Check into S3 bucket you will see backup visible there. 134 | 135 | Now simulate a disaster, delete prod namespace 136 | 137 | # kubectl delete ns prod 138 | # kubectl get ns 139 | 140 | Perform a recovery 141 | 142 | # velero restore create --from-backup prod-backup 143 | 144 | ------------- TWO ------------- 145 | Backup & Restore entire cluster 146 | 147 | # velero backup create 148 | # velero restore create 149 | 150 | -------------- THREE -------------- 151 | Backup can be done based on selectors 152 | 153 | # velero backup create --selector = 154 | 155 | ----------- FOUR ----------------- 156 | Schedule a backup 157 | 158 | #1 - Create a backup schedule - it uses cron syntax 159 | creates a backup schedule that runs every day at 5am 160 | 161 | # velero schedule create morning-daily --schedule="0 5 * * *" 162 | 163 | #2 - Now trigger a backup using that schedule 164 | 165 | # velero backup create --from-schedule morning-schedule 166 | 167 | #### GET BACKUP LIST #### 168 | velero get backups 169 | 170 | 171 | #### DELETING BACKUPS ##### 172 | 173 | # kubectl delete backup -n 174 | 175 | will delete the backup custom resource only and will not delete any associated data from object/block storage 176 | 177 | # velero backup delete 178 | 179 | will delete the backup resource including all data in object/block storage 180 | 181 | ############### DELETE VELERO #################### 182 | kubectl delete namespace/velero clusterrolebinding/velero 183 | kubectl delete crds -l component=velero 184 | 185 | 186 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/change-docker-registry: -------------------------------------------------------------------------------- 1 | Either pass the --registry-mirror option when starting dockerd manually, 2 | or edit /etc/docker/daemon.json (file need to be created) and add the 3 | registry-mirrors key and value, to make the change persistent. 4 | 5 | 6 | { 7 | "registry-mirrors": ["https://"] 8 | } 9 | 10 | Save the file and reload Docker for the change to take effect. 
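A sketch of the reload step (assuming Docker runs under systemd), plus a quick check
that the mirror was picked up:

sudo systemctl restart docker
docker info | grep -A 1 "Registry Mirrors"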
11 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/change-worker-internal-ip: -------------------------------------------------------------------------------- 1 | ### 2 | ### Sometimes if your server has multiple nic's 3 | ### it can show wrong ip address as internal ip address 4 | ### use these steps to align/correct it 5 | ### *** a very common issue in production *** 6 | 7 | Method #1 - Create a drop-in file 8 | vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf 9 | ## add this line ## 10 | KUBELET_CONFIG_ARGS='--node-ip=' 11 | 12 | Method #2 - Directory edit the configuration file 13 | vim /etc/default/kubelet 14 | ## add this line ## 15 | KUBELET_CONFIG_ARGS='--node-ip=' 16 | 17 | Method #3 - Edit /var/lib/kubelet/kubeadm-flags.env 18 | add: --node-ip= 19 | 20 | In both cases run - systemctl daemon-reload && systemctl restart kubelet 21 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/changing-docker-root-dir: -------------------------------------------------------------------------------- 1 | Step #1 - Stop Docker 2 | sudo systemctl stop docker 3 | 4 | Step #2 - Create new location 5 | mkdir /home/alok/docker 6 | 7 | Create / edit the file 8 | 9 | cat <: | grep -i workdir 26 | 27 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/delete-node: -------------------------------------------------------------------------------- 1 | ## ON MANAGER 2 | kubectl get nodes 3 | kubectl drain --ignore-daemonsets 4 | kubectl delete node 5 | 6 | ## ON NODE 7 | kubeadm reset 8 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/descheduler: -------------------------------------------------------------------------------- 1 | Using Kubernetes Descheduler 2 | 3 | Step #1 - Install helm 4 | 5 | curl https://baltocdn.com/helm/signing.asc | sudo apt-key add - 6 | sudo apt-get install apt-transport-https --yes 7 | echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 8 | sudo apt-get update 9 | sudo apt-get install helm 10 | 11 | Step #2 - Install Descheduler using helm chart 12 | 13 | helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/ 14 | helm install my-release --namespace kube-system descheduler/descheduler 15 | 16 | (uninstall, in case) 17 | helm delete my-release --namespace kube-system 18 | 19 | Step #3 - 20 | 21 | kubectl -n kube-system get cronjobs 22 | kubectl -n kube-system get configmap 23 | kubectl -n kube-system describe configmap my-release-descheduler 24 | kubectl -n kube-system edit configmap my-release-descheduler 25 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/docker-change-default-ip-range: -------------------------------------------------------------------------------- 1 | Create file - /etc/docker/daemon.json 2 | with your custom ip range 3 | 4 | { 5 | "bip": "10.0.1.1/24", 6 | "default-address-pools": [ 7 | { "base": "10.0.2.0/18", "size": 24 } 8 | ] 9 | } 10 | 11 | ## save and exit 12 | systemctl restart docker 13 | 14 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/docker-compose-minecraft: -------------------------------------------------------------------------------- 1 | services: 2 | minecraft: 3 | image: itzg/minecraft-server 4 | ports: 5 | - 
"25565:25565" 6 | environment: 7 | EULA: "TRUE" 8 | deploy: 9 | resources: 10 | limits: 11 | memory: 1.5G 12 | volumes: 13 | - "~/minecraft_data:/data" 14 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | 3 | services: 4 | db: 5 | image: mysql:5.7 6 | volumes: 7 | - db_data:/var/lib/mysql 8 | restart: always 9 | environment: 10 | MYSQL_ROOT_PASSWORD: redhat 11 | MYSQL_DATABASE: wordpress 12 | MYSQL_USER: wordpress 13 | MYSQL_PASSWORD: wordpress 14 | 15 | wordpress: 16 | depends_on: 17 | - db 18 | image: wordpress:latest 19 | volumes: 20 | - wordpress_data:/var/www/html 21 | ports: 22 | - "8000:80" 23 | restart: always 24 | environment: 25 | WORDPRESS_DB_HOST: db 26 | WORDPRESS_DB_USER: wordpress 27 | WORDPRESS_DB_PASSWORD: wordpress 28 | WORDPRESS_DB_NAME: wordpress 29 | 30 | volumes: 31 | wordpress_data: 32 | db_data: 33 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/dockerfile-mysql: -------------------------------------------------------------------------------- 1 | ### Dockerfile for mysql 2 | 3 | Step #1 - Create Dockerfile, with these contents 4 | 5 | FROM komljen/ubuntu 6 | MAINTAINER alok 7 | 8 | ENV USER root 9 | ENV PASS aiPeekai0AeZ2meephoolais7doo1thu 10 | 11 | RUN \ 12 | apt-get update && \ 13 | apt-get -y install \ 14 | mysql-server-5.5 && \ 15 | rm -rf /var/lib/apt/lists/* 16 | 17 | COPY my.cnf /etc/mysql/my.cnf 18 | COPY start.sh start.sh 19 | 20 | VOLUME ["/var/lib/mysql"] 21 | 22 | RUN rm /usr/sbin/policy-rc.d 23 | CMD ["/start.sh"] 24 | 25 | EXPOSE 3306 26 | 27 | 28 | Step #2 - Create file, my.cnf, with these contents 29 | 30 | # 31 | # The MySQL database server configuration file. 32 | # 33 | # You can copy this to one of: 34 | # - "/etc/mysql/my.cnf" to set global options, 35 | # - "~/.my.cnf" to set user-specific options. 36 | # 37 | # One can use all long options that the program supports. 38 | # Run program with --help to get a list of available options and with 39 | # --print-defaults to see which it would actually understand and use. 40 | # 41 | # For explanations see 42 | # http://dev.mysql.com/doc/mysql/en/server-system-variables.html 43 | 44 | # This will be passed to all mysql clients 45 | # It has been reported that passwords should be enclosed with ticks/quotes 46 | # escpecially if they contain "#" chars... 47 | # Remember to edit /etc/mysql/debian.cnf when changing the socket location. 48 | [client] 49 | port = 3306 50 | socket = /var/run/mysqld/mysqld.sock 51 | 52 | # Here is entries for some specific programs 53 | # The following values assume you have at least 32M ram 54 | 55 | # This was formally known as [safe_mysqld]. Both versions are currently parsed. 56 | [mysqld_safe] 57 | socket = /var/run/mysqld/mysqld.sock 58 | nice = 0 59 | 60 | [mysqld] 61 | # 62 | # * Basic Settings 63 | # 64 | user = mysql 65 | pid-file = /var/run/mysqld/mysqld.pid 66 | socket = /var/run/mysqld/mysqld.sock 67 | port = 3306 68 | basedir = /usr 69 | datadir = /var/lib/mysql 70 | tmpdir = /tmp 71 | lc-messages-dir = /usr/share/mysql 72 | skip-name-resolve 73 | skip-external-locking 74 | # 75 | # Instead of skip-networking the default is now to listen only on 76 | # localhost which is more compatible and is not less secure. 
77 | bind-address = 0.0.0.0 78 | # 79 | # * Fine Tuning 80 | # 81 | key_buffer = 16M 82 | max_allowed_packet = 16M 83 | thread_stack = 192K 84 | thread_cache_size = 8 85 | # This replaces the startup script and checks MyISAM tables if needed 86 | # the first time they are touched 87 | myisam-recover = BACKUP 88 | #max_connections = 100 89 | #table_cache = 64 90 | #thread_concurrency = 10 91 | # 92 | # * Query Cache Configuration 93 | # 94 | query_cache_limit = 1M 95 | query_cache_size = 16M 96 | # 97 | # * Logging and Replication 98 | # 99 | # Both location gets rotated by the cronjob. 100 | # Be aware that this log type is a performance killer. 101 | # As of 5.1 you can enable the log at runtime! 102 | #general_log_file = /var/log/mysql/mysql.log 103 | #general_log = 1 104 | # 105 | # Error logging goes to syslog due to /etc/mysql/conf.d/mysqld_safe_syslog.cnf. 106 | # 107 | # Here you can see queries with especially long duration 108 | #log_slow_queries = /var/log/mysql/mysql-slow.log 109 | #long_query_time = 2 110 | #log-queries-not-using-indexes 111 | # 112 | # The following can be used as easy to replay backup logs or for replication. 113 | # note: if you are setting up a replication slave, see README.Debian about 114 | # other settings you may need to change. 115 | #server-id = 1 116 | #log_bin = /var/log/mysql/mysql-bin.log 117 | expire_logs_days = 10 118 | max_binlog_size = 100M 119 | #binlog_do_db = include_database_name 120 | #binlog_ignore_db = include_database_name 121 | # 122 | # * InnoDB 123 | # 124 | # InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. 125 | # Read the manual for more InnoDB related options. There are many! 126 | # 127 | # * Security Features 128 | # 129 | # Read the manual, too, if you want chroot! 130 | # chroot = /var/lib/mysql/ 131 | # 132 | # For generating SSL certificates I recommend the OpenSSL GUI "tinyca". 133 | # 134 | # ssl-ca=/etc/mysql/cacert.pem 135 | # ssl-cert=/etc/mysql/server-cert.pem 136 | # ssl-key=/etc/mysql/server-key.pem 137 | 138 | 139 | 140 | [mysqldump] 141 | quick 142 | quote-names 143 | max_allowed_packet = 16M 144 | 145 | [mysql] 146 | #no-auto-rehash # faster start of mysql but no tab completition 147 | 148 | [isamchk] 149 | key_buffer = 16M 150 | 151 | # 152 | # * IMPORTANT: Additional settings that can override those from this file! 153 | # The files must end with '.cnf', otherwise they'll be ignored. 
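# For example, a drop-in such as /etc/mysql/conf.d/tuning.cnf (the file name is
# only an illustration) could raise max_connections without editing this file
# or rebuilding the image.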
154 | # 155 | !includedir /etc/mysql/conf.d/ 156 | 157 | 158 | Step #3 - Create file, start.sh, with these contents 159 | 160 | #!/usr/bin/env bash 161 | #=============================================================================== 162 | # 163 | # Configure mysql 164 | # 165 | #=============================================================================== 166 | echo "Starting mysql:" 167 | /usr/bin/mysqld_safe & 168 | #------------------------------------------------------------------------------- 169 | until $(mysqladmin ping > /dev/null 2>&1) 170 | do 171 | : 172 | done 173 | #------------------------------------------------------------------------------- 174 | echo "Setting root password:" 175 | mysqladmin -u $USER password $PASS 176 | mysql -u $USER -p$PASS < 8 | 9 | Credit - https://github.com/wagoodman/dive 10 | 11 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/gui-on-server: -------------------------------------------------------------------------------- 1 | sudo apt update && sudo apt upgrade 2 | sudo apt install slim 3 | sudo apt install ubuntu-desktop 4 | sudo reboot 5 | 6 | Optional - to remove 7 | sudo apt remove slim ubuntu-desktop 8 | 9 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/helm-install: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null 3 | sudo apt-get install apt-transport-https --yes 4 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 5 | sudo apt-get update -y 6 | sudo apt-get install helm -y 7 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/image-pull-secrets: -------------------------------------------------------------------------------- 1 | Create a secret in kubernetes 2 | 3 | kubectl create secret docker-registry my-registry-secret \ 4 | --docker-username=DOCKER_USER \ 5 | --docker-password=DOCKER_PASSWORD \ 6 | --docker-email=DOCKER_EMAIL 7 | 8 | 9 | 10 | In the deployment 11 | 12 | spec: 13 | containers: 14 | - name: boxone 15 | image: image-name 16 | imagePullSecrets: 17 | - name: my-registry-secret 18 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/install kubectl: -------------------------------------------------------------------------------- 1 | sudo apt-get update -y 2 | # apt-transport-https may be a dummy package; if so, you can skip that package 3 | sudo apt-get install -y apt-transport-https ca-certificates curl gnupg 4 | curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg 5 | sudo chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg 6 | echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list 7 | sudo chmod 644 /etc/apt/sources.list.d/kubernetes.list 8 | sudo apt-get update 9 | sudo apt-get install -y kubectl 10 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/kubectl-view-allocations: 
-------------------------------------------------------------------------------- 1 | kubectl-view-allocations 2 | 3 | kubectl plugin lists allocations for resources (cpu, memory, gpu,...) as defined into the manifest of nodes and running pods. 4 | It doesn't list usage like kubectl top. It can provide result grouped by namespaces, nodes, pods and filtered by resources'name. 5 | 6 | Installation: 7 | 8 | Run the script 9 | 10 | curl https://raw.githubusercontent.com/davidB/kubectl-view-allocations/master/scripts/getLatest.sh | bash 11 | 12 | Usage: 13 | 14 | kubectl-view-allocations -h 15 | 16 | kubectl-view-allocations #simplest view of resources 17 | 18 | kubectl-view-allocations -u #see cpu & memory utilization 19 | 20 | kubectl-view-allocations -r cpu #only cpu resource usage 21 | 22 | kubectl-view-allocations -r memory #only memory resource usage 23 | 24 | kubectl-view-allocations -g node #node specific usage of memory/cpu 25 | 26 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/kubernetes-cluster-with-crio: -------------------------------------------------------------------------------- 1 | ** Installing Kubernetes cluster on Ubuntu 20.04 using crio 1.18 and kubernetes version 1.18.0-00 ** 2 | 3 | Minimum 3 machines with 2-4 GiB of RAM & 1 vcpu. 4 | All machines must have two NIC. One working in NAT (for Internet connection) and one Internal (for cluster communication) 5 | Swap must be disabled on all machines 6 | Update /etc/hosts, so all machines will be able to talk to each other using names 7 | 8 | Names of machines: 9 | Control - control.example.com -- 10.0.0.100 10 | First Node - nodeone.example.com -- 10.0.0.1 11 | Second Node - nodetwo.example.com -- 10.0.0.2 12 | 13 | === Configuring Kubernetes controller & Nodes - ON ALL MACHINES 14 | 15 | #1 - Adding key 16 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 17 | 18 | #2 - Adding kubernetes repo 19 | echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" >> /etc/apt/sources.list.d/kubernetes.list 20 | apt-get update 21 | 22 | #3 - Installing kubernetes components 23 | Install kubeadm 24 | apt install -y kubeadm=1.18.0-00 25 | Install kubectl 26 | apt install -y kubectl=1.18.0-00 27 | Install cri-o 28 | modprobe overlay 29 | modprobe br_netfilter 30 | Create a file /etc/sysctl.d/99-mykubernetes-cri.conf, with these contents 31 | net.bridge.bridge-nf-call-iptables = 1 32 | net.ipv4.ip_forward = 1 33 | net.bridge.bridge-nf-call-ip6tables = 1 34 | Run 35 | sysctl --system 36 | Now create two environment variables 37 | export OS=xUbuntu_20.04 38 | export VERSION=1.18 39 | 40 | Adding repos for cri-o and install cri-o 41 | 42 | echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list 43 | echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list 44 | curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | apt-key add - 45 | curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | apt-key add - 46 | apt update 47 | apt install cri-o cri-o-runc 48 | 49 | Find container monitor and copy its path 50 | which conmon 51 | 52 | Edit /etc/crio/crio.conf 53 | conmon = "/usr/bin/conmon" 54 | registries = [ 55 | 
"docker.io", 56 | "quay.io", 57 | ] 58 | 59 | Start crio and enable it 60 | systemctl daemon-reload 61 | systemctl enable crio 62 | systemctl start crio 63 | systemctl status crio 64 | 65 | Install Kubelet. We need to tell kubelet that we are using "cri-o" as our container runtime, not docker. 66 | Create a file /etc/default/kubelet, with these contents 67 | KUBELET_EXTRA_ARGS=--feature-gates="AllAlpha=false,RunAsGroup=true" --container-runtime=remote --cgroup-driver=systemd --container-runtime-endpoint='unix:///var/run/crio/crio.sock' --runtime-request-timeout=5m 68 | 69 | Now install kubelet 70 | apt install -y kubelet=1.18.0-00 71 | 72 | Install kubernetes-cni network 73 | apt-get install -y kubernetes-cni 74 | 75 | 76 | ==== ONLY ON KUBERNETES CONTROLLER 77 | 78 | Initialize kubernetes controller 79 | kubeadm init --apiserver-advertise-address=10.0.0.100 --cri-socket=/var/run/crio/crio.sock --ignore-preflight-errors=all 80 | 81 | [check for success message and run the commands mentioned in message] 82 | 83 | === Install overlay network - calico in our case 84 | 85 | curl https://docs.projectcalico.org/manifests/calico.yaml -O 86 | kubectl apply -f calico.yaml 87 | 88 | Check the kubernetes control node status 89 | kubectl get nodes 90 | 91 | ====== 92 | 93 | Also copy the kubeadm join command to join nodes to kubernetes cluster 94 | kubeadm token create --print-join-command 95 | 96 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/kubernetes-dashboard: -------------------------------------------------------------------------------- 1 | == FROM WORKSTATION MACHINE == 2 | 3 | STEP #1 - Get dashboard 4 | # kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml 5 | 6 | # kubectl -n kubernetes-dashboard expose deployment kubernetes-dashboard --name k8s-dash --type=NodePort 7 | 8 | # kubectl -n kubernetes-dashboard describe svc k8s-dash 9 | 10 | 11 | STEP #2 - run 12 | kubectl proxy 13 | 14 | STEP #3 - Open Browser and access http://127.0.0.1:8001 OR https://:NodePort-value 15 | 16 | STEP #4 - You need a service account with cluster-admin rights. 
Create a file dash-srv-account.yaml 17 | 18 | --- 19 | apiVersion: v1 20 | kind: ServiceAccount 21 | metadata: 22 | name: admin-user 23 | namespace: kubernetes-dashboard 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | name: admin-user 29 | roleRef: 30 | apiGroup: rbac.authorization.k8s.io 31 | kind: ClusterRole 32 | name: cluster-admin 33 | subjects: 34 | - kind: ServiceAccount 35 | name: admin-user 36 | namespace: kubernetes-dashboard 37 | 38 | ## file end here 39 | and apply it - kubectl create -f dash-srv-account.yaml 40 | 41 | STEP #5 - Create token for the user created 42 | kubectl -n kubernetes-dashboard create token admin-user 43 | 44 | STEP #6 - Now paste the "token" in the authentication method in browser 45 | 46 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/kubescape: -------------------------------------------------------------------------------- 1 | kubescape - scan cluster / yaml / helm charts 2 | 3 | Installation: 4 | 5 | Linux Ubuntu 6 | 7 | Method #1 8 | curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh | /bin/bash 9 | 10 | Method #2 11 | sudo add-apt-repository ppa:kubescape/kubescape 12 | sudo apt update 13 | sudo apt install kubescape 14 | 15 | Usage: 16 | 17 | #1 - Scan running kubernetes cluster 18 | 19 | # kubescape scan --enable-host-scan --verbose 20 | 21 | PS: The Kubernetes API server has a very limited set of information about the worker nodes in a cluster. To help Kubescape evaluate the security posture of your nodes, you can use the host scanner, a component which is deployed to each node when a scan is being performed and removed immediately after. 22 | 23 | #2 - Scan running kubernetes cluster with NSA / mitre framework 24 | 25 | # kubescape scan framework nsa/mitre 26 | 27 | #3 - Scan for specific namespace in kubernetes cluster 28 | 29 | # kubescape scan --include-namespaces ,, 30 | 31 | #4 - Scan & exclude certain namespaces 32 | 33 | # kubescape scan --exlude-namespaces kube-system 34 | 35 | #5 - Scan yaml files before using it 36 | 37 | # kubescape scan *.yaml 38 | 39 | #6 - Scan kubernetes manifests files from a git repo 40 | 41 | # kubescape scan https://github.com/networknuts/kubernetes 42 | 43 | #7 - Scan and get output in pdf 44 | 45 | # kubescape scan framework nsa --format pdf --output scan-result.pdf 46 | 47 | 48 | ### 49 | Reference - https://github.com/kubescape/kubescape 50 | ### 51 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/kustomize: -------------------------------------------------------------------------------- 1 | #### Install Kustomize 2 | 3 | curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash 4 | 5 | sudo install -o root -g root -m 0755 kustomize /usr/local/bin/kustomize 6 | 7 | kustomize version 8 | 9 | ### CREATE directory structure 10 | 11 | mkdir ~/myapp 12 | mkdir ~/myapp/base 13 | mkdir ~/myapp/overlays 14 | mkdir ~/myapp/overlays/dev 15 | mkdir ~/myapp/overlays/prod 16 | 17 | #### STEP #1 - Polulate base 18 | 19 | vim ~/myapp/base/deployment.yaml 20 | 21 | --- 22 | apiVersion: apps/v1 23 | kind: Deployment 24 | metadata: 25 | name: web-deployment 26 | spec: 27 | replicas: 1 28 | selector: 29 | matchLabels: 30 | app: web 31 | template: 32 | metadata: 33 | labels: 34 | app: web 35 | spec: 36 | containers: 37 | - name: nginx 38 | image: nginx:1.14.2 39 | ports: 40 | 
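# port 80 is nginx's default listen port; the web-service defined next forwards to it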
- containerPort: 80 41 | 42 | 43 | vim ~/myapp/base/service.yaml 44 | 45 | --- 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: web-service 50 | spec: 51 | selector: 52 | app: web 53 | ports: 54 | - name: http 55 | port: 80 56 | 57 | 58 | vim ~/myapp/base/kustomization.yaml 59 | 60 | --- 61 | apiVersion: kustomize.config.k8s.io/v1beta1 62 | kind: Kustomization 63 | 64 | resources: 65 | - deployment.yaml 66 | - service.yaml 67 | 68 | # To add common labels to manifest file 69 | commonLabels: 70 | #Labels: 71 | company: networknuts 72 | 73 | namespace: default 74 | namePrefix: bike- 75 | nameSuffix: -dev 76 | commonAnnotations: 77 | branch: master 78 | 79 | 80 | #### STEP #2 - Customize base for DEV 81 | 82 | vim ~/myapp/overlays/dev/namespace.yaml 83 | 84 | --- 85 | apiVersion: v1 86 | kind: Namespace 87 | metadata: 88 | name: dev 89 | 90 | 91 | vim ~/myapp/overlays/dev/deployment-dev.yaml 92 | 93 | --- 94 | apiVersion: apps/v1 95 | kind: Deployment 96 | metadata: 97 | name: web-deployment 98 | spec: 99 | replicas: 3 # Update the replica count to 3 100 | template: 101 | spec: 102 | containers: 103 | - name: nginx 104 | resources: 105 | limits: 106 | cpu: "200" # Lower CPU limit to 200m (0.2 CPU cores) 107 | memory: "256Mi" # Lower memory limit to 256 MiB 108 | requests: 109 | cpu: "100" # Lower CPU request to 100m (0.1 CPU cores) 110 | memory: "128Mi" 111 | 112 | 113 | vim ~/myapp/overlays/dev/service-dev.yaml 114 | 115 | --- 116 | apiVersion: v1 117 | kind: Service 118 | metadata: 119 | name: web-service 120 | spec: 121 | type: NodePort 122 | 123 | 124 | vim ~/myapp/overlays/dev/kustomization.yaml 125 | 126 | --- 127 | apiVersion: kustomize.config.k8s.io/v1beta1 128 | kind: Kustomization 129 | 130 | namespace: dev 131 | namePrefix: dev- 132 | resources: 133 | - ../../base 134 | - namespace.yaml 135 | patches: 136 | - path: deployment-dev.yaml 137 | - path: service-dev.yaml 138 | 139 | 140 | #### STEP #3 - Customize base for PROD 141 | 142 | 143 | vim ~/myapp/overlays/prod/namespace.yaml 144 | 145 | --- 146 | apiVersion: v1 147 | kind: Namespace 148 | metadata: 149 | name: prod 150 | 151 | 152 | vim ~/myapp/overlays/prod/deployment-prod.yaml 153 | 154 | --- 155 | apiVersion: apps/v1 156 | kind: Deployment 157 | metadata: 158 | name: web-deployment 159 | spec: 160 | replicas: 4 # Update the replica count to 3 161 | template: 162 | spec: 163 | containers: 164 | - name: nginx 165 | resources: 166 | limits: 167 | cpu: "200" # Lower CPU limit to 200m (0.2 CPU cores) 168 | memory: "256Mi" # Lower memory limit to 256 MiB 169 | requests: 170 | cpu: "200" # Lower CPU request to 100m (0.1 CPU cores) 171 | memory: "256Mi" 172 | 173 | 174 | vim ~/myapp/overlays/prod/service-prod.yaml 175 | 176 | --- 177 | apiVersion: v1 178 | kind: Service 179 | metadata: 180 | name: web-service 181 | spec: 182 | type: NodePort 183 | 184 | 185 | vim ~/myapp/overlays/prod/kustomization.yaml 186 | 187 | --- 188 | apiVersion: kustomize.config.k8s.io/v1beta1 189 | kind: Kustomization 190 | 191 | namespace: prod 192 | namePrefix: prod- 193 | resources: 194 | - ../../base 195 | - namespace.yaml 196 | patches: 197 | - path: deployment-prod.yaml 198 | - path: service-prod.yaml 199 | 200 | 201 | #### CHECK AND APPLY 202 | 203 | cd ~/myapp 204 | kustomize build overlays/dev 205 | kustomize build overlays/prod 206 | 207 | kubectl apply -k overlays/dev 208 | kubectl apply -k overlays/prod 209 | 210 | 211 | -------------------------------------------------------------------------------- 
/ch-10-high-value-extra/lens-prometheus-grafana: -------------------------------------------------------------------------------- 1 | ### Monitoring kubernetes cluster using LENS + HELM + PROMETHEUS + GRAFANA 2 | ### ON WORKSTATION MACHINE 3 | 4 | ############# 5 | INSTALL LENS 6 | ############# 7 | 8 | wget https://api.k8slens.dev/binaries/Lens-5.3.3-latest.20211223.1.amd64.deb 9 | dpkg -i Lens-5.3.3-latest.20211223.1.amd64.deb 10 | 11 | ## Copy the .kube/config from manager to workstation /home/alok/.kube/config 12 | 13 | Type - lens 14 | and refer your config file 15 | 16 | ## kubectl must be install on workstation machine 17 | snap install kubectl --classic 18 | 19 | 20 | ########################### 21 | HELM + PROMETHEUS + GRAFANA 22 | ########################### 23 | 24 | ### STEP ONE ### 25 | 26 | Install Helm Chart 27 | 28 | ----- 29 | curl https://baltocdn.com/helm/signing.asc | sudo apt-key add - 30 | sudo apt-get install apt-transport-https --yes 31 | echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 32 | sudo apt-get update 33 | sudo apt-get install helm 34 | ----- 35 | 36 | ### STEP TWO ### 37 | 38 | Install Kube Prometheus Helm Chart application using Helm. 39 | This helm chart (application) will install and configure prometheus and grafana 40 | 41 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 42 | helm repo update 43 | kubectl create ns monitoring 44 | helm install prometheus --namespace monitoring prometheus-community/kube-prometheus-stack 45 | 46 | ### STEP THREE ### 47 | Use the prometheus grafana stack for monitoring kubernetes cluster live 48 | 49 | kubectl get pods -n monitoring 50 | 51 | kubectl get svc -n monitoring ## Get the details of prometheus-grafana service 52 | 53 | kubectl port-forward -n monitoring service/prometheus-grafana 3000:80 54 | 55 | In Browser - http://localhost:3000 56 | 57 | 58 | ######### 59 | Reference - https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack 60 | ######### 61 | 62 | ######### 63 | Video - https://youtu.be/Bh6jPIu0p24 64 | ######### 65 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/metal-lb load balancer: -------------------------------------------------------------------------------- 1 | ########################################## 2 | MetalLB on bare metal Kubernetes cluster 3 | ########################################## 4 | 5 | 6 | ## Step #1 - Installing using Helm Chart 7 | 8 | kubectl create ns metal 9 | helm repo add metallb https://metallb.github.io/metallb 10 | helm install metallb metallb/metallb -n metal 11 | 12 | helm -n metal list 13 | kubectl -n metal get all 14 | 15 | ## Step #2 - Create IP Address Pool 16 | 17 | Get the IP range your cluster is using. 
18 | 19 | ip a s #get ip which is used to go to Internet - 192.168.0.0/24 20 | 21 | vim metal-ip-address-pool.yaml 22 | 23 | --- 24 | apiVersion: metallb.io/v1beta1 25 | kind: IPAddressPool 26 | metadata: 27 | name: test-pool 28 | namespace: metal 29 | spec: 30 | addresses: 31 | - 192.168.0.100-192.168.0.110 32 | 33 | ## file ends here 34 | 35 | kubectl apply -f metal-ip-address-pool.yaml 36 | 37 | ## Step #3 - Create L2 Advertising 38 | 39 | vim metal-l2-advertising.yaml 40 | 41 | --- 42 | apiVersion: metallb.io/v1beta1 43 | kind: L2Advertisement 44 | metadata: 45 | name: example 46 | namespace: metal 47 | spec: 48 | ipAddressPools: 49 | - test-pool 50 | 51 | ## file ends here 52 | 53 | kubectl apply -f metal-l2-advertising.yaml 54 | 55 | ## Step #4 - Create a application with LoadBalancer service 56 | 57 | vim application.yaml 58 | 59 | --- 60 | apiVersion: apps/v1 61 | kind: Deployment 62 | metadata: 63 | name: nginx-deployment 64 | labels: 65 | app: nginx 66 | spec: 67 | replicas: 3 68 | selector: 69 | matchLabels: 70 | app: nginx 71 | template: 72 | metadata: 73 | labels: 74 | app: nginx 75 | spec: 76 | containers: 77 | - name: nginx 78 | image: nginx:1.14.2 79 | ports: 80 | - containerPort: 80 81 | --- 82 | apiVersion: v1 83 | kind: Service 84 | metadata: 85 | name: nginx-dep-svc 86 | spec: 87 | selector: 88 | app: nginx 89 | ports: 90 | - protocol: TCP 91 | port: 80 92 | targetPort: 80 93 | type: LoadBalancer 94 | 95 | ## file ends here 96 | 97 | kubectl apply -f application.yaml 98 | 99 | kubectl get svc #check External IP on your service 100 | 101 | ## Try accessing the application on external IP from your base OS 102 | ## if all is good, it should work 103 | 104 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/multi-master-setup: -------------------------------------------------------------------------------- 1 | MULTI-MASTER SETUP 2 | 3 | Cloud Platform used - AWS 4 | Instances created - 5 | 2 machines for master, ubuntu 16.04+, 2 CPU, 2 GB RAM, 10 GB storage 6 | 2 machines for worker, ubuntu 16.04+, 1 CPU, 2 GB RAM, 10 GB storage 7 | 1 machine for loadbalancer, ubuntu 16.04+, 1 CPU, 2 GB RAM, 10 GB storage 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | ##### 16 | STEP #1 - Configuring Load Balancer 17 | ##### 18 | 19 | 1. SSH into the loadbalancer machine 20 | 2. Switch to root using sudo -i 21 | 3. Change hostname (recommended), repeat the same step for all machines just change names to manager1/manager2/worker1/worker2 22 | # hostnamectl set-hostname loadbalancer 23 | # vim /etc/hosts 24 | 127.0.0.1 localhost loadbalancer 25 | x.x.x.x loadbalancer 26 | 4. Reboot to check 27 | 5. Update repo and system 28 | # sudo apt-get update && sudo apt-get upgrade -y 29 | 6. Install HAProxy 30 | # sudo apt-get install haproxy -y 31 | 7. Edit haproxy configuration file 32 | # vim /etc/haproxy/haproxy.cfg 33 | 34 | --- add these lines at bottom of file 35 | frontend fe-apiserver 36 | bind 0.0.0.0:6443 37 | mode tcp 38 | option tcplog 39 | default_backend be-apiserver 40 | 41 | backend be-apiserver 42 | mode tcp 43 | option tcplog 44 | option tcp-check 45 | balance roundrobin 46 | default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 47 | server manager1 :6443 check 48 | server manager2 :6443 check 49 | 50 | --- save and exit 51 | 8. 
Restart haproxy and check 52 | # systemctl restart haproxy 53 | # systemctl enable haproxy 54 | # systemctl status haproxy 55 | # nc -v localhost 6443 56 | 57 | 58 | ##### 59 | STEP #2 60 | ##### 61 | 62 | Installing kubeadm / kubelet and docker on manager1/manager2/worker1/worker2 63 | 64 | Create a file for-all-machines.sh, with these contents and run on all machines except loadbalancer 65 | 66 | ########### for-all-machines.sh ######## 67 | #!/bin/bash 68 | echo "disabling swap" 69 | swapoff -a 70 | sed -e '/swap/s/^/#/g' -i /etc/fstab 71 | echo "installing kubernetes version 1.24.1-00" 72 | sudo apt-get update 73 | sudo apt-get install -y apt-transport-https ca-certificates curl 74 | sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg 75 | echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list 76 | apt-get update 77 | apt-get install -y kubelet=1.24.1-00 kubeadm=1.24.1-00 kubectl=1.24.1-00 docker.io 78 | apt-mark hold kubelet kubeadm kubectl 79 | cat <:6443" --upload-certs --pod-network-cidr=192.168.0.0/16 105 | 106 | 4. Copy the output of SUCCESS in a file. 107 | 108 | ##### 109 | STEP 4 110 | ##### 111 | 112 | Login to manager2 113 | 114 | 1. SSH into manager2 115 | 2. Get root access using sudo -i 116 | 3. Paste the token of manager copied from STEP 3. Something like this: 117 | 118 | kubeadm join loadbalancer:6443 --token cnslau.kd5fjt96jeuzymzb \ 119 | --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5 \ 120 | --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146 121 | 122 | ##### 123 | STEP 5 124 | ##### 125 | 126 | Login in worker1 and worker2 and repeat these steps 127 | 128 | 1. SSH into worker1/worker2 129 | 2. Get root access using sudo -i 130 | 3. Paste the token of joining worker copied from STEP 3. Something like this: 131 | kubeadm join loadbalancer:6443 --token cnslau.kd5fjt96jeuzymzb \ 132 | --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5 133 | 134 | ##### 135 | STEP 6 136 | ##### 137 | 138 | Finally configure loadbalancer with kubeconfig and install kubectl 139 | 140 | 1. SSH into loadbalancer 141 | 2. Get root access using sudo -i 142 | 3. Create a directory - mkdir -p $HOME/.kube 143 | 4. Copy the contents of /etc/kubernetes/admin.conf from master1 in a file named "config" inside $HOME/.kube/ 144 | 5. Install kubectl 145 | # snap install kubectl --classic 146 | 6. Check cluster 147 | # kubectl get nodes 148 | 149 | 7. Install Overlay network/CNI 150 | # kubectl create -f https://projectcalico.docs.tigera.io/manifests/tigera-operator.yaml 151 | # curl https://projectcalico.docs.tigera.io/manifests/custom-resources.yaml -O 152 | # kubectl create -f custom-resources.yaml 153 | 154 | 8. 
Again check the cluster, nodes will be READY, it may take some time 155 | # kubectl get nodes 156 | 157 | 158 | 159 | 160 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/nginx-ingress-controller: -------------------------------------------------------------------------------- 1 | ### STEP TO CREATE NGINX INGRESS CONTROLLER 2 | 3 | 4 | git clone https://github.com/nginxinc/kubernetes-ingress.git --branch v3.1.1 5 | cd kubernetes-ingress/deployments/ 6 | kubectl apply -f common/ns-and-sa.yaml 7 | kubectl apply -f rbac/rbac.yaml 8 | kubectl apply -f rbac/ap-rbac.yaml 9 | kubectl apply -f rbac/apdos-rbac.yaml 10 | kubectl apply -f common/nginx-config.yaml 11 | kubectl apply -f common/ingress-class.yaml 12 | kubectl apply -f common/crds/k8s.nginx.org_virtualservers.yaml 13 | kubectl apply -f common/crds/k8s.nginx.org_virtualserverroutes.yaml 14 | kubectl apply -f common/crds/k8s.nginx.org_transportservers.yaml 15 | kubectl apply -f common/crds/k8s.nginx.org_policies.yaml 16 | kubectl apply -f common/crds/k8s.nginx.org_globalconfigurations.yaml 17 | kubectl apply -f daemon-set/nginx-ingress.yaml 18 | kubectl get ns 19 | kubectl -n nginx-ingress get ds 20 | kubectl -n nginx-ingress get pods 21 | 22 | #try accessing a node IP using browser - you will get 404 nginx error as request is taken by 23 | #nginx controller 24 | 25 | #let create a application now 26 | 27 | # vim petclinic.yaml 28 | --- 29 | apiVersion: v1 30 | kind: Namespace 31 | metadata: 32 | name: petclinic 33 | 34 | --- 35 | apiVersion: apps/v1 36 | kind: Deployment 37 | metadata: 38 | name: petclinic 39 | namespace: petclinic 40 | spec: 41 | replicas: 2 42 | selector: 43 | matchLabels: 44 | app: petclinic 45 | template: 46 | metadata: 47 | labels: 48 | app: petclinic 49 | spec: 50 | containers: 51 | - name: pet 52 | image: lovelearnlinux/webserver:v1 53 | ports: 54 | - containerPort: 80 55 | --- 56 | 57 | apiVersion: v1 58 | kind: Service 59 | metadata: 60 | name: petclinic-svc 61 | namespace: petclinic 62 | spec: 63 | ports: 64 | - port: 80 65 | targetPort: 80 66 | protocol: TCP 67 | name: http 68 | selector: 69 | app: petclinic 70 | --- 71 | apiVersion: networking.k8s.io/v1 72 | kind: Ingress 73 | metadata: 74 | name: petclinic-ingress 75 | namespace: petclinic 76 | annotations: 77 | nginx.ingress.kubernetes.io/rewrite-target: / 78 | spec: 79 | ingressClassName: nginx 80 | rules: 81 | - host: petclinic.example.com 82 | http: 83 | paths: 84 | - path: / 85 | pathType: Prefix 86 | backend: 87 | service: 88 | name: petclinic-svc 89 | port: 90 | number: 80 91 | 92 | ### save and exit 93 | 94 | #since we don't have DNS here. 
Modify your workers /etc/hosts 95 | petclinic.example.com 96 | 97 | #now try doing a - curl http://petclinic.example.com from worker 98 | ## it should work 99 | #### 100 | 101 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/patch-resources: -------------------------------------------------------------------------------- 1 | Step #1 - create a deployment - sampledeploy.yaml 2 | --- 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: nginx-deployment 7 | labels: 8 | app: nginx 9 | spec: 10 | replicas: 2 11 | selector: 12 | matchLabels: 13 | app: nginx 14 | template: 15 | metadata: 16 | labels: 17 | app: nginx 18 | env: test 19 | spec: 20 | containers: 21 | - name: nginx 22 | image: nginx:1.14.2 23 | ports: 24 | - containerPort: 80 25 | resources: 26 | limits: 27 | cpu: 200m 28 | memory: 256Mi 29 | requests: 30 | cpu: 110m 31 | memory: 100Mi 32 | resizePolicy: 33 | - resourceName: cpu 34 | restartPolicy: NotRequired 35 | - resourceName: memory 36 | restartPolicy: RestartContainer 37 | 38 | Step #2 - create the deployment - 39 | 40 | kubectl create -f sampledeploy.yaml 41 | kubectl get pods -o wide 42 | 43 | Step #3 - add a patch file to change labels - patch-labels.yaml 44 | --- 45 | spec: 46 | template: 47 | metadata: 48 | labels: 49 | app: nginx 50 | env: dev 51 | 52 | Step #4 - apply the patch 53 | kubectl patch deployment nginx-deployment --patch-file patch-labels.yaml 54 | 55 | Step #5 - check the results and repeat the same for cpu / memory / nodeselector patches 56 | 57 | patch-cpu.yaml 58 | --- 59 | spec: 60 | template: 61 | spec: 62 | containers: 63 | - name: nginx 64 | resources: 65 | limits: 66 | cpu: 200m 67 | memory: 256Mi 68 | requests: 69 | cpu: 110m 70 | memory: 100Mi 71 | 72 | patch-memory.yaml 73 | --- 74 | spec: 75 | template: 76 | spec: 77 | containers: 78 | - name: nginx 79 | resources: 80 | limits: 81 | cpu: 200m 82 | memory: 256Mi 83 | requests: 84 | cpu: 110m 85 | memory: 110Mi 86 | 87 | patch-nodeselector.yaml 88 | --- 89 | spec: 90 | template: 91 | spec: 92 | nodeSelector: 93 | key: value 94 | 95 | 96 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/priority-classes: -------------------------------------------------------------------------------- 1 | STEP #1 2 | 3 | ### Label your nodeone 4 | kubectl label node nodeone size=small 5 | ### my nodeone has only 2GiB of memory so it can't run more than 3 pods asking for 600Mi each 6 | ### you have to do your calculations for this lab 7 | 8 | 9 | STEP #2 10 | 11 | ### Create two priority classes - high-priority & low-priority 12 | ### vim priority-classes.yaml 13 | --- 14 | apiVersion: scheduling.k8s.io/v1 15 | kind: PriorityClass 16 | metadata: 17 | name: low-priority 18 | value: 500 19 | globalDefault: false 20 | description: "This priority class should be used for non-critical service pods only." 21 | --- 22 | apiVersion: scheduling.k8s.io/v1 23 | kind: PriorityClass 24 | metadata: 25 | name: high-priority 26 | value: 1000 27 | globalDefault: false 28 | description: "This priority class should be used for critical service pods only." 29 | 30 | 31 | ### file ends here 32 | # kubectl apply -f priority-classes.yaml 33 | # kubectl get priorityclasses 34 | 35 | STEP #2 36 | 37 | ### Create two deployments. 
One will use low-priority and other high-priority class 38 | ### vim dep-low-priority.yaml 39 | --- 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | metadata: 43 | name: trivial-app 44 | labels: 45 | app: nginx 46 | spec: 47 | replicas: 3 48 | selector: 49 | matchLabels: 50 | app: nginx 51 | template: 52 | metadata: 53 | labels: 54 | app: nginx 55 | spec: 56 | priorityClassName: low-priority 57 | containers: 58 | - name: nginx 59 | image: nginx:1.14.2 60 | ports: 61 | - containerPort: 80 62 | resources: 63 | limits: 64 | cpu: 300m 65 | memory: 600Mi 66 | requests: 67 | cpu: 300m 68 | memory: 600Mi 69 | nodeSelector: 70 | size: small 71 | 72 | ### file ends here 73 | # kubectl apply -f dep-low-priority.yaml 74 | #### check pods they will be running soon 75 | # kubectl get pods 76 | 77 | ######## NOW ########## 78 | ## Create another application wanting to run on nodeone with high-priority class 79 | ### vim dep-high-priority.yaml 80 | --- 81 | apiVersion: apps/v1 82 | kind: Deployment 83 | metadata: 84 | name: critical-app 85 | labels: 86 | app: nginx 87 | spec: 88 | replicas: 3 89 | selector: 90 | matchLabels: 91 | app: nginx 92 | template: 93 | metadata: 94 | labels: 95 | app: nginx 96 | spec: 97 | priorityClassName: high-priority 98 | containers: 99 | - name: nginx 100 | image: nginx:1.14.2 101 | ports: 102 | - containerPort: 80 103 | resources: 104 | limits: 105 | cpu: 300m 106 | memory: 600Mi 107 | requests: 108 | cpu: 300m 109 | memory: 600Mi 110 | nodeSelector: 111 | size: small 112 | 113 | ### file ends here 114 | # kubectl apply -f dep-high-priority.yaml 115 | ##### the moment you run a high priority application on same node with resource crunch 116 | ##### kubernetes will run the high priority class application and make the earlier one in pending 117 | # kubectl get pods 118 | 119 | 120 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/private-image-registry: -------------------------------------------------------------------------------- 1 | Step #1 - Private Image Registry - Kubernetes 2 | 3 | 1. Get one machine with ubuntu server (ip address=10.0.0.88) and install docker 4 | - also put entry in /etc/hosts - 10.0.0.88 imageserver.networknuts.net 5 | in ALL worker nodes 6 | 7 | 2. Run registry services as a container 8 | docker run -d -p 5000:5000 --restart always --name registry registry:2 9 | 10 | 3. Push one image in the the registry. 11 | a. first download a image 12 | docker pull lovelearnlinux/webserver:v1 13 | b. change the tag 14 | docker image tag lovelearnlinux/webserver:v1 imageserver.networknuts.net:5000/webserver:v1 15 | c. push the image 16 | docker push imageserver.networknuts.net:5000/webserver:v1 17 | 18 | 4. Check the images in private registry 19 | - curl -X GET http://10.0.0.88:5000/v2/_catalog 20 | - curl -X GET http://10.0.0.88:5000/v2/webserver/tags/list 21 | you should be able to see images and their tags 22 | 23 | Step #2 - Check from kubernetes nodes, if they can see images inside the private registry 24 | 25 | 1. Check if images are visible 26 | - curl -X GET http://imageserver.networknuts.net:5000/v2/_catalog 27 | - curl -X GET http://imageserver.networknuts.net:5000/v2/webserver/tags/list 28 | 29 | this should work 30 | 31 | 2. Configure container engine to refer image registry on 10.0.0.88:5000 (imageserver.networknuts.net:5000) 32 | 33 | a. Append these lines in /etc/containers/registries.conf. 
As we are using insecure registry 34 | [[registry]] 35 | insecure = true 36 | location = "imageserver.networknuts.net" 37 | 38 | b. Modify /etc/crio/crio.conf, to look like this 39 | insecure_registries = ["imageserver.networknuts.net"] 40 | 41 | registries = [ 42 | "imageserver.networknuts.net:5000" 43 | # "docker.io" 44 | ] 45 | 46 | systemctl daemon-reload 47 | systemctl restart crio 48 | 49 | 3. Test from kubernetes node by pulling image from private registry 50 | - crictl pull imageserver.networknuts.net:5000/webserver:v1 51 | 52 | this should pull the image on kubernetes node from private registry hosted on 53 | imageserver.networknust.net:5000 54 | 55 | - crictl images 56 | 57 | 58 | ===== 59 | In case you want to store your registry contents at a specific location on your host filesystem, such as if you have an SSD or SAN mounted into a particular directory. 60 | 61 | $ docker run -d -p 5000:5000 --restart=always --name registry -v /mnt/some-directory:/var/lib/registry registry:2 62 | 63 | ===== 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/remove-stuck-namespaces: -------------------------------------------------------------------------------- 1 | ## This is because of finalizers. 2 | 3 | NS=`kubectl get ns |grep Terminating | awk 'NR==1 {print $1}'` \ 4 | && kubectl get namespace "$NS" -o json \ 5 | | tr -d "\n" | sed "s/\"finalizers\": \[[^]]\+\]/\"finalizers\": []/" \ 6 | | kubectl replace --raw /api/v1/namespaces/$NS/finalize -f - 7 | 8 | kubectl get namespace 9 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/restore-etcd-using-etcdctl: -------------------------------------------------------------------------------- 1 | ETCDCTL_API=3 etcdctl --data-dir="/var/lib/etcd-backup" \ 2 | --endpoints=https://127.0.0.1:2379 \ 3 | --cacert=/etc/kubernetes/pki/etcd/ca.crt \ 4 | --cert=/etc/kubernetes/pki/etcd/server.crt \ 5 | --key=/etc/kubernetes/pki/etcd/server.key \ 6 | snapshot restore restore-filename.db 7 | 8 | 9 | Change these in - vi /etc/kubernetes/manifests/etcd.yaml 10 | 11 | spec: 12 | containers: 13 | - command: 14 | - --data-dir=/var/lib/ 15 | 16 | volumeMounts: 17 | - mountPath: 18 | name: etcd-data 19 | 20 | volumes: 21 | - hostPath: 22 | path: 23 | type: DirectoryOrCreate 24 | name: etcd-data 25 | 26 | 27 | === save and exit 28 | 29 | Then execute 30 | 31 | kubectl -n kube-system delete pod 32 | OR 33 | systemctl restart kubelet.service 34 | (or both) 35 | 36 | wait for sometimes and then give 37 | 38 | kubectl get nodes 39 | kubectl get pods 40 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/script-with-config-map: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: mytestscript 5 | data: 6 | test.sh: | 7 | echo "testing script" 8 | df -h 9 | --- 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | metadata: 13 | name: nginx-deployment 14 | labels: 15 | app: nginx 16 | spec: 17 | replicas: 1 18 | selector: 19 | matchLabels: 20 | app: nginx 21 | template: 22 | metadata: 23 | labels: 24 | app: nginx 25 | spec: 26 | volumes: 27 | - name: testing 28 | configMap: 29 | name: mytestscript 30 | defaultMode: 0777 31 | containers: 32 | - name: nginx 33 | image: lovelearnlinux/webserver:v1 34 | command: ["/bin/bash", "./tmp/test.sh"] 35 | ports: 36 | - containerPort: 80 37 | volumeMounts: 
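# the 'testing' ConfigMap volume is mounted at /tmp below, which is where the command above expects test.sh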
38 | - mountPath: /tmp 39 | name: testing 40 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/statefulsets-app.txt: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: postgres 6 | labels: 7 | app: postgres 8 | spec: 9 | ports: 10 | - name: postgres 11 | port: 5432 12 | clusterIP: None 13 | selector: 14 | app: postgres 15 | 16 | --- 17 | apiVersion: apps/v1 18 | kind: StatefulSet 19 | metadata: 20 | name: postgres 21 | spec: 22 | selector: 23 | matchLabels: 24 | app: postgres 25 | serviceName: postgres 26 | replicas: 3 27 | template: 28 | metadata: 29 | labels: 30 | app: postgres 31 | spec: 32 | initContainers: 33 | - name: postgres-init 34 | image: postgres:latest 35 | command: 36 | - bash 37 | - "-c" 38 | - | 39 | set -ex 40 | [[ `hostname` =~ -([0-9]+)$ ]] || exit 1 41 | ordinal=${BASH_REMATCH[1]} 42 | if [[ $ordinal -eq 0 ]]; then 43 | printf "I am the primary" 44 | else 45 | printf "I am a read-only replica" 46 | fi 47 | containers: 48 | - name: postgres 49 | image: postgres:latest 50 | env: 51 | - name: POSTGRES_USER 52 | value: postgres 53 | - name: POSTGRES_PASSWORD 54 | value: postgres 55 | - name: POD_IP 56 | valueFrom: 57 | fieldRef: 58 | apiVersion: v1 59 | fieldPath: status.podIP 60 | ports: 61 | - name: postgres 62 | containerPort: 5432 63 | livenessProbe: 64 | exec: 65 | command: 66 | - "sh" 67 | - "-c" 68 | - "pg_isready --host $POD_IP" 69 | initialDelaySeconds: 30 70 | periodSeconds: 5 71 | timeoutSeconds: 5 72 | readinessProbe: 73 | exec: 74 | command: 75 | - "sh" 76 | - "-c" 77 | - "pg_isready --host $POD_IP" 78 | initialDelaySeconds: 5 79 | periodSeconds: 5 80 | timeoutSeconds: 1 81 | volumeMounts: 82 | - name: data 83 | mountPath: /var/lib/postgresql/data 84 | volumeClaimTemplates: 85 | - metadata: 86 | name: data 87 | spec: 88 | accessModes: ["ReadWriteOnce"] 89 | resources: 90 | requests: 91 | storage: 1Gi 92 | 93 | --- 94 | # kubectl logs postgres-0 -c postgres-init 95 | # kubectl logs postgres-1 -c postgres-init 96 | # kubectl scale sts postgres --replicas 5 97 | # kubectl scale sts postgres --replicas 2 98 | # kubectl get pv 99 | # kubectl get pvc 100 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/topology-spread.yaml: -------------------------------------------------------------------------------- 1 | ## 2 | #YOU SHOULD have min 3 node cluster for this 3 | ## 4 | #This configuration will make sure that one pod is 5 | #deployed on each node, maintaining a balance and HA 6 | #of application 7 | # 8 | apiVersion: v1 9 | kind: Namespace 10 | metadata: 11 | name: spread 12 | --- 13 | apiVersion: apps/v1 14 | kind: Deployment 15 | metadata: 16 | name: spread 17 | namespace: spread 18 | spec: 19 | replicas: 3 20 | selector: 21 | matchLabels: 22 | type: spread 23 | template: 24 | metadata: 25 | labels: 26 | type: spread 27 | spec: 28 | topologySpreadConstraints: 29 | - maxSkew: 1 30 | topologyKey: kubernetes.io/hostname 31 | whenUnsatisfiable: ScheduleAnyway 32 | labelSelector: 33 | matchLabels: 34 | type: spread 35 | containers: 36 | - name: pause 37 | image: lovelearnlinux/webserver:v1 38 | 39 | # NOW try scaling the deployment to 6 replicas 40 | # and notice how pods are deployed on nodes 41 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/using visual studio code: 
-------------------------------------------------------------------------------- 1 | Using Workstation to access kubernetes cluster like in production 2 | 3 | Step #1 - Install openssh server and git 4 | # sudo apt install openssh-server git 5 | # sudo systemctl start sshd 6 | # sudo systemctl status sshd 7 | 8 | Step #2 - Install kubectl on workstation 9 | # sudo snap install kubectl --classic 10 | 11 | Step #3 - Copy ~/.kube/config file from manager to workstation 12 | 13 | Step #4 - Install visual studio code on workstation 14 | 15 | Go to - https://code.visualstudio.com/ 16 | 17 | Step #5 - Add kubernetes extenstion in visual studio code 18 | 19 | Step #6 - Click on "kubernetes" icon on left hand side panel. You should be able to explore your cluster 20 | 21 | Step #7 - Clone networknuts sample repo to your workstation machine 22 | # git clone https://github.com/networknuts/kubernetes 23 | 24 | Step #8 - Click on "Explorer" icon on top left hand side panel and browse to your "kubernetes" folder 25 | 26 | Step #9 - Go inside chapter1 and select myfirstpod.yml from left hand side panel, you should see yaml in right hand side panel 27 | 28 | Step #10 - Use "ctrl + shift + p" to enter command panel and write - kubernetes - select "create" 29 | 30 | Step #11 - Click on "kubernetes" icon on left hand side and go inside workloads->pods. You should be able to see pod 31 | 32 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/using-environment-variables: -------------------------------------------------------------------------------- 1 | ###USING ENVIRONMENT VARIABLES IN KUBERNETES YAML 2 | 3 | Step #1 - Create a environment variable 4 | 5 | export NGINX_REPLICAS=3 6 | 7 | Step #2 - Refer it in yaml - mydeployment.yaml 8 | 9 | --- 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | metadata: 13 | name: nginx-deployment 14 | labels: 15 | app: nginx 16 | spec: 17 | replicas: ${NGINX_REPLICAS} 18 | selector: 19 | matchLabels: 20 | app: nginx 21 | minReadySeconds: 10 22 | strategy: 23 | type: RollingUpdate 24 | rollingUpdate: 25 | maxUnavailable: 1 26 | maxSurge: 1 27 | template: 28 | metadata: 29 | labels: 30 | app: nginx 31 | spec: 32 | containers: 33 | - name: nginx 34 | image: lovelearnlinux/webserver:v1 35 | ports: 36 | - containerPort: 80 37 | resources: 38 | limits: 39 | cpu: 200m 40 | memory: 200Mi 41 | requests: 42 | cpu: 100m 43 | memory: 100Mi 44 | --- 45 | apiVersion: v1 46 | kind: Service 47 | metadata: 48 | name: nginx-dep-svc 49 | spec: 50 | selector: 51 | app: nginx 52 | ports: 53 | - protocol: TCP 54 | port: 80 55 | targetPort: 80 56 | 57 | Step #3 - Use "envsubst" utility to apply 58 | 59 | envsubst < mydeployment.yaml | kubectl apply -f - 60 | 61 | -------------------------------------------------------------------------------- /ch-10-high-value-extra/validate-kubernetes-yaml: -------------------------------------------------------------------------------- 1 | === VALIDATING YAML 2 | 3 | 1. Kubeval 4 | 5 | option: a - Install locally 6 | 7 | vim kubeval-install.sh 8 | 9 | 10 | #!/bin/bash 11 | wget https://github.com/instrumenta/kubeval/releases/latest/download/kubeval-linux-amd64.tar.gz 12 | tar xf kubeval-linux-amd64.tar.gz 13 | sudo cp kubeval /usr/local/bin 14 | 15 | ### save and exit 16 | 17 | Usage - 18 | 19 | # kubeval your-kubernetes-configuration.yaml 20 | 21 | option: b - Check online 22 | 23 | https://validkube.com/ 24 | 25 | 26 | 2. 
Kube-Score 27 | 28 | option: a - Check online 29 | 30 | https://kube-score.com/ 31 | 32 | option: b - Install locally 33 | 34 | https://github.com/zegl/kube-score/releases 35 | -------------------------------------------------------------------------------- /ch-11-challenges-one/readme.md: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # 🏆 Certified Kubernetes Administrator (CKA) 2025 - Challenge Tasks 4 | 5 | Welcome to the CKA Challenge! Below are 17 practical tasks designed to test your skills and readiness for the CKA exam. Each task reflects real-world Kubernetes administration scenarios. Let's dive in! 6 | 7 | --- 8 | 9 | ## 📘 Task 1: RBAC - Extracting Information 10 | 11 | - Extract all kubeconfig context names to `/tmp/contexts`, one per line. 12 | - Write the current context name to `/tmp/current-context`. 13 | - Base64-decode the client certificate of user `account-0027` and save it to `/tmp/cert`. 14 | 15 | --- 16 | 17 | ## 📦 Task 2: Helm - Installing Application 18 | 19 | 1. Create the `minio` namespace. 20 | 2. Install the `minio/operator` Helm chart into the `minio` namespace as release `minio-operator`. 21 | 3. Edit `/opt/course/2/minio-tenant.yaml` to enable SFTP: 22 | ```yaml 23 | features: 24 | enableSFTP: true 25 | ``` 26 | 4. Apply the updated Tenant resource. 27 | 28 | --- 29 | 30 | ## 📊 Task 3: Pod Management 31 | 32 | - In the `project-h800` namespace, scale the two `o3db-*` Pods down to **one replica** to conserve resources. 33 | 34 | --- 35 | 36 | ## ⚖️ Task 4: Pod Management & QoS 37 | 38 | - Identify the Pods in `project-c13` that are likely to be **terminated first** under resource pressure. 39 | - Write their names to `/tmp/pods-terminated-first.txt`. 40 | 41 | --- 42 | 43 | ## 🔄 Task 5: HPA Setup 44 | 45 | Replace the existing autoscaler with a HorizontalPodAutoscaler for the `api-gateway`: 46 | 47 | 1. Remove the `horizontal-scaling-config` ConfigMap. 48 | 2. Create an HPA: 49 | - Name: `api-gateway` 50 | - Min replicas: 2 51 | - Max replicas: 4 (6 in prod) 52 | - Target CPU utilization: 50% 53 | 3. Apply changes using: 54 | ```bash 55 | kubectl kustomize /opt/course/5/api-gateway/staging | kubectl apply -f - 56 | kubectl kustomize /opt/course/5/api-gateway/prod | kubectl apply -f - 57 | ``` 58 | 59 | --- 60 | 61 | ## 💾 Task 6: Persistent Volumes 62 | 63 | - **PersistentVolume** 64 | - Name: `safari-pv` 65 | - Capacity: 2Gi 66 | - AccessMode: `ReadWriteOnce` 67 | - Path: `/Volumes/Data` 68 | - **PersistentVolumeClaim** in `project-t230` 69 | - Name: `safari-pvc` 70 | - Storage: 2Gi 71 | - **Deployment** 72 | - Name: `safari` 73 | - Mount: `/tmp/safari-data` 74 | - Image: `httpd:2-alpine` 75 | 76 | --- 77 | 78 | ## 📈 Task 7: Monitoring Resource Consumption 79 | 80 | - Write two bash scripts using `kubectl` to gather resource metrics (details assumed to be provided separately). 81 | 82 | --- 83 | 84 | ## ⚙️ Task 8: Upgrade & Join Worker 85 | 86 | - Upgrade `cka3962-node1` to match the control plane Kubernetes version. 87 | - Join it to the cluster using `kubeadm`. 88 | 89 | --- 90 | 91 | ## 🔐 Task 9: Service Account 92 | 93 | - Create a Pod `api-contact` in `project-swan` using the `secret-reader` ServiceAccount. 94 | - Use `curl` inside the Pod to query all Secrets from the Kubernetes API. 95 | - Save the result to `/opt/course/9/result.json`. 
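A minimal sketch of the in-Pod API call (the token and CA paths are the standard ServiceAccount mount; the exec command and the way you capture the output are assumptions, not a prescribed solution):

```bash
# open a shell inside the Pod, e.g.:
#   kubectl -n project-swan exec -it api-contact -- sh
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
CA=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# list Secrets cluster-wide through the in-cluster API endpoint
curl --cacert $CA -H "Authorization: Bearer $TOKEN" \
  https://kubernetes.default.svc/api/v1/secrets
# save the JSON response to /opt/course/9/result.json on the exam host
```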
96 | 97 | --- 98 | 99 | ## 🔑 Task 10: RBAC 100 | 101 | In `project-hamster`: 102 | 103 | - Create: 104 | - ServiceAccount: `processor` 105 | - Role: `processor` 106 | - RoleBinding: `processor` 107 | - Grant permissions to **create** only: 108 | - Secrets 109 | - ConfigMaps 110 | 111 | --- 112 | 113 | ## 🚫 Task 11: Taints and Tolerations 114 | 115 | In `project-tiger`: 116 | 117 | - Create a DaemonSet `ds-important`: 118 | - Image: `httpd:2-alpine` 119 | - Labels: `id=ds-important`, `uuid=18426a0b-5f59-4e10-923f-c0e078e82462` 120 | - Resource requests: 10m CPU, 10Mi Memory 121 | - Must run on all nodes (including control planes) 122 | 123 | --- 124 | 125 | ## 🚀 Task 12: Deployment 126 | 127 | In `project-tiger`: 128 | 129 | - Deployment: `deploy-important` 130 | - Replicas: 3 131 | - Labels: `id=very-important` 132 | - Two containers: 133 | - `container1`: `nginx:1-alpine` 134 | - `container2`: `google/pause` 135 | - Use `topologyKey: kubernetes.io/hostname` to ensure 1 Pod per node 136 | 137 | --- 138 | 139 | ## 🌐 Task 13: Gateway API 140 | 141 | In `project-r500`: 142 | 143 | 1. Migrate from Ingress (`/opt/course/13/ingress.yaml`) to Gateway API. 144 | 2. Create HTTPRoute: `traffic-director` 145 | - Replicate routes 146 | - Add `/auto` path logic: 147 | - Redirect to `/mobile` if `User-Agent: mobile` 148 | - Else redirect to `/desktop` 149 | 3. Ensure it works with: 150 | ```bash 151 | curl r500.gateway:30080/desktop 152 | curl r500.gateway:30080/mobile 153 | curl r500.gateway:30080/auto -H "User-Agent: mobile" 154 | curl r500.gateway:30080/auto 155 | ``` 156 | 157 | --- 158 | 159 | ## 📜 Task 14: Cluster Certificates 160 | 161 | 1. Check kube-apiserver certificate expiration with `openssl` or `cfssl`. 162 | 2. Save the date to `/opt/course/14/expiration`. 163 | 3. Use `kubeadm` to confirm the expiration. 164 | 4. Save the `kubeadm` renew command to `/opt/course/14/kubeadm-renew-certs.sh`. 165 | 166 | --- 167 | 168 | ## 🛡️ Task 15: Network Policy 169 | 170 | In `project-snake`, create `np-backend`: 171 | 172 | - Allow `backend-*` Pods to: 173 | - Connect to `db1-*` on port **1111** 174 | - Connect to `db2-*` on port **2222** 175 | 176 | --- 177 | 178 | ## 🧭 Task 16: CoreDNS Custom Domain 179 | 180 | 1. Backup current CoreDNS config to `/tmp/coredns_backup.yaml`. 181 | 2. Update CoreDNS to support: 182 | ``` 183 | SERVICE.NAMESPACE.custom-domain 184 | ``` 185 | in addition to `SERVICE.NAMESPACE.cluster.local`. 186 | 187 | - Test using: 188 | ```bash 189 | nslookup kubernetes.default.svc.cluster.local 190 | nslookup kubernetes.default.svc.custom-domain 191 | ``` 192 | 193 | --- 194 | 195 | ## 🐅 Task 17: Container Debugging 196 | 197 | In `project-tiger`: 198 | 199 | 1. Create Pod `tigers-reunite` with labels `pod=container`, `container=pod`, image `httpd:2-alpine`. 200 | 2. SSH into the node where it’s scheduled. 201 | 3. Use `crictl` to: 202 | - Save container ID and `info.runtimeType` to `/tmp/pod-container.txt` 203 | - Save container logs to `/tmp/pod-container.log` 204 | 205 | --- 206 | 207 | 208 | -------------------------------------------------------------------------------- /ch-12-challenges-two/readme.md: -------------------------------------------------------------------------------- 1 | # Certified Kubernetes Administrator (CKA) Challenges 2 | 3 | A curated list of hands-on Kubernetes tasks to test your skills and prepare for the CKA exam. 
4 | 5 | --- 6 | 7 | ## 📅 Task #1 - Scheduling 8 | 9 | - Deploy a pod named `heavy-pod` with **Guaranteed QoS**: 10 | - CPU: `50m` 11 | - Memory: `50Mi` 12 | - Ensure the pod is in the **Running** state. 13 | - Edit the pod's resources to: 14 | - CPU: `100m` 15 | - Memory: `100Mi` 16 | 17 | --- 18 | 19 | ## 🚀 Task #2 - Workload & Scheduling 20 | 21 | - The deployment `video-app` has undergone several **rolling updates and rollbacks**. 22 | - Your task: 23 | - Find the total number of **revisions**. 24 | - Extract the **image name used in the 3rd revision**. 25 | - Record it in a file `app-file.txt` using the format: 26 | ``` 27 | REVISION_TOTAL_COUNT,IMAGE_NAME 28 | ``` 29 | 30 | --- 31 | 32 | ## 🌐 Task #3 - Pod & Service Web App 33 | 34 | - **Pod `app-pod`**: 35 | - Container name: `app-container` 36 | - Image: `httpd:latest` 37 | - Port: `80` 38 | - Label: `app=app-lab` 39 | 40 | - **Service `app-svc`**: 41 | - Type: `ClusterIP` 42 | - Port: `80` 43 | - Selects the `app-pod` 44 | 45 | - **Access**: 46 | - Use `kubectl port-forward` to map local port to the Pod. 47 | - Use `curl` to test the app returns: `It works!` 48 | 49 | --- 50 | 51 | ## 🔐 Task #4 - ConfigMap 52 | 53 | - Create a `ConfigMap` named `creds` with the content: 54 | ```bash 55 | username=batman 56 | ``` 57 | 58 | --- 59 | 60 | ## 🔧 Task #5 - ConfigMap & Deployment 61 | 62 | - A deployment `webapp-deployment` needs dynamic env var control. 63 | - Steps: 64 | - Create a `ConfigMap` named `webapp-deployment-config-map` with: 65 | ```bash 66 | APPLICATION=web-app 67 | ``` 68 | - Update the deployment to use this ConfigMap as an environment variable. 69 | 70 | --- 71 | 72 | ## 🧪 Task #6 - Namespaces, Deployment, Service, HPA 73 | 74 | - **Namespace**: `test` 75 | - **Deployment**: `testing-app` (in `test` namespace) 76 | - Replicas: `3` 77 | - Labels: `app=testing-app` 78 | - Node selector: `cpu=i5` 79 | - QoS: `Burstable` 80 | - Image: `nginx:latest` 81 | - Port: `80/tcp` 82 | 83 | - **Service**: `testing-app-svc` 84 | - Type: `ClusterIP` 85 | - Exposes deployment on port `80` 86 | 87 | - **Horizontal Pod Autoscaler (HPA)**: 88 | - Name: `testing-app-hpa` 89 | - Min pods: `3` 90 | - Max pods: `10` 91 | - CPU utilization target: `20%` 92 | - Scale-down stabilization: `30s`, max 1 pod per 20s 93 | - Scale-up stabilization: `20s`, max 1 pod per 30s 94 | 95 | --- 96 | 97 | ## 🥇 Task #7 - Priority Classes 98 | 99 | - Create a `PriorityClass` named `high-value` with value `2` less than max. 100 | - Create a deployment `high-value-application`: 101 | - Image: `lovelearnlinux/webserver:v1` 102 | - Replicas: `3` 103 | - Label: `app=webapp` 104 | - Uses the `high-value` priority class 105 | 106 | --- 107 | 108 | ## 🌍 Task #8 - ConfigMap with HTML Template 109 | 110 | - Namespace: `prod` 111 | - ConfigMap: `welcome` with: 112 | ```html 113 | 114 | sample application using configmap 115 |
welcome to our test environment
116 | 117 | ``` 118 | 119 | - Deployment: `welcome-app` 120 | - Image: `lovelearnlinux/webserver:v1` 121 | - Label: `app=welcome` 122 | - QoS: `Burstable` 123 | - Mounts ConfigMap to `/var/www/html` 124 | 125 | - Service: `welcome-app-svc`, ClusterIP on port `80` 126 | 127 | - Update ConfigMap: 128 | Replace: 129 | ```html 130 |
welcome to our test environment
131 | ``` 132 | with: 133 | ```html 134 |
welcome to our best environment
135 | ``` 136 | 137 | - Confirm the changes are applied. 138 | 139 | --- 140 | 141 | ## 🤝 Task #9 - Affinity & Anti-Affinity 142 | 143 | 1. Deploy pod `webapp`: 144 | - Image: `nginx:latest` 145 | - Label: `name=webapp` 146 | 147 | 2. Deploy pod `dbapp` **on the same node** as `webapp`: 148 | - Image: `redis` 149 | - Label: `name=dbapp` 150 | - Use `topologyKey: kubernetes.io/hostname` 151 | 152 | 3. Delete `dbapp` and redeploy it **on a different node** than `webapp`. 153 | 154 | --- 155 | 156 | ## 🔐 Task #10 - RBAC & Service Account 157 | 158 | 1. Namespace: `nsone` 159 | 2. Create a Service Account: `nsone-sa` 160 | 3. Grant access to: 161 | - Create, get, list: **pods** and **configmaps** 162 | - Scope: `nsone` namespace only 163 | 4. Create a pod using `nsone-sa` 164 | 5. Verify permissions using: 165 | ```bash 166 | kubectl auth can-i 167 | ``` 168 | 169 | -------------------------------------------------------------------------------- /cluster-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #this will create kubernetes cluster 3 | #and install all packages required on 4 | #all the nodes. 5 | 6 | #run this on all machines 7 | 8 | echo "### Enabling Bridging" 9 | sleep 2 10 | cat < /dev/null 26 | 27 | #apt-get update 28 | 29 | #apt-get install -y docker.io 30 | 31 | echo '\033[1mInstalling Kubeadm, Kubelet and Kubectl\033[0m' 32 | apt-get update 33 | 34 | apt-get install -y apt-transport-https ca-certificates curl 35 | 36 | sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg 37 | 38 | echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list 39 | 40 | apt-get update 41 | 42 | apt-get install -y kubelet=1.20.10-00 kubeadm=1.20.10-00 kubectl=1.20.10-00 docker.io 43 | 44 | apt-mark hold kubelet kubeadm kubectl 45 | 46 | ## Installing openssh-server 47 | echo 48 | echo '\033[7mInstalling Openssh-server & starting sshd\033[0m' 49 | sudo apt install -y openssh-server 50 | systemctl start sshd 51 | echo 52 | 53 | echo '\033[7mInitializing the cluster\033[0m' 54 | 55 | ## If you are going to use a different plugin you'll want 56 | ## to use a different IP address, found in that plugins 57 | ## readme file. 
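## For example, this repo's flannel-install-steps and on-manager-1.28.0 scripts
## use 172.16.0.0/16 as the pod network, so with flannel you would run something like:
## sudo kubeadm init --pod-network-cidr 172.16.0.0/16 --apiserver-advertise-address 10.0.0.100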
58 | 59 | sleep 3 60 | 61 | sudo kubeadm init --pod-network-cidr 192.168.0.0/16 --apiserver-advertise-address 10.0.0.100 62 | 63 | sleep 5 64 | 65 | echo '\033[7mRunning the steps explained at the end of script for you\033[0m' 66 | 67 | mkdir -p $HOME/.kube 68 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 69 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 70 | 71 | echo '\033[7mInstalling Calico Network Plugin\033[0m' 72 | 73 | kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml 74 | echo 75 | echo 76 | echo '\033[5mWait for 40 seconds\033[0m' 77 | sleep 40 78 | echo '\033[7mYou should see this node in the output below\033[0m' 79 | echo '\033[7mIt can take some time to show READY\033[0m' 80 | echo 81 | kubectl get node 82 | echo 83 | echo 84 | echo '\033[5mScript finished, move to next step\033[0m' 85 | echo '\033[5mDont forget to comment swap line from /etc/fstab\033[0m' 86 | -------------------------------------------------------------------------------- /k8s-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #/* **************** Network Nuts k8snode.sh **************** */ 3 | #!/bin/bash -x 4 | ## v1.19.0 CKA 5 | echo '\033[1mThis Script will work on Ubuntu 18.04\033[0m' 6 | 7 | echo '\033[1mAdding Bridge\033[0m' 8 | cat < /dev/null 26 | 27 | #sudo apt-get update 28 | 29 | echo '\033[1mInstalling Kubeadm, Kubelet and Kubectl\033[0m' 30 | apt-get update 31 | 32 | sudo apt-get install -y apt-transport-https ca-certificates curl 33 | 34 | sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg 35 | 36 | sudo echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list 37 | 38 | sudo apt-get update 39 | 40 | sudo apt-get install -y kubelet=1.20.10-00 kubeadm=1.20.10-00 kubectl=1.20.10-00 docker.io 41 | 42 | sudo apt-mark hold kubelet kubeadm kubectl 43 | 44 | ## Installing openssh-server 45 | echo 46 | echo '\033[7mInstalling Openssh-server & starting sshd\033[0m' 47 | sudo apt install -y openssh-server 48 | sudo systemctl start sshd 49 | echo 50 | 51 | echo 52 | echo '\033[7mScript finished. 
You need to join node to cluster\033[0m' 53 | echo '\033[7mRun on master node to get join token - \033[0m' 54 | echo 55 | echo '\033[1mkubeadm token create --print-join-command\033[0m' 56 | echo 57 | echo 58 | echo '\033[5mkubeadm token create --print-join-command\033[0m' 59 | echo 60 | echo 61 | -------------------------------------------------------------------------------- /scripts/cluster-uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kubeadm reset 3 | sudo apt-get purge kubeadm kubectl kubelet kubernetes-cni kube* 4 | sudo apt-get autoremove 5 | sudo rm -rf ~/.kube 6 | -------------------------------------------------------------------------------- /scripts/common-1.28.0-with-proxy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Common setup for ALL SERVERS (Control Plane and Nodes) 4 | # will run on Ubuntu Server 22.04 LTS 5 | # If you see some warning at certificates, RUN AGAIN ## 6 | # OUR PROXY is - 10.0.0.200 7 | 8 | #set -euxo pipefail 9 | apt-get update -y 10 | # Variable Declaration 11 | 12 | KUBERNETES_VERSION="1.28.0-00" 13 | 14 | # disable swap 15 | echo "" 16 | echo "\033[4mDisabling Swap Memory.\033[0m" 17 | echo "" 18 | sudo swapoff -a 19 | sed -e '/swap/s/^/#/g' -i /etc/fstab 20 | 21 | # Install CRI-O Runtime 22 | echo "" 23 | echo "\033[4mInstalling CRI-O runtime.\033[0m" 24 | echo "" 25 | OS="xUbuntu_22.04" 26 | VERSION="1.23" 27 | 28 | # Create the .conf file to load the modules at bootup 29 | cat < /etc/default/kubelet << EOF 132 | #KUBELET_EXTRA_ARGS=--node-ip=$local_ip 133 | #EOF 134 | 135 | echo "====" 136 | echo "Generate token on manager using" 137 | echo "===" 138 | echo "kubeadm token create --print-join-command" 139 | echo "" 140 | echo "" 141 | #echo "REBOOTING in 10 seconds" 142 | #sleep 10 143 | #reboot 144 | -------------------------------------------------------------------------------- /scripts/common-1.28.0-without-proxy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Common setup for ALL SERVERS (Control Plane and Nodes) 4 | # will run on Ubuntu Server 22.04 LTS 5 | # If you see some warning at certificates, RUN AGAIN 6 | 7 | #set -euxo pipefail 8 | apt-get update -y 9 | # Variable Declaration 10 | 11 | KUBERNETES_VERSION="1.29.0-1.1" 12 | 13 | # disable swap 14 | echo "" 15 | echo "\033[4mDisabling Swap Memory.\033[0m" 16 | echo "" 17 | sudo swapoff -a 18 | sed -e '/swap/s/^/#/g' -i /etc/fstab 19 | 20 | # Install CRI-O Runtime 21 | echo "" 22 | echo "\033[4mInstalling CRI-O runtime.\033[0m" 23 | echo "" 24 | OS="xUbuntu_22.04" 25 | 26 | VERSION="1.23" 27 | 28 | # Create the .conf file to load the modules at bootup 29 | cat < /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list 55 | echo "deb [signed-by=/usr/share/keyrings/libcontainers-crio-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list 56 | mkdir -p /usr/share/keyrings 57 | curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | gpg --dearmor -o /usr/share/keyrings/libcontainers-archive-keyring.gpg 58 | curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/Release.key | gpg --dearmor -o /usr/share/keyrings/libcontainers-crio-archive-keyring.gpg 
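# Optional check: confirm both repository keyrings exist before refreshing apt, e.g.
#   ls -l /usr/share/keyrings/libcontainers-archive-keyring.gpg /usr/share/keyrings/libcontainers-crio-archive-keyring.gpg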
59 | sudo apt-get update -y 60 | sudo apt-get install cri-o cri-o-runc cri-tools -y 61 | sudo systemctl daemon-reload 62 | sudo systemctl enable crio --now 63 | 64 | echo "" 65 | echo "\033[4mConfiguring CRI-O to use dockerhub.\033[0m" 66 | echo "" 67 | cat < /etc/default/kubelet 119 | #echo "KUBELET_EXTRA_ARGS='--node-ip=$ip_address'" > /etc/default/kubelet 120 | #systemctl daemon-reload && systemctl restart kubelet 121 | #echo "" 122 | #echo "====" 123 | #echo "" 124 | #echo "/etc/default/kubelet updated for internal IP address" 125 | #echo "" 126 | #echo "====" 127 | 128 | local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')" 129 | cat > /etc/default/kubelet << EOF 130 | KUBELET_EXTRA_ARGS=--node-ip=$local_ip 131 | EOF 132 | 133 | echo "====" 134 | echo "Generate token on manager using" 135 | echo "===" 136 | echo "kubeadm token create --print-join-command" 137 | echo "" 138 | echo "" 139 | echo "REBOOTING in 10 seconds" 140 | sleep 10 141 | -------------------------------------------------------------------------------- /scripts/common-1.28.9-without-proxy-may-2024: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Common setup for ALL SERVERS (Control Plane and Nodes) 4 | # will run on Ubuntu Server 22.04 LTS 5 | # If you see some warning at certificates, RUN AGAIN 6 | 7 | #set -euxo pipefail 8 | apt-get update -y 9 | # Variable Declaration 10 | 11 | KUBERNETES_VERSION="1.29.0-1.1" 12 | 13 | # disable swap 14 | echo "" 15 | echo "\033[4mDisabling Swap Memory.\033[0m" 16 | echo "" 17 | sudo swapoff -a 18 | sed -e '/swap/s/^/#/g' -i /etc/fstab 19 | 20 | # Install CRI-O Runtime 21 | echo "" 22 | echo "\033[4mInstalling CRI-O | Kubelet | Kubeadm | Kubectl.\033[0m" 23 | echo "" 24 | OS="xUbuntu_22.04" 25 | 26 | VERSION="1.23" 27 | 28 | # Create the .conf file to load the modules at bootup 29 | cat < /dev/null 66 | sudo apt-get update 67 | 68 | #sudo apt-get install containerd.io 69 | #sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 70 | sudo apt-get install containerd.io 71 | containerd config default > /etc/containerd/config.toml 72 | sed -i "/SystemdCgroup = false/c\ SystemdCgroup = true" /etc/containerd/config.toml 73 | sudo systemctl restart containerd 74 | 75 | 76 | #### NEW location for keyrings 77 | echo 78 | echo 79 | echo "****************************************" 80 | echo "INSTALLING CRI-O KUBEADM KUBECTL KUBELET" 81 | echo "****************************************" 82 | echo 83 | echo 84 | apt-get update -y 85 | apt-get install -y software-properties-common curl 86 | curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | 87 | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg 88 | echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /" | 89 | tee /etc/apt/sources.list.d/kubernetes.list 90 | 91 | #curl -fsSL https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/deb/Release.key | 92 | # gpg --dearmor -o /etc/apt/keyrings/cri-o-apt-keyring.gpg 93 | #echo "deb [signed-by=/etc/apt/keyrings/cri-o-apt-keyring.gpg] https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/deb/ /" | 94 | # tee /etc/apt/sources.list.d/cri-o.list 95 | 96 | apt-get update -y 97 | apt-get install -y kubelet kubeadm kubectl 98 | 99 | 100 | 101 | 102 | echo "" 103 | echo "\033[4mConfiguring CRI-O to use dockerhub.\033[0m" 104 | 
echo "" 105 | #cat < /dev/null 13 | sudo apt-get update -y 14 | sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y 15 | sudo [ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.22.0/kind-linux-amd64 16 | chmod +x ./kind 17 | sudo mv ./kind /usr/local/bin/kind 18 | sudo systemctl start docker 19 | 20 | -------------------------------------------------------------------------------- /scripts/flannel-install-steps: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #FOR FLANNEL WITH HELM on KUBERNETES 3 | echo 4 | echo "**********" 5 | echo "---- Installing Flannel using Helm ----" 6 | echo " --- no need to install calico now ---" 7 | echo "**********" 8 | 9 | #step 1 - install helm 10 | 11 | curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null 12 | sudo apt-get install apt-transport-https --yes 13 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 14 | sudo apt-get update 15 | sudo apt-get install helm 16 | 17 | #step 2 - install flannel using helm 18 | 19 | kubectl create ns kube-flannel 20 | kubectl label --overwrite ns kube-flannel pod-security.kubernetes.io/enforce=privileged 21 | 22 | helm repo add flannel https://flannel-io.github.io/flannel/ 23 | #change podCidr to your IP range, if you wish 24 | helm install flannel --set podCidr="172.16.0.0/16" --namespace kube-flannel flannel/flannel 25 | -------------------------------------------------------------------------------- /scripts/k8s-manager-1-23-7-aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "disabling swap" 3 | swapoff -a 4 | echo "installing kubernetes version 1.23.7-00" 5 | sudo apt-get update 6 | sudo apt-get install -y apt-transport-https ca-certificates curl 7 | sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg 8 | echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list 9 | apt-get update 10 | apt-get install -y kubelet=1.23.7-00 kubeadm=1.23.7-00 kubectl=1.23.7-00 docker.io 11 | apt-mark hold kubelet kubeadm kubectl 12 | cat </dev/null 2>&1 31 | 32 | sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/g' /etc/containerd/config.toml 33 | 34 | systemctl restart containerd 35 | 36 | systemctl enable containerd 37 | 38 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 39 | 40 | apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main" 41 | 42 | 43 | apt update 44 | 45 | apt install -y kubelet=1.26.0-00 kubeadm=1.26.0-00 kubectl=1.26.0-00 46 | 47 | apt-mark hold kubelet kubeadm kubectl 48 | 49 | kubeadm init --apiserver-advertise-address 10.0.0.100 50 | 51 | mkdir -p $HOME/.kube 52 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 53 | chown $(id -u):$(id -g) $HOME/.kube/config 54 | 55 | kubectl cluster-info 56 | kubectl get nodes 57 | curl https://projectcalico.docs.tigera.io/manifests/calico.yaml -O 58 | kubectl apply -f calico.yaml 59 | -------------------------------------------------------------------------------- /scripts/k8s-node-1-23-7-aws.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "disabling swap" 3 | swapoff -a 4 | echo "installing kubernetes version 1.23.7-00" 5 | sudo apt-get update 6 | sudo apt-get install -y apt-transport-https ca-certificates curl 7 | sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg 8 | echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list 9 | apt-get update 10 | apt-get install -y kubelet=1.23.7-00 kubeadm=1.23.7-00 kubectl=1.23.7-00 docker.io 11 | apt-mark hold kubelet kubeadm kubectl 12 | cat </dev/null 2>&1 30 | 31 | sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/g' /etc/containerd/config.toml 32 | 33 | systemctl restart containerd 34 | 35 | systemctl enable containerd 36 | 37 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 38 | 39 | apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main" 40 | 41 | 42 | apt update 43 | 44 | apt install -y kubelet=1.26.0-00 kubeadm=1.26.0-00 kubectl=1.26.0-00 45 | 46 | apt-mark hold kubelet kubeadm kubectl 47 | 48 | echo "===" 49 | echo "Generate token on manager using" 50 | echo "===" 51 | echo "kubeadm token create --print-join-command" 52 | echo 53 | echo 54 | -------------------------------------------------------------------------------- /scripts/on-manager-1.28.0: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Setup for Control Plane (Master) servers 4 | 5 | swapoff -a 6 | 7 | MASTER_IP="10.0.0.100" 8 | NODENAME=$(hostname -s) 9 | POD_CIDR="172.16.0.0/16" 10 | 11 | apt update -y 12 | sudo kubeadm config images pull 13 | 14 | echo "" 15 | echo "\033[4mPreflight Check Passed: Downloaded All Required Images.\033[0m" 16 | echo "\033[4mNow running kubeadm init.\033[0m" 17 | echo "" 18 | 19 | sudo kubeadm init --apiserver-advertise-address=$MASTER_IP --apiserver-cert-extra-sans=$MASTER_IP --pod-network-cidr=$POD_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap 20 | 21 | mkdir -p "$HOME"/.kube 22 | sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config 23 | sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config 24 | 25 | # OPTION - 1 Install Calico overlay network 26 | 27 | echo 28 | echo 29 | echo "********" 30 | echo "Install Pod Network - calico or flannel" 31 | echo "********" 32 | echo 33 | echo 34 | echo "for calico ..." 35 | echo "get calico.yaml and apply it" 36 | echo "kubectl apply -f calico.yaml" 37 | 38 | # OPTION - 2 Install Flannel overlay network 39 | echo "for flannel ..." 
40 | echo "get instructions from flannel-install-steps file" 41 | -------------------------------------------------------------------------------- /scripts/os-proxy-settings.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #### RUN ON ALL MACHINES of CLUSTER #### 3 | ##### ONLY IF YOU ARE USING PROXY ###### 4 | ##### 10.0.0.200 is our PROXY IP ##### 5 | ##### 10.0.0.100 = manager ##### 6 | ##### 10.0.0.1 = nodeone ##### 7 | ##### 10.0.0.2 = nodetwo ##### 8 | ##### 172.16.0.0/16 = pod network ##### 9 | 10 | clear 11 | echo 12 | echo 13 | echo "********" 14 | echo "configuring proxy for OS" 15 | echo "********" 16 | echo 17 | echo 18 | sleep 5 19 | cat <> ~/.bashrc 116 | 117 | source ~/.bashrc 118 | 119 | 120 | ##### 121 | 122 | ### IN CASE calico pod fails to get IP, run this on manager 123 | kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=en.* 124 | # replace en.* with your interface name 125 | # delete the calico pod and check again 126 | 127 | 128 | 129 | --------------------------------------------------------------------------------