├── .gitignore ├── 00. Exercises ├── HPA │ ├── deployment.yaml │ ├── hpa.yaml │ └── services.yaml ├── Pod │ └── Pod.yaml ├── expose-the-app │ ├── manifest.yaml │ ├── manifest2.yaml │ ├── svc-lb.yaml │ └── svc.yaml ├── ingress-first-principle │ ├── backend.yaml │ ├── db.yaml │ ├── fe.yaml │ └── reverse-proxy.yaml ├── ingress │ ├── ingress.yaml │ ├── manifest-be.yaml │ └── manifest-fe.yaml └── secret │ └── manifest.yaml ├── 01. K8s Core Concepts └── useful-links.md ├── 02. Install Cluster ├── commands.md ├── install-containerd.sh ├── install-k8s-components.sh └── useful-links.md ├── 03. Deploy Application ├── commands.md ├── nginx-deployment.yaml ├── nginx-service.yaml └── useful-links.md ├── 04. External Access ├── README.md ├── clusterip-svc.yaml ├── ingress.yaml ├── load-balancer-svc.yaml ├── node-port-svc.yaml └── useful-links.md ├── 05. Users and Permissions ├── cicd-binding.yaml ├── cicd-role.yaml ├── commands.md ├── dev-cr.yaml ├── dev-crb.yaml ├── dev-tom-csr.yaml ├── jenkins-sa.yaml └── useful-links.md ├── 06. Debugging & Troubleshooting ├── busybox-pod.yaml ├── commands.md └── useful-links.md ├── 07. Multi-container Pods ├── expose-pod-info.yaml ├── multi-container-pod.yaml └── useful-links.md ├── 08. Data Persistence ├── deployment-with-emptydir.yaml ├── deployment-with-pvc.yaml ├── pv-and-pvc.yaml └── useful-links.md ├── 09. Secret & ConfigMap ├── config-as-env-vars.yaml ├── config-as-volumes.yaml └── useful-links.md ├── 10. Resource Requests & Limits ├── commands.md ├── my-deployment.yaml └── useful-links.md ├── 11. Taints & Tolerations, NodeAffinity ├── pod-nodeaffinity.yaml ├── pod-podaffinity.yaml ├── pod-with-node-name.yaml ├── pod-with-node-selector.yaml ├── pod-with-tolerations.yaml └── useful-links.md ├── 12. Readiness & Liveness Probes ├── pod-health-probes.yaml └── useful-links.md ├── 13. Rolling Updates ├── commands.md └── useful-links.md ├── 14. Etcd Backup & Restore ├── commands.md └── useful-links.md ├── 15. K8s Rest API ├── commands.md ├── myscript-role.yaml └── useful-links.md ├── 16. Upgrade K8s Cluster ├── commands.md └── useful-links.md ├── 17. Contexts with Multiple Clusters ├── commands.md ├── kubeconfig-multiple-contexts.yaml └── useful-links.md ├── 18. Renew K8s Certificates ├── commands.md └── useful-links.md ├── 19. Network Policy ├── commands.md ├── demo-database.yaml ├── demo-frontend.yaml ├── demo-np-database.yaml ├── demo-np-frontend.yaml ├── demp-backend.yaml ├── np-example-1.yaml ├── np-example-2.yaml ├── np-example-3.yaml ├── np-example-4.yaml ├── np-example-5.yaml └── useful-links.md ├── InterviewQuestions.md ├── README.md ├── app ├── Readme.md ├── app.py ├── certificate ├── cluster_issuer.yaml ├── db-secret.yaml ├── deploy.yaml ├── dockerfile ├── horizontal_scale.yaml ├── ingress.yaml ├── postgres-cluster.yaml ├── requirements.txt ├── service.yaml └── templates │ └── index.html ├── kind └── config.yaml └── mychart ├── .helmignore ├── Chart.yaml ├── templates ├── NOTES.txt ├── _helpers.tpl ├── deployment.yaml ├── hpa.yaml ├── ingress.yaml ├── service.yaml ├── serviceaccount.yaml └── tests │ └── test-connection.yaml └── values.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store -------------------------------------------------------------------------------- /00. 
Exercises/HPA/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: cpu-deployment 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | app: cpu-app 10 | template: 11 | metadata: 12 | labels: 13 | app: 14 | cpu-app 15 | spec: 16 | containers: 17 | - name: cpu-app 18 | image: 100xdevs/week-28:latest 19 | ports: 20 | - containerPort: 3000 -------------------------------------------------------------------------------- /00. Exercises/HPA/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: cpu-hpa 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: cpu-deployment 10 | minReplicas: 2 11 | maxReplicas: 5 12 | metrics: 13 | - type: Resource 14 | resource: 15 | name: cpu 16 | target: 17 | type: Utilization 18 | averageUtilization: 50 19 | -------------------------------------------------------------------------------- /00. Exercises/HPA/services.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: cpu-service 5 | spec: 6 | type: LoadBalancer 7 | selector: 8 | app: cpu-app 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 3000 -------------------------------------------------------------------------------- /00. Exercises/Pod/Pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: firstpod 5 | labels: # not important 6 | app: firstpod 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | - name: ubuntu 12 | image: ubuntu 13 | command: ["sleep", "infinity"] # This will keep the container running indefinitely by sleeping forever. 14 | -------------------------------------------------------------------------------- /00. Exercises/expose-the-app/manifest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | ports: 12 | - containerPort: 80 13 | - containerPort: 443 14 | -------------------------------------------------------------------------------- /00. Exercises/expose-the-app/manifest2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: httpd 5 | labels: 6 | app: nginx 7 | spec: 8 | containers: 9 | - name: httpd 10 | image: httpd 11 | ports: 12 | - containerPort: 80 13 | - containerPort: 443 14 | -------------------------------------------------------------------------------- /00. Exercises/expose-the-app/svc-lb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-services-lb 5 | annotations: 6 | service.beta.kubernetes.io/do-loadbalancer-protocol: "https" 7 | service.beta.kubernetes.io/do-loadbalancer-tls-passthrough: "true" 8 | spec: 9 | type: LoadBalancer 10 | selector: 11 | app: nginx 12 | ports: 13 | - protocol: TCP 14 | port: 443 15 | targetPort: 80 16 | -------------------------------------------------------------------------------- /00. 
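A quick way to exercise the HPA above, assuming the three HPA manifests are applied and metrics-server is running in the cluster (the load-generator pod and its wget loop are only an illustration of driving traffic at cpu-service):

##### metrics-server is required for the HPA to read CPU usage
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
kubectl top pod

##### generate load and watch the autoscaler react
kubectl run load-gen --rm -it --image=busybox:1.28 --restart=Never -- /bin/sh -c "while true; do wget -q -O- http://cpu-service; done"
kubectl get hpa cpu-hpa -w
kubectl get pod -l app=cpu-app -w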
Exercises/expose-the-app/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: nginx 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 13 | nodePort: 30007 14 | -------------------------------------------------------------------------------- /00. Exercises/ingress-first-principle/backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: backend-team 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | namespace: backend-team 10 | name: backend 11 | spec: 12 | replicas: 2 13 | selector: 14 | matchLabels: 15 | app: backend 16 | template: 17 | metadata: 18 | labels: 19 | app: backend 20 | spec: 21 | containers: 22 | - name: backend 23 | image: rishavmehra/backend-bun 24 | ports: 25 | - containerPort: 3000 26 | --- 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | namespace: backend-team 31 | name: backend 32 | spec: 33 | type: ClusterIP 34 | selector: 35 | app: backend 36 | ports: 37 | - protocol: TCP 38 | port: 3000 39 | targetPort: 3000 40 | -------------------------------------------------------------------------------- /00. Exercises/ingress-first-principle/db.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: db 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: db 10 | template: 11 | metadata: 12 | labels: 13 | app: db 14 | spec: 15 | containers: 16 | - name: db 17 | image: postgres:latest 18 | env: 19 | - name: POSTGRES_PASSWORD 20 | value: postgres 21 | ports: 22 | - containerPort: 5432 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: db 28 | spec: 29 | type: ClusterIP 30 | selector: 31 | app: db 32 | ports: 33 | - protocol: TCP 34 | port: 5432 35 | targetPort: 5432 36 | -------------------------------------------------------------------------------- /00. Exercises/ingress-first-principle/fe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: fe-team 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | namespace: fe-team 10 | name: fe 11 | spec: 12 | replicas: 2 13 | selector: 14 | matchLabels: 15 | app: fe 16 | template: 17 | metadata: 18 | labels: 19 | app: fe 20 | spec: 21 | containers: 22 | - name: fe 23 | image: httpd 24 | ports: 25 | - containerPort: 80 26 | --- 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | namespace: fe-team 31 | name: fe 32 | spec: 33 | type: ClusterIP 34 | selector: 35 | app: fe 36 | ports: 37 | - protocol: TCP 38 | port: 8080 39 | targetPort: 80 40 | -------------------------------------------------------------------------------- /00. 
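To confirm the namespaced Services above are reachable by DNS from anywhere in the cluster (the pattern the reverse proxy in the next file relies on), a throwaway busybox pod is enough; the pod name tmp is arbitrary:

kubectl run tmp --rm -it --image=busybox:1.28 --restart=Never -- nslookup backend.backend-team.svc.cluster.local
kubectl run tmp --rm -it --image=busybox:1.28 --restart=Never -- nslookup fe.fe-team.svc.cluster.local
kubectl run tmp --rm -it --image=busybox:1.28 --restart=Never -- wget -q -O- http://backend.backend-team.svc.cluster.local:3000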
Exercises/ingress-first-principle/reverse-proxy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: nginx-config 5 | data: 6 | nginx.conf: | 7 | events { 8 | worker_connections 1024; 9 | } 10 | 11 | http { 12 | server { 13 | listen 80; 14 | server_name k8s.rshv.xyz; 15 | 16 | location / { 17 | proxy_pass http://backend.backend-team.svc.cluster.local:3000; 18 | proxy_http_version 1.1; 19 | proxy_set_header Upgrade $http_upgrade; 20 | proxy_set_header Connection 'upgrade'; 21 | proxy_set_header Host $host; 22 | proxy_cache_bypass $http_upgrade; 23 | } 24 | } 25 | server { 26 | listen 80; 27 | server_name k8s2.rshv.xyz; 28 | 29 | location / { 30 | proxy_pass http://fe.fe-team.svc.cluster.local:8080; 31 | proxy_http_version 1.1; 32 | proxy_set_header Upgrade $http_upgrade; 33 | proxy_set_header Connection 'upgrade'; 34 | proxy_set_header Host $host; 35 | proxy_cache_bypass $http_upgrade; 36 | } 37 | } 38 | } 39 | 40 | --- 41 | apiVersion: apps/v1 42 | kind: Deployment 43 | metadata: 44 | name: ingress 45 | spec: 46 | replicas: 1 47 | selector: 48 | matchLabels: 49 | app: ingress 50 | template: 51 | metadata: 52 | labels: 53 | app: ingress 54 | spec: 55 | containers: 56 | - name: ingress 57 | image: nginx:latest 58 | ports: 59 | - containerPort: 80 60 | volumeMounts: 61 | - name: nginx-config 62 | mountPath: /etc/nginx/nginx.conf 63 | subPath: nginx.conf 64 | volumes: 65 | - name: nginx-config 66 | configMap: 67 | name: nginx-config 68 | --- 69 | apiVersion: v1 70 | kind: Service 71 | metadata: 72 | name: ingress 73 | spec: 74 | type: LoadBalancer 75 | selector: 76 | app: ingress 77 | ports: 78 | - protocol: TCP 79 | port: 80 80 | targetPort: 80 81 | -------------------------------------------------------------------------------- /00. Exercises/ingress/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: ingress-prod 5 | annotations: 6 | nginx.ingress.kubernetes.io/rewrite-target: / 7 | spec: 8 | ingressClassName: nginx 9 | rules: 10 | - host: k8s.rshv.xyz 11 | http: 12 | paths: 13 | - path: /backend 14 | pathType: Prefix 15 | backend: 16 | service: 17 | name: backend-server 18 | port: 19 | number: 80 20 | - path: /frontend 21 | pathType: Prefix 22 | backend: 23 | service: 24 | name: frontend-service 25 | port: 26 | number: 80 27 | -------------------------------------------------------------------------------- /00. Exercises/ingress/manifest-be.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: backend-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:latest 20 | ports: 21 | - containerPort: 80 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: backend-server 27 | spec: 28 | type: ClusterIP 29 | selector: 30 | app: nginx 31 | ports: 32 | - protocol: TCP 33 | port: 80 34 | targetPort: 80 35 | -------------------------------------------------------------------------------- /00. 
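With the nginx reverse proxy and its LoadBalancer Service above running, the two virtual hosts can be checked by overriding the Host header; EXTERNAL-IP is a placeholder for whatever the cloud load balancer reports. The same test works for the Ingress resource, assuming an ingress-nginx controller is installed:

##### hand-rolled reverse proxy
kubectl get svc ingress
curl -H "Host: k8s.rshv.xyz" http://EXTERNAL-IP/
curl -H "Host: k8s2.rshv.xyz" http://EXTERNAL-IP/

##### Ingress resource (needs an ingress controller)
curl -H "Host: k8s.rshv.xyz" http://INGRESS-CONTROLLER-IP/backend
curl -H "Host: k8s.rshv.xyz" http://INGRESS-CONTROLLER-IP/frontend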
Exercises/ingress/manifest-fe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: frontend-deployement 5 | labels: 6 | app: apache 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: apache 12 | template: 13 | metadata: 14 | labels: 15 | app: apache 16 | spec: 17 | containers: 18 | - name: apache 19 | image: httpd 20 | ports: 21 | - containerPort: 80 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: frontend-service 27 | spec: 28 | type: ClusterIP 29 | selector: 30 | app: apache 31 | ports: 32 | - protocol: TCP 33 | port: 80 34 | targetPort: 80 35 | -------------------------------------------------------------------------------- /00. Exercises/secret/manifest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion -------------------------------------------------------------------------------- /01. K8s Core Concepts/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### K8s Components 2 | * K8s official documentation: https://kubernetes.io/docs/concepts/ 3 | * Enrypting Secret Data at Rest: https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/ 4 | 5 | ##### K8s Architecture 6 | * K8s Architecture Components: https://kubernetes.io/docs/concepts/overview/components/ 7 | 8 | ##### kubectl and K8s configuration 9 | * Kubectl - K8s CLI: https://kubernetes.io/docs/reference/kubectl/overview/ 10 | * Managing Objects Imperative (Kubectl): https://kubernetes.io/docs/tasks/manage-kubernetes-objects/imperative-command/ 11 | * Managing Objects Declarative (Config File): https://kubernetes.io/docs/tasks/manage-kubernetes-objects/declarative-config/ 12 | * Imperative vs Declarative: https://kubernetes.io/docs/concepts/overview/working-with-objects/object-management/ 13 | 14 | -------------------------------------------------------------------------------- /02. Install Cluster/commands.md: -------------------------------------------------------------------------------- 1 | ### Provision Infrastructure 2 | 3 | ##### move private key to .ssh folder and restrict access 4 | mv ~/Downloads/k8s-node.pem ~/.ssh 5 | chmod 400 ~/.ssh/k8s-node.pem 6 | 7 | ##### ssh into ec2 instance with its public ip 8 | ssh -i ~/.ssh/k8s-node.pem ubuntu@35.180.130.108 9 | 10 | 11 | ### Configure Infrastructure 12 | sudo swapoff -a 13 | 14 | ##### set host names of nodes 15 | sudo vim /etc/hosts 16 | 17 | ##### get priavate ips of each node and add this to each server 18 | 45.14.48.178 master 19 | 45.14.48.179 worker1 20 | 45.14.48.180 worker2 21 | 22 | ##### we can now use these names instead of typing the IPs, when nodes talk to each other. After that, assign a hostname to each of these servers. 
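As an optional sanity check, the names from /etc/hosts should now resolve from each node:

##### verify name resolution between nodes
ping -c 1 master
ping -c 1 worker1
ping -c 1 worker2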
23 | 24 | ##### on master server 25 | sudo hostnamectl set-hostname master 26 | 27 | ##### on worker1 server 28 | sudo hostnamectl set-hostname worker1 29 | 30 | ##### on worker2 server 31 | sudo hostnamectl set-hostname worker2 32 | 33 | 34 | ### Initialize K8s cluster 35 | sudo kubeadm init 36 | 37 | ### Check kubelet process running 38 | service kubelet status 39 | systemctl status kubelet 40 | 41 | ### Check extended logs of kubelet service 42 | journalctl -u kubelet 43 | 44 | ### Access cluster as admin 45 | mkdir -p $HOME/.kube 46 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 47 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 48 | 49 | ### Kubectl commands 50 | 51 | ##### get node information 52 | kubectl get node 53 | 54 | ##### get pods in kube-system namespace 55 | kubectl get pod -n kube-system 56 | 57 | ##### get pods from all namespaces 58 | kubectl get pod -A 59 | 60 | ##### get wide output 61 | kubectl get pod -n kube-system -o wide 62 | 63 | 64 | ### Install pod network plugin 65 | 66 | ##### download the manifest 67 | wget "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" -O "weave.yaml" 68 | 69 | ##### check weave net status 70 | kubectl exec -n kube-system weave-net-1jkl6 -c weave -- /home/weave/weave --local status 71 | 72 | ### Join worker nodes 73 | 74 | ##### create and execute script 75 | vim install-containerd.sh 76 | chmod u+x install-containerd.sh 77 | ./install-containerd.sh 78 | 79 | ##### on master 80 | kubeadm token create --help 81 | kubeadm token create --print-join-command 82 | 83 | ##### copy the output command and execute on worker node as ROOT 84 | sudo kubeadm join 172.31.43.99:6443 --token 9bds1l.3g9ypte9gf69b5ft --discovery-token-ca-cert-hash sha256:xxxx 85 | 86 | ##### start a test pod 87 | kubectl run test --image=nginx 88 | 89 | 90 | -------------------------------------------------------------------------------- /02. Install Cluster/install-containerd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install and configure prerequisites 4 | ## load the necessary modules for Containerd 5 | cat < dev-cr.yaml 35 | kubectl create clusterrolebinding dev-crb --dry-run=client -o yaml > dev-crb.yaml 36 | 37 | ##### check user permissions as dev-tom 38 | kubectl auth can-i get pod 39 | 40 | ##### check user permissions as admin 41 | kubectl auth can-i get pod —-as {user-name} 42 | 43 | 44 | ### Create Service Account with Permissions 45 | kubectl create serviceaccount jenkins-sa --dry-run=client -o yaml > jenkins-sa.yaml 46 | 47 | kubectl create role cicd-role 48 | 49 | kubectl create clusterrolebinding cicd-binding \ 50 | --clusterrole=cicd-role \ 51 | --serviceaccount=default:jenkins 52 | 53 | ### Access with service account token 54 | 55 | kubectl options 56 | 57 | kubectl --server $server \ 58 | --certificate-authority /etc/kubernetes/pki/ca.crt \ 59 | --token $token \ 60 | --user jenkins \ 61 | get pods 62 | -------------------------------------------------------------------------------- /05. 
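The dev-tom CertificateSigningRequest in the next file is typically produced and approved along these lines. This is only a sketch: the dev-tom.key/dev-tom.csr file names, the CN=tom subject (matching the subject in dev-crb.yaml) and the kubernetes cluster name are assumptions:

##### create a key and CSR for user tom
openssl genrsa -out dev-tom.key 2048
openssl req -new -key dev-tom.key -subj "/CN=tom" -out dev-tom.csr

##### base64-encode the CSR for the request field of dev-tom-csr.yaml
cat dev-tom.csr | base64 | tr -d "\n"

##### submit, approve and extract the signed certificate
kubectl apply -f dev-tom-csr.yaml
kubectl certificate approve dev-tom
kubectl get csr dev-tom -o jsonpath='{.status.certificate}' | base64 -d > dev-tom.crt

##### register the credentials and a context in kubeconfig
kubectl config set-credentials tom --client-certificate=dev-tom.crt --client-key=dev-tom.key --embed-certs=true
kubectl config set-context dev-tom@kubernetes --cluster=kubernetes --user=tom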
Users and Permissions/dev-cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: dev-cr 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - pods 10 | - services 11 | verbs: ["*"] 12 | - apiGroups: 13 | - apps 14 | resources: 15 | - deployments 16 | - statefulSets 17 | verbs: 18 | - get 19 | - list 20 | - create 21 | -------------------------------------------------------------------------------- /05. Users and Permissions/dev-crb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: dev-crb 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: dev-cr 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: User 12 | name: tom 13 | -------------------------------------------------------------------------------- /05. Users and Permissions/dev-tom-csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: dev-tom 5 | spec: 6 | request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZqQ0NBVDRDQVFBd0VURVBNQTBHQTFVRUF3d0dZVzVuWld4aE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQTByczhJTHRHdTYxakx2dHhWTTJSVlRWMDNHWlJTWWw0dWluVWo4RElaWjBOCnR2MUZtRVFSd3VoaUZsOFEzcWl0Qm0wMUFSMkNJVXBGd2ZzSjZ4MXF3ckJzVkhZbGlBNVhwRVpZM3ExcGswSDQKM3Z3aGJlK1o2MVNrVHF5SVBYUUwrTWM5T1Nsbm0xb0R2N0NtSkZNMUlMRVI3QTVGZnZKOEdFRjJ6dHBoaUlFMwpub1dtdHNZb3JuT2wzc2lHQ2ZGZzR4Zmd4eW8ybmlneFNVekl1bXNnVm9PM2ttT0x1RVF6cXpkakJ3TFJXbWlECklmMXBMWnoyalVnald4UkhCM1gyWnVVV1d1T09PZnpXM01LaE8ybHEvZi9DdS8wYk83c0x0MCt3U2ZMSU91TFcKcW90blZtRmxMMytqTy82WDNDKzBERHk5aUtwbXJjVDBnWGZLemE1dHJRSURBUUFCb0FBd0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBR05WdmVIOGR4ZzNvK21VeVRkbmFjVmQ1N24zSkExdnZEU1JWREkyQTZ1eXN3ZFp1L1BVCkkwZXpZWFV0RVNnSk1IRmQycVVNMjNuNVJsSXJ3R0xuUXFISUh5VStWWHhsdnZsRnpNOVpEWllSTmU3QlJvYXgKQVlEdUI5STZXT3FYbkFvczFqRmxNUG5NbFpqdU5kSGxpT1BjTU1oNndLaTZzZFhpVStHYTJ2RUVLY01jSVUyRgpvU2djUWdMYTk0aEpacGk3ZnNMdm1OQUxoT045UHdNMGM1dVJVejV4T0dGMUtCbWRSeEgvbUNOS2JKYjFRQm1HCkkwYitEUEdaTktXTU0xMzhIQXdoV0tkNjVoVHdYOWl4V3ZHMkh4TG1WQzg0L1BHT0tWQW9FNkpsYWFHdTlQVmkKdjlOSjVaZlZrcXdCd0hKbzZXdk9xVlA3SVFjZmg3d0drWm89Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo= 7 | signerName: kubernetes.io/kube-apiserver-client 8 | # expirationSeconds: 8640000 9 | usages: 10 | - client auth -------------------------------------------------------------------------------- /05. Users and Permissions/jenkins-sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: jenkins 5 | -------------------------------------------------------------------------------- /05. 
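To call the API as the jenkins ServiceAccount above, a token is needed. On Kubernetes v1.24+ token Secrets are no longer auto-created for ServiceAccounts, so kubectl create token is the usual route; older clusters can read the generated Secret instead:

##### v1.24+: request a short-lived token
TOKEN=$(kubectl create token jenkins)

##### pre-v1.24: read the auto-generated secret
TOKEN=$(kubectl get secret $(kubectl get sa jenkins -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d)

##### confirm what the service account may do
kubectl auth can-i list pods --as=system:serviceaccount:default:jenkins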
Users and Permissions/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### RBAC 2 | * Authentication: https://kubernetes.io/docs/reference/access-authn-authz/authentication/ 3 | * Authorization: https://kubernetes.io/docs/reference/access-authn-authz/authorization/ 4 | * RBAC: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ 5 | 6 | ##### certificates API 7 | * Manage TLS Certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ 8 | * Certificate Signing Request: https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/ 9 | 10 | ##### API groups 11 | * Resource Types and corresponding apiGroup: https://kubernetes.io/docs/reference/kubectl/overview/#resource-types 12 | * API Groups: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#-strong-api-groups-strong 13 | 14 | ##### service account 15 | * Configure Service Account: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ 16 | * Managing Service Accounts: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/ 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /06. Debugging & Troubleshooting/busybox-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: busybox-pod 5 | spec: 6 | containers: 7 | - name: busybox-container 8 | image: busybox 9 | args: ["echo", "hello"] 10 | --- 11 | apiVersion: v1 12 | kind: Pod 13 | metadata: 14 | name: busybox-pod 15 | spec: 16 | containers: 17 | - name: busybox-container 18 | image: busybox 19 | command: ["printenv"] 20 | args: ["HOSTNAME", "KUBERNETES_PORT"] 21 | --- 22 | apiVersion: v1 23 | kind: Pod 24 | metadata: 25 | name: busybox-pod 26 | spec: 27 | containers: 28 | - name: busybox-container 29 | image: busybox 30 | command: ["/bin/sh"] 31 | args: ["-c", "while true; do echo hello; sleep 5; done"] 32 | --- 33 | apiVersion: v1 34 | kind: Pod 35 | metadata: 36 | name: busybox-pod 37 | spec: 38 | containers: 39 | - name: busybox-container 40 | image: busybox 41 | command: ["/bin/sh"] 42 | args: ["-c", "sleep 100"] -------------------------------------------------------------------------------- /06. 
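In the busybox variants above, command overrides the image ENTRYPOINT and args overrides CMD. All four Pods share the name busybox-pod, so they are meant to be created one at a time; a quick check of whichever variant is running:

kubectl logs busybox-pod
kubectl get pod busybox-pod   # echo/printenv variants exit and are restarted (restartPolicy defaults to Always)
kubectl get pod busybox-pod -o jsonpath='{.spec.containers[0].command} {.spec.containers[0].args}'
kubectl delete pod busybox-pod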
Debugging & Troubleshooting/commands.md: -------------------------------------------------------------------------------- 1 | ### Debug pod 2 | 3 | ##### start busybox in interactive mode 4 | kubectl run debug-pod --image=busybox -it 5 | 6 | ##### check service name can be resolved 7 | nslookup nginx-service.default.svc.cluster.local 8 | nslookup nginx-service 9 | 10 | ##### access service ip returned by nslookup 11 | ping service-ip 12 | 13 | ### Execute commands in pod from master node 14 | 15 | ##### ping service 16 | kubectl exec -it pod-name -- sh -c "ping nginx-service" 17 | 18 | ##### print all envs 19 | kubectl exec -it pod-name -- sh -c "printenv" 20 | 21 | ##### print all running ports 22 | kubectl exec -it pod-name -- sh -c "netstat -lntp" 23 | 24 | 25 | ### Jsonpath output format 26 | kubectl get node -o json 27 | kubectl get pod -o json 28 | 29 | ##### for single pod 30 | kubectl get pod -o jsonpath='{.items[0].metadata.name}' 31 | 32 | ##### print for all pods 33 | kubectl get pod -o jsonpath='{.items[*].metadata.name}' 34 | 35 | ##### multiple attributes 36 | kubectl get pod -o jsonpath="{.items[*]['metadata.name', 'status.podIP']}" 37 | kubectl get pod -o jsonpath="{.items[*]['metadata.name', 'status.podIP', 'status.startTime']}" 38 | 39 | ##### print multiple attributes on new lines 40 | kubectl get pod -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIP}{"\n"}{end}' 41 | kubectl get pod -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIP}{"\t"}{.status.startTime}{"\n"}{end}' 42 | 43 | ### Custom columns output 44 | kubectl get pods -o custom-columns=POD_NAME:.metadata.name,POD_IP:.status.podIP,CREATED_AT:.status.startTime 45 | 46 | 47 | ### Debugging kubelet 48 | service kubelet status 49 | sudo vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 50 | sudo systemctl daemon-reload 51 | sudo systemctl restart kubelet 52 | service kubelet status 53 | -------------------------------------------------------------------------------- /06. Debugging & Troubleshooting/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### troubleshooting in k8s 2 | * Troubleshoot Applications: https://kubernetes.io/docs/tasks/debug-application-cluster/debug-application/ 3 | * Troubleshoot Clusters: https://kubernetes.io/docs/tasks/debug-application-cluster/debug-cluster/ 4 | 5 | ##### debuging pods - commands & args 6 | * BusyBox Image: https://hub.docker.com/_/busybox 7 | * Debug running Pods: https://kubernetes.io/docs/tasks/debug-application-cluster/debug-running-pod/ 8 | * Get Shell of running container: https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/ 9 | * Define Command & Arguments for Container: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/ 10 | 11 | ##### kubectl output formats 12 | * Output Options: https://kubernetes.io/docs/reference/kubectl/overview/#output-options 13 | * Kubectl jsonpath: https://kubernetes.io/docs/reference/kubectl/jsonpath/ 14 | -------------------------------------------------------------------------------- /07. 
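A few related output tricks that pair well with the jsonpath and custom-columns examples above (nothing here is specific to this repo):

##### sort list output by any jsonpath expression
kubectl get pods --sort-by=.status.startTime
kubectl get events --sort-by=.metadata.creationTimestamp

##### scheduling and image-pull problems usually surface in events
kubectl describe pod pod-name
kubectl get events --field-selector involvedObject.name=pod-name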
Multi-container Pods/expose-pod-info.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myapp-pod 5 | labels: 6 | app: myapp 7 | spec: 8 | containers: 9 | - name: myapp-container 10 | image: nginx:1.20 11 | - name: logging-sidecar 12 | image: busybox:1.28 13 | command: [ "sh", "-c"] 14 | args: 15 | - while true; do 16 | echo sync logs; 17 | echo -en '\n'; 18 | printenv MY_NODE_NAME MY_POD_NAME MY_POD_NAMESPACE; 19 | printenv MY_POD_IP MY_POD_SERVICE_ACCOUNT; 20 | sleep 20; 21 | done; 22 | env: 23 | - name: MY_NODE_NAME 24 | valueFrom: 25 | fieldRef: 26 | fieldPath: spec.nodeName 27 | - name: MY_POD_NAME 28 | valueFrom: 29 | fieldRef: 30 | fieldPath: metadata.name 31 | - name: MY_POD_NAMESPACE 32 | valueFrom: 33 | fieldRef: 34 | fieldPath: metadata.namespace 35 | - name: MY_POD_IP 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: status.podIP 39 | - name: MY_POD_SERVICE_ACCOUNT 40 | valueFrom: 41 | fieldRef: 42 | fieldPath: spec.serviceAccountName -------------------------------------------------------------------------------- /07. Multi-container Pods/multi-container-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myapp-pod 5 | labels: 6 | app: myapp 7 | spec: 8 | containers: 9 | - name: myapp-container 10 | image: nginx:1.20 11 | - name: logging-sidecar 12 | image: busybox:1.28 13 | command: ['sh', '-c', "while true; do echo sync logs; sleep 20; done"] 14 | initContainers: 15 | - name: myservice-available 16 | image: busybox:1.28 17 | command: ['sh', '-c', "until nslookup mydb-service; do echo waiting for myservice; sleep 4; done"] 18 | 19 | --- 20 | # alternative command syntax 21 | - name: logging-sidecar 22 | image: busybox:1.28 23 | command: 24 | - 'sh' 25 | - '-c' 26 | - "while true; do echo sync logs; sleep 20; done" 27 | 28 | # alternative with args 29 | - name: logging-sidecar 30 | image: busybox:1.28 31 | command: [ "sh", "-c"] 32 | args: 33 | - while true; do 34 | echo sync logs; 35 | sleep 20; 36 | done; -------------------------------------------------------------------------------- /07. Multi-container Pods/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### multi-container pods 2 | * Init Containers: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ 3 | * Sidecar Container: https://kubernetes.io/docs/concepts/workloads/pods/#how-pods-manage-multiple-containers 4 | 5 | ##### exposing pod data to containers 6 | * Exposing Pod Information: https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ 7 | -------------------------------------------------------------------------------- /08. 
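To watch the pieces of the multi-container Pod above separately; the mydb-service created here is only a placeholder to unblock the init container (any Service with that name resolves in DNS, even without backing pods):

##### the init container blocks until mydb-service resolves
kubectl get pod myapp-pod
kubectl logs myapp-pod -c myservice-available

##### create a placeholder service so the Pod can start
kubectl create service clusterip mydb-service --tcp=3306:3306

##### follow the sidecar and the main container individually
kubectl logs myapp-pod -c logging-sidecar -f
kubectl logs myapp-pod -c myapp-container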
Data Persistence/deployment-with-emptydir.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-app 5 | labels: 6 | app: my-app 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: my-app 12 | template: 13 | metadata: 14 | labels: 15 | app: my-app 16 | spec: 17 | containers: 18 | - name: my-app 19 | image: busybox:1.28 20 | command: ['sh', '-c'] 21 | args: 22 | - while true; do 23 | echo "$(date) INFO some app data" >> /var/log/myapp.log; 24 | sleep 5; 25 | done 26 | 27 | volumeMounts: 28 | - name: log 29 | mountPath: /var/log 30 | 31 | - name: log-sidecar 32 | image: busybox:1.28 33 | command: ['sh', '-c'] 34 | args: 35 | - tail -f /var/log/myapp.log 36 | 37 | volumeMounts: 38 | - name: log 39 | mountPath: /var/log 40 | 41 | volumes: 42 | - name: log 43 | emptyDir: {} 44 | -------------------------------------------------------------------------------- /08. Data Persistence/deployment-with-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-db 5 | labels: 6 | app: my-db 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: my-db 12 | template: 13 | metadata: 14 | labels: 15 | app: my-db 16 | spec: 17 | containers: 18 | - name: mysql 19 | image: mysql:8.0 20 | 21 | volumeMounts: 22 | - name: db-data 23 | mountPath: "/var/lib/mysql" 24 | 25 | 26 | volumes: 27 | - name: db-data 28 | persistentVolumeClaim: 29 | claimName: mysql-data-pvc -------------------------------------------------------------------------------- /08. Data Persistence/pv-and-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: data-pv 5 | spec: 6 | capacity: 7 | storage: 10Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | hostPath: 11 | path: "/mnt/data" 12 | --- 13 | apiVersion: v1 14 | kind: PersistentVolumeClaim 15 | metadata: 16 | name: mysql-data-pvc 17 | spec: 18 | accessModes: 19 | - ReadWriteOnce 20 | resources: 21 | requests: 22 | storage: 5Gi -------------------------------------------------------------------------------- /08. Data Persistence/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### volumes 2 | * Storage Official Docs: https://kubernetes.io/docs/concepts/storage/ 3 | * Volume types: https://kubernetes.io/docs/concepts/storage/volumes/#volume-types 4 | * Example k8s manifests https://gitlab.com/nanuchi/bootcamp-kubernetes/-/tree/master/kubernetes-volumes 5 | 6 | ##### hostpath 7 | * Hostpath Volume Type: https://kubernetes.io/docs/concepts/storage/volumes/#hostpath 8 | * How hostpath is different from local volume type: https://kubernetes.io/blog/2019/04/04/kubernetes-1.14-local-persistent-volumes-ga/#how-is-it-different-from-a-hostpath-volume 9 | * Configure Pod to use PV: https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/ 10 | * Access Modes: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes 11 | 12 | ##### emptydir 13 | * emptyDir Volume Type: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir 14 | 15 | -------------------------------------------------------------------------------- /09. 
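After applying pv-and-pvc.yaml, both objects should report Bound before the Deployment starts. Note that the mysql:8.0 image will not start without a root password; the kubectl set env line is only a quick illustration (a real setup would reference a Secret) and changeme is a placeholder value:

kubectl get pv,pvc
kubectl describe pvc mysql-data-pvc

##### mysql needs MYSQL_ROOT_PASSWORD (or MYSQL_ALLOW_EMPTY_PASSWORD) to boot
kubectl set env deployment/my-db MYSQL_ROOT_PASSWORD=changeme
kubectl logs deploy/my-db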
Secret & ConfigMap/config-as-env-vars.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: myapp-config 5 | data: 6 | db_host: mysql-service 7 | --- 8 | apiVersion: v1 9 | kind: Secret 10 | metadata: 11 | name: myapp-secret 12 | type: Opaque 13 | data: 14 | username: dXNlcm5hbWU= 15 | password: cGFzc3dvcmQ= 16 | --- 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: my-app 21 | labels: 22 | app: my-app 23 | spec: 24 | replicas: 1 25 | selector: 26 | matchLabels: 27 | app: my-app 28 | template: 29 | metadata: 30 | labels: 31 | app: my-app 32 | spec: 33 | containers: 34 | - name: my-app 35 | image: busybox:1.28 36 | command: ['sh', '-c', "printenv MYSQL_USER MYSQL_PASSWORD MYSQL_SERVER"] 37 | env: 38 | - name: MYSQL_USER 39 | valueFrom: 40 | secretKeyRef: 41 | name: myapp-secret 42 | key: username 43 | - name: MYSQL_PASSWORD 44 | valueFrom: 45 | secretKeyRef: 46 | name: myapp-secret 47 | key: password 48 | - name: MYSQL_SERVER 49 | valueFrom: 50 | configMapKeyRef: 51 | name: myapp-config 52 | key: db_host -------------------------------------------------------------------------------- /09. Secret & ConfigMap/config-as-volumes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: mysql-config 5 | data: 6 | mysql.conf: | 7 | [mysqld] 8 | port=3306 9 | socket=/tmp/mysql.sock 10 | key_buffer_size=16M 11 | max_allowed_packet=128M 12 | --- 13 | apiVersion: v1 14 | kind: Secret 15 | metadata: 16 | name: mysql-secret 17 | type: Opaque 18 | data: 19 | secret.file: | 20 | c29tZXN1cGVyc2VjcmV0IGZpbGUgY29udGVudHMgbm9ib2R5IHNob3VsZCBzZWU= 21 | 22 | --- 23 | apiVersion: apps/v1 24 | kind: Deployment 25 | metadata: 26 | name: my-db 27 | labels: 28 | app: my-db 29 | spec: 30 | replicas: 1 31 | selector: 32 | matchLabels: 33 | app: my-db 34 | template: 35 | metadata: 36 | labels: 37 | app: my-db 38 | spec: 39 | containers: 40 | - name: my-db 41 | image: busybox:1.28 42 | command: ['sh', '-c', "cat /mysql/db-config; cat /mysql/db-secret"] 43 | 44 | volumeMounts: 45 | - name: db-config 46 | mountPath: /mysql/db-config 47 | - name: db-secret 48 | mountPath: /mysql/db-secret 49 | readOnly: true 50 | 51 | volumes: 52 | - name: db-config 53 | configMap: 54 | name: mysql-config 55 | - name: db-secret 56 | secret: 57 | secretName: mysql-secret -------------------------------------------------------------------------------- /09. Secret & ConfigMap/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### configmap 2 | * Complete ConfigMap docs: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/ 3 | 4 | ##### secret 5 | * Complete Secret docs: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/ 6 | -------------------------------------------------------------------------------- /10. Resource Requests & Limits/commands.md: -------------------------------------------------------------------------------- 1 | ##### print all pods with resource requests and limits 2 | 3 | kubectl get pod -o jsonpath="{range .items[*]}{.metadata.name}{.spec.containers[*].resources}{'\n'}" 4 | -------------------------------------------------------------------------------- /10. 
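The base64 strings in myapp-secret above are simply the plain values encoded; they can be reproduced or decoded as shown, and kubectl can also generate the whole Secret manifest:

##### encode / decode values (-n avoids a trailing newline)
echo -n 'username' | base64
echo -n 'password' | base64
kubectl get secret myapp-secret -o jsonpath='{.data.password}' | base64 -d

##### or let kubectl build the manifest
kubectl create secret generic myapp-secret --from-literal=username=username --from-literal=password=password --dry-run=client -o yaml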
Resource Requests & Limits/my-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-app 5 | labels: 6 | app: my-app 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: my-app 12 | template: 13 | metadata: 14 | labels: 15 | app: my-app 16 | spec: 17 | containers: 18 | - name: my-app 19 | image: nginx:1.20 20 | resources: 21 | requests: 22 | memory: "64Mi" 23 | cpu: "250m" 24 | limits: 25 | memory: "128Mi" 26 | cpu: "500m" 27 | - name: logging-sidecar 28 | image: busybox:1.28 29 | command: ['sh', '-c', "while true; do echo sync logs; sleep 20; done"] 30 | resources: 31 | requests: 32 | memory: "32Mi" 33 | cpu: "125m" 34 | limits: 35 | memory: "64Mi" 36 | cpu: "250m" -------------------------------------------------------------------------------- /10. Resource Requests & Limits/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### resources 2 | * Managing Resources in K8s: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 3 | * Resource Unit in K8s: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes 4 | -------------------------------------------------------------------------------- /11. Taints & Tolerations, NodeAffinity/pod-nodeaffinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: with-node-affinity 5 | spec: 6 | containers: 7 | - name: myapp 8 | image: nginx:1.20 9 | affinity: 10 | nodeAffinity: 11 | requiredDuringSchedulingIgnoredDuringExecution: 12 | nodeSelectorTerms: 13 | - matchExpressions: 14 | - key: kubernetes.io/os 15 | operator: In 16 | values: 17 | - linux 18 | preferredDuringSchedulingIgnoredDuringExecution: 19 | - weight: 1 20 | preference: 21 | matchExpressions: 22 | - key: type 23 | operator: In 24 | values: 25 | - cpu 26 | 27 | -------------------------------------------------------------------------------- /11. Taints & Tolerations, NodeAffinity/pod-podaffinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: myapp-deployment 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: myapp 9 | replicas: 5 10 | template: 11 | metadata: 12 | labels: 13 | app: myapp 14 | spec: 15 | affinity: 16 | podAntiAffinity: 17 | requiredDuringSchedulingIgnoredDuringExecution: 18 | - labelSelector: 19 | matchExpressions: 20 | - key: app 21 | operator: In 22 | values: 23 | - myapp 24 | topologyKey: "kubernetes.io/hostname" 25 | podAffinity: 26 | requiredDuringSchedulingIgnoredDuringExecution: 27 | - labelSelector: 28 | matchExpressions: 29 | - key: app 30 | operator: In 31 | values: 32 | - etcd 33 | topologyKey: "kubernetes.io/hostname" 34 | containers: 35 | - name: myapp-container 36 | image: nginx:1.20 37 | nodeSelector: 38 | type: master -------------------------------------------------------------------------------- /11. Taints & Tolerations, NodeAffinity/pod-with-node-name.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | spec: 6 | containers: 7 | - name: nginx 8 | image: nginx:1.20 9 | nodeName: worker1 -------------------------------------------------------------------------------- /11. 
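The nodeSelector and nodeAffinity examples above only schedule once matching node labels exist. Labels and taints can be inspected and set like this; worker1/master and the type values mirror the manifests, while the special taint is purely illustrative:

kubectl get nodes --show-labels
kubectl label node worker1 type=cpu
kubectl label node master type=master

##### inspect and manage taints
kubectl describe node master | grep -i taint
kubectl taint node worker1 special=true:NoSchedule
kubectl taint node worker1 special=true:NoSchedule-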
Taints & Tolerations, NodeAffinity/pod-with-node-selector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | spec: 6 | containers: 7 | - name: nginx 8 | image: nginx:1.20 9 | nodeSelector: 10 | type: cpu -------------------------------------------------------------------------------- /11. Taints & Tolerations, NodeAffinity/pod-with-tolerations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-with-toleration 5 | labels: 6 | env: test 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:1.20 11 | tolerations: 12 | - effect: NoExecute 13 | operator: Exists 14 | nodeName: master 15 | -------------------------------------------------------------------------------- /11. Taints & Tolerations, NodeAffinity/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### assigning pods to nodes 2 | * Assigning Pods to Nodes: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ 3 | 4 | ##### nodeAffinity 5 | * Node Affinity: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity 6 | 7 | ##### interPodAffinity and antiAffinity 8 | * InterPod Affinity & AntiAffinity: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity 9 | 10 | ##### taints & tolerations 11 | * Taints and Tolerations: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ 12 | 13 | 14 | -------------------------------------------------------------------------------- /12. Readiness & Liveness Probes/pod-health-probes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myapp-health-probes 5 | spec: 6 | containers: 7 | - image: nginx:1.20 8 | name: myapp-container 9 | ports: 10 | - containerPort: 80 11 | readinessProbe: 12 | tcpSocket: 13 | port: 80 14 | initialDelaySeconds: 10 15 | periodSeconds: 5 16 | livenessProbe: 17 | tcpSocket: 18 | port: 80 19 | initialDelaySeconds: 5 20 | periodSeconds: 15 21 | 22 | 23 | -------------------------------------------------------------------------------- /12. Readiness & Liveness Probes/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### health probes 2 | * Configure Readiness and Liveness Probes: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ 3 | -------------------------------------------------------------------------------- /13. Rolling Updates/commands.md: -------------------------------------------------------------------------------- 1 | ##### rollout commands 2 | kubectl rollout history deployment/{depl-name} 3 | kubectl rollout undo deployment/{depl-name} 4 | kubectl rollout status deployment/{depl-name} 5 | -------------------------------------------------------------------------------- /13. 
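A rolling update can be triggered and observed against, for example, the my-app Deployment from section 10; the nginx:1.21 tag is just an arbitrary newer image:

kubectl set image deployment/my-app my-app=nginx:1.21
kubectl rollout status deployment/my-app
kubectl rollout history deployment/my-app
kubectl rollout undo deployment/my-app --to-revision=1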
Rolling Updates/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### replicaset 2 | * ReplicaSet: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ 3 | 4 | ##### deployment upgrade strategies 5 | * Deployment Strategies: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy 6 | * Rolling Update: https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/ 7 | 8 | -------------------------------------------------------------------------------- /14. Etcd Backup & Restore/commands.md: -------------------------------------------------------------------------------- 1 | ### Install ectdctl 2 | sudo apt install etcd-client 3 | 4 | ### Backup 5 | 6 | ##### snapshot backup with authentication 7 | ETCDCTL_API=3 etcdctl snapshot save /tmp/etcd-backup.db \ 8 | --cacert /etc/kubernetes/pki/etcd/ca.crt \ 9 | --cert /etc/kubernetes/pki/etcd/server.crt \ 10 | --key /etc/kubernetes/pki/etcd/server.key 11 | 12 | ##### check snapshot status 13 | ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshotdb 14 | 15 | 16 | ### Restore 17 | 18 | ##### create restore point from the backup 19 | ETCDCTL_API=3 etcdctl snapshot restore /tmp/etcd-backup.db --data-dir /var/lib/etcd-backup 20 | 21 | ##### the restored files are located at the new folder /var/lib/etcd-backup, so now configure etcd to use that directory: 22 | vim /etc/kubernetes/manifests/etcd.yaml 23 | -------------------------------------------------------------------------------- /14. Etcd Backup & Restore/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### etcd backup 2 | Back up etcd store: https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#backing-up-an-etcd-cluster 3 | 4 | ##### etcd restore 5 | Restore etcd backup: https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#restoring-an-etcd-cluster 6 | -------------------------------------------------------------------------------- /15. 
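On a kubeadm cluster etcd runs as a static pod, so switching to the restored data usually means pointing the etcd-data hostPath in /etc/kubernetes/manifests/etcd.yaml at /var/lib/etcd-backup and letting the kubelet recreate the pod. A sketch of the follow-up checks:

sudo vim /etc/kubernetes/manifests/etcd.yaml   # change the etcd-data hostPath to /var/lib/etcd-backup
sudo crictl ps | grep etcd                     # wait for the etcd container to come back
kubectl get pod -n kube-system | grep etcd
kubectl get node                               # cluster state should now reflect the snapshot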
K8s Rest API/commands.md: -------------------------------------------------------------------------------- 1 | ### Access API through proxy 2 | kubectl proxy --port=8081 & 3 | curl http://localhost:8081/api/ 4 | 5 | ### Access without kubectl proxy 6 | 7 | ##### create serviceaccount for myscript usage 8 | kubectl create serviceaccount myscript 9 | 10 | ##### create role with Deployment, Pod, Service permissions 11 | kubectl apply -f myscript-role.yml 12 | 13 | ##### add Binding for serviceaccount 14 | kubectl create rolebinding script-role-binding --role=script-role --serviceaccount=default:myscript 15 | 16 | ##### get config info from kubectl 17 | kubectl config view 18 | 19 | ##### set cluster location var 20 | APISERVER=https://172.31.44.88:6443 21 | 22 | ##### set token var from default token 23 | kubectl get serviceaccount myscript -o yaml 24 | kubectl get secret xxxxx -o yaml 25 | 26 | TOKEN=$(echo "token" | base64 --decode | tr -d "\n") 27 | 28 | 29 | ##### if we don't have the ca cert for curl, we can accept insecure, without providing curl client with ca certificate 30 | curl -X GET $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure 31 | 32 | ##### if we don't want insecure connection, we can specify ca cert for curl providing curl with k8s ca certificate 33 | curl -X GET $APISERVER/api --header "Authorization: Bearer $TOKEN" --cacert /etc/kubernetes/pki/ca.crt 34 | 35 | 36 | ### Get data 37 | 38 | ##### main endpoint /api 39 | curl -X GET $APISERVER/api --header "Authorization: Bearer $TOKEN" --cacert /etc/kubernetes/pki/ca.crt 40 | 41 | ##### list all deployments 42 | curl -X GET $APISERVER/apis/apps/v1/namespaces/default/deployments --header "Authorization: Bearer $TOKEN" --cacert /etc/kubernetes/pki/ca.crt 43 | 44 | ##### list all services 45 | curl -X GET $APISERVER/api/v1/namespaces/default/services --header "Authorization: Bearer $TOKEN" --cacert /etc/kubernetes/pki/ca.crt 46 | 47 | ##### get a specific service or deployment 48 | curl -X GET $APISERVER/api/v1/namespaces/default/services/nginx-service --header "Authorization: Bearer $TOKEN" --cacert /etc/kubernetes/pki/ca.crt 49 | 50 | ##### get all pod names 51 | curl -X GET $APISERVER/api/v1/namespaces/default/pods/pod-name/logs --header "Authorization: Bearer $TOKEN" --cacert /etc/kubernetes/pki/ca.crt 52 | -------------------------------------------------------------------------------- /15. K8s Rest API/myscript-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: script-role 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods", "services"] 8 | verbs: ["get", "list", "delete"] 9 | - apiGroups: ["apps"] 10 | resources: ["deployments"] 11 | verbs: ["get", "list", "delete"] -------------------------------------------------------------------------------- /15. 
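On clusters v1.24 and newer no token Secret is created automatically for the myscript ServiceAccount, so the TOKEN variable used above is easier to obtain with kubectl create token:

TOKEN=$(kubectl create token myscript)
curl -X GET $APISERVER/api --header "Authorization: Bearer $TOKEN" --cacert /etc/kubernetes/pki/ca.crt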
K8s Rest API/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### K8s rest API 2 | * Access Cluster using API: https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/ 3 | * API Overview: https://kubernetes.io/docs/reference/using-api/ 4 | * More about K8s API: https://kubernetes.io/docs/concepts/overview/kubernetes-api/ 5 | * K8s REST API Documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#-strong-api-groups-strong- 6 | * Bearer Token: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#putting-a-bearer-token-in-a-request 7 | 8 | ##### programmatic access 9 | * Programmatic Access: https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/#programmatic-access-to-the-api 10 | * Client Libraries: https://kubernetes.io/docs/reference/using-api/client-libraries/ 11 | -------------------------------------------------------------------------------- /16. Upgrade K8s Cluster/commands.md: -------------------------------------------------------------------------------- 1 | ### Upgrade control plane node 2 | 3 | ##### check apt-get version 4 | sudo apt-get --version 5 | 6 | ##### for apt-get version gt 1.1 7 | sudo apt-get update 8 | sudo apt-get install -y --allow-change-held-packages kubeadm=1.22.0-00 9 | 10 | ##### get upgrade preview 11 | sudo kubeadm upgrade plan 12 | 13 | ##### upgrade cluster 14 | sudo kubeadm upgrade apply v1.22.0 15 | 16 | ##### drain node 17 | kubectl drain master --ignore-daemonsets 18 | 19 | ##### upgrade kubelet & kubectl 20 | sudo apt-get update 21 | sudo apt-get install -y --allow-change-held-packages kubelet=1.22.0-00 kubectl=1.22.0-00 22 | 23 | ##### restart kubelet 24 | sudo systemctl daemon-reload 25 | sudo systemctl restart kubelet 26 | 27 | ##### uncordon node 28 | kubectl uncordon master 29 | 30 | 31 | ### Upgrade worker node 32 | sudo apt-get update && \ 33 | sudo apt-get install -y --allow-change-held-packages kubeadm=1.22.x-00 34 | 35 | sudo kubeadm upgrade node 36 | 37 | kubectl drain worker1 --ignore-daemonsets --force 38 | 39 | sudo apt-get update 40 | sudo apt-get install -y --allow-change-held-packages kubelet=1.22.x-00 kubectl=1.22.x-00 41 | 42 | sudo systemctl daemon-reload 43 | sudo systemctl restart kubelet 44 | 45 | kubectl uncordon worker1 46 | -------------------------------------------------------------------------------- /16. Upgrade K8s Cluster/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### cluster upgrade 2 | * Cluster Upgrade: https://kubernetes.io/docs/tasks/administer-cluster/cluster-upgrade/ 3 | * Upgrading Kubeadm clusters: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/ 4 | * Drain a Node: https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ 5 | -------------------------------------------------------------------------------- /17. 
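If the kube packages are held (the usual kubeadm setup), an alternative to --allow-change-held-packages is to unhold, upgrade and re-hold them; the version pin mirrors the 1.22.0-00 example above:

sudo apt-mark unhold kubeadm
sudo apt-get update && sudo apt-get install -y kubeadm=1.22.0-00
sudo apt-mark hold kubeadm

##### after uncordoning, confirm the node reports the new version
kubectl get nodes -o wide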
Contexts with Multiple Clusters/commands.md: -------------------------------------------------------------------------------- 1 | ##### all context commands 2 | kubectl config --help 3 | 4 | ##### show all contexts 5 | kubectl config get-contexts 6 | 7 | ##### show current context 8 | kubectl config current-context 9 | 10 | ##### switch to another context 11 | kubectl config use-context context-name 12 | 13 | ##### change user or cluster name for any context 14 | kubectl config set-context --help 15 | kubectl config set-context context-name --user=user_name --cluster=cluster_name 16 | 17 | ##### change user or cluster for current context 18 | kubectl config set-context --current --user=user_name --cluster=cluster_name 19 | 20 | ##### change default namespace 21 | kubectl get pod 22 | kubectl config set-context --current --namespace=myapp 23 | kubectl get pod 24 | 25 | -------------------------------------------------------------------------------- /17. Contexts with Multiple Clusters/kubeconfig-multiple-contexts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | certificate-authority-data: xxxxxx 5 | server: https://172.31.44.88:6443 6 | name: development 7 | - cluster: 8 | certificate-authority-data: xxxxxx 9 | server: https://251.15.20.12:6443 10 | name: staging 11 | contexts: 12 | - context: 13 | cluster: development 14 | user: dev-admin 15 | name: dev-admin@development 16 | - context: 17 | cluster: development 18 | namespace: myapp 19 | user: my-script 20 | name: my-script@development 21 | - context: 22 | cluster: staging 23 | user: staging-admin 24 | name: staging-admin@staging 25 | current-context: dev-admin@development 26 | kind: Config 27 | preferences: {} 28 | users: 29 | - name: dev-admin 30 | user: 31 | client-certificate-data: xxxx 32 | client-key-data: xxxx 33 | - name: staging-admin 34 | user: 35 | client-certificate-data: xxxx 36 | client-key-data: xxxx 37 | - name: my-script 38 | user: 39 | token: xxxxxxx -------------------------------------------------------------------------------- /17. Contexts with Multiple Clusters/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### contexts 2 | * Configure Access to multiple clusters: https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/ 3 | * Context: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#context 4 | * kubectl config subcommand: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config 5 | -------------------------------------------------------------------------------- /18. Renew K8s Certificates/commands.md: -------------------------------------------------------------------------------- 1 | ##### check certificate expiration dates of all k8s certificates with kubeadm 2 | kubeadm certs --help 3 | kubeadm certs check-expiration 4 | 5 | ##### check expiration date of a certificate with openssl 6 | openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text -noout 7 | 8 | ##### filter resulting cert for validity attribute plus 2 following lines 9 | openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text -noout | grep Validity -A2 10 | -------------------------------------------------------------------------------- /18. 
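One way the multi-cluster kubeconfig above ends up in a single file is by merging: point KUBECONFIG at several files, flatten, then switch with use-context (the staging-kubeconfig.yaml path is only an example):

KUBECONFIG=~/.kube/config:~/Downloads/staging-kubeconfig.yaml kubectl config view --flatten > /tmp/merged-config
mv /tmp/merged-config ~/.kube/config
kubectl config get-contexts
kubectl config use-context staging-admin@staging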
Renew K8s Certificates/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### k8s certificates 2 | * Certificate Management with kubeadm: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/ 3 | -------------------------------------------------------------------------------- /19. Network Policy/commands.md: -------------------------------------------------------------------------------- 1 | ### Set namespace to myapp 2 | kubectl config set-context --current --namespace=myapp 3 | kubectl get pod -o wide 4 | 5 | ### Before creating network policies 6 | 7 | ##### all of these works 8 | kubectl exec backend-7787844fc5-q2hkk -- sh -c 'nc -v db-ip-10.44.0.9 6379' 9 | kubectl exec frontend-7d7b95b577-6m7fp -- sh -c 'nc -v backend-ip-10.44.0.8 80' 10 | kubectl exec frontend-7d7b95b577-6m7fp -- sh -c 'nc -v db-ip-10.44.0.10 6379' 11 | kubectl exec database-7874cd5f45-jvr5z -- sh -c 'nc -v frontend-ip-10.44.0.6 3000' 12 | kubectl exec database-7874cd5f45-jvr5z -- sh -c 'nc -v backend-ip-10.44.0.8 80' 13 | 14 | ### After creating network policies 15 | 16 | ##### still works 17 | kubectl exec backend-7787844fc5-q2hkk -- sh -c 'nc -v db-ip-10.44.0.9 6379' 18 | kubectl exec frontend-7d7b95b577-6m7fp -- sh -c 'nc -v backend-ip-10.44.0.8 80' 19 | 20 | ##### don't work any more 21 | kubectl exec frontend-7d7b95b577-6m7fp -- sh -c 'nc -v db-ip-10.44.0.10 6379' 22 | kubectl exec database-7874cd5f45-jvr5z -- sh -c 'nc -v frontend-ip-10.44.0.6 3000' 23 | kubectl exec database-7874cd5f45-jvr5z -- sh -c 'nc -v backend-ip-10.44.0.8 80' 24 | -------------------------------------------------------------------------------- /19. Network Policy/demo-database.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: database 5 | namespace: myapp 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: database 11 | template: 12 | metadata: 13 | labels: 14 | app: database 15 | spec: 16 | containers: 17 | - name: redis 18 | image: redis:6-alpine 19 | ports: 20 | - containerPort: 6379 21 | -------------------------------------------------------------------------------- /19. Network Policy/demo-frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: frontend 5 | namespace: myapp 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: frontend 11 | template: 12 | metadata: 13 | labels: 14 | app: frontend 15 | spec: 16 | containers: 17 | - name: node 18 | image: node:16-alpine 19 | command: ['sh', '-c', "sleep 3000"] 20 | ports: 21 | - containerPort: 3000 22 | -------------------------------------------------------------------------------- /19. Network Policy/demo-np-database.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np-database 5 | namespace: myapp 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: database 10 | policyTypes: 11 | - Ingress 12 | - Egress 13 | ingress: 14 | - from: 15 | - podSelector: 16 | matchLabels: 17 | app: backend 18 | ports: 19 | - protocol: TCP 20 | port: 6379 21 | 22 | -------------------------------------------------------------------------------- /19. 
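Since the demo Deployments above expose no Services, the connectivity tests go against pod IPs; capturing them with jsonpath avoids copy-pasting. A namespace-wide default-deny ingress policy (sketched last) is the usual starting point before allowing specific traffic, and none of this takes effect unless the CNI plugin enforces NetworkPolicy:

BACKEND_IP=$(kubectl get pod -n myapp -l app=backend -o jsonpath='{.items[0].status.podIP}')
DB_IP=$(kubectl get pod -n myapp -l app=database -o jsonpath='{.items[0].status.podIP}')
kubectl exec -n myapp deploy/frontend -- nc -v -w 2 $BACKEND_IP 80
kubectl exec -n myapp deploy/frontend -- nc -v -w 2 $DB_IP 6379

##### optional: deny all ingress in the namespace first, then open paths per policy
kubectl apply -n myapp -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}
  policyTypes:
  - Ingress
EOF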
Network Policy/demo-np-frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np-frontend 5 | namespace: myapp 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: frontend 10 | policyTypes: 11 | - Egress 12 | egress: 13 | - to: 14 | - podSelector: 15 | matchLabels: 16 | app: backend 17 | ports: 18 | - protocol: TCP 19 | port: 80 20 | -------------------------------------------------------------------------------- /19. Network Policy/demp-backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: backend 5 | namespace: myapp 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: backend 11 | template: 12 | metadata: 13 | labels: 14 | app: backend 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx:1.21-alpine 19 | ports: 20 | - containerPort: 80 21 | -------------------------------------------------------------------------------- /19. Network Policy/np-example-1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np-db 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: mysql 10 | policyTypes: 11 | - Ingress 12 | ingress: 13 | - from: 14 | - podSelector: 15 | matchLabels: 16 | app: backend -------------------------------------------------------------------------------- /19. Network Policy/np-example-2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np-db 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: mysql 10 | policyTypes: 11 | - Ingress 12 | ingress: 13 | - from: # first condition 14 | - podSelector: 15 | matchLabels: 16 | app: backend 17 | ports: # second condition 18 | - protocol: TCP 19 | port: 3306 -------------------------------------------------------------------------------- /19. Network Policy/np-example-3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np-db 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: mysql 10 | policyTypes: 11 | - Ingress 12 | ingress: 13 | - from: # first rule 14 | - podSelector: 15 | matchLabels: 16 | app: backend 17 | ports: 18 | - protocol: TCP 19 | port: 3306 20 | - from: # second rule 21 | - podSelector: 22 | matchLabels: 23 | app: phpmyadmin 24 | ports: 25 | - protocol: TCP 26 | port: 3306 -------------------------------------------------------------------------------- /19. Network Policy/np-example-4.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np-backend 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: backend 10 | policyTypes: 11 | - Egress 12 | egress: 13 | - to: # first rule 14 | - podSelector: 15 | matchLabels: 16 | app: mysql 17 | ports: 18 | - protocol: TCP 19 | port: 3306 20 | - to: # second rule 21 | - podSelector: 22 | matchLabels: 23 | app: redis 24 | ports: 25 | - protocol: TCP 26 | port: 6379 -------------------------------------------------------------------------------- /19. 
Network Policy/np-example-5.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np-backend 5 | namespace: myapp # namespace for pod that gets the policy 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: backend 10 | policyTypes: 11 | - Egress 12 | egress: 13 | - to: # first rule 14 | - podSelector: 15 | matchLabels: 16 | app: mysql 17 | namespaceSelector: 18 | matchLabels: 19 | kubernetes.io/metadata.name: database 20 | ports: 21 | - protocol: TCP 22 | port: 3306 23 | - to: # second rule 24 | - podSelector: 25 | matchLabels: 26 | app: redis 27 | namespaceSelector: 28 | matchLabels: 29 | kubernetes.io/metadata.name: database 30 | ports: 31 | - protocol: TCP 32 | port: 6379 -------------------------------------------------------------------------------- /19. Network Policy/useful-links.md: -------------------------------------------------------------------------------- 1 | ##### network policy 2 | * Network Policies: https://kubernetes.io/docs/concepts/services-networking/network-policies/ 3 | -------------------------------------------------------------------------------- /InterviewQuestions.md: -------------------------------------------------------------------------------- 1 | # DevOps Interview Q&A (Practice with Guess and Reveal) 2 | 3 | --- 4 | 5 | ## 1. Self Introduction 6 |
7 | Click to reveal 8 | 9 | I am a DevOps/Cloud Engineer with strong experience in CI/CD pipelines, containerization, Kubernetes, cloud services like AWS, and infrastructure as code tools like Terraform. I focus on building scalable, automated, and secure systems. 10 | 11 |
12 | 13 | --- 14 | 15 | ## 2. Explain Detailed CI-CD Pipeline 16 |
17 | Click to reveal 18 | 19 | A CI/CD pipeline automates building, testing, and deploying applications: 20 | - CI: Code → Build → Test → Artifact. 21 | - CD: Deploy → Staging/Prod. 22 | Common tools: Jenkins, GitHub Actions, GitLab CI, ArgoCD. 23 | 24 |
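As a rough sketch of what the CI and CD stages boil down to on the command line — the registry, image name, and test command are placeholders, and a real pipeline would live in a Jenkinsfile or workflow file:

```bash
# CI: build, test, and publish an artifact (placeholder registry/image/test command)
docker build -t registry.example.com/myapp:${GIT_COMMIT} .
docker run --rm registry.example.com/myapp:${GIT_COMMIT} npm test
docker push registry.example.com/myapp:${GIT_COMMIT}

# CD: roll the new image out to the target environment
kubectl set image deployment/myapp myapp=registry.example.com/myapp:${GIT_COMMIT}
kubectl rollout status deployment/myapp
```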
25 | 26 | --- 27 | 28 | ## 3. Which Branching Strategy You Have Used in Your Company 29 |
30 | Click to reveal 31 | 32 | We used **Gitflow strategy**: 33 | - `main` (Production ready) 34 | - `develop` (Ongoing development) 35 | - `feature/*` (New features) 36 | - `release/*` (Prepare releases) 37 | - `hotfix/*` (Urgent fixes) 38 | 39 |
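A rough sketch of the day-to-day commands under that strategy, with made-up branch and version names:

```bash
# Start a feature from develop
git checkout develop && git pull
git checkout -b feature/login-page
git push -u origin feature/login-page        # work, commit, then open a PR into develop

# Cut a release and merge it back to main and develop
git checkout -b release/1.4.0 develop
git checkout main && git merge --no-ff release/1.4.0 && git tag v1.4.0
git checkout develop && git merge --no-ff release/1.4.0
```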
40 | 41 | --- 42 | 43 | ## 4. How to Build and Deploy an Application Using Docker 44 |
45 | Click to reveal 46 | 47 | - Create a `Dockerfile`. 48 | - Build: `docker build -t app:v1 .` 49 | - Run: `docker run -d -p 8080:8080 app:v1` 50 | - Push to a Docker registry if needed. 51 | 52 |
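Putting those steps together end to end — the `myorg` registry namespace is just an example:

```bash
docker build -t app:v1 .                        # build from the Dockerfile in the current directory
docker run -d -p 8080:8080 --name app app:v1    # run it, mapping host port 8080
docker logs -f app                              # confirm it started

docker tag app:v1 myorg/app:v1                  # retag for the registry
docker login
docker push myorg/app:v1
```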
53 | 54 | --- 55 | 56 | ## 5. What is the Difference Between Feature Branch and Release Branch 57 |
58 | Click to reveal 59 | 60 | | Feature Branch | Release Branch | 61 | |----------------|----------------| 62 | | New features | Finalize version | 63 | | Merged into `develop` | Merged into `main` and `develop` | 64 | | Short-lived | More stable for QA | 65 | 66 |
67 | 68 | --- 69 | 70 | ## 6. Who Merges the Code in Your Team 71 |
72 | Click to reveal 73 | 74 | Developers raise a Pull Request (PR). 75 | After successful review and passing tests, Team Lead or Reviewer merges it to main branches. 76 | 77 |
78 | 79 | --- 80 | 81 | ## 7. What is Dockerfile, Docker Image, and Docker Containers 82 |
83 | Click to reveal 84 | 85 | - **Dockerfile:** Instructions to build a container image. 86 | - **Docker Image:** Built artifact containing application. 87 | - **Docker Container:** Running instance of an image. 88 | 89 |
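The three fit together like this; a throwaway example assuming a local `index.html` exists:

```bash
# Dockerfile -> image -> container
cat > Dockerfile <<'EOF'
FROM nginx:alpine
COPY index.html /usr/share/nginx/html/index.html
EOF

docker build -t hello-web:v1 .          # the Dockerfile produces an image
docker run -d -p 8080:80 hello-web:v1   # the image runs as a container
docker ps                               # the container is the live instance
```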
90 | 91 | --- 92 | 93 | ## 8. Difference Between Docker ADD and Docker COPY 94 |
95 | Click to reveal 96 | 97 | - **COPY:** Copies local files/folders into container. 98 | - **ADD:** Copies + auto-extracts archives and supports URLs. 99 | 100 |
101 | 102 | --- 103 | 104 | ## 9. How Many Pods Are Present in One Node and How Many Containers in One Pod 105 |
106 | Click to reveal 107 | 108 | - **Pods per Node:** Limited by the kubelet's `--max-pods` setting (110 by default) and by the node's resources (CPU, RAM). 109 | - **Containers per Pod:** One or more (usually one, unless a tightly coupled helper such as a sidecar is needed). 110 | 111 | 
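To check the actual limits on a given cluster (the 110-pod figure is only the common kubelet default):

```bash
# Allocatable pod count per node
kubectl get nodes -o custom-columns=NAME:.metadata.name,PODS:.status.allocatable.pods

# CPU/memory already claimed on a node, which also caps how many pods fit
kubectl describe node <node-name> | grep -A 6 "Allocated resources"
```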
112 | 113 | --- 114 | 115 | ## 10. Efficient Way: Pod with One Container or Multiple Containers 116 |
117 | Click to reveal 118 | 119 | - **One container per pod** is efficient for simple apps. 120 | - **Multiple containers per pod** used when containers are tightly coupled (e.g., sidecar pattern). 121 | 122 |
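A minimal sidecar sketch, applied from a heredoc — the images, shared log path, and commands are illustrative only:

```bash
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: app-with-sidecar
spec:
  volumes:
    - name: logs
      emptyDir: {}          # shared between the two containers
  containers:
    - name: app
      image: busybox
      command: ["sh", "-c", "while true; do date >> /var/log/app.log; sleep 5; done"]
      volumeMounts:
        - name: logs
          mountPath: /var/log
    - name: log-sidecar
      image: busybox
      command: ["sh", "-c", "touch /var/log/app.log; tail -f /var/log/app.log"]
      volumeMounts:
        - name: logs
          mountPath: /var/log
EOF
```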
123 | 124 | --- 125 | 126 | ## 11. How to Measure Metrics in Kubernetes 127 |
128 | Click to reveal 129 | 130 | Use tools like: 131 | - Prometheus + Grafana 132 | - Metrics-server 133 | - kube-state-metrics 134 | They collect CPU, memory, and pod statuses. 135 | 136 |
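Once metrics-server is installed, the quickest checks look like this:

```bash
kubectl top nodes                      # node CPU/memory usage
kubectl top pods -A --sort-by=cpu      # heaviest pods across all namespaces

# The raw resource metrics API that metrics-server serves
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes
```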
137 | 138 | --- 139 | 140 | ## 12. What is Port of DNS, SSH, and HTTPS 141 |
142 | Click to reveal 143 | 144 | | Service | Port | 145 | |---------|------| 146 | | DNS | 53 | 147 | | SSH | 22 | 148 | | HTTPS | 443 | 149 | 150 |
151 | 152 | --- 153 | 154 | ## 13. If You Have New EC2 and RDS How Can You Connect RDS with EC2 155 |
156 | Click to reveal 157 | 158 | - Ensure EC2 and RDS are in same VPC/Subnet. 159 | - Configure RDS security group to allow EC2 access. 160 | - Connect using RDS endpoint and database credentials. 161 | 162 |
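A hedged sketch with placeholder security-group IDs and endpoint, assuming a PostgreSQL RDS instance:

```bash
# Let the EC2 instance's security group reach the RDS port
aws ec2 authorize-security-group-ingress \
  --group-id sg-RDS_PLACEHOLDER \
  --protocol tcp --port 5432 \
  --source-group sg-EC2_PLACEHOLDER

# From the EC2 instance, connect with the RDS endpoint and credentials
psql -h mydb.xxxxxx.us-east-1.rds.amazonaws.com -U dbuser -d mydb
```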
163 | 164 | --- 165 | 166 | ## 14. Explain S3 Storage Classes 167 |
168 | Click to reveal 169 | 170 | | Storage Class | Purpose | 171 | |---------------|---------| 172 | | Standard | Frequent access | 173 | | Intelligent-Tiering | Auto-cost optimization | 174 | | Standard-IA | Infrequent access | 175 | | One Zone-IA | Lower cost, one AZ | 176 | | Glacier | Archiving | 177 | | Glacier Deep Archive | Long-term archival | 178 | 179 |
180 | 181 | --- 182 | 183 | ## 15. Any Idea on AWS S3 Bucket Backup 184 |
185 | Click to reveal 186 | 187 | Backup options: 188 | - Enable **Versioning**. 189 | - Set **Lifecycle policies**. 190 | - Use **Cross-Region Replication**. 191 | - Use **AWS Backup service**. 192 | 193 |
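For example, versioning plus a lifecycle rule can be switched on from the CLI — the bucket name and retention period are placeholders, and the exact lifecycle JSON should be checked against the S3 docs:

```bash
aws s3api put-bucket-versioning \
  --bucket my-bucket \
  --versioning-configuration Status=Enabled

# Expire noncurrent (old) object versions after 90 days
aws s3api put-bucket-lifecycle-configuration \
  --bucket my-bucket \
  --lifecycle-configuration '{"Rules":[{"ID":"expire-old-versions","Status":"Enabled","Filter":{"Prefix":""},"NoncurrentVersionExpiration":{"NoncurrentDays":90}}]}'
```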
194 | 195 | --- 196 | 197 | ## 16. How to Create AWS Lambda and How It Works 198 |
199 | Click to reveal 200 | 201 | - Create Lambda in AWS Console or CLI. 202 | - Choose runtime (Python, Node.js, etc.) 203 | - Add trigger (S3, API Gateway, etc.) 204 | - Lambda runs your code automatically when triggered. 205 | 206 |
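From the CLI the flow looks roughly like this — the function name, handler, and IAM role ARN are placeholders:

```bash
zip function.zip lambda_function.py

aws lambda create-function \
  --function-name my-func \
  --runtime python3.12 \
  --handler lambda_function.lambda_handler \
  --zip-file fileb://function.zip \
  --role arn:aws:iam::123456789012:role/my-lambda-role

aws lambda invoke --function-name my-func response.json && cat response.json
```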
207 | 208 | --- 209 | 210 | ## 17. How to Store Terraform Statefile and Secure It 211 |
212 | Click to reveal 213 | 214 | - Store remotely in S3 bucket with encryption. 215 | - Use **DynamoDB** for state locking. 216 | - Apply strict **IAM policies** to restrict access. 217 | - Enable **versioning** on S3 bucket. 218 | 219 |
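Assuming the configuration declares an empty `backend "s3" {}` block, the remote backend can be wired up at init time — bucket, key, and table names are placeholders:

```bash
terraform init \
  -backend-config="bucket=my-tf-state-bucket" \
  -backend-config="key=prod/terraform.tfstate" \
  -backend-config="region=us-east-1" \
  -backend-config="encrypt=true" \
  -backend-config="dynamodb_table=tf-state-lock"
```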
220 | 221 | --- 222 | 223 | ## 18. I Stored Statefile in Remote and Current State is Different, Can I Update? 224 |
225 | Click to reveal 226 | 227 | Yes: 228 | - Use `terraform refresh` (or the newer `terraform apply -refresh-only`) to update the state file so it matches the real infrastructure. 229 | - Or run `terraform plan` and `terraform apply` to bring the infrastructure back in line with the configuration and recorded state. 230 | 231 | 
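In practice that is usually a refresh-only cycle first, then a normal plan/apply if the drift should be reverted:

```bash
terraform plan -refresh-only     # show how real infrastructure differs from the state
terraform apply -refresh-only    # accept the drift into the state (modern form of `terraform refresh`)

terraform plan                   # or: see what it takes to push infrastructure back to the config
terraform apply
```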
232 | 233 | --- 234 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DevOps 2 | 3 | Distributed Systems Check here: 4 | - [Distributed Systems](https://github.com/rishavmehra/distributedsystems) 5 | 6 | ## Kubernetes 7 | 8 | 📄 **Reference:** 9 | - [📌 kubectl Cheat Sheet](https://kubernetes.io/docs/reference/kubectl/quick-reference/) 10 | - [📘 Pods Documentation](https://kubernetes.io/docs/concepts/workloads/pods/) 11 | 12 | --- 13 | 14 |
15 | Most Used Commands 16 |
17 |

 18 | $ kubectl get pods
 19 | $ kubectl run nginx --image=nginx --dry-run=client -o yaml
 20 | $ kubectl describe pod 
 21 | $ kubectl get pod  -o yaml
 22 | 
23 |
24 | 25 | ## 🔧 Exercise 1: Working with Pods (Declarative Only) 26 | 27 | ### 🎯 Task: 28 | > Create a **Pod** in the **default namespace** using a YAML file with the following specifications: 29 | > 30 | > - Use any **container image** of your choice. 31 | > - The pod must have **2 containers**. 32 | > - After applying the YAML, **verify** that the pod is running. 33 | 34 | ### 📂 Solution: 35 | 👉 [00. Exercises/Pod/Pod.yaml](00.%20Exercises/Pod/Pod.yaml) 36 | 37 | --- 38 | 39 | ## 🔧 Exercise 2: Expose the App 40 | 41 | ### 🎯 Task: 42 | > Create a service to expose a Pod in Kubernetes using different service types: 43 | > 44 | > - Create a Pod running an Nginx container that exposes ports 80 and 443. 45 | > - Expose the Pod with a NodePort service on port 30007. 46 | > - Create a LoadBalancer service that exposes the HTTPS port with TLS passthrough. 47 | > - Create another Pod with httpd that shares the same label to demonstrate service selector functionality. 48 | 49 | ### 📂 Solution: 50 | 👉 [00. Exercises/expose-the-app/manifest.yaml](00.%20Exercises/expose-the-app/manifest.yaml) - Nginx Pod 51 | 👉 [00. Exercises/expose-the-app/manifest2.yaml](00.%20Exercises/expose-the-app/manifest2.yaml) - Apache httpd Pod 52 | 👉 [00. Exercises/expose-the-app/svc.yaml](00.%20Exercises/expose-the-app/svc.yaml) - NodePort Service 53 | 👉 [00. Exercises/expose-the-app/svc-lb.yaml](00.%20Exercises/expose-the-app/svc-lb.yaml) - LoadBalancer Service 54 | 55 | #### 📚 Key Concepts: 56 | - **Services** act as stable network endpoints for Pods 57 | - **NodePort** exposes the service on each node's IP at a static port 58 | - **LoadBalancer** provisions an external load balancer to route traffic to the service 59 | - **Selectors** determine which Pods a Service targets based on labels 60 | 61 | --- 62 | 63 | ## 🔧 Exercise 3: Ingress from First Principle 64 | 65 | ### 🎯 Task: 66 | > Implement a multi-service architecture with a custom Nginx-based ingress controller: 67 | > 68 | > - Create separate namespaces for frontend and backend applications 69 | > - Deploy a PostgreSQL database 70 | > - Deploy a frontend application (httpd) and a backend service (bun) 71 | > - Implement a custom Nginx reverse proxy to route requests based on domain names: 72 | > - k8s.rshv.xyz(use your own domain) should route to the backend service 73 | > - k8s2.rshv.xyz(use your own domain) should route to the frontend service 74 | 75 | ### 📂 Solution: 76 | 👉 [00. Exercises/ingress-first-principle/fe.yaml](00.%20Exercises/ingress-first-principle/fe.yaml) - Frontend Application 77 | 👉 [00. Exercises/ingress-first-principle/backend.yaml](00.%20Exercises/ingress-first-principle/backend.yaml) - Backend Application 78 | 👉 [00. Exercises/ingress-first-principle/db.yaml](00.%20Exercises/ingress-first-principle/db.yaml) - Database Deployment 79 | 👉 [00. 
Exercises/ingress-first-principle/reverse-proxy.yaml](00.%20Exercises/ingress-first-principle/reverse-proxy.yaml) - Custom Nginx Ingress 80 | 81 | #### 📚 Key Concepts: 82 | - **Namespaces** provide isolation between different application components 83 | - **ConfigMaps** store configuration data like Nginx configuration 84 | - **Service Discovery** using Kubernetes DNS (servicename.namespace.svc.cluster.local) 85 | - **Custom Ingress Controller** using Nginx reverse proxy for domain-based routing 86 | 87 | ## 🔧 Exercise 4: Kubernetes Native Ingress 88 | 89 | ### 🎯 Task: 90 | > Implement path-based routing using Kubernetes native Ingress resources: 91 | > 92 | > - Deploy two applications: a backend using Nginx and a frontend using Apache httpd 93 | > - Create ClusterIP services for both applications 94 | > - Configure a Kubernetes Ingress resource to route traffic based on URL paths: 95 | > - /backend path should route to the backend service 96 | > - /frontend path should route to the frontend service 97 | > - Use annotations to configure URL rewriting 98 | 99 | ### 📂 Solution: 100 | 👉 [00. Exercises/ingress/manifest-be.yaml](00.%20Exercises/ingress/manifest-be.yaml) - Backend Deployment and Service 101 | 👉 [00. Exercises/ingress/manifest-fe.yaml](00.%20Exercises/ingress/manifest-fe.yaml) - Frontend Deployment and Service 102 | 👉 [00. Exercises/ingress/ingress.yaml](00.%20Exercises/ingress/ingress.yaml) - Ingress Resource 103 | 104 | #### 📚 Key Concepts: 105 | - **Ingress Resources** provide HTTP/HTTPS routing, path-based routing, and name-based virtual hosting 106 | - **IngressClass** specifies which controller should implement the Ingress 107 | - **Path Types** define how paths are matched (Prefix, Exact, ImplementationSpecific) 108 | - **Annotations** configure controller-specific behaviors like URL rewriting 109 | - **Ingress Controller** is the component that actually implements the Ingress rules (typically Nginx) 110 | 111 | --- 112 | # HELM 113 | [Cheat Sheet](https://helm.sh/docs/intro/cheatsheet/) 114 | ``` 115 | // Install a chart from a local directory 116 | helm create mychart 117 | helm install mychartrelease mychart 118 | 119 | // Uninstall a release 120 | helm uninstall mychartrelease 121 | 122 | // List releases 123 | helm list -a 124 | 125 | // upgrade the the helm chart 126 | helm upgrade mychartrelease mychart 127 | 128 | // rollback to a previous version 129 | helm rollback mychartrelease 1 130 | 131 | // debug 132 | helm install --dry-run --debug mychartrelease mychart 133 | 134 | // helm template 135 | helm template mychart 136 | 137 | 138 | // helm lint 139 | helm lint mychart 140 | 141 | ``` -------------------------------------------------------------------------------- /app/Readme.md: -------------------------------------------------------------------------------- 1 | # K8s 2 | 3 | ## Kubeconfig file, GVK/GVR, K8s rest API 4 | 5 | ```bash 6 | # Generate RSA Key 7 | openssl genrsa -out rishav.key 2048 8 | 9 | # Create Certificate Signing Request (CSR) 10 | openssl req -new -key rishav.key -out developer.csr -subj "/CN=rishav" 11 | 12 | # Base64 Encode CSR 13 | cat developer.csr | base64 | tr -d '\n' 14 | ``` 15 | 16 | --- 17 | 18 | ### CSR YAML Configuration 19 | 20 | ```yaml 21 | apiVersion: certificates.k8s.io/v1 22 | kind: CertificateSigningRequest 23 | metadata: 24 | name: rishav 25 | spec: 26 | request: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1p6Q0NBVThDQVFBd0lqRVBNQTBHQTFVRUF3d0djMkZwZVdGdE1ROHdEUVlEVlFRS0RBWm5jbTkxY0RFdwpnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDOWRSLzhvcUl4R3Vsa0IxS2Q1aTJFClZpM0g1SkpmbjlZWk54WWRGMm41UUU0bFhvSmpwbmtzeEVrWWIwZUlKWkdHUXFYMzhIN2RQRi9UN3A2RVJDWWQKNmhJNHRxbFFRa0lFT202RnNKTVJUZ3p2VHdjNzFMSitnRVFXbkEwTVA5MUVqNzBGT0xFU1ZOb1lvd2huNllWTQpCbXhHTUdOWW1RRjVQRGw3VlBSNTFZenRsaHZvVkpKSnJoUXN1empVNGJnUGxGOURsaEZyQThXcHdEU2d2N2xLCkkwa25ZWjdVQkVFRnJweUU5VU9EMmlFVHFXMnlIaFc0Ym0rY0RMRFJQS3hrYytpaUlldjBqWVN4RzIyUUt0VE8KR1J0bS9LT1NkQTVZOG5DZ3JTZk9adGNFVExhMGc1RzlhNEtFRHNoK2wxTWNraVdPbS9oSUpsVDFUQnIxSm9qVgpBZ01CQUFHZ0FEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFDTmxkbkErcFdrZE1NNGV3Uk1LZXpvK0hwY215CnhOSjJ6OFJPMkQrZ2p6NXBZNjlRMlFaRFRKSkZnUTVpRnFVUGFQNDc4ZHdxSUMwTFVGMXh2Nkh5Z3d4aElWZkYKT3N0MzZhcTZIRCtFM1lCazNMRzhSbGlUMTh6UXUzYS9Nd3dGWVFxV0xCcGNhUU84ZUpPVlZ5Ty9URk82YWUrQQpzYzRJSlUvRDZUNjNWZS9NM3B2Qkd4cHRabnNPTC81cUhlL1p4MnhXMlU2Yksyb290cDBjQjZOaHFITVVwcGJ0ClEzYkRIbGl4dVV2TzI0cFVMeC9CZVA4SHV2R1RnUUpzemJIeGt4TDR5cDZkMDNRZm42RjdaeWFhTnlTMGtqamQKWWhqd0E2Q3FjQXdVOXFwWlJCdkVFYUpuTk5JMHlnUEs4M1ZRYnN5VEFoQk01YWZvYWpOb3h3OTVHQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo= 27 | signerName: kubernetes.io/kube-apiserver-client 28 | usages: 29 | - client auth 30 | ``` 31 | 32 | --- 33 | 34 | ```bash 35 | # Apply CSR 36 | kubectl apply -f csr.yaml 37 | 38 | # Approve CSR 39 | kubectl certificate approve rishav 40 | 41 | # Retrieve and Decode the Certificate 42 | kubectl get csr rishav -o jsonpath='{.status.certificate}' | base64 --decode > rishav.crt 43 | ``` 44 | 45 | ### Role and RoleBinding Configuration 46 | 47 | ```yaml 48 | kind: Role 49 | apiVersion: rbac.authorization.k8s.io/v1 50 | metadata: 51 | namespace: default 52 | name: pod-reader 53 | rules: 54 | - apiGroups: [""] 55 | resources: ["pods"] 56 | verbs: ["get", "watch", "list"] 57 | --- 58 | kind: RoleBinding 59 | apiVersion: rbac.authorization.k8s.io/v1 60 | metadata: 61 | name: read-pods 62 | namespace: default 63 | subjects: 64 | - kind: User 65 | name: rishav 66 | apiGroup: rbac.authorization.k8s.io 67 | roleRef: 68 | kind: Role 69 | name: pod-reader 70 | apiGroup: rbac.authorization.k8s.io 71 | ``` 72 | 73 | ```bash 74 | # Apply Role and RoleBinding 75 | kubectl apply -f role.yaml 76 | 77 | # Set Credentials and Context 78 | kubectl config set-credentials rishav --client-certificate=rishav.crt --client-key=rishav.key 79 | kubectl config get-contexts 80 | kubectl config set-context rishav-context --cluster=kubernetes --namespace=default --user=rishav 81 | kubectl config use-context rishav-context 82 | 83 | # Deploy nginx 84 | kubectl run nginx --image=nginx 85 | ``` 86 | 87 | ### GVR | GVK -> [Read Here](https://jamy.hashnode.dev/understanding-kubernetes-gvk-and-gvr-in-60-seconds) 88 | 89 | ### Merging Multiple KubeConfig Files 90 | 91 | ```bash 92 | export KUBECONFIG=/path/to/first/config:/path/to/second/config:/path/to/third/config 93 | ``` 94 | 95 | --- 96 | 97 | ### Deployment JSON Creation 98 | 99 | ```bash 100 | kubectl create deployment nginx --image=nginx --dry-run=client -o json > deploy.json 101 | kubectl run nginx --image=nginx --dry-run=client -o json 102 | ``` 103 | 104 | ### Service Account (SA) Creation 105 | 106 | ```bash 107 | kubectl create serviceaccount sam --namespace default 108 | kubectl create clusterrolebinding sam-clusteradmin-binding --clusterrole=cluster-admin --serviceaccount=default:sam 109 | kubectl create token sam 110 | 111 | # Set Token and API Server 112 | 
TOKEN=outputfromabove 113 | APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}') 114 | 115 | # List Deployments 116 | curl -X GET $APISERVER/apis/apps/v1/namespaces/default/deployments -H "Authorization: Bearer $TOKEN" -k 117 | 118 | # Create Deployment 119 | curl -X POST $APISERVER/apis/apps/v1/namespaces/default/deployments \ 120 | -H "Authorization: Bearer $TOKEN" \ 121 | -H 'Content-Type: application/json' \ 122 | -d @deploy.json \ 123 | -k 124 | 125 | # List Pods 126 | curl -X GET $APISERVER/api/v1/namespaces/default/pods \ 127 | -H "Authorization: Bearer $TOKEN" \ 128 | -k 129 | ``` 130 | 131 | --- 132 | 133 | ## YAML, Pod, Pod lifecycle, init containers, sidecar containers 134 | 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /app/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, render_template, request, redirect, url_for 2 | import psycopg2 3 | from psycopg2 import sql, Error 4 | import os 5 | 6 | app = Flask(__name__) 7 | 8 | def create_connection(): 9 | try: 10 | connection = psycopg2.connect( 11 | user=os.getenv('DB_USERNAME'), 12 | password=os.getenv('DB_PASSWORD'), 13 | host=os.getenv('DB_HOST'), 14 | port=os.getenv('DB_PORT'), 15 | database=os.getenv('DB_NAME') 16 | 17 | ) 18 | return connection 19 | except Error as e: 20 | print("Error while connecting to PostgreSQL", e) 21 | return None 22 | 23 | @app.route('/', methods=['GET']) 24 | def index(): 25 | connection = create_connection() 26 | if connection: 27 | cursor = connection.cursor() 28 | cursor.execute("SELECT * FROM goals") 29 | goals = cursor.fetchall() 30 | cursor.close() 31 | connection.close() 32 | return render_template('index.html', goals=goals) 33 | else: 34 | return "Error connecting to the PostgreSQL database", 500 35 | 36 | @app.route('/add_goal', methods=['POST']) 37 | def add_goal(): 38 | goal_name = request.form.get('goal_name') 39 | if goal_name: 40 | connection = create_connection() 41 | if connection: 42 | cursor = connection.cursor() 43 | cursor.execute("INSERT INTO goals (goal_name) VALUES (%s)", (goal_name,)) 44 | connection.commit() 45 | cursor.close() 46 | connection.close() 47 | return redirect(url_for('index')) 48 | 49 | @app.route('/remove_goal', methods=['POST']) 50 | def remove_goal(): 51 | goal_id = request.form.get('goal_id') 52 | if goal_id: 53 | connection = create_connection() 54 | if connection: 55 | cursor = connection.cursor() 56 | cursor.execute("DELETE FROM goals WHERE id = %s", (goal_id,)) 57 | connection.commit() 58 | cursor.close() 59 | connection.close() 60 | return redirect(url_for('index')) 61 | 62 | @app.route('/health', methods=['GET']) 63 | def health_check(): 64 | return "OK", 200 65 | 66 | if __name__ == '__main__': 67 | app.run(host='0.0.0.0', port=8080) 68 | -------------------------------------------------------------------------------- /app/certificate: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: app 5 | spec: 6 | secretName: app 7 | issuerRef: 8 | name: production-app 9 | kind: ClusterIssuer 10 | commonName: app-116-203-255-68.nip.io 11 | dnsNames: 12 | - app-116-203-255-68.nip.io 13 | -------------------------------------------------------------------------------- /app/cluster_issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: production-app 5 | spec: 6 | acme: 7 | # The ACME server URL 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | # Email address used for ACME registration 10 | email: demo@v1.com 11 | # Name of a secret used to store the ACME account private key 12 | privateKeySecretRef: 13 | name: app 14 | # Enable the HTTP-01 challenge provider 15 | solvers: 16 | - http01: 17 | ingress: 18 | class: nginx 19 | -------------------------------------------------------------------------------- /app/db-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | password: bmV3X3Bhc3N3b3Jk 4 | username: Z29hbHNfdXNlcg== 5 | kind: Secret 6 | metadata: 7 | name: my-postgresql-credentials -------------------------------------------------------------------------------- /app/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-app 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: my-app 10 | template: 11 | metadata: 12 | labels: 13 | app: my-app 14 | spec: 15 | containers: 16 | - name: my-app 17 | image: ttl.sh/rishav-v2/app:24h 18 | imagePullPolicy: Always 19 | env: 20 | - name: DB_USERNAME 21 | valueFrom: 22 | secretKeyRef: 23 | name: my-postgresql-credentials 24 | key: username 25 | - name: DB_PASSWORD 26 | valueFrom: 27 | secretKeyRef: 28 | name: my-postgresql-credentials 29 | key: password 30 | - name: DB_HOST 31 | value: my-postgresql-rw.default.svc.cluster.local 32 | - name: DB_PORT 33 | value: "5432" 34 | - name: DB_NAME 35 | value: goals_database 36 | ports: 37 | - containerPort: 8080 38 | readinessProbe: 39 | httpGet: 40 | path: /health 41 | port: 8080 42 | initialDelaySeconds: 5 43 | periodSeconds: 10 44 | livenessProbe: 45 | httpGet: 46 | path: /health 47 | port: 8080 48 | initialDelaySeconds: 15 49 | periodSeconds: 20 50 | resources: 51 | requests: 52 | memory: "350Mi" 53 | cpu: "250m" 54 | limits: 55 | memory: "500Mi" 56 | cpu: "500m" -------------------------------------------------------------------------------- /app/dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image 2 | FROM python:3.9-slim 3 | 4 | # Set the working directory in the container 5 | WORKDIR /app 6 | 7 | # Copy the requirements file into the container at /app 8 | COPY requirements.txt /app/ 9 | 10 | # Install any dependencies specified in requirements.txt 11 | RUN pip install --no-cache-dir -r requirements.txt 12 | 13 | # Copy the rest of the application code into the container at /app 14 | COPY . 
/app 15 | 16 | # Make port 8080 available to the world outside this container 17 | EXPOSE 8080 18 | 19 | # Define environment variable for Flask 20 | ENV FLASK_APP=app.py 21 | 22 | # Run the application using Gunicorn 23 | CMD ["gunicorn", "--bind", "0.0.0.0:8080", "app:app"] -------------------------------------------------------------------------------- /app/horizontal_scale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: my-app-hpa 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: my-app 10 | minReplicas: 1 11 | maxReplicas: 10 12 | metrics: 13 | - type: Resource 14 | resource: 15 | name: cpu 16 | target: 17 | type: Utilization 18 | averageUtilization: 20 19 | - type: Resource 20 | resource: 21 | name: memory 22 | target: 23 | type: AverageValue 24 | averageValue: 350Mi -------------------------------------------------------------------------------- /app/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: my-app-ingress 5 | annotations: 6 | cert-manager.io/cluster-issuer: production-app 7 | 8 | spec: 9 | ingressClassName: nginx 10 | rules: 11 | - host: app-116-203-255-68.nip.io 12 | http: 13 | paths: 14 | - path: / 15 | pathType: Prefix 16 | backend: 17 | service: 18 | name: my-app-service 19 | port: 20 | number: 80 21 | tls: 22 | - hosts: 23 | - app-116-203-255-68.nip.io 24 | secretName: app -------------------------------------------------------------------------------- /app/postgres-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: postgresql.cnpg.io/v1 2 | kind: Cluster 3 | metadata: 4 | name: my-postgresql 5 | namespace: default 6 | spec: 7 | instances: 3 8 | storage: 9 | size: 1Gi 10 | bootstrap: 11 | initdb: 12 | database: goals_database 13 | owner: goals_user 14 | secret: 15 | name: my-postgresql-credentials 16 | -------------------------------------------------------------------------------- /app/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask 2 | psycopg2-binary 3 | gunicorn 4 | -------------------------------------------------------------------------------- /app/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: my-app-service 5 | spec: 6 | selector: 7 | app: my-app 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 8080 -------------------------------------------------------------------------------- /app/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | demo 5 | 82 | 83 | 84 |
85 | 86 |

Rishav Demo

87 |
88 | 89 | 90 |
91 | {% if goals %} 92 | 103 | {% endif %} 104 |
105 | 108 | 113 | 114 | 115 | -------------------------------------------------------------------------------- /kind/config.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | image: kindest/node:v1.28.0 6 | - role: worker 7 | image: kindest/node:v1.28.0 8 | 9 | 10 | -------------------------------------------------------------------------------- /mychart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /mychart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: mychart 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "1.16.0" 25 | -------------------------------------------------------------------------------- /mychart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range $host := .Values.ingress.hosts }} 4 | {{- range .paths }} 5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} 6 | {{- end }} 7 | {{- end }} 8 | {{- else if contains "NodePort" .Values.service.type }} 9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mychart.fullname" . }}) 10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 11 | echo http://$NODE_IP:$NODE_PORT 12 | {{- else if contains "LoadBalancer" .Values.service.type }} 13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
14 | You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "mychart.fullname" . }}' 15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mychart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 16 | echo http://$SERVICE_IP:{{ .Values.service.port }} 17 | {{- else if contains "ClusterIP" .Values.service.type }} 18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "mychart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 19 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") 20 | echo "Visit http://127.0.0.1:8080 to use your application" 21 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /mychart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "mychart.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "mychart.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "mychart.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "mychart.labels" -}} 37 | helm.sh/chart: {{ include "mychart.chart" . }} 38 | {{ include "mychart.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "mychart.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "mychart.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "mychart.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "mychart.fullname" .) 
.Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /mychart/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "mychart.fullname" . }} 5 | labels: 6 | {{- include "mychart.labels" . | nindent 4 }} 7 | spec: 8 | {{- if not .Values.autoscaling.enabled }} 9 | replicas: {{ .Values.replicaCount }} 10 | {{- end }} 11 | selector: 12 | matchLabels: 13 | {{- include "mychart.selectorLabels" . | nindent 6 }} 14 | template: 15 | metadata: 16 | {{- with .Values.podAnnotations }} 17 | annotations: 18 | {{- toYaml . | nindent 8 }} 19 | {{- end }} 20 | labels: 21 | {{- include "mychart.labels" . | nindent 8 }} 22 | {{- with .Values.podLabels }} 23 | {{- toYaml . | nindent 8 }} 24 | {{- end }} 25 | spec: 26 | {{- with .Values.imagePullSecrets }} 27 | imagePullSecrets: 28 | {{- toYaml . | nindent 8 }} 29 | {{- end }} 30 | serviceAccountName: {{ include "mychart.serviceAccountName" . }} 31 | {{- with .Values.podSecurityContext }} 32 | securityContext: 33 | {{- toYaml . | nindent 8 }} 34 | {{- end }} 35 | containers: 36 | - name: {{ .Chart.Name }} 37 | {{- with .Values.securityContext }} 38 | securityContext: 39 | {{- toYaml . | nindent 12 }} 40 | {{- end }} 41 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 42 | imagePullPolicy: {{ .Values.image.pullPolicy }} 43 | ports: 44 | - name: http 45 | containerPort: {{ .Values.service.port }} 46 | protocol: TCP 47 | {{- with .Values.livenessProbe }} 48 | livenessProbe: 49 | {{- toYaml . | nindent 12 }} 50 | {{- end }} 51 | {{- with .Values.readinessProbe }} 52 | readinessProbe: 53 | {{- toYaml . | nindent 12 }} 54 | {{- end }} 55 | {{- with .Values.resources }} 56 | resources: 57 | {{- toYaml . | nindent 12 }} 58 | {{- end }} 59 | {{- with .Values.volumeMounts }} 60 | volumeMounts: 61 | {{- toYaml . | nindent 12 }} 62 | {{- end }} 63 | {{- with .Values.volumes }} 64 | volumes: 65 | {{- toYaml . | nindent 8 }} 66 | {{- end }} 67 | {{- with .Values.nodeSelector }} 68 | nodeSelector: 69 | {{- toYaml . | nindent 8 }} 70 | {{- end }} 71 | {{- with .Values.affinity }} 72 | affinity: 73 | {{- toYaml . | nindent 8 }} 74 | {{- end }} 75 | {{- with .Values.tolerations }} 76 | tolerations: 77 | {{- toYaml . | nindent 8 }} 78 | {{- end }} 79 | -------------------------------------------------------------------------------- /mychart/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.autoscaling.enabled }} 2 | apiVersion: autoscaling/v2 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: {{ include "mychart.fullname" . }} 6 | labels: 7 | {{- include "mychart.labels" . | nindent 4 }} 8 | spec: 9 | scaleTargetRef: 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | name: {{ include "mychart.fullname" . 
}} 13 | minReplicas: {{ .Values.autoscaling.minReplicas }} 14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 15 | metrics: 16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} 17 | - type: Resource 18 | resource: 19 | name: cpu 20 | target: 21 | type: Utilization 22 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 23 | {{- end }} 24 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} 25 | - type: Resource 26 | resource: 27 | name: memory 28 | target: 29 | type: Utilization 30 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /mychart/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: {{ include "mychart.fullname" . }} 6 | labels: 7 | {{- include "mychart.labels" . | nindent 4 }} 8 | {{- with .Values.ingress.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | spec: 13 | {{- with .Values.ingress.className }} 14 | ingressClassName: {{ . }} 15 | {{- end }} 16 | {{- if .Values.ingress.tls }} 17 | tls: 18 | {{- range .Values.ingress.tls }} 19 | - hosts: 20 | {{- range .hosts }} 21 | - {{ . | quote }} 22 | {{- end }} 23 | secretName: {{ .secretName }} 24 | {{- end }} 25 | {{- end }} 26 | rules: 27 | {{- range .Values.ingress.hosts }} 28 | - host: {{ .host | quote }} 29 | http: 30 | paths: 31 | {{- range .paths }} 32 | - path: {{ .path }} 33 | {{- with .pathType }} 34 | pathType: {{ . }} 35 | {{- end }} 36 | backend: 37 | service: 38 | name: {{ include "mychart.fullname" $ }} 39 | port: 40 | number: {{ $.Values.service.port }} 41 | {{- end }} 42 | {{- end }} 43 | {{- end }} 44 | -------------------------------------------------------------------------------- /mychart/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "mychart.fullname" . }} 5 | labels: 6 | {{- include "mychart.labels" . | nindent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | selector: 15 | {{- include "mychart.selectorLabels" . | nindent 4 }} 16 | -------------------------------------------------------------------------------- /mychart/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "mychart.serviceAccountName" . }} 6 | labels: 7 | {{- include "mychart.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | automountServiceAccountToken: {{ .Values.serviceAccount.automount }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /mychart/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: "{{ include "mychart.fullname" . }}-test-connection" 5 | labels: 6 | {{- include "mychart.labels" . 
| nindent 4 }} 7 | annotations: 8 | "helm.sh/hook": test 9 | spec: 10 | containers: 11 | - name: wget 12 | image: busybox 13 | command: ['wget'] 14 | args: ['{{ include "mychart.fullname" . }}:{{ .Values.service.port }}'] 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /mychart/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for mychart. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | # This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ 6 | replicaCount: 2 7 | 8 | # This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/ 9 | image: 10 | repository: nginx 11 | # This sets the pull policy for images. 12 | pullPolicy: IfNotPresent 13 | # Overrides the image tag whose default is the chart appVersion. 14 | tag: "" 15 | 16 | # This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 17 | imagePullSecrets: [] 18 | # This is to override the chart name. 19 | nameOverride: "" 20 | fullnameOverride: "" 21 | 22 | # This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ 23 | serviceAccount: 24 | # Specifies whether a service account should be created 25 | create: true 26 | # Automatically mount a ServiceAccount's API credentials? 27 | automount: true 28 | # Annotations to add to the service account 29 | annotations: {} 30 | # The name of the service account to use. 31 | # If not set and create is true, a name is generated using the fullname template 32 | name: "" 33 | 34 | # This is for setting Kubernetes Annotations to a Pod. 35 | # For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ 36 | podAnnotations: {} 37 | # This is for setting Kubernetes Labels to a Pod. 
38 | # For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ 39 | podLabels: {} 40 | 41 | podSecurityContext: {} 42 | # fsGroup: 2000 43 | 44 | securityContext: {} 45 | # capabilities: 46 | # drop: 47 | # - ALL 48 | # readOnlyRootFilesystem: true 49 | # runAsNonRoot: true 50 | # runAsUser: 1000 51 | 52 | # This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/ 53 | service: 54 | # This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types 55 | type: NodePort 56 | # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports 57 | port: 80 58 | 59 | # This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/ 60 | ingress: 61 | enabled: false 62 | className: "" 63 | annotations: {} 64 | # kubernetes.io/ingress.class: nginx 65 | # kubernetes.io/tls-acme: "true" 66 | hosts: 67 | - host: chart-example.local 68 | paths: 69 | - path: / 70 | pathType: ImplementationSpecific 71 | tls: [] 72 | # - secretName: chart-example-tls 73 | # hosts: 74 | # - chart-example.local 75 | 76 | resources: {} 77 | # We usually recommend not to specify default resources and to leave this as a conscious 78 | # choice for the user. This also increases chances charts run on environments with little 79 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 80 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 81 | # limits: 82 | # cpu: 100m 83 | # memory: 128Mi 84 | # requests: 85 | # cpu: 100m 86 | # memory: 128Mi 87 | 88 | # This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ 89 | livenessProbe: 90 | httpGet: 91 | path: / 92 | port: http 93 | readinessProbe: 94 | httpGet: 95 | path: / 96 | port: http 97 | 98 | # This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ 99 | autoscaling: 100 | enabled: false 101 | minReplicas: 1 102 | maxReplicas: 100 103 | targetCPUUtilizationPercentage: 80 104 | # targetMemoryUtilizationPercentage: 80 105 | 106 | # Additional volumes on the output Deployment definition. 107 | volumes: [] 108 | # - name: foo 109 | # secret: 110 | # secretName: mysecret 111 | # optional: false 112 | 113 | # Additional volumeMounts on the output Deployment definition. 114 | volumeMounts: [] 115 | # - name: foo 116 | # mountPath: "/etc/foo" 117 | # readOnly: true 118 | 119 | nodeSelector: {} 120 | 121 | tolerations: [] 122 | 123 | affinity: {} 124 | --------------------------------------------------------------------------------