├── learning ├── What-is-Pod-in-Kubernetes │ ├── livenessprobe.yaml │ ├── readinessprobe.yaml │ ├── deployment.yaml │ ├── pod.yaml │ └── README.md ├── Kubernetes-components-overview │ ├── service.yaml │ ├── my-pod.yaml │ ├── my-svc.yaml │ ├── job-example.yaml │ ├── my-nginx-rc.yaml │ ├── my-nginx-ds.yaml │ └── cronjob-example.yaml ├── Managing-ConfigMaps-and-Secrets │ ├── configmap-literal.yaml │ ├── pod-with-config.yaml │ └── README.md ├── kubernetes-for-everyone │ ├── service.yaml │ ├── pod.yaml │ ├── deployment.yaml │ ├── statefulset.yaml │ └── README.md ├── Network-Policies-in-K8s │ ├── default-deny.yaml │ ├── allow-frontend.yaml │ └── README.md ├── RBAC-Access-Control │ ├── role.yaml │ └── README.md ├── Deploy-DaemonSets-Service-in-Kubernetes │ ├── fluentd.yaml │ └── README.md ├── Deploying-an-Application-on-Kubernetes │ ├── service.yaml │ ├── deployment.yaml │ └── README.md └── Monitoring-and-Logging │ ├── fluentd-ds.yaml │ └── README.md ├── projects ├── 10-microservices-deployment-eks │ ├── sa.yaml │ ├── sec.yaml │ ├── bind.yaml │ ├── rol.yaml │ ├── Jenkinsfile │ └── README.md ├── Uber-Clone-DevSecOps │ ├── jenkinsfile │ └── README.md └── Deploying-Spring-Boot-K8S │ └── README.md ├── CKAD-exercises ├── README.md ├── i.crd.md ├── h.helm.md ├── CODE_OF_CONDUCT.md ├── b.multi_container_pods.md ├── e.observability.md ├── j.podman.md ├── f.services.md ├── g.state.md ├── a.core_concepts.md └── d.configuration.md └── README.md /learning/What-is-Pod-in-Kubernetes/livenessprobe.yaml: -------------------------------------------------------------------------------- 1 | livenessProbe: 2 | httpGet: 3 | path: /hello-world/health 4 | port: http 5 | initialDelaySeconds: 120 -------------------------------------------------------------------------------- /learning/Kubernetes-components-overview/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: my-google-svc 5 | spec: 6 | type: ExternalName 7 | externalName: google.com -------------------------------------------------------------------------------- /learning/Managing-ConfigMaps-and-Secrets/configmap-literal.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: app-config 5 | data: 6 | APP_COLOR: blue 7 | APP_MODE: prod 8 | CACHE_TTL: "300" -------------------------------------------------------------------------------- /learning/What-is-Pod-in-Kubernetes/readinessprobe.yaml: -------------------------------------------------------------------------------- 1 | readinessProbe: 2 | httpGet: 3 | path: /hello-world/health 4 | port: http 5 | initialDelaySeconds: 20 6 | periodSeconds: 15 7 | failureThreshold: 6 -------------------------------------------------------------------------------- /learning/kubernetes-for-everyone/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: my-service 5 | spec: 6 | selector: 7 | app: MyApp 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 9376 -------------------------------------------------------------------------------- /learning/Kubernetes-components-overview/my-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-pod 5 | labels: 6 | app: my-app 7 | type: front-app 8 | spec: 9 | containers: 10 | - name: 
nginx-container 11 | image: nginx -------------------------------------------------------------------------------- /learning/Network-Policies-in-K8s/default-deny.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: default-deny 5 | namespace: default 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Ingress 10 | - Egress -------------------------------------------------------------------------------- /projects/10-microservices-deployment-eks/sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: jenkins 5 | namespace: webapps 6 | labels: 7 | app: jenkins 8 | environment: dev # Optional: adjust based on your usage (dev/staging/prod) 9 | -------------------------------------------------------------------------------- /learning/Kubernetes-components-overview/my-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: my-svc 5 | spec: 6 | type: NodePort 7 | ports: 8 | - targetPort: 80 9 | port: 80 10 | nodePort: 30008 11 | selector: 12 | app: my-app 13 | type: front-app -------------------------------------------------------------------------------- /learning/kubernetes-for-everyone/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myapp-pod 5 | labels: 6 | app: myapp 7 | spec: 8 | containers: 9 | - name: myapp-container 10 | image: busybox 11 | command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600'] -------------------------------------------------------------------------------- /learning/RBAC-Access-Control/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | namespace: default 5 | name: pod-manager 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods"] 9 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 10 | - apiGroups: [""] 11 | resources: ["pods/log"] 12 | verbs: ["get"] -------------------------------------------------------------------------------- /projects/10-microservices-deployment-eks/sec.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: kubernetes.io/service-account-token 4 | metadata: 5 | name: mysecretname 6 | namespace: webapps # Optional: specify the namespace for the secret 7 | annotations: 8 | kubernetes.io/service-account.name: jenkins # Link to ServiceAccount 9 | labels: 10 | app: jenkins # Optional: add a label for better organization 11 | -------------------------------------------------------------------------------- /learning/Network-Policies-in-K8s/allow-frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-frontend 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app: frontend 9 | policyTypes: 10 | - Ingress 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | role: frontend 16 | ports: 17 | - protocol: TCP 18 | port: 80 -------------------------------------------------------------------------------- /learning/Kubernetes-components-overview/job-example.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: job-example 5 | spec: 6 | completions: 2 7 | parallelism: 2 8 | template: 9 | metadata: 10 | name: counter 11 | spec: 12 | containers: 13 | - name: counter 14 | image: ubuntu 15 | command: ["bash"] 16 | args: ["-c", "for i in {1..10}; do echo $i; done"] 17 | restartPolicy: Never -------------------------------------------------------------------------------- /learning/Deploy-DaemonSets-Service-in-Kubernetes/fluentd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd 5 | namespace: kube-system 6 | labels: 7 | k8s-app: fluentd 8 | spec: 9 | selector: 10 | matchLabels: 11 | name: fluentd 12 | template: 13 | metadata: 14 | labels: 15 | name: fluentd 16 | spec: 17 | containers: 18 | - name: fluentd 19 | image: fluentd:latest 20 | -------------------------------------------------------------------------------- /learning/kubernetes-for-everyone/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.7.9 20 | ports: 21 | - containerPort: 80 -------------------------------------------------------------------------------- /learning/Kubernetes-components-overview/my-nginx-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: my-nginx-rc 5 | labels: 6 | app: my-nginx-rc-app 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: my-nginx-rc-app 12 | template: # PODs template 13 | metadata: 14 | labels: 15 | app: my-nginx-rc-app 16 | spec: 17 | containers: 18 | - name: nginx-container 19 | image: nginx -------------------------------------------------------------------------------- /learning/Managing-ConfigMaps-and-Secrets/pod-with-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: app-pod 5 | spec: 6 | containers: 7 | - name: app-container 8 | image: nginx:1.21 9 | env: 10 | - name: APP_COLOR 11 | valueFrom: 12 | configMapKeyRef: 13 | name: app-config 14 | key: APP_COLOR 15 | - name: APP_MODE 16 | valueFrom: 17 | configMapKeyRef: 18 | name: app-config 19 | key: APP_MODE -------------------------------------------------------------------------------- /learning/Kubernetes-components-overview/my-nginx-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: my-nginx-ds 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: my-nginx-pod 9 | template: 10 | metadata: 11 | labels: 12 | app: my-nginx-pod 13 | spec: 14 | tolerations: 15 | - effect: NoSchedule 16 | operator: Exists 17 | containers: 18 | - name: nginx-container 19 | image: nginx 20 | ports: 21 | - containerPort: 80 -------------------------------------------------------------------------------- /learning/Deploying-an-Application-on-Kubernetes/service.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: notes-app-service # Service name 5 | labels: 6 | app: notes-app 7 | app.kubernetes.io/name: notes-app 8 | spec: 9 | type: LoadBalancer # Expose the service externally 10 | selector: 11 | app: notes-app # Selector to match pods with the same label 12 | ports: 13 | - port: 80 # Port exposed externally 14 | targetPort: 3000 # Port on the container 15 | protocol: TCP 16 | -------------------------------------------------------------------------------- /learning/Kubernetes-components-overview/cronjob-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: cronjob-example 5 | spec: 6 | schedule: "*/1 * * * *" 7 | jobTemplate: 8 | spec: 9 | completions: 2 10 | parallelism: 2 11 | template: 12 | metadata: 13 | name: counter 14 | spec: 15 | containers: 16 | - name: counter 17 | image: ubuntu 18 | command: ["bash"] 19 | args: ["-c", "for i in {1..10}; do echo $i; done"] 20 | restartPolicy: Never -------------------------------------------------------------------------------- /projects/10-microservices-deployment-eks/bind.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: app-rolebinding 5 | namespace: webapps # Target namespace 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io # API group for the Role 8 | kind: Role # Reference to a Role (not ClusterRole) 9 | name: app-role # Name of the Role being bound 10 | subjects: 11 | - kind: ServiceAccount # Subject type 12 | name: jenkins # ServiceAccount name 13 | namespace: webapps # Namespace of the ServiceAccount 14 | -------------------------------------------------------------------------------- /learning/What-is-Pod-in-Kubernetes/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-mysql 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: hello-mysql 9 | template: 10 | metadata: 11 | labels: 12 | app: hello-mysql 13 | spec: 14 | volumes: 15 | - name: data 16 | emptyDir: {} 17 | containers: 18 | - name: mysql 19 | image: mysql 20 | env: 21 | - name: MYSQL_USER 22 | value: root 23 | - name: MYSQL_ALLOW_EMPTY_PASSWORD 24 | value: 'yes' 25 | - name: MYSQL_DATABASE 26 | value: 'some-app' 27 | ports: 28 | - containerPort: 3306 29 | --- 30 | apiVersion: v1 31 | kind: Service 32 | metadata: 33 | name: hello-mysql 34 | spec: 35 | selector: 36 | app: hello-mysql 37 | ports: 38 | - port: 3306 -------------------------------------------------------------------------------- /learning/What-is-Pod-in-Kubernetes/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hello-world 5 | labels: 6 | app: hello 7 | spec: 8 | containers: 9 | - name: hello-world-container 10 | image: busybox 11 | command: ['sh', '-c', 'echo Hello World! && sleep 3600'] 12 | initContainers: 13 | - name: mysql-container 14 | image: busybox 15 | command: 16 | - '/bin/sh' 17 | - '-c' 18 | - | 19 | while true 20 | do 21 | rt=$(nc -z -w 1 hello-mysql 3306) 22 | if [ $? 
-eq 0 ]; then 23 | echo "DB is UP" 24 | break 25 | fi 26 | echo "DB is not yet reachable;sleep for 10s before retry" 27 | sleep 10 28 | done -------------------------------------------------------------------------------- /learning/kubernetes-for-everyone/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: web 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx # has to match .spec.template.metadata.labels 9 | serviceName: "nginx" 10 | replicas: 3 # by default is 1 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx # has to match .spec.selector.matchLabels 15 | spec: 16 | terminationGracePeriodSeconds: 10 17 | containers: 18 | - name: nginx 19 | image: k8s.gcr.io/nginx-slim:0.8 20 | ports: 21 | - containerPort: 80 22 | name: web 23 | volumeMounts: 24 | - name: www 25 | mountPath: /usr/share/nginx/html 26 | volumeClaimTemplates: 27 | - metadata: 28 | name: www 29 | spec: 30 | accessModes: [ "ReadWriteOnce" ] 31 | storageClassName: "my-storage-class" 32 | resources: 33 | requests: 34 | storage: 1Gi -------------------------------------------------------------------------------- /projects/10-microservices-deployment-eks/rol.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: app-role 5 | namespace: webapps 6 | rules: 7 | - apiGroups: 8 | - "" # Core API group 9 | - apps 10 | - autoscaling 11 | - batch 12 | - extensions 13 | - policy 14 | - rbac.authorization.k8s.io 15 | resources: 16 | - pods 17 | - configmaps 18 | - deployments 19 | - daemonsets 20 | - componentstatuses 21 | - events 22 | - endpoints 23 | - horizontalpodautoscalers 24 | - ingress 25 | - jobs 26 | - limitranges 27 | - namespaces 28 | - nodes 29 | - persistentvolumes 30 | - persistentvolumeclaims 31 | - resourcequotas 32 | - replicasets 33 | - replicationcontrollers 34 | - serviceaccounts 35 | - services 36 | verbs: 37 | - get 38 | - list 39 | - watch 40 | - create 41 | - update 42 | - patch 43 | - delete 44 | -------------------------------------------------------------------------------- /learning/Deploying-an-Application-on-Kubernetes/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: notes-app-deployment 5 | labels: 6 | app: notes-app 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: notes-app 12 | template: 13 | metadata: 14 | labels: 15 | app: notes-app 16 | spec: 17 | containers: 18 | - name: notes-app 19 | image: pavansa/notes-app 20 | imagePullPolicy: IfNotPresent 21 | ports: 22 | - containerPort: 3000 23 | resources: 24 | requests: 25 | cpu: "100m" 26 | memory: "128Mi" 27 | limits: 28 | cpu: "250m" 29 | memory: "256Mi" 30 | env: 31 | - name: NODE_ENV 32 | value: "production" 33 | readinessProbe: 34 | httpGet: 35 | path: / 36 | port: 3000 37 | initialDelaySeconds: 5 38 | periodSeconds: 10 39 | livenessProbe: 40 | httpGet: 41 | path: / 42 | port: 3000 43 | initialDelaySeconds: 15 44 | periodSeconds: 20 45 | -------------------------------------------------------------------------------- /learning/Monitoring-and-Logging/fluentd-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd 5 | namespace: logging 6 | labels: 7 | app: fluentd 8 | 
spec: 9 | selector: 10 | matchLabels: 11 | app: fluentd 12 | template: 13 | metadata: 14 | labels: 15 | app: fluentd 16 | spec: 17 | tolerations: 18 | - key: node-role.kubernetes.io/master 19 | effect: NoSchedule 20 | containers: 21 | - name: fluentd 22 | image: fluent/fluentd-kubernetes-daemonset:v1.14-debian-elasticsearch7-1 23 | env: 24 | - name: FLUENT_ELASTICSEARCH_HOST 25 | value: "elasticsearch" 26 | - name: FLUENT_ELASTICSEARCH_PORT 27 | value: "9200" 28 | volumeMounts: 29 | - name: varlog 30 | mountPath: /var/log 31 | - name: varlibdockercontainers 32 | mountPath: /var/lib/docker/containers 33 | readOnly: true 34 | volumes: 35 | - name: varlog 36 | hostPath: 37 | path: /var/log 38 | - name: varlibdockercontainers 39 | hostPath: 40 | path: /var/lib/docker/containers -------------------------------------------------------------------------------- /CKAD-exercises/README.md: -------------------------------------------------------------------------------- 1 | [![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE) 2 | [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com) 3 | [![unofficial Google Analytics for GitHub](https://gaforgithub.azurewebsites.net/api?repo=CKAD-exercises)](https://github.com/dgkanatsios/gaforgithub) 4 | 5 | # CKAD Exercises 6 | 7 | A set of exercises that helped me prepare for the [Certified Kubernetes Application Developer](https://www.cncf.io/certification/ckad/) exam, offered by the Cloud Native Computing Foundation, organized by curriculum domain. 8 | They can also serve as a way to learn and practice Kubernetes. 9 | 10 | Make a mental note of the breadcrumb at the start of each exercise section, to quickly locate the relevant document on kubernetes.io. 11 | It is recommended that you read the official documentation before attempting the exercises below it. 12 | During the exam, you are only allowed to refer to official documentation from a browser window within the exam VM. 13 | A Quick Reference box will contain helpful links for each exam exercise as well. 14 | 15 | ## Contents 16 | 17 | - [Core Concepts - 13%](a.core_concepts.md) 18 | - [Multi-container pods - 10%](b.multi_container_pods.md) 19 | - [Pod design - 20%](c.pod_design.md) 20 | - [Configuration - 18%](d.configuration.md) 21 | - [Observability - 18%](e.observability.md) 22 | - [Services and networking - 13%](f.services.md) 23 | - [State persistence - 8%](g.state.md) 24 | - [helm](h.helm.md) 25 | - [Custom Resource Definitions](i.crd.md) 26 | 27 | > If your work is related to multiplayer game servers, check out [thundernetes, a brand new project to host game servers on Kubernetes](https://github.com/PlayFab/thundernetes)! 28 | 29 | Feel free to PR and edit/add questions and solutions, but please stick to the existing format. 30 | 31 | -------------------------------------------------------------------------------- /CKAD-exercises/i.crd.md: -------------------------------------------------------------------------------- 1 | # Extend the Kubernetes API with CRD (CustomResourceDefinition) 2 | 3 | - Note: CRD is part of the new CKAD syllabus. Here are a few examples of installing custom resources into the Kubernetes API by creating a CRD. 
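Before creating your own, it can help to see which CustomResourceDefinitions are already installed in the cluster — a quick check:

```bash
kubectl get crds
```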
4 | 5 | ## CRD in K8s 6 | 7 | ### Create a CustomResourceDefinition manifest file for an Operator with the following specifications: 8 | * *Name*: `operators.stable.example.com` 9 | * *Group*: `stable.example.com` 10 | * *Schema*: `` 11 | * *Scope*: `Namespaced` 12 | * *Names*: `` 13 | * *Kind*: `Operator` 14 | 15 |
show 16 |

17 | 18 | ```yaml 19 | apiVersion: apiextensions.k8s.io/v1 20 | kind: CustomResourceDefinition 21 | metadata: 22 | # name must match the spec fields below, and be in the form: . 23 | name: operators.stable.example.com 24 | spec: 25 | group: stable.example.com 26 | versions: 27 | - name: v1 28 | served: true 29 | # One and only one version must be marked as the storage version. 30 | storage: true 31 | schema: 32 | openAPIV3Schema: 33 | type: object 34 | properties: 35 | spec: 36 | type: object 37 | properties: 38 | email: 39 | type: string 40 | name: 41 | type: string 42 | age: 43 | type: integer 44 | scope: Namespaced 45 | names: 46 | plural: operators 47 | singular: operator 48 | # kind is normally the CamelCased singular type. Your resource manifests use this. 49 | kind: Operator 50 | shortNames: 51 | - op 52 | ``` 53 | 54 |
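 Once the CRD has been applied (next exercise), the API server publishes the schema, and you should be able to browse the new type the same way as a built-in resource — e.g.: ```bash kubectl explain operator --recursive ``` 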

55 |
56 | 57 | ### Create the CRD resource in the K8S API 58 | 59 |
show 60 |

61 | 62 | ```bash 63 | kubectl apply -f operator-crd.yml 64 | ``` 65 | 66 |
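 A quick way to verify that the definition was accepted and is being served: ```bash kubectl get crd operators.stable.example.com ``` 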

67 |
68 | 69 | ### Create custom object from the CRD 70 | 71 | * *Name* : `operator-sample` 72 | * *Kind*: `Operator` 73 | * Spec: 74 | * email: `operator-sample@stable.example.com` 75 | * name: `operator sample` 76 | * age: `30` 77 | 78 |
show 79 |

80 | 81 | ```yaml 82 | apiVersion: stable.example.com/v1 83 | kind: Operator 84 | metadata: 85 | name: operator-sample 86 | spec: 87 | email: operator-sample@stable.example.com 88 | name: "operator sample" 89 | age: 30 90 | ``` 91 | 92 | ```bash 93 | kubectl apply -f operator.yml 94 | ``` 95 | 96 |
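 You can then inspect the object you just created — for example: ```bash kubectl get operator operator-sample -o yaml ``` 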

97 |
98 | 99 | ### Listing operator 100 | 101 |
show 102 |

 103 | 104 | Use the singular, plural, or short name: 105 | 106 | ```bash 107 | kubectl get operators 108 | # or 109 | kubectl get operator 110 | # or 111 | kubectl get op 112 | ``` 113 | 114 |
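As with built-in resources, jsonpath can extract individual fields — e.g., assuming the operator-sample object from the previous exercise exists:

```bash
kubectl get operator operator-sample -o jsonpath='{.spec.email}'
```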

115 |
116 | -------------------------------------------------------------------------------- /CKAD-exercises/h.helm.md: -------------------------------------------------------------------------------- 1 | # Managing Kubernetes with Helm 2 | 3 | - Note: Helm is part of the new CKAD syllabus. Here are a few examples of using Helm to manage Kubernetes. 4 | 5 | ## Helm in K8s 6 | 7 | ### Creating a basic Helm chart 8 | 9 |
show 10 |

11 | 12 | ```bash 13 | helm create chart-test ## creates a new chart skeleton named chart-test 14 | ``` 15 | 16 |
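The generated skeleton looks roughly like this (exact contents can vary slightly between Helm versions):

```bash
chart-test/
├── Chart.yaml        # chart metadata
├── values.yaml       # default configuration values
├── charts/           # chart dependencies
└── templates/        # templated Kubernetes manifests
```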

17 |
18 | 19 | ### Running a Helm chart 20 | 21 |
show 22 |

23 | 24 | ```bash 25 | helm install -f myvalues.yaml myredis ./redis 26 | ``` 27 | 28 |
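After installing, you can check the release status and the values it was rendered with:

```bash
helm status myredis
helm get values myredis
```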

29 |
30 | 31 | ### Find pending Helm deployments on all namespaces 32 | 33 |
show 34 |

35 | 36 | ```bash 37 | helm list --pending -A 38 | ``` 39 | 40 |
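Other commonly useful filters on the same command:

```bash
helm list -A            # all releases in all namespaces
helm list --failed -A   # only releases in a failed state
```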

41 |
42 | 43 | ### Uninstall a Helm release 44 | 45 |
show 46 |

47 | 48 | ```bash 49 | helm uninstall -n namespace release_name 50 | ``` 51 | 52 |
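If you want to keep the release history around for later inspection or rollback, add the `--keep-history` flag:

```bash
helm uninstall -n namespace release_name --keep-history
helm history -n namespace release_name   # history remains available after uninstall
```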

53 |
54 | 55 | ### Upgrading a Helm chart 56 | 57 |
show 58 |

59 | 60 | ```bash 61 | helm upgrade -f myvalues.yaml -f override.yaml redis ./redis 62 | ``` 63 | 64 |
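If the upgrade misbehaves, you can inspect the revision history and roll back:

```bash
helm history redis
helm rollback redis 1   # roll back to revision 1
```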

65 |
66 | 67 | ### Using Helm repo 68 | 69 |
show 70 |

71 | 72 | Add, list, remove, update and index chart repos 73 | 74 | ```bash 75 | helm repo add [NAME] [URL] [flags] 76 | 77 | helm repo list / helm repo ls 78 | 79 | helm repo remove [REPO1] [flags] 80 | 81 | helm repo update / helm repo up 82 | 83 | helm repo update [REPO1] [flags] 84 | 85 | helm repo index [DIR] [flags] 86 | ``` 87 | 88 |
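Once a repo has been added and updated, you can search it for charts:

```bash
helm search repo nginx   # search repos added locally
helm search hub nginx    # search Artifact Hub
```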

89 |
90 | 91 | ### Download a Helm chart from a repository 92 | 93 |
show 94 |

95 | 96 | ```bash 97 | helm pull [chart URL | repo/chartname] [...] [flags] ## downloads a chart, but does not install it 98 | helm pull --untar [repo/chartname] # untar the chart after downloading it 99 | ``` 100 | 101 |

102 |
103 | 104 | ### Add the Bitnami repo at https://charts.bitnami.com/bitnami to Helm 105 |
show 106 |

107 | 108 | ```bash 109 | helm repo add bitnami https://charts.bitnami.com/bitnami 110 | ``` 111 | 112 |
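Then refresh the local cache and confirm the repo's charts are visible — e.g.:

```bash
helm repo update
helm search repo bitnami/node
```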

113 |
114 | 115 | ### Write the contents of the values.yaml file of the `bitnami/node` chart to standard output 116 |
show 117 |

118 | 119 | ```bash 120 | helm show values bitnami/node 121 | ``` 122 | 123 |

124 |
125 | 126 | ### Install the `bitnami/node` chart setting the number of replicas to 5 127 |
show 128 |

129 | 130 | To achieve this, we need two key pieces of information: 131 | - The name of the attribute in values.yaml which controls replica count 132 | - A simple way to set the value of this attribute during installation 133 | 134 | To identify the name of the attribute in the values.yaml file, we could get all the values, as in the previous task, and then grep to find attributes matching the pattern `replica` 135 | ```bash 136 | helm show values bitnami/node | grep -i replica 137 | ``` 138 | which returns 139 | ```bash 140 | ## @param replicaCount Specify the number of replicas for the application 141 | replicaCount: 1 142 | ``` 143 | 144 | We can use the `--set` argument during installation to override attribute values. Hence, to set the replica count to 5, we need to run 145 | ```bash 146 | helm install mynode bitnami/node --set replicaCount=5 147 | ``` 148 | 149 |
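To confirm the override took effect — the pod label below follows the common `app.kubernetes.io/instance` chart convention, so treat it as an assumption for any given chart:

```bash
helm get values mynode                                  # should show replicaCount: 5
kubectl get pods -l app.kubernetes.io/instance=mynode   # expect 5 pods
```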

150 |
151 | 152 | 153 | -------------------------------------------------------------------------------- /CKAD-exercises/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at dgkanatsios@outlook.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /projects/10-microservices-deployment-eks/Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent any 3 | 4 | environment { 5 | SCANNER_HOME = tool 'sonar-scanner' 6 | } 7 | 8 | stages { 9 | stage('Git Checkout') { 10 | steps { 11 | git branch: 'latest', url: 'https://github.com/SushantOps/10-Tier-MicroService-Appliction.git' 12 | } 13 | } 14 | 15 | stage('SonarQube Analysis') { 16 | steps { 17 | withSonarQubeEnv('sonar') { 18 | sh '''$SCANNER_HOME/bin/sonar-scanner \ 19 | -Dsonar.projectKey=10-Tier \ 20 | -Dsonar.projectName=10-Tier \ 21 | -Dsonar.java.binaries=.''' 22 | } 23 | } 24 | } 25 | 26 | stage('Build & Push Docker Images') { 27 | parallel { 28 | stage('adservice') { 29 | steps { buildAndPush('adservice') } 30 | } 31 | stage('cartservice') { 32 | steps { buildAndPush('cartservice/src') } 33 | } 34 | stage('checkoutservice') { 35 | steps { buildAndPush('checkoutservice') } 36 | } 37 | stage('currencyservice') { 38 | steps { buildAndPush('currencyservice') } 39 | } 40 | stage('emailservice') { 41 | steps { buildAndPush('emailservice') } 42 | } 43 | stage('frontend') { 44 | steps { buildAndPush('frontend') } 45 | } 46 | stage('loadgenerator') { 47 | steps { buildAndPush('loadgenerator') } 48 | } 49 | stage('paymentservice') { 50 | steps { buildAndPush('paymentservice') } 51 | } 52 | stage('productcatalogservice') { 53 | steps { buildAndPush('productcatalogservice') } 54 | } 55 | stage('recommendationservice') { 56 | steps { buildAndPush('recommendationservice') } 57 | } 58 | stage('shippingservice') { 59 | steps { buildAndPush('shippingservice') } 60 | } 61 | } 62 | } 63 | 64 | stage('K8s Deploy') { 65 | steps { 66 | withKubeConfig( 67 | caCertificate: '', 68 | clusterName: 'my-eks2', 69 | contextName: '', 70 | credentialsId: 'k8-token', 71 | namespace: 'webapps', 72 | restrictKubeConfigAccess: false, 73 | serverUrl: 'https://EBCE08CF45C3AA5A574E126370E5D4FC.gr7.ap-south-1.eks.amazonaws.com' 74 | ) { 75 | sh 'kubectl apply -f deployment-service.yml' 76 | sh 'kubectl get pods' 77 | sh 'kubectl get svc' 78 | } 79 | } 80 | } 81 | } 82 | 83 | // Post actions run after all stages finish 84 | // (the reusable buildAndPush helper is defined below the pipeline block) 85 | post { 86 | always { 87 | echo "Pipeline execution complete!" 88 | } 89 | } 90 | } 91 | 92 | def buildAndPush(String servicePath) { 93 | script { 94 | def imageName = "sushantkapare1717/${servicePath.tokenize('/')[0]}:latest" // first path segment, so 'cartservice/src' builds the 'cartservice' image 95 | withDockerRegistry(credentialsId: 'docker-cred', toolName: 'docker') { 96 | dir("/var/lib/jenkins/workspace/10-Tier/src/${servicePath}") { 97 | sh "docker build -t ${imageName} ." 
98 | sh "docker push ${imageName}" 99 | sh "docker rmi ${imageName}" 100 | } 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /projects/Uber-Clone-DevSecOps/jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent any 3 | 4 | tools { 5 | jdk 'jdk17' 6 | nodejs 'node16' 7 | } 8 | 9 | environment { 10 | SCANNER_HOME = tool 'sonar-scanner' 11 | DOCKER_IMAGE = 'notharshhaa/uber:latest' // Docker Hub repository names must be lowercase 12 | SONAR_PROJECT_NAME = 'Uber' 13 | SONAR_PROJECT_KEY = 'Uber' 14 | } 15 | 16 | options { 17 | timestamps() // Adds timestamps to logs for better debugging 18 | } 19 | 20 | stages { 21 | stage('Clean Workspace') { 22 | steps { 23 | cleanWs() 24 | } 25 | } 26 | 27 | stage('Checkout Code') { 28 | steps { 29 | git branch: 'master', url: 'https://github.com/NotHarshhaa/uber-clone.git' 30 | } 31 | } 32 | 33 | stage('SonarQube Analysis') { 34 | steps { 35 | withSonarQubeEnv('sonar-server') { 36 | sh ''' 37 | $SCANNER_HOME/bin/sonar-scanner \ 38 | -Dsonar.projectName=${SONAR_PROJECT_NAME} \ 39 | -Dsonar.projectKey=${SONAR_PROJECT_KEY} 40 | ''' 41 | } 42 | } 43 | } 44 | 45 | stage('Quality Gate Check') { 46 | steps { 47 | script { 48 | def qualityGate = waitForQualityGate abortPipeline: true, credentialsId: 'Sonar-token' 49 | if (qualityGate.status != 'OK') { 50 | error "❌ SonarQube Quality Gate failed!" 51 | } 52 | } 53 | } 54 | } 55 | 56 | stage('Install Dependencies') { 57 | steps { 58 | sh 'npm install' 59 | } 60 | } 61 | 62 | stage('OWASP Dependency Check') { 63 | steps { 64 | dependencyCheck additionalArguments: '--scan ./ --disableYarnAudit --disableNodeAudit', odcInstallation: 'DP-Check' 65 | dependencyCheckPublisher pattern: '**/dependency-check-report.xml' 66 | } 67 | } 68 | 69 | stage('Trivy File System Scan') { 70 | steps { 71 | sh 'trivy fs . | tee trivy-fs-report.txt' 72 | } 73 | } 74 | 75 | stage('Docker Build & Push') { 76 | steps { 77 | script { 78 | withDockerRegistry(credentialsId: 'docker', toolName: 'docker') { 79 | sh """ 80 | echo "🔨 Building Docker Image..." 81 | docker build -t uber . 82 | docker tag uber ${DOCKER_IMAGE} 83 | 84 | echo "🚀 Pushing Docker Image to Registry..." 85 | docker push ${DOCKER_IMAGE} 86 | """ 87 | } 88 | } 89 | } 90 | } 91 | 92 | stage('Trivy Image Scan') { 93 | steps { 94 | sh 'trivy image ${DOCKER_IMAGE} | tee trivy-image-report.txt' 95 | } 96 | } 97 | 98 | stage('Deploy Docker Container') { 99 | steps { 100 | sh """ 101 | echo "🛠️ Stopping existing container (if running)..." 102 | docker stop uber || true 103 | docker rm uber || true 104 | 105 | echo "🚀 Running new container..." 106 | docker run -d --name uber -p 3000:3000 ${DOCKER_IMAGE} 107 | """ 108 | } 109 | } 110 | } 111 | 112 | post { 113 | success { 114 | echo "✅ Build and Deployment Completed Successfully!" 115 | } 116 | failure { 117 | echo "❌ Build Failed! Check logs for more details." 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /CKAD-exercises/b.multi_container_pods.md: -------------------------------------------------------------------------------- 1 | ![](https://gaforgithub.azurewebsites.net/api?repo=CKAD-exercises/multi_container&empty) 2 | # Multi-container Pods (10%) 3 | 4 | ### Create a Pod with two containers, both with image busybox and command "echo hello; sleep 3600". Connect to the second container and run 'ls' 5 | 6 |
show 7 |

 8 | 9 | The easiest way is to create a pod with a single container and save its definition in a YAML file: 10 | 11 | ```bash 12 | kubectl run busybox --image=busybox --restart=Never -o yaml --dry-run=client -- /bin/sh -c 'echo hello;sleep 3600' > pod.yaml 13 | vi pod.yaml 14 | ``` 15 | 16 | Copy/paste the container-related values, so your final YAML should contain the following two containers (make sure those containers have a different name): 17 | 18 | ```YAML 19 | containers: 20 | - args: 21 | - /bin/sh 22 | - -c 23 | - echo hello;sleep 3600 24 | image: busybox 25 | imagePullPolicy: IfNotPresent 26 | name: busybox 27 | resources: {} 28 | - args: 29 | - /bin/sh 30 | - -c 31 | - echo hello;sleep 3600 32 | image: busybox 33 | name: busybox2 34 | ``` 35 | 36 | ```bash 37 | kubectl create -f pod.yaml 38 | # Connect to the busybox2 container within the pod 39 | kubectl exec -it busybox -c busybox2 -- /bin/sh 40 | ls 41 | exit 42 | 43 | # or you can do the above with just a one-liner 44 | kubectl exec -it busybox -c busybox2 -- ls 45 | 46 | # you can do some cleanup 47 | kubectl delete po busybox 48 | ``` 49 | 50 |
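To double-check that both containers are defined and running in the pod:

```bash
kubectl get po busybox -o jsonpath='{.spec.containers[*].name}'   # busybox busybox2
kubectl logs busybox -c busybox2                                  # hello
```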

51 |
52 | 53 | ### Create a pod with an nginx container exposed on port 80. Add a busybox init container which downloads a page using "wget -O /work-dir/index.html http://neverssl.com/online". Make a volume of type emptyDir and mount it in both containers. For the nginx container, mount it on "/usr/share/nginx/html" and for the initcontainer, mount it on "/work-dir". When done, get the IP of the created pod and create a busybox pod and run "wget -O- IP" 54 | 55 |
show 56 |

57 | 58 | The easiest way is to create a pod with a single container and save its definition in a YAML file: 59 | 60 | ```bash 61 | kubectl run box --image=nginx --restart=Never --port=80 --dry-run=client -o yaml > pod-init.yaml 62 | ``` 63 | 64 | Copy/paste the container-related values, so your final YAML should contain the volume and the initContainer: 65 | 66 | Volume: 67 | 68 | ```YAML 69 | containers: 70 | - image: nginx 71 | ... 72 | volumeMounts: 73 | - name: vol 74 | mountPath: /usr/share/nginx/html 75 | volumes: 76 | - name: vol 77 | emptyDir: {} 78 | ``` 79 | 80 | initContainer: 81 | 82 | ```YAML 83 | ... 84 | initContainers: 85 | - args: 86 | - /bin/sh 87 | - -c 88 | - "wget -O /work-dir/index.html http://neverssl.com/online" 89 | image: busybox 90 | name: box 91 | volumeMounts: 92 | - name: vol 93 | mountPath: /work-dir 94 | ``` 95 | 96 | In total you get: 97 | 98 | ```YAML 99 | 100 | apiVersion: v1 101 | kind: Pod 102 | metadata: 103 | labels: 104 | run: box 105 | name: box 106 | spec: 107 | initContainers: 108 | - args: 109 | - /bin/sh 110 | - -c 111 | - "wget -O /work-dir/index.html http://neverssl.com/online" 112 | image: busybox 113 | name: box 114 | volumeMounts: 115 | - name: vol 116 | mountPath: /work-dir 117 | containers: 118 | - image: nginx 119 | name: nginx 120 | ports: 121 | - containerPort: 80 122 | volumeMounts: 123 | - name: vol 124 | mountPath: /usr/share/nginx/html 125 | volumes: 126 | - name: vol 127 | emptyDir: {} 128 | ``` 129 | 130 | ```bash 131 | # Apply pod 132 | kubectl apply -f pod-init.yaml 133 | 134 | # Get IP 135 | kubectl get po -o wide 136 | 137 | # Execute wget 138 | kubectl run box-test --image=busybox --restart=Never -it --rm -- /bin/sh -c "wget -O- $(kubectl get pod box -o jsonpath='{.status.podIP}')" 139 | 140 | # you can do some cleanup 141 | kubectl delete po box 142 | ``` 143 | 144 |
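To see what the init container actually did, you can check its status and read the downloaded page from the nginx container directly:

```bash
kubectl get po box -o jsonpath='{.status.initContainerStatuses[0].state}'
kubectl exec box -c nginx -- cat /usr/share/nginx/html/index.html
```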

145 |
146 | 147 | -------------------------------------------------------------------------------- /learning/Deploy-DaemonSets-Service-in-Kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # How to deploy DaemonSets Service in Kubernetes (K8s)? 2 | 3 | ![](https://miro.medium.com/v2/resize:fit:736/1*2iUgGBUrG4EHz3VbJgAQWA.jpeg) 4 | 5 | **DaemonSets in Kubernetes Cluster** 6 | 7 | Like other controllers, DaemonSets manage groups of replicated Pods. 8 | 9 | However, DaemonSet ensures that all or selected Worker Nodes run a copy of a Pod (one-Pod-per-node). 10 | 11 | As you add nodes, DaemonSets automatically add Pods to the new nodes. As the nodes are removed from the cluster, those Pods are garbage collected. 12 | 13 | Here is the manifest of DaemonSet: 14 | 15 | ```yaml 16 | apiVersion: apps/v1 17 | kind: DaemonSet 18 | metadata: 19 | name: fluentd 20 | namespace: kube-system 21 | labels: 22 | k8s-app: fluentd 23 | spec: 24 | selector: 25 | matchLabels: 26 | name: fluentd 27 | template: 28 | metadata: 29 | labels: 30 | name: fluentd 31 | spec: 32 | containers: 33 | - name: fluentd 34 | image: fluentd:latest 35 | ``` 36 | 37 | **Create a daemonset:** 38 | 39 | ```bash 40 | kubectl create -f daemonset.yaml   # daemonset.apps "fluentd" created 41 | ``` 42 | 43 | **Check the pod running:** 44 | 45 | ```bash 46 | kubectl get pods -n kube-system 47 | NAME READY STATUS RESTARTS AGE 48 | fluentd-7svlj 1/1 Running 0 58s 49 | fluentd-kwm4x 1/1 Running 0 58s 50 | fluentd-q64wf 1/1 Running 0 58s 51 | ``` 52 | 53 | **Check the number of nodes:** 54 | 55 | ```bash 56 | kubectl get nodes 57 | NAME STATUS ROLES AGE VERSION 58 | gke-cluster-1-default-pool-a6a57f2a-77wb Ready <none> 6h v1.11.6-gke.2 59 | gke-cluster-1-default-pool-a6a57f2a-xkgz Ready <none> 6h v1.11.6-gke.2 60 | gke-cluster-1-default-pool-a6a57f2a-z2bx Ready <none> 6h v1.11.6-gke.2 61 | ``` 62 | 63 | **Display your daemon sets:** 64 | 65 | ```bash 66 | kubectl get daemonsets 67 | NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE AGE 68 | fluentd 3 3 3 3 3 5m 69 | ``` 70 | 71 | **Get details of a daemonset:** 72 | 73 | ```bash 74 | kubectl describe daemonset fluentd 75 | ``` 76 | 77 | **Edit a daemonset:** 78 | 79 | ```bash 80 | kubectl edit daemonset fluentd 81 | ``` 82 | 83 | **Delete a daemonset:** 84 | 85 | ```bash 86 | kubectl delete daemonset fluentd 87 | daemonset.apps "fluentd" deleted 88 | ``` 89 | 90 | **Some uses of a DaemonSet are:** 91 | 92 | 1. running a cluster storage daemon, such as glusterd, ceph, on each node. 93 | 94 | 2. running a logs collection daemon on every node, such as fluentd or logstash. 95 | 96 | 3. running a node monitoring daemon on every node, such as Prometheus Node Exporter, AppDynamics Agent, Datadog agent, New Relic agent, etc. 97 | 98 | **Running Pods on Only Some Nodes** 99 | 100 | If you specify a .spec.template.spec.nodeSelector, then the DaemonSet controller will create Pods on nodes which match that node selector. 101 | 102 | ```yaml 103 | spec: 104 | nodeSelector: 105 | environment: prod 106 | ``` 107 | 108 | Similarly, if you specify a .spec.template.spec.affinity, the DaemonSet controller will create Pods on nodes that match that node affinity. You can also add tolerations so that DaemonSet Pods get scheduled onto tainted nodes, such as control-plane nodes: 109 | 110 | ```yaml 111 | spec: 112 | tolerations: 113 | - key: node-role.kubernetes.io/master 114 | effect: NoSchedule 115 | ``` 116 | 117 | If you do not specify any of these, the controller will create Pods on every node of the cluster. 
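Putting the scheduling controls together, here is a sketch of a pod-template fragment combining a node selector with a toleration (the `environment: prod` label is only an example, and newer clusters taint control-plane nodes with `node-role.kubernetes.io/control-plane` rather than `.../master`):

```yaml
spec:
  template:
    spec:
      nodeSelector:
        environment: prod
      tolerations:
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
```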
118 | 119 | **Additional Resources** 120 | 121 | For more information about DaemonSets, check out these resources: 122 | 123 | - [Kubernetes DaemonSet Documentation](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) - Official Kubernetes documentation explaining DaemonSets in detail 124 | - [Understanding Kubernetes DaemonSets](https://www.bmc.com/blogs/kubernetes-daemonset/) - A comprehensive guide covering DaemonSet use cases and implementation 125 | 126 | ![DaemonSet Architecture Diagram](https://miro.medium.com/v2/resize:fit:736/0*cRMUa8dBX1dHpj5L) 127 | 128 | ### If you like this article, please share with others. ❤️ 129 | -------------------------------------------------------------------------------- /learning/Managing-ConfigMaps-and-Secrets/README.md: -------------------------------------------------------------------------------- 1 | # Managing ConfigMaps and Secrets in Kubernetes 2 | 3 | This guide provides a comprehensive walkthrough of ConfigMaps and Secrets in Kubernetes, essential components for managing application configuration and sensitive data. 4 | 5 | ## What are ConfigMaps and Secrets? 6 | 7 | ### ConfigMaps 8 | ConfigMaps are API objects that store non-confidential data in key-value pairs. They allow you to decouple environment-specific configuration from your container images, making your applications more portable. 9 | 10 | Use cases for ConfigMaps: 11 | - Application configuration files 12 | - Command-line arguments 13 | - Environment variables 14 | - Port numbers 15 | - Feature flags 16 | 17 | ### Secrets 18 | Secrets are similar to ConfigMaps but are specifically designed to hold sensitive information such as: 19 | - API tokens 20 | - TLS certificates 21 | - SSH keys 22 | - Database credentials 23 | 24 | ## What you'll learn 25 | - Creating and managing ConfigMaps 26 | - Using literal values 27 | - From configuration files 28 | - From directories 29 | - Injecting configuration data into Pods 30 | - As environment variables 31 | - As configuration files 32 | - As command-line arguments 33 | - Working with Secrets 34 | - Creating encrypted secrets 35 | - Mounting secrets in pods 36 | - Automatic secret rotation 37 | - Best practices for configuration management 38 | - Versioning configurations 39 | - Namespace isolation 40 | - Access control 41 | 42 | ## Prerequisites 43 | - A running Kubernetes cluster (minikube, kind, or cloud provider) 44 | - kubectl CLI tool installed and configured 45 | - Basic understanding of Kubernetes concepts (Pods, Deployments) 46 | - Text editor for YAML file manipulation 47 | 48 | ## Examples included 49 | 50 | ### 1. ConfigMap from literal values 51 | ```bash 52 | # Create ConfigMap from literal values 53 | kubectl create configmap app-config \ 54 | --from-literal=APP_COLOR=blue \ 55 | --from-literal=APP_MODE=prod 56 | ``` 57 | 58 | ### 2. ConfigMap from files 59 | ```bash 60 | # Create ConfigMap from a configuration file 61 | kubectl create configmap app-config \ 62 | --from-file=app-config.properties 63 | ``` 64 | 65 | ### 3. Secret management 66 | ```bash 67 | # Create a secret for database credentials 68 | kubectl create secret generic db-secret \ 69 | --from-literal=username=myuser \ 70 | --from-literal=password=mypassword 71 | ``` 72 | 73 | ### 4. Environment variables injection 74 | The `pod-with-config.yaml` demonstrates how to inject ConfigMap values as environment variables. 75 | 76 | ### 5. 
Volume mounts for configuration 77 | ```yaml 78 | volumes: 79 | - name: config-volume 80 | configMap: 81 | name: app-config 82 | ``` 83 | 84 | ## Best Practices 85 | 86 | 1. **Namespace Isolation** 87 | - Use separate ConfigMaps for different environments 88 | - Keep configurations namespace-scoped 89 | - Use RBAC to control access 90 | 91 | 2. **Version Control** 92 | - Store ConfigMap definitions in git 93 | - Use labels for versioning 94 | - Implement change tracking 95 | 96 | 3. **Security** 97 | - Never store sensitive data in ConfigMaps 98 | - Use Secrets for confidential information 99 | - Implement proper RBAC policies 100 | 101 | 4. **Resource Management** 102 | - Keep ConfigMaps small 103 | - Use multiple ConfigMaps for different aspects 104 | - Consider using a configuration management tool 105 | 106 | ## Getting Started 107 | 108 | 1. Apply the ConfigMap manifests: 109 | ```bash 110 | # Create the literal ConfigMap 111 | kubectl apply -f configmap-literal.yaml 112 | 113 | # Create the file-based ConfigMap 114 | kubectl apply -f configmap-file.yaml 115 | 116 | # Create the secret 117 | kubectl apply -f secret.yaml 118 | 119 | # Deploy the pod with configurations 120 | kubectl apply -f pod-with-config.yaml 121 | ``` 122 | 123 | 2. Verify the configuration: 124 | ```bash 125 | # Check ConfigMap creation 126 | kubectl get configmaps 127 | 128 | # Verify Pod environment variables 129 | kubectl exec pod-name -- env 130 | 131 | # View mounted configurations 132 | kubectl exec pod-name -- ls /etc/config 133 | ``` 134 | 135 | ## Troubleshooting 136 | 137 | Common issues and solutions: 138 | 139 | 1. **ConfigMap Not Found** 140 | - Verify ConfigMap exists in the correct namespace 141 | - Check Pod and ConfigMap are in the same namespace 142 | - Validate RBAC permissions 143 | 144 | 2. **Configuration Not Updated** 145 | - Remember ConfigMap updates aren't automatically reflected 146 | - Restart Pod to pick up new configurations 147 | - Consider using a configuration reloader 148 | 149 | 3. **Volume Mount Issues** 150 | - Check volume mount paths 151 | - Verify file permissions 152 | - Validate ConfigMap keys match expected files 153 | 154 | ## Additional Resources 155 | 156 | - [Official Kubernetes ConfigMap Documentation](https://kubernetes.io/docs/concepts/configuration/configmap/) 157 | - [Kubernetes Secrets Documentation](https://kubernetes.io/docs/concepts/configuration/secret/) 158 | - [Configuration Best Practices](https://kubernetes.io/docs/concepts/configuration/overview/) -------------------------------------------------------------------------------- /learning/Network-Policies-in-K8s/README.md: -------------------------------------------------------------------------------- 1 | # Network Policies in Kubernetes 2 | 3 | This comprehensive guide explores Kubernetes Network Policies, which provide fine-grained control over how pods communicate with each other and other network endpoints. 4 | 5 | ## Understanding Network Policies 6 | 7 | Network Policies are application-centric constructs that specify how groups of pods are allowed to communicate with each other and with other network endpoints. They are crucial for: 8 | 9 | - Implementing zero-trust security models 10 | - Enforcing micro-segmentation 11 | - Controlling both ingress and egress traffic 12 | - Protecting sensitive workloads 13 | 14 | ### Key Concepts 15 | 16 | 1. **Pod Selectors**: Define which pods the policy applies to 17 | 2. **Namespace Selectors**: Control traffic across namespaces 18 | 3. 
**CIDR Blocks**: Specify IP ranges for traffic control 19 | 4. **Port Definitions**: Control traffic on specific ports 20 | 5. **Rule Types**: Ingress (incoming) and Egress (outgoing) rules 21 | 22 | ## What you'll learn 23 | - Understanding Network Policies 24 | - Basic concepts and architecture 25 | - Policy types and selectors 26 | - Rule evaluation and precedence 27 | - Implementing pod isolation 28 | - Default deny policies 29 | - Allowlist approach 30 | - Namespace isolation 31 | - Configuring ingress and egress rules 32 | - Port-based rules 33 | - Protocol-specific policies 34 | - CIDR-based rules 35 | - Namespace-based network policies 36 | - Cross-namespace communication 37 | - Namespace isolation patterns 38 | - Label-based network policies 39 | - Pod selection strategies 40 | - Dynamic policy application 41 | - Policy composition 42 | 43 | ## Prerequisites 44 | - Kubernetes cluster with Network Policy support 45 | - Calico 46 | - Cilium 47 | - Antrea 48 | - WeaveNet 49 | - kubectl CLI tool with admin access 50 | - Basic understanding of: 51 | - Kubernetes networking 52 | - Pod-to-Pod communication 53 | - Service networking 54 | - CNI concepts 55 | 56 | ## Examples included 57 | 58 | ### 1. Default Deny All Traffic 59 | ```yaml 60 | # default-deny.yaml 61 | apiVersion: networking.k8s.io/v1 62 | kind: NetworkPolicy 63 | metadata: 64 | name: default-deny 65 | spec: 66 | podSelector: {} 67 | policyTypes: 68 | - Ingress 69 | - Egress 70 | ``` 71 | 72 | ### 2. Allow Specific Pod-to-Pod Communication 73 | ```yaml 74 | # allow-frontend.yaml 75 | apiVersion: networking.k8s.io/v1 76 | kind: NetworkPolicy 77 | metadata: 78 | name: allow-frontend 79 | spec: 80 | podSelector: 81 | matchLabels: 82 | app: backend 83 | ingress: 84 | - from: 85 | - podSelector: 86 | matchLabels: 87 | app: frontend 88 | ``` 89 | 90 | ### 3. Namespace Isolation 91 | ```yaml 92 | apiVersion: networking.k8s.io/v1 93 | kind: NetworkPolicy 94 | metadata: 95 | name: namespace-isolation 96 | spec: 97 | podSelector: {} 98 | ingress: 99 | - from: 100 | - namespaceSelector: 101 | matchLabels: 102 | environment: production 103 | ``` 104 | 105 | ## Best Practices 106 | 107 | 1. **Start with Default Deny** 108 | - Implement default deny policies first 109 | - Add specific allow rules as needed 110 | - Document all policy decisions 111 | 112 | 2. **Use Labels Effectively** 113 | - Design a consistent labeling strategy 114 | - Use role-based labels 115 | - Maintain label documentation 116 | 117 | 3. **Policy Organization** 118 | - Group policies by namespace 119 | - Use clear naming conventions 120 | - Implement change control 121 | 122 | 4. **Testing and Validation** 123 | - Test policies in non-production first 124 | - Use network policy validators 125 | - Maintain test cases 126 | 127 | ## Implementation Guide 128 | 129 | 1. **Prepare Your Environment** 130 | ```bash 131 | # Verify NetworkPolicy support 132 | kubectl get pods -n kube-system | grep network 133 | ``` 134 | 135 | 2. **Apply Base Policies** 136 | ```bash 137 | # Apply default deny policy 138 | kubectl apply -f default-deny.yaml 139 | 140 | # Apply frontend access policy 141 | kubectl apply -f allow-frontend.yaml 142 | 143 | # Apply database access policy 144 | kubectl apply -f allow-database.yaml 145 | ``` 146 | 147 | 3. 
**Verify Policy Enforcement** 148 | ```bash 149 | # Test pod connectivity 150 | kubectl exec -it frontend-pod -- curl backend-service 151 | 152 | # Check policy status 153 | kubectl describe networkpolicy 154 | ``` 155 | 156 | ## Troubleshooting 157 | 158 | ### Common Issues 159 | 160 | 1. **Policy Not Applied** 161 | - Check CNI plugin status 162 | - Verify label selectors 163 | - Validate policy syntax 164 | 165 | 2. **Unexpected Blocking** 166 | - Review policy order 167 | - Check namespace selectors 168 | - Validate pod labels 169 | 170 | 3. **Performance Impact** 171 | - Monitor policy evaluation 172 | - Optimize selector usage 173 | - Consider policy aggregation 174 | 175 | ### Debugging Tools 176 | ```bash 177 | # View applied policies 178 | kubectl get networkpolicies -A 179 | 180 | # Check pod labels 181 | kubectl get pods --show-labels 182 | 183 | # Validate connectivity 184 | kubectl exec -it debugger -- ping target-pod 185 | ``` 186 | 187 | ## Advanced Topics 188 | 189 | 1. **Egress Control** 190 | - External service access 191 | - DNS policy configuration 192 | - CIDR-based rules 193 | 194 | 2. **Multi-cluster Policies** 195 | - Cross-cluster communication 196 | - Federation considerations 197 | - Service mesh integration 198 | 199 | 3. **Policy Metrics** 200 | - Monitoring policy effectiveness 201 | - Traffic analysis 202 | - Compliance reporting 203 | 204 | ## Additional Resources 205 | 206 | - [Official Kubernetes NetworkPolicy Documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 207 | - [Calico Network Policy Guide](https://docs.projectcalico.org/security/network-policy) 208 | - [Network Policy Recipes](https://github.com/ahmetb/kubernetes-network-policy-recipes) 209 | - [Kubernetes Network Policy Editor](https://editor.cilium.io/) -------------------------------------------------------------------------------- /learning/RBAC-Access-Control/README.md: -------------------------------------------------------------------------------- 1 | # Role-Based Access Control (RBAC) in Kubernetes 2 | 3 | This comprehensive guide explores Kubernetes RBAC, the native authorization mechanism used to regulate access to Kubernetes cluster resources. 4 | 5 | ## Understanding RBAC 6 | 7 | RBAC is a method of regulating access to computer or network resources based on the roles of individual users. In Kubernetes, RBAC: 8 | 9 | - Defines what actions users can perform 10 | - Controls access to cluster resources 11 | - Implements principle of least privilege 12 | - Provides fine-grained access control 13 | 14 | ### Core Concepts 15 | 16 | 1. **Rules**: Define what operations (verbs) can be performed on which resources 17 | 2. **Roles**: Collection of rules that apply within a namespace 18 | 3. **ClusterRoles**: Similar to Roles but apply cluster-wide 19 | 4. 
**Bindings**: Link roles to users, groups, or service accounts 20 | 21 | ## What you'll learn 22 | - Understanding RBAC components 23 | - Rules and permissions 24 | - Role definitions 25 | - Binding mechanisms 26 | - Scope and inheritance 27 | - Creating and managing Roles and ClusterRoles 28 | - Namespace-scoped roles 29 | - Cluster-wide permissions 30 | - Aggregated roles 31 | - Default roles 32 | - Working with RoleBindings and ClusterRoleBindings 33 | - User bindings 34 | - Group bindings 35 | - ServiceAccount bindings 36 | - Cross-namespace bindings 37 | - Service Account configuration 38 | - Creating service accounts 39 | - Token management 40 | - Pod association 41 | - Secret automation 42 | - Best practices for RBAC implementation 43 | - Least privilege principle 44 | - Role composition 45 | - Binding strategies 46 | - Security considerations 47 | 48 | ## Prerequisites 49 | - Kubernetes cluster with RBAC enabled 50 | - Most clusters enable it by default 51 | - Check with --authorization-mode flag 52 | - kubectl CLI tool with admin access 53 | - Basic understanding of: 54 | - Kubernetes API resources 55 | - Authentication mechanisms 56 | - Security concepts 57 | 58 | ## Examples included 59 | 60 | ### 1. Creating Service Accounts 61 | ```yaml 62 | apiVersion: v1 63 | kind: ServiceAccount 64 | metadata: 65 | name: app-service-account 66 | namespace: default 67 | ``` 68 | 69 | ### 2. Defining Roles 70 | ```yaml 71 | # Example of a pod-reader role 72 | apiVersion: rbac.authorization.k8s.io/v1 73 | kind: Role 74 | metadata: 75 | namespace: default 76 | name: pod-reader 77 | rules: 78 | - apiGroups: [""] 79 | resources: ["pods"] 80 | verbs: ["get", "list", "watch"] 81 | ``` 82 | 83 | ### 3. Setting up RoleBindings 84 | ```yaml 85 | apiVersion: rbac.authorization.k8s.io/v1 86 | kind: RoleBinding 87 | metadata: 88 | name: read-pods 89 | namespace: default 90 | subjects: 91 | - kind: ServiceAccount 92 | name: app-service-account 93 | namespace: default 94 | roleRef: 95 | kind: Role 96 | name: pod-reader 97 | apiGroup: rbac.authorization.k8s.io 98 | ``` 99 | 100 | ## Best Practices 101 | 102 | ### 1. Role Design 103 | - Follow least privilege principle 104 | - Group related permissions 105 | - Use descriptive names 106 | - Document role purposes 107 | 108 | ### 2. Binding Strategy 109 | - Prefer namespace isolation 110 | - Use groups for user management 111 | - Limit cluster-wide access 112 | - Regular access review 113 | 114 | ### 3. Service Accounts 115 | - One service account per application 116 | - Automate token rotation 117 | - Monitor token usage 118 | - Implement pod security policies 119 | 120 | ### 4. Security Considerations 121 | - Regular audit of permissions 122 | - Remove unused roles 123 | - Monitor binding changes 124 | - Implement detection controls 125 | 126 | ## Implementation Guide 127 | 128 | ### 1. Create Required Objects 129 | ```bash 130 | # Create service account 131 | kubectl create -f service-account.yaml 132 | 133 | # Create role 134 | kubectl create -f role.yaml 135 | 136 | # Create role binding 137 | kubectl create -f role-binding.yaml 138 | 139 | # Deploy pod with service account 140 | kubectl create -f pod-with-sa.yaml 141 | ``` 142 | 143 | ### 2. 
Verify Setup 144 | ```bash 145 | # Check service account 146 | kubectl get serviceaccount 147 | 148 | # Verify role 149 | kubectl get role 150 | 151 | # Check bindings 152 | kubectl get rolebinding 153 | 154 | # Test permissions 155 | kubectl auth can-i list pods --as system:serviceaccount:default:app-service-account 156 | ``` 157 | 158 | ## Troubleshooting 159 | 160 | ### Common Issues 161 | 162 | 1. **Permission Denied** 163 | - Verify role rules 164 | - Check binding configuration 165 | - Validate service account 166 | - Review API groups 167 | 168 | 2. **Role Not Applied** 169 | - Check namespace context 170 | - Verify binding subject 171 | - Validate role reference 172 | - Review aggregation labels 173 | 174 | 3. **Service Account Issues** 175 | - Check token mounting 176 | - Verify secret creation 177 | - Validate pod association 178 | - Review automount settings 179 | 180 | ### Debugging Commands 181 | ```bash 182 | # Check effective permissions 183 | kubectl auth can-i --list 184 | 185 | # View role details 186 | kubectl describe role role-name 187 | 188 | # Check binding 189 | kubectl describe rolebinding binding-name 190 | 191 | # Validate service account 192 | kubectl describe sa service-account-name 193 | ``` 194 | 195 | ## Advanced Topics 196 | 197 | ### 1. Aggregated ClusterRoles 198 | - Combining multiple roles 199 | - Dynamic role updates 200 | - Custom resource permissions 201 | - Extension patterns 202 | 203 | ### 2. External Authentication 204 | - Integration with external systems 205 | - Token management 206 | - Certificate handling 207 | - Authentication plugins 208 | 209 | ### 3. Audit Logging 210 | - Enabling audit logs 211 | - Policy configuration 212 | - Log analysis 213 | - Compliance reporting 214 | 215 | ## Additional Resources 216 | 217 | - [Official Kubernetes RBAC Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) 218 | - [RBAC Good Practices](https://kubernetes.io/docs/concepts/security/rbac-good-practices/) 219 | - [Using RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) 220 | - [Kubernetes Security Best Practices](https://kubernetes.io/docs/concepts/security/overview/) -------------------------------------------------------------------------------- /CKAD-exercises/e.observability.md: -------------------------------------------------------------------------------- 1 | ![](https://gaforgithub.azurewebsites.net/api?repo=CKAD-exercises/observability&empty) 2 | # Observability (18%) 3 | 4 | ## Liveness, readiness and startup probes 5 | 6 | kubernetes.io > Documentation > Tasks > Configure Pods and Containers > [Configure Liveness, Readiness and Startup Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) 7 | 8 | ### Create an nginx pod with a liveness probe that just runs the command 'ls'. Save its YAML in pod.yaml. Run it, check its probe status, delete it. 9 | 10 |
12 | 13 | ```bash 14 | kubectl run nginx --image=nginx --restart=Never --dry-run=client -o yaml > pod.yaml 15 | vi pod.yaml 16 | ``` 17 | 18 | ```YAML 19 | apiVersion: v1 20 | kind: Pod 21 | metadata: 22 | creationTimestamp: null 23 | labels: 24 | run: nginx 25 | name: nginx 26 | spec: 27 | containers: 28 | - image: nginx 29 | imagePullPolicy: IfNotPresent 30 | name: nginx 31 | resources: {} 32 | livenessProbe: # our probe 33 | exec: # add this line 34 | command: # command definition 35 | - ls # ls command 36 | dnsPolicy: ClusterFirst 37 | restartPolicy: Never 38 | status: {} 39 | ``` 40 | 41 | ```bash 42 | kubectl create -f pod.yaml 43 | kubectl describe pod nginx | grep -i liveness # run this to see that liveness probe works 44 | kubectl delete -f pod.yaml 45 | ``` 46 | 47 |
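> Note: a probe that runs `ls` always succeeds, so you will never see it fail. A minimal sketch (assuming the same pod layout) that makes the failure visible is to probe a path that doesn't exist; also note the pod above uses `restartPolicy: Never`, so the container is killed but not restarted — switch to `Always` if you want to watch the `RESTARTS` column climb:

```YAML
livenessProbe:
  exec:
    command:
    - ls
    - /notexist # always fails, so the probe reports Unhealthy
```

```bash
kubectl describe pod nginx | grep -i liveness # shows the failing probe and its events
```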
49 | 50 | ### Modify the pod.yaml file so that liveness probe starts kicking in after 5 seconds whereas the interval between probes would be 5 seconds. Run it, check the probe, delete it. 51 | 52 |
54 | 55 | ```bash 56 | kubectl explain pod.spec.containers.livenessProbe # get the exact names 57 | ``` 58 | 59 | ```YAML 60 | apiVersion: v1 61 | kind: Pod 62 | metadata: 63 | creationTimestamp: null 64 | labels: 65 | run: nginx 66 | name: nginx 67 | spec: 68 | containers: 69 | - image: nginx 70 | imagePullPolicy: IfNotPresent 71 | name: nginx 72 | resources: {} 73 | livenessProbe: 74 | initialDelaySeconds: 5 # add this line 75 | periodSeconds: 5 # add this line as well 76 | exec: 77 | command: 78 | - ls 79 | dnsPolicy: ClusterFirst 80 | restartPolicy: Never 81 | status: {} 82 | ``` 83 | 84 | ```bash 85 | kubectl create -f pod.yaml 86 | kubectl describe po nginx | grep -i liveness 87 | kubectl delete -f pod.yaml 88 | ``` 89 | 90 |
92 | 93 | ### Create an nginx pod (that includes port 80) with an HTTP readinessProbe on path '/' on port 80. Again, run it, check the readinessProbe, delete it. 94 | 95 |
97 | 98 | ```bash 99 | kubectl run nginx --image=nginx --dry-run=client -o yaml --restart=Never --port=80 > pod.yaml 100 | vi pod.yaml 101 | ``` 102 | 103 | ```YAML 104 | apiVersion: v1 105 | kind: Pod 106 | metadata: 107 | creationTimestamp: null 108 | labels: 109 | run: nginx 110 | name: nginx 111 | spec: 112 | containers: 113 | - image: nginx 114 | imagePullPolicy: IfNotPresent 115 | name: nginx 116 | resources: {} 117 | ports: 118 | - containerPort: 80 # Note: Readiness probes runs on the container during its whole lifecycle. Since nginx exposes 80, containerPort: 80 is not required for readiness to work. 119 | readinessProbe: # declare the readiness probe 120 | httpGet: # add this line 121 | path: / # 122 | port: 80 # 123 | dnsPolicy: ClusterFirst 124 | restartPolicy: Never 125 | status: {} 126 | ``` 127 | 128 | ```bash 129 | kubectl create -f pod.yaml 130 | kubectl describe pod nginx | grep -i readiness # to see the pod readiness details 131 | kubectl delete -f pod.yaml 132 | ``` 133 | 134 |
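The heading of this section also covers startup probes, which none of these exercises shows. A minimal sketch, assuming the same nginx pod: while a `startupProbe` has not yet succeeded, the kubelet holds off liveness and readiness checks, giving a slow-starting app up to `failureThreshold × periodSeconds` to come up:

```YAML
startupProbe:
  httpGet:
    path: /
    port: 80
  failureThreshold: 30 # 30 * 5s = up to 150s before the container is killed
  periodSeconds: 5
```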
### Lots of pods are running in `qa`,`alan`,`test`,`production` namespaces. All of these pods are configured with liveness probes. Please list all pods whose liveness probes have failed, in the format of `<namespace>/<pod name>` per line.
A typical liveness probe failure event:

```
LAST SEEN   TYPE      REASON      OBJECT              MESSAGE
22m         Warning   Unhealthy   pod/liveness-exec   Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory
```

Collect the failed pods across all namespaces (note the `-A`, and that the filter string matches the actual event message):

```sh
kubectl get events -A -o json | jq -r '.items[] | select(.message | contains("Liveness probe failed")).involvedObject | .namespace + "/" + .name'
```
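If `jq` isn't available, a rough dependency-free alternative is a sketch like the following — note that `reason=Unhealthy` also matches readiness-probe failures, so expect some extra rows:

```bash
kubectl get events -A --field-selector=type=Warning,reason=Unhealthy \
  -o custom-columns=NS:.involvedObject.namespace,POD:.involvedObject.name
```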
156 | 157 | ## Logging 158 | 159 | ### Create a busybox pod that runs `i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done`. Check its logs 160 | 161 |
163 | 164 | ```bash 165 | kubectl run busybox --image=busybox --restart=Never -- /bin/sh -c 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done' 166 | kubectl logs busybox -f # follow the logs 167 | ``` 168 | 169 |
171 | 172 | ## Debugging 173 | 174 | ### Create a busybox pod that runs 'ls /notexist'. Determine if there's an error (of course there is), see it. In the end, delete the pod 175 | 176 |
178 | 179 | ```bash 180 | kubectl run busybox --restart=Never --image=busybox -- /bin/sh -c 'ls /notexist' 181 | # show that there's an error 182 | kubectl logs busybox 183 | kubectl describe po busybox 184 | kubectl delete po busybox 185 | ``` 186 | 187 |
189 | 190 | ### Create a busybox pod that runs 'notexist'. Determine if there's an error (of course there is), see it. In the end, delete the pod forcefully with a 0 grace period 191 | 192 |
194 | 195 | ```bash 196 | kubectl run busybox --restart=Never --image=busybox -- notexist 197 | kubectl logs busybox # will bring nothing! container never started 198 | kubectl describe po busybox # in the events section, you'll see the error 199 | # also... 200 | kubectl get events | grep -i error # you'll see the error here as well 201 | kubectl delete po busybox --force --grace-period=0 202 | ``` 203 | 204 |
206 | 207 | 208 | ### Get CPU/memory utilization for nodes ([metrics-server](https://github.com/kubernetes-incubator/metrics-server) must be running) 209 | 210 |
212 | 213 | ```bash 214 | kubectl top nodes 215 | ``` 216 | 217 |
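The same metrics API serves per-pod and per-container figures as well; assuming metrics-server is running and a reasonably recent kubectl:

```bash
kubectl top pods -A             # CPU/memory for all pods
kubectl top pods --containers   # per-container breakdown
kubectl top nodes --sort-by=cpu # busiest node first
```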
--------------------------------------------------------------------------------
/projects/Deploying-Spring-Boot-K8S/README.md:
--------------------------------------------------------------------------------

# Deploying Spring Boot application on Kubernetes

![](https://miro.medium.com/v2/resize:fit:736/0*pUxaXy4_zjAWtFwf.png)

### **In this article we deploy a Spring Boot application on K8s**

# **Step-by-Step Implementation**

## Step 1 — Create a **t2.medium** Instance

![](https://miro.medium.com/v2/resize:fit:736/1*m1tuu0xpgyaPuIA97M8cRw.png)

## Step 2 — Install Docker on the instance

```bash
sudo su
yum update -y
yum install docker -y
systemctl enable docker
systemctl start docker
systemctl status docker
docker --version
```

![](https://miro.medium.com/v2/resize:fit:736/1*8qxOf5ULaFfT38rNUXx9ag.png)

## **Step 3 — Install Conntrack**

**conntrack**: In Kubernetes, “conntrack” refers to the Connection Tracking system used for network traffic management within the cluster. Conntrack is a kernel feature that keeps track of network connections and their states. It allows the kernel to maintain information about network connections, such as source IP addresses, destination IP addresses, ports, and connection states (established, closed, etc.)

```bash
yum install conntrack -y
```

![](https://miro.medium.com/v2/resize:fit:736/1*b9OprU_EyaxGph8vh7Ct2Q.png)

## **Step 4 — Install and run minikube on the VM**

```bash
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64

sudo install minikube-linux-amd64 /usr/local/bin/minikube

/usr/local/bin/minikube start --force --driver=docker
```

![](https://miro.medium.com/v2/resize:fit:736/1*3KWYKJt47caknzbv27AkLg.png)

![](https://miro.medium.com/v2/resize:fit:736/1*oaum5rMKrA7v_hWd0QWN9Q.png)

After that, we can check the minikube version:

```bash
/usr/local/bin/minikube version
```

**Step 5 — Install kubectl, give it execute permission, and check its version.**

```bash
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

/usr/local/bin/kubectl version
```

![](https://miro.medium.com/v2/resize:fit:736/1*d3uGseuhtgUKJn6EZA7hNg.png)

## Step 6 — Install git and clone the repository

```bash
yum install git -y
cd /opt
git clone https://github.com/SushantOps/SpringBootOnK8S_PS.git
```

![](https://miro.medium.com/v2/resize:fit:736/1*Wq0x1zA0Q8khebtki1F6Mg.png)

## **Step 7 — Now we will start setting up the database**

Create the persistent database and the other services:

```bash
/usr/local/bin/kubectl get pods

/usr/local/bin/kubectl create -f db-deployment.yaml

/usr/local/bin/kubectl get pods
```

![](https://miro.medium.com/v2/resize:fit:736/1*Le-vLbtW-AKSGtM-rZivQw.png)

![](https://miro.medium.com/v2/resize:fit:736/1*QsxPhddh9sweRi2KuNkmOg.png)

Now check the database and its content: go inside the container (the password is **root**) and confirm the DB has been created.
```bash
/usr/local/bin/kubectl exec -it <mysql-pod-name> -- /bin/bash

mysql -u root -p
```

![](https://miro.medium.com/v2/resize:fit:736/1*ooRy1qXVD2lOVPy3c9cy9Q.png)

After that, exit from the MySQL shell and then from the container.

**Install Maven now and check its version.**

```bash
yum install maven -y
mvn -v
```

**Step 8 — Create an image from the Dockerfile**

```bash
docker build -t <dockerhub-username>/<image-name>:<tag> .

docker build -t sushantkapare1717/springboot-crud-k8s:1.0 .

# check the docker images
docker images
```

![](https://miro.medium.com/v2/resize:fit:736/1*dDZ2dfHDr9TZvbVZolUyMg.png)

![](https://miro.medium.com/v2/resize:fit:736/1*IiOyFlD6wVukLPdO8fZ6Xw.png)

Now push that image to Docker Hub: first log in to Docker Hub, then push the image.

```bash
docker login

docker push sushantkapare1717/springboot-crud-k8s:1.0
```

![](https://miro.medium.com/v2/resize:fit:736/1*nZ4u8QWT6pDaNYvPPtbv-w.png)

![](https://miro.medium.com/v2/resize:fit:736/1*eiroqwaqp5npfz_eivrTlA.png)

![](https://miro.medium.com/v2/resize:fit:736/1*niIjqgGVGCaL-dVYC-a27w.png)

## **Step 9 — Apply the app-deployment.yaml file, then check the pods and the service**

```bash
/usr/local/bin/kubectl apply -f app-deployment.yaml

/usr/local/bin/kubectl get pods

/usr/local/bin/kubectl get svc
```

![](https://miro.medium.com/v2/resize:fit:736/1*V_2x1Dl80JQmNMkDE0Tafw.png)

![](https://miro.medium.com/v2/resize:fit:736/1*hRwbqE0ebmtnkNpkMLWHqw.png)

Now check the **minikube ip**:

```bash
/usr/local/bin/minikube ip
```

## **PORT-FORWARDING**

Port forwarding is a networking technique used to redirect network traffic from one port on a host to another port on a different host or the same host. In the context of Kubernetes, port forwarding allows you to access services running inside a Kubernetes cluster from your local machine or another remote host.

```bash
/usr/local/bin/kubectl port-forward --address 0.0.0.0 svc/springboot-crud-svc 8080:8080 &
```

![](https://miro.medium.com/v2/resize:fit:736/1*uHfR-a4k69dTgIQKPUhaEQ.png)

## **Step 10** — Go to **POSTMAN** and call the endpoints; you should get a reply

Now go to the database in the server and check the entries you made in the SQL database.

```bash
/usr/local/bin/kubectl exec -it <mysql-pod-name> -- /bin/bash
```

**Step 11 — Now we will open the Dashboard of K8S.
Inside EC2 our minikube is running so we are setting a proxy for all the local address.The proxy server is started on port 8001.** 182 | 183 | ```bash 184 | /usr/local/bin/kubectl proxy --address='0.0.0.0' --accept-hosts='^*$' 185 | ``` 186 | 187 | ![](https://miro.medium.com/v2/resize:fit:736/1*zNr_s2M15kfdMt3dErvuQA.png) 188 | 189 | **Now open another terminal and run below command.** 190 | 191 | ```bash 192 | /usr/local/bin/minikube dashboard 193 | ``` 194 | 195 | ![](https://miro.medium.com/v2/resize:fit:736/1*uNdt2tyvKRxVURaNjP-jog.png) 196 | 197 | ```bash 198 | http://127.0.0.1:34927/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ 199 | change the IP address to your IP address and port mention as 8001 200 | 201 | e.g http://54.190.118.228:8001/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ 202 | ``` 203 | 204 | So you can access the k8s resources on dashboard as , 205 | 206 | ![](https://miro.medium.com/v2/resize:fit:736/1*0pLPd-WINJetavpNiEGy5w.png) 207 | 208 | ![](https://miro.medium.com/v2/resize:fit:736/1*F-KVuk3ST5Yoa6WQUiVdmg.png) 209 | 210 | ![](https://miro.medium.com/v2/resize:fit:736/1*yImdvieZ0FCD7KA4g4N6tA.png) 211 | 212 | ### If you like this article, please share with others. ❤️ 213 | -------------------------------------------------------------------------------- /learning/Monitoring-and-Logging/README.md: -------------------------------------------------------------------------------- 1 | # Monitoring and Logging in Kubernetes 2 | 3 | This comprehensive guide explores implementing robust monitoring and logging solutions in Kubernetes using industry-standard tools and best practices. 4 | 5 | ## Understanding Kubernetes Monitoring and Logging 6 | 7 | Effective monitoring and logging in Kubernetes involves multiple components working together to provide: 8 | 9 | - Real-time metrics collection 10 | - Resource utilization tracking 11 | - Application performance monitoring 12 | - Centralized log aggregation 13 | - Alerting and notification 14 | - Visualization and dashboards 15 | 16 | ### Core Components 17 | 18 | 1. **Metrics Collection** 19 | - Prometheus for metrics scraping 20 | - Node Exporter for hardware/OS metrics 21 | - kube-state-metrics for cluster state 22 | - Custom metrics adapters 23 | 24 | 2. **Visualization** 25 | - Grafana for metric dashboards 26 | - Kibana for log visualization 27 | - Custom dashboards 28 | 29 | 3. 
**Log Aggregation** 30 | - Elasticsearch for log storage 31 | - Fluentd/Fluent Bit for collection 32 | - Logstash for processing 33 | - Vector for modern processing 34 | 35 | ## What you'll learn 36 | - Setting up Prometheus and Grafana 37 | - Prometheus operator 38 | - Service monitors 39 | - Alert managers 40 | - Dashboard creation 41 | - Implementing logging with EFK/ELK stack 42 | - Elasticsearch clusters 43 | - Fluentd configuration 44 | - Kibana dashboards 45 | - Log parsing 46 | - Resource metrics collection 47 | - CPU and memory metrics 48 | - Network statistics 49 | - Storage metrics 50 | - Custom metrics 51 | - Custom metrics and alerts 52 | - Prometheus rules 53 | - Alert configuration 54 | - Notification channels 55 | - SLO/SLI monitoring 56 | - Log aggregation and visualization 57 | - Centralized logging 58 | - Log filtering 59 | - Pattern matching 60 | - Visual analysis 61 | 62 | ## Prerequisites 63 | - Running Kubernetes cluster 64 | - Minimum 3 nodes recommended 65 | - Sufficient resources for monitoring stack 66 | - kubectl CLI tool with admin access 67 | - Helm (optional, for easy deployment) 68 | - Basic understanding of: 69 | - Kubernetes architecture 70 | - Monitoring concepts 71 | - Log management 72 | - Metrics and alerting 73 | 74 | ## Component Setup 75 | 76 | ### 1. Prometheus Stack 77 | ```yaml 78 | # prometheus-values.yaml 79 | prometheus: 80 | prometheusSpec: 81 | retention: 15d 82 | resources: 83 | requests: 84 | memory: 1Gi 85 | cpu: 500m 86 | limits: 87 | memory: 2Gi 88 | cpu: 1000m 89 | 90 | grafana: 91 | persistence: 92 | enabled: true 93 | size: 10Gi 94 | ``` 95 | 96 | ### 2. Elasticsearch Configuration 97 | ```yaml 98 | # elasticsearch.yaml 99 | apiVersion: elasticsearch.k8s.elastic.co/v1 100 | kind: Elasticsearch 101 | metadata: 102 | name: logging 103 | spec: 104 | version: 7.17.3 105 | nodeSets: 106 | - name: default 107 | count: 3 108 | config: 109 | node.store.allow_mmap: false 110 | ``` 111 | 112 | ### 3. Fluentd DaemonSet 113 | The `fluentd-ds.yaml` file in this directory contains a complete Fluentd configuration for log collection. 114 | 115 | ## Implementation Guide 116 | 117 | ### 1. Deploy Prometheus Stack 118 | ```bash 119 | # Add Prometheus repository 120 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 121 | helm repo update 122 | 123 | # Install Prometheus stack 124 | helm install monitoring prometheus-community/kube-prometheus-stack -f prometheus-values.yaml 125 | ``` 126 | 127 | ### 2. Deploy EFK Stack 128 | ```bash 129 | # Create namespace 130 | kubectl create namespace logging 131 | 132 | # Deploy Elasticsearch 133 | kubectl apply -f elasticsearch.yaml 134 | 135 | # Deploy Fluentd 136 | kubectl apply -f fluentd-ds.yaml 137 | 138 | # Deploy Kibana 139 | kubectl apply -f kibana.yaml 140 | ``` 141 | 142 | ### 3. Configure Service Monitors 143 | ```yaml 144 | apiVersion: monitoring.coreos.com/v1 145 | kind: ServiceMonitor 146 | metadata: 147 | name: app-monitor 148 | spec: 149 | selector: 150 | matchLabels: 151 | app: your-app 152 | endpoints: 153 | - port: metrics 154 | ``` 155 | 156 | ## Best Practices 157 | 158 | ### 1. Resource Management 159 | - Size components appropriately 160 | - Use resource limits 161 | - Monitor monitoring components 162 | - Implement retention policies 163 | 164 | ### 2. Security 165 | - Implement authentication 166 | - Encrypt communications 167 | - Use secure endpoints 168 | - Regular security updates 169 | 170 | ### 3. 
Performance
- Optimize scrape intervals
- Configure log rotation
- Use efficient queries
- Index management

### 4. High Availability
- Deploy redundant components
- Use persistent storage
- Implement backups
- Disaster recovery planning

## Troubleshooting

### Common Issues

1. **Prometheus Issues**
   - Target scraping failures
   - Storage problems
   - Rule evaluation errors
   - Resource constraints

2. **Logging Problems**
   - Log shipping delays
   - Missing logs
   - Index management
   - Storage capacity

3. **Visualization Issues**
   - Dashboard loading
   - Query performance
   - Data availability
   - User access

### Debugging Commands
```bash
# Check Prometheus status
kubectl get prometheuses -n monitoring
kubectl describe prometheus -n monitoring

# Verify Fluentd
kubectl get pods -n logging -l app=fluentd
kubectl logs -n logging -l app=fluentd

# Elasticsearch health
kubectl exec -it elasticsearch-0 -n logging -- curl localhost:9200/_cluster/health
```

## Advanced Configuration

### 1. Custom Metrics
```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: custom-alerts
spec:
  groups:
  - name: custom.rules
    rules:
    - alert: HighErrorRate
      expr: rate(http_requests_total{status=~"5.."}[5m]) > 1
      for: 5m
      labels:
        severity: critical
```

### 2. Log Processing
```
# Fluentd configuration
<match **>
  @type elasticsearch
  host elasticsearch
  port 9200
  logstash_format true
  <buffer>
    @type file
    path /var/log/fluentd-buffers/kubernetes.system.buffer
    flush_mode interval
    retry_type exponential_backoff
    flush_interval 5s
    retry_forever false
    retry_max_interval 30
    chunk_limit_size 2M
    queue_limit_length 8
    overflow_action block
  </buffer>
</match>
```

## Accessing Dashboards
- Prometheus: http://localhost:9090
- Grafana: http://localhost:3000
- Kibana: http://localhost:5601

Default credentials and access methods are documented in each component's deployment guide.

## Additional Resources

- [Prometheus Documentation](https://prometheus.io/docs/introduction/overview/)
- [Grafana Documentation](https://grafana.com/docs/)
- [Elasticsearch Documentation](https://www.elastic.co/guide/index.html)
- [Fluentd Documentation](https://docs.fluentd.org/)
- [Kubernetes Monitoring Guide](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-usage-monitoring/)
- [EFK Stack Tutorial](https://www.digitalocean.com/community/tutorials/how-to-set-up-an-elasticsearch-fluentd-and-kibana-efk-logging-stack-on-kubernetes)

--------------------------------------------------------------------------------
/CKAD-exercises/j.podman.md:
--------------------------------------------------------------------------------

# Define, build and modify container images

- Note: The topic is part of the new CKAD syllabus. Here are a few examples of using **podman** to manage the life cycle of container images.
The use of **docker** had been the industry standard for many years, but now large companies like [Red Hat](https://www.redhat.com/en/blog/say-hello-buildah-podman-and-skopeo) are moving to a new suite of open source tools: podman, skopeo and buildah. Also Kubernetes has moved in this [direction](https://kubernetes.io/blog/2022/02/17/dockershim-faq/). In particular, `podman` is meant to be the replacement of the `docker` command: so it makes sense to get familiar with it, although they are quite interchangeable considering that they use the same syntax. 4 | 5 | ## Podman basics 6 | 7 | ### Create a Dockerfile to deploy an Apache HTTP Server which hosts a custom main page 8 | 9 |
11 | 12 | ```Dockerfile 13 | FROM docker.io/httpd:2.4 14 | RUN echo "Hello, World!" > /usr/local/apache2/htdocs/index.html 15 | ``` 16 | 17 |
19 | 20 | ### Build and see how many layers the image consists of 21 | 22 |
24 | 25 | ```bash 26 | :~$ podman build -t simpleapp . 27 | STEP 1/2: FROM httpd:2.4 28 | STEP 2/2: RUN echo "Hello, World!" > /usr/local/apache2/htdocs/index.html 29 | COMMIT simpleapp 30 | --> ef4b14a72d0 31 | Successfully tagged localhost/simpleapp:latest 32 | ef4b14a72d02ae0577eb0632d084c057777725c279e12ccf5b0c6e4ff5fd598b 33 | 34 | :~$ podman images 35 | REPOSITORY TAG IMAGE ID CREATED SIZE 36 | localhost/simpleapp latest ef4b14a72d02 8 seconds ago 148 MB 37 | docker.io/library/httpd 2.4 98f93cd0ec3b 7 days ago 148 MB 38 | 39 | :~$ podman image tree localhost/simpleapp:latest 40 | Image ID: ef4b14a72d02 41 | Tags: [localhost/simpleapp:latest] 42 | Size: 147.8MB 43 | Image Layers 44 | ├── ID: ad6562704f37 Size: 83.9MB 45 | ├── ID: c234616e1912 Size: 3.072kB 46 | ├── ID: c23a797b2d04 Size: 2.721MB 47 | ├── ID: ede2e092faf0 Size: 61.11MB 48 | ├── ID: 971c2cdf3872 Size: 3.584kB Top Layer of: [docker.io/library/httpd:2.4] 49 | └── ID: 61644e82ef1f Size: 6.144kB Top Layer of: [localhost/simpleapp:latest] 50 | ``` 51 | 52 |
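`podman history` is another way to see where those layers come from — it lists the Dockerfile step behind each layer of the image:

```bash
podman history localhost/simpleapp:latest
```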
54 | 55 | ### Run the image locally, inspect its status and logs, finally test that it responds as expected 56 | 57 |
59 | 60 | ```bash 61 | :~$ podman run -d --name test -p 8080:80 localhost/simpleapp 62 | 2f3d7d613ea6ba19703811d30704d4025123c7302ff6fa295affc9bd30e532f8 63 | 64 | :~$ podman ps 65 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 66 | 2f3d7d613ea6 localhost/simpleapp:latest httpd-foreground 5 seconds ago Up 6 seconds ago 0.0.0.0:8080->80/tcp test 67 | 68 | :~$ podman logs test 69 | AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using 10.0.2.100. Set the 'ServerName' directive globally to suppress this message 70 | AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using 10.0.2.100. Set the 'ServerName' directive globally to suppress this message 71 | [Sat Jun 04 16:15:38.071377 2022] [mpm_event:notice] [pid 1:tid 139756978220352] AH00489: Apache/2.4.53 (Unix) configured -- resuming normal operations 72 | [Sat Jun 04 16:15:38.073570 2022] [core:notice] [pid 1:tid 139756978220352] AH00094: Command line: 'httpd -D FOREGROUND' 73 | 74 | :~$ curl 0.0.0.0:8080 75 | Hello, World! 76 | ``` 77 | 78 |
80 | 81 | ### Run a command inside the pod to print out the index.html file 82 | 83 |
85 | 86 | ```bash 87 | :~$ podman exec -it test cat /usr/local/apache2/htdocs/index.html 88 | Hello, World! 89 | ``` 90 |
92 | 93 | ### Tag the image with ip and port of a private local registry and then push the image to this registry 94 | 95 |
97 | 98 | > Note: Some small distributions of Kubernetes (such as [microk8s](https://microk8s.io/docs/registry-built-in)) have a built-in registry you can use for this exercise. If this is not your case, you'll have to setup it on your own. 99 | 100 | ```bash 101 | :~$ podman tag localhost/simpleapp $registry_ip:5000/simpleapp 102 | 103 | :~$ podman push $registry_ip:5000/simpleapp 104 | ``` 105 | 106 |
108 | 109 | Verify that the registry contains the pushed image and that you can pull it 110 | 111 |
113 | 114 | ```bash 115 | :~$ curl http://$registry_ip:5000/v2/_catalog 116 | {"repositories":["simpleapp"]} 117 | 118 | # remove the image already present 119 | :~$ podman rmi $registry_ip:5000/simpleapp 120 | 121 | :~$ podman pull $registry_ip:5000/simpleapp 122 | Trying to pull 10.152.183.13:5000/simpleapp:latest... 123 | Getting image source signatures 124 | Copying blob 643ea8c2c185 skipped: already exists 125 | Copying blob 972107ece720 skipped: already exists 126 | Copying blob 9857eeea6120 skipped: already exists 127 | Copying blob 93859aa62dbd skipped: already exists 128 | Copying blob 8e47efbf2b7e skipped: already exists 129 | Copying blob 42e0f5a91e40 skipped: already exists 130 | Copying config ef4b14a72d done 131 | Writing manifest to image destination 132 | Storing signatures 133 | ef4b14a72d02ae0577eb0632d084c057777725c279e12ccf5b0c6e4ff5fd598b 134 | ``` 135 | 136 |
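The note at the top also mentions **skopeo**; it can inspect the pushed image's manifest and metadata without pulling it — a sketch, assuming the same plain-HTTP local registry:

```bash
skopeo inspect --tls-verify=false docker://$registry_ip:5000/simpleapp
```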
138 | 139 | ### Run a pod with the image pushed to the registry 140 | 141 |
```bash
:~$ kubectl run simpleapp --image=$registry_ip:5000/simpleapp --port=80

:~$ curl $(kubectl get pods simpleapp -o jsonpath='{.status.podIP}')
Hello, World!
```
153 | 154 | ### Log into a remote registry server and then read the credentials from the default file 155 | 156 | 157 |
159 | 160 | > Note: The two most used container registry servers with a free plan are [DockerHub](https://hub.docker.com/) and [Quay.io](https://quay.io/). 161 | 162 | ```bash 163 | :~$ podman login --username $YOUR_USER --password $YOUR_PWD docker.io 164 | :~$ cat ${XDG_RUNTIME_DIR}/containers/auth.json 165 | { 166 | "auths": { 167 | "docker.io": { 168 | "auth": "Z2l1bGl0JLSGtvbkxCcX1xb617251xh0x3zaUd4QW45Q3JuV3RDOTc=" 169 | } 170 | } 171 | } 172 | ``` 173 | 174 |
176 | 177 | ### Create a secret both from existing login credentials and from the CLI 178 | 179 |
```bash
:~$ kubectl create secret generic mysecret --from-file=.dockerconfigjson=${XDG_RUNTIME_DIR}/containers/auth.json --type=kubernetes.io/dockerconfigjson
secret/mysecret created
:~$ kubectl create secret docker-registry mysecret2 --docker-server=https://index.docker.io/v1/ --docker-username=$YOUR_USR --docker-password=$YOUR_PWD
secret/mysecret2 created
```
191 | 192 | ### Create the manifest for a Pod that uses one of the two secrets just created to pull an image hosted on the relative private remote registry 193 | 194 |
196 | 197 | ```yaml 198 | apiVersion: v1 199 | kind: Pod 200 | metadata: 201 | name: private-reg 202 | spec: 203 | containers: 204 | - name: private-reg-container 205 | image: $YOUR_PRIVATE_IMAGE 206 | imagePullSecrets: 207 | - name: mysecret 208 | ``` 209 | 210 |
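Instead of adding `imagePullSecrets` to every pod manifest, the secret can be attached to the namespace's service account so its pods pick it up automatically — a sketch using the `mysecret` created above:

```bash
kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "mysecret"}]}'
```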
212 | 213 | ### Clean up all the images and containers 214 | 215 |
217 | 218 | ```bash 219 | :~$ podman rm --all --force 220 | :~$ podman rmi --all 221 | :~$ kubectl delete pod simpleapp 222 | ``` 223 | 224 |
226 | -------------------------------------------------------------------------------- /CKAD-exercises/f.services.md: -------------------------------------------------------------------------------- 1 | ![](https://gaforgithub.azurewebsites.net/api?repo=CKAD-exercises/services&empty) 2 | # Services and Networking (13%) 3 | 4 | ### Create a pod with image nginx called nginx and expose its port 80 5 | 6 |
8 | 9 | ```bash 10 | kubectl run nginx --image=nginx --restart=Never --port=80 --expose 11 | # observe that a pod as well as a service are created 12 | ``` 13 | 14 |
16 | 17 | 18 | ### Confirm that ClusterIP has been created. Also check endpoints 19 | 20 |
22 | 23 | ```bash 24 | kubectl get svc nginx # services 25 | kubectl get ep # endpoints 26 | ``` 27 | 28 |
30 | 31 | ### Get service's ClusterIP, create a temp busybox pod and 'hit' that IP with wget 32 | 33 |
```bash
kubectl get svc nginx # get the service's ClusterIP (something like 10.108.93.130)
kubectl run busybox --rm --image=busybox -it --restart=Never -- sh
wget -O- [PUT THE SERVICE'S CLUSTER IP HERE]:80
exit
```
or
46 | 47 | ```bash 48 | IP=$(kubectl get svc nginx --template={{.spec.clusterIP}}) # get the IP (something like 10.108.93.130) 49 | kubectl run busybox --rm --image=busybox -it --restart=Never --env="IP=$IP" -- wget -O- $IP:80 --timeout 2 50 | # Tip: --timeout is optional, but it helps to get answer more quickly when connection fails (in seconds vs minutes) 51 | ``` 52 | 53 |
55 | 56 | ### Convert the ClusterIP to NodePort for the same service and find the NodePort port. Hit service using Node's IP. Delete the service and the pod at the end. 57 | 58 |
60 | 61 | ```bash 62 | kubectl edit svc nginx 63 | ``` 64 | 65 | ```yaml 66 | apiVersion: v1 67 | kind: Service 68 | metadata: 69 | creationTimestamp: 2018-06-25T07:55:16Z 70 | name: nginx 71 | namespace: default 72 | resourceVersion: "93442" 73 | selfLink: /api/v1/namespaces/default/services/nginx 74 | uid: 191e3dac-784d-11e8-86b1-00155d9f663c 75 | spec: 76 | clusterIP: 10.97.242.220 77 | ports: 78 | - port: 80 79 | protocol: TCP 80 | targetPort: 80 81 | selector: 82 | run: nginx 83 | sessionAffinity: None 84 | type: NodePort # change cluster IP to nodeport 85 | status: 86 | loadBalancer: {} 87 | ``` 88 | 89 | or 90 | 91 | ```bash 92 | kubectl patch svc nginx -p '{"spec":{"type":"NodePort"}}' 93 | ``` 94 | 95 | ```bash 96 | kubectl get svc 97 | ``` 98 | 99 | ``` 100 | # result: 101 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 102 | kubernetes ClusterIP 10.96.0.1 443/TCP 1d 103 | nginx NodePort 10.107.253.138 80:31931/TCP 3m 104 | ``` 105 | 106 | ```bash 107 | wget -O- NODE_IP:31931 # if you're using Kubernetes with Docker for Windows/Mac, try 127.0.0.1 108 | #if you're using minikube, try minikube ip, then get the node ip such as 192.168.99.117 109 | ``` 110 | 111 | ```bash 112 | kubectl delete svc nginx # Deletes the service 113 | kubectl delete pod nginx # Deletes the pod 114 | ``` 115 |
117 | 118 | ### Create a deployment called foo using image 'dgkanatsios/simpleapp' (a simple server that returns hostname) and 3 replicas. Label it as 'app=foo'. Declare that containers in this pod will accept traffic on port 8080 (do NOT create a service yet) 119 | 120 |
122 | 123 | ```bash 124 | kubectl create deploy foo --image=dgkanatsios/simpleapp --port=8080 --replicas=3 125 | kubectl label deployment foo --overwrite app=foo #This is optional since kubectl create deploy foo will create label app=foo by default 126 | ``` 127 |
129 | 130 | ### Get the pod IPs. Create a temp busybox pod and try hitting them on port 8080 131 | 132 |
```bash
kubectl get pods -l app=foo -o wide # 'wide' will show pod IPs
kubectl run busybox --image=busybox --restart=Never -it --rm -- sh
wget -O- <pod-ip>:8080 # do not try with the pod name, it will not work
# try hitting all IPs returned by the 1st command to confirm that the hostname is different
exit
# or
kubectl get po -o wide -l app=foo | awk '{print $6}' | grep -v IP | xargs -L1 -I '{}' kubectl run --rm -ti tmp --restart=Never --image=busybox -- wget -O- http://\{\}:8080
# or
kubectl get po -l app=foo -o jsonpath='{range .items[*]}{.status.podIP}{"\n"}{end}' | xargs -L1 -I '{}' kubectl run --rm -ti tmp --restart=Never --image=busybox -- wget -O- http://\{\}:8080
```
150 | 151 | ### Create a service that exposes the deployment on port 6262. Verify its existence, check the endpoints 152 | 153 |
155 | 156 | 157 | ```bash 158 | kubectl expose deploy foo --port=6262 --target-port=8080 159 | kubectl get service foo # you will see ClusterIP as well as port 6262 160 | kubectl get endpoints foo # you will see the IPs of the three replica pods, listening on port 8080 161 | ``` 162 | 163 |
165 | 166 | ### Create a temp busybox pod and connect via wget to foo service. Verify that each time there's a different hostname returned. Delete deployment and services to cleanup the cluster 167 | 168 |
```bash
kubectl get svc # get the foo service ClusterIP
kubectl run busybox --image=busybox -it --rm --restart=Never -- sh
wget -O- foo:6262 # DNS works! run it many times, you'll see different pods responding
wget -O- <cluster-ip>:6262 # ClusterIP works as well
# you can also kubectl logs on deployment pods to see the container logs
kubectl delete svc foo
kubectl delete deploy foo
```
183 | 184 | ### Create an nginx deployment of 2 replicas, expose it via a ClusterIP service on port 80. Create a NetworkPolicy so that only pods with labels 'access: granted' can access the deployment and apply it 185 | 186 | kubernetes.io > Documentation > Concepts > Services, Load Balancing, and Networking > [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 187 | 188 | > Note that network policies may not be enforced by default, depending on your k8s implementation. E.g. Azure AKS by default won't have policy enforcement, the cluster must be created with an explicit support for `netpol` https://docs.microsoft.com/en-us/azure/aks/use-network-policies#overview-of-network-policy 189 | 190 |
192 | 193 | ```bash 194 | kubectl create deployment nginx --image=nginx --replicas=2 195 | kubectl expose deployment nginx --port=80 196 | 197 | kubectl describe svc nginx # see the 'app=nginx' selector for the pods 198 | # or 199 | kubectl get svc nginx -o yaml 200 | 201 | vi policy.yaml 202 | ``` 203 | 204 | ```YAML 205 | kind: NetworkPolicy 206 | apiVersion: networking.k8s.io/v1 207 | metadata: 208 | name: access-nginx # pick a name 209 | spec: 210 | podSelector: 211 | matchLabels: 212 | app: nginx # selector for the pods 213 | ingress: # allow ingress traffic 214 | - from: 215 | - podSelector: # from pods 216 | matchLabels: # with this label 217 | access: granted 218 | ``` 219 | 220 | ```bash 221 | # Create the NetworkPolicy 222 | kubectl create -f policy.yaml 223 | 224 | # Check if the Network Policy has been created correctly 225 | # make sure that your cluster's network provider supports Network Policy (https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy/#before-you-begin) 226 | kubectl run busybox --image=busybox --rm -it --restart=Never -- wget -O- http://nginx:80 --timeout 2 # This should not work. --timeout is optional here. But it helps to get answer more quickly (in seconds vs minutes) 227 | kubectl run busybox --image=busybox --rm -it --restart=Never --labels=access=granted -- wget -O- http://nginx:80 --timeout 2 # This should be fine 228 | ``` 229 | 230 |
232 | -------------------------------------------------------------------------------- /CKAD-exercises/g.state.md: -------------------------------------------------------------------------------- 1 | ![](https://gaforgithub.azurewebsites.net/api?repo=CKAD-exercises/state&empty) 2 | # State Persistence (8%) 3 | 4 | kubernetes.io > Documentation > Tasks > Configure Pods and Containers > [Configure a Pod to Use a Volume for Storage](https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/) 5 | 6 | kubernetes.io > Documentation > Tasks > Configure Pods and Containers > [Configure a Pod to Use a PersistentVolume for Storage](https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/) 7 | 8 | ## Define volumes 9 | 10 | ### Create busybox pod with two containers, each one will have the image busybox and will run the 'sleep 3600' command. Make both containers mount an emptyDir at '/etc/foo'. Connect to the second busybox, write the first column of '/etc/passwd' file to '/etc/foo/passwd'. Connect to the first busybox and write '/etc/foo/passwd' file to standard output. Delete pod. 11 | 12 |
14 | 15 | *This question is probably a better fit for the 'Multi-container-pods' section but I'm keeping it here as it will help you get acquainted with state* 16 | 17 | Easiest way to do this is to create a template pod with: 18 | 19 | ```bash 20 | kubectl run busybox --image=busybox --restart=Never -o yaml --dry-run=client -- /bin/sh -c 'sleep 3600' > pod.yaml 21 | vi pod.yaml 22 | ``` 23 | Copy paste the container definition and type the lines that have a comment in the end: 24 | 25 | ```YAML 26 | apiVersion: v1 27 | kind: Pod 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | run: busybox 32 | name: busybox 33 | spec: 34 | dnsPolicy: ClusterFirst 35 | restartPolicy: Never 36 | containers: 37 | - args: 38 | - /bin/sh 39 | - -c 40 | - sleep 3600 41 | image: busybox 42 | imagePullPolicy: IfNotPresent 43 | name: busybox 44 | resources: {} 45 | volumeMounts: # 46 | - name: myvolume # 47 | mountPath: /etc/foo # 48 | - args: 49 | - /bin/sh 50 | - -c 51 | - sleep 3600 52 | image: busybox 53 | name: busybox2 # don't forget to change the name during copy paste, must be different from the first container's name! 54 | volumeMounts: # 55 | - name: myvolume # 56 | mountPath: /etc/foo # 57 | volumes: # 58 | - name: myvolume # 59 | emptyDir: {} # 60 | ``` 61 | In case you forget to add ```bash -- /bin/sh -c 'sleep 3600'``` in template pod create command, you can include command field in config file 62 | 63 | ```YAML 64 | spec: 65 | containers: 66 | - image: busybox 67 | name: busybox 68 | command: ["/bin/sh", "-c", "sleep 3600"] 69 | ``` 70 | 71 | Connect to the second container: 72 | 73 | ```bash 74 | kubectl exec -it busybox -c busybox2 -- /bin/sh 75 | cat /etc/passwd | cut -f 1 -d ':' > /etc/foo/passwd # instead of cut command you can use awk -F ":" '{print $1}' 76 | cat /etc/foo/passwd # confirm that stuff has been written successfully 77 | exit 78 | ``` 79 | 80 | Connect to the first container: 81 | 82 | ```bash 83 | kubectl exec -it busybox -c busybox -- /bin/sh 84 | mount | grep foo # confirm the mounting 85 | cat /etc/foo/passwd 86 | exit 87 | kubectl delete po busybox 88 | ``` 89 | 90 |
92 | 93 | 94 | ### Create a PersistentVolume of 10Gi, called 'myvolume'. Make it have accessMode of 'ReadWriteOnce' and 'ReadWriteMany', storageClassName 'normal', mounted on hostPath '/etc/foo'. Save it on pv.yaml, add it to the cluster. Show the PersistentVolumes that exist on the cluster 95 | 96 |
98 | 99 | ```bash 100 | vi pv.yaml 101 | ``` 102 | 103 | ```YAML 104 | kind: PersistentVolume 105 | apiVersion: v1 106 | metadata: 107 | name: myvolume 108 | spec: 109 | storageClassName: normal 110 | capacity: 111 | storage: 10Gi 112 | accessModes: 113 | - ReadWriteOnce 114 | - ReadWriteMany 115 | hostPath: 116 | path: /etc/foo 117 | ``` 118 | 119 | Show the PersistentVolumes: 120 | 121 | ```bash 122 | kubectl create -f pv.yaml 123 | # will have status 'Available' 124 | kubectl get pv 125 | ``` 126 | 127 |
129 | 130 | ### Create a PersistentVolumeClaim for this storage class, called 'mypvc', a request of 4Gi and an accessMode of ReadWriteOnce, with the storageClassName of normal, and save it on pvc.yaml. Create it on the cluster. Show the PersistentVolumeClaims of the cluster. Show the PersistentVolumes of the cluster 131 | 132 |
134 | 135 | ```bash 136 | vi pvc.yaml 137 | ``` 138 | 139 | ```YAML 140 | kind: PersistentVolumeClaim 141 | apiVersion: v1 142 | metadata: 143 | name: mypvc 144 | spec: 145 | storageClassName: normal 146 | accessModes: 147 | - ReadWriteOnce 148 | resources: 149 | requests: 150 | storage: 4Gi 151 | ``` 152 | 153 | Create it on the cluster: 154 | 155 | ```bash 156 | kubectl create -f pvc.yaml 157 | ``` 158 | 159 | Show the PersistentVolumeClaims and PersistentVolumes: 160 | 161 | ```bash 162 | kubectl get pvc # will show as 'Bound' 163 | kubectl get pv # will show as 'Bound' as well 164 | ``` 165 | 166 |
168 | 169 | ### Create a busybox pod with command 'sleep 3600', save it on pod.yaml. Mount the PersistentVolumeClaim to '/etc/foo'. Connect to the 'busybox' pod, and copy the '/etc/passwd' file to '/etc/foo/passwd' 170 | 171 |
173 | 174 | Create a skeleton pod: 175 | 176 | ```bash 177 | kubectl run busybox --image=busybox --restart=Never -o yaml --dry-run=client -- /bin/sh -c 'sleep 3600' > pod.yaml 178 | vi pod.yaml 179 | ``` 180 | 181 | Add the lines that finish with a comment: 182 | 183 | ```YAML 184 | apiVersion: v1 185 | kind: Pod 186 | metadata: 187 | creationTimestamp: null 188 | labels: 189 | run: busybox 190 | name: busybox 191 | spec: 192 | containers: 193 | - args: 194 | - /bin/sh 195 | - -c 196 | - sleep 3600 197 | image: busybox 198 | imagePullPolicy: IfNotPresent 199 | name: busybox 200 | resources: {} 201 | volumeMounts: # 202 | - name: myvolume # 203 | mountPath: /etc/foo # 204 | dnsPolicy: ClusterFirst 205 | restartPolicy: Never 206 | volumes: # 207 | - name: myvolume # 208 | persistentVolumeClaim: # 209 | claimName: mypvc # 210 | status: {} 211 | ``` 212 | 213 | Create the pod: 214 | 215 | ```bash 216 | kubectl create -f pod.yaml 217 | ``` 218 | 219 | Connect to the pod and copy '/etc/passwd' to '/etc/foo/passwd': 220 | 221 | ```bash 222 | kubectl exec busybox -it -- cp /etc/passwd /etc/foo/passwd 223 | ``` 224 | 225 |
227 | 228 | ### Create a second pod which is identical with the one you just created (you can easily do it by changing the 'name' property on pod.yaml). Connect to it and verify that '/etc/foo' contains the 'passwd' file. Delete pods to cleanup. Note: If you can't see the file from the second pod, can you figure out why? What would you do to fix that? 229 | 230 | 231 | 232 |
234 | 235 | Create the second pod, called busybox2: 236 | 237 | ```bash 238 | vim pod.yaml 239 | # change 'metadata.name: busybox' to 'metadata.name: busybox2' 240 | kubectl create -f pod.yaml 241 | kubectl exec busybox2 -- ls /etc/foo # will show 'passwd' 242 | # cleanup 243 | kubectl delete po busybox busybox2 244 | kubectl delete pvc mypvc 245 | kubectl delete pv myvolume 246 | ``` 247 | 248 | If the file doesn't show on the second pod but it shows on the first, it has most likely been scheduled on a different node. 249 | 250 | ```bash 251 | # check which nodes the pods are on 252 | kubectl get po busybox -o wide 253 | kubectl get po busybox2 -o wide 254 | ``` 255 | 256 | If they are on different nodes, you won't see the file, because we used the `hostPath` volume type. 257 | If you need to access the same files in a multi-node cluster, you need a volume type that is independent of a specific node. 258 | There are lots of different types per cloud provider [(see here)](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes), a general solution could be to use NFS. 259 | 260 |
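Short of moving to a network-backed volume like NFS, a quick workaround is to pin both pods to the same node so they see the same `hostPath` directory — a sketch (the hostname value is hypothetical; use whatever `kubectl get nodes` reports):

```YAML
spec:
  nodeSelector:
    kubernetes.io/hostname: node01 # hypothetical node name
```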
262 | 263 | ### Create a busybox pod with 'sleep 3600' as arguments. Copy '/etc/passwd' from the pod to your local folder 264 | 265 |
267 | 268 | ```bash 269 | kubectl run busybox --image=busybox --restart=Never -- sleep 3600 270 | kubectl cp busybox:/etc/passwd ./passwd # kubectl cp command 271 | # previous command might report an error, feel free to ignore it since copy command works 272 | cat passwd 273 | ``` 274 | 275 |
277 | -------------------------------------------------------------------------------- /CKAD-exercises/a.core_concepts.md: -------------------------------------------------------------------------------- 1 | ![](https://gaforgithub.azurewebsites.net/api?repo=CKAD-exercises/core_concepts&empty) 2 | # Core Concepts (13%) 3 | 4 | kubernetes.io > Documentation > Reference > kubectl CLI > [kubectl Cheat Sheet](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) 5 | 6 | kubernetes.io > Documentation > Tasks > Monitoring, Logging, and Debugging > [Get a Shell to a Running Container](https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/) 7 | 8 | kubernetes.io > Documentation > Tasks > Access Applications in a Cluster > [Configure Access to Multiple Clusters](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) 9 | 10 | kubernetes.io > Documentation > Tasks > Access Applications in a Cluster > [Accessing Clusters](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/) using API 11 | 12 | kubernetes.io > Documentation > Tasks > Access Applications in a Cluster > [Use Port Forwarding to Access Applications in a Cluster](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) 13 | 14 | ### Create a namespace called 'mynamespace' and a pod with image nginx called nginx on this namespace 15 | 16 |
18 | 19 | ```bash 20 | kubectl create namespace mynamespace 21 | kubectl run nginx --image=nginx --restart=Never -n mynamespace 22 | ``` 23 | 24 |
26 | 27 | ### Create the pod that was just described using YAML 28 | 29 |
31 | 32 | Easily generate YAML with: 33 | 34 | ```bash 35 | kubectl run nginx --image=nginx --restart=Never --dry-run=client -n mynamespace -o yaml > pod.yaml 36 | ``` 37 | 38 | ```bash 39 | cat pod.yaml 40 | ``` 41 | 42 | ```yaml 43 | apiVersion: v1 44 | kind: Pod 45 | metadata: 46 | creationTimestamp: null 47 | labels: 48 | run: nginx 49 | name: nginx 50 | namespace: mynamespace 51 | spec: 52 | containers: 53 | - image: nginx 54 | imagePullPolicy: IfNotPresent 55 | name: nginx 56 | resources: {} 57 | dnsPolicy: ClusterFirst 58 | restartPolicy: Never 59 | status: {} 60 | ``` 61 | 62 | ```bash 63 | kubectl create -f pod.yaml 64 | ``` 65 | 66 | Alternatively, you can run in one line 67 | 68 | ```bash 69 | kubectl run nginx --image=nginx --restart=Never --dry-run=client -o yaml | kubectl create -n mynamespace -f - 70 | ``` 71 | 72 |
74 | 75 | ### Create a busybox pod (using kubectl command) that runs the command "env". Run it and see the output 76 | 77 |
79 | 80 | ```bash 81 | kubectl run busybox --image=busybox --command --restart=Never -it --rm -- env # -it will help in seeing the output, --rm will immediately delete the pod after it exits 82 | # or, just run it without -it 83 | kubectl run busybox --image=busybox --command --restart=Never -- env 84 | # and then, check its logs 85 | kubectl logs busybox 86 | ``` 87 | 88 |
90 | 91 | ### Create a busybox pod (using YAML) that runs the command "env". Run it and see the output 92 | 93 |
95 | 96 | ```bash 97 | # create a YAML template with this command 98 | kubectl run busybox --image=busybox --restart=Never --dry-run=client -o yaml --command -- env > envpod.yaml 99 | # see it 100 | cat envpod.yaml 101 | ``` 102 | 103 | ```YAML 104 | apiVersion: v1 105 | kind: Pod 106 | metadata: 107 | creationTimestamp: null 108 | labels: 109 | run: busybox 110 | name: busybox 111 | spec: 112 | containers: 113 | - command: 114 | - env 115 | image: busybox 116 | name: busybox 117 | resources: {} 118 | dnsPolicy: ClusterFirst 119 | restartPolicy: Never 120 | status: {} 121 | ``` 122 | 123 | ```bash 124 | # apply it and then see the logs 125 | kubectl apply -f envpod.yaml 126 | kubectl logs busybox 127 | ``` 128 | 129 |
131 | 132 | ### Get the YAML for a new namespace called 'myns' without creating it 133 | 134 |
136 | 137 | ```bash 138 | kubectl create namespace myns -o yaml --dry-run=client 139 | ``` 140 | 141 |
143 | 144 | ### Create the YAML for a new ResourceQuota called 'myrq' with hard limits of 1 CPU, 1G memory and 2 pods without creating it 145 | 146 |
148 | 149 | ```bash 150 | kubectl create quota myrq --hard=cpu=1,memory=1G,pods=2 --dry-run=client -o yaml 151 | ``` 152 | 153 |
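To see the quota in action, create it for real and read the usage back — once a cpu/memory quota is active, new pods in the namespace must declare requests/limits or they will be rejected:

```bash
kubectl create quota myrq --hard=cpu=1,memory=1G,pods=2
kubectl describe quota myrq # shows Used vs Hard
kubectl delete quota myrq   # cleanup
```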
155 | 156 | ### Get pods on all namespaces 157 | 158 |
160 | 161 | ```bash 162 | kubectl get po --all-namespaces 163 | ``` 164 | Alternatively 165 | 166 | ```bash 167 | kubectl get po -A 168 | ``` 169 |
171 | 172 | ### Create a pod with image nginx called nginx and expose traffic on port 80 173 | 174 |
176 | 177 | ```bash 178 | kubectl run nginx --image=nginx --restart=Never --port=80 179 | ``` 180 | 181 |
183 | 184 | ### Change pod's image to nginx:1.7.1. Observe that the container will be restarted as soon as the image gets pulled 185 | 186 |
188 | 189 | *Note*: The `RESTARTS` column should contain 0 initially (ideally - it could be any number) 190 | 191 | ```bash 192 | # kubectl set image POD/POD_NAME CONTAINER_NAME=IMAGE_NAME:TAG 193 | kubectl set image pod/nginx nginx=nginx:1.7.1 194 | kubectl describe po nginx # you will see an event 'Container will be killed and recreated' 195 | kubectl get po nginx -w # watch it 196 | ``` 197 | 198 | *Note*: some time after changing the image, you should see that the value in the `RESTARTS` column has been increased by 1, because the container has been restarted, as stated in the events shown at the bottom of the `kubectl describe pod` command: 199 | 200 | ``` 201 | Events: 202 | Type Reason Age From Message 203 | ---- ------ ---- ---- ------- 204 | [...] 205 | Normal Killing 100s kubelet, node3 Container pod1 definition changed, will be restarted 206 | Normal Pulling 100s kubelet, node3 Pulling image "nginx:1.7.1" 207 | Normal Pulled 41s kubelet, node3 Successfully pulled image "nginx:1.7.1" 208 | Normal Created 36s (x2 over 9m43s) kubelet, node3 Created container pod1 209 | Normal Started 36s (x2 over 9m43s) kubelet, node3 Started container pod1 210 | ``` 211 | 212 | *Note*: you can check pod's image by running 213 | 214 | ```bash 215 | kubectl get po nginx -o jsonpath='{.spec.containers[].image}{"\n"}' 216 | ``` 217 | 218 |
220 | 221 | ### Get nginx pod's ip created in previous step, use a temp busybox image to wget its '/' 222 | 223 |
225 | 226 | ```bash 227 | kubectl get po -o wide # get the IP, will be something like '10.1.1.131' 228 | # create a temp busybox pod 229 | kubectl run busybox --image=busybox --rm -it --restart=Never -- wget -O- 10.1.1.131:80 230 | ``` 231 | 232 | Alternatively you can also try a more advanced option: 233 | 234 | ```bash 235 | # Get IP of the nginx pod 236 | NGINX_IP=$(kubectl get pod nginx -o jsonpath='{.status.podIP}') 237 | # create a temp busybox pod 238 | kubectl run busybox --image=busybox --env="NGINX_IP=$NGINX_IP" --rm -it --restart=Never -- sh -c 'wget -O- $NGINX_IP:80' 239 | ``` 240 | 241 | Or just in one line: 242 | 243 | ```bash 244 | kubectl run busybox --image=busybox --rm -it --restart=Never -- wget -O- $(kubectl get pod nginx -o jsonpath='{.status.podIP}:{.spec.containers[0].ports[0].containerPort}') 245 | ``` 246 | 247 |
249 | 250 | ### Get pod's YAML 251 | 252 |
254 | 255 | ```bash 256 | kubectl get po nginx -o yaml 257 | # or 258 | kubectl get po nginx -oyaml 259 | # or 260 | kubectl get po nginx --output yaml 261 | # or 262 | kubectl get po nginx --output=yaml 263 | ``` 264 | 265 |
267 | 268 | ### Get information about the pod, including details about potential issues (e.g. pod hasn't started) 269 | 270 |
272 | 273 | ```bash 274 | kubectl describe po nginx 275 | ``` 276 | 277 |
279 | 280 | ### Get pod logs 281 | 282 |
284 | 285 | ```bash 286 | kubectl logs nginx 287 | ``` 288 | 289 |
291 | 292 | ### If pod crashed and restarted, get logs about the previous instance 293 | 294 |
296 | 297 | ```bash 298 | kubectl logs nginx -p 299 | # or 300 | kubectl logs nginx --previous 301 | ``` 302 | 303 |
305 | 306 | ### Execute a simple shell on the nginx pod 307 | 308 |
310 | 311 | ```bash 312 | kubectl exec -it nginx -- /bin/sh 313 | ``` 314 | 315 |
317 | 318 | ### Create a busybox pod that echoes 'hello world' and then exits 319 | 320 |
322 | 323 | ```bash 324 | kubectl run busybox --image=busybox -it --restart=Never -- echo 'hello world' 325 | # or 326 | kubectl run busybox --image=busybox -it --restart=Never -- /bin/sh -c 'echo hello world' 327 | ``` 328 | 329 |
331 | 332 | ### Do the same, but have the pod deleted automatically when it's completed 333 | 334 |
336 | 337 | ```bash 338 | kubectl run busybox --image=busybox -it --rm --restart=Never -- /bin/sh -c 'echo hello world' 339 | kubectl get po # nowhere to be found :) 340 | ``` 341 | 342 |
344 | 345 | ### Create an nginx pod and set an env value as 'var1=val1'. Check the env value existence within the pod 346 | 347 |
349 | 350 | ```bash 351 | kubectl run nginx --image=nginx --restart=Never --env=var1=val1 352 | # then 353 | kubectl exec -it nginx -- env 354 | # or 355 | kubectl exec -it nginx -- sh -c 'echo $var1' 356 | # or 357 | kubectl describe po nginx | grep val1 358 | # or 359 | kubectl run nginx --restart=Never --image=nginx --env=var1=val1 -it --rm -- env 360 | # or 361 | kubectl run nginx --image nginx --restart=Never --env=var1=val1 -it --rm -- sh -c 'echo $var1' 362 | ``` 363 | 364 |

365 |
366 | -------------------------------------------------------------------------------- /learning/What-is-Pod-in-Kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # What is a Pod in Kubernetes? ☸️💡🎉 2 | 3 | ![pod](https://imgur.com/8jx68Ep.png) 4 | 5 | # Pods 6 | 7 | > A Pod is the basic execution unit of a Kubernetes application–the smallest and simplest unit in the Kubernetes object model that you create or deploy. 8 | 9 | We will define a "Hello World" pod using a `yaml` file. First we define the `apiVersion` and the `kind` of Kubernetes object we are creating (Pod, Deployment, Service, etc.). 10 | 11 | ```yaml 12 | apiVersion: v1 13 | kind: Pod 14 | ``` 15 | 16 | Then we have to specify some identifiers for the Pod that we are creating. These are called `metadata`. The `metadata` object holds the name of the pod and any labels to identify it. 17 | 18 | ```yaml 19 | metadata: 20 | name: hello-world 21 | labels: 22 | app: hello 23 | ``` 24 | 25 | We need to define the containers that will run in the Pod. A pod can contain one or more containers inside. We define them in the `spec` object. 26 | 27 | ```yaml 28 | spec: 29 | containers: 30 | - name: hello-world-container 31 | image: busybox 32 | command: ['sh', '-c', 'echo Hello World! && sleep 3600'] 33 | ``` 34 | 35 | This is similar to a Docker container definition, except the keywords differ. All it does is pull the `busybox` image from the repository and run the specified command. It also names the running container `hello-world-container`. 36 | 37 | [![Alt Text](https://res.cloudinary.com/practicaldev/image/fetch/s--hW4gRlU0--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://thepracticaldev.s3.amazonaws.com/i/bpgn65e0fp83v1lxqhhy.jpeg)](https://res.cloudinary.com/practicaldev/image/fetch/s--hW4gRlU0--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://thepracticaldev.s3.amazonaws.com/i/bpgn65e0fp83v1lxqhhy.jpeg) 38 | 39 | ## But why a pod and not a container? 40 | 41 | Sometimes we need to share resources between two different containers, and they need to connect with each other without any latency. Making them coexist in a cohesive environment is desirable, so we add these containers to a single pod. Kubernetes then schedules these containers into the same network namespace, which makes it easy for them to share resources and communicate. 42 | 43 | These shared containers in a pod have the following advantages: 44 | 45 | 1. They communicate with each other using `localhost`. 46 | 47 | 2. They share the storage volumes 48 | 49 | 3. They form a cohesive unit of service. 50 | 51 | 52 | Check out more about Pods [here](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) 53 | 54 | --- 55 | 56 | ## Init and App container 57 | 58 | Kubernetes schedules the pods and runs them inside the cluster. Sometimes we need to run something before the main container is started. 59 | 60 | For example, we may need to start the application only after the database is up. For this we use `initContainers`.
61 | 62 | GIF 63 | 64 | ![Alt Text](https://res.cloudinary.com/practicaldev/image/fetch/s--HK4oGCmu--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_66%2Cw_800/https://thepracticaldev.s3.amazonaws.com/i/7sluvf9dajw7mhm5v72c.gif) 65 | 66 | 67 | 68 | ```yaml 69 | apiVersion: v1 70 | kind: Pod 71 | metadata: 72 | name: hello-world 73 | labels: 74 | app: hello 75 | spec: 76 | containers: 77 | - name: hello-world-container 78 | image: busybox 79 | command: ['sh', '-c', 'echo Hello World! && sleep 3600'] 80 | initContainers: 81 | - name: mysql-container 82 | image: busybox 83 | command: 84 | - '/bin/sh' 85 | - '-c' 86 | - | 87 | while true 88 | do 89 | rt=$(nc -z -w 1 hello-mysql 3306) 90 | if [ $? -eq 0 ]; then 91 | echo "DB is UP" 92 | break 93 | fi 94 | echo "DB is not yet reachable;sleep for 10s before retry" 95 | sleep 10 96 | done 97 | ``` 98 | 99 | The `initContainers` section is similar to `containers`. Here we used another `busybox` container that waits till the `hello-mysql` pod is up and running. The `hello-mysql` deployment looks like this. 100 | 101 | ```yaml 102 | apiVersion: apps/v1 103 | kind: Deployment 104 | metadata: 105 | name: hello-mysql 106 | spec: 107 | selector: 108 | matchLabels: 109 | app: hello-mysql 110 | template: 111 | metadata: 112 | labels: 113 | app: hello-mysql 114 | spec: 115 | volumes: 116 | - name: data 117 | emptyDir: {} 118 | containers: 119 | - name: mysql 120 | image: mysql 121 | env: 122 | - name: MYSQL_ALLOW_EMPTY_PASSWORD 123 | value: 'yes' 124 | - name: MYSQL_DATABASE 125 | value: 'some-app' 126 | ports: 127 | - containerPort: 3306 128 | --- 129 | apiVersion: v1 130 | kind: Service 131 | metadata: 132 | name: hello-mysql 133 | spec: 134 | selector: 135 | app: hello-mysql 136 | ports: 137 | - port: 3306 138 | ``` 139 | 140 | The above definition consists of a `deployment` and a `service`. We will explore them later. But the template for the `Deployment` is more similar to the `Pod` template. 141 | 142 | The `initContainers` boots up a busybox container and waits till it returns success. Once the `DB is UP`, the `appContainers` or `containers` are started. 143 | 144 | More about `initContainers` [here](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/). 145 | 146 | --- 147 | 148 | ## Pods life cycle 149 | 150 | Each pod has a `status` object. The `status` object holds the `phase` field. 151 | 152 | > The phase is not intended to be a comprehensive rollup of observations of Container or Pod state, nor is it intended to be a comprehensive state machine. 153 | 154 | [![Alt Text](https://res.cloudinary.com/practicaldev/image/fetch/s--GUaCvSLY--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://thepracticaldev.s3.amazonaws.com/i/3kfhr014pwme691k7yqm.jpeg)](https://res.cloudinary.com/practicaldev/image/fetch/s--GUaCvSLY--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://thepracticaldev.s3.amazonaws.com/i/3kfhr014pwme691k7yqm.jpeg) 155 | 156 | When the Kubernetes controller decides a pod needs to be created, it asks the scheduler to schedule the pod. The scheduler asks the kubelets to download the container image(s) from the repository; they are then downloaded over the network. During this stage, the pods are in the `Pending` phase. 157 | 158 | Once the images are downloaded, the pod is assigned a node in the Kubernetes cluster and the containers start running. Whether or not the containers boot up correctly and run all the required processes, Kubernetes assigns the `Running` phase to the pod.
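For example, you can read a pod's current phase straight from its `status` object (a quick sketch, using the `hello-world` pod defined earlier):

```bash
kubectl get pod hello-world -o jsonpath='{.status.phase}{"\n"}'
```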
159 | 160 | In the Kubernetes world, all containers will be terminated eventually. They terminate either successfully or with a failure. 161 | 162 | A pod in the `Succeeded` phase has terminated successfully, and the pod will never be restarted. 163 | 164 | A pod whose containers terminate with a failure ends up in the `Failed` phase. 165 | 166 | Sometimes, the Kubernetes master node will not be able to connect with the pods that are running. These pods are marked with the `Unknown` phase. 167 | 168 | The main advantage of Kubernetes is its ability to restart pods/containers automagically. We can define this behaviour for a pod with the `restartPolicy` field. The `restartPolicy` accepts `Always | OnFailure | Never`. The default value is `Always`. That is, whenever a pod terminates with a `failed` status, the Kubernetes system will try to restart it. 169 | 170 | --- 171 | 172 | ## Probe it 173 | 174 | The container might be running, but Kubernetes cannot confirm whether the application inside works correctly. For that, we need to check periodically whether the endpoint or the application in the container is responding correctly. Kubernetes provides three types of handlers that will probe the container and ensure that it is in the correct and expected state. 175 | 176 | [![Alt Text](https://res.cloudinary.com/practicaldev/image/fetch/s--AqpvGP-r--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://thepracticaldev.s3.amazonaws.com/i/0qnfowu4ri1vj1o0fq6j.jpeg)](https://res.cloudinary.com/practicaldev/image/fetch/s--AqpvGP-r--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://thepracticaldev.s3.amazonaws.com/i/0qnfowu4ri1vj1o0fq6j.jpeg) 177 | 178 | **The handlers are**: 179 | 180 | 1. ExecAction 181 | 182 | 2. TCPSocketAction 183 | 184 | 3. HTTPGetAction 185 | 186 | 187 | They are simple to understand. They allow us to probe by executing a command inside the container, checking for a TCP connection, or making an HTTP request, respectively. 188 | 189 | A probe can result in `Success` | `Failure` | `Unknown`. 190 | 191 | But it is important to understand when to probe. In the previous example with the database, we were waiting until the database was up; during this time, probing the application is not possible. So Kubernetes has two kinds of probes to check the running containers. They are: 192 | 193 | 1. livenessProbe 194 | 195 | 2. readinessProbe 196 | 197 | 198 | **readinessProbe** 199 | 200 | The readinessProbe probes the container until the container is ready, that is, until all the initial jobs like `initContainers` have run. The pod is made available only when the `readinessProbe` succeeds. 201 | 202 | Consider that your application has a `health` endpoint that reports the application's health. 203 | 204 | ```yaml 205 | readinessProbe: 206 | httpGet: 207 | path: hello-world/health 208 | port: http 209 | initialDelaySeconds: 20 210 | periodSeconds: 15 211 | failureThreshold: 6 212 | ``` 213 | 214 | The above readinessProbe performs an `HTTPGetAction` at the specified path `hello-world/health`. It initially waits for 20 seconds before it starts probing (`initialDelaySeconds`). Then, every 15 seconds (`periodSeconds`), it probes the container for readiness. 215 | 216 | The `failureThreshold` here specifies that the probe is reported as `failure` only after receiving 6 consecutive failure results.
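The same readiness check can be expressed with the other handler types as well. For instance, a hypothetical `ExecAction` variant (the marker file path is illustrative, not part of the original example):

```yaml
readinessProbe:
  exec:
    command: ['cat', '/tmp/app-ready']   # succeeds only once this file exists
  initialDelaySeconds: 20
  periodSeconds: 15
```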
217 | 218 | **livenessProbe** 219 | 220 | The livenessProbe indicates whether the container is `running`. When the probe fails, the container is killed and then restarted based on the `restartPolicy`. 221 | 222 | ```yaml 223 | livenessProbe: 224 | httpGet: 225 | path: hello-world/health 226 | port: http 227 | initialDelaySeconds: 120 228 | ``` 229 | 230 | Here the livenessProbe checks the same endpoint as above, but it only starts checking for liveness after an initial delay of 120 seconds. 231 | 232 | --- 233 | 234 | ## Limitations 235 | 236 | Pods have containers inside them. It is very important for us to limit the amount of `memory` and `cpu` they can utilize. 237 | 238 | > CPU and memory are collectively referred to as compute resources, or just resources. 239 | 240 | ```yaml 241 | resources: 242 | requests: 243 | memory: "512Mi" 244 | cpu: "500m" 245 | limits: 246 | memory: "1Gi" 247 | cpu: "1" 248 | ``` 249 | 250 | Here we are specifying that the container is allowed to request `512Mi` of memory and `500m` of CPU, which is half of one CPU core. 251 | 252 | The `limits` specify the maximum memory and CPU allowed to the container. When the memory limit is exceeded, Kubernetes kills the container and restarts it based on the restartPolicy. 253 | 254 | It is very important to give proper values here and allocate memory and CPU efficiently. 255 | 256 | > Requests and limits are defined per container; every container in the pod can declare its own. 257 | 258 | --- 259 | 260 | Hopefully, this might have given you a brief overview of `Pods` in Kubernetes. Head over to [kubernetes.io](https://kubernetes.io/) for more information on Kubernetes. 261 | 262 | Want to understand the whole picture of Kubernetes and how it works? Check out [this](https://harshhaa.hashnode.dev/kubernetes-for-everyone) post. 263 | 264 | ### If you like this article, please share with others. ❤️ 265 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🚀 Kubernetes Projects & Learning Hub 2 | 3 |
4 | Kubernetes Logo 5 |
6 |

A Comprehensive Guide to Master Kubernetes Through Hands-on Projects

7 |

Learn, Practice, and Deploy Real-world Applications on Kubernetes

8 |
9 | 10 |
11 | Kubernetes Architecture 12 |
13 | 14 | --- 15 | 16 | ## 📋 Table of Contents 17 | - [Prerequisites](#-prerequisites) 18 | - [Setup & Preparation](#-setup--preparation) 19 | - [Learning Path](#-kubernetes-learning-path) 20 | - [Real-Time Projects](#-real-time-kubernetes-projects) 21 | - [Guides & Best Practices](#-kubernetes-guides--best-practices) 22 | - [Troubleshooting](#-troubleshooting-kubernetes-issues) 23 | - [Cloud Platforms](#-kubernetes-in-the-cloud) 24 | - [Certifications](#-cncf-kubernetes-certifications) 25 | - [Infrastructure as Code](#%EF%B8%8F-kubernetes-infrastructure-as-code-iac) 26 | - [Cheat Sheets & Tools](#-kubernetes-cheat-sheets--tools) 27 | 28 | --- 29 | 30 | ## 📌 Prerequisites 31 | 32 | Before diving in, ensure you have: 33 | 34 | | Requirement | Description | 35 | |------------|-------------| 36 | | `kubectl` | Basic command-line knowledge | 37 | | Containers | Understanding of Docker/containerd/cri-o | 38 | | Linux | Basic Linux commands familiarity | 39 | 40 | --- 41 | 42 | ## 🛠 Setup & Preparation 43 | 44 | ### Essential Tools Installation 45 | 46 | 1. **Kubernetes CLI (`kubectl`)** 47 | ```bash 48 | # Installation guide available at: 49 | https://kubernetes.io/docs/tasks/tools/ 50 | ``` 51 | 52 | 2. **Local Kubernetes Cluster** 53 | - Minikube 54 | - kind 55 | - k3s 56 | 57 | 📚 **Detailed Setup Guide**: [Kubernetes CLI & Cluster Setup](https://gist.github.com/NotHarshhaa/854ed5c12fff07acde88faf95b9decff) 58 | 59 | > 💡 **Pro Tip**: Enable kubectl autocompletion for improved productivity! 60 | 61 | --- 62 | 63 | ## 📚 Kubernetes Learning Path 64 | 65 | ### Core Concepts 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 |
| Level | Topic | Description |
|-------|-------|-------------|
| Beginner | Kubernetes for Everyone | Foundation concepts and basic architecture |
| Beginner | Understanding Pods | Deep dive into Kubernetes Pods |
| Intermediate | Application Deployment | Complete deployment workflow |
| Intermediate | Architecture Overview | Detailed component analysis |
| Advanced | DaemonSets | Cluster-wide service deployment |
98 | 99 | ### Advanced Topics 100 | - ConfigMaps and Secrets Management 101 | - Network Policies Implementation 102 | - RBAC Access Control 103 | - Monitoring and Logging Solutions 104 | 105 | > 🔄 New topics are regularly added to keep content fresh and relevant! 106 | 107 | --- 108 | 109 | ## 🔥 Real-Time Kubernetes Projects 110 | 111 | ### Production-Grade Implementations 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 127 | 128 | 129 | 130 | 131 | 136 | 137 | 138 | 139 | 140 | 145 | 146 | 147 | 148 | 149 | 154 | 155 |
| Project | Complexity | Key Learning Points |
|---------|------------|---------------------|
| 10-Microservices on EKS | ⭐⭐⭐⭐⭐ | Microservices Architecture, AWS EKS Management, Service Mesh Integration |
| Spring Boot Deployment | ⭐⭐⭐ | Java Application Deployment, Service Configuration, Resource Management |
| Uber Clone DevSecOps | ⭐⭐⭐⭐ | Security Implementation, CI/CD Pipeline, Scalability Patterns |
| Jenkins CI/CD Pipeline | ⭐⭐⭐ | Automated Deployment, Jenkins Integration, Pipeline Management |
156 | 157 |
158 | Kubernetes Tools 159 |
160 | Kubernetes Resource Map 161 |
162 | 163 | ## 🌟 Additional Resources & Projects 164 | 165 | ### Production-Grade Examples 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 182 | 183 | 184 | 185 | 186 | 192 | 193 | 194 | 195 | 196 | 202 | 203 |
| Project | Description | Key Features |
|---------|-------------|--------------|
| Online Boutique | Cloud-native microservices demo app by Google | 11 microservices in different languages, gRPC communication, Cloud Operations integration, Istio & Service Mesh ready |
| Kubernetes Configs | Production-ready Kubernetes configurations | Best practices & templates, CI/CD integrations, Multi-cloud support, Advanced security configs |
| Kubernetes Examples | Official Kubernetes example applications | Guestbook application, Cassandra deployment, WordPress with MySQL, Various storage examples |
204 | 205 | ### Essential Tools & Utilities 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 | 232 | 233 |
| Tool | Category | Description |
|------|----------|-------------|
| K9s | CLI Tool | Terminal UI to interact with clusters |
| Popeye | Cluster Sanitizer | Scans live clusters for misconfigurations |
| KOPS | Cluster Management | Production-grade K8s installation & management |
| Kubespray | Deployment | Deploy production-ready clusters |
234 | 235 | ### Learning Resources 236 | 237 | - [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) - Step by step guide 238 | - [Kubernetes Learning Path](https://github.com/techiescamp/kubernetes-learning-path) - Structured learning path 239 | - [Awesome Kubernetes](https://github.com/ramitsurana/awesome-kubernetes) - Curated list of resources 240 | - [Kubernetes Failure Stories](https://github.com/hjacobs/kubernetes-failure-stories) - Learn from others' experiences 241 | 242 | --- 243 | 244 | ## 📖 Kubernetes Guides & Best Practices 245 | 246 | ### 🌐 Networking 247 | - [The Kubernetes Networking Guide](https://www.tkng.io/) 248 | - [Hands-on Networking Labs](https://www.tkng.io/lab/) 249 | 250 | ### 🔒 Security 251 | 1. [Official Security Checklist](https://kubernetes.io/docs/concepts/security/security-checklist/) 252 | 2. [Awesome K8s Security](https://github.com/magnologan/awesome-k8s-security) 253 | 3. [Security CTF Challenges](https://eksclustergames.com) 254 | 255 | ### 🗄️ Storage 256 | - **Comprehensive Guide**: [Understanding Kubernetes Storage](https://medium.com/@seifeddinerajhi/understanding-storage-in-kubernetes-ee2c19001aae) 257 | - Persistent Volumes (PV) 258 | - Persistent Volume Claims (PVC) 259 | - Storage Classes 260 | - Dynamic Provisioning 261 | 262 | --- 263 | 264 | ## 🛠 Troubleshooting Kubernetes Issues 265 | 266 | | Issue Type | Resource | 267 | |------------|----------| 268 | | Common Errors | [Solutions Guide](https://cloudtweaks.com/2023/01/common-kubernetes-errors/) | 269 | | Exit Codes | [Complete Guide](https://komodor.com/learn/exit-codes-in-containers-and-kubernetes-the-complete-guide/) | 270 | | Deployments | [Visual Troubleshooter](https://learnk8s.io/troubleshooting-deployments) | 271 | | General Issues | [Comprehensive Guide](https://komodor.com/learn/kubernetes-troubleshooting-the-complete-guide/) | 272 | 273 | --- 274 | 275 | ## ☁ Kubernetes in the Cloud 276 | 277 | ### Major Cloud Providers 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 295 | 296 | 297 | 298 | 299 | 305 | 306 | 307 | 308 | 309 | 315 | 316 |
| Platform | Service |
|----------|---------|
| AWS | EKS |
| Azure | AKS |
| Google | GKE |
317 | 318 | --- 319 | 320 | ## 🎓 CNCF Kubernetes Certifications 321 | 322 | ### Certification Preparation Resources 323 | 324 | | Certification | Resources | 325 | |---------------|-----------| 326 | | CKA | [Practice Exercises](https://github.com/alijahnas/CKA-practice-exercises) • [Additional Exercises](https://github.com/chadmcrowell/CKA-Exercises) | 327 | | CKS | [Study Guide](https://github.com/walidshaari/Certified-Kubernetes-Security-Specialist) • [Video Course](https://www.youtube.com/watch?v=d9xfB5qaOfg) | 328 | 329 | --- 330 | 331 | ## ⚙️ Kubernetes Infrastructure as Code (IaC) 332 | 333 | | Tool | Purpose | Resource | 334 | |------|----------|----------| 335 | | Helm | Package Manager | [Repository](https://github.com/helm/helm) | 336 | | Kustomize | Config Management | [Repository](https://github.com/kubernetes-sigs/kustomize) | 337 | | Terraform | Infrastructure | [Documentation](https://www.terraform.io/) | 338 | | Pulumi | Multi-language IaC | [Repository](https://github.com/pulumi/pulumi) | 339 | | Skaffold | Development | [Repository](https://github.com/GoogleContainerTools/skaffold) | 340 | 341 | --- 342 | 343 | ## 🔥 Kubernetes Cheat Sheets & Tools 344 | 345 | ### Quick Reference Guides 346 | - [kubectl Commands](https://github.com/NotHarshhaa/devops-cheatsheet/blob/master/Containerization/Kubernetes.md) 347 | - [Helm Commands](https://github.com/NotHarshhaa/devops-cheatsheet/blob/master/Containerization/Helm.md) 348 | - [Docker Reference](https://github.com/NotHarshhaa/devops-cheatsheet/blob/master/Containerization/Docker.md) 349 | 350 | ### Tools 351 | - [K8s YAML Generator](https://www.k8syaml.com/) 352 | 353 | --- 354 | 355 | ## 🤝 Contributing 356 | 357 | We welcome contributions! To contribute: 358 | 359 | 1. Fork the repository 360 | 2. Create your feature branch 361 | 3. Commit your changes 362 | 4. Push to the branch 363 | 5. Create a Pull Request 364 | 365 | --- 366 | 367 | ## ⭐ Show Your Support 368 | 369 | If you find this repository helpful, please give it a star! Your support motivates continued maintenance and improvements. 370 | 371 | --- 372 | 373 | ## 🛠️ Author & Community 374 | 375 | Created with 💡 by [Harshhaa](https://github.com/NotHarshhaa) 376 | 377 | ### 📫 Connect With Me 378 | 379 |
380 | 381 | [![LinkedIn](https://img.shields.io/badge/LinkedIn-%230077B5.svg?style=for-the-badge&logo=linkedin&logoColor=white)](https://linkedin.com/in/harshhaa-vardhan-reddy) 382 | [![GitHub](https://img.shields.io/badge/GitHub-181717?style=for-the-badge&logo=github&logoColor=white)](https://github.com/NotHarshhaa) 383 | [![Telegram](https://img.shields.io/badge/Telegram-26A5E4?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/prodevopsguy) 384 | [![Dev.to](https://img.shields.io/badge/Dev.to-0A0A0A?style=for-the-badge&logo=dev.to&logoColor=white)](https://dev.to/notharshhaa) 385 | [![Hashnode](https://img.shields.io/badge/Hashnode-2962FF?style=for-the-badge&logo=hashnode&logoColor=white)](https://hashnode.com/@prodevopsguy) 386 | 387 |
388 | 389 | --- 390 | 391 | ### 📢 Stay Updated 392 | 393 |
394 | Follow Me 395 |
396 | -------------------------------------------------------------------------------- /learning/Deploying-an-Application-on-Kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Deploying an Application on Kubernetes: A Complete Guide 2 | 3 | ![guide](https://imgur.com/2TkZCg7.png) 4 | 5 | **Kubernetes** is an open-source platform for automating the deployment, scaling, and management of containerized applications. It is a popular tool for container orchestration and provides a way to manage large numbers of containers as a single unit rather than having to manage each container individually. 6 | 7 | ## ✍️ **Importance of Kubernetes** 8 | 9 | **Kubernetes** has become an essential tool for managing and deploying modern applications, and its importance lies in its ability to provide a unified platform for automating the deployment, management, and scaling of applications. With Kubernetes, organizations can achieve increased efficiency and agility in their development and deployment processes, resulting in faster time to market and reduced operational costs. Kubernetes also provides a high degree of scalability, allowing organizations to easily scale their applications as their business grows and evolves. 10 | 11 | Additionally, Kubernetes offers robust security features, ensuring that applications are protected against potential threats and vulnerabilities. With its active community and extensive ecosystem, Kubernetes provides organizations with access to a wealth of resources, tools, and services that can help them improve and enhance their applications continuously. Overall, the importance of using Kubernetes lies in its ability to provide a flexible, scalable, and secure platform for managing modern applications and enabling organizations to stay ahead in a rapidly evolving digital landscape. 12 | 13 | ## ✍️ **Here's a basic overview of how to use Kubernetes** 14 | 15 | * ### **Set up a cluster** 16 | 17 | To use Kubernetes, you need to set up a cluster, which is a set of machines that run the Kubernetes control plane and the containers. You can set up a cluster on your infrastructure or use a cloud provider such as Amazon Web Services (AWS), Google Cloud Platform (GCP), or Microsoft Azure. 18 | 19 | * ### **Package your application into containers** 20 | 21 | To run your application on Kubernetes, you need to package it into one or more containers. A container is a standalone executable package that includes everything needed to run your application, including the code, runtime, system tools, libraries, and settings. 22 | 23 | * ### **Define the desired state of your application using manifests** 24 | 25 | Kubernetes uses manifests, which are files that describe the desired state of your application, to manage the deployment and scaling of your containers. The manifests specify the number of replicas of each container, how they should be updated, and how they should communicate with each other. 26 | 27 | * ### **Push your code to an SCM platform** 28 | 29 | Push your application code to an SCM platform such as GitHub. 30 | 31 | * ### **Use a CI/CD tool to automate** 32 | 33 | Use a specialised CI/CD platform such as Harness to automate the deployment of your application. Once it is set up, you can deploy your application code easily and often, in small chunks, whenever new code is pushed to the project repository.
34 | 35 | * ### **Expose the application** 36 | 37 | Once you deploy your application, you need to expose it to the outside world by creating a Service of type LoadBalancer or NodePort. This allows users to access the application through a stable IP address or hostname. 38 | 39 | * ### **Monitor and manage your application** 40 | 41 | After your application is deployed, you can use the kubectl tool to monitor the status of your containers, make changes to the desired state, and scale your application up or down. 42 | 43 | These are the general steps to deploy an application on Kubernetes. Depending on the application's complexity, additional steps may be required, such as configuring storage, network policies, or security. However, this should give you a good starting point for deploying your application on Kubernetes. 44 | 45 | Today, we will see how to automate simple application deployment on Kubernetes using Harness. 46 | 47 | ## 👉 **Prerequisites** 48 | 49 | * Free [Harness Cloud](https://app.harness.io/auth/#/signup/?module=cd&utm_source=internal&utm_medium=social&utm_campaign=devadvocacy&utm_content=pavan_notes_cicd_article&utm_term=get-started) account 50 | 51 | * Download and install [Node.js and npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) 52 | 53 | * GitHub account, we will be using our [sample notes application](https://github.com/pavanbelagatti/notes-app-cicd) 54 | 55 | * Kubernetes cluster access, you can use [Minikube](https://minikube.sigs.k8s.io/docs/start/) or [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) to create a single-node cluster 56 | 57 | ## 👉 **Tutorial** 58 | 59 | ![Kubernetes deployment](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/qm7dy8qnl1kyg3q10v0b.png) 60 | 61 | We will use our sample application that is already in the GitHub repository. We will use a Kubernetes cluster to deploy our application. Next, we will use a CI/CD platform, Harness, in this tutorial to show how we can automate the software delivery process easily. 62 | 63 | ### **Step 1: Test the sample application locally** 64 | 65 | Fork and clone the [sample notes application](https://github.com/pavanbelagatti/notes-app-cicd) 66 | 67 | Go to the application folder with the following command 68 | 69 | ```bash 70 | cd notes-app-cicd 71 | ``` 72 | 73 | Install dependencies with the following command 74 | 75 | ```bash 76 | npm install 77 | ``` 78 | 79 | Run the application locally to see if the application works as expected 80 | 81 | ```bash 82 | node app.js 83 | ``` 84 | 85 | ### **Step 2: Containerize the application** 86 | 87 | You can see the Dockerfile in the sample application repository. 88 | 89 | ```dockerfile 90 | FROM node:14-alpine 91 | 92 | WORKDIR /app 93 | 94 | COPY package*.json ./ 95 | 96 | RUN npm install 97 | 98 | COPY . . 99 | 100 | EXPOSE 3000 101 | 102 | CMD [ "npm", "start" ] 103 | ``` 104 | 105 | Use the following command to build, tag and push the image to any container registry of your choice. We will push it to Docker Hub in this tutorial. 106 | 107 | For Mac M1, use the following command 108 | 109 | ```bash 110 | docker buildx build --platform=linux/amd64,linux/arm64 -t docker.io/<your-dockerhub-username>/<image-name>:<tag> --push -f ./Dockerfile . 111 | ``` 112 | 113 | For machines other than Mac M1, use the below commands to build and push the image, 114 | 115 | ```bash 116 | docker build -t <your-dockerhub-username>/<image-name> .
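# For example, with hypothetical names (replace them with your own):
#   docker build -t johndoe/notes-app:latest .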
117 | ``` 118 | 119 | ```bash 120 | docker push <your-dockerhub-username>/<image-name> 121 | ``` 122 | 123 | ### **Step 3: Create or get access to a Kubernetes cluster** 124 | 125 | Make sure to have access to a Kubernetes cluster from any cloud provider. You can even use Minikube or Kind to create a cluster. In this tutorial, we are going to make use of a Kubernetes cluster from Google Cloud (GCP) 126 | 127 | I already have an account on Google Cloud, so creating a cluster will be easy. 128 | 129 | ### **Step 4: Make sure the Kubernetes manifest files are neat and clean** 130 | 131 | You need deployment yaml and service yaml files to deploy and expose your application. Make sure both files are configured properly. 132 | 133 | You can see that we have the deployment.yaml and service.yaml files already present in the sample application repository. 134 | 135 | **Below is our deployment.yaml file.** 136 | 137 | ```yaml 138 | apiVersion: apps/v1 139 | kind: Deployment 140 | metadata: 141 | name: notes-app-deployment 142 | labels: 143 | app: notes-app 144 | spec: 145 | replicas: 2 146 | selector: 147 | matchLabels: 148 | app: notes-app 149 | template: 150 | metadata: 151 | labels: 152 | app: notes-app 153 | spec: 154 | containers: 155 | - name: notes-app 156 | image: pavansa/notes-app 157 | imagePullPolicy: IfNotPresent 158 | ports: 159 | - containerPort: 3000 160 | resources: 161 | requests: 162 | cpu: "100m" 163 | memory: "128Mi" 164 | limits: 165 | cpu: "250m" 166 | memory: "256Mi" 167 | env: 168 | - name: NODE_ENV 169 | value: "production" 170 | readinessProbe: 171 | httpGet: 172 | path: / 173 | port: 3000 174 | initialDelaySeconds: 5 175 | periodSeconds: 10 176 | livenessProbe: 177 | httpGet: 178 | path: / 179 | port: 3000 180 | initialDelaySeconds: 15 181 | periodSeconds: 20 182 | ``` 183 | 184 | **Below is our service.yaml file** 185 | 186 | ```yaml 187 | apiVersion: v1 188 | kind: Service 189 | metadata: 190 | name: notes-app-service # Service name 191 | labels: 192 | app: notes-app 193 | app.kubernetes.io/name: notes-app 194 | spec: 195 | type: LoadBalancer # Expose the service externally 196 | selector: 197 | app: notes-app # Selector to match pods with the same label 198 | ports: 199 | - port: 80 # Port exposed externally 200 | targetPort: 3000 # Port on the container 201 | protocol: TCP 202 | ``` 203 | 204 | Apply the manifest files with the following commands, starting with the deployment and then the service file. 205 | 206 | ```bash 207 | kubectl apply -f deployment.yaml 208 | ``` 209 | 210 | ```bash 211 | kubectl apply -f service.yaml 212 | ``` 213 | 214 | Verify that the pods are running as expected after the apply commands. 215 | 216 | ```bash 217 | kubectl get pods 218 | ``` 219 | 220 | You should see the pods and their status. 221 | 222 | ![running pods](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/tofw9z6mevb2ezk0twmt.png) 223 | 224 | ### **Step 5: Let’s automate the deployment using Harness** 225 | 226 | You need a CI/CD tool to automate your continuous integration and deployment process. Harness is known for its innovation and simplicity in the CI/CD space. Hence, we will use this platform to set up automated continuous deployment of our application. 227 | 228 | Once you sign up and verify your account, you will be presented with a welcome message and project creation set-up. Proceed to create a project.
229 | 230 | ![create project](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/xypyf0exsggs3da8zds1.png) 231 | 232 | Add the name to the project, save and continue. 233 | 234 | ![about the project](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/4jtng0giw7zpibwc5b7u.png) 235 | 236 | Select the ‘Continuous Delivery’ module and start your free plan. 237 | 238 | ![Harness CD module](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/6fqqm7vsa3b0vkonlh7c.png) 239 | 240 | Go to the module and start your deployment journey. 241 | 242 | ![deployment pipeline](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/0gmka7gitb0lj80jeuw7.png) 243 | 244 | The set-up is very straightforward, as shown in the above image; you can deploy your application in just four simple steps. 245 | 246 | Select your deployment type, i.e., Kubernetes, and click ‘Connect to Environment’. 247 | 248 | ![deployment type](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/runmg0ljv7ubca7nl89i.png) 249 | 250 | Connect to your Kubernetes environment with a Delegate. A Delegate is a service that runs on your infrastructure to execute tasks on behalf of the Harness platform. 251 | 252 | ![connect Harness](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/os8hwkybtiao3praib7h.png) 253 | 254 | Download the Delegate YAML file and install it on your Kubernetes cluster by applying the kubectl apply command as stated in the above step. 255 | 256 | Make sure to execute the command `kubectl apply -f harness-delegate.yaml` from the directory where you downloaded your delegate YAML file. 257 | 258 | Ensure your Delegate installation is successful. 259 | 260 | Next, configure the service and add the manifest details. 261 | 262 | ![configure service](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/d9apbdvfhuocjy755qru.png) 263 | 264 | After adding all the details, click ‘Create a Pipeline’. 265 | 266 | ![create pipeline](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/eleem6jyt22bsixn4yum.png) 267 | 268 | Check if all the connections are successful. Once everything looks fine, click on ‘Run Pipeline’. 269 | 270 | ![run cd pipeline](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/xk3ayy2hnfgtpjdz888w.png) 271 | 272 | Click on ‘Run Pipeline’ to see the successful deployment. 273 | 274 | ![Successful Harness Pipeline](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/tib4lngwirps3tjmt4fg.png) 275 | 276 | **Congratulations!** We successfully deployed our application on Kubernetes using Harness. Now, we can easily automate the deployment using the Harness CD module. 277 | 278 | You can automate your CD process by adding Triggers. When any authorised person pushes new code to your repository, your pipeline gets triggered and runs the deployment. Let’s see how to do that. 279 | 280 | In the pipeline studio, you can click the ‘Triggers’ tab and add your desired trigger. 281 | 282 | ![new trigger](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/cwgpeo03kb0mkw76oipi.png) 283 | 284 | ![all trigger related](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/rachguxj97dskwirzihp.png) 285 | 286 | Click on ‘Add New Trigger’ and select ‘GitHub’. 287 | 288 | ![trigger list](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/pk3sdw5o9fdxoo16f7jh.png) 289 | 290 | Add the required details and continue. As you can see, we are selecting ‘Push’ as our event. So whenever any authorised push happens to our repository, the pipeline should trigger and run.
291 | 292 | ![trigger settings](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/bgw1f8eb9aow4hrbi828.png) 293 | 294 | ![trigger webhook](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/7ijtfeggddhpc6g3zt57.png) 295 | 296 | You can see your newly created trigger in the ‘Triggers’ tab. 297 | 298 | ![new trigger listed](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/oinnpcc3qou6k5qsr6qt.png) 299 | Now, whenever any authorised person pushes any code changes to your main/master repository, the pipeline automatically gets triggered. 300 | 301 | ### **If you like this article, please share with others. ❤️** 302 | -------------------------------------------------------------------------------- /learning/kubernetes-for-everyone/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes for everyone ☸️ 2 | 3 | ![kubernetes](https://imgur.com/12s2Bi8.png) 4 | 5 | # Kubernetes 6 | 7 | Kubernetes is the `de facto` standard for running containerized applications. 8 | 9 | > Kubernetes (K8s) is an open-source system for `automating deployment`, `scaling`, and `management` of containerized applications. 10 | 11 | ![](https://res.cloudinary.com/practicaldev/image/fetch/s--RWDsJZoQ--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://thepracticaldev.s3.amazonaws.com/i/idtqaj43wbzw5704fwdb.png) 12 | 13 | **Kubernetes** makes it easy to deploy and run containerized applications. **Kubernetes** is simple to use. 14 | 15 | **Kubernetes** is complex to understand because it provides a huge set of options to make your deployment easier. 16 | 17 | Aptly named, **Kubernetes** is a pilot (or **helmsman**) that helps you to sail the container world. **Kubernetes** is a portable and extensible system built by the community for the community. 18 | 19 | As Kelsey Hightower correctly puts it: 20 | 21 | > Kubernetes does the things that the very best system administrator would do: automation, failover, centralized logging, monitoring. It takes what we’ve learned in the DevOps community and makes it default, out of the box. 22 | 23 | In order to work with Kubernetes, it is very important to understand 24 | 25 | * How does Kubernetes work? 26 | 27 | * How is Kubernetes architected? 28 | 29 | * What are the various components in Kubernetes? 30 | 31 | 32 | Let us start hacking on Kubernetes. 33 | 34 | ## **How does Kubernetes work?** 35 | 36 | Kubernetes runs in a highly available cluster mode. Each Kubernetes cluster consists of one or more master nodes and a few worker nodes. 37 | 38 | ![Alt Text](https://dev-to-uploads.s3.amazonaws.com/i/gkc90lkkd9cdfqjx2i2m.png) 39 | 40 | ### **Master Node** 41 | 42 | The master node consists of an API server, Scheduler, Controllers, and etcd. This node is called the `control plane` of Kubernetes. This control plane is the `brain` of Kubernetes. 43 | 44 | That is, the control plane is responsible for all the actions inside Kubernetes. 45 | 46 | ![Alt Text](https://dev-to-uploads.s3.amazonaws.com/i/wgenne5f9y8a25shgb29.png) 47 | 48 | Via the `API server`, we can instruct Kubernetes or get information from it. 49 | 50 | The `Scheduler` is responsible for scheduling the pods. 51 | 52 | The `controllers` are responsible for running the resource controllers. 53 | 54 | `etcd` is the storage for Kubernetes. It is a key-value store. 55 | 56 | ### **Node** 57 | 58 | ![Alt Text](https://dev-to-uploads.s3.amazonaws.com/i/g9w3a7g1ytmxz83m46n3.png) 59 | 60 | The worker nodes have a Kubelet and proxy.
61 | 62 | The kubelets are the actual workhorses, and the kube-proxy handles the networking. 63 | 64 | #### **Working** 65 | 66 | ![Alt Text](https://dev-to-uploads.s3.amazonaws.com/i/h0hxgch8ape716dwxl0t.png) 67 | 68 | We provide the `yaml` file to the Kubernetes cluster through the `kubectl apply` command. 69 | 70 | The `apply` command calls the API server, which will send the information to the `controller` and simultaneously store the information in `etcd`. 71 | 72 | The `etcd` then replicates this information across multiple nodes to survive any node failure. 73 | 74 | The `controller` will check whether the given state matches the desired state. If it does not, it initiates the pod deployment by sending the information to the `scheduler`. 75 | 76 | These checks are called the **reconciliation loop** that runs inside Kubernetes. The job of this loop is to validate whether the state requested is maintained correctly. If the expected state and the actual state mismatch, this loop will do the necessary actions to convert the actual state into the expected state. 77 | 78 | The `scheduler` has a queue inside. 79 | 80 | Once a message is received in the queue, the `scheduler` invokes the kubelet to do the intended action, such as deploying the container. 81 | 82 | This is a 10,000-foot bird's-eye view of how Kubernetes does the deployment. 83 | 84 | There are various components inside Kubernetes. Let us take a look at what they are and how they are useful. 85 | 86 | ## **Components of Kubernetes** 87 | 88 | ### **Pods** 89 | 90 | > In general terms, pods are nothing but a group of dolphins or whales. 91 | 92 | ![](https://thepracticaldev.s3.amazonaws.com/i/wri3njxsoku0z6na9zj8.png) 93 | 94 | Similarly, in the Kubernetes world, `pods` are a group of containers living together. A pod may have one or more containers in it. 95 | 96 | The `pod` is the smallest unit of deployment in Kubernetes. Usually, the containers that cannot live outside the scope of another container are grouped to form a pod. 97 | 98 | This is how you define a pod in Kubernetes. 99 | 100 | ```yaml 101 | apiVersion: v1 102 | kind: Pod 103 | metadata: 104 | name: myapp-pod 105 | labels: 106 | app: myapp 107 | spec: 108 | containers: 109 | - name: myapp-container 110 | image: busybox 111 | command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600'] 112 | ``` 113 | 114 | * *apiVersion* tells the Kubernetes cluster which version of the API to use when parsing and executing this file. 115 | 116 | * *kind* defines the **kind** of Kubernetes object this file refers to. 117 | 118 | * *metadata* includes all the necessary metadata to identify the Pod. 119 | 120 | * *spec* includes the container information. 121 | 122 | 123 | ### **Deployments** 124 | 125 | While pods are the unit of deployment, an application needs one or more pods to work. Kubernetes considers this entire set a deployment. 126 | 127 | Thus, a deployment is recorded information about pods. Kubernetes uses this deployment information to manage and monitor the applications that are deployed in it. 128 | 129 | The below file is a sample deployment file that tells Kubernetes to create a deployment of `nginx` using the `nginx:1.7.9` container.
130 | 131 | ```yaml 132 | apiVersion: apps/v1 133 | kind: Deployment 134 | metadata: 135 | name: nginx-deployment 136 | labels: 137 | app: nginx 138 | spec: 139 | replicas: 3 140 | selector: 141 | matchLabels: 142 | app: nginx 143 | template: 144 | metadata: 145 | labels: 146 | app: nginx 147 | spec: 148 | containers: 149 | - name: nginx 150 | image: nginx:1.7.9 151 | ports: 152 | - containerPort: 80 153 | ``` 154 | 155 | ### **Replicasets** 156 | 157 | While the deployment tells Kubernetes what containers are needed for your application and how many replicas to run, the `replica sets` are the ones that ensure those replicas are up and running. 158 | 159 | The ReplicaSet is responsible for managing and monitoring the replicas. 160 | 161 | ### **StatefulSet** 162 | 163 | Oftentimes we will need persistent storage, permanent network identifiers, or ordered deployment, scaling, and updates. In those cases we use `StatefulSets`. 164 | 165 | You can define the StatefulSet like below: 166 | 167 | ```yaml 168 | apiVersion: apps/v1 169 | kind: StatefulSet 170 | metadata: 171 | name: web 172 | spec: 173 | selector: 174 | matchLabels: 175 | app: nginx # has to match .spec.template.metadata.labels 176 | serviceName: "nginx" 177 | replicas: 3 # by default is 1 178 | template: 179 | metadata: 180 | labels: 181 | app: nginx # has to match .spec.selector.matchLabels 182 | spec: 183 | terminationGracePeriodSeconds: 10 184 | containers: 185 | - name: nginx 186 | image: k8s.gcr.io/nginx-slim:0.8 187 | ports: 188 | - containerPort: 80 189 | name: web 190 | volumeMounts: 191 | - name: www 192 | mountPath: /usr/share/nginx/html 193 | volumeClaimTemplates: 194 | - metadata: 195 | name: www 196 | spec: 197 | accessModes: [ "ReadWriteOnce" ] 198 | storageClassName: "my-storage-class" 199 | resources: 200 | requests: 201 | storage: 1Gi 202 | ``` 203 | 204 | We mounted the volume and also claimed the volume storage. 205 | 206 | ### **DaemonSet** 207 | 208 | Sometimes you need to run a pod on every node of your Kubernetes cluster. For example, if you are collecting metrics from every node, then we need to schedule a metrics-collecting pod on every node. We can use a DaemonSet for that. 209 | 210 | ### **Services** 211 | 212 | The deployments define the actual state of the application running in the containers. Users will need to access the application, or you might need to connect to a container to debug it. Services help you with that. 213 | 214 | Services are the Kubernetes objects that provide access to the containers, from the external world or between themselves. 215 | 216 | We can define the service like below: 217 | 218 | ```yaml 219 | apiVersion: v1 220 | kind: Service 221 | metadata: 222 | name: my-service 223 | spec: 224 | selector: 225 | app: MyApp 226 | ports: 227 | - protocol: TCP 228 | port: 80 229 | targetPort: 9376 230 | ``` 231 | 232 | The above `service` maps incoming connections on port `80` to the targetPort `9376`. 233 | 234 | > You can consider the services as the load balancer, proxy or traffic router in the world of Kubernetes. 235 | 236 | ### **Networking** 237 | 238 | This is the most important element of Kubernetes. The running pods should be exposed to the network. The containers running inside the pods should communicate among themselves and also with the external world. 239 | 240 | While a service provides a way to connect to the pods, networking determines how to expose these services.
241 | 242 | In Kubernetes we can expose the service through the following ways: 243 | 244 | * **Load Balancer** 245 | 246 | * The Load Balancer provides an external IP through which we can access the pods running inside. 247 | 248 | * Kubernetes will start the service and then `asynchronously` start a load balancer. 249 | 250 | ![](https://thepracticaldev.s3.amazonaws.com/i/29miv7gkc1bmpz0yfdze.gif) 251 | 252 | * **Node Port** 253 | 254 | * Each of the services will have a dynamically assigned port. 255 | 256 | * We can access the services using the Kubernetes master IP. 257 | 258 | ![](https://thepracticaldev.s3.amazonaws.com/i/pactfmoby255r4pny8iq.gif) 259 | 260 | * **Ingress** 261 | 262 | * Each of the services will have a separate address. 263 | 264 | * These services are then accessed by an ingress controller. 265 | 266 | * The ingress controller is not a public IP or external IP. 267 | 268 | ![](https://thepracticaldev.s3.amazonaws.com/i/09tndignuksgqrjdqlze.gif) 269 | 270 | ### **Secrets** 271 | 272 | Applications often need passwords, tokens, etc. Kubernetes provides the `Secret` object to store and manage such sensitive information. We can create a secret like below: 273 | 274 | ```yaml 275 | apiVersion: v1 276 | kind: Secret 277 | metadata: 278 | name: mysecret 279 | type: Opaque 280 | stringData: 281 | config.yaml: |- 282 | apiUrl: "https://my.api.com/api/v1" 283 | username: {{username}} 284 | password: {{password}} 285 | ``` 286 | 287 | ## Best practices 288 | 289 | > Kubernetes is an ocean, and whatever we have seen is just a drop in it. Since Kubernetes supports a wide range of applications, there are many different options and features available. 290 | 291 | A few best practices to follow while working with Kubernetes are: 292 | 293 | #### **Make smaller YAML** 294 | 295 | The `yaml` files are the heart of Kubernetes configuration. 296 | 297 | We can define multiple Kubernetes configurations in a single `yaml`. While `yaml` reduces the boilerplate compared with `JSON`, `yaml` files are still whitespace-sensitive and error-prone. 298 | 299 | So always try to minimize the size of `yaml` files. 300 | 301 | Define every service, deployment, secret, and other Kubernetes object in a separate `yaml` file. 302 | 303 | > Split your yaml files into smaller files. 304 | 305 | The `single responsibility principle` applies here. 306 | 307 | #### **Smaller and Fast boot time for images** 308 | 309 | Kubernetes automatically restarts the pods when there is a crash, an upgrade, or increased usage. It is important to have a faster boot time for the images. In order to have a faster boot time, we need to have smaller images. 310 | 311 | Alpine images are your friends. Use the Alpine images as the base and then add in components or libraries to the images only when they are absolutely necessary. 312 | 313 | > Always remember to have smaller image sizes. Use the `builder pattern` to create the images from Alpine images. 314 | 315 | #### Healthy - Zombie Process 316 | 317 | Docker containers will terminate only when all the processes running inside the container are terminated. The Docker containers will return a `healthy` status even when one of the processes is killed. This creates a `Healthy-Zombie` process. 318 | 319 | Try to have a single process inside the container. If running a single process is not possible, then try to have a mechanism to figure out whether all the required processes are running.
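One hedged way to implement such a mechanism is an `exec` liveness probe that checks for the critical process (a sketch; it assumes `pgrep` is available in the image, and `nginx` stands in for your process name):

```yaml
livenessProbe:
  exec:
    command: ['pgrep', '-x', 'nginx']   # fails when the watched process is gone
  periodSeconds: 30
```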
320 | 321 | #### Clean up unused resources 322 | 323 | In the container world, it is quite common to have unused resources occupying the memory. It is important to ensure the resources are properly cleaned. 324 | 325 | #### Think about Requests & Limits 326 | 327 | Ensure that requests and limits are properly specified for all the containers. 328 | 329 | ```yaml 330 | resources: 331 | requests: 332 | memory: "100Mi" 333 | cpu: "100m" 334 | limits: 335 | memory: "200Mi" 336 | cpu: "500m" 337 | ``` 338 | 339 | The `requests` are the resources that the container is guaranteed to get. The `limits` are the maximum resources a container is allowed to use. 340 | 341 | > Each container in the pod can set its own requests and limits. 342 | 343 | #### RED / USE pattern 344 | 345 | Monitor and manage your services using the `RED` pattern. 346 | 347 | * Requests 348 | 349 | * Errors 350 | 351 | * Duration 352 | 353 | 354 | Track the requests, the errors in the responses, and the duration taken to receive a response. Based on this information, tweak your service to achieve optimum performance. 355 | 356 | For the resources, use the `USE` pattern. 357 | 358 | * Utilization 359 | 360 | * Saturation 361 | 362 | * Errors 363 | 364 | 365 | Monitor the resource utilization, how saturated the resources are, and what errors occur. Based on this information, tweak your resources to optimize resource allocation. 366 | 367 | Hopefully, this might have given you a brief overview of `Kubernetes`. Head over to [kubernetes.io](https://kubernetes.io/) for more information on Kubernetes. 368 | 369 | ### **If you like this article, please share with others. ❤️** 370 | -------------------------------------------------------------------------------- /projects/10-microservices-deployment-eks/README.md: -------------------------------------------------------------------------------- 1 | # 🚀🛠️ Designing a 10-Microservices Application Deployment on EKS! 🤖 2 | 3 | ![](https://miro.medium.com/v2/resize:fit:736/1*BR95t3O5WYZykgPvJnv2HA.png) 4 | 5 | In the realm of cloud computing, deploying a complex application with precision and efficiency is a paramount challenge. Amazon Web Services (AWS) offers Elastic Kubernetes Service (EKS), a fully managed Kubernetes service that simplifies the deployment, management, and scaling of containerized applications. In this blog post, we’ll delve into the intricacies of orchestrating a 10-tier application on AWS EKS. 6 | 7 | ## **Designing the 10-Microservices Application** 8 | 9 | ## **1\. Presentation Tier 🖥️:** 10 | 11 | * Host your frontend application here. 12 | 13 | * Utilize AWS Elastic Load Balancer (ELB) for distributing incoming traffic. 14 | 15 | ## **2\. Web Server Tier 🌐:** 16 | 17 | * Deploy web servers using containers managed by Kubernetes pods. 18 | 19 | * Leverage Kubernetes Services for load balancing within the web server tier. 20 | 21 | ## **3\. Application Tier 🚀:** 22 | 23 | * Run application logic in containers using Kubernetes Deployments. 24 | 25 | * Implement auto-scaling based on demand to ensure optimal performance. 26 | 27 | ## **4\. API Gateway Tier 🚪:** 28 | 29 | * Use Amazon API Gateway to create, publish, and manage APIs. 30 | 31 | * Enable authentication and throttling for secure and controlled access. 32 | 33 | ## **5\. Business Logic Tier 💼:** 34 | 35 | * Containerize business logic components and deploy using Kubernetes. 36 | 37 | * Utilize AWS Lambda for serverless execution of specific functions. 38 | 39 |
## **6\. Message Queue Tier 📬:** 40 | 41 | * Implement message queues like Amazon Simple Queue Service (SQS) for asynchronous communication between components. 42 | 43 | * Ensure reliability and scalability of message processing. 44 | 45 | ## **7\. Data Access Tier 📊:** 46 | 47 | * Manage data storage and retrieval using Amazon Relational Database Service (RDS) or Amazon DynamoDB. 48 | 49 | * Implement read replicas for scalability and fault tolerance. 50 | 51 | ## **8\. Caching Tier 🚀:** 52 | 53 | * Utilize Amazon ElastiCache for in-memory caching to enhance application performance. 54 | 55 | * Configure caching strategies based on the application’s needs. 56 | 57 | ## **9\. Storage Tier 🗄️:** 58 | 59 | * Use Amazon S3 for scalable and durable object storage. 60 | 61 | * Implement data lifecycle policies to manage data efficiently. 62 | 63 | ## **10\. Infrastructure Tier 🌐:** 64 | 65 | * Leverage Amazon EKS to manage and orchestrate containers effectively. 66 | 67 | * Utilize AWS Identity and Access Management (IAM) for secure access control. 68 | 69 | ![](https://miro.medium.com/v2/resize:fit:736/0*lBtxr2c2uMcXmz9y.png) 70 | 71 | # **Step-by-Step Implementation** 72 | 73 | 1. **Create an AWS EC2 instance** 74 | 75 | ![](https://miro.medium.com/v2/resize:fit:736/0*jetGaqyeE1uAXzuC.png) 76 | 77 | **2\. Create a user and give the following permissions:** 78 | 79 | ![](https://miro.medium.com/v2/resize:fit:736/1*giZzF4Cn9X49DlLdCexjbg.png) 80 | 81 | To connect to the EC2 instance, you can use SSH, PuTTY, or MobaXterm. 82 | 83 | **3\. After connecting, install the AWS CLI on your server and configure your credentials** 84 | [https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) 85 | 86 | **4\. Then install Jenkins on your server** 87 | [https://www.jenkins.io/doc/book/installing/linux/#debianubuntu](https://www.jenkins.io/doc/book/installing/linux/#debianubuntu) 88 | 89 | **5\. Now install kubectl on Linux** 90 | [https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-kubectl-binary-with-curl-on-linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-kubectl-binary-with-curl-on-linux) 91 | 92 | **6\. Install eksctl** 93 | [https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-eksctl.html](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-eksctl.html) 94 | 95 | **7\. Install Docker and grant permissions** 96 | 97 | ```bash 98 | sudo apt-get install docker.io 99 | sudo usermod -aG docker ubuntu 100 | sudo newgrp docker 101 | ``` 102 | 103 | **8\. Run SonarQube from the Docker image** 104 | 105 | ```bash 106 | docker run -d -p 9000:9000 sonarqube:lts-community 107 | ``` 108 | 109 |
**9\. Install EKS** 110 | 111 | ```bash 112 | eksctl create cluster --name=my-eks2 \ 113 | --region=ap-south-1 \ 114 | --zones=ap-south-1a,ap-south-1b \ 115 | --without-nodegroup 116 | 117 | # after the above setup completes, run this 118 | eksctl utils associate-iam-oidc-provider \ 119 | --region ap-south-1 \ 120 | --cluster my-eks2 \ 121 | --approve 122 | 123 | eksctl create nodegroup --cluster=my-eks2 \ 124 | --region=ap-south-1 \ 125 | --name=node2 \ 126 | --node-type=t3.medium \ 127 | --nodes=3 \ 128 | --nodes-min=2 \ 129 | --nodes-max=3 \ 130 | --node-volume-size=20 \ 131 | --ssh-public-key=10-tier-key \ 132 | --managed \ 133 | --asg-access \ 134 | --external-dns-access \ 135 | --full-ecr-access \ 136 | --appmesh-access \ 137 | --alb-ingress-access 138 | ``` 139 | 140 | Install the following plugins in Jenkins: 141 | 142 | ``` 143 | SonarQube Scanner 144 | Docker 145 | Docker Pipeline 146 | Docker Common 147 | CloudBees Docker Build and Publish 148 | Kubernetes 149 | Kubernetes CLI 150 | ``` 151 | 152 | **Configure Sonar Server in Manage Jenkins** 153 | 154 | Grab the public IP address of your EC2 instance; SonarQube works on port 9000, so open <Public IP>:9000. Go to your SonarQube server. Click on Administration → Security → Users → click on Tokens and Update Token → give it a name → and click on Generate Token 155 | 156 | ![](https://miro.medium.com/v2/resize:fit:736/0*ohY_aQ9IHj9FQ1hl.png) 157 | 158 | Click on Update Token 159 | 160 | ![](https://miro.medium.com/v2/resize:fit:736/0*0DBl74AeobYkTgyy.png) 161 | 162 | **Copy this Token** 163 | 164 | Go to Dashboard → Manage Jenkins → Credentials → Add Secret Text. It should look like this 165 | 166 | ![](https://miro.medium.com/v2/resize:fit:736/0*uVqbfa70yVgMLcFt.png) 167 | 168 | Now, go to Dashboard → Manage Jenkins → Configure System 169 | 170 | ![](https://miro.medium.com/v2/resize:fit:736/0*TGpOriGYMoFi8w0j.png) 171 | 172 | Click on Apply and Save 173 | 174 | The **Configure System option** is used in Jenkins to configure different servers 175 | 176 | **Global Tool Configuration** is used to configure different tools that we install using Plugins 177 | 178 | We will install sonar-scanner in tools. 179 | 180 | ![](https://miro.medium.com/v2/resize:fit:736/0*mpd0x6wSOIxuHZ58.png) 181 | 182 | **11\. Go to the EKS service and add all traffic to its SG** 183 | 184 | ![](https://miro.medium.com/v2/resize:fit:736/1*fCoAzi74QtB4S1c7q96OlQ.png) 185 | 186 | **12\. Create a service account and a role, assign that role to the service account, and generate a token** 187 | 188 | Creating Service Account 189 | 190 | 1\. Create the namespace: 191 | 192 | ![](https://miro.medium.com/v2/resize:fit:663/1*PqFe-i3Yia1gqDD-1bZ-dg.png) 193 | 194 | 2\. Create the sa.yaml file and add the following code 195 | 196 | ```yaml 197 | apiVersion: v1 198 | kind: ServiceAccount 199 | metadata: 200 | name: jenkins 201 | namespace: webapps 202 | labels: 203 | app: jenkins 204 | environment: dev # Optional: adjust based on your usage (dev/staging/prod) 205 | ``` 206 | 207 | ```bash 208 | kubectl apply -f sa.yaml 209 | ``` 210 | 211 |
211 | 3\. Now we need to create a Role 212 | 213 | ```yaml 214 | apiVersion: rbac.authorization.k8s.io/v1 215 | kind: Role 216 | metadata: 217 | name: app-role 218 | namespace: webapps 219 | rules: 220 | - apiGroups: 221 | - "" # Core API group 222 | - apps 223 | - autoscaling 224 | - batch 225 | - extensions 226 | - policy 227 | - rbac.authorization.k8s.io 228 | resources: 229 | - pods 230 | - configmaps 231 | - deployments 232 | - daemonsets 233 | - componentstatuses 234 | - events 235 | - endpoints 236 | - horizontalpodautoscalers 237 | - ingresses 238 | - jobs 239 | - limitranges 240 | - namespaces 241 | - nodes 242 | - persistentvolumes 243 | - persistentvolumeclaims 244 | - resourcequotas 245 | - replicasets 246 | - replicationcontrollers 247 | - serviceaccounts 248 | - services 249 | verbs: 250 | - get 251 | - list 252 | - watch 253 | - create 254 | - update 255 | - patch 256 | - delete 257 | ``` 258 | 259 | ![](https://miro.medium.com/v2/resize:fit:685/1*q3EwCbqtjT4dOpzytx1jAA.png) 260 | 261 | 4\. Now bind the Role to the ServiceAccount 262 | 263 | ```yaml 264 | apiVersion: rbac.authorization.k8s.io/v1 265 | kind: RoleBinding 266 | metadata: 267 | name: app-rolebinding 268 | namespace: webapps # Target namespace 269 | roleRef: 270 | apiGroup: rbac.authorization.k8s.io # API group for the Role 271 | kind: Role # Reference to a Role (not ClusterRole) 272 | name: app-role # Name of the Role being bound 273 | subjects: 274 | - kind: ServiceAccount # Subject type 275 | name: jenkins # ServiceAccount name 276 | namespace: webapps # Namespace of the ServiceAccount 277 | ``` 278 | 279 | ![](https://miro.medium.com/v2/resize:fit:736/0*vrx020cTWLWNSrcE.png) 280 | 281 | 5\. Now create a token Secret for the ServiceAccount 282 | 283 | ```yaml 284 | apiVersion: v1 285 | kind: Secret 286 | type: kubernetes.io/service-account-token 287 | metadata: 288 | name: mysecretname 289 | namespace: webapps # Optional: specify the namespace for the secret 290 | annotations: 291 | kubernetes.io/service-account.name: jenkins # Link to ServiceAccount 292 | labels: 293 | app: jenkins # Optional: add a label for better organization 294 | ``` 295 | 296 | ![](https://miro.medium.com/v2/resize:fit:569/0*8T2cSyNTgW6u2oY-.png) 297 |
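Each of these manifests has to be applied just like the ServiceAccount was; a minimal sketch, assuming you saved them as rol.yaml, bind.yaml, and sec.yaml (the file names used in this repo):

```bash
kubectl apply -f rol.yaml   # the Role
kubectl apply -f bind.yaml  # the RoleBinding
kubectl apply -f sec.yaml   # the token Secret
```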
298 | Go to Jenkins and add a pipeline 299 | 300 | ![](https://miro.medium.com/v2/resize:fit:736/0*EcuT0dEzhK1ccOV0.png) 301 | 302 | Let's go to our pipeline and add the below script. 303 | 304 | ```groovy 305 | pipeline { 306 | agent any 307 | 308 | environment { 309 | SCANNER_HOME = tool 'sonar-scanner' 310 | } 311 | 312 | stages { 313 | stage('Git Checkout') { 314 | steps { 315 | git branch: 'latest', url: 'https://github.com/SushantOps/10-Tier-MicroService-Appliction.git' 316 | } 317 | } 318 | 319 | stage('SonarQube Analysis') { 320 | steps { 321 | withSonarQubeEnv('sonar') { 322 | sh '''$SCANNER_HOME/bin/sonar-scanner \ 323 | -Dsonar.projectKey=10-Tier \ 324 | -Dsonar.projectName=10-Tier \ 325 | -Dsonar.java.binaries=.''' 326 | } 327 | } 328 | } 329 | 330 | stage('Build & Push Docker Images') { 331 | parallel { 332 | stage('adservice') { 333 | steps { buildAndPush('adservice') } 334 | } 335 | stage('cartservice') { 336 | steps { buildAndPush('cartservice/src') } 337 | } 338 | stage('checkoutservice') { 339 | steps { buildAndPush('checkoutservice') } 340 | } 341 | stage('currencyservice') { 342 | steps { buildAndPush('currencyservice') } 343 | } 344 | stage('emailservice') { 345 | steps { buildAndPush('emailservice') } 346 | } 347 | stage('frontend') { 348 | steps { buildAndPush('frontend') } 349 | } 350 | stage('loadgenerator') { 351 | steps { buildAndPush('loadgenerator') } 352 | } 353 | stage('paymentservice') { 354 | steps { buildAndPush('paymentservice') } 355 | } 356 | stage('productcatalogservice') { 357 | steps { buildAndPush('productcatalogservice') } 358 | } 359 | stage('recommendationservice') { 360 | steps { buildAndPush('recommendationservice') } 361 | } 362 | stage('shippingservice') { 363 | steps { buildAndPush('shippingservice') } 364 | } 365 | } 366 | } 367 | 368 | stage('K8s Deploy') { 369 | steps { 370 | withKubeConfig( 371 | caCertificate: '', 372 | clusterName: 'my-eks2', 373 | contextName: '', 374 | credentialsId: 'k8-token', 375 | namespace: 'webapps', 376 | restrictKubeConfigAccess: false, 377 | serverUrl: 'https://EBCE08CF45C3AA5A574E126370E5D4FC.gr7.ap-south-1.eks.amazonaws.com' 378 | ) { 379 | sh 'kubectl apply -f deployment-service.yml' 380 | sh 'kubectl get pods' 381 | sh 'kubectl get svc' 382 | } 383 | } 384 | } 385 | } 386 | 387 | post { 388 | always { 389 | echo "Pipeline execution complete!" 390 | } 391 | } 392 | } 393 | 394 | 395 | // Reusable build & push helper 396 | // Adjust the image name prefix if needed 397 | def buildAndPush(String servicePath) { 398 | script { 399 | // use the first path segment so 'cartservice/src' still builds the 'cartservice' image 400 | def imageName = "sushantkapare1717/${servicePath.tokenize('/')[0]}:latest" 401 | withDockerRegistry(credentialsId: 'docker-cred', toolName: 'docker') { 402 | dir("/var/lib/jenkins/workspace/10-Tier/src/${servicePath}") { 403 | sh "docker build -t ${imageName} ." 404 | sh "docker push ${imageName}" 405 | sh "docker rmi ${imageName}" 406 | } 407 | } 408 | } 409 | } 410 | ``` 411 | 412 | How to get the token (the secret lives in the webapps namespace) 413 | 414 | ```bash 415 | kubectl -n webapps describe secret mysecretname 416 | ```
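The `token:` field in that output is the value to paste into the Jenkins secret-text credential (`k8-token` in the pipeline above). If you prefer to pull just the token, a one-liner sketch:

```bash
# the token is stored base64-encoded under .data.token; decode it before use
kubectl -n webapps get secret mysecretname -o jsonpath='{.data.token}' | base64 -d
```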
417 | Now run the pipeline 418 | 419 | After that, run this command 420 | 421 | ```bash 422 | kubectl get pods -n webapps 423 | ``` 424 | 425 | ![](https://miro.medium.com/v2/resize:fit:736/0*ZQKaW4Pfn340JyXr.png) 426 | 427 | ![](https://miro.medium.com/v2/resize:fit:736/1*sYrGLLwBtA9t3gSIQG9VRA.png) 428 | 429 | ![](https://miro.medium.com/v2/resize:fit:736/1*HM2SUQOIjgQ-YmCCZr7mSQ.png) 430 | 431 | - **Lastly, do not forget to terminate the AWS EC2 instance.** 432 | 433 | 434 | ## If you like this article, please share with others. ❤️ 435 | -------------------------------------------------------------------------------- /projects/Uber-Clone-DevSecOps/README.md: -------------------------------------------------------------------------------- 1 | # Uber Clone DevSecOps CI/CD Kubernetes Project 2 | 3 | ![](https://miro.medium.com/v2/resize:fit:736/0*xJc4sgDsMZqdfPyZ.png) 4 | 5 | # **Introduction :-** 6 | 7 | In the fast-paced world of app development, the need for a seamless and secure Continuous Integration/Continuous Deployment (CI/CD) pipeline cannot be overstated. For businesses looking to replicate the success of Uber, it’s crucial to implement a DevSecOps approach that ensures both speed and security throughout the software development lifecycle. In this blog post, we’ll explore the key components of a DevSecOps CI/CD pipeline for an Uber clone, emphasizing the importance of integrating security into every stage of the development process. 8 | 9 | **Github Repo** :- [uber-clone](https://github.com/SushantOps/uber-clone.git) 10 | 11 | ## **STEP 1: Launch an Ubuntu instance (T2.large) :-** 12 | 13 | Launch an AWS T2 Large instance. Use Ubuntu as the image. You can create a new key pair or use an existing one. Enable HTTP and HTTPS settings in the security group and open all ports. 14 | 15 | ![](https://miro.medium.com/v2/resize:fit:736/0*eXj5Sn_hPKuI2j4V) 16 | 17 | ## **STEP 2: Create IAM role :-** 18 | 19 | Search for IAM in the AWS search bar and click on Roles. 20 | 21 | ![](https://miro.medium.com/v2/resize:fit:736/0*PoZtjfWGGkd1O2oY.png) 22 | 23 | ![](https://miro.medium.com/v2/resize:fit:736/0*nIywO8dCvfYYqWbW.png) 24 | 25 | Now attach this role to the EC2 instance we created earlier, so we can provision the cluster from that instance. 26 | 27 | Go to the EC2 Dashboard and select the instance. 28 | 29 | Click on Actions → Security → Modify IAM role. 30 | 31 | Select the role you created earlier and click on Update IAM role. 32 | 33 | ![](https://miro.medium.com/v2/resize:fit:736/0*ph05Dw_YwIsr4CPV.png) 34 |
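Once the AWS CLI is installed on the instance (STEP 3 below), you can confirm that the instance is actually picking up the role; the ARN in the output should reference the role you just attached:

```bash
# with an instance role attached, this returns an assumed-role ARN, no keys needed
aws sts get-caller-identity
```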
35 | Connect to the instance using MobaXterm or PuTTY. 36 | 37 | ## **STEP 3: Installations of Packages :-** 38 | 39 | Create a shell script on the Ubuntu EC2 instance 40 | 41 | ```bash 42 | sudo su # switch to the root user 43 | vi script1.sh 44 | ``` 45 | 46 | Enter this script into it. 47 | It installs Java (Temurin 17), Jenkins, and Docker; the second script below adds Trivy, Terraform, kubectl, and the AWS CLI, and SonarQube runs later as a Docker container. 48 | 49 | ```bash 50 | #!/bin/bash 51 | sudo apt update -y 52 | wget -O - https://packages.adoptium.net/artifactory/api/gpg/key/public | tee /etc/apt/keyrings/adoptium.asc 53 | echo "deb [signed-by=/etc/apt/keyrings/adoptium.asc] https://packages.adoptium.net/artifactory/deb $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) main" | tee /etc/apt/sources.list.d/adoptium.list 54 | sudo apt update -y 55 | sudo apt install temurin-17-jdk -y 56 | /usr/bin/java --version 57 | curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io-2023.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null 58 | echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null 59 | sudo apt-get update -y 60 | sudo apt-get install jenkins -y 61 | sudo systemctl start jenkins 62 | #install docker 63 | # Add Docker's official GPG key: 64 | sudo apt-get update 65 | sudo apt-get install ca-certificates curl gnupg -y 66 | sudo install -m 0755 -d /etc/apt/keyrings 67 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 68 | sudo chmod a+r /etc/apt/keyrings/docker.gpg 69 | # Add the repository to Apt sources: 70 | echo \ 71 | "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ 72 | $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ 73 | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 74 | sudo apt-get update 75 | sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y 76 | sudo usermod -aG docker ubuntu 77 | newgrp docker 78 | ``` 79 | 80 | Now make the shell script executable and run it 81 | 82 | ```bash 83 | chmod +x script1.sh 84 | sh script1.sh 85 | ``` 86 |
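Once the script finishes, it is worth confirming that Jenkins actually started before moving on; a quick check (not part of the original script):

```bash
# the service should report active (running)
sudo systemctl status jenkins --no-pager
```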
87 | Let's write the second script 88 | 89 | ```bash 90 | vi script2.sh 91 | ``` 92 | 93 | Add this script 94 | 95 | ```bash 96 | #!/bin/bash 97 | # install trivy 98 | sudo apt-get install wget apt-transport-https gnupg lsb-release -y 99 | wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | gpg --dearmor | sudo tee /usr/share/keyrings/trivy.gpg > /dev/null 100 | echo "deb [signed-by=/usr/share/keyrings/trivy.gpg] https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" | sudo tee -a /etc/apt/sources.list.d/trivy.list 101 | sudo apt-get update 102 | sudo apt-get install trivy -y 103 | # install terraform 104 | sudo apt install wget -y 105 | wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg 106 | echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list 107 | sudo apt update && sudo apt install terraform -y 108 | # install kubectl on Jenkins 109 | sudo apt update 110 | sudo apt install curl -y 111 | curl -LO https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl 112 | sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl 113 | kubectl version --client 114 | # install AWS CLI 115 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" 116 | sudo apt-get install unzip -y 117 | unzip awscliv2.zip 118 | sudo ./aws/install 119 | ``` 120 | 121 | Now make the shell script executable and run it 122 | 123 | ```bash 124 | chmod +x script2.sh 125 | sh script2.sh 126 | ``` 127 | 128 | Now check the versions of the packages 129 | 130 | ```bash 131 | docker --version 132 | trivy --version 133 | aws --version 134 | terraform --version 135 | kubectl version --client 136 | ``` 137 | 138 | ![](https://miro.medium.com/v2/resize:fit:736/0*-Lt_XjTgHgidBd0t) 139 | 140 | Open up the Docker socket permissions (a world-writable socket is insecure, but acceptable for a throwaway demo) and start SonarQube 141 | 142 | ```bash 143 | sudo chmod 777 /var/run/docker.sock # insecure shortcut; fine for a short-lived demo box 144 | docker run -d --name sonar -p 9000:9000 sonarqube:lts-community 145 | ``` 146 |
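A quick check that the SonarQube container is running before you try to open the UI (the container name comes from the run command above):

```bash
# should show the 'sonar' container with status Up
docker ps --filter name=sonar
```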
147 | ## **STEP 4: Connect to Jenkins and Sonarqube :-** 148 | 149 | Now copy the public IP address of the EC2 instance and paste it into the browser 150 | 151 | ```bash 152 | # <Public-IP>:8080 shows the Jenkins login page 153 | ``` 154 | 155 | ![](https://miro.medium.com/v2/resize:fit:736/0*6GvmvuP5IoyPXGG3) 156 | 157 | Connect to your instance with PuTTY or MobaXterm and run the below command to get the administrator password 158 | 159 | ```bash 160 | sudo cat /var/lib/jenkins/secrets/initialAdminPassword 161 | ``` 162 | 163 | Now you see the Jenkins Dashboard 164 | 165 | ![](https://miro.medium.com/v2/resize:fit:736/0*Jg-5zIDUh5gHuRDX) 166 | 167 | Now copy the public IP again and paste it into a new tab in the browser with port 9000 168 | 169 | ```bash 170 | # <Public-IP>:9000 opens the SonarQube UI (the sonar container is already running) 171 | ``` 172 | 173 | ![](https://miro.medium.com/v2/resize:fit:736/0*jK69OFfd_VluUY7I.png) 174 | 175 | Enter the username and password, click on login, and change the password 176 | 177 | ```bash 178 | username admin 179 | password admin 180 | ``` 181 | 182 | Update to a new password; this is the Sonar dashboard. 183 | 184 | ![](https://miro.medium.com/v2/resize:fit:736/0*z0gUrkNWFjHwmlaG.png) 185 | 186 | ## **STEP 5: Terraform plugin install and EKS provision :-** 187 | 188 | Now go to Jenkins and add the Terraform plugin so we can provision AWS EKS using a pipeline job. 189 | 190 | Go to Jenkins dashboard –> Manage Jenkins –> Plugins 191 | 192 | Under Available Plugins, search for Terraform and install it. 193 | 194 | ![](https://miro.medium.com/v2/resize:fit:736/0*NKbH4PhPoA2t-IXz) 195 | 196 | Go to MobaXterm and use the below command 197 | 198 | Let's find the path to our Terraform (we will use it in the Terraform tools section) 199 | 200 | ```bash 201 | which terraform 202 | ``` 203 | 204 | ![](https://miro.medium.com/v2/resize:fit:706/0*ZfU3jF_KlLnFRAew) 205 | 206 | Now come back to Manage Jenkins –> Tools 207 | 208 | Add terraform in Tools 209 | 210 | ![](https://miro.medium.com/v2/resize:fit:736/0*ht-PwEdarZMrXODe) 211 | 212 | Apply and save. 213 | 214 | CHANGE YOUR S3 BUCKET NAME IN THE [BACKEND.TF](https://github.com/SushantOps/uber-clone/blob/main/EKS_TERRAFORM/backend.tf) 215 | 216 | Now create a new job for the EKS provision 217 | 218 | ![](https://miro.medium.com/v2/resize:fit:736/0*7FzwigThYCAmwYO_) 219 | 220 | I want to do this with build parameters, to apply and destroy while building only. 221 | 222 | You have to add this inside the job like the below image 223 | 224 | ![](https://miro.medium.com/v2/resize:fit:736/0*5UqtfjHDFZVjW5WH) 225 | 226 | Let's add a pipeline 227 | 228 | ```groovy 229 | pipeline{ 230 | agent any 231 | stages { 232 | stage('Checkout from Git'){ 233 | steps{ 234 | git branch: 'main', url: 'https://github.com/SushantOps/uber-clone.git' 235 | } 236 | } 237 | stage('Terraform version'){ 238 | steps{ 239 | sh 'terraform --version' 240 | } 241 | } 242 | stage('Terraform init'){ 243 | steps{ 244 | dir('EKS_TERRAFORM') { 245 | sh 'terraform init' 246 | } 247 | } 248 | } 249 | stage('Terraform validate'){ 250 | steps{ 251 | dir('EKS_TERRAFORM') { 252 | sh 'terraform validate' 253 | } 254 | } 255 | } 256 | stage('Terraform plan'){ 257 | steps{ 258 | dir('EKS_TERRAFORM') { 259 | sh 'terraform plan' 260 | } 261 | } 262 | } 263 | stage('Terraform apply/destroy'){ 264 | steps{ 265 | dir('EKS_TERRAFORM') { 266 | sh 'terraform ${action} --auto-approve' 267 | } 268 | } 269 | } 270 | } 271 | } 272 | ``` 273 | 274 | Apply and save, then Build with Parameters and select the action `apply` 275 | 276 | ![](https://miro.medium.com/v2/resize:fit:736/0*O6KWAliZDV2G6lUs) 277 | 278 | In the stage view, provisioning takes up to 10 minutes 279 | 280 | ![](https://miro.medium.com/v2/resize:fit:736/0*t7DhLpgdQKM7-JVj) 281 | 282 | Check in your AWS console whether the EKS cluster was created. 283 | 284 | ![](https://miro.medium.com/v2/resize:fit:736/0*iMaJxL4rezGoQQWU) 285 | 286 | An EC2 instance is created for the node group 287 | 288 | ![](https://miro.medium.com/v2/resize:fit:736/0*pYjPKkJVgoe1yDDI) 289 |
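You can also verify from the terminal instead of the console; a sketch, assuming the cluster name EKS_CLOUD and region used later in this post:

```bash
# "ACTIVE" means the control plane is ready
aws eks describe-cluster --name EKS_CLOUD --region ap-south-1 --query 'cluster.status'
```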
290 | ## **STEP 6: Plugins installation & setup (Java, Sonar, Nodejs, owasp, Docker)** 291 | 292 | Go to Jenkins dashboard 293 | 294 | Manage Jenkins –> Plugins –> Available Plugins 295 | 296 | Search for the below plugins 297 | 298 | `Eclipse Temurin installer` 299 | 300 | `Sonarqube Scanner` 301 | 302 | `NodeJs` 303 | 304 | `Owasp Dependency-Check` 305 | 306 | `Docker` 307 | 308 | `Docker Commons` 309 | 310 | `Docker Pipeline` 311 | 312 | `Docker API` 313 | 314 | `Docker-build-step` 315 | 316 | ![](https://miro.medium.com/v2/resize:fit:736/0*-06BuX1Y9bQQBDEJ) 317 | 318 | ![](https://miro.medium.com/v2/resize:fit:736/0*NNzvqaRQDgmbl8ay) 319 | 320 | ## **STEP 7: Configure in Global Tool Configuration :-** 321 | 322 | Go to Manage Jenkins → Tools → Install JDK (17) and NodeJs (16) → Click on Apply and Save 323 | 324 | ![](https://miro.medium.com/v2/resize:fit:736/0*vzZRbUCSN0IMqIFM) 325 | 326 | ![](https://miro.medium.com/v2/resize:fit:736/0*oFNNs10rbEwhYSgQ) 327 | 328 | For Sonarqube use the latest version 329 | 330 | ![](https://miro.medium.com/v2/resize:fit:736/0*K-J3jqgYbvkB3Bja) 331 | 332 | For Owasp use the 6.5.1 version 333 | 334 | ![](https://miro.medium.com/v2/resize:fit:736/0*qo71Ig8i417eQ3r8) 335 | 336 | Use the latest version of Docker 337 | 338 | ![](https://miro.medium.com/v2/resize:fit:736/0*_31rYDmTfCmIdvtF) 339 | 340 | Click apply and save. 341 | 342 | ## **STEP 8: Configure Sonar Server in Manage Jenkins :-** 343 | 344 | Grab the public IP address of your EC2 instance; SonarQube works on port 9000, so browse to <Public IP>:9000. Go to your SonarQube server. Click on Administration → Security → Users → Click on Tokens and Update Token → Give it a name → and click on Generate Token 345 | 346 | ![](https://miro.medium.com/v2/resize:fit:736/0*WF7CU5JxPkv9I5Mf) 347 | 348 | Click on Update Token 349 | 350 | ![](https://miro.medium.com/v2/resize:fit:736/0*s5KZmD682CpU9BCX) 351 | 352 | Create a token with a name and generate 353 | 354 | ![](https://miro.medium.com/v2/resize:fit:736/0*wmo0v6TZas5pxL35) 355 | 356 | Copy the token, go to Jenkins Dashboard → Manage Jenkins → Credentials → Add Secret Text. It should look like this 357 | 358 | ![](https://miro.medium.com/v2/resize:fit:736/0*PUpRah8c3lig8DvQ) 359 | 360 | You will see this page once you click on Create 361 | 362 | ![](https://miro.medium.com/v2/resize:fit:736/0*fRh-e1-st3N691jq) 363 | 364 | Now, go to Dashboard → Manage Jenkins → System and add it like the below image. 365 | 366 | ![](https://miro.medium.com/v2/resize:fit:736/0*z_ZN_VZuvk88whIx) 367 | 368 | Click on Apply and Save 369 | 370 | The Configure System option is used in Jenkins to configure different servers 371 | 372 | Global Tool Configuration is used to configure the different tools that we install using plugins 373 | 374 | We will install a sonar scanner in the tools. 
375 | 376 | ![](https://miro.medium.com/v2/resize:fit:736/0*8ZsUcXK5cMDk2IfZ) 377 | 378 | In the SonarQube dashboard, also add a quality gate webhook 379 | 380 | Administration → Configuration → Webhooks 381 | 382 | ![](https://miro.medium.com/v2/resize:fit:736/0*vNVsJ4myibOhj-Q2.png) 383 | 384 | Add details 385 | 386 | ```bash 387 | # in the URL section of the webhook, point at your Jenkins instance: 388 | # http://<jenkins-public-ip>:8080/sonarqube-webhook/ 389 | ``` 390 | 391 | ![](https://miro.medium.com/v2/resize:fit:736/0*ISW6D59PGyFGI17T.png) 392 | 393 | Now add Docker credentials to Jenkins so it can log in and push the image 394 | 395 | Manage Jenkins –> Credentials –> global –> add credential 396 | 397 | Add your DockerHub username and password under Global Credentials and create. 398 | 399 | ![](https://miro.medium.com/v2/resize:fit:736/0*6qyutwoP8QL20ZEL) 400 | 401 | ## **STEP 09: Run the Pipeline :-** 402 | 403 | Add this code to the pipeline 404 | 405 | ```groovy 406 | pipeline{ 407 | agent any 408 | tools{ 409 | jdk 'jdk17' 410 | nodejs 'node16' 411 | } 412 | environment { 413 | SCANNER_HOME=tool 'sonar-scanner' 414 | } 415 | stages { 416 | stage('clean workspace'){ 417 | steps{ 418 | cleanWs() 419 | } 420 | } 421 | stage('Checkout from Git'){ 422 | steps{ 423 | git branch: 'main', url: 'https://github.com/SushantOps/uber-clone.git' 424 | } 425 | } 426 | stage("Sonarqube Analysis"){ 427 | steps{ 428 | withSonarQubeEnv('sonar-server') { 429 | sh ''' $SCANNER_HOME/bin/sonar-scanner -Dsonar.projectName=Uber \ 430 | -Dsonar.projectKey=Uber''' 431 | } 432 | } 433 | } 434 | stage("quality gate"){ 435 | steps { 436 | script { 437 | waitForQualityGate abortPipeline: false, credentialsId: 'Sonar-token' 438 | } 439 | } 440 | } 441 | stage('Install Dependencies') { 442 | steps { 443 | sh "npm install" 444 | } 445 | } 446 | stage('OWASP FS SCAN') { 447 | steps { 448 | dependencyCheck additionalArguments: '--scan ./ --disableYarnAudit --disableNodeAudit', odcInstallation: 'DP-Check' 449 | dependencyCheckPublisher pattern: '**/dependency-check-report.xml' 450 | } 451 | } 452 | stage('TRIVY FS SCAN') { 453 | steps { 454 | sh "trivy fs . > trivyfs.txt" 455 | } 456 | } 457 | stage("Docker Build & Push"){ 458 | steps{ 459 | script{ 460 | withDockerRegistry(credentialsId: 'docker', toolName: 'docker'){ 461 | sh "docker build -t uber ." 462 | sh "docker tag uber sushantkapare1717/uber:latest" 463 | sh "docker push sushantkapare1717/uber:latest" 464 | } 465 | } 466 | } 467 | } 468 | stage("TRIVY"){ 469 | steps{ 470 | sh "trivy image sushantkapare1717/uber:latest > trivyimage.txt" 471 | } 472 | } 473 | stage("deploy_docker"){ 474 | steps{ 475 | sh "docker run -d --name uber -p 3000:3000 sushantkapare1717/uber:latest" 476 | } 477 | } 478 | } 479 | } 480 | ``` 481 | 482 | Click on Apply and Save, and Build Now. 483 | 484 | ![](https://miro.medium.com/v2/resize:fit:736/0*lJs6gEQCa4wJJtxU) 485 | 486 | To see the report, you can go to the SonarQube server and go to Projects. 487 | 488 | ![](https://miro.medium.com/v2/resize:fit:736/0*Wmvx71ihe9tpTfTU) 489 | 490 | You can see the report has been generated and the status shows as passed; it scanned about 1.2k lines. To see a detailed report, you can go to Issues. 491 | 492 | For OWASP, a graph of the findings and vulnerabilities will also be generated in the status view. 
493 | 494 | ![](https://miro.medium.com/v2/resize:fit:736/0*pywfS4ydDuIMCdrN) 495 | 496 | When you log in to DockerHub, you will see that a new image has been created 497 | 498 | ![](https://miro.medium.com/v2/resize:fit:736/1*zdr0TW5f35gUsBdb_65YSQ.png) 499 | 500 | ## **STEP 10: Kubernetes Deployment** 501 | 502 | SSH into your Jenkins instance (PuTTY works too) and enter the below command 503 | 504 | ```bash 505 | aws eks update-kubeconfig --name <cluster-name> --region <region> 506 | aws eks update-kubeconfig --name EKS_CLOUD --region ap-south-1 507 | ``` 508 | 509 | ![](https://miro.medium.com/v2/resize:fit:736/0*2tRX5CIuxui2ur0A) 510 | 511 | Let's see the nodes 512 | 513 | ```bash 514 | kubectl get nodes 515 | ``` 516 | 517 | ![](https://miro.medium.com/v2/resize:fit:736/0*a12UrkVgkprNgIUW) 518 | 519 | Copy the kubeconfig file to the Jenkins master or your local file manager and save it. 520 | 521 | Save it in Documents or another folder as secret-file.txt. 522 | 523 | Note: create a secret-file.txt in your file explorer, save the kubeconfig in it, and use this in the Kubernetes credential section. 524 | 525 | Install the Kubernetes plugin. Once it's installed successfully, 526 | 527 | ![](https://miro.medium.com/v2/resize:fit:736/0*yqwlaK36tju2sT1m) 528 | 529 | go to Manage Jenkins –> Manage Credentials –> Click on Jenkins global –> Add Credentials 530 | 531 | ![](https://miro.medium.com/v2/resize:fit:736/0*zpHiGNpxLV_01U2m) 532 | 533 | The final step is to deploy on the Kubernetes cluster; add this stage to the pipeline 534 | 535 | ```groovy 536 | stage('Deploy to kubernetes'){ 537 | steps{ 538 | script{ 539 | dir('K8S') { 540 | withKubeConfig(caCertificate: '', clusterName: '', contextName: '', credentialsId: 'k8s', namespace: '', restrictKubeConfigAccess: false, serverUrl: '') { 541 | sh 'kubectl apply -f deployment.yml' 542 | sh 'kubectl apply -f service.yml' 543 | } 544 | } 545 | } 546 | } 547 | } 548 | ``` 549 | 550 | You will see output like this, 551 | 552 | ![](https://miro.medium.com/v2/resize:fit:736/1*zNMT9axqs6YIYzXyRt22JA.png) 553 | 554 | ## **STEP 11: Termination Process :-** 555 | 556 | Now go to the Jenkins Dashboard and click on the Terraform-Eks job 557 | 558 | Build with parameters and the destroy action 559 | 560 | It will delete the EKS cluster that we provisioned 561 | 562 | ![](https://miro.medium.com/v2/resize:fit:736/0*_sahkDUaJWMl00zM) 563 | 564 | The cluster will be deleted after about 10 minutes; wait for it, and don't remove the EC2 instance until then. 565 | 566 | ![](https://miro.medium.com/v2/resize:fit:736/0*g1qVCP7nfwhBFEyB) 567 | 568 | **Cluster deleted** 569 | 570 | ![](https://miro.medium.com/v2/0*rAIsBEOSHwtnmRo2) 571 | 572 | **Delete the EC2 instance.** 573 | 574 | ### If you like this article, please share with others. ❤️ 575 | -------------------------------------------------------------------------------- /CKAD-exercises/d.configuration.md: -------------------------------------------------------------------------------- 1 | ![](https://gaforgithub.azurewebsites.net/api?repo=CKAD-exercises/configuration&empty) 2 | # Configuration (18%) 3 | 4 | [ConfigMaps](#configmaps) 5 | 6 | [SecurityContext](#securitycontext) 7 | 8 | [Requests and Limits](#requests-and-limits) 9 | 10 | [Secrets](#secrets) 11 | 12 | [Service Accounts](#serviceaccounts) 13 | 14 |
#Tips, export to variable 15 | export ns="-n secret-ops" 16 | export do="--dry-run=client -oyaml"
17 | ## ConfigMaps 18 | 19 | kubernetes.io > Documentation > Tasks > Configure Pods and Containers > [Configure a Pod to Use a ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) 20 | 21 | ### Create a configmap named config with values foo=lala,foo2=lolo 22 | 23 |
show 24 |

25 | 26 | ```bash 27 | kubectl create configmap config --from-literal=foo=lala --from-literal=foo2=lolo 28 | ``` 29 | 30 |

31 |
32 | 33 | ### Display its values 34 | 35 |
show 36 |

37 | 38 | ```bash 39 | kubectl get cm config -o yaml 40 | # or 41 | kubectl describe cm config 42 | ``` 43 | 44 |

45 |
46 | 47 | ### Create and display a configmap from a file 48 | 49 | Create the file with 50 | 51 | ```bash 52 | echo -e "foo3=lili\nfoo4=lele" > config.txt 53 | ``` 54 | 55 |
show 56 |

57 | 58 | ```bash 59 | kubectl create cm configmap2 --from-file=config.txt 60 | kubectl get cm configmap2 -o yaml 61 | ``` 62 | 63 |

64 |
65 | 66 | ### Create and display a configmap from a .env file 67 | 68 | Create the file with the command 69 | 70 | ```bash 71 | echo -e "var1=val1\n# this is a comment\n\nvar2=val2\n#anothercomment" > config.env 72 | ``` 73 | 74 |
show 75 |

76 | 77 | ```bash 78 | kubectl create cm configmap3 --from-env-file=config.env 79 | kubectl get cm configmap3 -o yaml 80 | ``` 81 | 82 |

83 |
84 | 85 | ### Create and display a configmap from a file, giving the key 'special' 86 | 87 | Create the file with 88 | 89 | ```bash 90 | echo -e "var3=val3\nvar4=val4" > config4.txt 91 | ``` 92 | 93 |
show 94 |

95 | 96 | ```bash 97 | kubectl create cm configmap4 --from-file=special=config4.txt 98 | kubectl describe cm configmap4 99 | kubectl get cm configmap4 -o yaml 100 | ``` 101 | 102 |

103 |
104 | 105 | ### Create a configMap called 'options' with the value var5=val5. Create a new nginx pod that loads the value from variable 'var5' in an env variable called 'option' 106 | 107 |
show 108 |

109 | 110 | ```bash 111 | kubectl create cm options --from-literal=var5=val5 112 | kubectl run nginx --image=nginx --restart=Never --dry-run=client -o yaml > pod.yaml 113 | vi pod.yaml 114 | ``` 115 | 116 | ```YAML 117 | apiVersion: v1 118 | kind: Pod 119 | metadata: 120 | creationTimestamp: null 121 | labels: 122 | run: nginx 123 | name: nginx 124 | spec: 125 | containers: 126 | - image: nginx 127 | imagePullPolicy: IfNotPresent 128 | name: nginx 129 | resources: {} 130 | env: 131 | - name: option # name of the env variable 132 | valueFrom: 133 | configMapKeyRef: 134 | name: options # name of config map 135 | key: var5 # name of the entity in config map 136 | dnsPolicy: ClusterFirst 137 | restartPolicy: Never 138 | status: {} 139 | ``` 140 | 141 | ```bash 142 | kubectl create -f pod.yaml 143 | kubectl exec -it nginx -- env | grep option # will show 'option=val5' 144 | ``` 145 | 146 |

147 |
148 | 149 | ### Create a configMap 'anotherone' with values 'var6=val6', 'var7=val7'. Load this configMap as env variables into a new nginx pod 150 | 151 |
show 152 |

153 | 154 | ```bash 155 | kubectl create configmap anotherone --from-literal=var6=val6 --from-literal=var7=val7 156 | kubectl run --restart=Never nginx --image=nginx -o yaml --dry-run=client > pod.yaml 157 | vi pod.yaml 158 | ``` 159 | 160 | ```YAML 161 | apiVersion: v1 162 | kind: Pod 163 | metadata: 164 | creationTimestamp: null 165 | labels: 166 | run: nginx 167 | name: nginx 168 | spec: 169 | containers: 170 | - image: nginx 171 | imagePullPolicy: IfNotPresent 172 | name: nginx 173 | resources: {} 174 | envFrom: # different than previous one, that was 'env' 175 | - configMapRef: # different from the previous one, was 'configMapKeyRef' 176 | name: anotherone # the name of the config map 177 | dnsPolicy: ClusterFirst 178 | restartPolicy: Never 179 | status: {} 180 | ``` 181 | 182 | ```bash 183 | kubectl create -f pod.yaml 184 | kubectl exec -it nginx -- env 185 | ``` 186 | 187 |

188 |
189 | 190 | ### Create a configMap 'cmvolume' with values 'var8=val8', 'var9=val9'. Load this as a volume inside an nginx pod on path '/etc/lala'. Create the pod and 'ls' into the '/etc/lala' directory. 191 | 192 |
show 193 |

194 | 195 | ```bash 196 | kubectl create configmap cmvolume --from-literal=var8=val8 --from-literal=var9=val9 197 | kubectl run nginx --image=nginx --restart=Never -o yaml --dry-run=client > pod.yaml 198 | vi pod.yaml 199 | ``` 200 | 201 | ```YAML 202 | apiVersion: v1 203 | kind: Pod 204 | metadata: 205 | creationTimestamp: null 206 | labels: 207 | run: nginx 208 | name: nginx 209 | spec: 210 | volumes: # add a volumes list 211 | - name: myvolume # just a name, you'll reference this in the pods 212 | configMap: 213 | name: cmvolume # name of your configmap 214 | containers: 215 | - image: nginx 216 | imagePullPolicy: IfNotPresent 217 | name: nginx 218 | resources: {} 219 | volumeMounts: # your volume mounts are listed here 220 | - name: myvolume # the name that you specified in pod.spec.volumes.name 221 | mountPath: /etc/lala # the path inside your container 222 | dnsPolicy: ClusterFirst 223 | restartPolicy: Never 224 | status: {} 225 | ``` 226 | 227 | ```bash 228 | kubectl create -f pod.yaml 229 | kubectl exec -it nginx -- /bin/sh 230 | cd /etc/lala 231 | ls # will show var8 var9 232 | cat var8 # will show val8 233 | ``` 234 | 235 |

236 |
237 | 238 | ## SecurityContext 239 | 240 | kubernetes.io > Documentation > Tasks > Configure Pods and Containers > [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) 241 | 242 | ### Create the YAML for an nginx pod that runs with the user ID 101. No need to create the pod 243 | 244 |
show 245 |

246 | 247 | ```bash 248 | kubectl run nginx --image=nginx --restart=Never --dry-run=client -o yaml > pod.yaml 249 | vi pod.yaml 250 | ``` 251 | 252 | ```YAML 253 | apiVersion: v1 254 | kind: Pod 255 | metadata: 256 | creationTimestamp: null 257 | labels: 258 | run: nginx 259 | name: nginx 260 | spec: 261 | securityContext: # insert this line 262 | runAsUser: 101 # UID for the user 263 | containers: 264 | - image: nginx 265 | imagePullPolicy: IfNotPresent 266 | name: nginx 267 | resources: {} 268 | dnsPolicy: ClusterFirst 269 | restartPolicy: Never 270 | status: {} 271 | ``` 272 | 273 |

274 |
275 | 276 | 277 | ### Create the YAML for an nginx pod that has the capabilities "NET_ADMIN", "SYS_TIME" added to its single container 278 | 279 |
show 280 |

281 | 282 | ```bash 283 | kubectl run nginx --image=nginx --restart=Never --dry-run=client -o yaml > pod.yaml 284 | vi pod.yaml 285 | ``` 286 | 287 | ```YAML 288 | apiVersion: v1 289 | kind: Pod 290 | metadata: 291 | creationTimestamp: null 292 | labels: 293 | run: nginx 294 | name: nginx 295 | spec: 296 | containers: 297 | - image: nginx 298 | imagePullPolicy: IfNotPresent 299 | name: nginx 300 | securityContext: # insert this line 301 | capabilities: # and this 302 | add: ["NET_ADMIN", "SYS_TIME"] # this as well 303 | resources: {} 304 | dnsPolicy: ClusterFirst 305 | restartPolicy: Never 306 | status: {} 307 | ``` 308 | 309 |

310 |
311 | 312 | ## Resource requests and limits 313 | 314 | kubernetes.io > Documentation > Tasks > Configure Pods and Containers > [Assign CPU Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/) 315 | 316 | ### Create an nginx pod with requests cpu=100m,memory=256Mi and limits cpu=200m,memory=512Mi 317 | 318 |
show 319 |

320 | 321 | ```bash 322 | kubectl run nginx --image=nginx --dry-run=client -o yaml > pod.yaml 323 | vi pod.yaml 324 | ``` 325 | 326 | ```YAML 327 | apiVersion: v1 328 | kind: Pod 329 | metadata: 330 | creationTimestamp: null 331 | labels: 332 | run: nginx 333 | name: nginx 334 | spec: 335 | containers: 336 | - image: nginx 337 | name: nginx 338 | resources: 339 | requests: 340 | memory: "256Mi" 341 | cpu: "100m" 342 | limits: 343 | memory: "512Mi" 344 | cpu: "200m" 345 | dnsPolicy: ClusterFirst 346 | restartPolicy: Always 347 | status: {} 348 | ``` 349 | 350 |

351 |
352 | 353 | ## Limit Ranges 354 | kubernetes.io > Documentation > Concepts > Policies > Limit Ranges (https://kubernetes.io/docs/concepts/policy/limit-range/) 355 | 356 | ### Create a namespace named limitrange with a LimitRange that limits pod memory to a max of 500Mi and min of 100Mi 357 | 358 |
show 359 |

360 | 361 | ```bash 362 | kubectl create ns one 363 | ``` 364 | 365 | vi 1.yaml 366 | ```YAML 367 | apiVersion: v1 368 | kind: LimitRange 369 | metadata: 370 | name: ns-memory-limit 371 | namespace: one 372 | spec: 373 | limits: 374 | - max: # max and min define the limit range 375 | memory: "500Mi" 376 | min: 377 | memory: "100Mi" 378 | type: Container 379 | ``` 380 | 381 | ```bash 382 | kubectl apply -f 1.yaml 383 | ``` 384 |

385 |
386 | 387 | ### Describe the namespace limitrange 388 | 389 |
show 390 |

391 | 392 | ```bash 393 | kubectl describe limitrange ns-memory-limit -n one 394 | ``` 395 |

396 |
397 | 398 | ### Create an nginx pod that requests 250Mi of memory in the limitrange namespace 399 | 400 |
show 401 |

402 | 403 | vi 2.yaml 404 | ```YAML 405 | apiVersion: v1 406 | kind: Pod 407 | metadata: 408 | creationTimestamp: null 409 | labels: 410 | run: nginx 411 | name: nginx 412 | namespace: one 413 | spec: 414 | containers: 415 | - image: nginx 416 | name: nginx 417 | resources: 418 | requests: 419 | memory: "250Mi" 420 | dnsPolicy: ClusterFirst 421 | restartPolicy: Always 422 | status: {} 423 | ``` 424 | 425 | ```bash 426 | kubectl apply -f 2.yaml 427 | ``` 428 |

429 |
430 | 431 | 432 | ## Resource Quotas 433 | kubernetes.io > Documentation > Concepts > Policies > Resource Quotas (https://kubernetes.io/docs/concepts/policy/resource-quotas/) 434 | 435 | ### Create ResourceQuota in namespace `one` with hard requests `cpu=1`, `memory=1Gi` and hard limits `cpu=2`, `memory=2Gi`. 436 | 437 |
show 438 |

439 | 440 | Create the namespace: 441 | ```bash 442 | kubectl create ns one 443 | ``` 444 | 445 | Create the ResourceQuota 446 | ```bash 447 | vi rq-one.yaml 448 | ``` 449 | 450 | ```YAML 451 | apiVersion: v1 452 | kind: ResourceQuota 453 | metadata: 454 | name: my-rq 455 | namespace: one 456 | spec: 457 | hard: 458 | requests.cpu: "1" 459 | requests.memory: 1Gi 460 | limits.cpu: "2" 461 | limits.memory: 2Gi 462 | ``` 463 | 464 | ```bash 465 | kubectl apply -f rq-one.yaml 466 | ``` 467 | 468 | or 469 | ```bash 470 | kubectl create quota my-rq --namespace=one --hard=requests.cpu=1,requests.memory=1Gi,limits.cpu=2,limits.memory=2Gi 471 | ``` 472 |

473 |
474 | 475 | ### Attempt to create a pod with resource requests `cpu=2`, `memory=3Gi` and limits `cpu=3`, `memory=4Gi` in namespace `one` 476 | 477 |
show 478 |

479 | 480 | ```bash 481 | vi pod.yaml 482 | ``` 483 | 484 | ```YAML 485 | apiVersion: v1 486 | kind: Pod 487 | metadata: 488 | creationTimestamp: null 489 | labels: 490 | run: nginx 491 | name: nginx 492 | namespace: one 493 | spec: 494 | containers: 495 | - image: nginx 496 | name: nginx 497 | resources: 498 | requests: 499 | memory: "3Gi" 500 | cpu: "2" 501 | limits: 502 | memory: "4Gi" 503 | cpu: "3" 504 | dnsPolicy: ClusterFirst 505 | restartPolicy: Always 506 | status: {} 507 | ``` 508 | 509 | ```bash 510 | kubectl create -f pod.yaml 511 | ``` 512 | 513 | Expected error message: 514 | ```bash 515 | Error from server (Forbidden): error when creating "pod.yaml": pods "nginx" is forbidden: exceeded quota: my-rq, requested: limits.cpu=3,limits.memory=4Gi,requests.cpu=2,requests.memory=3Gi, used: limits.cpu=0,limits.memory=0,requests.cpu=0,requests.memory=0, limited: limits.cpu=2,limits.memory=2Gi,requests.cpu=1,requests.memory=1Gi 516 | ``` 517 |

518 |
519 | 520 | ### Create a pod with resource requests `cpu=0.5`, `memory=1Gi` and limits `cpu=1`, `memory=2Gi` in namespace `one` 521 | 522 |
show 523 |

524 | 525 | ```bash 526 | vi pod2.yaml 527 | ``` 528 | 529 | ```YAML 530 | apiVersion: v1 531 | kind: Pod 532 | metadata: 533 | creationTimestamp: null 534 | labels: 535 | run: nginx 536 | name: nginx 537 | namespace: one 538 | spec: 539 | containers: 540 | - image: nginx 541 | name: nginx 542 | resources: 543 | requests: 544 | memory: "1Gi" 545 | cpu: "0.5" 546 | limits: 547 | memory: "2Gi" 548 | cpu: "1" 549 | dnsPolicy: ClusterFirst 550 | restartPolicy: Always 551 | status: {} 552 | ``` 553 | 554 | ```bash 555 | kubectl create -f pod2.yaml 556 | ``` 557 | 558 | Show the ResourceQuota usage in namespace `one` 559 | ```bash 560 | kubectl get resourcequota -n one 561 | ``` 562 | 563 | ``` 564 | NAME AGE REQUEST LIMIT 565 | my-rq 10m requests.cpu: 500m/1, requests.memory: 3Mi/1Gi limits.cpu: 1/2, limits.memory: 4Mi/2Gi 566 | ``` 567 |

568 |
569 | 570 | 571 | ## Secrets 572 | 573 | kubernetes.io > Documentation > Concepts > Configuration > [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) 574 | 575 | kubernetes.io > Documentation > Tasks > Inject Data Into Applications > [Distribute Credentials Securely Using Secrets](https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/) 576 | 577 | ### Create a secret called mysecret with the values password=mypass 578 | 579 |
show 580 |

581 | 582 | ```bash 583 | kubectl create secret generic mysecret --from-literal=password=mypass 584 | ``` 585 | 586 |

587 |
588 | 589 | ### Create a secret called mysecret2 that gets key/value from a file 590 | 591 | Create a file called username with the value admin: 592 | 593 | ```bash 594 | echo -n admin > username 595 | ``` 596 | 597 |
show 598 |

599 | 600 | ```bash 601 | kubectl create secret generic mysecret2 --from-file=username 602 | ``` 603 | 604 |

605 |
606 | 607 | ### Get the value of mysecret2 608 | 609 |
show 610 |

611 | 612 | ```bash 613 | kubectl get secret mysecret2 -o yaml 614 | echo -n YWRtaW4= | base64 -d # on MAC it is -D, which decodes the value and shows 'admin' 615 | ``` 616 | 617 | Alternative using `--jsonpath`: 618 | 619 | ```bash 620 | kubectl get secret mysecret2 -o jsonpath='{.data.username}' | base64 -d # on MAC it is -D 621 | ``` 622 | 623 | Alternative using `--template`: 624 | 625 | ```bash 626 | kubectl get secret mysecret2 --template '{{.data.username}}' | base64 -d # on MAC it is -D 627 | ``` 628 | 629 | Alternative using `jq`: 630 | 631 | ```bash 632 | kubectl get secret mysecret2 -o json | jq -r .data.username | base64 -d # on MAC it is -D 633 | ``` 634 | 635 |

636 |
637 | 638 | ### Create an nginx pod that mounts the secret mysecret2 in a volume on path /etc/foo 639 | 640 |
show 641 |

642 | 643 | ```bash 644 | kubectl run nginx --image=nginx --restart=Never -o yaml --dry-run=client > pod.yaml 645 | vi pod.yaml 646 | ``` 647 | 648 | ```YAML 649 | apiVersion: v1 650 | kind: Pod 651 | metadata: 652 | creationTimestamp: null 653 | labels: 654 | run: nginx 655 | name: nginx 656 | spec: 657 | volumes: # specify the volumes 658 | - name: foo # this name will be used for reference inside the container 659 | secret: # we want a secret 660 | secretName: mysecret2 # name of the secret - this must already exist on pod creation 661 | containers: 662 | - image: nginx 663 | imagePullPolicy: IfNotPresent 664 | name: nginx 665 | resources: {} 666 | volumeMounts: # our volume mounts 667 | - name: foo # name on pod.spec.volumes 668 | mountPath: /etc/foo #our mount path 669 | dnsPolicy: ClusterFirst 670 | restartPolicy: Never 671 | status: {} 672 | ``` 673 | 674 | ```bash 675 | kubectl create -f pod.yaml 676 | kubectl exec -it nginx -- /bin/bash 677 | ls /etc/foo # shows username 678 | cat /etc/foo/username # shows admin 679 | ``` 680 | 681 |

682 |
683 | 684 | ### Delete the pod you just created and mount the variable 'username' from secret mysecret2 onto a new nginx pod in env variable called 'USERNAME' 685 | 686 |
show 687 |

688 | 689 | ```bash 690 | kubectl delete po nginx 691 | kubectl run nginx --image=nginx --restart=Never -o yaml --dry-run=client > pod.yaml 692 | vi pod.yaml 693 | ``` 694 | 695 | ```YAML 696 | apiVersion: v1 697 | kind: Pod 698 | metadata: 699 | creationTimestamp: null 700 | labels: 701 | run: nginx 702 | name: nginx 703 | spec: 704 | containers: 705 | - image: nginx 706 | imagePullPolicy: IfNotPresent 707 | name: nginx 708 | resources: {} 709 | env: # our env variables 710 | - name: USERNAME # asked name 711 | valueFrom: 712 | secretKeyRef: # secret reference 713 | name: mysecret2 # our secret's name 714 | key: username # the key of the data in the secret 715 | dnsPolicy: ClusterFirst 716 | restartPolicy: Never 717 | status: {} 718 | ``` 719 | 720 | ```bash 721 | kubectl create -f pod.yaml 722 | kubectl exec -it nginx -- env | grep USERNAME | cut -d '=' -f 2 # will show 'admin' 723 | ``` 724 | 725 |

726 |
727 | 728 | ### Create a Secret named 'ext-service-secret' in the namespace 'secret-ops'. Then, provide the key-value pair API_KEY=LmLHbYhsgWZwNifiqaRorH8T as literal. 729 | 730 |
show 731 |

732 | 733 | ```bash 734 | export ns="-n secret-ops" 735 | export do="--dry-run=client -oyaml" 736 | k create secret generic ext-service-secret --from-literal=API_KEY=LmLHbYhsgWZwNifiqaRorH8T $ns $do > sc.yaml 737 | k apply -f sc.yaml 738 | ``` 739 | 740 |

741 |
742 | 743 | ### Consuming the Secret. Create a Pod named 'consumer' with the image 'nginx' in the namespace 'secret-ops' and consume the Secret as an environment variable. Then, open an interactive shell to the Pod, and print all environment variables. 744 |
show 745 |

746 | 747 | ```bash 748 | export ns="-n secret-ops" 749 | export do="--dry-run=client -oyaml" 750 | k run consumer --image=nginx $ns $do > nginx.yaml 751 | vi nginx.yaml 752 | ``` 753 | 754 | ```YAML 755 | apiVersion: v1 756 | kind: Pod 757 | metadata: 758 | creationTimestamp: null 759 | labels: 760 | run: consumer 761 | name: consumer 762 | namespace: secret-ops 763 | spec: 764 | containers: 765 | - image: nginx 766 | name: consumer 767 | resources: {} 768 | env: 769 | - name: API_KEY 770 | valueFrom: 771 | secretKeyRef: 772 | name: ext-service-secret 773 | key: API_KEY 774 | dnsPolicy: ClusterFirst 775 | restartPolicy: Always 776 | status: {} 777 | ``` 778 | 779 | ```bash 780 | k exec -it $ns consumer -- /bin/sh 781 | #env 782 | ``` 783 |

784 |
785 | 786 | ### Create a Secret named 'my-secret' of type 'kubernetes.io/ssh-auth' in the namespace 'secret-ops'. Define a single key named 'ssh-privatekey', and point it to the file 'id_rsa' in this directory. 787 |
show 788 |

789 | 790 | ```bash 791 | #Tips, export to variable 792 | export do="--dry-run=client -oyaml" 793 | export ns="-n secret-ops" 794 | 795 | #if id_rsa file didn't exist. 796 | ssh-keygen 797 | 798 | k create secret generic my-secret $ns --type="kubernetes.io/ssh-auth" --from-file=ssh-privatekey=id_rsa $do > sc.yaml 799 | k apply -f sc.yaml 800 | ``` 801 |

802 |
803 | 804 | ### Create a Pod named 'consumer' with the image 'nginx' in the namespace 'secret-ops', and consume the Secret as Volume. Mount the Secret as Volume to the path /var/app with read-only access. Open an interactive shell to the Pod, and render the contents of the file. 805 |
show 806 |

807 | 808 | ```bash 809 | #Tips, export to variable 810 | export ns="-n secret-ops" 811 | export do="--dry-run=client -oyaml" 812 | k run consumer --image=nginx $ns $do > nginx.yaml 813 | vi nginx.yaml 814 | ``` 815 | 816 | ```YAML 817 | apiVersion: v1 818 | kind: Pod 819 | metadata: 820 | creationTimestamp: null 821 | labels: 822 | run: consumer 823 | name: consumer 824 | namespace: secret-ops 825 | spec: 826 | containers: 827 | - image: nginx 828 | name: consumer 829 | resources: {} 830 | volumeMounts: 831 | - name: foo 832 | mountPath: "/var/app" 833 | readOnly: true 834 | volumes: 835 | - name: foo 836 | secret: 837 | secretName: my-secret 838 | optional: true 839 | dnsPolicy: ClusterFirst 840 | restartPolicy: Always 841 | status: {} 842 | ``` 843 | 844 | ```bash 845 | k exec -it $ns consumer -- /bin/sh 846 | # cat /var/app/ssh-privatekey 847 | # exit 848 | ``` 849 |

850 |
851 | 852 | ## ServiceAccounts 853 | 854 | kubernetes.io > Documentation > Tasks > Configure Pods and Containers > [Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) 855 | 856 | ### See all the service accounts of the cluster in all namespaces 857 | 858 |
show 859 |

860 | 861 | ```bash 862 | kubectl get sa --all-namespaces 863 | ``` 864 | Alternatively 865 | 866 | ```bash 867 | kubectl get sa -A 868 | ``` 869 | 870 |

871 |
872 | 873 | ### Create a new serviceaccount called 'myuser' 874 | 875 |
show 876 |

877 | 878 | ```bash 879 | kubectl create sa myuser 880 | ``` 881 | 882 | Alternatively: 883 | 884 | ```bash 885 | # let's get a template easily 886 | kubectl get sa default -o yaml > sa.yaml 887 | vim sa.yaml 888 | ``` 889 | 890 | ```YAML 891 | apiVersion: v1 892 | kind: ServiceAccount 893 | metadata: 894 | name: myuser 895 | ``` 896 | 897 | ```bash 898 | kubectl create -f sa.yaml 899 | ``` 900 | 901 |

902 |
903 | 904 | ### Create an nginx pod that uses 'myuser' as a service account 905 | 906 |
show 907 |

908 | 909 | ```bash 910 | kubectl run nginx --image=nginx --restart=Never -o yaml --dry-run=client > pod.yaml 911 | vi pod.yaml 912 | ``` 913 | 914 | ```YAML 915 | apiVersion: v1 916 | kind: Pod 917 | metadata: 918 | creationTimestamp: null 919 | labels: 920 | run: nginx 921 | name: nginx 922 | spec: 923 | serviceAccountName: myuser # we use pod.spec.serviceAccountName 924 | containers: 925 | - image: nginx 926 | imagePullPolicy: IfNotPresent 927 | name: nginx 928 | resources: {} 929 | dnsPolicy: ClusterFirst 930 | restartPolicy: Never 931 | status: {} 932 | ``` 933 | 934 | or 935 | 936 | ```YAML 937 | apiVersion: v1 938 | kind: Pod 939 | metadata: 940 | creationTimestamp: null 941 | labels: 942 | run: nginx 943 | name: nginx 944 | spec: 945 | serviceAccount: myuser # we use pod.spec.serviceAccount 946 | containers: 947 | - image: nginx 948 | imagePullPolicy: IfNotPresent 949 | name: nginx 950 | resources: {} 951 | dnsPolicy: ClusterFirst 952 | restartPolicy: Never 953 | status: {} 954 | ``` 955 | 956 | ```bash 957 | kubectl create -f pod.yaml 958 | kubectl describe pod nginx # will see that a new secret called myuser-token-***** has been mounted 959 | ``` 960 | 961 |

962 |
963 | 964 | ### Generate an API token for the service account 'myuser' 965 | 966 |
show 967 |

968 | 969 | ```bash 970 | kubectl create token myuser 971 | ``` 972 | 973 |

974 |
975 | --------------------------------------------------------------------------------