├── labs ├── secret │ ├── password.txt │ ├── username.txt │ ├── server.txt │ ├── config.json │ ├── secret.yaml │ └── secret-pods.yaml ├── configmap │ ├── theme.txt │ └── configmap.yaml ├── service │ ├── backend_nodeport.yaml │ ├── backend_clusterip.yaml │ ├── backend_loadbalancer.yaml │ └── deploy.yaml ├── pod │ ├── pod1.yaml │ └── multicontainer.yaml ├── persistentvolume │ ├── pv.yaml │ ├── pvc.yaml │ └── deploy.yaml ├── job │ └── job.yaml ├── ingress │ ├── todoingress.yaml │ ├── appingress.yaml │ └── deploy.yaml ├── deployment │ ├── deployment1.yaml │ ├── recreate-deployment.yaml │ └── rolling-deployment.yaml ├── cronjob │ └── cronjob.yaml ├── tainttoleration │ └── podtoleration.yaml ├── daemonset │ └── daemonset.yaml ├── liveness │ └── liveness.yaml ├── affinity │ └── podnodeaffinity.yaml └── statefulset │ └── statefulset.yaml ├── index.html ├── LICENSE ├── K8s-CronJob.md ├── create_real_cluster ├── win2019-kubeadm1.26.2-calico3.25.0-docker │ ├── install2.ps1 │ ├── install1.ps1 │ └── install-docker-ce.ps1 ├── win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24 │ ├── install1.ps1 │ └── install2.ps1 ├── ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10 │ ├── master.sh │ └── install.sh └── ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24 │ ├── master-ubuntu24.04-k8s1.32.sh │ └── install-ubuntu24.04-k8s1.32.sh ├── K8s-Job.md ├── K8-CreatingPod-Declerative.md ├── K8s-CreatingPod-Imperative.md ├── K8s-Enable-Dashboard-On-Cluster.md ├── K8s-Configmap.md ├── K8s-Liveness-App.md ├── K8s-Deployment.md ├── K8s-Daemon-Sets.md ├── K8s-Taint-Toleration.md ├── K8s-Statefulset.md ├── K8s-Multicontainer-Sidecar.md ├── HelmCheatsheet.md ├── K8s-Helm-Jenkins.md ├── K8s-Node-Affinity.md ├── K8s-Monitoring-Prometheus-Grafana.md ├── K8s-Secret.md ├── K8s-Service-App.md ├── K8s-Kubeadm-Cluster-Docker.md ├── K8s-PersistantVolume.md ├── Helm.md ├── K8s-Ingress.md ├── K8s-Rollout-Rollback.md ├── KubernetesCommandCheatSheet.md └── K8s-Kubeadm-Cluster-Setup.md /labs/secret/password.txt: -------------------------------------------------------------------------------- 1 | P@ssw0rd! -------------------------------------------------------------------------------- /labs/secret/username.txt: -------------------------------------------------------------------------------- 1 | admin -------------------------------------------------------------------------------- /labs/configmap/theme.txt: -------------------------------------------------------------------------------- 1 | theme=dark -------------------------------------------------------------------------------- /labs/secret/server.txt: -------------------------------------------------------------------------------- 1 | db.example.com -------------------------------------------------------------------------------- /labs/secret/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "6bba108d4b2212f2c30c71dfa279e1f77cc5c3b2" 3 | } -------------------------------------------------------------------------------- /labs/secret/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mysecret 5 | type: Opaque 6 | stringData: 7 | db_server: db.example.com 8 | db_username: admin 9 | db_password: P@ssw0rd! 
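The same Secret can also be built imperatively from the plain files above; a minimal sketch (standard kubectl, key names taken from secret.yaml):

```
# build the Secret from the lab files, mapping each file to the key used in secret.yaml
kubectl create secret generic mysecret --from-file=db_username=username.txt --from-file=db_password=password.txt --from-file=db_server=server.txt
# values given via stringData/--from-file are stored base64-encoded under .data; decode one to verify
kubectl get secret mysecret -o jsonpath='{.data.db_password}' | base64 -d
```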
-------------------------------------------------------------------------------- /labs/service/backend_nodeport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: frontend 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: frontend 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 -------------------------------------------------------------------------------- /labs/service/backend_clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: backend 5 | spec: 6 | type: ClusterIP 7 | selector: 8 | app: backend 9 | ports: 10 | - protocol: TCP 11 | port: 5000 12 | targetPort: 5000 -------------------------------------------------------------------------------- /labs/service/backend_loadbalancer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: frontendlb 5 | spec: 6 | type: LoadBalancer 7 | selector: 8 | app: frontend 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 -------------------------------------------------------------------------------- /labs/pod/pod1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: firstpod 5 | labels: 6 | app: frontend 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | ports: 12 | - containerPort: 80 13 | env: 14 | - name: USER 15 | value: "username" -------------------------------------------------------------------------------- /labs/persistentvolume/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mysqlpv 5 | labels: 6 | app: mysql 7 | spec: 8 | capacity: 9 | storage: 5Gi 10 | accessModes: 11 | - ReadWriteOnce 12 | persistentVolumeReclaimPolicy: Recycle 13 | nfs: 14 | path: / 15 | server: 10.255.255.10 -------------------------------------------------------------------------------- /labs/persistentvolume/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mysqlclaim 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | volumeMode: Filesystem 9 | resources: 10 | requests: 11 | storage: 5Gi 12 | storageClassName: "" 13 | selector: 14 | matchLabels: 15 | app: mysql -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |
5 | 6 | Hello World! This file is created for the multicontainer-sidecar github update. 7 | 8 | This is the multicontainer scenario! Second version. 9 | 10 | 11 |
12 | 13 | 14 | -------------------------------------------------------------------------------- /labs/job/job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: pi 5 | spec: 6 | parallelism: 2 7 | completions: 10 8 | backoffLimit: 5 9 | activeDeadlineSeconds: 100 10 | template: 11 | spec: 12 | containers: 13 | - name: pi 14 | image: perl 15 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] 16 | restartPolicy: Never #OnFailure -------------------------------------------------------------------------------- /labs/ingress/todoingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: todoingress 5 | spec: 6 | rules: 7 | - host: todoapp.com 8 | http: 9 | paths: 10 | - path: / 11 | pathType: Prefix 12 | backend: 13 | service: 14 | name: todosvc 15 | port: 16 | number: 80 -------------------------------------------------------------------------------- /labs/deployment/deployment1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: firstdeployment 5 | labels: 6 | team: development 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: frontend 12 | template: 13 | metadata: 14 | labels: 15 | app: frontend 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:latest 20 | ports: 21 | - containerPort: 80 -------------------------------------------------------------------------------- /labs/deployment/recreate-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rcdeployment 5 | labels: 6 | team: development 7 | spec: 8 | replicas: 5 9 | selector: 10 | matchLabels: 11 | app: recreate 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: recreate 18 | spec: 19 | containers: 20 | - name: nginx 21 | image: nginx 22 | ports: 23 | - containerPort: 80 -------------------------------------------------------------------------------- /labs/cronjob/cronjob.yaml: -------------------------------------------------------------------------------- 1 | # https://crontab.guru/ 2 | apiVersion: batch/v1 3 | kind: CronJob 4 | metadata: 5 | name: hello 6 | spec: 7 | schedule: "*/1 * * * *" 8 | jobTemplate: 9 | spec: 10 | template: 11 | spec: 12 | containers: 13 | - name: hello 14 | image: busybox 15 | imagePullPolicy: IfNotPresent 16 | command: 17 | - /bin/sh 18 | - -c 19 | - date; echo Hello from the Kubernetes cluster 20 | restartPolicy: OnFailure -------------------------------------------------------------------------------- /labs/deployment/rolling-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rolldeployment 5 | labels: 6 | team: development 7 | spec: 8 | replicas: 10 9 | selector: 10 | matchLabels: 11 | app: rolling 12 | strategy: 13 | type: RollingUpdate 14 | rollingUpdate: 15 | maxUnavailable: 2 16 | maxSurge: 2 17 | template: 18 | metadata: 19 | labels: 20 | app: rolling 21 | spec: 22 | containers: 23 | - name: nginx 24 | image: nginx 25 | ports: 26 | - containerPort: 80 -------------------------------------------------------------------------------- /labs/tainttoleration/podtoleration.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: toleratedpod1 5 | labels: 6 | env: test 7 | spec: 8 | containers: 9 | - name: toleratedcontainer1 10 | image: nginx:latest 11 | tolerations: 12 | - key: "platform" 13 | operator: "Equal" 14 | value: "production" 15 | effect: "NoSchedule" 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: toleratedpod2 21 | labels: 22 | env: test 23 | spec: 24 | containers: 25 | - name: toleratedcontainer2 26 | image: nginx 27 | tolerations: 28 | - key: "platform" 29 | operator: "Exists" 30 | effect: "NoSchedule" -------------------------------------------------------------------------------- /labs/ingress/appingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: appingress 5 | annotations: 6 | nginx.ingress.kubernetes.io/rewrite-target: /$1 7 | spec: 8 | rules: 9 | - host: webapp.com 10 | http: 11 | paths: 12 | - path: /blue 13 | pathType: Prefix 14 | backend: 15 | service: 16 | name: bluesvc 17 | port: 18 | number: 80 19 | - path: /green 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: greensvc 24 | port: 25 | number: 80 -------------------------------------------------------------------------------- /labs/pod/multicontainer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: multicontainer 5 | spec: 6 | containers: 7 | - name: webcontainer 8 | image: nginx 9 | ports: 10 | - containerPort: 80 11 | volumeMounts: 12 | - name: sharedvolume 13 | mountPath: /usr/share/nginx/html 14 | - name: sidecarcontainer 15 | image: busybox 16 | command: ["/bin/sh"] 17 | args: ["-c", "while true; do wget -O /var/log/index.html https://raw.githubusercontent.com/omerbsezer/Fast-Kubernetes/main/index.html; sleep 15; done"] 18 | volumeMounts: 19 | - name: sharedvolume 20 | mountPath: /var/log 21 | volumes: 22 | - name: sharedvolume 23 | emptyDir: {} -------------------------------------------------------------------------------- /labs/configmap/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: myconfigmap 5 | data: 6 | db_server: "db.example.com" 7 | database: "mydatabase" 8 | site.settings: | 9 | color=blue 10 | padding:25px 11 | --- 12 | apiVersion: v1 13 | kind: Pod 14 | metadata: 15 | name: configmappod 16 | spec: 17 | containers: 18 | - name: configmapcontainer 19 | image: nginx 20 | env: 21 | - name: DB_SERVER 22 | valueFrom: 23 | configMapKeyRef: 24 | name: myconfigmap 25 | key: db_server 26 | - name: DATABASE 27 | valueFrom: 28 | configMapKeyRef: 29 | name: myconfigmap 30 | key: database 31 | volumeMounts: 32 | - name: config-vol 33 | mountPath: "/config" 34 | readOnly: true 35 | volumes: 36 | - name: config-vol 37 | configMap: 38 | name: myconfigmap -------------------------------------------------------------------------------- /labs/service/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: frontend 5 | labels: 6 | team: development 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: frontend 12 | template: 13 | metadata: 14 | labels: 15 | app: frontend 16 | spec: 17 | containers: 18 | - name: frontend 19 | 
image: nginx:latest 20 | ports: 21 | - containerPort: 80 22 | --- 23 | apiVersion: apps/v1 24 | kind: Deployment 25 | metadata: 26 | name: backend 27 | labels: 28 | team: development 29 | spec: 30 | replicas: 3 31 | selector: 32 | matchLabels: 33 | app: backend 34 | template: 35 | metadata: 36 | labels: 37 | app: backend 38 | spec: 39 | containers: 40 | - name: backend 41 | image: ozgurozturknet/k8s:backend 42 | ports: 43 | - containerPort: 5000 -------------------------------------------------------------------------------- /labs/persistentvolume/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mysqlsecret 5 | type: Opaque 6 | stringData: 7 | password: P@ssw0rd! 8 | --- 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | metadata: 12 | name: mysqldeployment 13 | labels: 14 | app: mysql 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: mysql 20 | strategy: 21 | type: Recreate 22 | template: 23 | metadata: 24 | labels: 25 | app: mysql 26 | spec: 27 | containers: 28 | - name: mysql 29 | image: mysql 30 | ports: 31 | - containerPort: 3306 32 | volumeMounts: 33 | - mountPath: "/var/lib/mysql" 34 | name: mysqlvolume 35 | env: 36 | - name: MYSQL_ROOT_PASSWORD 37 | valueFrom: 38 | secretKeyRef: 39 | name: mysqlsecret 40 | key: password 41 | volumes: 42 | - name: mysqlvolume 43 | persistentVolumeClaim: 44 | claimName: mysqlclaim -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Omer Berat Sezer 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /labs/secret/secret-pods.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: secretvolumepod 5 | spec: 6 | containers: 7 | - name: secretcontainer 8 | image: nginx 9 | volumeMounts: 10 | - name: secret-vol 11 | mountPath: /secret 12 | volumes: 13 | - name: secret-vol 14 | secret: 15 | secretName: mysecret 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: secretenvpod 21 | spec: 22 | containers: 23 | - name: secretcontainer 24 | image: nginx 25 | env: 26 | - name: username 27 | valueFrom: 28 | secretKeyRef: 29 | name: mysecret 30 | key: db_username 31 | - name: password 32 | valueFrom: 33 | secretKeyRef: 34 | name: mysecret 35 | key: db_password 36 | - name: server 37 | valueFrom: 38 | secretKeyRef: 39 | name: mysecret 40 | key: db_server 41 | --- 42 | apiVersion: v1 43 | kind: Pod 44 | metadata: 45 | name: secretenvallpod 46 | spec: 47 | containers: 48 | - name: secretcontainer 49 | image: nginx 50 | envFrom: 51 | - secretRef: 52 | name: mysecret -------------------------------------------------------------------------------- /labs/daemonset/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: logdaemonset 5 | labels: 6 | app: fluentd-logging 7 | spec: 8 | selector: 9 | matchLabels: 10 | name: fluentd-elasticsearch 11 | template: 12 | metadata: 13 | labels: 14 | name: fluentd-elasticsearch 15 | spec: 16 | tolerations: 17 | # this toleration is to have the daemonset runnable on master nodes 18 | # remove it if your masters can't run pods 19 | - key: node-role.kubernetes.io/master 20 | effect: NoSchedule 21 | containers: 22 | - name: fluentd-elasticsearch 23 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 24 | resources: 25 | limits: 26 | memory: 200Mi 27 | requests: 28 | cpu: 100m 29 | memory: 200Mi 30 | volumeMounts: 31 | - name: varlog 32 | mountPath: /var/log 33 | - name: varlibdockercontainers 34 | mountPath: /var/lib/docker/containers 35 | readOnly: true 36 | terminationGracePeriodSeconds: 30 37 | volumes: 38 | - name: varlog 39 | hostPath: 40 | path: /var/log 41 | - name: varlibdockercontainers 42 | hostPath: 43 | path: /var/lib/docker/containers -------------------------------------------------------------------------------- /labs/liveness/liveness.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | test: liveness 6 | name: liveness-http 7 | spec: 8 | containers: 9 | - name: liveness 10 | image: k8s.gcr.io/liveness 11 | args: 12 | - /server 13 | livenessProbe: 14 | httpGet: 15 | path: /healthz 16 | port: 8080 17 | httpHeaders: 18 | - name: Custom-Header 19 | value: Awesome 20 | initialDelaySeconds: 3 21 | periodSeconds: 3 22 | --- 23 | apiVersion: v1 24 | kind: Pod 25 | metadata: 26 | labels: 27 | test: liveness 28 | name: liveness-exec 29 | spec: 30 | containers: 31 | - name: liveness 32 | image: k8s.gcr.io/busybox 33 | args: 34 | - /bin/sh 35 | - -c 36 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 37 | livenessProbe: 38 | exec: 39 | command: 40 | - cat 41 | - /tmp/healthy 42 | initialDelaySeconds: 5 43 | periodSeconds: 5 44 | --- 45 | apiVersion: v1 46 | kind: Pod 47 | metadata: 48 | name: goproxy 49 | labels: 50 | app: goproxy 51 | spec: 52 | containers: 
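# the goproxy pod below uses a tcpSocket liveness probe: the kubelet tries to open a TCP connection to port 8080 every periodSeconds; if the connection cannot be established, the container is restarted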
53 | - name: goproxy 54 | image: k8s.gcr.io/goproxy:0.1 55 | ports: 56 | - containerPort: 8080 57 | livenessProbe: 58 | tcpSocket: 59 | port: 8080 60 | initialDelaySeconds: 15 61 | periodSeconds: 20 -------------------------------------------------------------------------------- /K8s-CronJob.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Cron Job 2 | 3 | This scenario shows how K8s Cron job object works on minikube 4 | 5 | ### Steps 6 | 7 | - Copy and save (below) as file on your PC (cronjob.yaml). 8 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/cronjob/cronjob.yaml 9 | 10 | ``` 11 | apiVersion: batch/v1 12 | kind: CronJob 13 | metadata: 14 | name: hello 15 | spec: 16 | schedule: "*/1 * * * *" 17 | jobTemplate: 18 | spec: 19 | template: 20 | spec: 21 | containers: 22 | - name: hello 23 | image: busybox 24 | imagePullPolicy: IfNotPresent 25 | command: 26 | - /bin/sh 27 | - -c 28 | - date; echo Hello from the Kubernetes cluster 29 | restartPolicy: OnFailure 30 | ``` 31 | 32 | ![image](https://user-images.githubusercontent.com/10358317/154947805-0c1db85f-fd52-4e3e-8e86-5afca73359ca.png) 33 | 34 | 35 | - Create Cron Job: 36 | 37 | ![image](https://user-images.githubusercontent.com/10358317/152511636-b68caefa-1d1a-48a4-bc2b-a773e0ba5eef.png) 38 | 39 | - Watch pods' status: 40 | 41 | ![image](https://user-images.githubusercontent.com/10358317/152511899-cb32ee77-b3b2-4cf5-ad44-f3b1187555f2.png) 42 | 43 | - Watch job's status: 44 | 45 | ![image](https://user-images.githubusercontent.com/10358317/152511995-4a6ca576-99e1-4dbf-bf26-73c150a36b5b.png) 46 | 47 | - Delete job: 48 | 49 | ![image](https://user-images.githubusercontent.com/10358317/152512127-2410d92d-4555-45d7-ab3f-cac0d80839df.png) 50 | -------------------------------------------------------------------------------- /labs/affinity/podnodeaffinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nodeaffinitypod1 5 | spec: 6 | containers: 7 | - name: nodeaffinity1 8 | image: nginx:latest 9 | affinity: 10 | nodeAffinity: 11 | requiredDuringSchedulingIgnoredDuringExecution: 12 | nodeSelectorTerms: 13 | - matchExpressions: 14 | - key: app 15 | operator: In #In, NotIn, Exists, DoesNotExist 16 | values: 17 | - production 18 | --- 19 | apiVersion: v1 20 | kind: Pod 21 | metadata: 22 | name: nodeaffinitypod2 23 | spec: 24 | containers: 25 | - name: nodeaffinity2 26 | image: nginx:latest 27 | affinity: 28 | nodeAffinity: 29 | preferredDuringSchedulingIgnoredDuringExecution: 30 | - weight: 1 31 | preference: 32 | matchExpressions: 33 | - key: app 34 | operator: In 35 | values: 36 | - production 37 | - weight: 2 38 | preference: 39 | matchExpressions: 40 | - key: app 41 | operator: In 42 | values: 43 | - test 44 | --- 45 | apiVersion: v1 46 | kind: Pod 47 | metadata: 48 | name: nodeaffinitypod3 49 | spec: 50 | containers: 51 | - name: nodeaffinity3 52 | image: nginx:latest 53 | affinity: 54 | nodeAffinity: 55 | requiredDuringSchedulingIgnoredDuringExecution: 56 | nodeSelectorTerms: 57 | - matchExpressions: 58 | - key: app 59 | operator: Exists #In, NotIn, Exists, DoesNotExist -------------------------------------------------------------------------------- /create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install2.ps1: -------------------------------------------------------------------------------- 1 | echo "#########################################################" 2 
| echo "Before to run this script, please be sure creating 'k' directory under C directory (c:\k) and includes K8s config file..." 3 | echo "e.g. mkdir c:\k" 4 | echo "e.g. run on the master node: scp -r /home/ubuntu/.kube/config windowsuser@IP:C:\k" 5 | echo "#########################################################" 6 | echo "Script will start in 10 Seconds..." 7 | Start-Sleep -s 10 8 | 9 | echo "Installing remote access..." 10 | Install-RemoteAccess -VpnType RoutingOnly 11 | Set-Service -Name RemoteAccess -StartupType 'Automatic' 12 | start-service RemoteAccess 13 | 14 | echo "Installing Calico, Waiting 10 Seconds..." 15 | Start-Sleep -s 10 16 | Invoke-WebRequest https://github.com/projectcalico/calico/releases/download/v3.25.0/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1 17 | c:\install-calico-windows.ps1 -DownloadOnly yes -KubeVersion 1.23.5 18 | Get-Service -Name CalicoNode 19 | Get-Service -Name CalicoFelix 20 | 21 | echo "Installing Kubelet Service, Waiting 10 Seconds..." 22 | Start-Sleep -s 10 23 | C:\CalicoWindows\kubernetes\install-kube-services.ps1 24 | Start-Service -Name kubelet 25 | Start-Service -Name kube-proxy 26 | 27 | echo "Testing kubectl..." 28 | kubectl get nodes -o wide 29 | 30 | echo "#########################################################" 31 | echo "Congrulations, kubernetes installed on Win..." 32 | echo "Calico Ref: https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/kubernetes/standard" 33 | echo "#########################################################" 34 | -------------------------------------------------------------------------------- /K8s-Job.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Job 2 | 3 | This scenario shows how K8s job object works on minikube 4 | 5 | ### Steps 6 | 7 | - Copy and save (below) as file on your PC (job.yaml). 8 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/job/job.yaml 9 | 10 | ``` 11 | apiVersion: batch/v1 12 | kind: Job 13 | metadata: 14 | name: pi 15 | spec: 16 | parallelism: 2 # each step how many pods start in parallel at a time 17 | completions: 10 # number of pods that run and complete job at the end of the time 18 | backoffLimit: 5 # to tolerate fail number of job, after 5 times of failure, not try to continue job, fail the job 19 | activeDeadlineSeconds: 100 # if this job is not completed in 100 seconds, fail the job 20 | template: 21 | spec: 22 | containers: 23 | - name: pi 24 | image: perl # image is perl from docker 25 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] # it calculates the first 2000 digits of pi number 26 | restartPolicy: Never 27 | ``` 28 | 29 | ![image](https://user-images.githubusercontent.com/10358317/154946885-80e87f3c-5120-4c09-bde2-a35cd09a7383.png) 30 | 31 | - Create job: 32 | 33 | ![image](https://user-images.githubusercontent.com/10358317/152507949-922134f4-28cb-4d4f-8ccf-d5c5657b79c3.png) 34 | 35 | - Watch pods' status: 36 | 37 | ![image](https://user-images.githubusercontent.com/10358317/152507888-21b8de27-c4a4-4772-8209-072bdcd66ad5.png) 38 | 39 | - Watch job's status: 40 | 41 | ![image](https://user-images.githubusercontent.com/10358317/152508221-1795ed68-083b-4e23-b0e5-8c97a0672141.png) 42 | 43 | - After pods' completion, we can see the logs of each pods. Pods are not deleted after the completion of task on each pod. 
44 | 45 | ![image](https://user-images.githubusercontent.com/10358317/152508363-a61e5c7a-57fa-4030-a8b0-d9baed027146.png) 46 | 47 | - Delete job: 48 | 49 | ![image](https://user-images.githubusercontent.com/10358317/152508749-049880e4-96b5-4dfd-96c2-107796366c02.png) 50 | -------------------------------------------------------------------------------- /create_real_cluster/win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install1.ps1: -------------------------------------------------------------------------------- 1 | echo "#########################################################" 2 | echo "Script will start in 10 Seconds..." 3 | Start-Sleep -s 10 4 | 5 | echo "Firewall rules : Allow All Traffic..." 6 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction OutBound -Action Allow 7 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction InBound -Action Allow 8 | 9 | echo "Installing Containers..." 10 | Install-WindowsFeature -Name containers 11 | 12 | echo "Installing Containerd, Waiting 10 Seconds..." 13 | Start-Sleep -s 10 14 | Invoke-WebRequest -UseBasicParsing "https://raw.githubusercontent.com/microsoft/Windows-Containers/Main/helpful_tools/Install-ContainerdRuntime/install-containerd-runtime.ps1" -o install-containerd-runtime.ps1 15 | .\install-containerd-runtime.ps1 16 | 17 | echo "Setting Service Containerd, Waiting 20 Seconds..." 18 | Start-Sleep -s 20 19 | Set-Service -Name containerd -StartupType 'Automatic' 20 | 21 | echo "Installing additional Windows networking components: RemoteAccess, RSAT-RemoteAccess-PowerShell, Routing, Waiting 10 Seconds..." 22 | Start-Sleep -s 10 23 | Install-WindowsFeature RemoteAccess 24 | 25 | echo "Installing RSAT-RemoteAccess-PowerShell, Waiting 10 Seconds..." 26 | Start-Sleep -s 10 27 | Install-WindowsFeature RSAT-RemoteAccess-PowerShell 28 | 29 | echo "Installing Routing, Waiting 10 Seconds..." 30 | Start-Sleep -s 10 31 | Install-WindowsFeature Routing 32 | 33 | echo "#########################################################" 34 | echo "Containerd and network components are installed..." 35 | echo "After Restart, please run INSTALL2.ps1..." 36 | echo "Before to run this install2.ps1, please be sure creating 'k' directory under C directory (c:\k) and includes K8s config file..." 37 | echo "e.g. mkdir c:\k" 38 | echo "e.g. run on the master node: scp -r /home/ubuntu/.kube/config windowsuser@IP:C:\k" 39 | echo "e.g. or copy the config file content, and paste it on Windows c:\k" 40 | echo "#########################################################" 41 | echo "Computer will be restarted in 10 Seconds..." 
42 | Start-Sleep -s 10 43 | Restart-Computer -Force 44 | -------------------------------------------------------------------------------- /K8-CreatingPod-Declerative.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Creating Pod - Declarative Way (With Yaml File) 2 | 3 | This scenario shows: 4 | - how to create basic K8s pod using yaml file, 5 | - how to get more information about pod (to solve troubleshooting), 6 | 7 | 8 | ### Steps 9 | 10 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start") 11 | 12 | ![image](https://user-images.githubusercontent.com/10358317/153183333-371fe598-d5a4-4b86-9b5d-9e33f35063cc.png) 13 | 14 | - Create Yaml file (pod1.yaml) in your directory and copy the below definition into the file: 15 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/pod/pod1.yaml 16 | 17 | ``` 18 | apiVersion: v1 19 | kind: Pod # type of K8s object: Pod 20 | metadata: 21 | name: firstpod # name of pod 22 | labels: 23 | app: frontend # label pod with "app:frontend" 24 | spec: 25 | containers: 26 | - name: nginx 27 | image: nginx:latest # image name:image version, nginx downloads from DockerHub 28 | ports: 29 | - containerPort: 80 # open ports in the container 30 | env: # environment variables 31 | - name: USER 32 | value: "username" 33 | ``` 34 | 35 | ![image](https://user-images.githubusercontent.com/10358317/153674646-8997eb99-12b9-4394-91f2-2de4032ee3db.png) 36 | 37 | 38 | - Apply/run the file to create pod in declarative way ("kubectl apply -f pod1.yaml"): 39 | 40 | ![image](https://user-images.githubusercontent.com/10358317/153198471-55d92940-1141-4e04-a701-6356daaf0181.png) 41 | 42 | - Describe firstpod ("kubectl describe pods firstpod"): 43 | 44 | ![image](https://user-images.githubusercontent.com/10358317/153199893-95bfbef0-61b4-4c41-bd89-481d976c272c.png) 45 | 46 | - Delete pod and get all pods in the default namespace ("kubectl delete -f pod1.yaml"): 47 | 48 | ![image](https://user-images.githubusercontent.com/10358317/153200081-3f7823a8-e5d0-4143-aac4-157948fe2a61.png) 49 | 50 | - If you want to delete minikube ("minikube delete"): 51 | 52 | ![image](https://user-images.githubusercontent.com/10358317/153200584-01971754-0739-4c8f-8446-d2d3ab5bed31.png) 53 | 54 | -------------------------------------------------------------------------------- /create_real_cluster/win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install2.ps1: -------------------------------------------------------------------------------- 1 | echo "#########################################################" 2 | echo "Before to run this script, please be sure creating 'k' directory under C directory (c:\k) and includes K8s config file..." 3 | echo "e.g. mkdir c:\k" 4 | echo "e.g. run on the master node: scp -r /home/ubuntu/.kube/config windowsuser@IP:C:\k" 5 | echo "#########################################################" 6 | echo "Script will start in 10 Seconds..." 7 | Start-Sleep -s 10 8 | 9 | echo "Installing remote access..." 10 | Install-RemoteAccess -VpnType RoutingOnly 11 | Set-Service -Name RemoteAccess -StartupType 'Automatic' 12 | start-service RemoteAccess 13 | 14 | echo "Installing Calico, Waiting 10 Seconds..." 
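# hedged note on the installer flags used below (reading based on the Calico Windows quickstart, not this repo's docs):
# -KubeVersion should match the kubeadm version on the Linux control plane, -ServiceCidr the cluster's
# service CIDR, and -DNSServerIPs the cluster DNS IP; these values must mirror what the Linux-side master.sh configured.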
15 | Start-Sleep -s 10 16 | Invoke-WebRequest -Uri https://github.com/projectcalico/calico/releases/download/v3.29.2/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1 17 | c:\install-calico-windows.ps1 -ReleaseBaseURL "https://github.com/projectcalico/calico/releases/download/v3.29.2" -ReleaseFile "calico-windows-v3.29.2.zip" -KubeVersion "1.32.0" -DownloadOnly "yes" -ServiceCidr "10.96.0.0/24" -DNSServerIPs "127.0.0.1" 18 | 19 | $ENV:CNI_BIN_DIR="c:\program files\containerd\cni\bin" 20 | $ENV:CNI_CONF_DIR="c:\program files\containerd\cni\conf" 21 | c:\calicowindows\install-calico.ps1 22 | c:\calicowindows\start-calico.ps1 23 | 24 | echo "Installing Kubelet Service, Waiting 10 Seconds..." 25 | Start-Sleep -s 10 26 | c:\calicowindows\kubernetes\install-kube-services.ps1 27 | New-NetFirewallRule -Name 'Kubelet-In-TCP' -DisplayName 'Kubelet (node)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 10250 28 | Start-Service -Name kubelet 29 | Start-Service -Name kube-proxy 30 | 31 | echo "Testing kubectl..." 32 | kubectl get nodes -o wide 33 | 34 | 35 | echo "#########################################################" 36 | echo "Congratulations, Kubernetes is installed on Windows..." 37 | echo "Calico Ref: https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/kubernetes/standard" 38 | echo "#########################################################" 39 | # ref: https://medium.com/@lubomir-tobek/kubernetes-cluster-and-adding-a-windows-worker-node-0a5b65bffbaa 40 | -------------------------------------------------------------------------------- /create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install1.ps1: -------------------------------------------------------------------------------- 1 | echo "#########################################################" 2 | echo "Script will start in 10 Seconds..." 3 | Start-Sleep -s 10 4 | 5 | echo "Firewall rules : Allow All Traffic..." 6 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction OutBound -Action Allow 7 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction InBound -Action Allow 8 | 9 | echo "Installing Docker Container..." 10 | Install-WindowsFeature -Name containers 11 | 12 | # DockerMsftProvider Deprecated!! instead of it, using install-docker-ce.ps1 from Microsoft to install on Windows Servers 13 | #echo "Installing DockerMsftProvider, Waiting 15 Seconds..." 14 | #Start-Sleep -s 15 15 | #Install-Module DockerMsftProvider -Force 16 | #Install-Package Docker -ProviderName DockerMsftProvider -Force 17 | 18 | echo "Installing Docker, Waiting 10 Seconds..." 19 | Start-Sleep -s 10 20 | .\install-docker-ce.ps1 21 | 22 | echo "Setting Service Docker, Waiting 20 Seconds..." 23 | Start-Sleep -s 20 24 | Set-Service -Name docker -StartupType 'Automatic' 25 | 26 | echo "Installing additional Windows networking components: RemoteAccess, RSAT-RemoteAccess-PowerShell, Routing, Waiting 10 Seconds..." 27 | Start-Sleep -s 10 28 | Install-WindowsFeature RemoteAccess 29 | 30 | echo "Installing RSAT-RemoteAccess-PowerShell, Waiting 10 Seconds..." 31 | Start-Sleep -s 10 32 | Install-WindowsFeature RSAT-RemoteAccess-PowerShell 33 | 34 | echo "Installing Routing, Waiting 10 Seconds..." 35 | Start-Sleep -s 10 36 | Install-WindowsFeature Routing 37 | 38 | echo "#########################################################" 39 | echo "Docker and network components are installed..." 40 | echo "After Restart, please run INSTALL2.ps1..." 
41 | echo "Before to run this install2.ps1, please be sure creating 'k' directory under C directory (c:\k) and includes K8s config file..." 42 | echo "e.g. mkdir c:\k" 43 | echo "e.g. run on the master node: scp -r /home/ubuntu/.kube/config windowsuser@IP:C:\k" 44 | echo "e.g. or copy the config file content, and paste it on Windows c:\k" 45 | echo "#########################################################" 46 | echo "Computer will be restarted in 10 Seconds..." 47 | Start-Sleep -s 10 48 | Restart-Computer -Force 49 | -------------------------------------------------------------------------------- /K8s-CreatingPod-Imperative.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Creating Pod - Imperative Way 2 | 3 | This scenario shows: 4 | - how to create basic K8s pod using imperative commands, 5 | - how to get more information about pod (to solve troubleshooting), 6 | - how to run commands in pod, 7 | - how to delete pod. 8 | 9 | 10 | 11 | ### Steps 12 | 13 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) 14 | 15 | ![image](https://user-images.githubusercontent.com/10358317/153183333-371fe598-d5a4-4b86-9b5d-9e33f35063cc.png) 16 | 17 | - Run pod in imperative way 18 | - "kubectl run **podName** --image=**imageName**" 19 | - "kubectl get pods -o wide" : get info about pods 20 | 21 | ![image](https://user-images.githubusercontent.com/10358317/153183932-f8cd1547-3b10-47af-be3a-a1aedbfcf4ad.png) 22 | 23 | - Describe pod to get mor information about pods (when encountered troubleshooting): 24 | 25 | ![image](https://user-images.githubusercontent.com/10358317/153184743-b0617841-db71-4c02-8d7b-c0054d9249bd.png) 26 | 27 | - To reach logs in the pod (when encountered troubleshooting): 28 | 29 | ![image](https://user-images.githubusercontent.com/10358317/153185140-e7c2a4e3-29d0-4636-9586-62eec358c6bb.png) 30 | 31 | - To reach logs in the pod 2ith "-f" (LIVE Logs, attach to the pod's log): 32 | 33 | ![image](https://user-images.githubusercontent.com/10358317/153185353-1969fe8c-e166-492e-b55d-2d96cedf3709.png) 34 | 35 | - Run command on pod ("kubectl exec **podName** -- **command**"): 36 | 37 | ![image](https://user-images.githubusercontent.com/10358317/153185867-fbe27ddb-619d-4d3e-bbce-3f021c073ad8.png) 38 | 39 | - Entering into the pod and running bash or sh on pod: 40 | - "kubectl exec -it **podName** -- bash" 41 | - "kubectl exec -it **podName** -- /bins/sh" 42 | - exit from pods 2 ways: 43 | - "exit" command 44 | - "CTRL+P+Q" 45 | 46 | ![image](https://user-images.githubusercontent.com/10358317/153186349-4dff117c-66ca-46a9-8030-2bdf27e6e0bb.png) 47 | 48 | - Delete pod: 49 | 50 | ![image](https://user-images.githubusercontent.com/10358317/153187052-d3b12b0d-85cb-4885-afa9-9a7904dc964b.png) 51 | 52 | - Imperative way could be difficult to store and manage process. Every time we have to enter commands. To prevent this, we can use YAML file to define pods and pods' feature. This way is called Declerative Way. 
53 | 54 | -------------------------------------------------------------------------------- /K8s-Enable-Dashboard-On-Cluster.md: -------------------------------------------------------------------------------- 1 | ## LAB: Enable Dashboard on Cluster 2 | 3 | 4 | ### K8s Cluster (with Multipass VM) 5 | - K8s cluster was created before: 6 | - **Goto:** [K8s Kubeadm Cluster Setup](https://github.com/omerbsezer/Fast-Kubernetes/blob/main/K8s-Kubeadm-Cluster-Setup.md) 7 | 8 | ### Enable Dashboard on Cluster 9 | 10 | - To enable dashboard on cluster, apply yaml file (https://github.com/kubernetes/dashboard) 11 | ``` 12 | kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml 13 | kubectl proxy 14 | on browser: http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/ 15 | ``` 16 | 17 | ![image](https://user-images.githubusercontent.com/10358317/156365236-cda2797e-c786-41f4-b026-0a5779ebba5a.png) 18 | 19 | - Now we should find the token to enter dashboard as admin user. 20 | 21 | ``` 22 | kubectl create serviceaccount dashboard-admin-sa 23 | kubectl create clusterrolebinding dashboard-admin-sa --clusterrole=cluster-admin --serviceaccount=default:dashboard-admin-sa 24 | kubectl get secrets 25 | kubectl describe secret dashboard-admin-sa-token-m84l5 # token name changes, please find it using "kubectl get secrets" 26 | ``` 27 | 28 | ![image](https://user-images.githubusercontent.com/10358317/156364438-8b3a192d-b36c-4b8d-8aaf-6387e707ac08.png) 29 | 30 | ![image](https://user-images.githubusercontent.com/10358317/156364553-e917899b-b918-4cdc-b87f-5f59f63c63f9.png) 31 | 32 | ![image](https://user-images.githubusercontent.com/10358317/156364657-25877a8c-827a-4d59-8332-eb4b05f09de0.png) 33 | 34 | - Enter the token grabbed before: 35 | 36 | ![image](https://user-images.githubusercontent.com/10358317/156364296-a213c2fe-ad04-4ba7-97d4-fea5046aa6cf.png) 37 | 38 | - Now we reached the dashboard: 39 | 40 | ![image](https://user-images.githubusercontent.com/10358317/156365659-e6fc81a8-e5e4-4443-9ed3-3d839cc63842.png) 41 | 42 | ### Enable Dashboard on Minikube 43 | 44 | - Minikube has addons to enable dashboard: 45 | 46 | ``` 47 | minikube addons enable dashboard 48 | minikube addons enable metrics-server 49 | minikube dashboard 50 | # if running on WSL/WSL2 to open browser 51 | sensible-browser http://127.0.0.1:45771/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ 52 | ``` 53 | 54 | ![image](https://user-images.githubusercontent.com/10358317/152148024-6ec65b33-9fd0-42eb-89c3-927e453553a2.png) 55 | 56 | ### Reference 57 | 58 | - https://www.replex.io/blog/how-to-install-access-and-add-heapster-metrics-to-the-kubernetes-dashboard 59 | - https://github.com/kubernetes/dashboard 60 | -------------------------------------------------------------------------------- /labs/statefulset/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: cassandra 6 | name: cassandra 7 | spec: 8 | clusterIP: None 9 | ports: 10 | - port: 9042 11 | selector: 12 | app: cassandra 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: cassandra 18 | labels: 19 | app: cassandra 20 | spec: 21 | serviceName: cassandra 22 | replicas: 2 23 | selector: 24 | matchLabels: 25 | app: cassandra 26 | template: 27 | metadata: 28 | labels: 29 | app: cassandra 30 | spec: 31 
| terminationGracePeriodSeconds: 1800 32 | containers: 33 | - name: cassandra 34 | image: gcr.io/google-samples/cassandra:v13 35 | imagePullPolicy: Always 36 | ports: 37 | - containerPort: 7000 38 | name: intra-node 39 | - containerPort: 7001 40 | name: tls-intra-node 41 | - containerPort: 7199 42 | name: jmx 43 | - containerPort: 9042 44 | name: cql 45 | resources: 46 | limits: 47 | cpu: "500m" 48 | memory: 1Gi 49 | requests: 50 | cpu: "500m" 51 | memory: 1Gi 52 | securityContext: 53 | capabilities: 54 | add: 55 | - IPC_LOCK 56 | lifecycle: 57 | preStop: 58 | exec: 59 | command: 60 | - /bin/sh 61 | - -c 62 | - nodetool drain 63 | env: 64 | - name: MAX_HEAP_SIZE 65 | value: 512M 66 | - name: HEAP_NEWSIZE 67 | value: 100M 68 | - name: CASSANDRA_SEEDS 69 | value: "cassandra-0.cassandra.default.svc.cluster.local" 70 | - name: CASSANDRA_CLUSTER_NAME 71 | value: "K8Demo" 72 | - name: CASSANDRA_DC 73 | value: "DC1-K8Demo" 74 | - name: CASSANDRA_RACK 75 | value: "Rack1-K8Demo" 76 | - name: POD_IP 77 | valueFrom: 78 | fieldRef: 79 | fieldPath: status.podIP 80 | readinessProbe: 81 | exec: 82 | command: 83 | - /bin/bash 84 | - -c 85 | - /ready-probe.sh 86 | initialDelaySeconds: 15 87 | timeoutSeconds: 5 88 | volumeMounts: 89 | - name: cassandra-data 90 | mountPath: /cassandra_data 91 | volumeClaimTemplates: 92 | - metadata: 93 | name: cassandra-data 94 | spec: 95 | accessModes: [ "ReadWriteOnce" ] 96 | storageClassName: standard 97 | resources: 98 | requests: 99 | storage: 1Gi -------------------------------------------------------------------------------- /create_real_cluster/ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10/master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ## this script is to create K8s Master, 3 | ## usage => master.sh 4 | 5 | set -e -o pipefail # fail on error , debug all lines 6 | 7 | echo "Initiating K8s Cluster..." 8 | sudo kubeadm init --pod-network-cidr=172.24.0.0/16 --apiserver-advertise-address=$1 --control-plane-endpoint=$1 9 | mkdir -p $HOME/.kube 10 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 11 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 12 | 13 | echo "Install calico..." 14 | curl https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/calico.yaml > calico.yaml 15 | kubectl apply -f ./calico.yaml 16 | 17 | echo "Install calicoctl..." 18 | sudo curl -o /usr/local/bin/calicoctl -O -L "https://github.com/projectcalico/calico/releases/download/v3.25.0/calicoctl-linux-amd64" 19 | sudo chmod +x /usr/local/bin/calicoctl 20 | 21 | echo "Disable IPinIP..." 22 | echo "Waiting 40sec..." 23 | sleep 40 24 | calicoctl get ipPool 'default-ipv4-ippool' -o yaml 25 | calicoctl get ipPool 'default-ipv4-ippool' -o yaml > ippool.yaml 26 | sed -i 's/Always/Never/g' ippool.yaml 27 | calicoctl apply -f ippool.yaml 28 | 29 | echo "Configure felixconfig..." 30 | echo "Waiting 5sec..." 31 | sleep 5 32 | kubectl get felixconfigurations.crd.projectcalico.org default -o yaml -n kube-system > felixconfig.yaml 33 | sed -i 's/true/false/g' felixconfig.yaml 34 | kubectl apply -f felixconfig.yaml 35 | 36 | calicoctl ipam configure --strictaffinity=true 37 | sleep 2 38 | echo "" 39 | echo "*******" 40 | echo "*** Please REBOOT/RESTART the PC now..." 
41 | echo "*** After restart run on this Master node: kubeadm token create --print-join-command" 42 | echo "*** After restart if you encounter error (not to reach cluster, or API), please run closing swap commands again:" 43 | echo "*** sudo swapoff -a" 44 | echo "*** sudo sed -i '/ swap / s/^/#/' /etc/fstab" 45 | echo "*** Copy and Paste the response into the each WORKER Node with SUDO command..." 46 | kubeadm token create --print-join-command 47 | echo "" 48 | echo "*** K8s Master Node is now up and the cluster is created..." 49 | echo "*******" 50 | kubectl cluster-info 51 | kubectl get nodes -o wide 52 | #sudo reboot 53 | 54 | # https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/kubeconfig 55 | echo "*******" 56 | echo "*** Calico-Node secret will be created for Windows Calico..." 57 | echo "*******" 58 | kubectl apply -f - < master.sh 4 | 5 | set -e -o pipefail # fail on error , debug all lines 6 | 7 | echo "Initiating K8s Cluster..." 8 | sudo kubeadm init --pod-network-cidr=172.24.0.0/16 --apiserver-advertise-address=$1 --control-plane-endpoint=$1 9 | mkdir -p $HOME/.kube 10 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 11 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 12 | 13 | echo "Install calico..." 14 | curl https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/calico.yaml > calico.yaml 15 | kubectl apply -f ./calico.yaml 16 | 17 | echo "Install calicoctl..." 18 | sudo curl -o /usr/local/bin/calicoctl -O -L "https://github.com/projectcalico/calico/releases/download/v3.29.1/calicoctl-linux-amd64" 19 | sudo chmod +x /usr/local/bin/calicoctl 20 | 21 | echo "Disable IPinIP..." 22 | echo "Waiting 40sec..." 23 | sleep 40 24 | calicoctl get ipPool 'default-ipv4-ippool' -o yaml 25 | calicoctl get ipPool 'default-ipv4-ippool' -o yaml > ippool.yaml 26 | sed -i 's/Always/Never/g' ippool.yaml 27 | calicoctl apply -f ippool.yaml 28 | 29 | echo "Configure felixconfig..." 30 | echo "Waiting 5sec..." 31 | sleep 5 32 | kubectl get felixconfigurations.crd.projectcalico.org default -o yaml -n kube-system > felixconfig.yaml 33 | sed -i 's/true/false/g' felixconfig.yaml 34 | kubectl apply -f felixconfig.yaml 35 | 36 | calicoctl ipam configure --strictaffinity=true 37 | sleep 2 38 | echo "" 39 | echo "*******" 40 | echo "*** Please REBOOT/RESTART the PC now..." 41 | echo "*** After restart run on this Master node: kubeadm token create --print-join-command" 42 | echo "*** After restart if you encounter error (not to reach cluster, or API), please run closing swap commands again:" 43 | echo "*** sudo swapoff -a" 44 | echo "*** sudo sed -i '/ swap / s/^/#/' /etc/fstab" 45 | echo "*** Copy and Paste the response into the each WORKER Node with SUDO command..." 46 | kubeadm token create --print-join-command 47 | echo "" 48 | echo "*** K8s Master Node is now up and the cluster is created..." 49 | echo "*******" 50 | kubectl cluster-info 51 | kubectl get nodes -o wide 52 | #sudo reboot 53 | 54 | # https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/kubeconfig 55 | echo "*******" 56 | echo "*** Calico-Node secret will be created for Windows Calico..." 57 | echo "*******" 58 | kubectl apply -f - < ./install.sh 5 | 6 | set -e -o pipefail # fail on error , debug all lines 7 | 8 | sudo apt-get update 9 | sudo apt-get upgrade -y 10 | 11 | echo "Configuring k8s.conf..." 
12 | cat < /dev/null 65 | sudo apt-get update 66 | sudo apt-get install -y docker-ce docker-ce-cli containerd.io 67 | 68 | cd /etc/docker 69 | sudo touch /etc/docker/daemon.json 70 | 71 | echo "Configuring docker daemon.json..." 72 | cat < master.sh " 88 | echo "*** If you are installing worker node, on master node: kubeadm token create --print-join-command" 89 | echo "*** Copy and Paste the response into the each WORKER Node with SUDO command..." 90 | echo "*******" 91 | -------------------------------------------------------------------------------- /create_real_cluster/ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install-ubuntu24.04-k8s1.32.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ## this script is to install K8s dependency 3 | ## before using: chmod 777 install.sh 4 | ## usage => ./install.sh 5 | 6 | set -e -o pipefail # fail on error , debug all lines 7 | 8 | sudo apt-get update 9 | sudo apt-get upgrade -y 10 | 11 | echo "Configuring k8s.conf..." 12 | cat < /dev/null 71 | sudo apt-get update 72 | sudo apt-get install -y docker-ce docker-ce-cli containerd.io 73 | 74 | cd /etc/docker 75 | sudo touch /etc/docker/daemon.json 76 | 77 | echo "Configuring docker daemon.json..." 78 | cat < master.sh " 94 | echo "*** If you are installing worker node, on master node: kubeadm token create --print-join-command" 95 | echo "*** Copy and Paste the response into the each WORKER Node with SUDO command..." 96 | echo "*******" 97 | -------------------------------------------------------------------------------- /K8s-Liveness-App.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Liveness Probe 2 | 3 | This scenario shows how the liveness probe works. 4 | 5 | ### Steps 6 | 7 | - Create 3 Pods with following YAML file (liveness.yaml): 8 | - In the first pod (e.g. web app), it sends HTTP Get Request to "http://localhost/healthz:8080" (port 8080) 9 | - If returns 400 > HTTP Code > 200, this Pod works correctly. 10 | - If returns HTTP Code > = 400, this Pod does not work properly. 11 | - initialDelaySeconds:3 => after 3 seconds, start liveness probe. 12 | - periodSecond: 3 => Wait 3 seconds between each request. 13 | - In the second pod (e.g. console app), it controls whether a file ("healty") exists or not under specific directory ("/tmp/") with "cat" app. 14 | - If returns 0 code, this Pod works correctly. 15 | - If returns different code except for 0 code, this Pod does not work properly. 16 | - initialDelaySeconds: 5 => after 5 seconds, start liveness probe. 17 | - periodSecond: 5 => Wait 5 seconds between each request. 18 | - In the third pod (e.g. database app: mysql), it sends request over TCP Socket. 19 | - If returns positive response, this Pod works correctly. 20 | - If returns negative response (e.g. connection refuse), this Pod does not work properly. 21 | - initialDelaySeconds: 15 => after 15 seconds, start liveness probe. 22 | - periodSecond: 20 => Wait 20 seconds between each request. 
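- To watch the probes acting, the RESTARTS counter is the quickest signal (a minimal sketch, standard kubectl; pod names are from the YAML below):

```
kubectl get pods -w                    # RESTARTS increases every time a liveness probe fails
kubectl describe pod liveness-exec     # probe failures are listed under Events
```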
23 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/liveness/liveness.yaml 24 | 25 | ``` 26 | apiVersion: v1 27 | kind: Pod 28 | metadata: 29 | labels: 30 | test: liveness 31 | name: liveness-http 32 | spec: 33 | containers: 34 | - name: liveness 35 | image: k8s.gcr.io/liveness 36 | args: 37 | - /server 38 | livenessProbe: 39 | httpGet: 40 | path: /healthz 41 | port: 8080 42 | httpHeaders: 43 | - name: Custom-Header 44 | value: Awesome 45 | initialDelaySeconds: 3 46 | periodSeconds: 3 47 | --- 48 | apiVersion: v1 49 | kind: Pod 50 | metadata: 51 | labels: 52 | test: liveness 53 | name: liveness-exec 54 | spec: 55 | containers: 56 | - name: liveness 57 | image: k8s.gcr.io/busybox 58 | args: 59 | - /bin/sh 60 | - -c 61 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 62 | livenessProbe: 63 | exec: 64 | command: 65 | - cat 66 | - /tmp/healthy 67 | initialDelaySeconds: 5 68 | periodSeconds: 5 69 | --- 70 | apiVersion: v1 71 | kind: Pod 72 | metadata: 73 | name: goproxy 74 | labels: 75 | app: goproxy 76 | spec: 77 | containers: 78 | - name: goproxy 79 | image: k8s.gcr.io/goproxy:0.1 80 | ports: 81 | - containerPort: 8080 82 | livenessProbe: 83 | tcpSocket: 84 | port: 8080 85 | initialDelaySeconds: 15 86 | periodSeconds: 20 87 | ``` 88 | 89 | ![image](https://user-images.githubusercontent.com/10358317/154686744-fa7bd4bd-6cf4-460f-bbe8-93f827eeb1de.png) 90 | 91 | ![image](https://user-images.githubusercontent.com/10358317/154686826-0828adb8-7581-4d56-987f-7858bd0711b4.png) 92 | 93 | ![image](https://user-images.githubusercontent.com/10358317/154686913-4d5cc891-b3cc-497d-b8be-568faccf4bc0.png) 94 | 95 | - Run on terminal: kubectl apply -f liveness.yaml 96 | - Run on another terminal: kubectl get pods -o wide --all-namespaces 97 | 98 | ![image](https://user-images.githubusercontent.com/10358317/150846081-7e9142d1-b833-431f-82bc-a7385c73a875.png) 99 | 100 | - Run to see details of liveness-http pod: kubectl describe pod liveness-http 101 | 102 | ![image](https://user-images.githubusercontent.com/10358317/150846456-5273b1f8-7043-4fa1-804c-77da74aca8de.png) 103 | -------------------------------------------------------------------------------- /K8s-Deployment.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Deployment - Scale Up/Down - Bash Connection - Port Forwarding 2 | 3 | This scenario shows: 4 | - how to create deployment, 5 | - how to get detail information of deployment and pods, 6 | - how to scale up and down of deployment, 7 | - how to connect to the one of the pods with bash, 8 | - how to show ethernet interfaces of the pod and ping other pods, 9 | - how to forward ports to see nginx server page using browser. 10 | 11 | ### Steps 12 | 13 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start") 14 | 15 | ![image](https://user-images.githubusercontent.com/10358317/153183333-371fe598-d5a4-4b86-9b5d-9e33f35063cc.png) 16 | 17 | - Create Yaml file (deployment1.yaml) in your directory and copy the below definition into the file. 
18 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/deployment/deployment1.yaml 19 | 20 | ``` 21 | apiVersion: apps/v1 22 | kind: Deployment 23 | metadata: 24 | name: firstdeployment 25 | labels: 26 | team: development 27 | spec: 28 | replicas: 3 29 | selector: # deployment selector 30 | matchLabels: # deployment selects "app:frontend" pods, monitors and traces these pods 31 | app: frontend # if one of the pod is killed, K8s looks at the desire state (replica:3), it recreats another pods to protect number of replicas 32 | template: 33 | metadata: 34 | labels: # pod labels, if the deployment selector is same with these labels, deployment follows pods that have these labels 35 | app: frontend # key: value 36 | spec: 37 | containers: 38 | - name: nginx 39 | image: nginx:latest # image download from DockerHub 40 | ports: 41 | - containerPort: 80 # open following ports 42 | ``` 43 | 44 | ![image](https://user-images.githubusercontent.com/10358317/154119883-5ffcaaaa-572e-427e-b6d6-65e3a8723121.png) 45 | 46 | 47 | - Create deployment and list the deployment's pods: 48 | 49 | ![image](https://user-images.githubusercontent.com/10358317/153439583-c445b070-ac27-4838-8943-466261abf635.png) 50 | 51 | - Delete one of the pod, then K8s automatically creates new pod: 52 | 53 | ![image](https://user-images.githubusercontent.com/10358317/153440362-a95dbc41-2cc0-4ec6-8830-8924f3c4a2f7.png) 54 | 55 | - Scale up to 5 replicas: 56 | 57 | ![image](https://user-images.githubusercontent.com/10358317/153440932-39f98de1-c129-4d7d-a4e6-79acbed070ea.png) 58 | 59 | - Scale down to 3 replicas: 60 | 61 | ![image](https://user-images.githubusercontent.com/10358317/153441111-558460c7-e35e-4db3-9028-50b6c9149043.png) 62 | 63 | - Get more information about pods (ip, node): 64 | 65 | ![image](https://user-images.githubusercontent.com/10358317/153442941-da17b07e-ad14-49ae-84b3-d9902535f9a7.png) 66 | 67 | 68 | - Connect one of the pod with bash: 69 | 70 | ![image](https://user-images.githubusercontent.com/10358317/153442294-efb4dfa5-0753-404c-b1bf-896a8d8ed436.png) 71 | 72 | - To install ifconfig, run: "apt update", "apt install net-tools" 73 | - To install ping, run: "apt install iputils-ping" 74 | - Show ethernet interfaces: 75 | 76 | ![image](https://user-images.githubusercontent.com/10358317/153442647-32ea74cd-dd46-4631-b896-f90ec1afb1a3.png) 77 | 78 | - Ping other pods: 79 | 80 | ![image](https://user-images.githubusercontent.com/10358317/153443214-d0e3dc55-e4ef-449a-8b9e-35a45ecb2675.png) 81 | 82 | - Port-forward from one of the pod to host (8085:80): 83 | 84 | ![image](https://user-images.githubusercontent.com/10358317/153443668-18071c34-0e80-4ecd-a3e9-ae9570bd9d7d.png) 85 | 86 | - On the browser, goto http://127.0.0.1:8085/ 87 | 88 | ![image](https://user-images.githubusercontent.com/10358317/153443803-709fdf31-7d16-4268-a1f1-8fc822abc471.png) 89 | 90 | - Delete deployment: 91 | 92 | ![image](https://user-images.githubusercontent.com/10358317/153444098-e52f2cde-3fd2-4606-b68c-89e6f9194398.png) 93 | 94 | -------------------------------------------------------------------------------- /K8s-Daemon-Sets.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Daemon Sets 2 | 3 | This scenario shows how K8s Daemonsets work on minikube by adding new nodes 4 | 5 | ### Steps 6 | 7 | - Copy and save (below) as file on your PC (daemonset.yaml). 
8 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/daemonset/daemonset.yaml 9 | 10 | ``` 11 | apiVersion: apps/v1 12 | kind: DaemonSet 13 | metadata: 14 | name: logdaemonset 15 | labels: 16 | app: fluentd-logging 17 | spec: 18 | selector: 19 | matchLabels: # label selector should be same labels in the template (template > metadata > labels) 20 | name: fluentd-elasticsearch 21 | template: 22 | metadata: 23 | labels: 24 | name: fluentd-elasticsearch 25 | spec: 26 | tolerations: 27 | - key: node-role.kubernetes.io/master # this toleration is to have the daemonset runnable on master nodes 28 | effect: NoSchedule # remove it if your masters can't run pods 29 | containers: 30 | - name: fluentd-elasticsearch 31 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 # installing fluentd elasticsearch on each nodes 32 | resources: 33 | limits: 34 | memory: 200Mi # resource limitations configured 35 | requests: 36 | cpu: 100m 37 | memory: 200Mi 38 | volumeMounts: # definition of volumeMounts for each pod 39 | - name: varlog 40 | mountPath: /var/log 41 | - name: varlibdockercontainers 42 | mountPath: /var/lib/docker/containers 43 | readOnly: true 44 | terminationGracePeriodSeconds: 30 45 | volumes: # ephemerial volumes on node (hostpath defined) 46 | - name: varlog 47 | hostPath: 48 | path: /var/log 49 | - name: varlibdockercontainers 50 | hostPath: 51 | path: /var/lib/docker/containers 52 | ``` 53 | 54 | ![image](https://user-images.githubusercontent.com/10358317/154733287-2c65a70a-2d9f-4b69-969e-8e2938ce425d.png) 55 | 56 | - Create daemonset on minikube: 57 | 58 | ![image](https://user-images.githubusercontent.com/10358317/152146006-265e0595-cdf5-43c7-aea2-5437700323fd.png) 59 | 60 | - Run watch command on Linux: "watch kubectl get daemonset", on Win: "kubectl get daemonset -w" 61 | 62 | ![image](https://user-images.githubusercontent.com/10358317/152146266-00d1f1b8-f2dc-495f-ab35-15e3d1629278.png) 63 | 64 | - Add new node on the cluster: 65 | 66 | ![image](https://user-images.githubusercontent.com/10358317/152146458-14a66e8a-fcad-4a15-ac3e-6df1af4a43a4.png) 67 | 68 | - To see, app runs automatically on the new node: 69 | 70 | ![image](https://user-images.githubusercontent.com/10358317/152147031-b934d393-8caf-49c3-ac4c-3b704f2d646a.png) 71 | 72 | - Add new node (3rd): 73 | 74 | ![image](https://user-images.githubusercontent.com/10358317/152151984-ac8fd54c-676d-4be4-b2f1-4356613a8fed.png) 75 | 76 | - Now daemonset have 3rd node: 77 | 78 | ![image](https://user-images.githubusercontent.com/10358317/152152156-c8cd559e-48dc-4ea3-85c9-6da7fbeb0794.png) 79 | 80 | - Delete one of the pod: 81 | 82 | ![image](https://user-images.githubusercontent.com/10358317/152152437-7c883cd5-e809-4386-8832-362a612acf5f.png) 83 | 84 | - Pod deletion can be seen here: 85 | 86 | ![image](https://user-images.githubusercontent.com/10358317/152152613-854c5340-c73b-4d72-bd08-951aa640d8ad.png) 87 | 88 | - Daemonset create new pod automatically: 89 | 90 | ![image](https://user-images.githubusercontent.com/10358317/152152744-9f14751b-e214-4621-8208-1cb5437b6d71.png) 91 | 92 | - See the nodes resource on dashboard: 93 | 94 | ![image](https://user-images.githubusercontent.com/10358317/152153072-5e53cd9c-42ba-4f50-85d8-c82ea1e39752.png) 95 | 96 | - Delete nodes and delete daemonset: 97 | 98 | ![image](https://user-images.githubusercontent.com/10358317/152153355-b98bca05-87cd-46d2-a26d-eb614ca263ca.png) 99 | 100 | 101 | 102 | -------------------------------------------------------------------------------- 
/K8s-Taint-Toleration.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Taint Toleration 2 | 3 | This scenario shows: 4 | - how to taint/untaint the node, 5 | - how to see the node details, 6 | - the pod that does not tolerate the taint is not running the node. 7 | 8 | 9 | ### Steps 10 | 11 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start") 12 | 13 | ![image](https://user-images.githubusercontent.com/10358317/153183333-371fe598-d5a4-4b86-9b5d-9e33f35063cc.png) 14 | 15 | - Create Yaml file (podtoleration.yaml) in your directory and copy the below definition into the file. 16 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/tainttoleration/podtoleration.yaml 17 | 18 | ``` 19 | apiVersion: v1 20 | kind: Pod 21 | metadata: 22 | name: toleratedpod1 23 | labels: 24 | env: test 25 | spec: 26 | containers: 27 | - name: toleratedcontainer1 28 | image: nginx:latest 29 | tolerations: # pod tolerates "app=production:NoSchedule" 30 | - key: "app" 31 | operator: "Equal" 32 | value: "production" 33 | effect: "NoSchedule" 34 | --- 35 | apiVersion: v1 36 | kind: Pod 37 | metadata: 38 | name: toleratedpod2 39 | labels: 40 | env: test 41 | spec: 42 | containers: 43 | - name: toleratedcontainer2 44 | image: nginx:latest 45 | tolerations: 46 | - key: "app" # pod tolerates "app:NoSchedule", value is not important in this pod 47 | operator: "Exists" # pod can run on the nodes which has "app=test:NoSchedule" or "app=production:NoSchedule" 48 | effect: "NoSchedule" 49 | ``` 50 | 51 | ![image](https://user-images.githubusercontent.com/10358317/154731410-8f2da14f-b98f-4958-8335-6488cb00e89f.png) 52 | 53 | ![image](https://user-images.githubusercontent.com/10358317/154731465-9d15d24d-089a-4f93-8b3d-0a9f637c0b1f.png) 54 | 55 | - When we look at the node details, there is not any taint on the node (minikube): 56 | ``` 57 | kubectl describe node minikube 58 | ``` 59 | ![image](https://user-images.githubusercontent.com/10358317/153669930-0ef1e295-f11d-49a3-9df0-4caae0a43349.png) 60 | 61 | - Add taint to the node (minikube): 62 | ``` 63 | kubectl taint node minikube platform=production:NoSchedule 64 | ``` 65 | ![image](https://user-images.githubusercontent.com/10358317/153670171-a5c3366b-c996-4d45-acd3-33dada7222b8.png) 66 | 67 | - Create pod that does not tolerate the taint: 68 | ``` 69 | kubectl run test --image=nginx --restart=Never 70 | ``` 71 | ![image](https://user-images.githubusercontent.com/10358317/153670451-f7a2657b-9c34-413e-8a00-b4c5f645e088.png) 72 | 73 | - This pod always waits as pending, because it is not tolerated the taints: 74 | 75 | ![image](https://user-images.githubusercontent.com/10358317/153670590-3477dd11-d328-4291-96fa-8b811a301037.png) 76 | 77 | ![image](https://user-images.githubusercontent.com/10358317/153670825-0c2e7736-0d1c-4b97-be57-0fbae607ccc6.png) 78 | 79 | 80 | - In the yaml file above (podtoleration.yaml), we have 2 pods that tolerates this taint => "app=production:NoSchedule" 81 | - Create these 2 pods: 82 | 83 | ![image](https://user-images.githubusercontent.com/10358317/153671055-2bf48e13-abbe-46dd-8dd6-14274109a503.png) 84 | 85 | - These pods tolerate the taint and they are running on the node, but "test" does not tolerate the taint, it still waits: 86 | 87 | ![image](https://user-images.githubusercontent.com/10358317/153671160-c96e5084-4314-486b-9d57-850acf63e973.png) 88 | 89 | - But if we define another taint with "NoExecute", running pods are terminated: 90 | ``` 91 | 
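# NoExecute also evicts pods that are already running and do not tolerate the taint (NoSchedule only blocks new scheduling)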
kubectl taint node minikube version=new:NoExecute 92 | ``` 93 | ![image](https://user-images.githubusercontent.com/10358317/153671667-f5901893-9a9b-4f59-b482-30639432c0af.png) 94 | 95 | ![image](https://user-images.githubusercontent.com/10358317/153672106-436e0268-82e1-40da-990f-9d98fbfd44ca.png) 96 | 97 | - Delete taint from the node: 98 | ``` 99 | kubectl taint node minikube version- 100 | ``` 101 | ![image](https://user-images.githubusercontent.com/10358317/153672236-97528ceb-aedd-4bb4-b8b1-172215027237.png) 102 | 103 | - Delete minikube: 104 | 105 | ![image](https://user-images.githubusercontent.com/10358317/153672400-2d2b7843-5acb-4e8a-8a3b-5aef04dc2a80.png) 106 | -------------------------------------------------------------------------------- /K8s-Statefulset.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Stateful Set - Nginx 2 | 3 | This scenario shows how K8s statefulset object works on minikube 4 | 5 | ### Steps 6 | 7 | - Copy and save (below) as file on your PC (statefulset_nginx.yaml). 8 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/statefulset/statefulset.yaml 9 | 10 | ``` 11 | apiVersion: v1 12 | kind: Service 13 | metadata: 14 | name: nginx # create a service with "nginx" name 15 | labels: 16 | app: nginx 17 | spec: 18 | ports: 19 | - port: 80 20 | name: web # create headless service if clusterIP:None 21 | clusterIP: None # when requesting service name, service returns one of the IP of pods 22 | selector: # headless service provides to reach pod with podName.serviceName 23 | app: nginx # selects/binds to app:nginx (defined in: spec > template > metadata > labels > app:nginx) 24 | --- 25 | apiVersion: apps/v1 26 | kind: StatefulSet 27 | metadata: 28 | name: web # statefulset name: web 29 | spec: 30 | serviceName: nginx # binds/selects service (defined in metadata > name: nginx) 31 | replicas: 3 32 | selector: 33 | matchLabels: 34 | app: nginx 35 | template: 36 | metadata: 37 | labels: 38 | app: nginx 39 | spec: 40 | containers: 41 | - name: nginx 42 | image: k8s.gcr.io/nginx-slim:0.8 43 | ports: 44 | - containerPort: 80 45 | name: web 46 | volumeMounts: 47 | - name: www 48 | mountPath: /usr/share/nginx/html 49 | volumeClaimTemplates: 50 | - metadata: 51 | name: www 52 | spec: 53 | accessModes: [ "ReadWriteOnce" ] # creates PVCs for each pod automatically 54 | resources: # hence, each node has own PV 55 | requests: 56 | storage: 512Mi 57 | ``` 58 | 59 | ![image](https://user-images.githubusercontent.com/10358317/154945153-d9b61958-94d2-44f0-a900-14494aeb41f7.png) 60 | 61 | ![image](https://user-images.githubusercontent.com/10358317/154945314-974de8ae-4456-4711-b499-8aad664b847a.png) 62 | 63 | - Create statefulset and pvc: 64 | 65 | ![image](https://user-images.githubusercontent.com/10358317/152322911-47e14c25-9f86-49ff-bdcf-df74e38e5939.png) 66 | 67 | - Pods are created with statefulsetName-0,1,2 (e.g. web-0) 68 | 69 | ![image](https://user-images.githubusercontent.com/10358317/152323071-a79b5d15-22e4-424b-86a3-f84a77377b69.png) 70 | 71 | - PVCs and PVs are automatically created for each pod. Even if pod is restarted again, same PV is bound to same pod. 
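- This can be verified directly; the generated claim names follow the volumeClaimTemplateName-podName pattern (e.g. www-web-0):

```
kubectl get pvc
kubectl get pv
```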
72 | 73 | ![image](https://user-images.githubusercontent.com/10358317/152324124-bbae308a-533f-4476-8206-6d53c6b9b648.png) 74 | 75 | - Scaled from 3 Pods to 4 Pods: 76 | 77 | ![image](https://user-images.githubusercontent.com/10358317/152324908-762100ca-94b3-4db4-b73e-9ad09c32588d.png) 78 | 79 | - New pod's name is not assigned randomly, assigned in order and got "web-4" name. 80 | 81 | ![image](https://user-images.githubusercontent.com/10358317/152325051-2f757f13-77ae-4aab-84d9-d6f6c8a04c1c.png) 82 | 83 | - Scale down to 3 Pods again: 84 | 85 | ![image](https://user-images.githubusercontent.com/10358317/152325305-c10782a2-a8e2-4c5b-8da9-7ca90de9e00a.png) 86 | 87 | - Last created pod is deleted: 88 | 89 | ![image](https://user-images.githubusercontent.com/10358317/152325429-20d84fdf-aeb2-45e7-8790-55ba3a28b197.png) 90 | 91 | - When creating headless service, service does not get any IP (e.g. None) 92 | 93 | ![image](https://user-images.githubusercontent.com/10358317/152325883-3b833268-cae9-4863-9e05-af80b0cefa8d.png) 94 | 95 | - With headless service, service returns one of the IP, service balances the load between pods (loadbalacing between pods) 96 | 97 | ![image](https://user-images.githubusercontent.com/10358317/152327066-45cb6cf0-b988-48a7-aef7-2e8295334280.png) 98 | 99 | - If we ping the specific pod with podName.serviceName (e.g. ping web-0.nginx), it returns the IP of the that pod. 100 | - With statefulset, the name of the pod is known, this helps to ping pods with name of the pod. 101 | 102 | ![image](https://user-images.githubusercontent.com/10358317/152327651-449cb69b-fe2e-45a9-b0b1-bd01fa340eff.png) 103 | 104 | -------------------------------------------------------------------------------- /K8s-Multicontainer-Sidecar.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Multicontainer - Sidecar - Emptydir Volume - Port-Forwarding 2 | 3 | This scenario shows: 4 | - how to create multicontainer in one pod, 5 | - how the multicontainers in the same pod have same ethernet interface (IPs), 6 | - how the multicontainers in the same pod can reach the shared volume area, 7 | - how to make port-forwarding to host PC ports 8 | 9 | ### Steps 10 | 11 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start") 12 | 13 | ![image](https://user-images.githubusercontent.com/10358317/153183333-371fe598-d5a4-4b86-9b5d-9e33f35063cc.png) 14 | 15 | - Create Yaml file (multicontainer.yaml) in your directory and copy the below definition into the file. 
16 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/pod/multicontainer.yaml 17 | 18 | ``` 19 | apiVersion: v1 20 | kind: Pod 21 | metadata: 22 | name: multicontainer 23 | spec: 24 | containers: 25 | - name: webcontainer # container name: webcontainer 26 | image: nginx # image from nginx 27 | ports: # opening-port: 80 28 | - containerPort: 80 29 | volumeMounts: 30 | - name: sharedvolume 31 | mountPath: /usr/share/nginx/html # path in the container 32 | - name: sidecarcontainer 33 | image: busybox # sidecar, second container image is busybox 34 | command: ["/bin/sh"] # it pulls index.html file from github every 15 seconds 35 | args: ["-c", "while true; do wget -O /var/log/index.html https://raw.githubusercontent.com/omerbsezer/Fast-Kubernetes/main/index.html; sleep 15; done"] 36 | volumeMounts: 37 | - name: sharedvolume 38 | mountPath: /var/log 39 | volumes: # define emptydir temporary volume, when the pod is deleted, volume also deleted 40 | - name: sharedvolume # name of volume 41 | emptyDir: {} # volume type emtpydir: creates empty directory where the pod is runnning 42 | ``` 43 | 44 | ![image](https://user-images.githubusercontent.com/10358317/154714091-7355eb36-20d1-4002-a46e-dce56bba5570.png) 45 | 46 | - Create multicontainer on the pod (webcontainer and sidecarcontainer): 47 | 48 | ![image](https://user-images.githubusercontent.com/10358317/153407239-c74aa02d-dc51-4ce3-a680-ec777db8477b.png) 49 | 50 | - Connect (/bin/sh of the webcontainer) and install net-tools to show ethernet interface (IP: 172.17.0.3) 51 | 52 | ![image](https://user-images.githubusercontent.com/10358317/153408261-bdd4b6b5-c44f-4a12-9959-85cb9c582178.png) 53 | 54 | - Connect (/bin/sh of the sidecarcontainer) and show ethernet interface (IP: 172.17.0.3). 55 | - Containers running on same pod have same ethernet interfaces and same IPs (172.17.0.3). 56 | 57 | ![image](https://user-images.githubusercontent.com/10358317/153408722-d01eff1c-64e9-4020-a556-9d44a7a0a4f8.png) 58 | 59 | - Under the webcontainer, the shared volume with sidecarcontainer can be reachable: 60 | 61 | ![image](https://user-images.githubusercontent.com/10358317/153412202-bfb7533a-1960-4436-b10b-69f4d788a4ae.png) 62 | 63 | - It can be seen from sidecarcontainer. Both of the container can reach same volume area. 64 | - If the new file is created on this volume, other container can also reach same new file. 65 | 66 | ![image](https://user-images.githubusercontent.com/10358317/153412522-9214cf3c-d529-4381-b668-a8ad84f95ad5.png) 67 | 68 | - When we look at the sidecarcontainer logs, it pulls index.html file from "https://raw.githubusercontent.com/omerbsezer/Fast-Kubernetes/main/index.html" every 15 seconds. 
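- To follow those logs yourself, the container name must be given explicitly, because this pod runs two containers:

```
kubectl logs -f multicontainer -c sidecarcontainer
```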
69 | 70 | ![image](https://user-images.githubusercontent.com/10358317/153412851-3f9763b8-9cfe-4822-b869-b2333f580e77.png) 71 | 72 | - We can forward the port of the pod to the host PC port (hostPort:containerPort, e.g: 8080:80): 73 | 74 | ![image](https://user-images.githubusercontent.com/10358317/153413173-55554d77-2531-4fbe-88e2-1e84ded64be7.png) 75 | 76 | - On the browser, goto http://127.0.0.1:8080/ 77 | 78 | ![image](https://user-images.githubusercontent.com/10358317/153413389-f5eec26e-b2cd-44f9-a968-e6133550bfc6.png) 79 | 80 | 81 | - After updating the content of the index.html, new html page will be downloaded by the sidecarcontainer: 82 | 83 | ![image](https://user-images.githubusercontent.com/10358317/153414407-3caf71b0-1286-42e8-87e4-d7d1ba47c356.png) 84 | 85 | - Exit from the container shell and delete multicontainer in a one pod: 86 | 87 | ![image](https://user-images.githubusercontent.com/10358317/153416457-65d792fb-62f2-4015-aefd-8f7305379f23.png) 88 | -------------------------------------------------------------------------------- /HelmCheatsheet.md: -------------------------------------------------------------------------------- 1 | ## Helm Commands Cheatsheet 2 | 3 | ### 1. Help, Version 4 | 5 | #### See the general help for Helm 6 | ``` 7 | helm --help 8 | ``` 9 | #### See help for a particular command 10 | ``` 11 | helm [command] --help 12 | ``` 13 | #### See the installed version of Helm 14 | ``` 15 | helm version 16 | ``` 17 | 18 | ### 2. Repo Add, Remove, Update 19 | 20 | #### Add a repository from the internet 21 | ``` 22 | helm repo add [name] [url] 23 | ``` 24 | #### Remove a repository from your system 25 | ``` 26 | helm repo remove [name] 27 | ``` 28 | #### Update repositories 29 | ``` 30 | helm repo update 31 | ``` 32 | 33 | ### 3. Repo List, Search 34 | 35 | #### List chart repositories 36 | ``` 37 | helm repo list 38 | ``` 39 | #### Search charts for a keyword 40 | ``` 41 | helm search [keyword] 42 | ``` 43 | #### Search repositories for a keyword 44 | ``` 45 | helm search repo [keyword] 46 | ``` 47 | #### Search Helm Hub 48 | ``` 49 | helm search hub [keyword] 50 | ``` 51 | 52 | ### 4. Install/Uninstall 53 | 54 | #### Install an app 55 | ``` 56 | helm install [name] [chart] 57 | ``` 58 | 59 | #### Install an app in a specific namespace 60 | ``` 61 | helm install [name] [chart] --namespace [namespace] 62 | ``` 63 | 64 | #### Override the default values with those specified in a file of your choice 65 | ``` 66 | helm install [name] [chart] --values [yaml-file/url] 67 | ``` 68 | 69 | #### Run a test install to validate and verify the chart 70 | ``` 71 | helm install [name] --dry-run --debug 72 | ``` 73 | 74 | #### Uninstall a release 75 | ``` 76 | helm uninstall [release name] 77 | ``` 78 | 79 | ### 5. 
Chart Management 80 | 81 | #### Create a directory containing the common chart files and directories 82 | ``` 83 | helm create [name] 84 | ``` 85 | 86 | #### Package a chart into a chart archive 87 | ``` 88 | helm package [chart-path] 89 | ``` 90 | 91 | #### Run tests to examine a chart and identify possible issues 92 | ``` 93 | helm lint [chart] 94 | ``` 95 | 96 | #### Inspect a chart and list its contents 97 | ``` 98 | helm show all [chart] 99 | ``` 100 | #### Display the chart’s definition 101 | ``` 102 | helm show chart [chart] 103 | ``` 104 | 105 | #### Download a chart 106 | ``` 107 | helm pull [chart] 108 | ``` 109 | 110 | #### Download a chart and extract the archive’s contents into a directory 111 | ``` 112 | helm pull [chart] --untar --untardir [directory] 113 | ``` 114 | 115 | #### Display a list of a chart’s dependencies 116 | ``` 117 | helm dependency list [chart] 118 | ``` 119 | 120 | ### 6. Release Monitoring 121 | 122 | #### List all the available releases in the current namespace 123 | ``` 124 | helm list 125 | ``` 126 | #### List all the available releases across all namespaces 127 | ``` 128 | helm list --all-namespaces 129 | ``` 130 | #### List all the releases in a specific namespace 131 | ``` 132 | helm list --namespace [namespace] 133 | ``` 134 | #### List all the releases in a specific output format 135 | ``` 136 | helm list --output [format] 137 | ``` 138 | #### See the status of a release 139 | ``` 140 | helm status [release] 141 | ``` 142 | #### See the release history 143 | ``` 144 | helm history [release] 145 | ``` 146 | #### See information about the Helm client environment 147 | ``` 148 | helm env 149 | ``` 150 | 151 | ### 7. Upgrade/Rollback 152 | 153 | #### Upgrade an app 154 | ``` 155 | helm upgrade [release] [chart] 156 | ``` 157 | 158 | #### Tell Helm to roll back changes if the upgrade fails 159 | ``` 160 | helm upgrade [release] [chart] --atomic 161 | ``` 162 | 163 | #### Upgrade a release. If it does not exist on the system, install it 164 | ``` 165 | helm upgrade [release] [chart] --install 166 | ``` 167 | 168 | #### Upgrade to a version other than the latest one 169 | ``` 170 | helm upgrade [release] [chart] --version [version-number] 171 | ``` 172 | 173 | #### Roll back a release 174 | ``` 175 | helm rollback [release] [revision] 176 | ``` 177 | 178 | ### 8. GET Information 179 | 180 | #### Download all the release information 181 | ``` 182 | helm get all [release] 183 | ``` 184 | #### Download all hooks 185 | ``` 186 | helm get hooks [release] 187 | ``` 188 | #### Download the manifest 189 | ``` 190 | helm get manifest [release] 191 | ``` 192 | #### Download the notes 193 | ``` 194 | helm get notes [release] 195 | ``` 196 | #### Download the values file 197 | ``` 198 | helm get values [release] 199 | ``` 200 | #### Release history 201 | ``` 202 | helm history [release] 203 | ``` 204 | 205 | ### 9. 
Plugin 206 | 207 | #### Install plugins 208 | ``` 209 | helm plugin install [path/url1] [path/url2] 210 | ``` 211 | #### View a list of all the installed plugins 212 | ``` 213 | helm plugin list 214 | ``` 215 | #### Update plugins 216 | ``` 217 | helm plugin update [plugin1] [plugin2] 218 | ``` 219 | #### Uninstall a plugin 220 | ``` 221 | helm plugin uninstall [plugin] 222 | ``` 223 | 224 | 225 | 226 | -------------------------------------------------------------------------------- /K8s-Helm-Jenkins.md: -------------------------------------------------------------------------------- 1 | ## LAB: Helm-Jenkins on running K8s Cluster (2 Node Multipass VM) 2 | 3 | - "Whenever you trigger a Jenkins job, the Jenkins Kubernetes plugin will make an API call to create a Kubernetes agent pod. Then, the Jenkins agent pod gets deployed in the kubernetes with few environment variables containing the Jenkins server details and secrets." 4 | - "When the agent pod comes up, it used the details in its environment variables and talks back to Jenkins using the JNLP method" (Ref: DevopsCube) 5 | 6 |
7 | 8 | 
9 | 10 | ### K8s Cluster (2 Node Multipass VM) 11 | - K8s cluster was created before: 12 | - **Goto:** [K8s Kubeadm Cluster Setup](https://github.com/omerbsezer/Fast-Kubernetes/blob/main/K8s-Kubeadm-Cluster-Setup.md) 13 | 14 | - On that cluster, helm was installed on the master node. 15 | 16 | ### Helm Install 17 | 18 | - Install on Ubuntu 20.04 (for other platforms: https://helm.sh/docs/intro/install/) 19 | 20 | ``` 21 | curl https://baltocdn.com/helm/signing.asc | sudo apt-key add - 22 | sudo apt-get install apt-transport-https --yes 23 | echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 24 | sudo apt-get update 25 | sudo apt-get install helm 26 | helm version 27 | ``` 28 | 29 | ### Jenkins Install 30 | 31 | ``` 32 | helm repo add jenkins https://charts.jenkins.io 33 | helm repo list 34 | mkdir helm 35 | cd helm 36 | helm pull jenkins/jenkins 37 | tar zxvf jenkins-3.11.4.tgz 38 | ``` 39 | 40 | - After unzipping, entered into the jenkins directory, you'll find values.yaml file. Disable the persistence with false. 41 | - If your cluster on-premise does not support storage class (like our multipass VM cluster), PVC and PV, disable persistence. But if you are working on minikube, minikube supports PVC and PV automatically. 42 | - If you don't disable persistence, you'll encounter that your PODs will not run (wait pending). You can inspect PVC, PV and Pod with kubectl describe command. 43 | 44 | ![image](https://user-images.githubusercontent.com/10358317/156223521-0982d3d4-61aa-4a33-a068-a634e7382eed.png) 45 | 46 | - Install Helm Jenkins Release: 47 | ``` 48 | helm install j1 jenkins 49 | kubectl get pods 50 | kubectl get svc 51 | kubectl get pods -o wide 52 | ``` 53 | 54 | ![image](https://user-images.githubusercontent.com/10358317/156224502-024f42ad-62e6-4887-9058-ae09f3beb91d.png) 55 | 56 | - To get Jenkins password (username:admin), run: 57 | ``` 58 | kubectl exec --namespace default -it svc/j1-jenkins -c jenkins -- /bin/cat /run/secrets/chart-admin-password && echo 59 | ``` 60 | ![image](https://user-images.githubusercontent.com/10358317/156224860-c40406a7-7fbf-45bc-ada5-d4bb54cf1b25.png) 61 | 62 | - Port Forwarding: 63 | ``` 64 | kubectl --namespace default port-forward svc/j1-jenkins 8080:8080 65 | ``` 66 | ![image](https://user-images.githubusercontent.com/10358317/156225021-759b0507-37be-484c-87f3-777c0472e4ba.png) 67 | 68 | 69 | ### Install Graphical Desktop to Reach Browser using Multipass VM 70 | 71 | - Install ubuntu-desktop, so you can reach multipass VM's browser using Windows RDP (Xrdp) (https://discourse.ubuntu.com/t/graphical-desktop-in-multipass/16229) 72 | 73 | ``` 74 | sudo apt update 75 | sudo apt install ubuntu-desktop xrdp 76 | sudo passwd ubuntu # set password 77 | ``` 78 | 79 | ### Jenkins Configuration 80 | 81 | - Helm also downloads automatically some of the plugins (kubernetes:1.31.3, workflow-aggregator:2.6, git:4.10.2, configuration-as-code:1.55.1) (Jenkins Version: 2.319.3) 82 | - Manage Jenkins > Configure System > Cloud 83 | ![image](https://user-images.githubusercontent.com/10358317/156225898-1487b783-d112-4fcb-8ffa-66195e2d5f35.png) 84 | 85 | ![image](https://user-images.githubusercontent.com/10358317/156226068-0afcd9c2-9537-4431-8cdd-954625a73434.png) 86 | 87 | ![image](https://user-images.githubusercontent.com/10358317/156226209-b05eb0fd-d467-42e0-9fc9-ad1b37cb6efa.png) 88 | 89 | 
![image](https://user-images.githubusercontent.com/10358317/156226315-0dd0f343-d02d-45a3-b2ef-5289ad6dcd03.png) 90 | 91 | ![image](https://user-images.githubusercontent.com/10358317/156226468-2c09dd57-9d94-426d-ba9d-0c88f865afec.png) 92 | 93 | ![image](https://user-images.githubusercontent.com/10358317/156226617-caf80b7c-d20b-4cc2-84c3-d42742531cd5.png) 94 | 95 | - New Item on main page: 96 | 97 | ![image](https://user-images.githubusercontent.com/10358317/156226810-bfafc539-0ab5-4c18-b2ce-68191d5b0e4d.png) 98 | 99 | ![image](https://user-images.githubusercontent.com/10358317/156226947-78293336-a4ca-468c-b1e7-37247829d261.png) 100 | 101 | - Add script > Build > Execute Shell: 102 | 103 | ![image](https://user-images.githubusercontent.com/10358317/156227131-c9f2a519-2749-405e-ab4a-7ae27c6b2787.png) 104 | 105 | - After triggering jobs, Jenkins (on Master) creates agents on Worker1 automatically. After jobs are completed, they are terminated. 106 | 107 | ![image](https://user-images.githubusercontent.com/10358317/156227423-0dc264b5-9060-46c5-a353-4d15ea64e9fa.png) 108 | 109 | 110 | 111 | ### Reference 112 | 113 | - https://www.jenkins.io/doc/book/scaling/scaling-jenkins-on-kubernetes/ 114 | - https://devopscube.com/jenkins-build-agents-kubernetes/ 115 | 116 | 117 | -------------------------------------------------------------------------------- /K8s-Node-Affinity.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Node Affinity 2 | 3 | This scenario shows: 4 | - how to label the node, 5 | - when node is not labelled and pods' nodeAffinity are defined, pods always wait pending 6 | 7 | 8 | ### Steps 9 | 10 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start") 11 | 12 | ![image](https://user-images.githubusercontent.com/10358317/153183333-371fe598-d5a4-4b86-9b5d-9e33f35063cc.png) 13 | 14 | - Create Yaml file (podnodeaffinity.yaml) in your directory and copy the below definition into the file. 15 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/affinity/podnodeaffinity.yaml 16 | 17 | ``` 18 | apiVersion: v1 19 | kind: Pod 20 | metadata: 21 | name: nodeaffinitypod1 22 | spec: 23 | containers: 24 | - name: nodeaffinity1 25 | image: nginx:latest # "requiredDuringSchedulingIgnoredDuringExecution" means 26 | affinity: # Find a node during scheduling according to "matchExpression" and run pod on that node. 27 | nodeAffinity: # If it is not found, do not run this pod until finding specific node "matchExpression". 28 | requiredDuringSchedulingIgnoredDuringExecution: # "...IgnoredDuringExecution" means 29 | nodeSelectorTerms: # after scheduling, if the node label is removed/deleted from node, ignore it while executing. 30 | - matchExpressions: 31 | - key: app 32 | operator: In # In, NotIn, Exists, DoesNotExist 33 | values: # In => key=value, NotIn => key!=value 34 | - production # Exists => only key 35 | --- 36 | apiVersion: v1 37 | kind: Pod 38 | metadata: 39 | name: nodeaffinitypod2 40 | spec: 41 | containers: 42 | - name: nodeaffinity2 43 | image: nginx:latest 44 | affinity: # "preferredDuringSchedulingIgnoredDuringExecution" means 45 | nodeAffinity: # Find a node during scheduling according to "matchExpression" and run pod on that node. 46 | preferredDuringSchedulingIgnoredDuringExecution: # If it is not found, run this pod wherever it finds. 
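      # preferences are weighted (1-100): the scheduler sums the weights of the matching preferences for each candidate node and picks the node with the highest total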
47 | - weight: 1 # if there is a pod with "app=production", run on that pod 48 | preference: # if there is NOT a pod with "app=production" and there is NOT any other preference, 49 | matchExpressions: # run this pod wherever scheduler finds a node. 50 | - key: app 51 | operator: In 52 | values: 53 | - production 54 | - weight: 2 # this is highest prior, weight:2 > weight:1 55 | preference: # if there is a pod with "app=test", run on that pod 56 | matchExpressions: # if there is NOT a pod with "app=test", goto weight:1 preference 57 | - key: app 58 | operator: In 59 | values: 60 | - test 61 | --- 62 | apiVersion: v1 63 | kind: Pod 64 | metadata: 65 | name: nodeaffinitypod3 66 | spec: 67 | containers: 68 | - name: nodeaffinity3 69 | image: nginx:latest 70 | affinity: 71 | nodeAffinity: 72 | requiredDuringSchedulingIgnoredDuringExecution: 73 | nodeSelectorTerms: 74 | - matchExpressions: 75 | - key: app 76 | operator: Exists # In, NotIn, Exists, DoesNotExist 77 | ``` 78 | 79 | ![image](https://user-images.githubusercontent.com/10358317/154728538-90ae7179-1fcb-4e96-9376-b089cffc5adf.png) 80 | 81 | ![image](https://user-images.githubusercontent.com/10358317/154728650-3f622711-dc2b-4e2c-8fce-966c8e892824.png) 82 | 83 | ![image](https://user-images.githubusercontent.com/10358317/154728769-784f3fb5-59b5-48bb-adc5-8bce0bf57acc.png) 84 | 85 | - Create pods: 86 | - 1st pod waits pending: Because it controls labelled "app:production" node, but it does not find, so it waits until finding labelled "app:production" node. 87 | - 2nd pod started: Because it controls the labels first, but "preferredDuringScheduling", even if it does not find, run anywhere. 88 | - 3rd pod waits pending: Because it controls labelled "app" node, but it does not find, so it waits until finding labelled "app" node. 89 | 90 | ![image](https://user-images.githubusercontent.com/10358317/153663079-4ce6a3cd-68a5-4df7-af2b-8c7a9bb3ea67.png) 91 | 92 | - After labelling node with label "app:production", 1st and 3rd nodes also run on the same node. Because they find the required label. 93 | 94 | ``` 95 | kubectl label node minikube app=production 96 | ``` 97 | ![image](https://user-images.githubusercontent.com/10358317/153664135-9752ca3b-6154-41bd-a026-7bb063bdbf23.png) 98 | 99 | - After unlabelling the node, all pods still run due to "IgnoredDuringExecution". Node ignores the label controlling after execution. 100 | 101 | ``` 102 | kubectl label node minikube app- 103 | ``` 104 | 105 | ![image](https://user-images.githubusercontent.com/10358317/153664599-b6426c70-93c3-45a7-95bf-721cded025e7.png) 106 | 107 | - Delete pods: 108 | 109 | ![image](https://user-images.githubusercontent.com/10358317/153665104-11406023-86c5-456b-89a8-7ba486f2c560.png) 110 | 111 | 112 | -------------------------------------------------------------------------------- /K8s-Monitoring-Prometheus-Grafana.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Monitoring: Prometheus and Grafana 2 | 3 | This scenario shows how to implement Prometheus and Grafana on K8s Cluster. 4 | 5 | 6 | ### Table of Contents 7 | - [Monitoring With SSH](#ssh) 8 | - [Monitoring With Prometheus and Grafana](#prometheus-grafana) 9 | - [Prometheus and Grafana for Windows](#windows) 10 | 11 | There are different options to monitor K8s cluster: SSH, Kubernetes Dashboard, Prometheus and Grafana, etc. 12 | 13 | ## 1. Monitoring With SSH 14 | 15 | - SSH can be used to get basic information about the cluster, nodes, and pods. 
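- In addition to the listing commands below, live CPU/memory usage can be queried over the same SSH session with "kubectl top" (this assumes the metrics-server addon is installed on the cluster):

```
kubectl top nodes
kubectl top pods
```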
16 | - Make SSH connection to Master Node of the K8s Cluster 17 | 18 | ``` 19 | ssh username@masterIP 20 | ``` 21 | 22 | - To get the nodes of the K8s 23 | 24 | ``` 25 | kubectl get nodes -o wide 26 | ``` 27 | 28 | - To get the pods on the K8s Cluster 29 | 30 | ``` 31 | kubectl get pods -o wide 32 | ``` 33 | 34 | - For Linux PCs: To get the pods on the K8s Cluster in real-time with the "watch" command 35 | 36 | ``` 37 | watch kubectl get pods -o wide 38 | ``` 39 | 40 | - To get all K8s objects: 41 | 42 | ``` 43 | kubectl get all 44 | ``` 45 | 46 | ## 2. Monitoring With Prometheus and Grafana 47 | 48 | - While implementting Prometheus and Grafana, Helm is used. 49 | - To add Prometheus repo into local repo and download it: 50 | 51 | ``` 52 | mkdir helm 53 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 54 | helm repo update 55 | helm pull prometheus-community/kube-prometheus-stack 56 | tar zxvf kube-prometheus-stack-34.10.0.tgz 57 | cd kube-prometheus-stack 58 | ``` 59 | 60 | - "Values.yaml" file can be viewed and updated according to new values and new configuration. 61 | - To install prometheus, release name is prometheus 62 | 63 | ``` 64 | helm install prometheus kube-prometheus-stack 65 | ``` 66 | 67 | - Port forwarding is needed on the default connection, run on the different terminals: 68 | 69 | ``` 70 | kubectl port-forward deployment/prometheus-grafana 3000 71 | kubectl port-forward prometheus-prometheus-kube-prometheus-prometheus-0 9090 72 | ``` 73 | 74 | - Default provided username: admin, password: prom-operator 75 | 76 | ![image](https://user-images.githubusercontent.com/10358317/171119775-74e42538-afde-4cad-ac3b-01bd00b434f5.png) 77 | 78 | - Monitoring all nodes' resources in terms of CPU, memory, disk space, network transmitted/received 79 | 80 | ![image](https://user-images.githubusercontent.com/10358317/171121847-88a7ee68-c38e-4fbd-ac72-30900e2c2e86.png) 81 | 82 | ![image](https://user-images.githubusercontent.com/10358317/171122247-d0e5a80c-0460-4ede-9e3a-8a15fa03b89b.png) 83 | 84 | 85 | - Use nodeport option to make reachable with IP:Port. 86 | - Uninstall the current release run. 87 | 88 | ``` 89 | helm uninstall prometheus 90 | ``` 91 | 92 | - Open values.yaml file (kube-prometheus-stack/charts/grafana/values.yaml), change type from "ClusterIP" to "NodePort" and add "nodePort: 32333" 93 | 94 | ![image](https://user-images.githubusercontent.com/10358317/171122676-59c04a9d-1170-42cb-8c84-d1de9e6c341e.png) 95 | 96 | - Run the new release 97 | 98 | ``` 99 | helm install prometheus kube-prometheus-stack 100 | ``` 101 | 102 | - On the browser from any PC on the cluster, grafana screen can be viewed: MasterIP:32333 103 | 104 | - Update (kube-prometheus-stack/charts/prometheus-node-exporter/values.yaml) to implement that node exporter works only on Linux machines. Add nodeSelector: kubernetes.io/os: linux 105 | 106 | #### 2.1. Prometheus and Grafana for Windows 107 | 108 | - Download windows_exporter-0.18.1-amd64.exe (latest version) from here: https://github.com/prometheus-community/windows_exporter/releases 109 | - Copy/Move to under C:\ directory ("C:\windows_exporter-0.18.1-amd64.exe") 110 | - Open Powershell with Admistration Rights, run: 111 | 112 | ``` 113 | New-Service -Name "windows_node_exporter" -BinaryPathName "C:\windows_exporter-0.18.1-amd64.exe" 114 | Start-Service -Name windows_node_exporter 115 | ``` 116 | - Now, windows_exporter works as a service and runs automatically when restarting the windows node. 
Check if it works in 2 ways: 117 | - Open the browser and run: http://localhost:9182/metrics to see resource data/metrics 118 | - Open Task Manager - Services Tab and see whether windows_node_exporter runs or not. 119 | 120 | - Uninstall the current release run. 121 | 122 | ``` 123 | helm uninstall prometheus 124 | ``` 125 | 126 | - Open values.yaml in the kube-prometheus-stack directory (targets: Windows IP, default port 9182) 127 | 128 | ``` 129 | #additionalScrapeConfigs: [] (Line ~2480) 130 | additionalScrapeConfigs: 131 | - job_name: 'kubernetes-windows-exporter' 132 | static_configs: 133 | - targets: ["WindowsIP:9182"] 134 | ``` 135 | 136 | - Run the new release 137 | 138 | ``` 139 | helm install prometheus kube-prometheus-stack 140 | ``` 141 | 142 | - Open Grafana and "Import" 143 | 144 | ![image](https://user-images.githubusercontent.com/10358317/171125351-f2560aff-f9cb-4929-9971-2d3c94c10891.png) 145 | 146 | - Download Prometheus "Windows Exporter Node" dashboard from here: https://grafana.com/grafana/dashboards/14510/revisions 147 | - There are 2 options: 148 | - Copy the Json content and paste panel json, 149 | - Upload Json File 150 | 151 | ![image](https://user-images.githubusercontent.com/10358317/171125688-1df89d6f-ea85-4b13-bc0d-86934e6e4017.png) 152 | 153 | - Select "Prometheus" as data source 154 | 155 | - Now it works. Windows Node Exporter: 156 | 157 | ![image](https://user-images.githubusercontent.com/10358317/171122469-7b53a060-d778-463e-b215-cf8befb076b9.png) 158 | 159 | ## Reference 160 | 161 | - https://youtu.be/jatcPHvChfI 162 | -------------------------------------------------------------------------------- /K8s-Secret.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Secret 2 | 3 | This scenario shows: 4 | - how to create secrets with file, 5 | - how to use secrets: volume and environment variable, 6 | - how to create secrets with command, 7 | - how to get/delete secrets 8 | 9 | 10 | ### Steps 11 | 12 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start") 13 | 14 | ![image](https://user-images.githubusercontent.com/10358317/153183333-371fe598-d5a4-4b86-9b5d-9e33f35063cc.png) 15 | 16 | - Create Yaml file (secret.yaml) in your directory and copy the below definition into the file. 17 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/secret.yaml 18 | 19 | ``` 20 | # Secret Object Creation 21 | apiVersion: v1 22 | kind: Secret 23 | metadata: 24 | name: mysecret 25 | type: Opaque 26 | stringData: 27 | db_server: db.example.com 28 | db_username: admin 29 | db_password: P@ssw0rd! 30 | ``` 31 | 32 | ![image](https://user-images.githubusercontent.com/10358317/154717259-629e529e-4178-489e-8d20-bad22faeb782.png) 33 | 34 | - Create Yaml file (secret-pods.yaml) in your directory and copy the below definition into the file. 35 | - 3 Pods: 36 | - secret binding using volume 37 | - secret binding environment variable: 1. explicitly, 2. 
implicitly 38 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/secret-pods.yaml 39 | 40 | ``` 41 | apiVersion: v1 42 | kind: Pod 43 | metadata: 44 | name: secretvolumepod 45 | spec: 46 | containers: 47 | - name: secretcontainer 48 | image: nginx 49 | volumeMounts: 50 | - name: secret-vol 51 | mountPath: /secret 52 | volumes: 53 | - name: secret-vol 54 | secret: 55 | secretName: mysecret 56 | --- 57 | apiVersion: v1 58 | kind: Pod 59 | metadata: 60 | name: secretenvpod 61 | spec: 62 | containers: 63 | - name: secretcontainer 64 | image: nginx 65 | env: 66 | - name: username 67 | valueFrom: 68 | secretKeyRef: 69 | name: mysecret 70 | key: db_username 71 | - name: password 72 | valueFrom: 73 | secretKeyRef: 74 | name: mysecret 75 | key: db_password 76 | - name: server 77 | valueFrom: 78 | secretKeyRef: 79 | name: mysecret 80 | key: db_server 81 | --- 82 | apiVersion: v1 83 | kind: Pod 84 | metadata: 85 | name: secretenvallpod 86 | spec: 87 | containers: 88 | - name: secretcontainer 89 | image: nginx 90 | envFrom: 91 | - secretRef: 92 | name: mysecret 93 | ``` 94 | 95 | ![image](https://user-images.githubusercontent.com/10358317/154717520-554ae3b6-cb55-4ad6-a2f3-7669c0788f77.png) 96 | 97 | ![image](https://user-images.githubusercontent.com/10358317/154717625-d688251f-8bb6-44b4-843e-eca7b6496b29.png) 98 | 99 | ![image](https://user-images.githubusercontent.com/10358317/154717703-49d3e207-15c7-4f3e-afb6-ba712c4dea67.png) 100 | 101 | - Create secret object: 102 | 103 | ![image](https://user-images.githubusercontent.com/10358317/153636591-40f14380-02f2-4bc4-98f9-5f9c6eb7b9a6.png) 104 | 105 | - Create pods: 106 | 107 | ![image](https://user-images.githubusercontent.com/10358317/153636772-246179b9-01b9-4032-8b3c-bd16331f537f.png) 108 | 109 | - Describe secret to see details: 110 | 111 | ![image](https://user-images.githubusercontent.com/10358317/153638070-edba4d19-8ece-4f93-9579-fa9546c4a15d.png) 112 | 113 | - Run bash in the secretvolumepod (1st pod): 114 | 115 | ![image](https://user-images.githubusercontent.com/10358317/153637318-e42326e9-4dc3-490d-a787-b0f1251a1808.png) 116 | 117 | - Run "printenv" command in the secretenvpod (2nd pod): 118 | 119 | ![image](https://user-images.githubusercontent.com/10358317/153637549-9a1ceb13-d2dd-49ce-931b-ccfefbb75595.png) 120 | 121 | - Run "printenv" command in the secretenvallpod (3rd pod): 122 | 123 | ![image](https://user-images.githubusercontent.com/10358317/153637762-d6dff332-3d80-4558-80b5-2ae86f4d0c92.png) 124 | 125 | - Create new secret with imperative way: 126 | 127 | ``` 128 | kubectl create secret generic mysecret2 --from-literal=db_server=db.example.com --from-literal=db_username=admin --from-literal=db_password=P@ssw0rd! 129 | ``` 130 | 131 | ![image](https://user-images.githubusercontent.com/10358317/153638556-50874231-7be3-4801-90d0-ae84f66c28e9.png) 132 | 133 | - Create new secret using files (avoid to see in the history command list). 134 | - Create file on the same directory before to run command (e.g. 
"touch server.txt"): 135 | - server.txt => put into "db.example.com" with "cat" command 136 | - password.txt => put into "password" with "cat" command 137 | - username.txt => put into "admin" with "cat" command 138 | - Files: 139 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/server.txt 140 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/password.txt 141 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/username.txt 142 | 143 | ``` 144 | kubectl create secret generic mysecret3 --from-file=db_server=server.txt --from-file=db_username=username.txt --from-file=db_password=password.txt 145 | ``` 146 | 147 | ![image](https://user-images.githubusercontent.com/10358317/153639595-4f8e5c95-151c-4990-93ac-6e8b98776fbd.png) 148 | 149 | - Create json file (config.json) and put following content. 150 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/config.json 151 | 152 | ``` 153 | { 154 | "apiKey": "7ac4108d4b2212f2c30c71dfa279e1f77dd12356", 155 | } 156 | ``` 157 | 158 | ``` 159 | kubectl create secret generic mysecret4 --from-file=config.json 160 | ``` 161 | 162 | ![image](https://user-images.githubusercontent.com/10358317/153640684-cb16dac0-cddd-40b0-a90f-9f42b28e3373.png) 163 | 164 | - Delete mysecret4: 165 | 166 | ![image](https://user-images.githubusercontent.com/10358317/153640797-617ddd36-cbb6-4a73-8955-f4482e521dde.png) 167 | -------------------------------------------------------------------------------- /K8s-Service-App.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Service Implementations (ClusterIp, NodePort and LoadBalancer) 2 | 3 | This scenario shows how to create Services (ClusterIp, NodePort and LoadBalancer). It goes following: 4 | - Create Deployments for frontend and backend. 5 | - Create ClusterIP Service to reach backend pods. 6 | - Create NodePort Service to reach frontend pods from Internet. 7 | - Create Loadbalancer Service on the cloud K8s cluster to reach frontend pods from Internet. 8 | 9 | 10 | ![image](https://user-images.githubusercontent.com/10358317/149774101-d4cfa70a-f461-4d9d-b2c4-f29de65e0e8b.png) (Ref: Udemy Course: Kubernetes-Temelleri) 11 | 12 | ### Steps 13 | 14 | - Create 3 x front-end and 3 x back-end Pods with following YAML file run ("kubectl apply -f deploy.yaml"). 
15 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/service/deploy.yaml 16 | 17 | ``` 18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: frontend 22 | labels: 23 | team: development 24 | spec: 25 | replicas: 3 26 | selector: 27 | matchLabels: 28 | app: frontend 29 | template: 30 | metadata: 31 | labels: 32 | app: frontend 33 | spec: 34 | containers: 35 | - name: frontend 36 | image: nginx:latest 37 | ports: 38 | - containerPort: 80 39 | --- 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | metadata: 43 | name: backend 44 | labels: 45 | team: development 46 | spec: 47 | replicas: 3 48 | selector: 49 | matchLabels: 50 | app: backend 51 | template: 52 | metadata: 53 | labels: 54 | app: backend 55 | spec: 56 | containers: 57 | - name: backend 58 | image: ozgurozturknet/k8s:backend 59 | ports: 60 | - containerPort: 5000 61 | ``` 62 | 63 | ![image](https://user-images.githubusercontent.com/10358317/154670356-f3bcda44-60d3-4d85-a620-920345c5e026.png) 64 | 65 | - Run on the terminal: "kubectl get pods -w" (on Linux/WSL2: "watch kubectl get pods") 66 | 67 | 68 | ![image](https://user-images.githubusercontent.com/10358317/149765878-94ec4173-a6ab-4953-9fb2-c1ffff61e4b2.png) 69 | 70 | - Create ClusterIP service that connects to backend (selector: app: backend) (run: "kubectl apply -f backend_clusterip.yaml"). 71 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/service/backend_clusterip.yaml 72 | 73 | ``` 74 | apiVersion: v1 75 | kind: Service 76 | metadata: 77 | name: backend 78 | spec: 79 | type: ClusterIP 80 | selector: 81 | app: backend 82 | ports: 83 | - protocol: TCP 84 | port: 5000 85 | targetPort: 5000 86 | ``` 87 | 88 | ![image](https://user-images.githubusercontent.com/10358317/154670246-fe3466b9-e0d2-42f2-a6e2-37be9e0410bb.png) 89 | 90 | 91 | - ClusterIP Service created. If any resource in the cluster sends a request to the ClusterIP and Port 5000, this request will reach to one of the pod behind the ClusterIP Service. 92 | - We can show it from frontend pods. 93 | - Connect one of the front-end pods (list: "kubectl get pods", connect: "kubectl exec -it frontend-5966c698b4-b664t -- bash") 94 | - In the K8s, there is DNS server (core dns based) that provide us to query ip/name of service. 95 | - When running nslookup (backend), we can reach the complete name and IP of this service (serviceName.namespace.svc.cluster_domain, e.g. backend.default.svc.cluster.local). 96 | - When running curl to the one of the backend pods with port 5000, service provides us to make connection with one of the backend pods. 97 | 98 | ![image](https://user-images.githubusercontent.com/10358317/149767889-29c64bd6-54bf-42bf-b12b-ed83ffedb0a8.png) 99 | 100 | - Create NodePort Service to reach frontend pods from the outside of the cluster (run: "kubectl apply -f backend_nodeport.yaml"). 101 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/service/backend_nodeport.yaml 102 | 103 | ``` 104 | apiVersion: v1 105 | kind: Service 106 | metadata: 107 | name: frontend 108 | spec: 109 | type: NodePort 110 | selector: 111 | app: frontend 112 | ports: 113 | - protocol: TCP 114 | port: 80 115 | targetPort: 80 116 | ``` 117 | 118 | ![image](https://user-images.githubusercontent.com/10358317/154983087-ed031df1-ed5f-4910-b8bd-3bf7197954b2.png) 119 | 120 | - With NodePort Service (you can see the image below), frontend pods can be reachable from the opening port (32098). In other words, someone can reach frontend pods via WorkerNodeIP:32098. 
NodePort service listens all of the worker nodes' port (in this example: port 32098). 121 | - While working with minikube, it is only possible with minikube tunnelling. Minikube simulates the reaching of the NodeIP:Port with tunneling feature. 122 | 123 | ![image](https://user-images.githubusercontent.com/10358317/149769823-a9e00708-c614-41dc-bb73-321483ccf0f3.png) 124 | 125 | - On the other terminal, if we run the curl command, we can reach the frontend pods. 126 | 127 | ![image](https://user-images.githubusercontent.com/10358317/149770958-87b0c840-92b3-4f9d-81cc-84e725381bf3.png) 128 | 129 | - LoadBalancer Service is only available wih cloud services (because in the local cluster, it can not possible to get external-ip of the load-balancer service). So if you have connection to the one of the cloud service (Azure-AKS, AWS EKS, GCP GKE), please create loadbalance service on it (run: "kubectl apply -f backend_loadbalancer.yaml"). 130 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/service/backend_loadbalancer.yaml 131 | 132 | ``` 133 | apiVersion: v1 134 | kind: Service 135 | metadata: 136 | name: frontendlb 137 | spec: 138 | type: LoadBalancer 139 | selector: 140 | app: frontend 141 | ports: 142 | - protocol: TCP 143 | port: 80 144 | targetPort: 80 145 | ``` 146 | 147 | ![image](https://user-images.githubusercontent.com/10358317/154983532-a14b0046-e3a0-48a2-9784-965b80de4f72.png) 148 | 149 | - If you run on the cloud, you'll see the external-ip of the loadbalancer service. 150 | 151 | ![image](https://user-images.githubusercontent.com/10358317/149772479-a6262368-ab70-4c79-9897-a8162d5dc767.png) 152 | 153 | ![image](https://user-images.githubusercontent.com/10358317/149772584-705ab659-4e5e-496e-999c-cabaf3c5a9d2.png) 154 | 155 | - In addition, it can be possible service with Imperative way (with command). 156 | - kubectl expose deployment --type= --name= 157 | 158 | ![image](https://user-images.githubusercontent.com/10358317/149773190-44d11369-ee98-400b-b84a-57527fc1fba7.png) 159 | 160 | ## References 161 | - [udemy-course:Kubernetes-Temelleri](https://www.udemy.com/course/kubernetes-temelleri/) 162 | -------------------------------------------------------------------------------- /K8s-Kubeadm-Cluster-Docker.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Cluster Setup with Kubeadm and Docker 2 | 3 | - This scenario shows how to create K8s cluster on virtual PC (multipass, kubeadm, docker) 4 | 5 | ### Table of Contents 6 | - [Creating Cluster With Kubeadm, Docker](#creating) 7 | - [IP address changes in Kubernetes Master Node](#master_ip_changed) 8 | 9 | 10 | ## 1. Creating Cluster With Kubeadm, Docker 11 | 12 | #### 1.1 Multipass Installation - Creating VM 13 | 14 | - "Multipass is a mini-cloud on your workstation using native hypervisors of all the supported plaforms (Windows, macOS and Linux)" 15 | - Multipass is lightweight, fast, easy to use Ubuntu VM (on demand for any workstation) 16 | - Fast to install and to use. 
17 | - **Link:** https://multipass.run/ 18 | 19 | ``` 20 | # creating VM 21 | multipass launch --name k8s-controller --cpus 2 --mem 2048M --disk 10G 22 | multipass launch --name k8s-node1 --cpus 2 --mem 1024M --disk 7G 23 | multipass launch --name k8s-node2 --cpus 2 --mem 1024M --disk 7G 24 | ``` 25 | 26 | ![image](https://user-images.githubusercontent.com/10358317/157879969-a049706d-e8b8-4096-97bb-dca4e9a9b87e.png) 27 | 28 | ``` 29 | # get shells on different terminals 30 | multipass shell k8s-controller 31 | multipass shell k8s-node1 32 | multipass shell k8s-node2 33 | multipass list 34 | ``` 35 | 36 | ![image](https://user-images.githubusercontent.com/10358317/157880347-dead1390-692c-4725-8e37-89121a346d7e.png) 37 | 38 | #### 1.2 Install Docker 39 | 40 | - Run for all 3 nodes on different terminals: 41 | 42 | ``` 43 | sudo apt-get update 44 | sudo apt-get install docker.io -y # install Docker 45 | sudo systemctl start docker # start and enable the Docker service 46 | sudo systemctl enable docker 47 | sudo usermod -aG docker $USER # add the current user to the docker group 48 | newgrp docker # make the system aware of the new group addition 49 | ``` 50 | 51 | #### 1.3 Install Kubeadm 52 | 53 | - Run for all 3 nodes on different terminals: 54 | 55 | ``` 56 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add # add the repository key and the repository 57 | sudo apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main" 58 | sudo apt-get install kubeadm kubelet kubectl -y # install all of the necessary Kubernetes tools 59 | ``` 60 | 61 | - Run on new terminal: 62 | 63 | ``` 64 | multipass list 65 | ``` 66 | 67 | ![image](https://user-images.githubusercontent.com/10358317/157883859-55497a48-3774-4f6c-bf8c-29cc8d591a82.png) 68 | 69 | - Run on controller, add IPs of PCs: 70 | 71 | ``` 72 | sudo nano /etc/hosts 73 | ``` 74 | 75 | ![image](https://user-images.githubusercontent.com/10358317/157883663-af21c3fb-bc19-4b37-9da1-112b1c974c84.png) 76 | 77 | - Run for all 3 nodes on different terminals: 78 | 79 | ``` 80 | sudo swapoff -a # turn off swap 81 | ``` 82 | 83 | - Create this file "daemon.json" in the directory "/etc/docker", docker change cgroup driver to systemd, run on 3 different machines: 84 | 85 | ``` 86 | cd /etc/docker 87 | sudo touch daemon.json 88 | sudo nano daemon.json 89 | # copy and paste it on daemon.json 90 | { 91 | "exec-opts": ["native.cgroupdriver=systemd"] 92 | } 93 | sudo systemctl restart docker 94 | ``` 95 | 96 | - Run on the controller: 97 | 98 | ``` 99 | sudo kubeadm init --pod-network-cidr=192.168.0.0/16 100 | mkdir -p $HOME/.kube 101 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 102 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 103 | sudo kubectl get nodes 104 | ``` 105 | 106 | ![image](https://user-images.githubusercontent.com/10358317/157886788-4a136836-924b-4938-bfdc-0a07e9c16163.png) 107 | 108 | ![image](https://user-images.githubusercontent.com/10358317/157887715-27661178-2a0b-4314-ae84-30598cfd5e68.png) 109 | 110 | - Run on the nodes (node1, node2): 111 | 112 | ``` 113 | sudo kubeadm join 172.29.108.209:6443 --token ug13ec.cvi0jwi9xyf82b6f \ 114 | --discovery-token-ca-cert-hash sha256:12d59142ccd0148d3f12a673b5c47a2f549cce6b7647963882acd90f9b0fbd28 115 | ``` 116 | 117 | - Run "kubectl get nodes" on the controller, after deploying pod network, nodes will be ready. 
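- Until the pod network (next step) is deployed, the nodes stay in "NotReady" state; the transition can be watched with:

```
watch kubectl get nodes
```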
118 | 119 | ![image](https://user-images.githubusercontent.com/10358317/157888135-5ad0e931-8a2d-4389-83c2-ec1d8d909c25.png) 120 | 121 | - Run on Controller to deploy a pod network: 122 | - Flannel: 123 | ``` 124 | sudo kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml 125 | ``` 126 | - Calico: 127 | ``` 128 | kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml 129 | kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml 130 | ``` 131 | 132 | ![image](https://user-images.githubusercontent.com/10358317/157889081-d9ee73ed-ebb3-4386-bbef-03113b199ef3.png) 133 | 134 | - After testing more (restarting master, etc.), Containerd is more flexible and usable than Dockerd run time => [KubeAdm-Containerd Setup](https://github.com/omerbsezer/Fast-Kubernetes/blob/main/K8s-Kubeadm-Cluster-Setup.md), because every restart, /etc/hosts should be updated. However, updating of /etc/hosts is not required in the containerd. 135 | 136 | ## 2. IP address changes in Kubernetes Master Node 137 | - After restarting Master Node, it could be possible that the IP of master node is updated. Your K8s cluster API's IP is still old IP of the node. So you should configure the K8s cluster with new IP. 138 | 139 | - If you installed the docker for the docker registry, you can remove the exited containers: 140 | 141 | ``` 142 | sudo docker rm $(sudo docker ps -a -f status=exited -q) 143 | ``` 144 | 145 | #### On Master Node: 146 | 147 | - Run on controller, add IPs of PCs, after restarting IPs should be again updated: 148 | 149 | ``` 150 | sudo nano /etc/hosts 151 | ``` 152 | 153 | - Reset kubeadm and init new cluster: 154 | 155 | ``` 156 | sudo kubeadm reset 157 | sudo kubeadm init --pod-network-cidr=192.168.0.0/16 158 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 159 | ``` 160 | 161 | - It shows which command should be used to join cluster: 162 | 163 | ``` 164 | sudo kubeadm join 172.31.40.125:6443 --token 07vo3z.q2n2qz6bd07ipdnf \ 165 | --discovery-token-ca-cert-hash sha256:46c7dcb092ca091e71ab39bd542e73b90b3f7bdf0c486202b857a678cd9879ba 166 | ``` 167 | 168 | 169 | 170 | - Network Configuratin with new IP: 171 | 172 | ``` 173 | kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml 174 | kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml 175 | ``` 176 | 177 | 178 | 179 | ### Reference 180 | - https://thenewstack.io/deploy-a-kubernetes-desktop-cluster-with-ubuntu-multipass/ 181 | -------------------------------------------------------------------------------- /K8s-PersistantVolume.md: -------------------------------------------------------------------------------- 1 | ## LAB: K8s Persistent Volumes and Persistent Volume Claims 2 | 3 | This scenario shows how K8s PVC and PV work on minikube 4 | 5 | ### Steps 6 | 7 | - On Minikube, we do not have to reach NFS Server. So we simulate NFS Server with Docker Container. 8 | 9 | ``` 10 | docker volume create nfsvol 11 | docker network create --driver=bridge --subnet=10.255.255.0/24 --ip-range=10.255.255.0/24 --gateway=10.255.255.10 nfsnet 12 | docker run -dit --privileged --restart unless-stopped -e SHARED_DIRECTORY=/data -v nfsvol:/data --network nfsnet -p 2049:2049 --name nfssrv ozgurozturknet/nfs:latest 13 | ``` 14 | 15 | ![image](https://user-images.githubusercontent.com/10358317/152173180-47015aa9-a8b8-4a41-a49e-9154a4eb26e2.png) 16 | 17 | - Now our simulated server enabled. 
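- (Optional) Verify that the simulated NFS container is up and publishing port 2049 before continuing:

```
docker ps --filter name=nfssrv
```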
- Copy and save (below) as file on your PC (pv.yaml).
- File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/persistentvolume/pv.yaml

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysqlpv
  labels:
    app: mysql # the PV is labelled with "mysql"
spec:
  capacity:
    storage: 5Gi # 5Gi = gibibytes (powers of 2); 5GB = gigabytes (powers of 10)
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: / # binds the path on the NFS Server
    server: 10.255.255.10 # IP of NFS Server
```

![image](https://user-images.githubusercontent.com/10358317/154735518-3bde3e54-518b-4fba-bdf5-bd57eabd2546.png)

- Create the PV object on our cluster:

![image](https://user-images.githubusercontent.com/10358317/152173879-837bb03a-fd9f-44ba-becc-fa3ab7ae748f.png)

- Copy and save (below) as file on your PC (pvc.yaml).
- File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/persistentvolume/pvc.yaml

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysqlclaim
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 5Gi
  storageClassName: ""
  selector:
    matchLabels:
      app: mysql # choose/select the "mysql" PV that is defined above
```

![image](https://user-images.githubusercontent.com/10358317/154735540-3026d9de-92bd-4e9d-a00a-3f0cf597db34.png)

- Create the PVC object on our cluster. After creation, the PVC's status shows that it is bound to the PV ("Bound"):

![image](https://user-images.githubusercontent.com/10358317/152174156-9d20270f-3be7-46b1-ac07-2c32c56036c4.png)

- Copy and save (below) as file on your PC (deploy.yaml).
- File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/persistentvolume/deploy.yaml

```
apiVersion: v1 # Create Secret object for the password
kind: Secret
metadata:
  name: mysqlsecret
type: Opaque
stringData:
  password: P@ssw0rd!
---
apiVersion: apps/v1
kind: Deployment # Deployment
metadata:
  name: mysqldeployment
  labels:
    app: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql # select the deployment's pods (template > metadata > labels)
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: mysql
          ports:
            - containerPort: 3306
          volumeMounts: # volumeMounts with path and volume name
            - mountPath: "/var/lib/mysql"
              name: mysqlvolume # which volume to select (volumes > name)
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom: # get the mysql password from the secret
                secretKeyRef:
                  name: mysqlsecret
                  key: password
      volumes:
        - name: mysqlvolume # name of the Volume
          persistentVolumeClaim:
            claimName: mysqlclaim # choose/select the "mysqlclaim" PVC that is defined above
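# Note (added for clarity): the container reads the root password from the
# Secret via secretKeyRef and mounts the storage through the PVC; since the PVC
# binds 1:1 to the NFS-backed PV, the MySQL data under /var/lib/mysql survives
# pod restarts and rescheduling to other nodes.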
```

![image](https://user-images.githubusercontent.com/10358317/154735894-bb807908-5378-487c-bb67-8e68ab26cc00.png)

- Run the deployment on our cluster:

![image](https://user-images.githubusercontent.com/10358317/152175581-ccaafe14-e41d-4a14-8e4f-cde96e9bf31b.png)

- Watching the deployment status:

![image](https://user-images.githubusercontent.com/10358317/152175839-0b3c4cbd-210a-46ff-80ac-8dbd723c6a62.png)

- See the details of the pod (mounts and volumes):

![image](https://user-images.githubusercontent.com/10358317/152176550-73e8c06c-0f5a-42ed-ab06-171e545ee078.png)

- Enter into the pod and see the path where the volume is mounted ("kubectl exec -it <podName> -- bash"):

![image](https://user-images.githubusercontent.com/10358317/152181824-96dfbc72-ee0f-45c0-b896-b6fea7b9f7a5.png)

- If a new node is added to the cluster and the running pod is stopped on the main minikube node, the pod will start on the other node.
- With this scenario, we can see the following:
  - The deployment always keeps a pod running on the cluster.
  - The pod created on the new node still connects to the persistent volume (nothing in the volume is lost).
  - How to assign a taint on the node (key=value:NoExecute; if NoExecute is not tolerated by a pod, the pod is evicted from the node).

![image](https://user-images.githubusercontent.com/10358317/152178562-388f60db-977e-4247-8b0f-2ff9e0df602e.png)

- A new pod is created on the new node (2nd node):

![image](https://user-images.githubusercontent.com/10358317/152178713-ca502e6c-140e-4471-aa37-dc4a8c5c6785.png)

- The second pod is also connected to the same volume again.

![image](https://user-images.githubusercontent.com/10358317/152179192-d6030535-8a54-451a-b97a-319ba2549870.png)

- Enter into the 2nd pod and see the path where the volume is mounted ("kubectl exec -it <podName> -- bash"). When you look at the same path in the 2nd pod, the volume files are the same:

![image](https://user-images.githubusercontent.com/10358317/152182472-e67f7162-a4cf-4034-aa98-375860fbd38d.png)

- Delete minikube and the docker container, volume, and network:

![image](https://user-images.githubusercontent.com/10358317/152180006-911adcbc-0d5a-4d6d-9364-eb7fe1bca0d2.png)

### References
- https://github.com/aytitech/k8sfundamentals/tree/main/pvpvc

--------------------------------------------------------------------------------
/Helm.md:
--------------------------------------------------------------------------------
## Helm

### Helm Install
- Installed on Ubuntu 20.04 (for other platforms: https://helm.sh/docs/intro/install/)

```
curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
sudo apt-get install apt-transport-https --yes
echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install helm
```
- Check the version (helm version):

![image](https://user-images.githubusercontent.com/10358317/153708424-d875f4bc-1af5-4169-85af-c87044e64f17.png)

- **ArtifactHUB:** https://artifacthub.io/
- ArtifactHub is like DockerHub, but it contains Helm Charts (e.g. search for wordpress on ArtifactHub in the browser).

![image](https://user-images.githubusercontent.com/10358317/153708626-6715df00-81c0-4314-b2fa-6c6b563a1af1.png)

- With Helm Search on the Hub:
```
helm search hub wordpress # searches for the package on the Hub
helm search repo wordpress # searches for the package in the local repository list
helm search repo bitnami # searches for bitnami in the repo list
```
![image](https://user-images.githubusercontent.com/10358317/153708687-c2542aa5-e763-4967-b8a9-0f4b82ab7af0.png)

- **Repo:** the repository list on the local machine; each repo entry points to the package's download page (e.g. https://charts.bitnami.com/bitnami)

```
helm repo add bitnami https://charts.bitnami.com/bitnami # adds the link to my repo list
helm search repo wordpress # searches for the package in the local repository list
helm repo list # list all repos
helm pull [chart]
helm pull jenkins/jenkins
helm pull bitnami/jenkins # pull and download the chart to the current directory
tar zxvf jenkins-3.11.4.tgz # extract the downloaded chart
```

![image](https://user-images.githubusercontent.com/10358317/153730338-0f00f81b-b2e8-4fd9-be3c-3a8acd9e2d2a.png)

![image](https://user-images.githubusercontent.com/10358317/153730367-6ef92437-49bd-47df-8ca2-009301872614.png)

- Downloaded chart file structure and files:
  - **values.yaml**: includes values, variables, configs, replicaCount, imageName, etc. These values are injected into the template yaml files (e.g. replicas: {{ .Values.replicaCount }} in the deployment yaml file)
  - **Chart.yaml**: includes chart information (annotations, maintainers, appVersion, apiVersion, description, sources, etc.)
  - **templates**: directory that includes all K8s yaml template files (deployment, secret, configmap, etc.)
  - **values-summary**: includes the configurable parameters of the application and K8s (parameter, description, and value)

```
tree jenkins
```

![image](https://user-images.githubusercontent.com/10358317/153730633-6e4b4d24-e4c0-4b4b-bab8-a8f06eb2c074.png)

- Install a chart on K8s with an application/release name:

```
helm install helm-release-wordpress bitnami/wordpress # install the bitnami/wordpress chart with the name helm-release-wordpress in the default namespace
helm install release bitnami/wordpress --namespace production # install the release in the production namespace
# it is possible to set the username/password while creating the pods:
helm install my-release \
     --set wordpressUsername=admin \
     --set wordpressPassword=password \
     --set mariadb.auth.rootPassword=secretpassword \
         bitnami/wordpress
helm install wordpress-release bitnami/wordpress -f ./values.yaml # values.yaml includes important values (e.g. username, password, ...); if it is updated and used with -f, it is possible to install with these values
echo '{mariadb.auth.database: user0db, mariadb.auth.username: user0}' > values.yaml
helm install -f values.yaml bitnami/wordpress --generate-name # with "-f values.yaml", the updated values are used
helm install j1 jenkins # jenkins here is the downloaded and extracted chart directory; after updating its values.yaml, it is also possible to install with the updated app config
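# (added example) render the chart's templates locally to inspect the generated K8s yaml before installing
helm template j1 jenkins | less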
```

![image](https://user-images.githubusercontent.com/10358317/153709179-d36c5c8a-39d9-4ba4-ab30-243706caa6ae.png)

- To see the status of the release:

```
helm status helm-release-wordpress
```
![image](https://user-images.githubusercontent.com/10358317/153711226-1d058594-9ba9-402d-a422-4f2c95e19070.png)

- We can show/change the values that act as variables (e.g. username, password):
```
helm show values bitnami/wordpress
```
![image](https://user-images.githubusercontent.com/10358317/153711295-2a25ea75-6ce1-434f-9138-54b262c100f1.png)

- You can see all the K8s objects that are automatically created by Helm:

```
kubectl get pods
kubectl get svc
kubectl get deployment
kubectl get pv
kubectl get pvc
kubectl get configmap
kubectl get secrets
kubectl get pods --all-namespaces
helm list
```
![image](https://user-images.githubusercontent.com/10358317/153709719-c26478a4-cad5-4d9b-80ab-9302c89629e2.png)

- Get the password of wordpress:

![image](https://user-images.githubusercontent.com/10358317/153709965-d702a32a-0041-4c5d-b0de-12b229476dfe.png)

- Open a tunnel from minikube:

```
minikube service helm-release-wordpress --url
```

![image](https://user-images.githubusercontent.com/10358317/153709988-8252a1f1-dd56-46a3-a2d5-8ea8e7423a61.png)

![image](https://user-images.githubusercontent.com/10358317/153710041-47838752-ff54-4321-9fc1-e4d37211840d.png)

- Using the username and password (http://127.0.0.1:46007/admin):

![image](https://user-images.githubusercontent.com/10358317/153710100-cc29ac32-4f7d-4c69-a466-31dac86c1f06.png)
![image](https://user-images.githubusercontent.com/10358317/153710112-697852b5-e3c9-4166-9038-f9494b99488f.png)

- Uninstall the helm release:

![image](https://user-images.githubusercontent.com/10358317/153711396-c6b4e973-22a3-4246-99a0-026ff4c7c14c.png)

- Upgrade, rollback, history:
```
helm install j1 jenkins # create the j1 release with the jenkins chart
helm upgrade -f [filename.yaml] [RELEASE] [CHART]
helm upgrade -f values.yaml j1 jenkins/jenkins
helm rollback [RELEASE] [REVISION]
helm rollback j1 1
helm history [RELEASE]
helm rollback j1
```
![image](https://user-images.githubusercontent.com/10358317/153731806-95b20cd9-f3fd-4ea8-9fed-d8b37993d3d6.png)

- To learn more Helm commands:

**Goto:** [Helm Commands Cheatsheet](https://github.com/omerbsezer/Fast-Kubernetes/blob/main/HelmCheatsheet.md)

--------------------------------------------------------------------------------
/K8s-Ingress.md:
--------------------------------------------------------------------------------
## LAB: K8s Ingress

This scenario shows how K8s ingress works on minikube. When URLs are browsed, the ingress controller (nginx) directs traffic to the related services.

![image](https://user-images.githubusercontent.com/10358317/152985194-76a3cb57-70c4-438a-a714-eae7ef287d83.png) (ref: Kubernetes.io)

### Steps

- Run minikube on Windows Hyper-V or VirtualBox; in this scenario:

```
minikube start --driver=hyperv
or
minikube start --driver=hyperv --force-systemd
```

- To learn how to install an ingress controller on a K8s cluster, visit: https://kubernetes.github.io/ingress-nginx/deploy/

- On Minikube, you only need to enable the ingress addon.

```
minikube addons enable ingress
minikube addons list
```

![image](https://user-images.githubusercontent.com/10358317/152980050-9f59638e-22d2-4581-a045-0c4199cb0be1.png)

- Copy and save (below) as file on your PC (appingress.yaml).
- File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/ingress/appingress.yaml

```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: appingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$1
spec:
  rules:
    - host: webapp.com
      http:
        paths:
          - path: /blue
            pathType: Prefix
            backend:
              service:
                name: bluesvc
                port:
                  number: 80
          - path: /green
            pathType: Prefix
            backend:
              service:
                name: greensvc
                port:
                  number: 80
```

![image](https://user-images.githubusercontent.com/10358317/154954648-e730fbcd-4eb0-4a4c-a189-f1e9e118cdd0.png)

- Copy and save (below) as file on your PC (todoingress.yaml).
- File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/ingress/todoingress.yaml

```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: todoingress
spec:
  rules:
    - host: todoapp.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: todosvc
                port:
                  number: 80
```

![image](https://user-images.githubusercontent.com/10358317/154954757-4e873d67-855b-4123-85ce-48b6acfc839e.png)

- Copy and save (below) as file on your PC (deploy.yaml).
- File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/ingress/deploy.yaml

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: blueapp
  labels:
    app: blue
spec:
  replicas: 2
  selector:
    matchLabels:
      app: blue
  template:
    metadata:
      labels:
        app: blue
    spec:
      containers:
        - name: blueapp
          image: ozgurozturknet/k8s:blue
          ports:
            - containerPort: 80
          livenessProbe:
            httpGet:
              path: /healthcheck
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /ready
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 3
---
apiVersion: v1
kind: Service
metadata:
  name: bluesvc
spec:
  selector:
    app: blue
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: greenapp
  labels:
    app: green
spec:
  replicas: 2
  selector:
    matchLabels:
      app: green
  template:
    metadata:
      labels:
        app: green
    spec:
      containers:
        - name: greenapp
          image: ozgurozturknet/k8s:green
          ports:
            - containerPort: 80
          livenessProbe:
            httpGet:
              path: /healthcheck
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /ready
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 3
---
apiVersion: v1
kind: Service
metadata:
  name: greensvc
spec:
  selector:
    app: green
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: todoapp
  labels:
    app: todo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: todo
  template:
    metadata:
      labels:
        app: todo
    spec:
      containers:
        - name: todoapp
          image: ozgurozturknet/samplewebapp:latest
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: todosvc
spec:
  selector:
    app: todo
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
```

![image](https://user-images.githubusercontent.com/10358317/154954983-850acd87-b475-48d4-8d37-d1fa081b8159.png)

![image](https://user-images.githubusercontent.com/10358317/154955115-0e23d6b7-4aa9-4409-8ec7-b658edfda34c.png)

![image](https://user-images.githubusercontent.com/10358317/154955180-ec54ee41-6b40-4d5d-a4e1-3c6ce885a57b.png)

- Run "deploy.yaml" and "appingress.yaml" to create the deployments and services:

![image](https://user-images.githubusercontent.com/10358317/152984112-aa3b03db-9e8f-4fb2-acf0-4b1150982f29.png)

- Add the url-ip mapping to the Windows/System32/Drivers/etc/hosts file:

![image](https://user-images.githubusercontent.com/10358317/152983054-66993f34-0d4b-4381-8ae6-ec8441cb6366.png)

- When browsing the url "webapp.com/blue", one of the blue app containers returns a response.

![image](https://user-images.githubusercontent.com/10358317/152982739-c86fac86-c0d6-465b-bc4e-391d4e56eb9f.png)

- When browsing the url "webapp.com/green", one of the green app containers returns a response.

![image](https://user-images.githubusercontent.com/10358317/152983147-057503d0-d2f1-45a2-bc35-0117676a2abb.png)
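- The same routing can also be tested from a terminal; a minimal sketch using curl's --resolve option, which avoids editing the hosts file (assumes the minikube IP is reachable from the host):

```
curl --resolve webapp.com:80:$(minikube ip) http://webapp.com/blue
curl --resolve webapp.com:80:$(minikube ip) http://webapp.com/green
```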
- When browsing "todoapp.com":

![image](https://user-images.githubusercontent.com/10358317/152983854-c35588c1-170a-4d02-9573-0e712876bad2.png)

- Hence, we can expose the services running on the cluster to the outside world through a single IP.

- Delete all the yaml files and minikube.

![image](https://user-images.githubusercontent.com/10358317/152985795-d69c713e-b6ae-417e-bf88-0f397ebdaaee.png)

### References

https://github.com/aytitech/k8sfundamentals/tree/main/ingress

--------------------------------------------------------------------------------
/K8s-Rollout-Rollback.md:
--------------------------------------------------------------------------------
## LAB: K8s Rollout - Rollback

This scenario shows:
- how to roll out deployments with 2 different strategies: Recreate and RollingUpdate,
- how to save/record the deployments' revisions while rolling out with "--record" (e.g. changing the image):
  - imperative: "kubectl set image deployment rcdeployment nginx=httpd --record",
  - declarative, editing the file: "kubectl edit deployment rolldeployment --record",
- how to roll back (rollout undo) to the desired deployment revision:
  - "kubectl rollout undo deployment rolldeployment --to-revision=2",
- how to pause/resume a rollout:
  - pause: "kubectl rollout pause deployment rolldeployment",
  - resume: "kubectl rollout resume deployment rolldeployment",
- how to see the status of a rollout:
  - "kubectl rollout status deployment rolldeployment -w".

### Steps

- Run minikube (in this scenario, K8s runs on WSL2 - Ubuntu 20.04) ("minikube start")

![image](https://user-images.githubusercontent.com/10358317/153183333-371fe598-d5a4-4b86-9b5d-9e33f35063cc.png)

- Create a Yaml file (recreate-deployment.yaml) in your directory and copy the definition below into the file.
- File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/deployment/recreate-deployment.yaml

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rcdeployment
  labels:
    team: development
spec:
  replicas: 5 # create 5 replicas
  selector:
    matchLabels: # label selector of the deployment: selects pods which have the "app:recreate" label
      app: recreate
  strategy: # deployment rollout strategy: Recreate => delete all pods first and create new pods from scratch
    type: Recreate
  template:
    metadata:
      labels: # labels the pod with "app:recreate"
        app: recreate
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
```

![image](https://user-images.githubusercontent.com/10358317/154661824-0e6db25e-cf67-4789-97be-acd8d90f7c07.png)
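- For reference (the later screenshots only show the output), this file is applied like any other manifest:

```
kubectl apply -f recreate-deployment.yaml
kubectl get deployment rcdeployment
```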
- Create a Yaml file (rolling-deployment.yaml) in your directory and copy the definition below into the file.
- File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/deployment/rolling-deployment.yaml

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rolldeployment
  labels:
    team: development
spec:
  replicas: 10
  selector:
    matchLabels: # label selector of the deployment: selects pods which have the "app:rolling" label
      app: rolling
  strategy:
    type: RollingUpdate # deployment rollout strategy: RollingUpdate => pods are updated step by step; all pods are not deleted at the same time
    rollingUpdate:
      maxUnavailable: 2 # max number of unavailable pods => total: 10 pods; with maxUnavailable: 2, at least 8 pods keep running during the update
      maxSurge: 2 # max number of extra pods => total: 10 pods; with maxSurge: 2, at most 12 pods run at a time
  template:
    metadata:
      labels: # labels the pod with "app:rolling"
        app: rolling
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
```

![image](https://user-images.githubusercontent.com/10358317/154661909-087ac83a-d5ee-4268-805c-c4a7179dfafd.png)

- Run the first deployment (recreate-deployment.yaml):

![image](https://user-images.githubusercontent.com/10358317/153604472-8af9e7d9-7d22-47e2-b02d-2e6c36c86de5.png)

- Watching the pods' status (on Linux: "watch kubectl get pods", on Windows: "kubectl get pods -w"):

![image](https://user-images.githubusercontent.com/10358317/153604648-9944dfd4-3148-4e8c-b52b-ef801a695ed2.png)

- Watching the replica set's status (on Linux: "watch kubectl get rs", on Windows: "kubectl get rs -w"):

![image](https://user-images.githubusercontent.com/10358317/153604880-a0697649-967d-4255-bc4d-e72446568844.png)

- Update the image version ("kubectl set image deployment rcdeployment nginx=httpd"); since the strategy is Recreate, all old pods are terminated first, and then a new replicaset and its pods are created.

![image](https://user-images.githubusercontent.com/10358317/153605645-3bd72a89-9840-4d6b-9c6c-3b8c251cf2e9.png)

- With the "Recreate" strategy, the pods are terminated first:

![image](https://user-images.githubusercontent.com/10358317/153605318-8f71959d-3c44-4c72-bdd5-674aea6d1afc.png)

- New pods are being created:

![image](https://user-images.githubusercontent.com/10358317/153605365-bc6ffcbe-cadc-4760-b85a-a4844fa1ccb4.png)

- A new replicaset is created:

![image](https://user-images.githubusercontent.com/10358317/153605416-80d63de8-dee6-4131-bb24-a1a8f8e47cda.png)

- Delete this deployment:

![image](https://user-images.githubusercontent.com/10358317/153605871-6ca3810d-ce23-4442-ae2c-44c362ada13d.png)

- Run the deployment (rolling-deployment.yaml):

![image](https://user-images.githubusercontent.com/10358317/153610269-96541251-b039-4393-87e3-a1e93e234753.png)

- Watching the pods' status (on Linux: "watch kubectl get pods", on Windows: "kubectl get pods -w"):

![image](https://user-images.githubusercontent.com/10358317/153610371-5836cf65-2a60-4e94-b96e-e4b8643412a2.png)

- Watching the replica set's status (on Linux: "watch kubectl get rs", on Windows: "kubectl get rs -w"):

![image](https://user-images.githubusercontent.com/10358317/153610454-e27200ec-1c52-48aa-89de-c798fa6d8d5f.png)

- Run "kubectl edit deployment rolldeployment --record"; on Linux it opens the vim editor for editing.
- Find the image definition, press "i" for insert mode, change "nginx" to "httpd", press "ESC", then type ":wq" to save and exit.

![image](https://user-images.githubusercontent.com/10358317/153610924-b2fc3730-de65-4138-8ee8-d4675badd651.png)

- New pods are being created with the new version:

![image](https://user-images.githubusercontent.com/10358317/153614766-027ee933-0788-4418-8577-70f0860a8841.png)

- A new replicaset is created:

![image](https://user-images.githubusercontent.com/10358317/153614901-55137709-b79a-4bfd-866b-a259b299cda5.png)

- Run a new deployment version:

![image](https://user-images.githubusercontent.com/10358317/153615453-95067330-5056-4103-a396-db2979d0b98a.png)

- New pods are being created with the new version:

![image](https://user-images.githubusercontent.com/10358317/153615342-043787b0-bb8a-438b-ba35-65e0a71985ac.png)

- A new replicaset is created:

![image](https://user-images.githubusercontent.com/10358317/153615533-9af6f608-c94b-4a45-baf9-c68d394a3308.png)

- To show the history of the deployment (**important:** --record should be used so that old deployment versions are added to the history list):

![image](https://user-images.githubusercontent.com/10358317/153615727-30cfa59d-a144-41ed-9685-f4ec8a562ed0.png)

- To show/describe the selected revision:

![image](https://user-images.githubusercontent.com/10358317/153616272-3fd95a8b-3b6c-42a7-add6-ae40550a47e8.png)

- Roll back to revision 1 (with undo: "kubectl rollout undo deployment rolldeployment --to-revision=1"):

![image](https://user-images.githubusercontent.com/10358317/153616842-e5a544c8-0d1b-4843-a263-d7fb7c51df22.png)

- Pod status:

![image](https://user-images.githubusercontent.com/10358317/153616616-30b635d2-c95f-47ea-8abd-5fdcd4646719.png)

- Replicaset revision 1:

![image](https://user-images.githubusercontent.com/10358317/153616770-5c72a691-8028-4bc1-9111-b1f63504b7c7.png)

- It is possible to return from revision 1 to revision 2 (with undo: "kubectl rollout undo deployment rolldeployment --to-revision=2"):

![image](https://user-images.githubusercontent.com/10358317/153618994-f5b072c7-c758-46ce-bcb6-1c48e255200e.png)

- It is also possible to pause a rollout:

![image](https://user-images.githubusercontent.com/10358317/153617586-011a90d9-d4b7-4813-b191-75069ee5ffd0.png)

- While rolling back from revision 2 to revision 3, the rollout stayed paused:

![image](https://user-images.githubusercontent.com/10358317/153617783-da05f8a8-5b1b-4473-9bd6-47f709ab8349.png)

- Resume the paused rollout of the deployment:

![image](https://user-images.githubusercontent.com/10358317/153617914-3ed84d3f-20a0-4693-bb9e-17e1346f28b5.png)

- Now the deployment's revision is 3:

![image](https://user-images.githubusercontent.com/10358317/153618035-5b506540-dc63-45fd-af83-d2bedb5b192e.png)

- It is also possible to watch the status of the rollout with:
  - "kubectl rollout status deployment rolldeployment -w"

- Delete the deployment:

![image](https://user-images.githubusercontent.com/10358317/153620662-bbd8d7e4-572b-4261-b300-f350ee655711.png)

--------------------------------------------------------------------------------
/KubernetesCommandCheatSheet.md:
--------------------------------------------------------------------------------
## Kubernetes Commands Cheatsheet

#### minikube command
```
minikube start
minikube status
kubectl get nodes
minikube stop # does not delete the cluster; it can be started again with "minikube start"
minikube delete # delete everything
```
#### kubeadm command
- Kubeadm provisions a K8s cluster on-premises
- You can test Kubeadm on PlayWithKubernetes
- Creating a cluster with 1 master and 2 nodes (add new instances) on PlayWithKubernetes:
```
on master: kubeadm init --apiserver-advertise-address $(hostname -i) --pod-network-cidr 10.5.0.0/16
on nodes: kubeadm join 192.168.0.13:6443 --token ge5xcq.xh2mcb4rqa8lz0db \
    --discovery-token-ca-cert-hash sha256:a3ba7ced9383a5b5704b6fbf696f243a8322759b68b9d07b747b174fcc838540
on master: mkdir -p $HOME/.kube
on master: cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
on master: chown $(id -u):$(id -g) $HOME/.kube/config
on master: kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml
kubectl get nodes
kubectl run test --image=nginx --restart=Never
```

#### kubectl config: context, user, cluster
```
kubectl config
kubectl config get-contexts # get all contexts
kubectl config current-context # get the current context
kubectl config use-context docker-desktop # change context
kubectl config use-context docker-desktop
kubectl config use-context aks-k8s-test
kubectl config use-context default # default=minikube
kubectl get nodes
```

#### cluster-info
```
kubectl cluster-info
kubectl cp --help
kubectl [verb] [type] [object]
kubectl delete pods test_pod
kubectl [get|delete|edit|apply] [pods, deployment, services, etc.] [podName, serviceName, deploymentName, etc.]
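# (added example) the generic patterns above in concrete form (mydeployment/myservice are example names):
kubectl get deployment mydeployment -o yaml
kubectl edit service myservice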
```

#### namespace, -n
```
kubectl get pods # default namespace
kubectl get pods -n kube-system # list the kube-system namespace pods
kubectl get pods --all-namespaces
kubectl get pods -A # all namespaces
```

#### more info about pods
```
kubectl get pods -A # all namespaces
kubectl get pods -A -o wide # all namespaces, with more details
kubectl get pods -A -o yaml
kubectl get pods -A -o json
kubectl get pods -A -o go-template
kubectl get pods -A -o json | jq -r ".items[].spec.containers[].name" # jq parser query
```

#### commands help
- 'help' to learn more about commands
```
kubectl apply --help # explains the command
kubectl delete --help
```

#### object help: with explain
- 'explain' to learn more about objects
```
kubectl explain pod
kubectl explain deployment
```

#### pod ~ container
```
kubectl run firstpod --image=nginx --restart=Never
kubectl run secondpod --image=nginx --port=80 --labels=app=frontend --restart=Never
```

#### get info about pods
```
kubectl get pods -o wide
kubectl describe pods firstpod
```

#### show log
```
kubectl logs firstpod
kubectl logs -f firstpod # watch the live log with -f
```

#### run command in pod
```
kubectl exec firstpod -- hostname # run the hostname command in the pod
kubectl exec firstpod -- ls / # run the list command in the pod
```

#### connect to a container in the pod
```
kubectl exec -it firstpod -- /bin/sh # open a shell, connect to the container
kubectl exec -it firstpod -- bash # run bash
```

#### delete pod
```
kubectl delete pods firstpod
```

#### learn/explain the api of objects
```
kubectl explain pods
kubectl explain deployments
kubectl explain serviceaccount
```

#### Declarative way with file, imperative way with command
- File contents:
  - apiVersion:
  - kind: (pod, deployment, etc.)
  - metadata: (podName, label, etc.)
  - specs: (restartPolicy, container name, image, command, ports, etc.)

#### file apply for the declarative way
```
kubectl apply -f pod1.yaml
```

#### edit
```
kubectl edit pods firstpod
```

#### delete
```
kubectl delete -f podlabel.yaml # all related objects are deleted in the declarative way
```

#### watch pods continuously
```
kubectl get pods -w
```

#### run multiple containers in 1 pod, -c containerName
```
kubectl exec -it multicontainer -c webcontainer -- /bin/sh # -c selects the container by name if there is more than one container
kubectl exec -it multicontainer -c sidecarcontainer -- /bin/sh
kubectl logs -f multicontainer -c sidecarcontainer
```

#### port-forward to pod
```
kubectl port-forward pod/multicontainer 80:80 ## host:container port; if the command is not running, the port is not open
kubectl port-forward pod/multicontainer 8080:80 # when browsing 127.0.0.1:8080, host:8080 goes to pod:80 and directs the traffic
kubectl port-forward <resource>/<name> <localPort>:<podPort> # generic form
kubectl port-forward deployment/mydeployment 5000 6000 # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment
kubectl port-forward service/myservice 5000 6000 # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the service
kubectl port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 # Listen on port 8888 on localhost and the selected IP, forwarding to 5000 in the pod
kubectl port-forward pod/mypod :5000 # Listen on a random port locally, forwarding to 5000 in the pod
kubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000 # Listen on port 8888 on all addresses, forwarding to 5000 in the pod
```

#### label and selector
```
kubectl get pods -l "app" --show-labels # with -l, search by label
kubectl get pods -l "app=firstapp" --show-labels
kubectl get pods -l "app=firstapp,tier=frontend" --show-labels
kubectl get pods -l "app in (firstapp)" --show-labels
kubectl get pods -l "!app" --show-labels # list pods that do not have the app key
kubectl get pods -l "app notin (firstapp)" --show-labels # inverse
kubectl get pods -l "app in (firstapp,secondapp)" --show-labels # or
kubectl get pods -l "app=firstapp,app=secondapp" --show-labels # and
```

#### label addition
```
kubectl label pods pod9 app=thirdapp # imperative: add a label
kubectl label pods pod9 app- # imperative: remove the "app" label
kubectl label --overwrite pods pod9 team=team3 # overwrite
kubectl label pods --all foo=bar # add the label to all pods
```

#### label node
- Nodes can be labelled (e.g. nodes that have a gpu or ssd can be labelled)
```
kubectl label nodes minikube hddtype=ssd
```

#### annotation
```
kubectl annotate pods annotationpod foo=bar ## add an annotation
kubectl annotate pods annotationpod foo- ## delete the annotation
```

#### namespace: object
```
kubectl get namespaces
kubectl get pods # pods in the default namespace
kubectl get pods --namespace kube-system # only kube-system
kubectl get pods -n kube-system # only kube-system
kubectl get pods --all-namespaces # all namespaces
kubectl get pods -A # all namespaces
kubectl exec -it namespacepod -n development -- /bin/sh # open a terminal in a pod in the "development" namespace
kubectl config set-context --current --namespace=development
kubectl config set-context --current --namespace=default
kubectl delete namespaces development
```

#### DEPLOYMENT: run more than 1 pod and keep them in sync
```
kubectl create deployment firstdeployment --image=nginx:latest --replicas=2
kubectl get deployment -w # watch continuously
kubectl get deployment
kubectl delete pods firstdeployment-pod
kubectl set image deployment/firstdeployment nginx=httpd # update the containers of the deployment
kubectl scale deployment firstdeployment --replicas=5 # manual scale, increase/decrease replicas
kubectl delete deployment firstdeployment # delete the deployment
```

#### Deployment from file
- There should be at least one entry under spec/selector for each deployment to choose its pods
- template/metadata/labels/app and spec/selector/matchLabels/app should have the same entries for the deployment-pod match
```
kubectl apply -f deploymenttemplate.yaml
```

#### rollout
```
kubectl rollout undo deployment firstdeployment # undo
```

#### record: save, return to the desired revision
```
kubectl apply -f deployrolling.yaml --record
kubectl edit deployment rolldeployment --record
kubectl set image deployment rolldeployment nginx=httpd:alpine --record=true
kubectl rollout history deployment rolldeployment # show the record history
kubectl rollout history deployment rolldeployment --revision=2 # show the history of revision 2
kubectl rollout undo deployment rolldeployment --to-revision=1 # roll back to the first revision
```

#### live rollout commands, logs on a different terminal
```
kubectl apply -f deployrolling.yaml
on another terminal: kubectl rollout status deployment rolldeployment -w
kubectl rollout pause deployment rolldeployment # pause the current deployment rollout/update
kubectl rollout resume deployment rolldeployment # resume the current deployment rollout/update
```

#### service
- Services get their virtual IPs from the cluster's service CIDR (API server flag: --service-cluster-ip-range, e.g. "10.100.0.0/16")
- 4 types of Service objects:
  - ClusterIP: directs traffic inside the cluster
  - NodePort: the service is reachable from outside through a port on each node
  - LoadBalancer: load balancing
  - ExternalName
```
kubectl apply -f serviceClusterIP.yaml
kubectl get service -o wide
```

#### service with command
```
kubectl expose deployment backend --type=ClusterIP --name=backend # ClusterIP type service creation
kubectl get service
kubectl expose deployment frontend --type=NodePort --name=frontend # NodePort type service creation
kubectl get service
```

#### service-endpoints
```
kubectl get endpoints # endpoints with the same names are created together with the services
kubectl describe endpoints frontend # show the ip addresses
kubectl delete pods frontend-xx-xx # when a pod is deleted, its ip is also deleted
kubectl scale deployment frontend --replicas=5 # new ips are added
kubectl scale deployment frontend --replicas=2
```

#### environment variables
```
spec:
  containers:
    - name: envpod
      image: ozgurozturknet/env:latest
      ports:
        - containerPort: 80
      env:
        - name: USER
          value: "Ozgur"
        - name: database
          value: "testdb.example.com"
```
```
kubectl apply -f podenv.yaml
kubectl get pods
kubectl exec envpod -- printenv ## print the env. variables
kubectl exec -it firstpod -- /bin/sh
kubectl port-forward pod/envpod 8080:80 # port forwarding
kubectl delete -f podenv.yaml
```

#### volume
- ephemeral volume (temporary volume): it can be reached from more than one container in the pod; when the pod is deleted, the volume is also deleted (like a cache)
- 2 types of ephemeral volume:
  - 1. emptydir (creates an empty directory on the node; this volume is mounted into the container)
  - 2. hostpath: a file path on the worker node (worker PC); more than one file or directory can be connected

##### emptydir volume:
```
volumes:
  - name: cache-vol
    emptyDir: {}
```

##### container mount:
```
- name: sidecar
  image: busybox
  command: ["/bin/sh"]
  args: ["-c", "sleep 3600"]
  volumeMounts:
    - name: cache-vol
      mountPath: /tmp/log
```
```
kubectl apply -f podvolumeemptydir.yaml
kubectl get pods -w
kubectl exec -it emptydir -c frontend -- bash
kubectl exec emptydir -c frontend -- rm -rf healthcheck # the healthcheck file is deleted, and the container is restarted
```

##### hostpath type:
```
containers:
  volumeMounts:
    - name: directory-vol
      mountPath: /dir1 # on the container: /dir1
    - name: dircreate-vol
      mountPath: /cache # on the container: /cache
    - name: file-vol
      mountPath: /cache/config.json

volumes:
  - name: directory-vol
    hostPath:
      path: /tmp # worker node /tmp directory, Directory type volume
      type: Directory
  - name: dircreate-vol
    hostPath:
      path: /cache # worker node /cache directory, DirectoryOrCreate type volume
      type: DirectoryOrCreate
  - name: file-vol
    hostPath:
      path: /cache/config.json
      type: FileOrCreate
```
```
kubectl apply -f podvolumehostpath.yaml
kubectl exec -it hostpath -c hostpathcontainer -- bash
```

#### secret: declarative way
```
kubectl apply -f secret.yaml
kubectl get secrets
kubectl describe secret mysecret
```

#### secret: imperative (cmd)
```
kubectl create secret generic mysecret2 --from-literal=db_server=db.example.com --from-literal=db_username=admin --from-literal=db_password=P@ssw0rd!
kubectl create secret generic mysecret4 --from-file=config.json # create from config.json, which includes the password and username
```

#### taint and toleration
```
kubectl describe nodes minikube
kubectl taint node minikube platform=production:NoSchedule # add a taint
kubectl taint node minikube platform- # delete the taint
```

#### connect to a pod with bash and apt install
```
kubectl exec -it PodName -- bash
apt update
apt install net-tools
apt install iputils-ping
ifconfig
ping x.x.x.x
```

--------------------------------------------------------------------------------
/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install-docker-ce.ps1:
--------------------------------------------------------------------------------
# Microsoft: https://github.com/microsoft/Windows-Containers/blob/Main/helpful_tools/Install-DockerCE/install-docker-ce.ps1
############################################################
# Script to install the community edition of docker on Windows
############################################################

<#
.NOTES
Copyright (c) Microsoft Corporation. All rights reserved.

Use of this sample source code is subject to the terms of the Microsoft
license agreement under which you licensed this sample source code.
If 12 | you did not accept the terms of the license agreement, you are not 13 | authorized to use this sample source code. For the terms of the license, 14 | please see the license agreement between you and Microsoft or, if applicable, 15 | see the LICENSE.RTF on your install media or the root of your tools installation. 16 | THE SAMPLE SOURCE CODE IS PROVIDED "AS IS", WITH NO WARRANTIES. 17 | 18 | .SYNOPSIS 19 | Installs the prerequisites for creating Windows containers 20 | 21 | .DESCRIPTION 22 | Installs the prerequisites for creating Windows containers 23 | 24 | .PARAMETER DockerPath 25 | Path to Docker.exe, can be local or URI 26 | 27 | .PARAMETER DockerDPath 28 | Path to DockerD.exe, can be local or URI 29 | 30 | .PARAMETER DockerVersion 31 | Version of docker to pull from download.docker.com - ! OVERRIDDEN BY DockerPath & DockerDPath 32 | 33 | .PARAMETER ExternalNetAdapter 34 | Specify a specific network adapter to bind to a DHCP network 35 | 36 | .PARAMETER SkipDefaultHost 37 | Prevents setting localhost as the default network configuration 38 | 39 | .PARAMETER Force 40 | If a restart is required, forces an immediate restart. 41 | 42 | .PARAMETER HyperV 43 | If passed, prepare the machine for Hyper-V containers 44 | 45 | .PARAMETER NATSubnet 46 | Use to override the default Docker NAT Subnet when in NAT mode. 47 | 48 | .PARAMETER NoRestart 49 | If a restart is required the script will terminate and will not reboot the machine 50 | 51 | .PARAMETER ContainerBaseImage 52 | Use this to specify the URI of the container base image you wish to pre-pull 53 | 54 | .PARAMETER Staging 55 | 56 | .PARAMETER TransparentNetwork 57 | If passed, use DHCP configuration. Otherwise, will use default docker network (NAT). (alias -UseDHCP) 58 | 59 | .PARAMETER TarPath 60 | Path to the .tar that is the base image to load into Docker. 61 | 62 | .EXAMPLE 63 | .\install-docker-ce.ps1 64 | 65 | #> 66 | #Requires -Version 5.0 67 | 68 | [CmdletBinding(DefaultParameterSetName="Standard")] 69 | param( 70 | [string] 71 | [ValidateNotNullOrEmpty()] 72 | $DockerPath = "default", 73 | 74 | [string] 75 | [ValidateNotNullOrEmpty()] 76 | $DockerDPath = "default", 77 | 78 | [string] 79 | [ValidateNotNullOrEmpty()] 80 | $DockerVersion = "latest", 81 | 82 | [string] 83 | $ExternalNetAdapter, 84 | 85 | [switch] 86 | $Force, 87 | 88 | [switch] 89 | $HyperV, 90 | 91 | [switch] 92 | $SkipDefaultHost, 93 | 94 | [string] 95 | $NATSubnet, 96 | 97 | [switch] 98 | $NoRestart, 99 | 100 | [string] 101 | $ContainerBaseImage, 102 | 103 | [Parameter(ParameterSetName="Staging", Mandatory)] 104 | [switch] 105 | $Staging, 106 | 107 | [switch] 108 | [alias("UseDHCP")] 109 | $TransparentNetwork, 110 | 111 | [string] 112 | [ValidateNotNullOrEmpty()] 113 | $TarPath 114 | ) 115 | 116 | $global:RebootRequired = $false 117 | $global:ErrorFile = "$pwd\Install-ContainerHost.err" 118 | $global:BootstrapTask = "ContainerBootstrap" 119 | $global:HyperVImage = "NanoServer" 120 | $global:AdminPriviledges = $false 121 | 122 | $global:DefaultDockerLocation = "https://download.docker.com/win/static/stable/x86_64/" 123 | $global:DockerDataPath = "$($env:ProgramData)\docker" 124 | $global:DockerServiceName = "docker" 125 | 126 | function 127 | Restart-And-Run() 128 | { 129 | Test-Admin 130 | 131 | Write-Output "Restart is required; restarting now..." 
132 | 133 | $argList = $script:MyInvocation.Line.replace($script:MyInvocation.InvocationName, "") 134 | 135 | # 136 | # Update .\ to the invocation directory for the bootstrap 137 | # 138 | $scriptPath = $script:MyInvocation.MyCommand.Path 139 | 140 | $argList = $argList -replace "\.\\", "$pwd\" 141 | 142 | if ((Split-Path -Parent -Path $scriptPath) -ne $pwd) 143 | { 144 | $sourceScriptPath = $scriptPath 145 | $scriptPath = "$pwd\$($script:MyInvocation.MyCommand.Name)" 146 | 147 | Copy-Item $sourceScriptPath $scriptPath 148 | } 149 | 150 | Write-Output "Creating scheduled task action ($scriptPath $argList)..." 151 | $action = New-ScheduledTaskAction -Execute "powershell.exe" -Argument "-NoExit $scriptPath $argList" 152 | 153 | Write-Output "Creating scheduled task trigger..." 154 | $trigger = New-ScheduledTaskTrigger -AtLogOn 155 | 156 | Write-Output "Registering script to re-run at next user logon..." 157 | Register-ScheduledTask -TaskName $global:BootstrapTask -Action $action -Trigger $trigger -RunLevel Highest | Out-Null 158 | 159 | try 160 | { 161 | if ($Force) 162 | { 163 | Restart-Computer -Force 164 | } 165 | else 166 | { 167 | Restart-Computer 168 | } 169 | } 170 | catch 171 | { 172 | Write-Error $_ 173 | 174 | Write-Output "Please restart your computer manually to continue script execution." 175 | } 176 | 177 | exit 178 | } 179 | 180 | 181 | function 182 | Install-Feature 183 | { 184 | [CmdletBinding()] 185 | param( 186 | [ValidateNotNullOrEmpty()] 187 | [string] 188 | $FeatureName 189 | ) 190 | 191 | Write-Output "Querying status of Windows feature: $FeatureName..." 192 | if (Get-Command Get-WindowsFeature -ErrorAction SilentlyContinue) 193 | { 194 | if ((Get-WindowsFeature $FeatureName).Installed) 195 | { 196 | Write-Output "Feature $FeatureName is already enabled." 197 | } 198 | else 199 | { 200 | Test-Admin 201 | 202 | Write-Output "Enabling feature $FeatureName..." 203 | } 204 | 205 | $featureInstall = Add-WindowsFeature $FeatureName 206 | 207 | if ($featureInstall.RestartNeeded -eq "Yes") 208 | { 209 | $global:RebootRequired = $true; 210 | } 211 | } 212 | else 213 | { 214 | if ((Get-WindowsOptionalFeature -Online -FeatureName $FeatureName).State -eq "Disabled") 215 | { 216 | if (Test-Nano) 217 | { 218 | throw "This NanoServer deployment does not include $FeatureName. Please add the appropriate package" 219 | } 220 | 221 | Test-Admin 222 | 223 | Write-Output "Enabling feature $FeatureName..." 224 | $feature = Enable-WindowsOptionalFeature -Online -FeatureName $FeatureName -All -NoRestart 225 | 226 | if ($feature.RestartNeeded -eq "True") 227 | { 228 | $global:RebootRequired = $true; 229 | } 230 | } 231 | else 232 | { 233 | Write-Output "Feature $FeatureName is already enabled." 234 | 235 | if (Test-Nano) 236 | { 237 | # 238 | # Get-WindowsEdition is not present on Nano. On Nano, we assume reboot is not needed 239 | # 240 | } 241 | elseif ((Get-WindowsEdition -Online).RestartNeeded) 242 | { 243 | $global:RebootRequired = $true; 244 | } 245 | } 246 | } 247 | } 248 | 249 | 250 | function 251 | New-ContainerTransparentNetwork 252 | { 253 | if ($ExternalNetAdapter) 254 | { 255 | $netAdapter = (Get-NetAdapter |? {$_.Name -eq "$ExternalNetAdapter"})[0] 256 | } 257 | else 258 | { 259 | $netAdapter = (Get-NetAdapter |? {($_.Status -eq 'Up') -and ($_.ConnectorPresent)})[0] 260 | } 261 | 262 | Write-Output "Creating container network (Transparent)..." 
263 | New-ContainerNetwork -Name "Transparent" -Mode Transparent -NetworkAdapterName $netAdapter.Name | Out-Null 264 | } 265 | 266 | 267 | function 268 | Install-ContainerHost 269 | { 270 | "If this file exists when Install-ContainerHost.ps1 exits, the script failed!" | Out-File -FilePath $global:ErrorFile 271 | 272 | if (Test-Client) 273 | { 274 | if (-not $HyperV) 275 | { 276 | Write-Output "Enabling Hyper-V containers by default for Client SKU" 277 | $HyperV = $true 278 | } 279 | } 280 | # 281 | # Validate required Windows features 282 | # 283 | Install-Feature -FeatureName Containers 284 | 285 | if ($HyperV) 286 | { 287 | Install-Feature -FeatureName Hyper-V 288 | } 289 | 290 | if ($global:RebootRequired) 291 | { 292 | if ($NoRestart) 293 | { 294 | Write-Warning "A reboot is required; stopping script execution" 295 | exit 296 | } 297 | 298 | Restart-And-Run 299 | } 300 | 301 | # 302 | # Unregister the bootstrap task, if it was previously created 303 | # 304 | if ((Get-ScheduledTask -TaskName $global:BootstrapTask -ErrorAction SilentlyContinue) -ne $null) 305 | { 306 | Unregister-ScheduledTask -TaskName $global:BootstrapTask -Confirm:$false 307 | } 308 | 309 | # 310 | # Configure networking 311 | # 312 | if ($($PSCmdlet.ParameterSetName) -ne "Staging") 313 | { 314 | if ($TransparentNetwork) 315 | { 316 | Write-Output "Waiting for Hyper-V Management..." 317 | $networks = $null 318 | 319 | try 320 | { 321 | $networks = Get-ContainerNetwork -ErrorAction SilentlyContinue 322 | } 323 | catch 324 | { 325 | # 326 | # If we can't query network, we are in bootstrap mode. Assume no networks 327 | # 328 | } 329 | 330 | if ($networks.Count -eq 0) 331 | { 332 | Write-Output "Enabling container networking..." 333 | New-ContainerTransparentNetwork 334 | } 335 | else 336 | { 337 | Write-Output "Networking is already configured. Confirming configuration..." 338 | 339 | $transparentNetwork = $networks |? { $_.Mode -eq "Transparent" } 340 | 341 | if ($transparentNetwork -eq $null) 342 | { 343 | Write-Output "We didn't find a configured external network; configuring now..." 344 | New-ContainerTransparentNetwork 345 | } 346 | else 347 | { 348 | if ($ExternalNetAdapter) 349 | { 350 | $netAdapters = (Get-NetAdapter |? {$_.Name -eq "$ExternalNetAdapter"}) 351 | 352 | if ($netAdapters.Count -eq 0) 353 | { 354 | throw "No adapters found that match the name $ExternalNetAdapter" 355 | } 356 | 357 | $netAdapter = $netAdapters[0] 358 | $transparentNetwork = $networks |? { $_.NetworkAdapterName -eq $netAdapter.InterfaceDescription } 359 | 360 | if ($transparentNetwork -eq $null) 361 | { 362 | throw "One or more external networks are configured, but not on the requested adapter ($ExternalNetAdapter)" 363 | } 364 | 365 | Write-Output "Configured transparent network found: $($transparentNetwork.Name)" 366 | } 367 | else 368 | { 369 | Write-Output "Configured transparent network found: $($transparentNetwork.Name)" 370 | } 371 | } 372 | } 373 | } 374 | } 375 | 376 | # 377 | # Install, register, and start Docker 378 | # 379 | if (Test-Docker) 380 | { 381 | Write-Output "Docker is already installed." 
382 | } 383 | else 384 | { 385 | if ($NATSubnet) 386 | { 387 | Install-Docker -DockerPath $DockerPath -DockerDPath $DockerDPath -NATSubnet $NATSubnet -ContainerBaseImage $ContainerBaseImage 388 | } 389 | else 390 | { 391 | Install-Docker -DockerPath $DockerPath -DockerDPath $DockerDPath -ContainerBaseImage $ContainerBaseImage 392 | } 393 | } 394 | 395 | if ($TarPath) 396 | { 397 | cmd /c "docker load -i `"$TarPath`"" 398 | } 399 | 400 | Remove-Item $global:ErrorFile 401 | 402 | Write-Output "Script complete!" 403 | } 404 | 405 | function 406 | Copy-File 407 | { 408 | [CmdletBinding()] 409 | param( 410 | [string] 411 | $SourcePath, 412 | 413 | [string] 414 | $DestinationPath 415 | ) 416 | 417 | if ($SourcePath -eq $DestinationPath) 418 | { 419 | return 420 | } 421 | 422 | if (Test-Path $SourcePath) 423 | { 424 | Copy-Item -Path $SourcePath -Destination $DestinationPath 425 | } 426 | elseif (($SourcePath -as [System.URI]).AbsoluteURI -ne $null) 427 | { 428 | if (Test-Nano) 429 | { 430 | $handler = New-Object System.Net.Http.HttpClientHandler 431 | $client = New-Object System.Net.Http.HttpClient($handler) 432 | $client.Timeout = New-Object System.TimeSpan(0, 30, 0) 433 | $cancelTokenSource = [System.Threading.CancellationTokenSource]::new() 434 | $responseMsg = $client.GetAsync([System.Uri]::new($SourcePath), $cancelTokenSource.Token) 435 | $responseMsg.Wait() 436 | 437 | if (!$responseMsg.IsCanceled) 438 | { 439 | $response = $responseMsg.Result 440 | if ($response.IsSuccessStatusCode) 441 | { 442 | $downloadedFileStream = [System.IO.FileStream]::new($DestinationPath, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write) 443 | $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream) 444 | $copyStreamOp.Wait() 445 | $downloadedFileStream.Close() 446 | if ($copyStreamOp.Exception -ne $null) 447 | { 448 | throw $copyStreamOp.Exception 449 | } 450 | } 451 | } 452 | } 453 | elseif ($PSVersionTable.PSVersion.Major -ge 5) 454 | { 455 | # 456 | # We disable progress display because it kills performance for large downloads (at least on 64-bit PowerShell) 457 | # 458 | $ProgressPreference = 'SilentlyContinue' 459 | Invoke-WebRequest -Uri $SourcePath -OutFile $DestinationPath -UseBasicParsing 460 | $ProgressPreference = 'Continue' 461 | } 462 | else 463 | { 464 | $webClient = New-Object System.Net.WebClient 465 | $webClient.DownloadFile($SourcePath, $DestinationPath) 466 | } 467 | } 468 | else 469 | { 470 | throw "Cannot copy from $SourcePath" 471 | } 472 | } 473 | 474 | 475 | function 476 | Test-Admin() 477 | { 478 | # Get the ID and security principal of the current user account 479 | $myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent() 480 | $myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID) 481 | 482 | # Get the security principal for the Administrator role 483 | $adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator 484 | 485 | # Check to see if we are currently running "as Administrator" 486 | if ($myWindowsPrincipal.IsInRole($adminRole)) 487 | { 488 | $global:AdminPriviledges = $true 489 | return 490 | } 491 | else 492 | { 493 | # 494 | # We are not running "as Administrator" 495 | # Exit from the current, unelevated, process 496 | # 497 | throw "You must run this script as administrator" 498 | } 499 | } 500 | 501 | 502 | function 503 | Test-Client() 504 | { 505 | return (-not ((Get-Command Get-WindowsFeature -ErrorAction SilentlyContinue) -or (Test-Nano))) 506 | } 507 | 508 | 509 | function 510 | 
Test-Nano() 511 | { 512 | $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId 513 | 514 | return (($EditionId -eq "ServerStandardNano") -or 515 | ($EditionId -eq "ServerDataCenterNano") -or 516 | ($EditionId -eq "NanoServer") -or 517 | ($EditionId -eq "ServerTuva")) 518 | } 519 | 520 | 521 | function 522 | Wait-Network() 523 | { 524 | $connectedAdapter = Get-NetAdapter |? ConnectorPresent 525 | 526 | if ($connectedAdapter -eq $null) 527 | { 528 | throw "No connected network" 529 | } 530 | 531 | $startTime = Get-Date 532 | $timeElapsed = $(Get-Date) - $startTime 533 | 534 | while ($($timeElapsed).TotalMinutes -lt 5) 535 | { 536 | $readyNetAdapter = $connectedAdapter |? Status -eq 'Up' 537 | 538 | if ($readyNetAdapter -ne $null) 539 | { 540 | return; 541 | } 542 | 543 | Write-Output "Waiting for network connectivity..." 544 | Start-Sleep -sec 5 545 | 546 | $timeElapsed = $(Get-Date) - $startTime 547 | } 548 | 549 | throw "Network not connected after 5 minutes" 550 | } 551 | 552 | 553 | function 554 | Install-Docker() 555 | { 556 | [CmdletBinding()] 557 | param( 558 | [string] 559 | [ValidateNotNullOrEmpty()] 560 | $DockerPath = "default", 561 | 562 | [string] 563 | [ValidateNotNullOrEmpty()] 564 | $DockerDPath = "default", 565 | 566 | [string] 567 | [ValidateNotNullOrEmpty()] 568 | $NATSubnet, 569 | 570 | [switch] 571 | $SkipDefaultHost, 572 | 573 | [string] 574 | $ContainerBaseImage 575 | ) 576 | 577 | Test-Admin 578 | 579 | #If one of these are set to default then the whole .zip needs to be downloaded anyways. 580 | Write-Output "DOCKER $DockerPath" 581 | if ($DockerPath -eq "default" -or $DockerDPath -eq "default") { 582 | Write-Output "Checking Docker versions" 583 | #Get the list of .zip packages available from docker. 584 | $availableVersions = ((Invoke-WebRequest -Uri $DefaultDockerLocation -UseBasicParsing).Links | Where-Object {$_.href -like "docker*"}).href | Sort-Object -Descending 585 | 586 | #Parse the versions from the file names 587 | $availableVersions = ($availableVersions | Select-String -Pattern "docker-(\d+\.\d+\.\d+).+" -AllMatches | Select-Object -Expand Matches | %{ $_.Groups[1].Value }) 588 | $version = $availableVersions[0] 589 | 590 | if($DockerVersion -ne "latest") { 591 | $version = $DockerVersion 592 | if(!($availableVersions | Select-String $DockerVersion)) { 593 | Write-Error "Docker version supplied $DockerVersion was invalid, please choose from the list of available versions: $availableVersions" 594 | throw "Invalid docker version supplied." 
595 | } 596 | } 597 | 598 | $zipUrl = $global:DefaultDockerLocation + "docker-$version.zip" 599 | $destinationFolder = "$env:UserProfile\DockerDownloads" 600 | 601 | if(!(Test-Path "$destinationFolder")) { 602 | md -Path $destinationFolder | Out-Null 603 | } elseif(Test-Path "$destinationFolder\docker-$version") { 604 | Remove-Item -Recurse -Force "$destinationFolder\docker-$version" 605 | } 606 | 607 | Write-Output "Downloading $zipUrl to $destinationFolder\docker-$version.zip" 608 | Copy-File -SourcePath $zipUrl -DestinationPath "$destinationFolder\docker-$version.zip" 609 | Expand-Archive -Path "$destinationFolder\docker-$version.zip" -DestinationPath "$destinationFolder\docker-$version" 610 | 611 | if($DockerPath -eq "default") { 612 | $DockerPath = "$destinationFolder\docker-$version\docker\docker.exe" 613 | } 614 | if($DockerDPath -eq "default") { 615 | $DockerDPath = "$destinationFolder\docker-$version\docker\dockerd.exe" 616 | } 617 | } 618 | 619 | Write-Output "Installing Docker... $DockerPath" 620 | Copy-File -SourcePath $DockerPath -DestinationPath $env:windir\System32\docker.exe 621 | 622 | Write-Output "Installing Docker daemon... $DockerDPath" 623 | Copy-File -SourcePath $DockerDPath -DestinationPath $env:windir\System32\dockerd.exe 624 | 625 | $dockerConfigPath = Join-Path $global:DockerDataPath "config" 626 | 627 | if (!(Test-Path $dockerConfigPath)) 628 | { 629 | md -Path $dockerConfigPath | Out-Null 630 | } 631 | 632 | # 633 | # Register the docker service. 634 | # Configuration options should be placed at %programdata%\docker\config\daemon.json 635 | # 636 | Write-Output "Configuring the docker service..." 637 | 638 | $daemonSettings = New-Object PSObject 639 | 640 | $certsPath = Join-Path $global:DockerDataPath "certs.d" 641 | 642 | if (Test-Path $certsPath) 643 | { 644 | $daemonSettings | Add-Member NoteProperty hosts @("npipe://", "0.0.0.0:2376") 645 | $daemonSettings | Add-Member NoteProperty tlsverify true 646 | $daemonSettings | Add-Member NoteProperty tlscacert (Join-Path $certsPath "ca.pem") 647 | $daemonSettings | Add-Member NoteProperty tlscert (Join-Path $certsPath "server-cert.pem") 648 | $daemonSettings | Add-Member NoteProperty tlskey (Join-Path $certsPath "server-key.pem") 649 | } 650 | elseif (!$SkipDefaultHost.IsPresent) 651 | { 652 | # Default local host 653 | $daemonSettings | Add-Member NoteProperty hosts @("npipe://") 654 | } 655 | 656 | if ($NATSubnet -ne "") 657 | { 658 | $daemonSettings | Add-Member NoteProperty fixed-cidr $NATSubnet 659 | } 660 | 661 | $daemonSettingsFile = Join-Path $dockerConfigPath "daemon.json" 662 | 663 | $daemonSettings | ConvertTo-Json | Out-File -FilePath $daemonSettingsFile -Encoding ASCII 664 | 665 | & dockerd --register-service --service-name $global:DockerServiceName 666 | 667 | Start-Docker 668 | 669 | # 670 | # Waiting for docker to come to steady state 671 | # 672 | Wait-Docker 673 | 674 | if(-not [string]::IsNullOrEmpty($ContainerBaseImage)) { 675 | Write-Output "Attempting to pull specified base image: $ContainerBaseImage" 676 | docker pull $ContainerBaseImage 677 | } 678 | 679 | Write-Output "The following images are present on this machine:" 680 | 681 | docker images -a | Write-Output 682 | 683 | Write-Output "" 684 | } 685 | 686 | function 687 | Start-Docker() 688 | { 689 | Start-Service -Name $global:DockerServiceName 690 | } 691 | 692 | 693 | function 694 | Stop-Docker() 695 | { 696 | Stop-Service -Name $global:DockerServiceName 697 | } 698 | 699 | 700 | function 701 | Test-Docker() 702 | { 703 | $service = 
Get-Service -Name $global:DockerServiceName -ErrorAction SilentlyContinue 704 | 705 | return ($service -ne $null) 706 | } 707 | 708 | 709 | function 710 | Wait-Docker() 711 | { 712 | Write-Output "Waiting for Docker daemon..." 713 | $dockerReady = $false 714 | $startTime = Get-Date 715 | 716 | while (-not $dockerReady) 717 | { 718 | try 719 | { 720 | docker version | Out-Null 721 | 722 | if (-not $?) 723 | { 724 | throw "Docker daemon is not running yet" 725 | } 726 | 727 | $dockerReady = $true 728 | } 729 | catch 730 | { 731 | $timeElapsed = $(Get-Date) - $startTime 732 | 733 | if ($($timeElapsed).TotalMinutes -ge 1) 734 | { 735 | throw "Docker Daemon did not start successfully within 1 minute." 736 | } 737 | 738 | # Swallow error and try again 739 | Start-Sleep -sec 1 740 | } 741 | } 742 | Write-Output "Successfully connected to Docker Daemon." 743 | } 744 | 745 | try 746 | { 747 | Install-ContainerHost 748 | } 749 | catch 750 | { 751 | Write-Error $_ 752 | } 753 |
-------------------------------------------------------------------------------- /K8s-Kubeadm-Cluster-Setup.md: --------------------------------------------------------------------------------

## LAB: K8s Cluster Setup with Kubeadm and Containerd

This scenario shows how to create a K8s cluster on virtual machines (Multipass, kubeadm, containerd).

**Easy way to create a K8s cluster with Ubuntu (Control-Plane, Workers) and Windows Servers:**

- Ubuntu 20.04 Installation Files (updated: K8s 1.26.2, Calico 3.25.0, containerd 1.6.10) without using a corporate proxy:
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10/install.sh
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10/master.sh
- Ubuntu 24.04 Installation Files (updated: K8s 1.32.0, Calico 3.29.1, containerd 1.7.24) without using a corporate proxy:
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install-ubuntu24.04-k8s1.32.sh
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24/master-ubuntu24.04-k8s1.32.sh
- Windows 2019 Server Installation Files (K8s 1.23.5, Calico 3.25.0, Docker as container runtime) without using a corporate proxy:
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install1.ps1
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install2.ps1
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install-docker-ce.ps1
- Windows 2022 Server Installation Files (K8s 1.32.0, Calico 3.29.1, containerd 1.7.24) without using a corporate proxy:
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install1.ps1
  - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install2.ps1

**IMPORTANT:**
- If your cluster is behind a corporate proxy, you should add proxy settings to the **environment variables, the Docker config, and the containerd config** (a containerd sketch follows this list; the Docker side is covered in Section 5).
- Links in the script files may change over time (e.g. Calico has updated its links in the past).
- Important Notes from K8s:
  - K8s on Windows: https://kubernetes.io/docs/concepts/windows/intro/
  - Supported Versions: https://kubernetes.io/docs/concepts/windows/intro/#windows-os-version-support
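For the containerd side, here is a minimal sketch of a proxy configuration using a systemd drop-in; `http://proxy.example.com:3128` is a placeholder address, replace it with your own proxy:

```
# Create a systemd drop-in that passes proxy settings to containerd
sudo mkdir -p /etc/systemd/system/containerd.service.d
cat <<EOF | sudo tee /etc/systemd/system/containerd.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://proxy.example.com:3128"
Environment="HTTPS_PROXY=http://proxy.example.com:3128"
Environment="NO_PROXY=localhost,127.0.0.1,10.0.0.0/8,192.168.0.0/16"
EOF
# Reload units and restart containerd so the settings take effect
sudo systemctl daemon-reload
sudo systemctl restart containerd
```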
### Table of Contents
- [Creating Cluster With Kubeadm, Containerd](#creating)
  - [Multipass Installation - Creating VM](#creatingvm)
  - [IP-Tables Bridged Traffic Configuration](#ip-tables)
  - [Install Containerd](#installcontainerd)
  - [Install KubeAdm](#installkubeadm)
  - [Install Kubernetes Cluster](#installkubernetes)
  - [Install Kubernetes Network Infrastructure](#network)
  - [(Optional) If you need Windows Node: Creating Windows Node](#creatingWindows)
- [Joining New K8s Worker Node to Existing Cluster](#joining)
  - [Brute-Force Method](#bruteforce)
  - [Easy Way to Get Join Command](#easy)
- [IP address changes in Kubernetes Master Node](#master_ip_changed)
- [Removing the Worker Node from Cluster](#removing)
- [Installing Docker on an Existing Cluster & Running a Local Registry for Storing Local Images](#docker_registry)
  - [Installing Docker](#installingdocker)
  - [Running Docker Registry](#dockerregistry)
- [Pulling Images from the Local Docker Registry and Configuring Containerd](#local_image)
- [NFS Server Connection for Persistent Volume](#nfs_server)

## 1. Creating Cluster With Kubeadm, Containerd

#### 1.1 Multipass Installation - Creating VM

- "Multipass is a mini-cloud on your workstation using native hypervisors of all the supported platforms (Windows, macOS and Linux)."
- Fast to install and easy to use.
- **Link:** https://multipass.run/

```
# creating master, worker1
# -c => cpu, -m => memory, -d => disk space
multipass launch --name master -c 2 -m 2G -d 10G
multipass launch --name worker1 -c 2 -m 2G -d 10G
```

![image](https://user-images.githubusercontent.com/10358317/156150337-2f4b3ac9-df42-4567-a848-6869362a3001.png)

```
# get shell on master
multipass shell master
# get shell on worker1
multipass shell worker1
```

![image](https://user-images.githubusercontent.com/10358317/156150843-db217ba0-8fff-4a77-9f3d-09f9f71314df.png)

#### 1.2 IP-Tables Bridged Traffic Configuration

- Run on ALL nodes:
```
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
```

#### 1.3 Install Containerd

- Run on ALL nodes:
```
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Setup required sysctl params; these persist across reboots.
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Apply sysctl params without reboot
sudo sysctl --system

# Install and configure containerd
sudo apt-get update
sudo apt-get install -y containerd
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo systemctl restart containerd
```

#### 1.4 Install KubeAdm

- Run on ALL nodes:
```
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
```

![image](https://user-images.githubusercontent.com/10358317/156160934-11c45c68-a5e5-46fd-bde7-96301277b906.png)

![image](https://user-images.githubusercontent.com/10358317/156160979-f4f79703-9e60-4b59-b8fe-5fbd14969622.png)

![image](https://user-images.githubusercontent.com/10358317/156161071-59d5f19a-ca62-48a2-97db-73de53e2d29d.png)

![image](https://user-images.githubusercontent.com/10358317/156161142-e7ba1322-9cf8-4edf-9018-082fa5b2f76a.png)
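Before moving on, a quick optional sanity check that the tooling is installed and pinned (the exact version output will differ on your machines):

```
# Confirm tool versions on each node
kubeadm version
kubectl version --client
kubelet --version
# kubelet, kubeadm and kubectl should be listed as held back from upgrades
apt-mark showhold
```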
#### 1.5 Install Kubernetes Cluster

- Run on ALL nodes:
```
sudo kubeadm config images pull
```

![image](https://user-images.githubusercontent.com/10358317/156161542-7da94e9a-f124-4e05-896d-0c9fb2208729.png)

- From worker1, ping the master to learn the master's IP:
```
ping master
```
![image](https://user-images.githubusercontent.com/10358317/156161683-63d2d56a-e5b1-4826-9665-e872a333d520.png)

- Run on Master:
```
sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=<master-ip> --control-plane-endpoint=<master-ip>
# sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=172.31.45.74 --control-plane-endpoint=172.31.45.74
```

![image](https://user-images.githubusercontent.com/10358317/156162236-15fa0c78-dccc-4bfb-8c0b-179b86a8ed31.png)

- After the kubeadm init command completes, the master node responds with the following:

![image](https://user-images.githubusercontent.com/10358317/156163029-e31ea507-9912-4377-a93d-93863c37039a.png)

- On the Master node, run:

```
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
```

![image](https://user-images.githubusercontent.com/10358317/156163241-66fed5a3-593e-4efd-8f12-2d024ef7554c.png)

- On the worker node, run the join command (the tokens will differ in your case; take them from the kubeadm init output):

```
sudo kubeadm join 172.31.45.74:6443 --token w7nntd.7t6qg4cd418wzkup \
	--discovery-token-ca-cert-hash sha256:1f03886e5a28fb9716e01794b4a01144f362bf431220f15ca98bed2f5a44e91b
```

- If another master node is required, copy the control-plane join line instead (again, the tokens will differ in your case):

```
sudo kubeadm join 172.31.45.74:6443 --token w7nntd.7t6qg4cd418wzkup \
	--discovery-token-ca-cert-hash sha256:1f03886e5a28fb9716e01794b4a01144f362bf431220f15ca98bed2f5a44e91b \
	--control-plane
```

![image](https://user-images.githubusercontent.com/10358317/156163626-ae2baf3f-43e8-4747-8fdc-80738603adbe.png)

- On Master node:

![image](https://user-images.githubusercontent.com/10358317/156163717-c9c771c1-a850-4706-80dd-7fa85b890c2a.png)


#### 1.6 Install Kubernetes Network Infrastructure

- Calico is used as the network plugin on K8s; others (Flannel, Weave) could also be used.
- Run only on the Master. In our examples, we are using Calico instead of Flannel:
  - Calico:
  ```
  kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
  kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
  ```
  - Flannel:
  ```
  kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
  ```

![image](https://user-images.githubusercontent.com/10358317/156164127-d21ff5be-35d6-4ec6-a507-2ae0155031ac.png)

![image](https://user-images.githubusercontent.com/10358317/156164265-1d13bab5-6c55-4421-b7a8-e835d5d0ebfc.png)

- After the network plugin is running, the nodes become ready. Only the Master node is used to get information about the cluster.

![image](https://user-images.githubusercontent.com/10358317/156164572-5525bda3-6ff5-49a2-9a2f-392a804b4da2.png)

![image](https://user-images.githubusercontent.com/10358317/156165250-f1647540-467a-445d-8381-dd320922a70d.png)
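As an optional check (assuming Calico was installed through the tigera-operator manifests above), verify that the Calico pods come up and all nodes reach the Ready state:

```
# The operator creates the Calico pods in the calico-system namespace
kubectl get pods -n calico-system
# All nodes should eventually report STATUS=Ready
kubectl get nodes -o wide
```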
##### 1.6.1 If You Have a Windows Node to Add to Your Cluster:

- Instead of running Calico as above, run it this way on the Master node:
```
# Download Calico CNI
curl https://docs.projectcalico.org/manifests/calico.yaml > calico.yaml
# Apply Calico CNI
kubectl apply -f ./calico.yaml
```

Run on the Master Node:
```
# required to add a windows node
sudo -i
cd /usr/local/bin/
curl -o calicoctl -O -L "https://github.com/projectcalico/calicoctl/releases/download/v3.19.1/calicoctl"
chmod +x calicoctl
exit

# Disable "IPinIP":
calicoctl get ipPool default-ipv4-ippool -o yaml > ippool.yaml
nano ippool.yaml # set ipipMode: Never
calicoctl apply -f ippool.yaml

kubectl get felixconfigurations.crd.projectcalico.org default -o yaml -n kube-system > felixconfig.yaml
nano felixconfig.yaml # set: "ipipEnabled: false"
kubectl apply -f felixconfig.yaml

# This is required to prevent Linux nodes from borrowing IP addresses from Windows nodes:
calicoctl ipam configure --strictaffinity=true
sudo reboot

kubectl cluster-info
kubectl get nodes -o wide
ssh <user>@<WindowsNodeIP> 'mkdir c:\k'
scp -r $HOME/.kube/config <user>@<WindowsNodeIP>:/k/ # send from the master node to the Windows PC; it is required while installing Calico
```

- Ref: https://github.com/gary-RR/my_YouTube_Kuberenetes_Hybird/blob/main/setupcluster.sh

#### (Optional) If you need Windows Node: Creating Windows Node

- Kubernetes requires at least Windows Server 2019 (https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/)
- Run the following in PowerShell with administrator privileges on the Windows nodes:

```
New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction OutBound -Action Allow
New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction InBound -Action Allow

Install-WindowsFeature -Name containers # required before installing docker
Restart-Computer -Force

.\install-docker-ce.ps1

Set-Service -Name docker -StartupType 'Automatic'

# Install additional Windows networking components

Install-WindowsFeature RemoteAccess
Install-WindowsFeature RSAT-RemoteAccess-PowerShell
Install-WindowsFeature Routing
Restart-Computer -Force
Install-RemoteAccess -VpnType RoutingOnly
Set-Service -Name RemoteAccess -StartupType 'Automatic'
Start-Service RemoteAccess

# Install Calico
mkdir c:\k
# Copy the Kubernetes kubeconfig file from the master node (default location: $HOME/.kube/config) to c:\k\config.

Invoke-WebRequest https://docs.projectcalico.org/scripts/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1

c:\install-calico-windows.ps1 -KubeVersion 1.23.5

# Verify that the Calico services are running.
Get-Service -Name CalicoNode
Get-Service -Name CalicoFelix

# Install and start the kubelet/kube-proxy services. Execute the following PowerShell script/commands.
C:\CalicoWindows\kubernetes\install-kube-services.ps1
Start-Service -Name kubelet
Start-Service -Name kube-proxy

# Copy kubectl.exe, kubeadm.exe, etc. to the folder below, which is on the path:
cp C:\k\*.exe C:\Users\<username>\AppData\Local\Microsoft\WindowsApps

# Test Win node #####################################
# List all cluster nodes
kubectl get nodes -o wide

# If the node is behind a corporate proxy, set the proxy variables (replace the placeholders):
[Environment]::SetEnvironmentVariable("HTTP_PROXY", "http://<proxy-server>:3128", [EnvironmentVariableTarget]::Machine)
[Environment]::SetEnvironmentVariable("HTTPS_PROXY", "http://<proxy-server>:3128", [EnvironmentVariableTarget]::Machine)
[Environment]::SetEnvironmentVariable("NO_PROXY", "192.168.*.*, <master-hostname>:6443, <master-ip>:6443, 172.24.*.*, 172.25.*.*, 10.*.*.*, localhost, 127.0.0.1, 0.0.0.0/8", [EnvironmentVariableTarget]::Machine)
Restart-Service docker
```

- Create a win-webserver.yaml file to test the Windows node and run it on the Windows 2019 server; details: https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/
- Ref: https://github.com/gary-RR/my_YouTube_Kuberenetes_Hybird/blob/main/Setting-ThingsUp-On-Windows-Server.sh

## 2. Joining New K8s Worker Node to Existing Cluster

### 2.1 Brute-Force Method

- If we have lost the join token, the discovery-token CA cert hash, and the API server address, we need to recover them before we can join a new node to the cluster.
- Here we add new nodes to the cluster created above. We need the join token, the discovery-token CA cert hash, and the API server advertise address; with this information, we'll create the join command for each node.
- Run on the Master to get the certificate and token information:

```
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
kubeadm token list
kubectl cluster-info
```

![image](https://user-images.githubusercontent.com/10358317/156349584-9fe2f41e-4368-43ef-9674-c78512230938.png)

- In this example, the token's TTL has 3 hours left (normally, a token expires in 24 hours), so we don't need to create a new one.
- If the token is expired, generate a new one with:

```
sudo kubeadm token create
kubeadm token list
```

- Create the join command for the worker nodes:

```
kubeadm join \
  <control-plane-host>:<control-plane-port> \
  --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash>
```

- In our case, we run the following command on both workers (worker2, worker3):

```
sudo kubeadm join 172.31.32.27:6443 --token 39g7sx.v589tv38nxhus74k --discovery-token-ca-cert-hash sha256:1db5d45337803e35e438cdcdd9ff77449fef3272381ee43784626f19c873d356
```

![image](https://user-images.githubusercontent.com/10358317/156350767-b14335d0-1d63-4ab1-a939-6eb47fadac9d.png)

![image](https://user-images.githubusercontent.com/10358317/156350852-d1df7b93-13aa-462d-8cce-51f3b9b6e553.png)

### 2.2 Easy Way to Get Join Command
- Run on the master node:
```
kubeadm token create --print-join-command
```
- Copy the join command above and paste it on **ALL worker nodes**.
- Then verify that the nodes are ready; run on the master:

```
kubectl get nodes
```

![image](https://user-images.githubusercontent.com/10358317/156351061-7c1af34b-63cd-49dc-a8a1-74679c765516.png)

- Ref: https://computingforgeeks.com/join-new-kubernetes-worker-node-to-existing-cluster/

## 3. IP address changes in Kubernetes Master Node
- After restarting the Master node, it is possible that the master's IP has changed. The K8s cluster API still points to the node's old IP, so you should reconfigure the cluster with the new IP.

- kubectl commands cannot reach the API:

![image](https://user-images.githubusercontent.com/10358317/156803085-e99717a4-da62-453f-97bb-fb86c09edaca.png)

- If you installed Docker for the Docker registry, you can remove the exited containers:

```
sudo docker rm $(sudo docker ps -a -f status=exited -q)
```

#### On Master Node:

```
sudo kubeadm reset
sudo kubeadm init --pod-network-cidr=192.168.0.0/16
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
```

- After kubeadm reset, if an error shows that some ports are still in use, kill the owning processes with the following commands, then run kubeadm init again:

```
sudo netstat -lnp | grep <port>
sudo kill <pid>
```

![image](https://user-images.githubusercontent.com/10358317/156803554-21741c6e-74bb-4902-9130-bc835b91e76f.png)

![image](https://user-images.githubusercontent.com/10358317/156803646-f943be3e-158d-4f3d-9f26-fe06a8436439.png)

- The output shows which command should be used to join the cluster:

```
sudo kubeadm join 172.31.40.125:6443 --token 07vo3z.q2n2qz6bd07ipdnf \
	--discovery-token-ca-cert-hash sha256:46c7dcb092ca091e71ab39bd542e73b90b3f7bdf0c486202b857a678cd9879ba
```
![image](https://user-images.githubusercontent.com/10358317/156803877-89ac5a24-6dd6-40d0-8568-3c6b70acbd89.png)

![image](https://user-images.githubusercontent.com/10358317/156804162-cc8c3f2b-5d3f-407a-9ced-31322b6bb39b.png)


- Network configuration with the new IP:

```
kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
```

![image](https://user-images.githubusercontent.com/10358317/156804328-c8068ef9-5a7d-4230-a4e9-56aa6a111da9.png)

#### On Worker Nodes:

```
sudo kubeadm reset
sudo kubeadm join 172.31.40.125:6443 --token 07vo3z.q2n2qz6bd07ipdnf \
	--discovery-token-ca-cert-hash sha256:46c7dcb092ca091e71ab39bd542e73b90b3f7bdf0c486202b857a678cd9879ba
```

![image](https://user-images.githubusercontent.com/10358317/156805582-bb66e20b-5b81-49b5-995f-96023c943f3b.png)

![image](https://user-images.githubusercontent.com/10358317/156805882-e2e2144d-f3dc-4b87-81a8-a9f1c4827a5b.png)

- On Master Node:

- Worker1 has now joined the cluster.

```
kubectl get nodes
```

![image](https://user-images.githubusercontent.com/10358317/156805995-49e8a6f5-5293-46b8-9684-59f18d6f5ab2.png)
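As a final optional check after re-initializing, confirm that the active kubeconfig points at the new API server address (it should match the IP you passed to kubeadm init):

```
# Show the API server endpoint stored in the active kubeconfig
kubectl config view --minify | grep server
# e.g. server: https://172.31.40.125:6443
kubectl cluster-info
```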
## 4. Removing the Worker Node from Cluster

- Run these commands on the Master node to remove a specific worker node:

```
kubectl get nodes
kubectl drain worker2
kubectl delete node worker2
```

![image](https://user-images.githubusercontent.com/10358317/157018826-8cbae29e-b5e4-4a6d-bf8e-72d3006ce33e.png)

- Run on the deleted node itself (worker2):

```
sudo kubeadm reset
```

![image](https://user-images.githubusercontent.com/10358317/157018963-422b1b72-667c-4375-b9ee-8035823396d7.png)

## 5. Installing Docker on an Existing Cluster & Running a Local Registry for Storing Local Images

#### 5.1 Installing Docker

- Run these commands on the Master node to install Docker:

```
sudo apt-get update
sudo apt-get install ca-certificates curl gnupg lsb-release
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io
sudo docker run hello-world
```

**More information:** https://docs.docker.com/engine/install/ubuntu/

![image](https://user-images.githubusercontent.com/10358317/157026833-fcd829fd-a5dd-4701-b71a-89327445483d.png)

![image](https://user-images.githubusercontent.com/10358317/157027173-8be0d193-4ac9-4a82-ac3b-33fbd68ba42d.png)

![image](https://user-images.githubusercontent.com/10358317/157027863-787bf3cb-3e0c-4888-8de6-80e2145a383c.png)

![image](https://user-images.githubusercontent.com/10358317/157028189-2585365e-51e5-4dfa-9d60-5ac9d73c258a.png)

![image](https://user-images.githubusercontent.com/10358317/157028470-e09a783d-1413-4d87-bbaf-463741871a68.png)

- Copy and run on all nodes to change Docker's cgroup driver:

```
cd /etc/docker
sudo touch daemon.json
sudo nano daemon.json
# in the file, paste:
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
sudo systemctl restart docker
sudo docker image ls
kubectl get nodes
```

![image](https://user-images.githubusercontent.com/10358317/157424989-671ee3e8-b33c-4d7e-b0d6-ee1fd5685f70.png)

![image](https://user-images.githubusercontent.com/10358317/157425768-a8446317-3477-4719-9bf8-0014ef134335.png)

![image](https://user-images.githubusercontent.com/10358317/157425383-4d82e707-1a98-4dcd-b59e-1239121b5850.png)
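A quick optional way to confirm that the cgroup driver change took effect after the restart:

```
# Should print "Cgroup Driver: systemd"
sudo docker info 2>/dev/null | grep -i "cgroup driver"
```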
- If your cluster is behind a proxy, configure Docker's proxy settings (ref: https://docs.docker.com/config/daemon/systemd/). Copy and run on all nodes, replacing `<proxy-server>` with your proxy address:
```
sudo mkdir -p /etc/systemd/system/docker.service.d
cd /etc/systemd/system/docker.service.d/
sudo touch http-proxy.conf
sudo nano http-proxy.conf
# copy and paste in the file:
[Service]
Environment="HTTP_PROXY=http://<proxy-server>:3128"
Environment="HTTPS_PROXY=http://<proxy-server>:3128"
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo systemctl show --property=Environment docker
sudo docker run hello-world
```

- Use the docker command without sudo:

```
sudo groupadd docker
sudo usermod -aG docker [non-root user]
# log out and log back in to enable it
```

#### 5.2 Running Docker Registry

- Run on the Master to pull the registry image:

```
sudo docker image pull registry
```

- Run a container using the 'registry' image (-p: port binding [hostPort]:[containerPort], -d: detached mode (run in background), -e: set environment variables):
```
sudo docker container run -d -p 5000:5000 --restart always --name localregistry -e REGISTRY_STORAGE_DELETE_ENABLED=true registry
```

- Run the registry container with a bind mount (-v) and without getting error 500 (REGISTRY_VALIDATION_DISABLED=true):
```
sudo docker run -d -p 5000:5000 --restart=always --name registry -v /home/docker_registry:/var/lib/registry -e REGISTRY_STORAGE_DELETE_ENABLED=true -e REGISTRY_VALIDATION_DISABLED=true -e REGISTRY_HTTP_ADDR=0.0.0.0:5000 registry
```

![image](https://user-images.githubusercontent.com/10358317/157030622-69ab3019-6cff-43ee-8a3d-fe277d7632b5.png)

![image](https://user-images.githubusercontent.com/10358317/157030738-be8eb8c3-0f87-4d39-969b-bd94cb8b0f9f.png)

- Open it in a browser or run a curl command:
```
curl http://127.0.0.1:5000/v2/_catalog
```
![image](https://user-images.githubusercontent.com/10358317/157031139-edf0162d-d753-4d75-a39a-127583bb47fe.png)
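Besides the catalog endpoint, the registry's v2 API can list the tags of a single repository. A small sketch, assuming an image named `nginx` has already been pushed (as done in the next section):

```
# List all repositories in the local registry
curl http://127.0.0.1:5000/v2/_catalog
# List the tags of one repository (here: nginx)
curl http://127.0.0.1:5000/v2/nginx/tags/list
```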
## 6. Pulling Images from the Local Docker Registry and Configuring Containerd

- In this scenario, the local Docker registry already runs on the Master node (see [Section 5](#docker_registry)).
- First, add the insecure registry into /etc/docker/daemon.json on **ALL nodes**:

```
sudo nano /etc/docker/daemon.json
# add the insecure-registries entry:
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries":["192.168.219.64:5000"]
}
sudo systemctl restart docker.service
```

![image](https://user-images.githubusercontent.com/10358317/157729358-cf496d8f-24f9-4bff-b263-7a196efb035c.png)

- Pull an image from DockerHub, tag it, and push it to the local registry on the master node:

```
sudo docker image pull nginx:latest
ifconfig # to get the master IP
sudo docker image tag nginx:latest 192.168.219.64:5000/nginx:latest
sudo docker image push 192.168.219.64:5000/nginx:latest
curl http://192.168.219.64:5000/v2/_catalog
sudo docker image pull 192.168.219.64:5000/nginx:latest
```

- Create the Docker config and get the authentication username and password, base64 encoded:

```
sudo docker login # this creates /root/.docker/config.json
sudo cat /root/.docker/config.json | base64 -w0 # copy the base64-encoded key
```

- Create my-secret.yaml and paste the base64-encoded key:

```
apiVersion: v1
kind: Secret
metadata:
  name: registrypullsecret
data:
  .dockerconfigjson: <base64-encoded-docker-config>
type: kubernetes.io/dockerconfigjson
```

- Create the secret. Kubelet uses this secret to pull the image:

```
kubectl create -f my-secret.yaml && kubectl get secrets
```

- Create nginx_pod.yaml. The image name shows where the image is pulled from. In addition, "imagePullSecrets" should be defined to tell kubelet which secret to use when pulling from the local Docker registry.

```
apiVersion: v1
kind: Pod
metadata:
  name: my-private-pod
spec:
  containers:
  - name: private
    image: 192.168.219.64:5000/nginx:latest
  imagePullSecrets:
  - name: registrypullsecret
```

![image](https://user-images.githubusercontent.com/10358317/157726621-858e57b1-4e4c-48dc-9900-c5fe3024d5ae.png)

- On **ALL nodes**, the registry IP and port should be defined for containerd:

```
sudo nano /etc/containerd/config.toml # if containerd is used as the runtime; if it were Docker, add insecure-registries to /etc/docker/daemon.json as on the master
# copy and paste (our IP: 192.168.219.64, change it to your IP):
[plugins."io.containerd.grpc.v1.cri".registry]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.219.64:5000"]
      endpoint = ["http://192.168.219.64:5000"]
  [plugins."io.containerd.grpc.v1.cri".registry.configs]
    [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.219.64:5000".tls]
      insecure_skip_verify = true
# restart containerd.service
sudo systemctl restart containerd.service
```

![image](https://user-images.githubusercontent.com/10358317/157726335-fc7091da-2300-4f4e-a9da-6416a6810329.png)


- If the registry IP and port are not defined, you will get this error: "http: server gave HTTP response to HTTPS client".
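To test the containerd side directly, you can try pulling the image through the CRI. This is an optional sketch that assumes the crictl tool is installed on the node; the socket path may differ on your setup:

```
# Pull through containerd's CRI endpoint to verify the insecure-registry config
sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock pull 192.168.219.64:5000/nginx:latest
```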
- If the pod's status is ImagePullBackOff (Error), it can be inspected with the describe command:

```
kubectl describe pods my-private-pod
```

![image](https://user-images.githubusercontent.com/10358317/157730392-09a1a2b6-0eec-4f68-97e9-066d00ea541d.png)


- On Master:

```
kubectl apply -f nginx_pod.yaml
kubectl get pods -o wide
```
![image](https://user-images.githubusercontent.com/10358317/157725926-90b57357-cf8f-4d27-a91c-01a7d0eb047c.png)

## 7. NFS Server Connection for Persistent Volume

- If an NFS server is required, you can create one:
  - if you have Windows 2019 Server: https://youtu.be/_x3vg25i7GQ
  - if you have Ubuntu: https://rudimartinsen.com/2022/01/05/nginx-nfs-kubernetes/

- Run on ALL nodes to reach the NFS server:

```
sudo apt install nfs-common
sudo apt install cifs-utils
sudo mkdir /data # create the /data directory under root and mount it to NFS
sudo mount -t nfs <NFSServerIP>:/share /data/ # the /share directory was created while setting up the NFS server
sudo chmod 777 /data # give permissions to reach the mounted shared area
```

### Reference

- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
- https://github.com/aytitech/k8sfundamentals/tree/main/setup
- https://multipass.run/
- https://computingforgeeks.com/join-new-kubernetes-worker-node-to-existing-cluster/
- https://docs.docker.com/engine/install/ubuntu/
- https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- https://stackoverflow.com/questions/32726923/pulling-images-from-private-registry-in-kubernetes
- https://stackoverflow.com/questions/65681045/adding-insecure-registry-in-containerd
--------------------------------------------------------------------------------