├── labs
│   ├── secret
│   │   ├── password.txt
│   │   ├── username.txt
│   │   ├── server.txt
│   │   ├── config.json
│   │   ├── secret.yaml
│   │   └── secret-pods.yaml
│   ├── configmap
│   │   ├── theme.txt
│   │   └── configmap.yaml
│   ├── service
│   │   ├── backend_nodeport.yaml
│   │   ├── backend_clusterip.yaml
│   │   ├── backend_loadbalancer.yaml
│   │   └── deploy.yaml
│   ├── pod
│   │   ├── pod1.yaml
│   │   └── multicontainer.yaml
│   ├── persistentvolume
│   │   ├── pv.yaml
│   │   ├── pvc.yaml
│   │   └── deploy.yaml
│   ├── job
│   │   └── job.yaml
│   ├── ingress
│   │   ├── todoingress.yaml
│   │   ├── appingress.yaml
│   │   └── deploy.yaml
│   ├── deployment
│   │   ├── deployment1.yaml
│   │   ├── recreate-deployment.yaml
│   │   └── rolling-deployment.yaml
│   ├── cronjob
│   │   └── cronjob.yaml
│   ├── tainttoleration
│   │   └── podtoleration.yaml
│   ├── daemonset
│   │   └── daemonset.yaml
│   ├── liveness
│   │   └── liveness.yaml
│   ├── affinity
│   │   └── podnodeaffinity.yaml
│   └── statefulset
│       └── statefulset.yaml
├── index.html
├── LICENSE
├── K8s-CronJob.md
├── create_real_cluster
│   ├── win2019-kubeadm1.26.2-calico3.25.0-docker
│   │   ├── install2.ps1
│   │   ├── install1.ps1
│   │   └── install-docker-ce.ps1
│   ├── win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24
│   │   ├── install1.ps1
│   │   └── install2.ps1
│   ├── ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10
│   │   ├── master.sh
│   │   └── install.sh
│   └── ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24
│       ├── master-ubuntu24.04-k8s1.32.sh
│       └── install-ubuntu24.04-k8s1.32.sh
├── K8s-Job.md
├── K8-CreatingPod-Declerative.md
├── K8s-CreatingPod-Imperative.md
├── K8s-Enable-Dashboard-On-Cluster.md
├── K8s-Configmap.md
├── K8s-Liveness-App.md
├── K8s-Deployment.md
├── K8s-Daemon-Sets.md
├── K8s-Taint-Toleration.md
├── K8s-Statefulset.md
├── K8s-Multicontainer-Sidecar.md
├── HelmCheatsheet.md
├── K8s-Helm-Jenkins.md
├── K8s-Node-Affinity.md
├── K8s-Monitoring-Prometheus-Grafana.md
├── K8s-Secret.md
├── K8s-Service-App.md
├── K8s-Kubeadm-Cluster-Docker.md
├── K8s-PersistantVolume.md
├── Helm.md
├── K8s-Ingress.md
├── K8s-Rollout-Rollback.md
├── KubernetesCommandCheatSheet.md
└── K8s-Kubeadm-Cluster-Setup.md
/labs/secret/password.txt:
--------------------------------------------------------------------------------
1 | P@ssw0rd!
--------------------------------------------------------------------------------
/labs/secret/username.txt:
--------------------------------------------------------------------------------
1 | admin
--------------------------------------------------------------------------------
/labs/configmap/theme.txt:
--------------------------------------------------------------------------------
1 | theme=dark
--------------------------------------------------------------------------------
/labs/secret/server.txt:
--------------------------------------------------------------------------------
1 | db.example.com
--------------------------------------------------------------------------------
/labs/secret/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "apiKey": "6bba108d4b2212f2c30c71dfa279e1f77cc5c3b2"
3 | }
--------------------------------------------------------------------------------
/labs/secret/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: mysecret
5 | type: Opaque
6 | stringData:
7 | db_server: db.example.com
8 | db_username: admin
9 | db_password: P@ssw0rd!
--------------------------------------------------------------------------------
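A note on the secret lab: the plain-text files above (password.txt, username.txt, server.txt) can produce the same Secret imperatively. A minimal sketch, assuming a running cluster and a shell opened in /labs/secret; the explicit key names are chosen here to match secret.yaml:

```
# Build the same Secret from the lab's plain-text files.
# --from-file=<key>=<path> sets the key explicitly; otherwise the filename becomes the key.
kubectl create secret generic mysecret \
  --from-file=db_password=password.txt \
  --from-file=db_username=username.txt \
  --from-file=db_server=server.txt

# Inspect it: values are only base64-encoded at rest, not encrypted.
kubectl get secret mysecret -o yaml
```

Note that a trailing newline in a source file becomes part of the stored value, which is one reason secret.yaml uses the stringData variant instead.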
/labs/service/backend_nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: frontend
5 | spec:
6 | type: NodePort
7 | selector:
8 | app: frontend
9 | ports:
10 | - protocol: TCP
11 | port: 80
12 | targetPort: 80
--------------------------------------------------------------------------------
/labs/service/backend_clusterip.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: backend
5 | spec:
6 | type: ClusterIP
7 | selector:
8 | app: backend
9 | ports:
10 | - protocol: TCP
11 | port: 5000
12 | targetPort: 5000
--------------------------------------------------------------------------------
/labs/service/backend_loadbalancer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: frontendlb
5 | spec:
6 | type: LoadBalancer
7 | selector:
8 | app: frontend
9 | ports:
10 | - protocol: TCP
11 | port: 80
12 | targetPort: 80
--------------------------------------------------------------------------------
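To compare the three Service types above on minikube, a hedged sketch (assumes these files and the deploy.yaml from the same folder are in the current directory):

```
kubectl apply -f deploy.yaml -f backend_clusterip.yaml -f backend_nodeport.yaml -f backend_loadbalancer.yaml
kubectl get svc -o wide            # ClusterIP gets an internal virtual IP; NodePort adds a 30000-32767 node port
minikube service frontend --url    # prints a reachable URL for the NodePort service
minikube tunnel                    # run in a separate terminal so the LoadBalancer service gets an external IP
```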
/labs/pod/pod1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: firstpod
5 | labels:
6 | app: frontend
7 | spec:
8 | containers:
9 | - name: nginx
10 | image: nginx:latest
11 | ports:
12 | - containerPort: 80
13 | env:
14 | - name: USER
15 | value: "username"
--------------------------------------------------------------------------------
/labs/persistentvolume/pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: mysqlpv
5 | labels:
6 | app: mysql
7 | spec:
8 | capacity:
9 | storage: 5Gi
10 | accessModes:
11 | - ReadWriteOnce
12 | persistentVolumeReclaimPolicy: Recycle
13 | nfs:
14 | path: /
15 | server: 10.255.255.10
--------------------------------------------------------------------------------
/labs/persistentvolume/pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: mysqlclaim
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | volumeMode: Filesystem
9 | resources:
10 | requests:
11 | storage: 5Gi
12 | storageClassName: ""
13 | selector:
14 | matchLabels:
15 | app: mysql
--------------------------------------------------------------------------------
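pv.yaml and pvc.yaml above can be checked for binding even before the NFS server exists, since the mount is only attempted when a pod uses the claim. A minimal sketch:

```
kubectl apply -f pv.yaml -f pvc.yaml
kubectl get pv,pvc                 # STATUS should move from Available to Bound once claim and volume match
kubectl describe pvc mysqlclaim    # shows the bound PV, or the reason the claim is still Pending
```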
/index.html:
--------------------------------------------------------------------------------
6 | Hello World! This file is created for the multicontainer-sidecar github update.
7 |
8 | This is the multicontainer scenario! Second version.
--------------------------------------------------------------------------------
/labs/job/job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: pi
5 | spec:
6 | parallelism: 2
7 | completions: 10
8 | backoffLimit: 5
9 | activeDeadlineSeconds: 100
10 | template:
11 | spec:
12 | containers:
13 | - name: pi
14 | image: perl
15 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
16 | restartPolicy: Never #OnFailure
--------------------------------------------------------------------------------
/labs/ingress/todoingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: todoingress
5 | spec:
6 | rules:
7 | - host: todoapp.com
8 | http:
9 | paths:
10 | - path: /
11 | pathType: Prefix
12 | backend:
13 | service:
14 | name: todosvc
15 | port:
16 | number: 80
--------------------------------------------------------------------------------
/labs/deployment/deployment1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: firstdeployment
5 | labels:
6 | team: development
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: frontend
12 | template:
13 | metadata:
14 | labels:
15 | app: frontend
16 | spec:
17 | containers:
18 | - name: nginx
19 | image: nginx:latest
20 | ports:
21 | - containerPort: 80
--------------------------------------------------------------------------------
/labs/deployment/recreate-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: rcdeployment
5 | labels:
6 | team: development
7 | spec:
8 | replicas: 5
9 | selector:
10 | matchLabels:
11 | app: recreate
12 | strategy:
13 | type: Recreate
14 | template:
15 | metadata:
16 | labels:
17 | app: recreate
18 | spec:
19 | containers:
20 | - name: nginx
21 | image: nginx
22 | ports:
23 | - containerPort: 80
--------------------------------------------------------------------------------
/labs/cronjob/cronjob.yaml:
--------------------------------------------------------------------------------
1 | # https://crontab.guru/
2 | apiVersion: batch/v1
3 | kind: CronJob
4 | metadata:
5 | name: hello
6 | spec:
7 | schedule: "*/1 * * * *"
8 | jobTemplate:
9 | spec:
10 | template:
11 | spec:
12 | containers:
13 | - name: hello
14 | image: busybox
15 | imagePullPolicy: IfNotPresent
16 | command:
17 | - /bin/sh
18 | - -c
19 | - date; echo Hello from the Kubernetes cluster
20 | restartPolicy: OnFailure
--------------------------------------------------------------------------------
/labs/deployment/rolling-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: rolldeployment
5 | labels:
6 | team: development
7 | spec:
8 | replicas: 10
9 | selector:
10 | matchLabels:
11 | app: rolling
12 | strategy:
13 | type: RollingUpdate
14 | rollingUpdate:
15 | maxUnavailable: 2
16 | maxSurge: 2
17 | template:
18 | metadata:
19 | labels:
20 | app: rolling
21 | spec:
22 | containers:
23 | - name: nginx
24 | image: nginx
25 | ports:
26 | - containerPort: 80
--------------------------------------------------------------------------------
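The rolling strategy above (maxSurge: 2, maxUnavailable: 2) can be observed with the rollout commands; a sketch, where nginx:1.25 is only an illustrative tag:

```
kubectl apply -f rolling-deployment.yaml
kubectl set image deployment/rolldeployment nginx=nginx:1.25   # trigger a rolling update
kubectl rollout status deployment/rolldeployment               # at most 12 pods exist and at least 8 stay available during the update
kubectl rollout undo deployment/rolldeployment                 # roll back to the previous ReplicaSet
```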
/labs/tainttoleration/podtoleration.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: toleratedpod1
5 | labels:
6 | env: test
7 | spec:
8 | containers:
9 | - name: toleratedcontainer1
10 | image: nginx:latest
11 | tolerations:
12 | - key: "platform"
13 | operator: "Equal"
14 | value: "production"
15 | effect: "NoSchedule"
16 | ---
17 | apiVersion: v1
18 | kind: Pod
19 | metadata:
20 | name: toleratedpod2
21 | labels:
22 | env: test
23 | spec:
24 | containers:
25 | - name: toleratedcontainer2
26 | image: nginx
27 | tolerations:
28 | - key: "platform"
29 | operator: "Exists"
30 | effect: "NoSchedule"
--------------------------------------------------------------------------------
/labs/ingress/appingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: appingress
5 | annotations:
6 | nginx.ingress.kubernetes.io/rewrite-target: /$1
7 | spec:
8 | rules:
9 | - host: webapp.com
10 | http:
11 | paths:
12 | - path: /blue
13 | pathType: Prefix
14 | backend:
15 | service:
16 | name: bluesvc
17 | port:
18 | number: 80
19 | - path: /green
20 | pathType: Prefix
21 | backend:
22 | service:
23 | name: greensvc
24 | port:
25 | number: 80
--------------------------------------------------------------------------------
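A hedged way to exercise both Ingress objects above on minikube, assuming the backing services (todosvc, bluesvc, greensvc) exist, e.g. from the deploy.yaml in the same folder:

```
minikube addons enable ingress          # installs the NGINX ingress controller
kubectl apply -f todoingress.yaml -f appingress.yaml
kubectl get ingress                     # wait until an ADDRESS is assigned
# Route by Host header instead of editing /etc/hosts:
curl -H "Host: todoapp.com" http://$(minikube ip)/
curl -H "Host: webapp.com"  http://$(minikube ip)/blue
```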
/labs/pod/multicontainer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: multicontainer
5 | spec:
6 | containers:
7 | - name: webcontainer
8 | image: nginx
9 | ports:
10 | - containerPort: 80
11 | volumeMounts:
12 | - name: sharedvolume
13 | mountPath: /usr/share/nginx/html
14 | - name: sidecarcontainer
15 | image: busybox
16 | command: ["/bin/sh"]
17 | args: ["-c", "while true; do wget -O /var/log/index.html https://raw.githubusercontent.com/omerbsezer/Fast-Kubernetes/main/index.html; sleep 15; done"]
18 | volumeMounts:
19 | - name: sharedvolume
20 | mountPath: /var/log
21 | volumes:
22 | - name: sharedvolume
23 | emptyDir: {}
--------------------------------------------------------------------------------
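To see the sidecar pattern above in action, a minimal sketch (assumes the pod is Running):

```
kubectl apply -f multicontainer.yaml
# The sidecar downloads index.html into the shared emptyDir that nginx serves:
kubectl exec -it multicontainer -c sidecarcontainer -- ls /var/log
kubectl exec -it multicontainer -c webcontainer -- cat /usr/share/nginx/html/index.html
kubectl port-forward pod/multicontainer 8080:80    # then browse http://127.0.0.1:8080
```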
/labs/configmap/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: myconfigmap
5 | data:
6 | db_server: "db.example.com"
7 | database: "mydatabase"
8 | site.settings: |
9 | color=blue
10 | padding:25px
11 | ---
12 | apiVersion: v1
13 | kind: Pod
14 | metadata:
15 | name: configmappod
16 | spec:
17 | containers:
18 | - name: configmapcontainer
19 | image: nginx
20 | env:
21 | - name: DB_SERVER
22 | valueFrom:
23 | configMapKeyRef:
24 | name: myconfigmap
25 | key: db_server
26 | - name: DATABASE
27 | valueFrom:
28 | configMapKeyRef:
29 | name: myconfigmap
30 | key: database
31 | volumeMounts:
32 | - name: config-vol
33 | mountPath: "/config"
34 | readOnly: true
35 | volumes:
36 | - name: config-vol
37 | configMap:
38 | name: myconfigmap
--------------------------------------------------------------------------------
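Both injection paths in configmap.yaml (environment variables and the volume) can be verified from inside the pod; a sketch, assuming configmappod is Running:

```
kubectl apply -f configmap.yaml
kubectl exec configmappod -- printenv DB_SERVER DATABASE   # values injected as environment variables
kubectl exec configmappod -- ls /config                    # each key is mounted as a file
kubectl exec configmappod -- cat /config/site.settings
```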
/labs/service/deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: frontend
5 | labels:
6 | team: development
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: frontend
12 | template:
13 | metadata:
14 | labels:
15 | app: frontend
16 | spec:
17 | containers:
18 | - name: frontend
19 | image: nginx:latest
20 | ports:
21 | - containerPort: 80
22 | ---
23 | apiVersion: apps/v1
24 | kind: Deployment
25 | metadata:
26 | name: backend
27 | labels:
28 | team: development
29 | spec:
30 | replicas: 3
31 | selector:
32 | matchLabels:
33 | app: backend
34 | template:
35 | metadata:
36 | labels:
37 | app: backend
38 | spec:
39 | containers:
40 | - name: backend
41 | image: ozgurozturknet/k8s:backend
42 | ports:
43 | - containerPort: 5000
--------------------------------------------------------------------------------
/labs/persistentvolume/deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: mysqlsecret
5 | type: Opaque
6 | stringData:
7 | password: P@ssw0rd!
8 | ---
9 | apiVersion: apps/v1
10 | kind: Deployment
11 | metadata:
12 | name: mysqldeployment
13 | labels:
14 | app: mysql
15 | spec:
16 | replicas: 1
17 | selector:
18 | matchLabels:
19 | app: mysql
20 | strategy:
21 | type: Recreate
22 | template:
23 | metadata:
24 | labels:
25 | app: mysql
26 | spec:
27 | containers:
28 | - name: mysql
29 | image: mysql
30 | ports:
31 | - containerPort: 3306
32 | volumeMounts:
33 | - mountPath: "/var/lib/mysql"
34 | name: mysqlvolume
35 | env:
36 | - name: MYSQL_ROOT_PASSWORD
37 | valueFrom:
38 | secretKeyRef:
39 | name: mysqlsecret
40 | key: password
41 | volumes:
42 | - name: mysqlvolume
43 | persistentVolumeClaim:
44 | claimName: mysqlclaim
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Omer Berat Sezer
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/labs/secret/secret-pods.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: secretvolumepod
5 | spec:
6 | containers:
7 | - name: secretcontainer
8 | image: nginx
9 | volumeMounts:
10 | - name: secret-vol
11 | mountPath: /secret
12 | volumes:
13 | - name: secret-vol
14 | secret:
15 | secretName: mysecret
16 | ---
17 | apiVersion: v1
18 | kind: Pod
19 | metadata:
20 | name: secretenvpod
21 | spec:
22 | containers:
23 | - name: secretcontainer
24 | image: nginx
25 | env:
26 | - name: username
27 | valueFrom:
28 | secretKeyRef:
29 | name: mysecret
30 | key: db_username
31 | - name: password
32 | valueFrom:
33 | secretKeyRef:
34 | name: mysecret
35 | key: db_password
36 | - name: server
37 | valueFrom:
38 | secretKeyRef:
39 | name: mysecret
40 | key: db_server
41 | ---
42 | apiVersion: v1
43 | kind: Pod
44 | metadata:
45 | name: secretenvallpod
46 | spec:
47 | containers:
48 | - name: secretcontainer
49 | image: nginx
50 | envFrom:
51 | - secretRef:
52 | name: mysecret
--------------------------------------------------------------------------------
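The three consumption styles in secret-pods.yaml can be verified once the pods are Running; a minimal sketch (assumes secret.yaml was applied first):

```
kubectl apply -f secret.yaml -f secret-pods.yaml
kubectl exec secretvolumepod -- ls /secret                 # one file per key: db_password, db_server, db_username
kubectl exec secretvolumepod -- cat /secret/db_password    # mounted values arrive decoded, as plain text
kubectl exec secretenvpod -- printenv username password server
```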
/labs/daemonset/daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: logdaemonset
5 | labels:
6 | app: fluentd-logging
7 | spec:
8 | selector:
9 | matchLabels:
10 | name: fluentd-elasticsearch
11 | template:
12 | metadata:
13 | labels:
14 | name: fluentd-elasticsearch
15 | spec:
16 | tolerations:
17 | # this toleration is to have the daemonset runnable on master nodes
18 | # remove it if your masters can't run pods
19 | - key: node-role.kubernetes.io/master
20 | effect: NoSchedule
21 | containers:
22 | - name: fluentd-elasticsearch
23 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
24 | resources:
25 | limits:
26 | memory: 200Mi
27 | requests:
28 | cpu: 100m
29 | memory: 200Mi
30 | volumeMounts:
31 | - name: varlog
32 | mountPath: /var/log
33 | - name: varlibdockercontainers
34 | mountPath: /var/lib/docker/containers
35 | readOnly: true
36 | terminationGracePeriodSeconds: 30
37 | volumes:
38 | - name: varlog
39 | hostPath:
40 | path: /var/log
41 | - name: varlibdockercontainers
42 | hostPath:
43 | path: /var/lib/docker/containers
--------------------------------------------------------------------------------
/labs/liveness/liveness.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | test: liveness
6 | name: liveness-http
7 | spec:
8 | containers:
9 | - name: liveness
10 | image: k8s.gcr.io/liveness
11 | args:
12 | - /server
13 | livenessProbe:
14 | httpGet:
15 | path: /healthz
16 | port: 8080
17 | httpHeaders:
18 | - name: Custom-Header
19 | value: Awesome
20 | initialDelaySeconds: 3
21 | periodSeconds: 3
22 | ---
23 | apiVersion: v1
24 | kind: Pod
25 | metadata:
26 | labels:
27 | test: liveness
28 | name: liveness-exec
29 | spec:
30 | containers:
31 | - name: liveness
32 | image: k8s.gcr.io/busybox
33 | args:
34 | - /bin/sh
35 | - -c
36 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
37 | livenessProbe:
38 | exec:
39 | command:
40 | - cat
41 | - /tmp/healthy
42 | initialDelaySeconds: 5
43 | periodSeconds: 5
44 | ---
45 | apiVersion: v1
46 | kind: Pod
47 | metadata:
48 | name: goproxy
49 | labels:
50 | app: goproxy
51 | spec:
52 | containers:
53 | - name: goproxy
54 | image: k8s.gcr.io/goproxy:0.1
55 | ports:
56 | - containerPort: 8080
57 | livenessProbe:
58 | tcpSocket:
59 | port: 8080
60 | initialDelaySeconds: 15
61 | periodSeconds: 20
--------------------------------------------------------------------------------
/K8s-CronJob.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Cron Job
2 |
3 | This scenario shows how the K8s CronJob object works on minikube.
4 |
5 | ### Steps
6 |
7 | - Copy and save (below) as file on your PC (cronjob.yaml).
8 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/cronjob/cronjob.yaml
9 |
10 | ```
11 | apiVersion: batch/v1
12 | kind: CronJob
13 | metadata:
14 | name: hello
15 | spec:
16 | schedule: "*/1 * * * *"
17 | jobTemplate:
18 | spec:
19 | template:
20 | spec:
21 | containers:
22 | - name: hello
23 | image: busybox
24 | imagePullPolicy: IfNotPresent
25 | command:
26 | - /bin/sh
27 | - -c
28 | - date; echo Hello from the Kubernetes cluster
29 | restartPolicy: OnFailure
30 | ```
31 |
32 | 
33 |
34 |
35 | - Create Cron Job:
36 |
37 | 
38 |
39 | - Watch pods' status:
40 |
41 | 
42 |
43 | - Watch job's status:
44 |
45 | 
46 |
47 | - Delete job:
48 |
49 | 
50 |
--------------------------------------------------------------------------------
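The CronJob lab above shows screenshots; the underlying commands, as a sketch (job names are generated, so copy one from the output):

```
kubectl apply -f cronjob.yaml
kubectl get cronjob hello -w        # a new Job is spawned every minute per the schedule
kubectl get jobs                    # lists the spawned Jobs, e.g. hello-<timestamp>
kubectl logs job/<jobName>          # <jobName> is a placeholder; copy a name from 'kubectl get jobs'
kubectl delete -f cronjob.yaml      # removes the CronJob and its Jobs
```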
/labs/affinity/podnodeaffinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nodeaffinitypod1
5 | spec:
6 | containers:
7 | - name: nodeaffinity1
8 | image: nginx:latest
9 | affinity:
10 | nodeAffinity:
11 | requiredDuringSchedulingIgnoredDuringExecution:
12 | nodeSelectorTerms:
13 | - matchExpressions:
14 | - key: app
15 | operator: In #In, NotIn, Exists, DoesNotExist
16 | values:
17 | - production
18 | ---
19 | apiVersion: v1
20 | kind: Pod
21 | metadata:
22 | name: nodeaffinitypod2
23 | spec:
24 | containers:
25 | - name: nodeaffinity2
26 | image: nginx:latest
27 | affinity:
28 | nodeAffinity:
29 | preferredDuringSchedulingIgnoredDuringExecution:
30 | - weight: 1
31 | preference:
32 | matchExpressions:
33 | - key: app
34 | operator: In
35 | values:
36 | - production
37 | - weight: 2
38 | preference:
39 | matchExpressions:
40 | - key: app
41 | operator: In
42 | values:
43 | - test
44 | ---
45 | apiVersion: v1
46 | kind: Pod
47 | metadata:
48 | name: nodeaffinitypod3
49 | spec:
50 | containers:
51 | - name: nodeaffinity3
52 | image: nginx:latest
53 | affinity:
54 | nodeAffinity:
55 | requiredDuringSchedulingIgnoredDuringExecution:
56 | nodeSelectorTerms:
57 | - matchExpressions:
58 | - key: app
59 | operator: Exists #In, NotIn, Exists, DoesNotExist
--------------------------------------------------------------------------------
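The required rules in pods 1 and 3 above only schedule onto a node carrying the "app" label, while pod 2's preferred rule schedules regardless. A sketch on minikube:

```
kubectl apply -f podnodeaffinity.yaml
kubectl get pods -o wide                   # nodeaffinitypod1 and nodeaffinitypod3 stay Pending: no node has the "app" label
kubectl label node minikube app=production
kubectl get pods -o wide                   # the required-affinity pods now schedule onto the labeled node
kubectl label node minikube app-           # remove the label afterwards
```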
/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install2.ps1:
--------------------------------------------------------------------------------
1 | echo "#########################################################"
2 | echo "Before running this script, please make sure the 'k' directory exists under C: (c:\k) and contains the K8s config file..."
3 | echo "e.g. mkdir c:\k"
4 | echo "e.g. run on the master node: scp -r /home/ubuntu/.kube/config windowsuser@IP:C:\k"
5 | echo "#########################################################"
6 | echo "Script will start in 10 Seconds..."
7 | Start-Sleep -s 10
8 |
9 | echo "Installing remote access..."
10 | Install-RemoteAccess -VpnType RoutingOnly
11 | Set-Service -Name RemoteAccess -StartupType 'Automatic'
12 | start-service RemoteAccess
13 |
14 | echo "Installing Calico, Waiting 10 Seconds..."
15 | Start-Sleep -s 10
16 | Invoke-WebRequest https://github.com/projectcalico/calico/releases/download/v3.25.0/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1
17 | c:\install-calico-windows.ps1 -DownloadOnly yes -KubeVersion 1.26.2
18 | Get-Service -Name CalicoNode
19 | Get-Service -Name CalicoFelix
20 |
21 | echo "Installing Kubelet Service, Waiting 10 Seconds..."
22 | Start-Sleep -s 10
23 | C:\CalicoWindows\kubernetes\install-kube-services.ps1
24 | Start-Service -Name kubelet
25 | Start-Service -Name kube-proxy
26 |
27 | echo "Testing kubectl..."
28 | kubectl get nodes -o wide
29 |
30 | echo "#########################################################"
31 | echo "Congratulations, Kubernetes is installed on Windows..."
32 | echo "Calico Ref: https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/kubernetes/standard"
33 | echo "#########################################################"
34 |
--------------------------------------------------------------------------------
/K8s-Job.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Job
2 |
3 | This scenario shows how the K8s Job object works on minikube.
4 |
5 | ### Steps
6 |
7 | - Copy and save (below) as file on your PC (job.yaml).
8 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/job/job.yaml
9 |
10 | ```
11 | apiVersion: batch/v1
12 | kind: Job
13 | metadata:
14 | name: pi
15 | spec:
16 | parallelism: 2 # how many pods run in parallel at a time
17 | completions: 10 # total number of pods that must complete successfully to finish the job
18 | backoffLimit: 5 # tolerated failures; after 5 failed attempts, the job is marked as failed
19 | activeDeadlineSeconds: 100 # if the job is not completed within 100 seconds, it is marked as failed
20 | template:
21 | spec:
22 | containers:
23 | - name: pi
24 | image: perl # image is perl from docker
25 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] # computes pi to 2000 digits
26 | restartPolicy: Never
27 | ```
28 |
29 | 
30 |
31 | - Create job:
32 |
33 | 
34 |
35 | - Watch pods' status:
36 |
37 | 
38 |
39 | - Watch job's status:
40 |
41 | 
42 |
43 | - After completion, we can see the logs of each pod. Pods are not deleted when their tasks complete.
44 |
45 | 
46 |
47 | - Delete job:
48 |
49 | 
50 |
--------------------------------------------------------------------------------
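The Job lab above shows screenshots; as a sketch, the matching commands:

```
kubectl apply -f job.yaml
kubectl get pods -w          # at most 2 pods run in parallel until 10 completions are reached
kubectl get job pi           # the COMPLETIONS column counts successful pods
kubectl logs job/pi          # prints the computed digits of pi from one of the completed pods
kubectl delete -f job.yaml   # deleting the Job also removes its completed pods
```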
/create_real_cluster/win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install1.ps1:
--------------------------------------------------------------------------------
1 | echo "#########################################################"
2 | echo "Script will start in 10 Seconds..."
3 | Start-Sleep -s 10
4 |
5 | echo "Firewall rules : Allow All Traffic..."
6 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction OutBound -Action Allow
7 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction InBound -Action Allow
8 |
9 | echo "Installing Containers..."
10 | Install-WindowsFeature -Name containers
11 |
12 | echo "Installing Containerd, Waiting 10 Seconds..."
13 | Start-Sleep -s 10
14 | Invoke-WebRequest -UseBasicParsing "https://raw.githubusercontent.com/microsoft/Windows-Containers/Main/helpful_tools/Install-ContainerdRuntime/install-containerd-runtime.ps1" -o install-containerd-runtime.ps1
15 | .\install-containerd-runtime.ps1
16 |
17 | echo "Setting Service Containerd, Waiting 20 Seconds..."
18 | Start-Sleep -s 20
19 | Set-Service -Name containerd -StartupType 'Automatic'
20 |
21 | echo "Installing additional Windows networking components: RemoteAccess, RSAT-RemoteAccess-PowerShell, Routing, Waiting 10 Seconds..."
22 | Start-Sleep -s 10
23 | Install-WindowsFeature RemoteAccess
24 |
25 | echo "Installing RSAT-RemoteAccess-PowerShell, Waiting 10 Seconds..."
26 | Start-Sleep -s 10
27 | Install-WindowsFeature RSAT-RemoteAccess-PowerShell
28 |
29 | echo "Installing Routing, Waiting 10 Seconds..."
30 | Start-Sleep -s 10
31 | Install-WindowsFeature Routing
32 |
33 | echo "#########################################################"
34 | echo "Containerd and network components are installed..."
35 | echo "After Restart, please run INSTALL2.ps1..."
36 | echo "Before running install2.ps1, please make sure the 'k' directory exists under C: (c:\k) and contains the K8s config file..."
37 | echo "e.g. mkdir c:\k"
38 | echo "e.g. run on the master node: scp -r /home/ubuntu/.kube/config windowsuser@IP:C:\k"
39 | echo "e.g. or copy the config file content, and paste it on Windows c:\k"
40 | echo "#########################################################"
41 | echo "Computer will be restarted in 10 Seconds..."
42 | Start-Sleep -s 10
43 | Restart-Computer -Force
44 |
--------------------------------------------------------------------------------
/K8-CreatingPod-Declerative.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Creating Pod - Declarative Way (With Yaml File)
2 |
3 | This scenario shows:
4 | - how to create basic K8s pod using yaml file,
5 | - how to get more information about a pod (for troubleshooting),
6 |
7 |
8 | ### Steps
9 |
10 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start")
11 |
12 | 
13 |
14 | - Create Yaml file (pod1.yaml) in your directory and copy the below definition into the file:
15 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/pod/pod1.yaml
16 |
17 | ```
18 | apiVersion: v1
19 | kind: Pod # type of K8s object: Pod
20 | metadata:
21 | name: firstpod # name of pod
22 | labels:
23 | app: frontend # label pod with "app:frontend"
24 | spec:
25 | containers:
26 | - name: nginx
27 | image: nginx:latest # image name:image version, nginx downloads from DockerHub
28 | ports:
29 | - containerPort: 80 # open ports in the container
30 | env: # environment variables
31 | - name: USER
32 | value: "username"
33 | ```
34 |
35 | 
36 |
37 |
38 | - Apply/run the file to create the pod in a declarative way ("kubectl apply -f pod1.yaml"):
39 |
40 | 
41 |
42 | - Describe firstpod ("kubectl describe pods firstpod"):
43 |
44 | 
45 |
46 | - Delete pod and get all pods in the default namespace ("kubectl delete -f pod1.yaml"):
47 |
48 | 
49 |
50 | - If you want to delete minikube ("minikube delete"):
51 |
52 | 
53 |
54 |
--------------------------------------------------------------------------------
/create_real_cluster/win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install2.ps1:
--------------------------------------------------------------------------------
1 | echo "#########################################################"
2 | echo "Before running this script, please make sure the 'k' directory exists under C: (c:\k) and contains the K8s config file..."
3 | echo "e.g. mkdir c:\k"
4 | echo "e.g. run on the master node: scp -r /home/ubuntu/.kube/config windowsuser@IP:C:\k"
5 | echo "#########################################################"
6 | echo "Script will start in 10 Seconds..."
7 | Start-Sleep -s 10
8 |
9 | echo "Installing remote access..."
10 | Install-RemoteAccess -VpnType RoutingOnly
11 | Set-Service -Name RemoteAccess -StartupType 'Automatic'
12 | start-service RemoteAccess
13 |
14 | echo "Installing Calico, Waiting 10 Seconds..."
15 | Start-Sleep -s 10
16 | Invoke-WebRequest -Uri https://github.com/projectcalico/calico/releases/download/v3.29.2/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1
17 | c:\install-calico-windows.ps1 -ReleaseBaseURL "https://github.com/projectcalico/calico/releases/download/v3.29.2" -ReleaseFile "calico-windows-v3.29.2.zip" -KubeVersion "1.32.0" -DownloadOnly "yes" -ServiceCidr "10.96.0.0/24" -DNSServerIPs "127.0.0.1"
18 |
19 | $ENV:CNI_BIN_DIR="c:\program files\containerd\cni\bin"
20 | $ENV:CNI_CONF_DIR="c:\program files\containerd\cni\conf"
21 | c:\calicowindows\install-calico.ps1
22 | c:\calicowindows\start-calico.ps1
23 |
24 | echo "Installing Kubelet Service, Waiting 10 Seconds..."
25 | Start-Sleep -s 10
26 | c:\calicowindows\kubernetes\install-kube-services.ps1
27 | New-NetFirewallRule -Name 'Kubelet-In-TCP' -DisplayName 'Kubelet (node)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 10250
28 | Start-Service -Name kubelet
29 | Start-Service -Name kube-proxy
30 |
31 | echo "Testing kubectl..."
32 | kubectl get nodes -o wide
33 |
34 |
35 | echo "#########################################################"
36 | echo "Congratulations, Kubernetes is installed on Windows..."
37 | echo "Calico Ref: https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/kubernetes/standard"
38 | echo "#########################################################"
39 | # ref: https://medium.com/@lubomir-tobek/kubernetes-cluster-and-adding-a-windows-worker-node-0a5b65bffbaa
40 |
--------------------------------------------------------------------------------
/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install1.ps1:
--------------------------------------------------------------------------------
1 | echo "#########################################################"
2 | echo "Script will start in 10 Seconds..."
3 | Start-Sleep -s 10
4 |
5 | echo "Firewall rules : Allow All Traffic..."
6 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction OutBound -Action Allow
7 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction InBound -Action Allow
8 |
9 | echo "Installing Docker Container..."
10 | Install-WindowsFeature -Name containers
11 |
12 | # DockerMsftProvider is deprecated!! Instead, use install-docker-ce.ps1 from Microsoft to install Docker on Windows Servers
13 | #echo "Installing DockerMsftProvider, Waiting 15 Seconds..."
14 | #Start-Sleep -s 15
15 | #Install-Module DockerMsftProvider -Force
16 | #Install-Package Docker -ProviderName DockerMsftProvider -Force
17 |
18 | echo "Installing Docker, Waiting 10 Seconds..."
19 | Start-Sleep -s 10
20 | .\install-docker-ce.ps1
21 |
22 | echo "Setting Service Docker, Waiting 20 Seconds..."
23 | Start-Sleep -s 20
24 | Set-Service -Name docker -StartupType 'Automatic'
25 |
26 | echo "Installing additional Windows networking components: RemoteAccess, RSAT-RemoteAccess-PowerShell, Routing, Waiting 10 Seconds..."
27 | Start-Sleep -s 10
28 | Install-WindowsFeature RemoteAccess
29 |
30 | echo "Installing RSAT-RemoteAccess-PowerShell, Waiting 10 Seconds..."
31 | Start-Sleep -s 10
32 | Install-WindowsFeature RSAT-RemoteAccess-PowerShell
33 |
34 | echo "Installing Routing, Waiting 10 Seconds..."
35 | Start-Sleep -s 10
36 | Install-WindowsFeature Routing
37 |
38 | echo "#########################################################"
39 | echo "Docker and network components are installed..."
40 | echo "After Restart, please run INSTALL2.ps1..."
41 | echo "Before running install2.ps1, please make sure the 'k' directory exists under C: (c:\k) and contains the K8s config file..."
42 | echo "e.g. mkdir c:\k"
43 | echo "e.g. run on the master node: scp -r /home/ubuntu/.kube/config windowsuser@IP:C:\k"
44 | echo "e.g. or copy the config file content, and paste it on Windows c:\k"
45 | echo "#########################################################"
46 | echo "Computer will be restarted in 10 Seconds..."
47 | Start-Sleep -s 10
48 | Restart-Computer -Force
49 |
--------------------------------------------------------------------------------
/K8s-CreatingPod-Imperative.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Creating Pod - Imperative Way
2 |
3 | This scenario shows:
4 | - how to create basic K8s pod using imperative commands,
5 | - how to get more information about a pod (for troubleshooting),
6 | - how to run commands in pod,
7 | - how to delete pod.
8 |
9 |
10 |
11 | ### Steps
12 |
13 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04)
14 |
15 | 
16 |
17 | - Run pod in imperative way
18 | - "kubectl run **podName** --image=**imageName**"
19 | - "kubectl get pods -o wide" : get info about pods
20 |
21 | 
22 |
23 | - Describe the pod to get more information (useful for troubleshooting):
24 |
25 | 
26 |
27 | - To reach the pod's logs (useful for troubleshooting):
28 |
29 | 
30 |
31 | - To reach the pod's logs with "-f" (live logs, attaches to the pod's log):
32 |
33 | 
34 |
35 | - Run command on pod ("kubectl exec **podName** -- **command**"):
36 |
37 | 
38 |
39 | - Entering into the pod and running bash or sh on pod:
40 | - "kubectl exec -it **podName** -- bash"
41 | - "kubectl exec -it **podName** -- /bin/sh"
42 | - exit from the pod in 2 ways:
43 | - "exit" command
44 | - "CTRL+P+Q"
45 |
46 | 
47 |
48 | - Delete pod:
49 |
50 | 
51 |
52 | - The imperative way can be difficult to store and manage: every time, commands must be entered by hand. To avoid this, we can use a YAML file to define pods and their features. This is called the declarative way.
53 |
54 |
--------------------------------------------------------------------------------
/K8s-Enable-Dashboard-On-Cluster.md:
--------------------------------------------------------------------------------
1 | ## LAB: Enable Dashboard on Cluster
2 |
3 |
4 | ### K8s Cluster (with Multipass VM)
5 | - K8s cluster was created before:
6 | - **Goto:** [K8s Kubeadm Cluster Setup](https://github.com/omerbsezer/Fast-Kubernetes/blob/main/K8s-Kubeadm-Cluster-Setup.md)
7 |
8 | ### Enable Dashboard on Cluster
9 |
10 | - To enable dashboard on cluster, apply yaml file (https://github.com/kubernetes/dashboard)
11 | ```
12 | kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
13 | kubectl proxy
14 | on browser: http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
15 | ```
16 |
17 | 
18 |
19 | - Now we should find the token to enter the dashboard as an admin user.
20 |
21 | ```
22 | kubectl create serviceaccount dashboard-admin-sa
23 | kubectl create clusterrolebinding dashboard-admin-sa --clusterrole=cluster-admin --serviceaccount=default:dashboard-admin-sa
24 | kubectl get secrets
25 | kubectl describe secret dashboard-admin-sa-token-m84l5 # the token name differs per cluster; find yours using "kubectl get secrets"
26 | ```
27 |
28 | 
29 |
30 | 
31 |
32 | 
33 |
34 | - Enter the token grabbed before:
35 |
36 | 
37 |
38 | - Now we reached the dashboard:
39 |
40 | 
41 |
42 | ### Enable Dashboard on Minikube
43 |
44 | - Minikube has addons to enable dashboard:
45 |
46 | ```
47 | minikube addons enable dashboard
48 | minikube addons enable metrics-server
49 | minikube dashboard
50 | # if running on WSL/WSL2 to open browser
51 | sensible-browser http://127.0.0.1:45771/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
52 | ```
53 |
54 | 
55 |
56 | ### Reference
57 |
58 | - https://www.replex.io/blog/how-to-install-access-and-add-heapster-metrics-to-the-kubernetes-dashboard
59 | - https://github.com/kubernetes/dashboard
60 |
--------------------------------------------------------------------------------
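A note on the token steps above: on Kubernetes v1.24 and newer, ServiceAccounts no longer get long-lived token Secrets automatically, so "kubectl get secrets" may show nothing for dashboard-admin-sa. A hedged alternative on such clusters:

```
# Request a short-lived token for the service account instead of reading an auto-created secret:
kubectl create token dashboard-admin-sa
```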
/labs/statefulset/statefulset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: cassandra
6 | name: cassandra
7 | spec:
8 | clusterIP: None
9 | ports:
10 | - port: 9042
11 | selector:
12 | app: cassandra
13 | ---
14 | apiVersion: apps/v1
15 | kind: StatefulSet
16 | metadata:
17 | name: cassandra
18 | labels:
19 | app: cassandra
20 | spec:
21 | serviceName: cassandra
22 | replicas: 2
23 | selector:
24 | matchLabels:
25 | app: cassandra
26 | template:
27 | metadata:
28 | labels:
29 | app: cassandra
30 | spec:
31 | terminationGracePeriodSeconds: 1800
32 | containers:
33 | - name: cassandra
34 | image: gcr.io/google-samples/cassandra:v13
35 | imagePullPolicy: Always
36 | ports:
37 | - containerPort: 7000
38 | name: intra-node
39 | - containerPort: 7001
40 | name: tls-intra-node
41 | - containerPort: 7199
42 | name: jmx
43 | - containerPort: 9042
44 | name: cql
45 | resources:
46 | limits:
47 | cpu: "500m"
48 | memory: 1Gi
49 | requests:
50 | cpu: "500m"
51 | memory: 1Gi
52 | securityContext:
53 | capabilities:
54 | add:
55 | - IPC_LOCK
56 | lifecycle:
57 | preStop:
58 | exec:
59 | command:
60 | - /bin/sh
61 | - -c
62 | - nodetool drain
63 | env:
64 | - name: MAX_HEAP_SIZE
65 | value: 512M
66 | - name: HEAP_NEWSIZE
67 | value: 100M
68 | - name: CASSANDRA_SEEDS
69 | value: "cassandra-0.cassandra.default.svc.cluster.local"
70 | - name: CASSANDRA_CLUSTER_NAME
71 | value: "K8Demo"
72 | - name: CASSANDRA_DC
73 | value: "DC1-K8Demo"
74 | - name: CASSANDRA_RACK
75 | value: "Rack1-K8Demo"
76 | - name: POD_IP
77 | valueFrom:
78 | fieldRef:
79 | fieldPath: status.podIP
80 | readinessProbe:
81 | exec:
82 | command:
83 | - /bin/bash
84 | - -c
85 | - /ready-probe.sh
86 | initialDelaySeconds: 15
87 | timeoutSeconds: 5
88 | volumeMounts:
89 | - name: cassandra-data
90 | mountPath: /cassandra_data
91 | volumeClaimTemplates:
92 | - metadata:
93 | name: cassandra-data
94 | spec:
95 | accessModes: [ "ReadWriteOnce" ]
96 | storageClassName: standard
97 | resources:
98 | requests:
99 | storage: 1Gi
--------------------------------------------------------------------------------
/create_real_cluster/ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10/master.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ## this script is to create K8s Master,
3 | ## usage => master.sh
4 |
5 | set -e -o pipefail # fail on error , debug all lines
6 |
7 | echo "Initiating K8s Cluster..."
8 | sudo kubeadm init --pod-network-cidr=172.24.0.0/16 --apiserver-advertise-address=$1 --control-plane-endpoint=$1
9 | mkdir -p $HOME/.kube
10 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
11 | sudo chown $(id -u):$(id -g) $HOME/.kube/config
12 |
13 | echo "Install calico..."
14 | curl https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/calico.yaml > calico.yaml
15 | kubectl apply -f ./calico.yaml
16 |
17 | echo "Install calicoctl..."
18 | sudo curl -o /usr/local/bin/calicoctl -O -L "https://github.com/projectcalico/calico/releases/download/v3.25.0/calicoctl-linux-amd64"
19 | sudo chmod +x /usr/local/bin/calicoctl
20 |
21 | echo "Disable IPinIP..."
22 | echo "Waiting 40sec..."
23 | sleep 40
24 | calicoctl get ipPool 'default-ipv4-ippool' -o yaml
25 | calicoctl get ipPool 'default-ipv4-ippool' -o yaml > ippool.yaml
26 | sed -i 's/Always/Never/g' ippool.yaml
27 | calicoctl apply -f ippool.yaml
28 |
29 | echo "Configure felixconfig..."
30 | echo "Waiting 5sec..."
31 | sleep 5
32 | kubectl get felixconfigurations.crd.projectcalico.org default -o yaml -n kube-system > felixconfig.yaml
33 | sed -i 's/true/false/g' felixconfig.yaml
34 | kubectl apply -f felixconfig.yaml
35 |
36 | calicoctl ipam configure --strictaffinity=true
37 | sleep 2
38 | echo ""
39 | echo "*******"
40 | echo "*** Please REBOOT/RESTART the PC now..."
41 | echo "*** After restart run on this Master node: kubeadm token create --print-join-command"
42 | echo "*** After restart if you encounter error (not to reach cluster, or API), please run closing swap commands again:"
43 | echo "*** sudo swapoff -a"
44 | echo "*** sudo sed -i '/ swap / s/^/#/' /etc/fstab"
45 | echo "*** Copy and Paste the response into the each WORKER Node with SUDO command..."
46 | kubeadm token create --print-join-command
47 | echo ""
48 | echo "*** K8s Master Node is now up and the cluster is created..."
49 | echo "*******"
50 | kubectl cluster-info
51 | kubectl get nodes -o wide
52 | #sudo reboot
53 |
54 | # https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/kubeconfig
55 | echo "*******"
56 | echo "*** Calico-Node secret will be created for Windows Calico..."
57 | echo "*******"
58 | kubectl apply -f - <<EOF
--------------------------------------------------------------------------------
/create_real_cluster/ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24/master-ubuntu24.04-k8s1.32.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ## this script is to create K8s Master,
3 | ## usage => master.sh
4 |
5 | set -e -o pipefail # fail on error , debug all lines
6 |
7 | echo "Initiating K8s Cluster..."
8 | sudo kubeadm init --pod-network-cidr=172.24.0.0/16 --apiserver-advertise-address=$1 --control-plane-endpoint=$1
9 | mkdir -p $HOME/.kube
10 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
11 | sudo chown $(id -u):$(id -g) $HOME/.kube/config
12 |
13 | echo "Install calico..."
14 | curl https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/calico.yaml > calico.yaml
15 | kubectl apply -f ./calico.yaml
16 |
17 | echo "Install calicoctl..."
18 | sudo curl -o /usr/local/bin/calicoctl -O -L "https://github.com/projectcalico/calico/releases/download/v3.29.1/calicoctl-linux-amd64"
19 | sudo chmod +x /usr/local/bin/calicoctl
20 |
21 | echo "Disable IPinIP..."
22 | echo "Waiting 40sec..."
23 | sleep 40
24 | calicoctl get ipPool 'default-ipv4-ippool' -o yaml
25 | calicoctl get ipPool 'default-ipv4-ippool' -o yaml > ippool.yaml
26 | sed -i 's/Always/Never/g' ippool.yaml
27 | calicoctl apply -f ippool.yaml
28 |
29 | echo "Configure felixconfig..."
30 | echo "Waiting 5sec..."
31 | sleep 5
32 | kubectl get felixconfigurations.crd.projectcalico.org default -o yaml -n kube-system > felixconfig.yaml
33 | sed -i 's/true/false/g' felixconfig.yaml
34 | kubectl apply -f felixconfig.yaml
35 |
36 | calicoctl ipam configure --strictaffinity=true
37 | sleep 2
38 | echo ""
39 | echo "*******"
40 | echo "*** Please REBOOT/RESTART the PC now..."
41 | echo "*** After restart run on this Master node: kubeadm token create --print-join-command"
42 | echo "*** After restart if you encounter error (not to reach cluster, or API), please run closing swap commands again:"
43 | echo "*** sudo swapoff -a"
44 | echo "*** sudo sed -i '/ swap / s/^/#/' /etc/fstab"
45 | echo "*** Copy and Paste the response into the each WORKER Node with SUDO command..."
46 | kubeadm token create --print-join-command
47 | echo ""
48 | echo "*** K8s Master Node is now up and the cluster is created..."
49 | echo "*******"
50 | kubectl cluster-info
51 | kubectl get nodes -o wide
52 | #sudo reboot
53 |
54 | # https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/kubeconfig
55 | echo "*******"
56 | echo "*** Calico-Node secret will be created for Windows Calico..."
57 | echo "*******"
58 | kubectl apply -f - <<EOF
--------------------------------------------------------------------------------
/create_real_cluster/ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ## this script is to install K8s dependency
3 | ## before using: chmod 777 install.sh
4 | ## usage => ./install.sh
5 |
6 | set -e -o pipefail # fail on error , debug all lines
7 |
8 | sudo apt-get update
9 | sudo apt-get upgrade -y
10 |
11 | echo "Configuring k8s.conf..."
12 | cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
65 | sudo apt-get update
66 | sudo apt-get install -y docker-ce docker-ce-cli containerd.io
67 |
68 | cd /etc/docker
69 | sudo touch /etc/docker/daemon.json
70 |
71 | echo "Configuring docker daemon.json..."
72 | cat < master.sh "
88 | echo "*** If you are installing worker node, on master node: kubeadm token create --print-join-command"
89 | echo "*** Copy and Paste the response into the each WORKER Node with SUDO command..."
90 | echo "*******"
91 |
--------------------------------------------------------------------------------
/create_real_cluster/ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install-ubuntu24.04-k8s1.32.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ## this script is to install K8s dependency
3 | ## before using: chmod 777 install.sh
4 | ## usage => ./install.sh
5 |
6 | set -e -o pipefail # fail on error , debug all lines
7 |
8 | sudo apt-get update
9 | sudo apt-get upgrade -y
10 |
11 | echo "Configuring k8s.conf..."
12 | cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
71 | sudo apt-get update
72 | sudo apt-get install -y docker-ce docker-ce-cli containerd.io
73 |
74 | cd /etc/docker
75 | sudo touch /etc/docker/daemon.json
76 |
77 | echo "Configuring docker daemon.json..."
78 | cat <<EOF | sudo tee /etc/docker/daemon.json
94 | echo "*** If you are installing worker node, on master node: kubeadm token create --print-join-command"
95 | echo "*** Copy and Paste the response into the each WORKER Node with SUDO command..."
96 | echo "*******"
97 |
--------------------------------------------------------------------------------
/K8s-Liveness-App.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Liveness Probe
2 |
3 | This scenario shows how the liveness probe works.
4 |
5 | ### Steps
6 |
7 | - Create 3 Pods with the following YAML file (liveness.yaml):
8 | - In the first pod (e.g. web app), the kubelet sends an HTTP GET request to "http://localhost:8080/healthz" (port 8080).
9 | - If it returns an HTTP code >= 200 and < 400, the pod works correctly.
10 | - If it returns an HTTP code >= 400, the pod does not work properly.
11 | - initialDelaySeconds: 3 => wait 3 seconds before starting the liveness probe.
12 | - periodSeconds: 3 => wait 3 seconds between requests.
13 | - In the second pod (e.g. console app), the probe checks whether a file ("healthy") exists under a specific directory ("/tmp/") using "cat".
14 | - If the command returns exit code 0, the pod works correctly.
15 | - If it returns any exit code other than 0, the pod does not work properly.
16 | - initialDelaySeconds: 5 => wait 5 seconds before starting the liveness probe.
17 | - periodSeconds: 5 => wait 5 seconds between requests.
18 | - In the third pod (e.g. database app: mysql), the probe opens a TCP socket connection.
19 | - If the connection succeeds, the pod works correctly.
20 | - If the connection fails (e.g. connection refused), the pod does not work properly.
21 | - initialDelaySeconds: 15 => wait 15 seconds before starting the liveness probe.
22 | - periodSeconds: 20 => wait 20 seconds between requests.
23 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/liveness/liveness.yaml
24 |
25 | ```
26 | apiVersion: v1
27 | kind: Pod
28 | metadata:
29 | labels:
30 | test: liveness
31 | name: liveness-http
32 | spec:
33 | containers:
34 | - name: liveness
35 | image: k8s.gcr.io/liveness
36 | args:
37 | - /server
38 | livenessProbe:
39 | httpGet:
40 | path: /healthz
41 | port: 8080
42 | httpHeaders:
43 | - name: Custom-Header
44 | value: Awesome
45 | initialDelaySeconds: 3
46 | periodSeconds: 3
47 | ---
48 | apiVersion: v1
49 | kind: Pod
50 | metadata:
51 | labels:
52 | test: liveness
53 | name: liveness-exec
54 | spec:
55 | containers:
56 | - name: liveness
57 | image: k8s.gcr.io/busybox
58 | args:
59 | - /bin/sh
60 | - -c
61 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
62 | livenessProbe:
63 | exec:
64 | command:
65 | - cat
66 | - /tmp/healthy
67 | initialDelaySeconds: 5
68 | periodSeconds: 5
69 | ---
70 | apiVersion: v1
71 | kind: Pod
72 | metadata:
73 | name: goproxy
74 | labels:
75 | app: goproxy
76 | spec:
77 | containers:
78 | - name: goproxy
79 | image: k8s.gcr.io/goproxy:0.1
80 | ports:
81 | - containerPort: 8080
82 | livenessProbe:
83 | tcpSocket:
84 | port: 8080
85 | initialDelaySeconds: 15
86 | periodSeconds: 20
87 | ```
88 |
89 | 
90 |
91 | 
92 |
93 | 
94 |
95 | - Run on terminal: kubectl apply -f liveness.yaml
96 | - Run on another terminal: kubectl get pods -o wide --all-namespaces
97 |
98 | 
99 |
100 | - Run to see details of liveness-http pod: kubectl describe pod liveness-http
101 |
102 | 
103 |
--------------------------------------------------------------------------------
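To watch the probes above actually fire, a sketch: liveness-exec passes for about 30 seconds (while /tmp/healthy exists) and then starts failing:

```
kubectl get pod liveness-exec -w      # the RESTARTS column increments each time the probe kills the container
kubectl get events --field-selector involvedObject.name=liveness-exec   # shows the "Liveness probe failed" events
```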
/K8s-Deployment.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Deployment - Scale Up/Down - Bash Connection - Port Forwarding
2 |
3 | This scenario shows:
4 | - how to create deployment,
5 | - how to get detail information of deployment and pods,
6 | - how to scale up and down of deployment,
7 | - how to connect to the one of the pods with bash,
8 | - how to show ethernet interfaces of the pod and ping other pods,
9 | - how to forward ports to see nginx server page using browser.
10 |
11 | ### Steps
12 |
13 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start")
14 |
15 | 
16 |
17 | - Create Yaml file (deployment1.yaml) in your directory and copy the below definition into the file.
18 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/deployment/deployment1.yaml
19 |
20 | ```
21 | apiVersion: apps/v1
22 | kind: Deployment
23 | metadata:
24 | name: firstdeployment
25 | labels:
26 | team: development
27 | spec:
28 | replicas: 3
29 | selector: # deployment selector
30 | matchLabels: # deployment selects "app:frontend" pods, monitors and traces these pods
31 | app: frontend # if one of the pods is killed, K8s looks at the desired state (replicas: 3) and recreates a pod to keep the replica count
32 | template:
33 | metadata:
34 | labels: # pod labels, if the deployment selector is same with these labels, deployment follows pods that have these labels
35 | app: frontend # key: value
36 | spec:
37 | containers:
38 | - name: nginx
39 | image: nginx:latest # image download from DockerHub
40 | ports:
41 | - containerPort: 80 # open following ports
42 | ```
43 |
44 | 
45 |
46 |
47 | - Create deployment and list the deployment's pods:
48 |
49 | 
50 |
51 | - Delete one of the pod, then K8s automatically creates new pod:
52 |
53 | 
54 |
55 | - Scale up to 5 replicas:
56 |
57 | 
58 |
59 | - Scale down to 3 replicas:
60 |
61 | 
62 |
63 | - Get more information about pods (ip, node):
64 |
65 | 
66 |
67 |
68 | - Connect one of the pod with bash:
69 |
70 | 
71 |
72 | - To install ifconfig, run: "apt update", "apt install net-tools"
73 | - To install ping, run: "apt install iputils-ping"
74 | - Show ethernet interfaces:
75 |
76 | 
77 |
78 | - Ping other pods:
79 |
80 | 
81 |
82 | - Port-forward from one of the pod to host (8085:80):
83 |
84 | 
85 |
86 | - On the browser, goto http://127.0.0.1:8085/
87 |
88 | 
89 |
90 | - Delete deployment:
91 |
92 | 
93 |
94 |
--------------------------------------------------------------------------------
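The Deployment lab above shows screenshots; as a sketch, the matching commands (<podName> is a placeholder, take one from 'kubectl get pods'):

```
kubectl apply -f deployment1.yaml
kubectl scale deployment firstdeployment --replicas=5   # scale up
kubectl scale deployment firstdeployment --replicas=3   # scale down
kubectl exec -it <podName> -- bash                      # connect to one of the pods
kubectl port-forward <podName> 8085:80                  # then browse http://127.0.0.1:8085
kubectl delete -f deployment1.yaml
```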
/K8s-Daemon-Sets.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Daemon Sets
2 |
3 | This scenario shows how K8s DaemonSets work on minikube by adding new nodes.
4 |
5 | ### Steps
6 |
7 | - Copy and save (below) as file on your PC (daemonset.yaml).
8 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/daemonset/daemonset.yaml
9 |
10 | ```
11 | apiVersion: apps/v1
12 | kind: DaemonSet
13 | metadata:
14 | name: logdaemonset
15 | labels:
16 | app: fluentd-logging
17 | spec:
18 | selector:
19 | matchLabels: # label selector should be same labels in the template (template > metadata > labels)
20 | name: fluentd-elasticsearch
21 | template:
22 | metadata:
23 | labels:
24 | name: fluentd-elasticsearch
25 | spec:
26 | tolerations:
27 | - key: node-role.kubernetes.io/master # this toleration is to have the daemonset runnable on master nodes
28 | effect: NoSchedule # remove it if your masters can't run pods
29 | containers:
30 | - name: fluentd-elasticsearch
31 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 # installing fluentd elasticsearch on each nodes
32 | resources:
33 | limits:
34 | memory: 200Mi # resource limitations configured
35 | requests:
36 | cpu: 100m
37 | memory: 200Mi
38 | volumeMounts: # definition of volumeMounts for each pod
39 | - name: varlog
40 | mountPath: /var/log
41 | - name: varlibdockercontainers
42 | mountPath: /var/lib/docker/containers
43 | readOnly: true
44 | terminationGracePeriodSeconds: 30
45 | volumes: # ephemeral volumes on the node (hostPath defined)
46 | - name: varlog
47 | hostPath:
48 | path: /var/log
49 | - name: varlibdockercontainers
50 | hostPath:
51 | path: /var/lib/docker/containers
52 | ```
53 |
54 | 
55 |
56 | - Create daemonset on minikube:
57 |
58 | 
59 |
60 | - Run watch command on Linux: "watch kubectl get daemonset", on Win: "kubectl get daemonset -w"
61 |
62 | 
63 |
64 | - Add new node on the cluster:
65 |
66 | 
67 |
68 | - See that the app runs automatically on the new node:
69 |
70 | 
71 |
72 | - Add new node (3rd):
73 |
74 | 
75 |
76 | - Now the daemonset runs on the 3rd node:
77 |
78 | 
79 |
80 | - Delete one of the pod:
81 |
82 | 
83 |
84 | - Pod deletion can be seen here:
85 |
86 | 
87 |
88 | - The daemonset creates a new pod automatically:
89 |
90 | 
91 |
92 | - See the nodes resource on dashboard:
93 |
94 | 
95 |
96 | - Delete nodes and delete daemonset:
97 |
98 | 
99 |
100 |
101 |
102 |
--------------------------------------------------------------------------------
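The node steps in the DaemonSet lab above map to these commands on minikube, as a sketch (<nodeName> comes from 'kubectl get nodes'):

```
kubectl apply -f daemonset.yaml
minikube node add                   # add a worker node; the daemonset starts a pod on it automatically
kubectl get daemonset -w            # DESIRED/READY counts grow with each node
minikube node delete <nodeName>     # remove a node again
kubectl delete -f daemonset.yaml
```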
/K8s-Taint-Toleration.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Taint Toleration
2 |
3 | This scenario shows:
4 | - how to taint/untaint the node,
5 | - how to see the node details,
6 | - how a pod that does not tolerate the taint is prevented from running on the node.
7 |
8 |
9 | ### Steps
10 |
11 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start")
12 |
13 | 
14 |
15 | - Create Yaml file (podtoleration.yaml) in your directory and copy the below definition into the file.
16 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/tainttoleration/podtoleration.yaml
17 |
18 | ```
19 | apiVersion: v1
20 | kind: Pod
21 | metadata:
22 | name: toleratedpod1
23 | labels:
24 | env: test
25 | spec:
26 | containers:
27 | - name: toleratedcontainer1
28 | image: nginx:latest
29 | tolerations: # pod tolerates "platform=production:NoSchedule"
30 | - key: "platform"
31 | operator: "Equal"
32 | value: "production"
33 | effect: "NoSchedule"
34 | ---
35 | apiVersion: v1
36 | kind: Pod
37 | metadata:
38 | name: toleratedpod2
39 | labels:
40 | env: test
41 | spec:
42 | containers:
43 | - name: toleratedcontainer2
44 | image: nginx:latest
45 | tolerations:
46 | - key: "platform" # pod tolerates "platform:NoSchedule"; the value is not important for this pod
47 | operator: "Exists" # pod can run on nodes that have "platform=test:NoSchedule" or "platform=production:NoSchedule"
48 | effect: "NoSchedule"
49 | ```
50 |
51 | 
52 |
53 | 
54 |
55 | - When we look at the node details, there is not any taint on the node (minikube):
56 | ```
57 | kubectl describe node minikube
58 | ```
59 | 
60 |
61 | - Add taint to the node (minikube):
62 | ```
63 | kubectl taint node minikube app=production:NoSchedule
64 | ```
65 | 
66 |
67 | - Create pod that does not tolerate the taint:
68 | ```
69 | kubectl run test --image=nginx --restart=Never
70 | ```
71 | 
72 |
73 | - This pod waits as Pending indefinitely, because it does not tolerate the taint:
74 |
75 | 
76 |
77 | 
78 |
79 |
80 | - In the yaml file above (podtoleration.yaml), we have 2 pods that tolerate this taint => "app=production:NoSchedule"
81 | - Create these 2 pods:
82 |
83 | 
84 |
85 | - These pods tolerate the taint and run on the node, but "test" does not tolerate it, so it still waits:
86 |
87 | 
88 |
89 | - But if we add another taint with the "NoExecute" effect, already-running pods that do not tolerate it are evicted:
90 | ```
91 | kubectl taint node minikube version=new:NoExecute
92 | ```
93 | 
94 |
95 | 
96 |
97 | - Delete taint from the node:
98 | ```
99 | kubectl taint node minikube version-
100 | ```
101 | 
102 |
103 | - Delete minikube:
104 |
105 | 
106 |
--------------------------------------------------------------------------------
/K8s-Statefulset.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Stateful Set - Nginx
2 |
3 | This scenario shows how K8s statefulset object works on minikube
4 |
5 | ### Steps
6 |
7 | - Copy and save (below) as file on your PC (statefulset_nginx.yaml).
8 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/statefulset/statefulset.yaml
9 |
10 | ```
11 | apiVersion: v1
12 | kind: Service
13 | metadata:
14 | name: nginx # create a service with "nginx" name
15 | labels:
16 | app: nginx
17 | spec:
18 | ports:
19 | - port: 80
20 | name: web # create headless service if clusterIP:None
21 | clusterIP: None # when requesting service name, service returns one of the IP of pods
22 | selector: # headless service provides to reach pod with podName.serviceName
23 | app: nginx # selects/binds to app:nginx (defined in: spec > template > metadata > labels > app:nginx)
24 | ---
25 | apiVersion: apps/v1
26 | kind: StatefulSet
27 | metadata:
28 | name: web # statefulset name: web
29 | spec:
30 | serviceName: nginx # binds/selects service (defined in metadata > name: nginx)
31 | replicas: 3
32 | selector:
33 | matchLabels:
34 | app: nginx
35 | template:
36 | metadata:
37 | labels:
38 | app: nginx
39 | spec:
40 | containers:
41 | - name: nginx
42 | image: k8s.gcr.io/nginx-slim:0.8
43 | ports:
44 | - containerPort: 80
45 | name: web
46 | volumeMounts:
47 | - name: www
48 | mountPath: /usr/share/nginx/html
49 | volumeClaimTemplates:
50 | - metadata:
51 | name: www
52 | spec:
53 | accessModes: [ "ReadWriteOnce" ] # creates PVCs for each pod automatically
54 | resources: # hence, each node has own PV
55 | requests:
56 | storage: 512Mi
57 | ```
58 |
59 | 
60 |
61 | 
62 |
63 | - Create statefulset and pvc:
64 |
65 | 
66 |
67 | - Pods are created with statefulsetName-0,1,2 (e.g. web-0)
68 |
69 | 
70 |
71 | - PVCs and PVs are automatically created for each pod. Even if a pod is restarted, the same PV is bound to the same pod.
72 |
73 | 
74 |
75 | - Scaled from 3 Pods to 4 Pods:
76 |
77 | 
78 |
79 | - The new pod's name is not assigned randomly; names are assigned in order, so the 4th pod gets the name "web-3" (ordinals start at 0).
80 |
81 | 
82 |
83 | - Scale down to 3 Pods again:
84 |
85 | 
86 |
87 | - The last created pod is deleted first:
88 |
89 | 
90 |
91 | - When creating a headless service, the service does not get a ClusterIP (it shows "None"):
92 |
93 | 
94 |
95 | - With a headless service, a DNS query for the service name returns one of the pod IPs, balancing the load between the pods:
96 |
97 | 
98 |
99 | - If we ping a specific pod with podName.serviceName (e.g. "ping web-0.nginx"), it returns the IP of that pod.
100 | - With a statefulset, pod names are stable and known in advance, which makes it possible to address pods by name.
101 |
102 | 
103 |
104 |
--------------------------------------------------------------------------------
/K8s-Multicontainer-Sidecar.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Multicontainer - Sidecar - Emptydir Volume - Port-Forwarding
2 |
3 | This scenario shows:
4 | - how to create multicontainer in one pod,
5 | - how the multicontainers in the same pod have same ethernet interface (IPs),
6 | - how the multicontainers in the same pod can reach the shared volume area,
7 | - how to make port-forwarding to host PC ports
8 |
9 | ### Steps
10 |
11 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start")
12 |
13 | 
14 |
15 | - Create Yaml file (multicontainer.yaml) in your directory and copy the below definition into the file.
16 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/pod/multicontainer.yaml
17 |
18 | ```
19 | apiVersion: v1
20 | kind: Pod
21 | metadata:
22 | name: multicontainer
23 | spec:
24 | containers:
25 | - name: webcontainer # container name: webcontainer
26 | image: nginx # image from nginx
27 | ports: # opening-port: 80
28 | - containerPort: 80
29 | volumeMounts:
30 | - name: sharedvolume
31 | mountPath: /usr/share/nginx/html # path in the container
32 | - name: sidecarcontainer
33 | image: busybox # sidecar, second container image is busybox
34 | command: ["/bin/sh"] # it pulls index.html file from github every 15 seconds
35 | args: ["-c", "while true; do wget -O /var/log/index.html https://raw.githubusercontent.com/omerbsezer/Fast-Kubernetes/main/index.html; sleep 15; done"]
36 | volumeMounts:
37 | - name: sharedvolume
38 | mountPath: /var/log
39 | volumes: # define emptydir temporary volume; when the pod is deleted, the volume is also deleted
40 | - name: sharedvolume # name of volume
41 | emptyDir: {} # volume type emptyDir: creates an empty directory on the node where the pod is running
42 | ```
43 |
44 | 
45 |
46 | - Create multicontainer on the pod (webcontainer and sidecarcontainer):
47 |
48 | 
49 |
50 | - Connect (/bin/sh of the webcontainer) and install net-tools to show ethernet interface (IP: 172.17.0.3)
51 |
52 | 
53 |
54 | - Connect (/bin/sh of the sidecarcontainer) and show ethernet interface (IP: 172.17.0.3).
55 | - Containers running on same pod have same ethernet interfaces and same IPs (172.17.0.3).
56 |
57 | 
58 |
59 | - From the webcontainer, the volume shared with the sidecarcontainer is reachable:
60 |
61 | 
62 |
63 | - The same can be seen from the sidecarcontainer. Both containers can reach the same volume area.
64 | - If a new file is created on this volume, the other container can also reach it.
65 |
66 | 
67 |
68 | - When we look at the sidecarcontainer logs, it pulls index.html file from "https://raw.githubusercontent.com/omerbsezer/Fast-Kubernetes/main/index.html" every 15 seconds.
69 |
70 | 
71 |
72 | - We can forward the port of the pod to the host PC port (hostPort:containerPort, e.g: 8080:80):
73 |
74 | 
75 |
76 | - On the browser, goto http://127.0.0.1:8080/
77 |
78 | 
79 |
80 |
81 | - After the content of index.html is updated, the new HTML page is downloaded by the sidecarcontainer:
82 |
83 | 
84 |
85 | - Exit from the container shell and delete the multicontainer pod:
86 |
87 | 
88 |
--------------------------------------------------------------------------------
/HelmCheatsheet.md:
--------------------------------------------------------------------------------
1 | ## Helm Commands Cheatsheet
2 |
3 | ### 1. Help, Version
4 |
5 | #### See the general help for Helm
6 | ```
7 | helm --help
8 | ```
9 | #### See help for a particular command
10 | ```
11 | helm [command] --help
12 | ```
13 | #### See the installed version of Helm
14 | ```
15 | helm version
16 | ```
17 |
18 | ### 2. Repo Add, Remove, Update
19 |
20 | #### Add a repository from the internet
21 | ```
22 | helm repo add [name] [url]
23 | ```
24 | #### Remove a repository from your system
25 | ```
26 | helm repo remove [name]
27 | ```
28 | #### Update repositories
29 | ```
30 | helm repo update
31 | ```
32 |
33 | ### 3. Repo List, Search
34 |
35 | #### List chart repositories
36 | ```
37 | helm repo list
38 | ```
39 | #### Search charts for a keyword (Helm 2 syntax; in Helm 3, use "helm search repo" or "helm search hub")
40 | ```
41 | helm search [keyword]
42 | ```
43 | #### Search repositories for a keyword
44 | ```
45 | helm search repo [keyword]
46 | ```
47 | #### Search Helm Hub
48 | ```
49 | helm search hub [keyword]
50 | ```
51 |
52 | ### 4. Install/Uninstall
53 |
54 | #### Install an app
55 | ```
56 | helm install [name] [chart]
57 | ```
58 |
59 | #### Install an app in a specific namespace
60 | ```
61 | helm install [name] [chart] --namespace [namespace]
62 | ```
63 |
64 | #### Override the default values with those specified in a file of your choice
65 | ```
66 | helm install [name] [chart] --values [yaml-file/url]
67 | ```
68 |
69 | #### Run a test install to validate and verify the chart
70 | ```
71 | helm install [name] [chart] --dry-run --debug
72 | ```
73 |
74 | #### Uninstall a release
75 | ```
76 | helm uninstall [release name]
77 | ```
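
#### Worked example of the commands above (using the public Bitnami repository)
```
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm install my-wordpress bitnami/wordpress   # install the chart as release "my-wordpress"
helm uninstall my-wordpress                   # remove the release again
```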
78 |
79 | ### 5. Chart Management
80 |
81 | #### Create a directory containing the common chart files and directories
82 | ```
83 | helm create [name]
84 | ```
85 |
86 | #### Package a chart into a chart archive
87 | ```
88 | helm package [chart-path]
89 | ```
90 |
91 | #### Run tests to examine a chart and identify possible issues
92 | ```
93 | helm lint [chart]
94 | ```
95 |
96 | #### Inspect a chart and list its contents
97 | ```
98 | helm show all [chart]
99 | ```
100 | #### Display the chart’s definition
101 | ```
102 | helm show chart [chart]
103 | ```
104 |
105 | #### Download a chart
106 | ```
107 | helm pull [chart]
108 | ```
109 |
110 | #### Download a chart and extract the archive’s contents into a directory
111 | ```
112 | helm pull [chart] --untar --untardir [directory]
113 | ```
114 |
115 | #### Display a list of a chart’s dependencies
116 | ```
117 | helm dependency list [chart]
118 | ```
119 |
120 | ### 6. Release Monitoring
121 |
122 | #### List all the available releases in the current namespace
123 | ```
124 | helm list
125 | ```
126 | #### List all the available releases across all namespaces
127 | ```
128 | helm list --all-namespaces
129 | ```
130 | #### List all the releases in a specific namespace
131 | ```
132 | helm list --namespace [namespace]
133 | ```
134 | #### List all the releases in a specific output format
135 | ```
136 | helm list --output [format]
137 | ```
138 | #### See the status of a release
139 | ```
140 | helm status [release]
141 | ```
142 | #### See the release history
143 | ```
144 | helm history [release]
145 | ```
146 | #### See information about the Helm client environment
147 | ```
148 | helm env
149 | ```
150 |
151 | ### 7. Upgrade/Rollback
152 |
153 | #### Upgrade an app
154 | ```
155 | helm upgrade [release] [chart]
156 | ```
157 |
158 | #### Tell Helm to roll back changes if the upgrade fails
159 | ```
160 | helm upgrade [release] [chart] --atomic
161 | ```
162 |
163 | #### Upgrade a release. If it does not exist on the system, install it
164 | ```
165 | helm upgrade [release] [chart] --install
166 | ```
167 |
168 | #### Upgrade to a specific chart version (other than the latest one)
169 | ```
170 | helm upgrade [release] [chart] --version [version-number]
171 | ```
172 |
173 | #### Roll back a release
174 | ```
175 | helm rollback [release] [revision]
176 | ```
177 |
178 | ### 8. GET Information
179 |
180 | #### Download all the release information
181 | ```
182 | helm get all [release]
183 | ```
184 | #### Download all hooks
185 | ```
186 | helm get hooks [release]
187 | ```
188 | #### Download the manifest
189 | ```
190 | helm get manifest [release]
191 | ```
192 | #### Download the notes
193 | ```
194 | helm get notes [release]
195 | ```
196 | #### Download the values file
197 | ```
198 | helm get values [release]
199 | ```
200 | #### Release history
201 | ```
202 | helm history [release]
203 | ```
204 |
205 | ### 9. Plugin
206 |
207 | #### Install plugins
208 | ```
209 | helm plugin install [path/url1] [path/url2]
210 | ```
211 | #### View a list of all the installed plugins
212 | ```
213 | helm plugin list
214 | ```
215 | #### Update plugins
216 | ```
217 | helm plugin update [plugin1] [plugin2]
218 | ```
219 | #### Uninstall a plugin
220 | ```
221 | helm plugin uninstall [plugin]
222 | ```
223 |
224 |
225 |
226 |
--------------------------------------------------------------------------------
/K8s-Helm-Jenkins.md:
--------------------------------------------------------------------------------
1 | ## LAB: Helm-Jenkins on running K8s Cluster (2 Node Multipass VM)
2 |
3 | - "Whenever you trigger a Jenkins job, the Jenkins Kubernetes plugin will make an API call to create a Kubernetes agent pod. Then, the Jenkins agent pod gets deployed in the kubernetes with few environment variables containing the Jenkins server details and secrets."
4 | - "When the agent pod comes up, it used the details in its environment variables and talks back to Jenkins using the JNLP method" (Ref: DevopsCube)
5 |
6 |
7 |
8 |
9 |
10 | ### K8s Cluster (2 Node Multipass VM)
11 | - K8s cluster was created before:
12 | - **Goto:** [K8s Kubeadm Cluster Setup](https://github.com/omerbsezer/Fast-Kubernetes/blob/main/K8s-Kubeadm-Cluster-Setup.md)
13 |
14 | - On that cluster, helm was installed on the master node.
15 |
16 | ### Helm Install
17 |
18 | - Install on Ubuntu 20.04 (for other platforms: https://helm.sh/docs/intro/install/)
19 |
20 | ```
21 | curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
22 | sudo apt-get install apt-transport-https --yes
23 | echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
24 | sudo apt-get update
25 | sudo apt-get install helm
26 | helm version
27 | ```
28 |
29 | ### Jenkins Install
30 |
31 | ```
32 | helm repo add jenkins https://charts.jenkins.io
33 | helm repo list
34 | mkdir helm
35 | cd helm
36 | helm pull jenkins/jenkins
37 | tar zxvf jenkins-3.11.4.tgz
38 | ```
39 |
40 | - After extracting the archive, enter the jenkins directory; there you'll find the values.yaml file. Disable persistence by setting it to false (see the snippet after the screenshot).
41 | - If your on-premise cluster does not support storage classes, PVCs and PVs (like our multipass VM cluster), disable persistence. If you are working on minikube, PVCs and PVs are supported automatically.
42 | - If you don't disable persistence, you'll find that your pods will not run (they wait as Pending). You can inspect the PVC, PV and pod with the "kubectl describe" command.
43 |
44 | 
45 |
46 | - Install Helm Jenkins Release:
47 | ```
48 | helm install j1 jenkins
49 | kubectl get pods
50 | kubectl get svc
51 | kubectl get pods -o wide
52 | ```
53 |
54 | 
55 |
56 | - To get the Jenkins password (username: admin), run:
57 | ```
58 | kubectl exec --namespace default -it svc/j1-jenkins -c jenkins -- /bin/cat /run/secrets/chart-admin-password && echo
59 | ```
60 | 
61 |
62 | - Port Forwarding:
63 | ```
64 | kubectl --namespace default port-forward svc/j1-jenkins 8080:8080
65 | ```
66 | 
67 |
68 |
69 | ### Install Graphical Desktop to Reach Browser using Multipass VM
70 |
71 | - Install ubuntu-desktop, so you can reach multipass VM's browser using Windows RDP (Xrdp) (https://discourse.ubuntu.com/t/graphical-desktop-in-multipass/16229)
72 |
73 | ```
74 | sudo apt update
75 | sudo apt install ubuntu-desktop xrdp
76 | sudo passwd ubuntu # set password
77 | ```
78 |
79 | ### Jenkins Configuration
80 |
81 | - Helm also downloads automatically some of the plugins (kubernetes:1.31.3, workflow-aggregator:2.6, git:4.10.2, configuration-as-code:1.55.1) (Jenkins Version: 2.319.3)
82 | - Manage Jenkins > Configure System > Cloud
83 | 
84 |
85 | 
86 |
87 | 
88 |
89 | 
90 |
91 | 
92 |
93 | 
94 |
95 | - New Item on main page:
96 |
97 | 
98 |
99 | 
100 |
101 | - Add script > Build > Execute Shell:
102 |
103 | 
104 |
105 | - After triggering jobs, Jenkins (on the master) creates agents on worker1 automatically. After the jobs are completed, the agents are terminated.
106 |
107 | 
108 |
109 |
110 |
111 | ### Reference
112 |
113 | - https://www.jenkins.io/doc/book/scaling/scaling-jenkins-on-kubernetes/
114 | - https://devopscube.com/jenkins-build-agents-kubernetes/
115 |
116 |
117 |
--------------------------------------------------------------------------------
/K8s-Node-Affinity.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Node Affinity
2 |
3 | This scenario shows:
4 | - how to label the node,
5 | - when the node is not labelled and the pods' nodeAffinity is set to required, the pods wait as Pending
6 |
7 |
8 | ### Steps
9 |
10 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start")
11 |
12 | 
13 |
14 | - Create Yaml file (podnodeaffinity.yaml) in your directory and copy the below definition into the file.
15 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/affinity/podnodeaffinity.yaml
16 |
17 | ```
18 | apiVersion: v1
19 | kind: Pod
20 | metadata:
21 | name: nodeaffinitypod1
22 | spec:
23 | containers:
24 | - name: nodeaffinity1
25 | image: nginx:latest # "requiredDuringSchedulingIgnoredDuringExecution" means
26 | affinity: # Find a node during scheduling according to "matchExpression" and run pod on that node.
27 | nodeAffinity: # If it is not found, do not run this pod until finding specific node "matchExpression".
28 | requiredDuringSchedulingIgnoredDuringExecution: # "...IgnoredDuringExecution" means
29 | nodeSelectorTerms: # after scheduling, if the node label is removed/deleted from node, ignore it while executing.
30 | - matchExpressions:
31 | - key: app
32 | operator: In # In, NotIn, Exists, DoesNotExist
33 | values: # In => key=value, NotIn => key!=value
34 | - production # Exists => only key
35 | ---
36 | apiVersion: v1
37 | kind: Pod
38 | metadata:
39 | name: nodeaffinitypod2
40 | spec:
41 | containers:
42 | - name: nodeaffinity2
43 | image: nginx:latest
44 | affinity: # "preferredDuringSchedulingIgnoredDuringExecution" means
45 | nodeAffinity: # Find a node during scheduling according to "matchExpression" and run pod on that node.
46 | preferredDuringSchedulingIgnoredDuringExecution: # If it is not found, run this pod wherever it finds.
47 | - weight: 1 # if there is a node labelled "app=production", run on that node
48 | preference: # if there is NOT a node labelled "app=production" and no other preference matches,
49 | matchExpressions: # run this pod wherever the scheduler finds a node.
50 | - key: app
51 | operator: In
52 | values:
53 | - production
54 | - weight: 2 # this has the highest priority, weight:2 > weight:1
55 | preference: # if there is a node labelled "app=test", run on that node
56 | matchExpressions: # if there is NOT a node labelled "app=test", fall back to the weight:1 preference
57 | - key: app
58 | operator: In
59 | values:
60 | - test
61 | ---
62 | apiVersion: v1
63 | kind: Pod
64 | metadata:
65 | name: nodeaffinitypod3
66 | spec:
67 | containers:
68 | - name: nodeaffinity3
69 | image: nginx:latest
70 | affinity:
71 | nodeAffinity:
72 | requiredDuringSchedulingIgnoredDuringExecution:
73 | nodeSelectorTerms:
74 | - matchExpressions:
75 | - key: app
76 | operator: Exists # In, NotIn, Exists, DoesNotExist
77 | ```
78 |
79 | 
80 |
81 | 
82 |
83 | 
84 |
85 | - Create pods:
86 | - 1st pod waits as Pending: it requires a node labelled "app=production"; since none exists, it waits until such a node appears.
87 | - 2nd pod starts: it checks the labels first, but because the affinity is "preferredDuringScheduling...", it runs anywhere even if no match is found.
88 | - 3rd pod waits as Pending: it requires a node with the "app" label (any value); since none exists, it waits until such a node appears.
89 |
90 | 
91 |
92 | - After labelling the node with "app=production", the 1st and 3rd pods also run on the node, because they now find the required label.
93 |
94 | ```
95 | kubectl label node minikube app=production
96 | ```
97 | 
98 |
99 | - After unlabelling the node, all pods keep running due to "IgnoredDuringExecution": label changes are ignored once the pods are already executing.
100 |
101 | ```
102 | kubectl label node minikube app-
103 | ```
104 |
105 | 
106 |
107 | - Delete pods:
108 |
109 | 
110 |
111 |
112 |
--------------------------------------------------------------------------------
/K8s-Monitoring-Prometheus-Grafana.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Monitoring: Prometheus and Grafana
2 |
3 | This scenario shows how to implement Prometheus and Grafana on K8s Cluster.
4 |
5 |
6 | ### Table of Contents
7 | - [Monitoring With SSH](#ssh)
8 | - [Monitoring With Prometheus and Grafana](#prometheus-grafana)
9 | - [Prometheus and Grafana for Windows](#windows)
10 |
11 | There are different options to monitor K8s cluster: SSH, Kubernetes Dashboard, Prometheus and Grafana, etc.
12 |
13 | ## 1. Monitoring With SSH
14 |
15 | - SSH can be used to get basic information about the cluster, nodes, and pods.
16 | - Make SSH connection to Master Node of the K8s Cluster
17 |
18 | ```
19 | ssh username@masterIP
20 | ```
21 |
22 | - To get the nodes of the K8s
23 |
24 | ```
25 | kubectl get nodes -o wide
26 | ```
27 |
28 | - To get the pods on the K8s Cluster
29 |
30 | ```
31 | kubectl get pods -o wide
32 | ```
33 |
34 | - For Linux PCs: To get the pods on the K8s Cluster in real-time with the "watch" command
35 |
36 | ```
37 | watch kubectl get pods -o wide
38 | ```
39 |
40 | - To get all K8s objects:
41 |
42 | ```
43 | kubectl get all
44 | ```
45 |
46 | ## 2. Monitoring With Prometheus and Grafana
47 |
48 | - Helm is used to install Prometheus and Grafana.
49 | - To add Prometheus repo into local repo and download it:
50 |
51 | ```
52 | mkdir helm
53 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
54 | helm repo update
55 | helm pull prometheus-community/kube-prometheus-stack
56 | tar zxvf kube-prometheus-stack-34.10.0.tgz
57 | cd kube-prometheus-stack
58 | ```
59 |
60 | - "Values.yaml" file can be viewed and updated according to new values and new configuration.
61 | - To install prometheus, release name is prometheus
62 |
63 | ```
64 | helm install prometheus kube-prometheus-stack
65 | ```
66 |
67 | - Port forwarding is needed for the default connection; run these on different terminals:
68 |
69 | ```
70 | kubectl port-forward deployment/prometheus-grafana 3000
71 | kubectl port-forward prometheus-prometheus-kube-prometheus-prometheus-0 9090
72 | ```
73 |
74 | - Default provided username: admin, password: prom-operator
75 |
76 | 
77 |
78 | - Monitoring all nodes' resources in terms of CPU, memory, disk space, network transmitted/received
79 |
80 | 
81 |
82 | 
83 |
84 |
85 | - To make Grafana reachable via IP:Port, use the NodePort option.
86 | - Uninstall the current release:
87 |
88 | ```
89 | helm uninstall prometheus
90 | ```
91 |
92 | - Open values.yaml file (kube-prometheus-stack/charts/grafana/values.yaml), change type from "ClusterIP" to "NodePort" and add "nodePort: 32333"
93 |
94 | 
95 |
96 | - Run the new release
97 |
98 | ```
99 | helm install prometheus kube-prometheus-stack
100 | ```
101 |
102 | - On the browser from any PC on the cluster, grafana screen can be viewed: MasterIP:32333
103 |
104 | - Update (kube-prometheus-stack/charts/prometheus-node-exporter/values.yaml) so that the node exporter runs only on Linux machines, by adding a nodeSelector (see the sketch below):
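
```
nodeSelector:
  kubernetes.io/os: linux   # schedule node-exporter pods only on Linux nodes
```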
105 |
106 | #### 2.1. Prometheus and Grafana for Windows
107 |
108 | - Download windows_exporter-0.18.1-amd64.exe (latest version) from here: https://github.com/prometheus-community/windows_exporter/releases
109 | - Copy/Move to under C:\ directory ("C:\windows_exporter-0.18.1-amd64.exe")
110 | - Open PowerShell with Administrator rights and run:
111 |
112 | ```
113 | New-Service -Name "windows_node_exporter" -BinaryPathName "C:\windows_exporter-0.18.1-amd64.exe"
114 | Start-Service -Name windows_node_exporter
115 | ```
116 | - Now, windows_exporter works as a service and starts automatically when the Windows node restarts. Check that it works in one of 2 ways:
117 | - Open the browser and run: http://localhost:9182/metrics to see resource data/metrics
118 | - Open Task Manager - Services Tab and see whether windows_node_exporter runs or not.
119 |
120 | - Uninstall the current release:
121 |
122 | ```
123 | helm uninstall prometheus
124 | ```
125 |
126 | - Open values.yaml in the kube-prometheus-stack directory and add the scrape config (targets: Windows IP, default port 9182):
127 |
128 | ```
129 | #additionalScrapeConfigs: [] (Line ~2480)
130 | additionalScrapeConfigs:
131 | - job_name: 'kubernetes-windows-exporter'
132 | static_configs:
133 | - targets: ["WindowsIP:9182"]
134 | ```
135 |
136 | - Run the new release
137 |
138 | ```
139 | helm install prometheus kube-prometheus-stack
140 | ```
141 |
142 | - Open Grafana and "Import"
143 |
144 | 
145 |
146 | - Download Prometheus "Windows Exporter Node" dashboard from here: https://grafana.com/grafana/dashboards/14510/revisions
147 | - There are 2 options:
148 | - Copy the Json content and paste panel json,
149 | - Upload Json File
150 |
151 | 
152 |
153 | - Select "Prometheus" as data source
154 |
155 | - Now it works. Windows Node Exporter:
156 |
157 | 
158 |
159 | ## Reference
160 |
161 | - https://youtu.be/jatcPHvChfI
162 |
--------------------------------------------------------------------------------
/K8s-Secret.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Secret
2 |
3 | This scenario shows:
4 | - how to create secrets with file,
5 | - how to use secrets: volume and environment variable,
6 | - how to create secrets with command,
7 | - how to get/delete secrets
8 |
9 |
10 | ### Steps
11 |
12 | - Run minikube (in this scenario, K8s runs on WSL2- Ubuntu 20.04) ("minikube start")
13 |
14 | 
15 |
16 | - Create Yaml file (secret.yaml) in your directory and copy the below definition into the file.
17 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/secret.yaml
18 |
19 | ```
20 | # Secret Object Creation
21 | apiVersion: v1
22 | kind: Secret
23 | metadata:
24 | name: mysecret
25 | type: Opaque
26 | stringData:
27 | db_server: db.example.com
28 | db_username: admin
29 | db_password: P@ssw0rd!
30 | ```
31 |
32 | 
33 |
34 | - Create Yaml file (secret-pods.yaml) in your directory and copy the below definition into the file.
35 | - 3 Pods:
36 | - secret binding using volume
37 | - secret binding environment variable: 1. explicitly, 2. implicitly
38 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/secret-pods.yaml
39 |
40 | ```
41 | apiVersion: v1
42 | kind: Pod
43 | metadata:
44 | name: secretvolumepod
45 | spec:
46 | containers:
47 | - name: secretcontainer
48 | image: nginx
49 | volumeMounts:
50 | - name: secret-vol
51 | mountPath: /secret
52 | volumes:
53 | - name: secret-vol
54 | secret:
55 | secretName: mysecret
56 | ---
57 | apiVersion: v1
58 | kind: Pod
59 | metadata:
60 | name: secretenvpod
61 | spec:
62 | containers:
63 | - name: secretcontainer
64 | image: nginx
65 | env:
66 | - name: username
67 | valueFrom:
68 | secretKeyRef:
69 | name: mysecret
70 | key: db_username
71 | - name: password
72 | valueFrom:
73 | secretKeyRef:
74 | name: mysecret
75 | key: db_password
76 | - name: server
77 | valueFrom:
78 | secretKeyRef:
79 | name: mysecret
80 | key: db_server
81 | ---
82 | apiVersion: v1
83 | kind: Pod
84 | metadata:
85 | name: secretenvallpod
86 | spec:
87 | containers:
88 | - name: secretcontainer
89 | image: nginx
90 | envFrom:
91 | - secretRef:
92 | name: mysecret
93 | ```
94 |
95 | 
96 |
97 | 
98 |
99 | 
100 |
101 | - Create secret object:
102 |
103 | 
104 |
105 | - Create pods:
106 |
107 | 
108 |
109 | - Describe secret to see details:
110 |
111 | 
112 |
113 | - Run bash in the secretvolumepod (1st pod):
114 |
115 | 
116 |
117 | - Run "printenv" command in the secretenvpod (2nd pod):
118 |
119 | 
120 |
121 | - Run "printenv" command in the secretenvallpod (3rd pod):
122 |
123 | 
124 |
125 | - Create a new secret the imperative way:
126 |
127 | ```
128 | kubectl create secret generic mysecret2 --from-literal=db_server=db.example.com --from-literal=db_username=admin --from-literal=db_password=P@ssw0rd!
129 | ```
130 |
131 | 
132 |
133 | - Create a new secret using files (this avoids exposing the values in the shell history).
134 | - Create the files in the same directory before running the command (e.g. "touch server.txt"):
135 | - server.txt => contains "db.example.com"
136 | - password.txt => contains the password
137 | - username.txt => contains "admin"
138 | - Files:
139 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/server.txt
140 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/password.txt
141 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/username.txt
142 |
143 | ```
144 | kubectl create secret generic mysecret3 --from-file=db_server=server.txt --from-file=db_username=username.txt --from-file=db_password=password.txt
145 | ```
146 |
147 | 
148 |
149 | - Create a JSON file (config.json) and put in the following content.
150 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/secret/config.json
151 |
152 | ```
153 | {
154 | "apiKey": "7ac4108d4b2212f2c30c71dfa279e1f77dd12356",
155 | }
156 | ```
157 |
158 | ```
159 | kubectl create secret generic mysecret4 --from-file=config.json
160 | ```
161 |
162 | 
163 |
164 | - Delete mysecret4:
165 |
166 | 
167 |
--------------------------------------------------------------------------------
/K8s-Service-App.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Service Implementations (ClusterIp, NodePort and LoadBalancer)
2 |
3 | This scenario shows how to create Services (ClusterIp, NodePort and LoadBalancer). It goes following:
4 | - Create Deployments for frontend and backend.
5 | - Create ClusterIP Service to reach backend pods.
6 | - Create NodePort Service to reach frontend pods from Internet.
7 | - Create Loadbalancer Service on the cloud K8s cluster to reach frontend pods from Internet.
8 |
9 |
10 |  (Ref: Udemy Course: Kubernetes-Temelleri)
11 |
12 | ### Steps
13 |
14 | - Create 3 x front-end and 3 x back-end Pods with following YAML file run ("kubectl apply -f deploy.yaml").
15 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/service/deploy.yaml
16 |
17 | ```
18 | apiVersion: apps/v1
19 | kind: Deployment
20 | metadata:
21 | name: frontend
22 | labels:
23 | team: development
24 | spec:
25 | replicas: 3
26 | selector:
27 | matchLabels:
28 | app: frontend
29 | template:
30 | metadata:
31 | labels:
32 | app: frontend
33 | spec:
34 | containers:
35 | - name: frontend
36 | image: nginx:latest
37 | ports:
38 | - containerPort: 80
39 | ---
40 | apiVersion: apps/v1
41 | kind: Deployment
42 | metadata:
43 | name: backend
44 | labels:
45 | team: development
46 | spec:
47 | replicas: 3
48 | selector:
49 | matchLabels:
50 | app: backend
51 | template:
52 | metadata:
53 | labels:
54 | app: backend
55 | spec:
56 | containers:
57 | - name: backend
58 | image: ozgurozturknet/k8s:backend
59 | ports:
60 | - containerPort: 5000
61 | ```
62 |
63 | 
64 |
65 | - Run on the terminal: "kubectl get pods -w" (on Linux/WSL2: "watch kubectl get pods")
66 |
67 |
68 | 
69 |
70 | - Create ClusterIP service that connects to backend (selector: app: backend) (run: "kubectl apply -f backend_clusterip.yaml").
71 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/service/backend_clusterip.yaml
72 |
73 | ```
74 | apiVersion: v1
75 | kind: Service
76 | metadata:
77 | name: backend
78 | spec:
79 | type: ClusterIP
80 | selector:
81 | app: backend
82 | ports:
83 | - protocol: TCP
84 | port: 5000
85 | targetPort: 5000
86 | ```
87 |
88 | 
89 |
90 |
91 | - ClusterIP Service is created. If any resource in the cluster sends a request to the ClusterIP on port 5000, the request reaches one of the pods behind the ClusterIP Service.
92 | - We can show it from frontend pods.
93 | - Connect one of the front-end pods (list: "kubectl get pods", connect: "kubectl exec -it frontend-5966c698b4-b664t -- bash")
94 | - In K8s, there is a DNS server (CoreDNS-based) that lets us resolve the IP/name of a service.
95 | - When running "nslookup backend", we get the complete name and IP of this service (serviceName.namespace.svc.cluster_domain, e.g. backend.default.svc.cluster.local).
96 | - When running curl against the service on port 5000, the service connects us to one of the backend pods (see the sketch after the screenshot).
97 |
98 | 
99 |
100 | - Create NodePort Service to reach frontend pods from the outside of the cluster (run: "kubectl apply -f backend_nodeport.yaml").
101 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/service/backend_nodeport.yaml
102 |
103 | ```
104 | apiVersion: v1
105 | kind: Service
106 | metadata:
107 | name: frontend
108 | spec:
109 | type: NodePort
110 | selector:
111 | app: frontend
112 | ports:
113 | - protocol: TCP
114 | port: 80
115 | targetPort: 80
116 | ```
117 |
118 | 
119 |
120 | - With the NodePort Service (see the image below), frontend pods are reachable from the opened port (32098). In other words, someone can reach the frontend pods via WorkerNodeIP:32098. A NodePort service listens on the same port on every worker node (in this example: port 32098).
121 | - While working with minikube, this is only possible with minikube tunnelling. Minikube simulates reaching NodeIP:Port with its tunnel feature.
122 |
123 | 
124 |
125 | - On the other terminal, if we run the curl command, we can reach the frontend pods.
126 |
127 | 
128 |
129 | - LoadBalancer Service is only available with cloud services (in a local cluster, it is not possible to get an external IP for the load-balancer service). So if you have a connection to one of the cloud services (Azure AKS, AWS EKS, GCP GKE), create the LoadBalancer service on it (run: "kubectl apply -f backend_loadbalancer.yaml").
130 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/service/backend_loadbalancer.yaml
131 |
132 | ```
133 | apiVersion: v1
134 | kind: Service
135 | metadata:
136 | name: frontendlb
137 | spec:
138 | type: LoadBalancer
139 | selector:
140 | app: frontend
141 | ports:
142 | - protocol: TCP
143 | port: 80
144 | targetPort: 80
145 | ```
146 |
147 | 
148 |
149 | - If you run on the cloud, you'll see the external-ip of the loadbalancer service.
150 |
151 | 
152 |
153 | 
154 |
155 | - In addition, a service can be created the imperative way (with a command):
156 | - kubectl expose deployment [deployment-name] --type=[service-type] --name=[service-name]
157 |
158 | 
159 |
160 | ## References
161 | - [udemy-course:Kubernetes-Temelleri](https://www.udemy.com/course/kubernetes-temelleri/)
162 |
--------------------------------------------------------------------------------
/K8s-Kubeadm-Cluster-Docker.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Cluster Setup with Kubeadm and Docker
2 |
3 | - This scenario shows how to create K8s cluster on virtual PC (multipass, kubeadm, docker)
4 |
5 | ### Table of Contents
6 | - [Creating Cluster With Kubeadm, Docker](#creating)
7 | - [IP address changes in Kubernetes Master Node](#master_ip_changed)
8 |
9 |
10 | ## 1. Creating Cluster With Kubeadm, Docker
11 |
12 | #### 1.1 Multipass Installation - Creating VM
13 |
14 | - "Multipass is a mini-cloud on your workstation using native hypervisors of all the supported plaforms (Windows, macOS and Linux)"
15 | - Multipass is lightweight, fast, easy to use Ubuntu VM (on demand for any workstation)
16 | - Fast to install and to use.
17 | - **Link:** https://multipass.run/
18 |
19 | ```
20 | # creating VM
21 | multipass launch --name k8s-controller --cpus 2 --mem 2048M --disk 10G
22 | multipass launch --name k8s-node1 --cpus 2 --mem 1024M --disk 7G
23 | multipass launch --name k8s-node2 --cpus 2 --mem 1024M --disk 7G
24 | ```
25 |
26 | 
27 |
28 | ```
29 | # get shells on different terminals
30 | multipass shell k8s-controller
31 | multipass shell k8s-node1
32 | multipass shell k8s-node2
33 | multipass list
34 | ```
35 |
36 | 
37 |
38 | #### 1.2 Install Docker
39 |
40 | - Run for all 3 nodes on different terminals:
41 |
42 | ```
43 | sudo apt-get update
44 | sudo apt-get install docker.io -y # install Docker
45 | sudo systemctl start docker # start and enable the Docker service
46 | sudo systemctl enable docker
47 | sudo usermod -aG docker $USER # add the current user to the docker group
48 | newgrp docker # make the system aware of the new group addition
49 | ```
50 |
51 | #### 1.3 Install Kubeadm
52 |
53 | - Run for all 3 nodes on different terminals:
54 |
55 | ```
56 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - # add the repository key and the repository
57 | sudo apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
58 | sudo apt-get install kubeadm kubelet kubectl -y # install all of the necessary Kubernetes tools
59 | ```
60 |
61 | - Run on new terminal:
62 |
63 | ```
64 | multipass list
65 | ```
66 |
67 | 
68 |
69 | - Run on controller, add IPs of PCs:
70 |
71 | ```
72 | sudo nano /etc/hosts
73 | ```
74 |
75 | 
76 |
77 | - Run for all 3 nodes on different terminals:
78 |
79 | ```
80 | sudo swapoff -a # turn off swap
81 | ```
82 |
83 | - Create this file "daemon.json" in the directory "/etc/docker", docker change cgroup driver to systemd, run on 3 different machines:
84 |
85 | ```
86 | cd /etc/docker
87 | sudo touch daemon.json
88 | sudo nano daemon.json
89 | # copy and paste it on daemon.json
90 | {
91 | "exec-opts": ["native.cgroupdriver=systemd"]
92 | }
93 | sudo systemctl restart docker
94 | ```
95 |
96 | - Run on the controller:
97 |
98 | ```
99 | sudo kubeadm init --pod-network-cidr=192.168.0.0/16
100 | mkdir -p $HOME/.kube
101 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
102 | sudo chown $(id -u):$(id -g) $HOME/.kube/config
103 | sudo kubectl get nodes
104 | ```
105 |
106 | 
107 |
108 | 
109 |
110 | - Run on the nodes (node1, node2):
111 |
112 | ```
113 | sudo kubeadm join 172.29.108.209:6443 --token ug13ec.cvi0jwi9xyf82b6f \
114 | --discovery-token-ca-cert-hash sha256:12d59142ccd0148d3f12a673b5c47a2f549cce6b7647963882acd90f9b0fbd28
115 | ```
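
- If the join command/token is lost, it can be regenerated on the controller:

```
sudo kubeadm token create --print-join-command
```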
116 |
117 | - Run "kubectl get nodes" on the controller, after deploying pod network, nodes will be ready.
118 |
119 | 
120 |
121 | - Run on Controller to deploy a pod network:
122 | - Flannel:
123 | ```
124 | sudo kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
125 | ```
126 | - Calico:
127 | ```
128 | kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
129 | kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
130 | ```
131 |
132 | 
133 |
134 | - After more testing (restarting the master, etc.), containerd is more flexible and usable than the dockerd runtime => [KubeAdm-Containerd Setup](https://github.com/omerbsezer/Fast-Kubernetes/blob/main/K8s-Kubeadm-Cluster-Setup.md): with dockerd, /etc/hosts must be updated after every restart, while containerd does not require this.
135 |
136 | ## 2. IP address changes in Kubernetes Master Node
137 | - After restarting the master node, its IP may change, while the K8s cluster API still uses the node's old IP. So you should reconfigure the K8s cluster with the new IP.
138 |
139 | - If you installed the docker for the docker registry, you can remove the exited containers:
140 |
141 | ```
142 | sudo docker rm $(sudo docker ps -a -f status=exited -q)
143 | ```
144 |
145 | #### On Master Node:
146 |
147 | - Run on the controller and add the IPs of the machines; after every restart the IPs should be updated again:
148 |
149 | ```
150 | sudo nano /etc/hosts
151 | ```
152 |
153 | - Reset kubeadm and init new cluster:
154 |
155 | ```
156 | sudo kubeadm reset
157 | sudo kubeadm init --pod-network-cidr=192.168.0.0/16
158 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
159 | ```
160 |
161 | - It shows which command should be used to join cluster:
162 |
163 | ```
164 | sudo kubeadm join 172.31.40.125:6443 --token 07vo3z.q2n2qz6bd07ipdnf \
165 | --discovery-token-ca-cert-hash sha256:46c7dcb092ca091e71ab39bd542e73b90b3f7bdf0c486202b857a678cd9879ba
166 | ```
167 |
168 |
169 |
170 | - Network configuration with the new IP:
171 |
172 | ```
173 | kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
174 | kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
175 | ```
176 |
177 |
178 |
179 | ### Reference
180 | - https://thenewstack.io/deploy-a-kubernetes-desktop-cluster-with-ubuntu-multipass/
181 |
--------------------------------------------------------------------------------
/K8s-PersistantVolume.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Persistent Volumes and Persistent Volume Claims
2 |
3 | This scenario shows how K8s PVC and PV work on minikube
4 |
5 | ### Steps
6 |
7 | - On minikube, we do not have a real NFS server to reach, so we simulate an NFS server with a Docker container.
8 |
9 | ```
10 | docker volume create nfsvol
11 | docker network create --driver=bridge --subnet=10.255.255.0/24 --ip-range=10.255.255.0/24 --gateway=10.255.255.10 nfsnet
12 | docker run -dit --privileged --restart unless-stopped -e SHARED_DIRECTORY=/data -v nfsvol:/data --network nfsnet -p 2049:2049 --name nfssrv ozgurozturknet/nfs:latest
13 | ```
14 |
15 | 
16 |
17 | - Now our simulated NFS server is enabled.
18 | - Copy and save (below) as file on your PC (pv.yaml).
19 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/persistentvolume/pv.yaml
20 |
21 | ```
22 | apiVersion: v1
23 | kind: PersistentVolume
24 | metadata:
25 | name: mysqlpv
26 | labels:
27 | app: mysql # labelled PV with "mysql"
28 | spec:
29 | capacity:
30 | storage: 5Gi # 5Gibibyte = power of 2; 5GB= power of 10
31 | accessModes:
32 | - ReadWriteOnce
33 | persistentVolumeReclaimPolicy: Retain
34 | nfs:
35 | path: / # binds the path on the NFS Server
36 | server: 10.255.255.10 # IP of NFS Server
37 | ```
38 |
39 | 
40 |
41 | - Create PV object on our cluster:
42 |
43 | 
44 |
45 | - Copy and save (below) as file on your PC (pvc.yaml).
46 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/persistentvolume/pvc.yaml
47 |
48 | ```
49 | apiVersion: v1
50 | kind: PersistentVolumeClaim
51 | metadata:
52 | name: mysqlclaim
53 | spec:
54 | accessModes:
55 | - ReadWriteOnce
56 | volumeMode: Filesystem
57 | resources:
58 | requests:
59 | storage: 5Gi
60 | storageClassName: ""
61 | selector:
62 | matchLabels:
63 | app: mysql # select the "mysql"-labelled PV that is defined above.
64 | ```
65 |
66 | 
67 |
68 | - Create PVC object on our cluster. After creation, PVC's status shows to bind to PV ("Bound"):
69 |
70 | 
71 |
72 | - Copy and save (below) as file on your PC (deploy.yaml).
73 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/persistentvolume/deploy.yaml
74 |
75 | ```
76 | apiVersion: v1 # Create Secret object for password
77 | kind: Secret
78 | metadata:
79 | name: mysqlsecret
80 | type: Opaque
81 | stringData:
82 | password: P@ssw0rd!
83 | ---
84 | apiVersion: apps/v1
85 | kind: Deployment # Deployment
86 | metadata:
87 | name: mysqldeployment
88 | labels:
89 | app: mysql
90 | spec:
91 | replicas: 1
92 | selector:
93 | matchLabels:
94 | app: mysql # select deployment container (template > metadata > labels)
95 | strategy:
96 | type: Recreate
97 | template:
98 | metadata:
99 | labels:
100 | app: mysql
101 | spec:
102 | containers:
103 | - name: mysql
104 | image: mysql
105 | ports:
106 | - containerPort: 3306
107 | volumeMounts: # VolumeMounts on path and volume name
108 | - mountPath: "/var/lib/mysql"
109 | name: mysqlvolume # which volume to select (volumes > name)
110 | env:
111 | - name: MYSQL_ROOT_PASSWORD
112 | valueFrom: # get mysql password from secrets
113 | secretKeyRef:
114 | name: mysqlsecret
115 | key: password
116 | volumes:
117 | - name: mysqlvolume # name of Volume
118 | persistentVolumeClaim:
119 | claimName: mysqlclaim # select the "mysqlclaim" PVC that is defined above.
120 | ```
121 |
122 | 
123 |
124 | - Run deployment on our cluster:
125 |
126 | 
127 |
128 | - Watching deployment status:
129 |
130 | 
131 |
132 | - See the details of pod (mounts and volumes):
133 |
134 | 
135 |
136 | - Enter the pod and see the path where the volume is mounted ("kubectl exec -it [podName] -- bash"):
137 |
138 | 
139 |
140 | - If a new node is added to the cluster and this pod stops running on the main minikube node, the pod will be started on another node.
141 | - With this scenario, we can see the following:
142 | - The deployment always keeps a pod running on the cluster.
143 | - The pod created on the new node still connects to the same persistent volume (nothing in the volume is lost).
144 | - How to assign a taint to a node (key=value:NoExecute; if NoExecute is not tolerated by a pod, the pod is evicted from the node).
145 |
146 | 
147 |
148 | - New pod is created on the new node (2nd node)
149 |
150 | 
151 |
152 | - The second pod is also connected to the same volume.
153 |
154 | 
155 |
156 | - Enter the 2nd pod and see the path where the volume is mounted ("kubectl exec -it [podName] -- bash"). The files at the same path on the 2nd pod are the same volume files:
157 |
158 | 
159 |
160 | - Delete minikube, docker container, volume, network:
161 |
162 | 
163 |
164 | ### References
165 | - https://github.com/aytitech/k8sfundamentals/tree/main/pvpvc
166 |
--------------------------------------------------------------------------------
/Helm.md:
--------------------------------------------------------------------------------
1 | ## Helm
2 |
3 | ### Helm Install
4 | - Installed on Ubuntu 20.04 (for other platforms: https://helm.sh/docs/intro/install/)
5 |
6 | ```
7 | curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
8 | sudo apt-get install apt-transport-https --yes
9 | echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
10 | sudo apt-get update
11 | sudo apt-get install helm
12 | ```
13 | - Check version (helm version):
14 |
15 | 
16 |
17 | - **ArtifactHUB:** https://artifacthub.io/
18 | - ArtifactHub is like DockerHub, but it includes Helm Charts. (e.g. search wordpress on artifactHub on browser)
19 |
20 | 
21 |
22 | - With Helm Search on Hub:
23 | ```
24 | helm search hub wordpress # searches package on the Hub
25 | helm search repo wordpress # searches package on the local machine repository list
26 | helm search repo bitnami # searches bitnami in the repo list
27 |
28 | ```
29 | 
30 |
31 |
32 |
33 |
34 | - **Repo:** the list on the local machine, repo item includes the package's download page (e.g. https://charts.bitnami.com/bitnami)
35 |
36 | ```
37 | helm repo add bitnami https://charts.bitnami.com/bitnami # adds link into my repo list
38 | helm search repo wordpress # searches package on the local machine repository list
39 | helm repo list # list all repo
40 | helm pull [chart]
41 | helm pull jenkins/jenkins
42 | helm pull bitnami/jenkins # pull and download chart to the current directory
43 | tar zxvf jenkins-3.11.4.tgz # extract downloaded chart
44 | ```
45 |
46 | 
47 |
48 | 
49 |
50 | - Downloaded chart file structure and files:
51 | - **values.yaml**: includes values, variables, configs, replicaCount, imageName, etc. These values are injected into the template yaml files (e.g. replicas: {{ .Values.replicaCount }} in the deployment yaml file)
52 | - **Chart.yaml**: includes chart information (annotations, maintainers, appVersion, apiVersion, description, sources, etc.)
53 | - **templates**: directory that includes all K8s YAML template files (deployment, secret, configmap, etc.)
54 | - **values-summary**: includes the configurable parameters about application, K8s (parameter, description and value)
55 |
56 | ```
57 | tree jenkins
58 | ```
59 |
60 | 
61 |
62 |
63 | - Install chart on K8s with application/release name
64 |
65 | ```
66 | helm install helm-release-wordpress bitnami/wordpress # install bitnami/wordpress chart with helm-release-wordpress name on default namespace
67 | helm install release bitnami/wordpress --namespace production # install release on production namespace
68 | helm install my-release \ # possible to set username/password while creating pods
69 | --set wordpressUsername=admin \
70 | --set wordpressPassword=password \
71 | --set mariadb.auth.rootPassword=secretpassword \
72 | bitnami/wordpress
73 | helm install wordpress-release bitnami/wordpress -f ./values.yaml # values.yaml includes import values (e.g. username,pass,..), if it is updated and using this file, it is possible to install with these values.
74 | echo '{mariadb.auth.database: user0db, mariadb.auth.username: user0}' > values.yaml
75 | helm install -f values.yaml bitnami/wordpress --generate-name # with using "-f values.yaml", updated values are used
76 | helm install j1 jenkins # jenkins is downloaded and extracted directory. After values.yaml updated, also possible to install with this updated app config
77 | ```
78 |
79 | 
80 |
81 | - To see the status of the release:
82 |
83 | ```
84 | helm status helm-release-wordpress
85 | ```
86 | 
87 |
88 | - We can show the configurable values (e.g. username, password):
89 | ```
90 | helm show values bitnami/wordpress
91 | ```
92 | 
93 |
94 |
95 | - You can see all the K8s objects that are automatically created by Helm:
96 |
97 | ```
98 | kubectl get pods
99 | kubectl get svc
100 | kubectl get deployment
101 | kubectl get pv
102 | kubectl get pvc
103 | kubectl get configmap
104 | kubectl get secrets
105 | kubectl get pods --all-namespaces
106 | helm list
107 | ```
108 | 
109 |
110 | - Get password of wordpress:
111 |
112 | 
113 |
114 | - Open tunnel from minikube:
115 |
116 | ```
117 | minikube service helm-release-wordpress --url
118 | ```
119 |
120 | 
121 |
122 | 
123 |
124 | - Using username and pass (http://127.0.0.1:46007/admin):
125 |
126 | 
127 | 
128 |
129 | - Uninstall helm release:
130 |
131 | 
132 |
133 | - Upgrade, rollback, history:
134 | ```
135 | helm install j1 jenkins # create j1 release with jenkins chart
136 | helm upgrade -f [filename.yaml] [RELEASE] [CHART]
137 | helm upgrade -f values.yaml j1 jenkins/jenkins
138 | helm rollback [RELEASE] [REVISION]
139 | helm rollback j1 1
140 | helm history [RELEASE]
141 | helm rollback j1
142 | ```
143 | 
144 |
145 | - To learn more Helm commands:
146 |
147 | **Goto:** [Helm Commands Cheatsheet](https://github.com/omerbsezer/Fast-Kubernetes/blob/main/HelmCheatsheet.md)
148 |
149 |
--------------------------------------------------------------------------------
/K8s-Ingress.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Ingress
2 |
3 | This scenario shows how K8s ingress works on minikube. When browsing urls, ingress controller (nginx) directs traffic to the related services.
4 |
5 |  (ref: Kubernetes.io)
6 |
7 |
8 | ### Steps
9 |
10 | - Run minikube on Windows Hyperv or Virtualbox. In this scenario:
11 |
12 | ```
13 | minikube start --driver=hyperv
14 | or
15 | minikube start --driver=hyperv --force-systemd
16 | ```
17 |
18 | - To install ingress controller on K8s cluster, please visit to learn: https://kubernetes.github.io/ingress-nginx/deploy/
19 |
20 | - On minikube, it is only necessary to enable the ingress addon.
21 |
22 | ```
23 | minikube addons enable ingress
24 | minikube addons list
25 | ```
26 |
27 | 
28 |
29 | - Copy and save (below) as file on your PC (appingress.yaml).
30 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/ingress/appingress.yaml
31 |
32 | ```
33 | apiVersion: networking.k8s.io/v1
34 | kind: Ingress
35 | metadata:
36 | name: appingress
37 | annotations:
38 | nginx.ingress.kubernetes.io/rewrite-target: /$1
39 | spec:
40 | rules:
41 | - host: webapp.com
42 | http:
43 | paths:
44 | - path: /blue
45 | pathType: Prefix
46 | backend:
47 | service:
48 | name: bluesvc
49 | port:
50 | number: 80
51 | - path: /green
52 | pathType: Prefix
53 | backend:
54 | service:
55 | name: greensvc
56 | port:
57 | number: 80
58 | ```
59 |
60 | 
61 |
62 | - Copy and save (below) as file on your PC (todoingress.yaml).
63 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/ingress/todoingress.yaml
64 |
65 | ```
66 | apiVersion: networking.k8s.io/v1
67 | kind: Ingress
68 | metadata:
69 | name: todoingress
70 | spec:
71 | rules:
72 | - host: todoapp.com
73 | http:
74 | paths:
75 | - path: /
76 | pathType: Prefix
77 | backend:
78 | service:
79 | name: todosvc
80 | port:
81 | number: 80
82 | ```
83 |
84 | 
85 |
86 | - Copy and save (below) as file on your PC (deploy.yaml).
87 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/ingress/deploy.yaml
88 |
89 | ```
90 | apiVersion: apps/v1
91 | kind: Deployment
92 | metadata:
93 | name: blueapp
94 | labels:
95 | app: blue
96 | spec:
97 | replicas: 2
98 | selector:
99 | matchLabels:
100 | app: blue
101 | template:
102 | metadata:
103 | labels:
104 | app: blue
105 | spec:
106 | containers:
107 | - name: blueapp
108 | image: ozgurozturknet/k8s:blue
109 | ports:
110 | - containerPort: 80
111 | livenessProbe:
112 | httpGet:
113 | path: /healthcheck
114 | port: 80
115 | initialDelaySeconds: 5
116 | periodSeconds: 5
117 | readinessProbe:
118 | httpGet:
119 | path: /ready
120 | port: 80
121 | initialDelaySeconds: 5
122 | periodSeconds: 3
123 | ---
124 | apiVersion: v1
125 | kind: Service
126 | metadata:
127 | name: bluesvc
128 | spec:
129 | selector:
130 | app: blue
131 | ports:
132 | - protocol: TCP
133 | port: 80
134 | targetPort: 80
135 | ---
136 | apiVersion: apps/v1
137 | kind: Deployment
138 | metadata:
139 | name: greenapp
140 | labels:
141 | app: green
142 | spec:
143 | replicas: 2
144 | selector:
145 | matchLabels:
146 | app: green
147 | template:
148 | metadata:
149 | labels:
150 | app: green
151 | spec:
152 | containers:
153 | - name: greenapp
154 | image: ozgurozturknet/k8s:green
155 | ports:
156 | - containerPort: 80
157 | livenessProbe:
158 | httpGet:
159 | path: /healthcheck
160 | port: 80
161 | initialDelaySeconds: 5
162 | periodSeconds: 5
163 | readinessProbe:
164 | httpGet:
165 | path: /ready
166 | port: 80
167 | initialDelaySeconds: 5
168 | periodSeconds: 3
169 | ---
170 | apiVersion: v1
171 | kind: Service
172 | metadata:
173 | name: greensvc
174 | spec:
175 | selector:
176 | app: green
177 | ports:
178 | - protocol: TCP
179 | port: 80
180 | targetPort: 80
181 | ---
182 | apiVersion: apps/v1
183 | kind: Deployment
184 | metadata:
185 | name: todoapp
186 | labels:
187 | app: todo
188 | spec:
189 | replicas: 1
190 | selector:
191 | matchLabels:
192 | app: todo
193 | template:
194 | metadata:
195 | labels:
196 | app: todo
197 | spec:
198 | containers:
199 | - name: todoapp
200 | image: ozgurozturknet/samplewebapp:latest
201 | ports:
202 | - containerPort: 80
203 | ---
204 | apiVersion: v1
205 | kind: Service
206 | metadata:
207 | name: todosvc
208 | spec:
209 | selector:
210 | app: todo
211 | ports:
212 | - protocol: TCP
213 | port: 80
214 | targetPort: 80
215 | ```
216 |
217 | 
218 |
219 | 
220 |
221 | 
222 |
223 | - Run "deploy.yaml" and "appingress.yaml" to create deployments and services
224 |
225 | 
226 |
227 | - Add the URL-IP mappings to the Windows/System32/drivers/etc/hosts file (see the sketch below):
228 |
229 | 
230 |
231 | - When running on browser the url "webapp.com/blue", one of the blue app containers return response.
232 |
233 | 
234 |
235 | - When running on browser the url "webapp.com/green", one of the green app containers return response.
236 |
237 | 
238 |
239 | - When running on browser, "todoapp.com":
240 |
241 | 
242 |
243 | - Hence, we can expose several services running in the cluster to the outside through a single IP.
244 |
245 | - Delete all YAML-defined objects and minikube.
246 |
247 | 
248 |
249 |
250 | ### References
251 |
252 | https://github.com/aytitech/k8sfundamentals/tree/main/ingress
253 |
--------------------------------------------------------------------------------
/K8s-Rollout-Rollback.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Rollout - Rollback
2 |
3 | This scenario shows:
4 | - how to roll out deployments with 2 different strategy: recreate and rollingUpdate,
5 | - how to save/record deployments' revision while rolling out with "--record" (e.g. changing image):
6 | - imperative: "kubectl set image deployment rcdeployment nginx=httpd --record",
7 | - declarative (edit the file): "kubectl edit deployment rolldeployment --record",
8 | - how to rollback (rollout undo) the desired deployment revisions:
9 | - "kubectl rollout undo deployment rolldeployment --to-revision=2",
10 | - how to pause/resume rollout:
11 | - pause: "kubectl rollout pause deployment rolldeployment",
12 | - resume: "kubectl rollout resume deployment rolldeployment",
13 | - how to see the status of rollout deployment:
14 | - "kubectl rollout status deployment rolldeployment -w".
15 |
16 | ### Steps
17 |
18 | - Run minikube (in this scenario, K8s runs on WSL2 - Ubuntu 20.04) ("minikube start")
19 |
20 | 
21 |
22 | - Create a YAML file (recreate-deployment.yaml) in your directory and copy the definition below into it.
23 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/deployment/recreate-deployment.yaml
24 |
25 | ```
26 | apiVersion: apps/v1
27 | kind: Deployment
28 | metadata:
29 | name: rcdeployment
30 | labels:
31 | team: development
32 | spec:
33 | replicas: 5 # create 5 replicas
34 | selector:
35 | matchLabels: # labelselector of deployment: selects pods which have "app:recreate" labels
36 | app: recreate
37 | strategy: # deployment rollout strategy: Recreate => delete all existing pods first, then create new pods from scratch.
38 | type: Recreate
39 | template:
40 | metadata:
41 | labels: # labels the pod with "app:recreate"
42 | app: recreate
43 | spec:
44 | containers:
45 | - name: nginx
46 | image: nginx
47 | ports:
48 | - containerPort: 80
49 | ```
50 |
51 | 
52 |
53 |
54 | - Create a YAML file (rolling-deployment.yaml) in your directory and copy the definition below into it.
55 | - File: https://github.com/omerbsezer/Fast-Kubernetes/blob/main/labs/deployment/rolling-deployment.yaml
56 |
57 | ```
58 | apiVersion: apps/v1
59 | kind: Deployment
60 | metadata:
61 | name: rolldeployment
62 | labels:
63 | team: development
64 | spec:
65 | replicas: 10
66 | selector:
67 | matchLabels: # labelselector of deployment: selects pods which have "app:rolling" labels
68 | app: rolling
69 | strategy:
70 | type: RollingUpdate # deployment rollout strategy: RollingUpdate => pods are updated step by step; not all pods are deleted at the same time.
71 | rollingUpdate:
72 | maxUnavailable: 2 # max number of pods that can be unavailable during the update => with 10 replicas and maxUnavailable: 2, at least 8 pods keep running
73 | maxSurge: 2 # max number of pods created above the desired count => with 10 replicas and maxSurge: 2, at most 12 pods run at a time
74 | template:
75 | metadata:
76 | labels: # labels the pod with "app:rolling"
77 | app: rolling
78 | spec:
79 | containers:
80 | - name: nginx
81 | image: nginx
82 | ports:
83 | - containerPort: 80
84 | ```
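
- A quick sanity check of the bounds above (not in the original file, just the arithmetic spelled out):

```
# replicas: 10, maxUnavailable: 2, maxSurge: 2
# available pods never drop below 10 - 2 = 8
# total pods never exceed 10 + 2 = 12
kubectl apply -f rolling-deployment.yaml
kubectl get rs -w   # watch the old/new ReplicaSets scale in steps of at most 2
```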
85 |
86 | 
87 |
88 | - Run deployment:
89 |
90 | 
91 |
92 | - Watching pods' status (on linux: "watch kubectl get pods", on win: "kubectl get pods -w")
93 |
94 | 
95 |
96 | - Watching replica set's status (on linux: "watch kubectl get rs", on win: "kubectl get rs -w")
97 |
98 | 
99 |
100 | - Update the image version ("kubectl set image deployment rcdeployment nginx=httpd"); with the Recreate strategy, all old pods are terminated first, and only then are the new replicaset and pods created.
101 |
102 | 
103 |
104 | - With the "recreate" strategy, the old pods are terminated first:
105 |
106 | 
107 |
108 | - New pods are being created:
109 |
110 | 
111 |
112 | - New replicaset created:
113 |
114 | 
115 |
116 | - Delete this deployment:
117 |
118 | 
119 |
120 | - Run deployment (rolling-deployment.yaml):
121 |
122 | 
123 |
124 |
125 | - Watching pods' status (on linux: "watch kubectl get pods", on win: "kubectl get pods -w")
126 |
127 | 
128 |
129 | - Watching replica set's status (on linux: "watch kubectl get rs", on win: "kubectl get rs -w")
130 |
131 | 
132 |
133 | - Run: "kubectl edit deployment rolldeployment --record"; on Linux this opens the vim editor for editing
134 | - Find the image definition, press "i" for insert mode, change "nginx" to "httpd", press "ESC", then type ":wq" to save and exit
135 |
136 | 
137 |
138 | - New pods are being created with the new version:
139 |
140 | 
141 |
142 | - New replicaset created:
143 |
144 | 
145 |
146 | - Run new deployment version:
147 |
148 | 
149 |
150 | - New pods are being created with the new version:
151 |
152 | 
153 |
154 | - New replicaset created:
155 |
156 | 
157 |
158 | - To show the history of the deployment (**important:** "--record" must be used for old deployment revisions to appear in the history list):
159 |
160 | 
161 |
162 | - To show/describe the selected revision:
163 |
164 | 
165 |
166 | - Roll back to revision 1 (with undo: "kubectl rollout undo deployment rolldeployment --to-revision=1"):
167 |
168 | 
169 |
170 |
171 | - Pod status:
172 |
173 | 
174 |
175 | - Replicaset revision=1:
176 |
177 | 
178 |
179 | - It is possible to return from revision=1 to revision=2 (with undo: "kubectl rollout undo deployment rolldeployment --to-revision=2"):
180 |
181 | 
182 |
183 |
184 | - It is also possible to pause the rollout:
185 |
186 | 
187 |
188 | - While rolling back from revision 2 to revision 3, the rollout was paused:
189 |
190 | 
191 |
192 | - Resume the pause of rollout of deployment:
193 |
194 | 
195 |
196 | - Now the deployment's revision is 3:
197 |
198 | 
199 |
200 | - It is also possible to watch the live status of the rollout with:
201 | - "kubectl rollout status deployment rolldeployment -w"
202 |
203 | - Delete deployment:
204 |
205 | 
206 |
--------------------------------------------------------------------------------
/KubernetesCommandCheatSheet.md:
--------------------------------------------------------------------------------
1 | ## Kubernetes Commands Cheatsheet
2 |
3 | #### minikube command
4 | ```
5 | minikube start
6 | minikube status
7 | kubectl get nodes
8 | minikube stop # does not delete the cluster; it can be started again with "minikube start"
9 | minikube delete # delete all
10 | ```
11 | #### kubeadm command
12 | - Kubeadm provisions a K8s cluster on on-premise machines
13 | - You can test Kubeadm on PlayWithKubernetes
14 | - Creating cluster with 1 master, 2 nodes (add new instance) on PlayWithKubernetes
15 | ```
16 | on master: kubeadm init --apiserver-advertise-address $(hostname -i) --pod-network-cidr 10.5.0.0/16
17 | on nodes: kubeadm join 192.168.0.13:6443 --token ge5xcq.xh2mcb4rqa8lz0db \
18 | --discovery-token-ca-cert-hash sha256:a3ba7ced9383a5b5704b6fbf696f243a8322759b68b9d07b747b174fcc838540
19 | on master: mkdir -p $HOME/.kube
20 | on master: cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
21 | on master: chown $(id -u):$(id -g) $HOME/.kube/config
22 | on master: kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml
23 | kubectl get nodes
24 | kubectl run test --image=nginx --restart=Never
25 | ```
26 |
27 | #### kubectl config: context, user, cluster
28 | ```
29 | kubectl config
30 | kubectl config get-contexts #get all context
31 | kubectl config current-context #get context
32 | kubectl config use-context docker-desktop #change context
34 | kubectl config use-context aks-k8s-test
35 | kubectl config use-context default # default=minikube
36 | kubectl get nodes
37 | ```
38 |
39 | #### cluster-info
40 | ```
41 | kubectl cluster-info
42 | kubectl cp --help
43 | kubectl [verb] [type] [object]
44 | kubectl delete pods test_pod
45 | kubectl [get|delete|edit|apply] [pods, deployment, services, etc.] [podName, serviceName, deploymentName, etc.]
46 | ```
47 |
48 | #### namespace, -n
49 | ```
50 | kubectl get pods # default namespace
51 | kubectl get pods -n kube-system #list kube-system namespace pods.
52 | kubectl get pods --all-namespaces
53 | kubectl get pods -A # all-namespace
54 | ```
55 |
56 | #### more info about pods
57 | ```
58 | kubectl get pods -A # all-namespace
59 | kubectl get pods -A -o wide # all-namespace with more detailed
60 | kubectl get pods -A -o yaml
61 | kubectl get pods -A -o json
62 | kubectl get pods -A -o go-template
63 | kubectl get pods -A -o json | jq -r ".items[].spec.containers[].name" #jq parser query
64 | ```
65 |
66 | #### commands help
67 | - 'help' to learn more for commands
68 | ```
69 | kubectl apply --help #explain command
70 | kubectl delete --help
71 | ```
72 |
73 | #### object help: with explain
74 | - 'explain' to learn more for objects
75 | ```
76 | kubectl explain pod
77 | kubectl explain deployment
78 | ```
79 |
80 | #### pod ~ container
81 | ```
82 | kubectl run firstpod --image=nginx --restart=Never
83 | kubectl run secondpod --image=nginx --port=80 --labels=app=frontend --restart=Never
84 | ```
85 |
86 | #### get info about pods
87 | ```
88 | kubectl get pods -o wide
89 | kubectl describe pods firstpod
90 | ```
91 |
92 | #### show log
93 | ```
94 | kubectl logs firstpod
95 | kubectl logs -f firstpod #watch live log with -f
96 | ```
97 |
98 | #### run command in pod
99 | ```
100 | kubectl exec firstpod -- hostname #hostname command run in pod
101 | kubectl exec firstpod -- ls / #list command run in pod
102 | ```
103 |
104 | #### connect container in the pod
105 | ```
106 | kubectl exec -it firstpod -- /bin/sh # open shell, connect container
107 | kubectl exec -it firstpod -- bash # run bash
108 | ```
109 |
110 | #### delete pod
111 | ```
112 | kubectl delete pods firstpod
113 | ```
114 |
115 | #### learn/explain api of objects
116 | ```
117 | kubectl explain pods
118 | kubectl explain deployments
119 | kubectl explain serviceaccount
120 | ```
121 |
122 | #### Declarative way with file, imperative way with command
123 | - File contents:
124 | - apiVersion:
125 | - kind: (pod, deployment, etc.)
126 | - metadata: (podName, label, etc.)
127 | - spec: (restartPolicy, container name, image, command, ports, etc.)
128 |
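- A minimal sketch of such a file (names and image are arbitrary examples):

```
apiVersion: v1
kind: Pod
metadata:
  name: examplepod          # podName
  labels:
    app: example            # label
spec:
  restartPolicy: Always
  containers:
  - name: examplecontainer
    image: nginx
    ports:
    - containerPort: 80
```
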
129 | #### file apply for declarative way
130 | ```
131 | kubectl apply -f pod1.yaml
132 | ```
133 |
134 | #### edit
135 | ```
136 | kubectl edit pods firstpod
137 | ```
138 |
139 | #### delete
140 | ```
141 | kubectl delete -f podlabel.yaml # all related objects are deleted the declarative way
142 | ```
143 |
144 | #### watch pods always
145 | ```
146 | kubectl get pods -w
147 | ```
148 |
149 | #### run multiple containers in 1 pod, -c containerName
150 | ```
151 | kubectl exec -it multicontainer -c webcontainer -- /bin/sh # -c selects the container by name when the pod has more than one container
152 | kubectl exec -it multicontainer -c sidecarcontainer -- /bin/sh
153 | kubectl logs -f multicontainer -c sidecarcontainer
154 | ```
155 |
156 | #### port-forward to pod
157 | ```
158 | kubectl port-forward pod/multicontainer 80:80 ## host:container port; the port is open only while the command is running
159 | kubectl port-forward pod/multicontainer 8080:80 # when browsing 127.0.0.1:8080, host:8080 goes to pod:80 and directs traffic.
160 | kubectl port-forward [pod|deployment|service]/[name] [hostPort]:[containerPort] # general form
161 | kubectl port-forward deployment/mydeployment 5000 6000 # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment
162 | kubectl port-forward service/myservice 5000 6000 # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the service
163 | kubectl port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 # Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod
164 | kubectl port-forward pod/mypod :5000 # Listen on a random port locally, forwarding to 5000 in the pod
165 | kubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000 # Listen on port 8888 on all addresses, forwarding to 5000 in the pod
166 | ```
167 |
168 | #### label and selector
169 | ```
170 | kubectl get pods -l "app" --show-labels #with -l, search label
171 | kubectl get pods -l "app=firstapp" --show-labels
172 | kubectl get pods -l "app=firstapp,tier=frontend" --show-labels
173 | kubectl get pods -l "app in (firstapp)" --show-labels
174 | kubectl get pods -l "!app" --show-labels #list not app key
175 | kubectl get pods -l "app notin (firstapp)" --show-labels #inverse
176 | kubectl get pods -l "app in (firstapp,secondapp)" --show-labels #or
177 | kubectl get pods -l "app=firstapp,app=secondapp)" --show-labels #and
178 | ```
179 |
180 | #### label addition
181 | ```
182 | command (imperative): kubectl label pods pod9 app=thirdapp
183 | command (imperative): kubectl label pods pod9 app- # the trailing "-" removes the label
184 | kubectl label --overwrite pods pod9 team=team3 #overwrite
185 | kubectl label pods --all foo=bar # all pods, label addition
186 | ```
187 |
188 | #### label node
189 | - Node could be labelled (e.g. nodes have gpu, ssd, can be labelled)
190 | ```
191 | kubectl label nodes minikube hddtype=ssd
192 | ```
193 |
194 | #### annotation
195 | ```
196 | kubectl annotate pods annotationpod foo=bar ##annotate add
197 | kubectl annotate pods annotationpod foo- ##annotate delete
198 | ```
199 |
200 | #### namespace: object
201 | ```
202 | kubectl get namespaces
203 | kubectl get pods # pods in the default namespace
204 | kubectl get pods --namespace kube-system #only kube-system
205 | kubectl get pods -n kube-system #only kube-system
206 | kubectl get pods --all-namespaces #all namespaces
207 | kubectl get pods -A #all namespaces
208 | kubectl exec -it namespacepod -n development -- /bin/sh # open a shell in a pod in the "development" namespace
209 | kubectl config set-context --current --namespace=development
210 | kubectl config set-context --current --namespace=default
211 | kubectl delete namespaces development
212 | ```
213 |
214 | #### DEPLOYMENT: run more than 1 pod and keep them in sync
215 | ```
216 | kubectl create deployment firstdeployment --image=nginx:latest --replicas=2
217 | kubectl get deployment -w #always watch
218 | kubectl get deployment
219 | kubectl delete pods firstdeployment-pod
220 | kubectl set image deployment/firstdeployment nginx=httpd # update containers on deployment
221 | kubectl scale deployment firstdeployment --replicas=5 # manual scale, increase/decrease replicas
222 | kubectl delete deployment firstdeployment # delete deployment
223 | ```
224 |
225 | #### Deployment from file
226 | - There should be at least one entry under spec/selector for each deployment to choose its pods
227 | - There should be same entries for template/metadata/labels/app and spec/selector/matchLabels/app for deployment-pod match
228 | ```
229 | kubectl apply -f deploymenttemplate.yaml
230 | ```
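
- A minimal sketch showing the required selector/template label match (names are arbitrary):

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: exampledeployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: example          # must match the pod template label below
  template:
    metadata:
      labels:
        app: example        # must match spec.selector.matchLabels
    spec:
      containers:
      - name: nginx
        image: nginx
```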
231 |
232 | #### rollout
233 | ```
234 | kubectl rollout undo deployment firstdeployment # undo
235 | ```
236 |
237 | #### record: save, return to desired revision
238 | ```
239 | kubectl apply -f deployrolling.yaml --record
240 | kubectl edit deployment rolldeployment --record
241 | kubectl set image deployment rolldeployment nginx=httpd:alpine --record=true
242 | kubectl rollout history deployment rolldeployment # show record history
243 | kubectl rollout history deployment rolldeployment --revision=2 # show details of revision 2
244 | kubectl rollout undo deployment rolldeployment --to-revision=1 # roll to the first revision
245 | ```
246 |
247 | #### live rollout commands logs on different terminal
248 | ```
249 | kubectl apply -f deployrolling.yaml
250 | on another terminal: kubectl rollout status deployment rolldeployment -w
251 | kubectl rollout pause deployment rolldeployment # pause the current deployment rollout/update
252 | kubectl rollout resume deployment rolldeployment # resume the current deployment rollout/update
253 | ```
254 |
255 | #### service
256 | - Service, --service-cluster-ip-range "10.100.0.0/16"
257 | - 4 type Service object:
258 | - ClusterIP: routes traffic only inside the cluster (default type)
259 | - NodePort: exposes the service on a port of each node, reachable from outside the cluster
260 | - LoadBalancer: provisions an external load balancer (on supported platforms)
261 | - ExternalName: maps the service to an external DNS name
262 | ```
263 | kubectl apply -f serviceClusterIP.yaml
264 | kubectl get service -o wide
265 | ```
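
- A minimal NodePort sketch (names and ports are assumptions; it selects pods labelled "app: frontend"):

```
apiVersion: v1
kind: Service
metadata:
  name: frontend
spec:
  type: NodePort
  selector:
    app: frontend
  ports:
  - port: 80          # service port inside the cluster
    targetPort: 80    # container port
    nodePort: 30080   # port opened on every node (30000-32767 range)
```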
266 |
267 | #### service with command
268 | ```
269 | kubectl expose deployment backend --type=ClusterIP --name=backend #clusterIP type service creation
270 | kubectl get service
271 | kubectl expose deployment frontend --type=NodePort --name=frontend # NodePort type service creation
272 | kubectl get service
273 | ```
274 |
275 | #### service-endpoints
276 | ```
277 | kubectl get endpoints # endpoints are created automatically together with services
278 | kubectl describe endpoints frontend # show ip addresses
279 | kubectl delete pods frontend-xx-xx # when a pod is deleted, its ip is also removed
280 | kubectl scale deployment frontend --replicas=5 # new ip added
281 | kubectl scale deployment frontend --replicas=2
282 | ```
283 |
284 | #### environment variables
285 | ```
286 | spec:
287 | containers:
288 | - name: envpod
289 | image: ozgurozturknet/env:latest
290 | ports:
291 | - containerPort: 80
292 | env:
293 | - name: USER
294 | value: "Ozgur"
295 | - name: database
296 | value: "testdb.example.com"
297 | ```
298 | ```
299 | kubectl apply -f podenv.yaml
300 | kubectl get pods
301 | kubectl exec envpod -- printenv ## env. variable
302 | kubectl exec -it firstpod -- /bin/sh
303 | kubectl port-forward pod/envpod 8080:80 #port forwarding
304 | kubectl delete -f podenv.yaml
305 | ```
306 |
307 | #### volume
308 | - ephemeral volume (temporary volume): it can be reached from more than 1 container in the pod. When the pod is deleted, the volume is also deleted (like a cache).
309 | - 2 types of ephemeral volume:
310 | - 1. emptyDir (creates an empty directory on the node; this volume is mounted into the container)
311 | - 2. hostPath: mounts a file or directory path from the worker node (worker PC); more than one file or directory can be connected
312 |
313 | ##### emptydir volume:
314 | ```
315 | volumes:
316 | - name: cache-vol
317 | emptyDir: {}
318 | ```
319 |
320 | ##### container mount:
321 | ```
322 | - name: sidecar
323 | image: busybox
324 | command: ["/bin/sh"]
325 | args: ["-c", "sleep 3600"]
326 | volumeMounts:
327 | - name: cache-vol
328 | mountPath: /tmp/log
329 | ```
330 | ```
331 | kubectl apply -f podvolumeemptydir.yaml
332 | kubectl get pods -w
333 | kubectl exec -it emptydir -c frontend -- bash
334 | kubectl exec emptydir -c frontend -- rm -rf healthcheck # the healthcheck file is deleted, so the container is restarted
335 | ```
336 |
337 | ##### hostpath type:
338 | ```
339 | containers:
340 | volumeMounts:
341 | - name: directory-vol
342 | mountPath: /dir1 # on the container: /dir1
343 | - name: dircreate-vol
344 | mountPath: /cache # on the container: /cache
345 | - name: file-vol
346 | mountPath: /cache/config.json
347 |
348 | volumes:
349 | - name: directory-vol
350 | hostPath:
351 | path: /tmp # worker node /tmp directory; Directory type volume
352 | type: Directory
353 | - name: dircreate-vol
354 | hostPath:
355 | path: /cache # worker node /cache directory; DirectoryOrCreate type volume
356 | type: DirectoryOrCreate
357 | - name: file-vol
358 | hostPath:
359 | path: /cache/config.json
360 | type: FileOrCreate
361 | ```
362 | ```
363 | kubectl apply -f podvolumehostpath.yaml
364 | kubectl exec -it hostpath -c hostpathcontainer -- bash
365 | ```
366 |
367 |
368 | #### secret: declarative way
369 | ```
370 | kubectl apply -f secret.yaml
371 | kubectl get secrets
372 | kubectl describe secret mysecret
373 | ```
374 |
375 | #### secret: imperative (cmd)
376 | ```
377 | kubectl create secret generic mysecret2 --from-literal=db_server=db.example.com --from-literal=db_username=admin --from-literal=db_password=P@ssw0rd!
378 | kubectl create secret generic mysecret4 --from-file=config.json # create from config.json, which includes the password and username
379 | ```
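
- A minimal sketch of consuming the secret in a pod via an environment variable (pod/container names are arbitrary; the key matches the mysecret object above):

```
apiVersion: v1
kind: Pod
metadata:
  name: secretpod
spec:
  containers:
  - name: app
    image: nginx
    env:
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:
          name: mysecret      # the secret created above
          key: db_password
```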
380 |
381 | #### taint and toleration
382 | ```
383 | kubectl describe nodes minikube
384 | kubectl taint node minikube platform=production:NoSchedule #taint add
385 | kubectl taint node minikube platform- # taint delete
386 | ```
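
- A minimal sketch of a pod tolerating the taint added above (pod name/image are arbitrary):

```
apiVersion: v1
kind: Pod
metadata:
  name: tolerationpod
spec:
  containers:
  - name: app
    image: nginx
  tolerations:
  - key: "platform"
    operator: "Equal"
    value: "production"
    effect: "NoSchedule"
```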
387 |
388 | #### connect pod with bash and apt install
389 | ```
390 | kubectl exec -it PodName -- bash
391 | apt update
392 | apt install net-tools
393 | apt install iputils-ping
394 | ifconfig
395 | ping x.x.x.x
396 | ```
397 |
398 |
--------------------------------------------------------------------------------
/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install-docker-ce.ps1:
--------------------------------------------------------------------------------
1 | # Microsoft: https://github.com/microsoft/Windows-Containers/blob/Main/helpful_tools/Install-DockerCE/install-docker-ce.ps1
2 | ############################################################
3 | # Script to install the community edition of docker on Windows
4 | ############################################################
5 |
6 | <#
7 | .NOTES
8 | Copyright (c) Microsoft Corporation. All rights reserved.
9 |
10 | Use of this sample source code is subject to the terms of the Microsoft
11 | license agreement under which you licensed this sample source code. If
12 | you did not accept the terms of the license agreement, you are not
13 | authorized to use this sample source code. For the terms of the license,
14 | please see the license agreement between you and Microsoft or, if applicable,
15 | see the LICENSE.RTF on your install media or the root of your tools installation.
16 | THE SAMPLE SOURCE CODE IS PROVIDED "AS IS", WITH NO WARRANTIES.
17 |
18 | .SYNOPSIS
19 | Installs the prerequisites for creating Windows containers
20 |
21 | .DESCRIPTION
22 | Installs the prerequisites for creating Windows containers
23 |
24 | .PARAMETER DockerPath
25 | Path to Docker.exe, can be local or URI
26 |
27 | .PARAMETER DockerDPath
28 | Path to DockerD.exe, can be local or URI
29 |
30 | .PARAMETER DockerVersion
31 | Version of docker to pull from download.docker.com - ! OVERRIDDEN BY DockerPath & DockerDPath
32 |
33 | .PARAMETER ExternalNetAdapter
34 | Specify a specific network adapter to bind to a DHCP network
35 |
36 | .PARAMETER SkipDefaultHost
37 | Prevents setting localhost as the default network configuration
38 |
39 | .PARAMETER Force
40 | If a restart is required, forces an immediate restart.
41 |
42 | .PARAMETER HyperV
43 | If passed, prepare the machine for Hyper-V containers
44 |
45 | .PARAMETER NATSubnet
46 | Use to override the default Docker NAT Subnet when in NAT mode.
47 |
48 | .PARAMETER NoRestart
49 | If a restart is required the script will terminate and will not reboot the machine
50 |
51 | .PARAMETER ContainerBaseImage
52 | Use this to specify the URI of the container base image you wish to pre-pull
53 |
54 | .PARAMETER Staging
55 |
56 | .PARAMETER TransparentNetwork
57 | If passed, use DHCP configuration. Otherwise, will use default docker network (NAT). (alias -UseDHCP)
58 |
59 | .PARAMETER TarPath
60 | Path to the .tar that is the base image to load into Docker.
61 |
62 | .EXAMPLE
63 | .\install-docker-ce.ps1
64 |
65 | #>
66 | #Requires -Version 5.0
67 |
68 | [CmdletBinding(DefaultParameterSetName="Standard")]
69 | param(
70 | [string]
71 | [ValidateNotNullOrEmpty()]
72 | $DockerPath = "default",
73 |
74 | [string]
75 | [ValidateNotNullOrEmpty()]
76 | $DockerDPath = "default",
77 |
78 | [string]
79 | [ValidateNotNullOrEmpty()]
80 | $DockerVersion = "latest",
81 |
82 | [string]
83 | $ExternalNetAdapter,
84 |
85 | [switch]
86 | $Force,
87 |
88 | [switch]
89 | $HyperV,
90 |
91 | [switch]
92 | $SkipDefaultHost,
93 |
94 | [string]
95 | $NATSubnet,
96 |
97 | [switch]
98 | $NoRestart,
99 |
100 | [string]
101 | $ContainerBaseImage,
102 |
103 | [Parameter(ParameterSetName="Staging", Mandatory)]
104 | [switch]
105 | $Staging,
106 |
107 | [switch]
108 | [alias("UseDHCP")]
109 | $TransparentNetwork,
110 |
111 | [string]
112 | [ValidateNotNullOrEmpty()]
113 | $TarPath
114 | )
115 |
116 | $global:RebootRequired = $false
117 | $global:ErrorFile = "$pwd\Install-ContainerHost.err"
118 | $global:BootstrapTask = "ContainerBootstrap"
119 | $global:HyperVImage = "NanoServer"
120 | $global:AdminPriviledges = $false
121 |
122 | $global:DefaultDockerLocation = "https://download.docker.com/win/static/stable/x86_64/"
123 | $global:DockerDataPath = "$($env:ProgramData)\docker"
124 | $global:DockerServiceName = "docker"
125 |
126 | function
127 | Restart-And-Run()
128 | {
129 | Test-Admin
130 |
131 | Write-Output "Restart is required; restarting now..."
132 |
133 | $argList = $script:MyInvocation.Line.replace($script:MyInvocation.InvocationName, "")
134 |
135 | #
136 | # Update .\ to the invocation directory for the bootstrap
137 | #
138 | $scriptPath = $script:MyInvocation.MyCommand.Path
139 |
140 | $argList = $argList -replace "\.\\", "$pwd\"
141 |
142 | if ((Split-Path -Parent -Path $scriptPath) -ne $pwd)
143 | {
144 | $sourceScriptPath = $scriptPath
145 | $scriptPath = "$pwd\$($script:MyInvocation.MyCommand.Name)"
146 |
147 | Copy-Item $sourceScriptPath $scriptPath
148 | }
149 |
150 | Write-Output "Creating scheduled task action ($scriptPath $argList)..."
151 | $action = New-ScheduledTaskAction -Execute "powershell.exe" -Argument "-NoExit $scriptPath $argList"
152 |
153 | Write-Output "Creating scheduled task trigger..."
154 | $trigger = New-ScheduledTaskTrigger -AtLogOn
155 |
156 | Write-Output "Registering script to re-run at next user logon..."
157 | Register-ScheduledTask -TaskName $global:BootstrapTask -Action $action -Trigger $trigger -RunLevel Highest | Out-Null
158 |
159 | try
160 | {
161 | if ($Force)
162 | {
163 | Restart-Computer -Force
164 | }
165 | else
166 | {
167 | Restart-Computer
168 | }
169 | }
170 | catch
171 | {
172 | Write-Error $_
173 |
174 | Write-Output "Please restart your computer manually to continue script execution."
175 | }
176 |
177 | exit
178 | }
179 |
180 |
181 | function
182 | Install-Feature
183 | {
184 | [CmdletBinding()]
185 | param(
186 | [ValidateNotNullOrEmpty()]
187 | [string]
188 | $FeatureName
189 | )
190 |
191 | Write-Output "Querying status of Windows feature: $FeatureName..."
192 | if (Get-Command Get-WindowsFeature -ErrorAction SilentlyContinue)
193 | {
194 | if ((Get-WindowsFeature $FeatureName).Installed)
195 | {
196 | Write-Output "Feature $FeatureName is already enabled."
197 | }
198 | else
199 | {
200 | Test-Admin
201 |
202 | Write-Output "Enabling feature $FeatureName..."
203 | }
204 |
205 | $featureInstall = Add-WindowsFeature $FeatureName
206 |
207 | if ($featureInstall.RestartNeeded -eq "Yes")
208 | {
209 | $global:RebootRequired = $true;
210 | }
211 | }
212 | else
213 | {
214 | if ((Get-WindowsOptionalFeature -Online -FeatureName $FeatureName).State -eq "Disabled")
215 | {
216 | if (Test-Nano)
217 | {
218 | throw "This NanoServer deployment does not include $FeatureName. Please add the appropriate package"
219 | }
220 |
221 | Test-Admin
222 |
223 | Write-Output "Enabling feature $FeatureName..."
224 | $feature = Enable-WindowsOptionalFeature -Online -FeatureName $FeatureName -All -NoRestart
225 |
226 | if ($feature.RestartNeeded -eq "True")
227 | {
228 | $global:RebootRequired = $true;
229 | }
230 | }
231 | else
232 | {
233 | Write-Output "Feature $FeatureName is already enabled."
234 |
235 | if (Test-Nano)
236 | {
237 | #
238 | # Get-WindowsEdition is not present on Nano. On Nano, we assume reboot is not needed
239 | #
240 | }
241 | elseif ((Get-WindowsEdition -Online).RestartNeeded)
242 | {
243 | $global:RebootRequired = $true;
244 | }
245 | }
246 | }
247 | }
248 |
249 |
250 | function
251 | New-ContainerTransparentNetwork
252 | {
253 | if ($ExternalNetAdapter)
254 | {
255 | $netAdapter = (Get-NetAdapter |? {$_.Name -eq "$ExternalNetAdapter"})[0]
256 | }
257 | else
258 | {
259 | $netAdapter = (Get-NetAdapter |? {($_.Status -eq 'Up') -and ($_.ConnectorPresent)})[0]
260 | }
261 |
262 | Write-Output "Creating container network (Transparent)..."
263 | New-ContainerNetwork -Name "Transparent" -Mode Transparent -NetworkAdapterName $netAdapter.Name | Out-Null
264 | }
265 |
266 |
267 | function
268 | Install-ContainerHost
269 | {
270 | "If this file exists when Install-ContainerHost.ps1 exits, the script failed!" | Out-File -FilePath $global:ErrorFile
271 |
272 | if (Test-Client)
273 | {
274 | if (-not $HyperV)
275 | {
276 | Write-Output "Enabling Hyper-V containers by default for Client SKU"
277 | $HyperV = $true
278 | }
279 | }
280 | #
281 | # Validate required Windows features
282 | #
283 | Install-Feature -FeatureName Containers
284 |
285 | if ($HyperV)
286 | {
287 | Install-Feature -FeatureName Hyper-V
288 | }
289 |
290 | if ($global:RebootRequired)
291 | {
292 | if ($NoRestart)
293 | {
294 | Write-Warning "A reboot is required; stopping script execution"
295 | exit
296 | }
297 |
298 | Restart-And-Run
299 | }
300 |
301 | #
302 | # Unregister the bootstrap task, if it was previously created
303 | #
304 | if ((Get-ScheduledTask -TaskName $global:BootstrapTask -ErrorAction SilentlyContinue) -ne $null)
305 | {
306 | Unregister-ScheduledTask -TaskName $global:BootstrapTask -Confirm:$false
307 | }
308 |
309 | #
310 | # Configure networking
311 | #
312 | if ($($PSCmdlet.ParameterSetName) -ne "Staging")
313 | {
314 | if ($TransparentNetwork)
315 | {
316 | Write-Output "Waiting for Hyper-V Management..."
317 | $networks = $null
318 |
319 | try
320 | {
321 | $networks = Get-ContainerNetwork -ErrorAction SilentlyContinue
322 | }
323 | catch
324 | {
325 | #
326 | # If we can't query network, we are in bootstrap mode. Assume no networks
327 | #
328 | }
329 |
330 | if ($networks.Count -eq 0)
331 | {
332 | Write-Output "Enabling container networking..."
333 | New-ContainerTransparentNetwork
334 | }
335 | else
336 | {
337 | Write-Output "Networking is already configured. Confirming configuration..."
338 |
339 | $transparentNetwork = $networks |? { $_.Mode -eq "Transparent" }
340 |
341 | if ($transparentNetwork -eq $null)
342 | {
343 | Write-Output "We didn't find a configured external network; configuring now..."
344 | New-ContainerTransparentNetwork
345 | }
346 | else
347 | {
348 | if ($ExternalNetAdapter)
349 | {
350 | $netAdapters = (Get-NetAdapter |? {$_.Name -eq "$ExternalNetAdapter"})
351 |
352 | if ($netAdapters.Count -eq 0)
353 | {
354 | throw "No adapters found that match the name $ExternalNetAdapter"
355 | }
356 |
357 | $netAdapter = $netAdapters[0]
358 | $transparentNetwork = $networks |? { $_.NetworkAdapterName -eq $netAdapter.InterfaceDescription }
359 |
360 | if ($transparentNetwork -eq $null)
361 | {
362 | throw "One or more external networks are configured, but not on the requested adapter ($ExternalNetAdapter)"
363 | }
364 |
365 | Write-Output "Configured transparent network found: $($transparentNetwork.Name)"
366 | }
367 | else
368 | {
369 | Write-Output "Configured transparent network found: $($transparentNetwork.Name)"
370 | }
371 | }
372 | }
373 | }
374 | }
375 |
376 | #
377 | # Install, register, and start Docker
378 | #
379 | if (Test-Docker)
380 | {
381 | Write-Output "Docker is already installed."
382 | }
383 | else
384 | {
385 | if ($NATSubnet)
386 | {
387 | Install-Docker -DockerPath $DockerPath -DockerDPath $DockerDPath -NATSubnet $NATSubnet -ContainerBaseImage $ContainerBaseImage
388 | }
389 | else
390 | {
391 | Install-Docker -DockerPath $DockerPath -DockerDPath $DockerDPath -ContainerBaseImage $ContainerBaseImage
392 | }
393 | }
394 |
395 | if ($TarPath)
396 | {
397 | cmd /c "docker load -i `"$TarPath`""
398 | }
399 |
400 | Remove-Item $global:ErrorFile
401 |
402 | Write-Output "Script complete!"
403 | }
404 |
405 | function
406 | Copy-File
407 | {
408 | [CmdletBinding()]
409 | param(
410 | [string]
411 | $SourcePath,
412 |
413 | [string]
414 | $DestinationPath
415 | )
416 |
417 | if ($SourcePath -eq $DestinationPath)
418 | {
419 | return
420 | }
421 |
422 | if (Test-Path $SourcePath)
423 | {
424 | Copy-Item -Path $SourcePath -Destination $DestinationPath
425 | }
426 | elseif (($SourcePath -as [System.URI]).AbsoluteURI -ne $null)
427 | {
428 | if (Test-Nano)
429 | {
430 | $handler = New-Object System.Net.Http.HttpClientHandler
431 | $client = New-Object System.Net.Http.HttpClient($handler)
432 | $client.Timeout = New-Object System.TimeSpan(0, 30, 0)
433 | $cancelTokenSource = [System.Threading.CancellationTokenSource]::new()
434 | $responseMsg = $client.GetAsync([System.Uri]::new($SourcePath), $cancelTokenSource.Token)
435 | $responseMsg.Wait()
436 |
437 | if (!$responseMsg.IsCanceled)
438 | {
439 | $response = $responseMsg.Result
440 | if ($response.IsSuccessStatusCode)
441 | {
442 | $downloadedFileStream = [System.IO.FileStream]::new($DestinationPath, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write)
443 | $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream)
444 | $copyStreamOp.Wait()
445 | $downloadedFileStream.Close()
446 | if ($copyStreamOp.Exception -ne $null)
447 | {
448 | throw $copyStreamOp.Exception
449 | }
450 | }
451 | }
452 | }
453 | elseif ($PSVersionTable.PSVersion.Major -ge 5)
454 | {
455 | #
456 | # We disable progress display because it kills performance for large downloads (at least on 64-bit PowerShell)
457 | #
458 | $ProgressPreference = 'SilentlyContinue'
459 | Invoke-WebRequest -Uri $SourcePath -OutFile $DestinationPath -UseBasicParsing
460 | $ProgressPreference = 'Continue'
461 | }
462 | else
463 | {
464 | $webClient = New-Object System.Net.WebClient
465 | $webClient.DownloadFile($SourcePath, $DestinationPath)
466 | }
467 | }
468 | else
469 | {
470 | throw "Cannot copy from $SourcePath"
471 | }
472 | }
473 |
474 |
475 | function
476 | Test-Admin()
477 | {
478 | # Get the ID and security principal of the current user account
479 | $myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
480 | $myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
481 |
482 | # Get the security principal for the Administrator role
483 | $adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
484 |
485 | # Check to see if we are currently running "as Administrator"
486 | if ($myWindowsPrincipal.IsInRole($adminRole))
487 | {
488 | $global:AdminPriviledges = $true
489 | return
490 | }
491 | else
492 | {
493 | #
494 | # We are not running "as Administrator"
495 | # Exit from the current, unelevated, process
496 | #
497 | throw "You must run this script as administrator"
498 | }
499 | }
500 |
501 |
502 | function
503 | Test-Client()
504 | {
505 | return (-not ((Get-Command Get-WindowsFeature -ErrorAction SilentlyContinue) -or (Test-Nano)))
506 | }
507 |
508 |
509 | function
510 | Test-Nano()
511 | {
512 | $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId
513 |
514 | return (($EditionId -eq "ServerStandardNano") -or
515 | ($EditionId -eq "ServerDataCenterNano") -or
516 | ($EditionId -eq "NanoServer") -or
517 | ($EditionId -eq "ServerTuva"))
518 | }
519 |
520 |
521 | function
522 | Wait-Network()
523 | {
524 | $connectedAdapter = Get-NetAdapter |? ConnectorPresent
525 |
526 | if ($connectedAdapter -eq $null)
527 | {
528 | throw "No connected network"
529 | }
530 |
531 | $startTime = Get-Date
532 | $timeElapsed = $(Get-Date) - $startTime
533 |
534 | while ($($timeElapsed).TotalMinutes -lt 5)
535 | {
536 | $readyNetAdapter = $connectedAdapter |? Status -eq 'Up'
537 |
538 | if ($readyNetAdapter -ne $null)
539 | {
540 | return;
541 | }
542 |
543 | Write-Output "Waiting for network connectivity..."
544 | Start-Sleep -sec 5
545 |
546 | $timeElapsed = $(Get-Date) - $startTime
547 | }
548 |
549 | throw "Network not connected after 5 minutes"
550 | }
551 |
552 |
553 | function
554 | Install-Docker()
555 | {
556 | [CmdletBinding()]
557 | param(
558 | [string]
559 | [ValidateNotNullOrEmpty()]
560 | $DockerPath = "default",
561 |
562 | [string]
563 | [ValidateNotNullOrEmpty()]
564 | $DockerDPath = "default",
565 |
566 | [string]
567 | [ValidateNotNullOrEmpty()]
568 | $NATSubnet,
569 |
570 | [switch]
571 | $SkipDefaultHost,
572 |
573 | [string]
574 | $ContainerBaseImage
575 | )
576 |
577 | Test-Admin
578 |
579 | #If one of these is set to default then the whole .zip needs to be downloaded anyway.
580 | Write-Output "DOCKER $DockerPath"
581 | if ($DockerPath -eq "default" -or $DockerDPath -eq "default") {
582 | Write-Output "Checking Docker versions"
583 | #Get the list of .zip packages available from docker.
584 | $availableVersions = ((Invoke-WebRequest -Uri $DefaultDockerLocation -UseBasicParsing).Links | Where-Object {$_.href -like "docker*"}).href | Sort-Object -Descending
585 |
586 | #Parse the versions from the file names
587 | $availableVersions = ($availableVersions | Select-String -Pattern "docker-(\d+\.\d+\.\d+).+" -AllMatches | Select-Object -Expand Matches | %{ $_.Groups[1].Value })
588 | $version = $availableVersions[0]
589 |
590 | if($DockerVersion -ne "latest") {
591 | $version = $DockerVersion
592 | if(!($availableVersions | Select-String $DockerVersion)) {
593 | Write-Error "Docker version supplied $DockerVersion was invalid, please choose from the list of available versions: $availableVersions"
594 | throw "Invalid docker version supplied."
595 | }
596 | }
597 |
598 | $zipUrl = $global:DefaultDockerLocation + "docker-$version.zip"
599 | $destinationFolder = "$env:UserProfile\DockerDownloads"
600 |
601 | if(!(Test-Path "$destinationFolder")) {
602 | md -Path $destinationFolder | Out-Null
603 | } elseif(Test-Path "$destinationFolder\docker-$version") {
604 | Remove-Item -Recurse -Force "$destinationFolder\docker-$version"
605 | }
606 |
607 | Write-Output "Downloading $zipUrl to $destinationFolder\docker-$version.zip"
608 | Copy-File -SourcePath $zipUrl -DestinationPath "$destinationFolder\docker-$version.zip"
609 | Expand-Archive -Path "$destinationFolder\docker-$version.zip" -DestinationPath "$destinationFolder\docker-$version"
610 |
611 | if($DockerPath -eq "default") {
612 | $DockerPath = "$destinationFolder\docker-$version\docker\docker.exe"
613 | }
614 | if($DockerDPath -eq "default") {
615 | $DockerDPath = "$destinationFolder\docker-$version\docker\dockerd.exe"
616 | }
617 | }
618 |
619 | Write-Output "Installing Docker... $DockerPath"
620 | Copy-File -SourcePath $DockerPath -DestinationPath $env:windir\System32\docker.exe
621 |
622 | Write-Output "Installing Docker daemon... $DockerDPath"
623 | Copy-File -SourcePath $DockerDPath -DestinationPath $env:windir\System32\dockerd.exe
624 |
625 | $dockerConfigPath = Join-Path $global:DockerDataPath "config"
626 |
627 | if (!(Test-Path $dockerConfigPath))
628 | {
629 | md -Path $dockerConfigPath | Out-Null
630 | }
631 |
632 | #
633 | # Register the docker service.
634 | # Configuration options should be placed at %programdata%\docker\config\daemon.json
635 | #
636 | Write-Output "Configuring the docker service..."
637 |
638 | $daemonSettings = New-Object PSObject
639 |
640 | $certsPath = Join-Path $global:DockerDataPath "certs.d"
641 |
642 | if (Test-Path $certsPath)
643 | {
644 | $daemonSettings | Add-Member NoteProperty hosts @("npipe://", "0.0.0.0:2376")
645 | $daemonSettings | Add-Member NoteProperty tlsverify true
646 | $daemonSettings | Add-Member NoteProperty tlscacert (Join-Path $certsPath "ca.pem")
647 | $daemonSettings | Add-Member NoteProperty tlscert (Join-Path $certsPath "server-cert.pem")
648 | $daemonSettings | Add-Member NoteProperty tlskey (Join-Path $certsPath "server-key.pem")
649 | }
650 | elseif (!$SkipDefaultHost.IsPresent)
651 | {
652 | # Default local host
653 | $daemonSettings | Add-Member NoteProperty hosts @("npipe://")
654 | }
655 |
656 | if ($NATSubnet -ne "")
657 | {
658 | $daemonSettings | Add-Member NoteProperty fixed-cidr $NATSubnet
659 | }
660 |
661 | $daemonSettingsFile = Join-Path $dockerConfigPath "daemon.json"
662 |
663 | $daemonSettings | ConvertTo-Json | Out-File -FilePath $daemonSettingsFile -Encoding ASCII
664 |
665 | & dockerd --register-service --service-name $global:DockerServiceName
666 |
667 | Start-Docker
668 |
669 | #
670 | # Waiting for docker to come to steady state
671 | #
672 | Wait-Docker
673 |
674 | if(-not [string]::IsNullOrEmpty($ContainerBaseImage)) {
675 | Write-Output "Attempting to pull specified base image: $ContainerBaseImage"
676 | docker pull $ContainerBaseImage
677 | }
678 |
679 | Write-Output "The following images are present on this machine:"
680 |
681 | docker images -a | Write-Output
682 |
683 | Write-Output ""
684 | }
685 |
686 | function
687 | Start-Docker()
688 | {
689 | Start-Service -Name $global:DockerServiceName
690 | }
691 |
692 |
693 | function
694 | Stop-Docker()
695 | {
696 | Stop-Service -Name $global:DockerServiceName
697 | }
698 |
699 |
700 | function
701 | Test-Docker()
702 | {
703 | $service = Get-Service -Name $global:DockerServiceName -ErrorAction SilentlyContinue
704 |
705 | return ($service -ne $null)
706 | }
707 |
708 |
709 | function
710 | Wait-Docker()
711 | {
712 | Write-Output "Waiting for Docker daemon..."
713 | $dockerReady = $false
714 | $startTime = Get-Date
715 |
716 | while (-not $dockerReady)
717 | {
718 | try
719 | {
720 | docker version | Out-Null
721 |
722 | if (-not $?)
723 | {
724 | throw "Docker daemon is not running yet"
725 | }
726 |
727 | $dockerReady = $true
728 | }
729 | catch
730 | {
731 | $timeElapsed = $(Get-Date) - $startTime
732 |
733 | if ($($timeElapsed).TotalMinutes -ge 1)
734 | {
735 | throw "Docker Daemon did not start successfully within 1 minute."
736 | }
737 |
738 | # Swallow error and try again
739 | Start-Sleep -sec 1
740 | }
741 | }
742 | Write-Output "Successfully connected to Docker Daemon."
743 | }
744 |
745 | try
746 | {
747 | Install-ContainerHost
748 | }
749 | catch
750 | {
751 | Write-Error $_
752 | }
753 |
--------------------------------------------------------------------------------
/K8s-Kubeadm-Cluster-Setup.md:
--------------------------------------------------------------------------------
1 | ## LAB: K8s Cluster Setup with Kubeadm and Containerd
2 |
3 | This scenario shows how to create a K8s cluster on virtual PCs (multipass, kubeadm, containerd)
4 |
5 | **Easy way to create K8s Cluster with Ubuntu (Control-Plane, Workers) and Windows Servers:**
6 |
7 | - Ubuntu 20.04 Installation Files (updated: K8s 1.26.2, calico 3.25.0, containerd 1.6.10) without using Corporate Proxy:
8 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10/install.sh
9 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/ubuntu20.04-kubeadm1.26.2-calico3.25.0-containerd1.6.10/master.sh
10 | - Ubuntu 24.04 Installation Files (updated: K8s 1.32.0, calico 3.29.1, containerd 1.7.24) without using Corporate Proxy:
11 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install-ubuntu24.04-k8s1.32.sh
12 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/ubuntu24.04-kubeadm1.32.0-calico3.29.1-containerd1.7.24/master-ubuntu24.04-k8s1.32.sh
13 | - Windows 2019 Server Installation Files (K8s 1.23.5, calico 3.25.0, docker as container runtime) without using Corporate Proxy:
14 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install1.ps1
15 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install2.ps1
16 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2019-kubeadm1.26.2-calico3.25.0-docker/install-docker-ce.ps1
17 | - Windows 2022 Server Installation Files (K8s 1.32.0, calico 3.29.1, containerd 1.7.24) without using Corporate Proxy:
18 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install1.ps1
19 | - https://github.com/omerbsezer/Fast-Kubernetes/blob/main/create_real_cluster/win2022-kubeadm1.32.0-calico3.29.1-containerd1.7.24/install2.ps1
20 |
21 | **IMPORTANT:**
22 | - If your cluster is behind the corporate proxy, you should add proxy settings on **Environment Variables, Docker Config, Containerd Config**.
23 | - Links in the script files might change over time (e.g. Calico updated their links)
24 | - Important Notes from K8s:
25 | - K8s on Windows: https://kubernetes.io/docs/concepts/windows/intro/
26 | - Supported Versions: https://kubernetes.io/docs/concepts/windows/intro/#windows-os-version-support
27 |
28 | ### Table of Contents
29 | - [Creating Cluster With Kubeadm, Containerd](#creating)
30 | - [Multipass Installation - Creating VM](#creatingvm)
31 | - [IP-Tables Bridged Traffic Configuration](#ip-tables)
32 | - [Install Containerd](#installcontainerd)
33 | - [Install KubeAdm](#installkubeadm)
34 | - [Install Kubernetes Cluster](#installkubernetes)
35 | - [Install Kubernetes Network Infrastructure](#network)
36 | - [(Optional) If you need Windows Node: Creating Windows Node](#creatingWindows)
37 | - [Joining New K8s Worker Node to Existing Cluster](#joining)
38 | - [Brute-Force Method](#bruteforce)
39 | - [Easy Way to Get Join Command](#easy)
40 | - [IP address changes in Kubernetes Master Node](#master_ip_changed)
41 | - [Removing the Worker Node from Cluster](#removing)
42 | - [Installing Docker on Existing Cluster & Running a Local Registry for Storing Local Images](#docker_registry)
43 | - [Installing Docker](#installingdocker)
44 | - [Running Docker Registry](#dockerregistry)
45 | - [Pulling Image from Docker Local Registry and Configure Containerd](#local_image)
46 | - [NFS Server Connection for Persistent Volume](#nfs_server)
47 |
48 | ## 1. Creating Cluster With Kubeadm, Containerd
49 |
50 | #### 1.1 Multipass Installation - Creating VM
51 |
52 | - "Multipass is a mini-cloud on your workstation using native hypervisors of all the supported plaforms (Windows, macOS and Linux)"
53 | - Fast to install and to use.
54 | - **Link:** https://multipass.run/
55 |
56 | ```
57 | # creating master, worker1
58 | # -c => cpu, -m => memory, -d => disk space
59 | multipass launch --name master -c 2 -m 2G -d 10G
60 | multipass launch --name worker1 -c 2 -m 2G -d 10G
61 | ```
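
- To verify the VMs are up before continuing (an optional check, not in the original steps):

```
multipass list          # shows each VM with its state and IPv4 address
multipass info master   # detailed info (CPU, memory, disk) for one VM
```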
62 |
63 | 
64 |
65 | ```
66 | # get shell on master
67 | multipass shell master
68 | # get shell on worker1
69 | multipass shell worker1
70 | ```
71 |
72 | 
73 |
74 | #### 1.2 IP-Tables Bridged Traffic Configuration
75 |
76 | - Run on ALL nodes:
77 | ```
78 | cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
79 | overlay
80 | br_netfilter
81 | EOF
82 |
83 | sudo modprobe overlay
84 | sudo modprobe br_netfilter
85 |
86 | # sysctl params required by setup; params persist across reboots
87 | cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
88 | net.bridge.bridge-nf-call-iptables  = 1
89 | net.bridge.bridge-nf-call-ip6tables = 1
90 | net.ipv4.ip_forward                 = 1
91 | EOF
92 |
93 | # apply sysctl params without reboot
94 | sudo sysctl --system
95 | ```
96 |
97 | #### 1.3 Install Containerd
98 |
117 | - Run on ALL nodes:
118 | ```
119 | cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
120 | overlay
121 | br_netfilter
122 | EOF
123 |
124 | sudo apt-get update
125 | sudo apt-get install -y containerd
126 |
127 | # generate the default config and enable the systemd cgroup driver
128 | sudo mkdir -p /etc/containerd
129 | containerd config default | sudo tee /etc/containerd/config.toml
130 | sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
131 | sudo systemctl restart containerd
132 | ```
133 |
134 | #### 1.4 Install KubeAdm
135 |
172 | - Run on ALL nodes:
173 | ```
174 | sudo apt-get update
175 | sudo apt-get install -y apt-transport-https ca-certificates curl
176 | sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
177 | echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
178 | sudo apt-get update
179 | sudo apt-get install -y kubelet kubeadm kubectl
180 | sudo apt-mark hold kubelet kubeadm kubectl
181 | ```
182 |
183 | 
184 |
185 | 
186 |
187 | 
188 |
189 | 
190 |
191 |
192 | #### 1.5 Install Kubernetes Cluster
193 |
194 | - Run on ALL nodes:
195 | ```
196 | sudo kubeadm config images pull
197 | ```
198 |
199 | 
200 |
201 | - From worker1, ping the master to learn the master's IP.
202 | ```
203 | ping master
204 | ```
205 | 
206 |
207 | - Run on Master:
208 | ```
209 | sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=<ip_address> --control-plane-endpoint=<ip_address>
210 | # sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=172.31.45.74 --control-plane-endpoint=172.31.45.74
211 | ```
212 |
213 | 
214 |
215 | - After the kubeadm init command, the master node responds with the following:
216 |
217 | 
218 |
219 | - On the Master node run:
220 |
221 | ```
222 | mkdir -p $HOME/.kube
223 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
224 | sudo chown $(id -u):$(id -g) $HOME/.kube/config
225 | ```
226 |
227 | 
228 |
229 | - On the worker node, run the following to join the cluster (tokens are different in your case; please look at the kubeadm init response):
230 |
231 | ```
232 | sudo kubeadm join 172.31.45.74:6443 --token w7nntd.7t6qg4cd418wzkup \
233 | --discovery-token-ca-cert-hash sha256:1f03886e5a28fb9716e01794b4a01144f362bf431220f15ca98bed2f5a44e91b
234 | ```
235 |
236 | - If it is required to create another master node, copy the control-plane line (tokens are different in your case; please look at the kubeadm init response):
237 |
238 | ```
239 | sudo kubeadm join 172.31.45.74:6443 --token w7nntd.7t6qg4cd418wzkup \
240 | --discovery-token-ca-cert-hash sha256:1f03886e5a28fb9716e01794b4a01144f362bf431220f15ca98bed2f5a44e91b \
241 | --control-plane
242 | ```
243 |
244 | 
245 |
246 | - On Master node:
247 |
248 | 
249 |
250 |
251 | #### 1.6 Install Kubernetes Network Infrastructure
252 |
253 | - Calico is used as the network plugin on K8s. Others (flannel, weave) could also be used.
254 | - Run only on the Master; in our examples, we are using Calico instead of Flannel:
255 | - Calico:
256 | ```
257 | kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
258 | kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
259 | ```
260 | - Flannel:
261 | ```
262 | kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
263 | ```
264 |
265 | 
266 |
267 | 
268 |
269 | - After applying the network implementation, the nodes are now ready. Only the Master node is used to get information about the cluster.
270 |
271 | 
272 |
273 | 
274 |
275 | ##### 1.6.1 If you have a Windows node to add to your cluster:
276 |
277 | - Instead of running it as above, you should install Calico this way; run on the Master node:
278 | ```
279 | # Download Calico CNI
280 | curl https://docs.projectcalico.org/manifests/calico.yaml > calico.yaml
281 | # Apply Calico CNI
282 | kubectl apply -f ./calico.yaml
283 | ```
284 |
285 | Run on the Master Node:
286 | ```
287 | # required to add windows node
288 | sudo -i
289 | cd /usr/local/bin/
290 | curl -o calicoctl -O -L "https://github.com/projectcalico/calicoctl/releases/download/v3.19.1/calicoctl"
291 | chmod +x calicoctl
292 | exit
293 |
294 | # Disable "IPinIP":
295 | calicoctl get ipPool default-ipv4-ippool -o yaml > ippool.yaml
296 | nano ippool.yaml # set ipipmode: Never
297 | calicoctl apply -f ippool.yaml
298 |
299 | kubectl get felixconfigurations.crd.projectcalico.org default -o yaml -n kube-system > felixconfig.yaml
300 | nano felixconfig.yaml #Set: "ipipEnabled: false"
301 | kubectl apply -f felixconfig.yaml
302 |
303 | # This is required to prevent Linux nodes from borrowing IP addresses from Windows nodes:
304 | calicoctl ipam configure --strictaffinity=true
305 | sudo reboot
306 |
307 | kubectl cluster-info
308 | kubectl get nodes -o wide
309 | ssh <user>@<win_node_ip> 'mkdir c:\k'
310 | scp -r $HOME/.kube/config <user>@<win_node_ip>:/k/ # send to the Windows PC from the master node; required while installing calico
311 | ```
312 |
313 | - Ref: https://github.com/gary-RR/my_YouTube_Kuberenetes_Hybird/blob/main/setupcluster.sh
314 |
315 | #### (Optional) If you need Windows Node: Creating Windows Node
316 |
317 | - Kubernetes requires at least Windows Server 2019 (https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/)
318 | - Run the following in PowerShell with administrator privileges on the Windows nodes:
319 |
320 | ```
321 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction OutBound -Action Allow
322 | New-NetFireWallRule -DisplayName "Allow All Traffic" -Direction InBound -Action Allow
323 |
324 | Install-WindowsFeature -Name containers # enable the Containers feature (prerequisite for docker)
325 | Restart-Computer -Force
326 |
327 | .\install-docker-ce.ps1
328 |
329 | Set-Service -Name docker -StartupType 'Automatic'
330 |
331 | #Install additional Windows networking components
332 |
333 | Install-WindowsFeature RemoteAccess
334 | Install-WindowsFeature RSAT-RemoteAccess-PowerShell
335 | Install-WindowsFeature Routing
336 | Restart-Computer -Force
337 | Install-RemoteAccess -VpnType RoutingOnly
338 | Set-Service -Name RemoteAccess -StartupType 'Automatic'
339 | start-service RemoteAccess
340 |
341 | # Install Calico
342 | mkdir c:\k
343 | #Copy the Kubernetes kubeconfig file from the master node (default, Location $HOME/.kube/config), to c:\k\config.
344 |
345 | Invoke-WebRequest https://docs.projectcalico.org/scripts/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1
346 |
347 | c:\install-calico-windows.ps1 -KubeVersion 1.23.5
348 |
349 | #Verify that the Calico services are running.
350 | Get-Service -Name CalicoNode
351 | Get-Service -Name CalicoFelix
352 |
353 | #Install and start kubelet/kube-proxy service. Execute following PowerShell script/commands.
354 | C:\CalicoWindows\kubernetes\install-kube-services.ps1
355 | Start-Service -Name kubelet
356 | Start-Service -Name kube-proxy
357 |
358 | #Copy kubectl.exe, kubeadm.etc to the folder below which is on the path:
359 | cp C:\k\*.exe C:\Users\<user>\AppData\Local\Microsoft\WindowsApps
360 |
361 | #Test Win node#####################################
362 | #List all cluster nodes
363 | kubectl get nodes -o wide
364 |
365 | [Environment]::SetEnvironmentVariable("HTTP_PROXY", "http://:3128", [EnvironmentVariableTarget]::Machine)
366 | [Environment]::SetEnvironmentVariable("HTTPS_PROXY", "http://:3128", [EnvironmentVariableTarget]::Machine)
367 | [Environment]::SetEnvironmentVariable("NO_PROXY", "192.168.*.*, ::6443, :6443, 172.24.*.*, 172.25.*.*, 10.*.*.*, localhost, 127.0.0.1, 0.0.0.0/8", [EnvironmentVariableTarget]::Machine)
368 | Restart-Service docker
369 | ```
370 |
371 | - Create a win-webserver.yaml file for testing the Windows node and run it on Windows 2019; details: https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/
372 | - Ref: https://github.com/gary-RR/my_YouTube_Kuberenetes_Hybird/blob/main/Setting-ThingsUp-On-Windows-Server.sh
373 |
374 | ## 2. Joining New K8s Worker Node to Existing Cluster
375 |
376 | ### 2.1 Brute-Force Method
377 |
378 | - If we lose the token, the discovery-token CA cert hash, or the API server address, we need to recover them to join a new node into the cluster.
379 | - We are adding a new node to the existing cluster above. We need to get the join token, the discovery-token CA cert hash, and the API server advertise address. After getting this info, we'll create the join command for each node.
380 | - Run on Master to get certificate and token information:
381 |
382 | ```
383 | openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
384 | kubeadm token list
385 | kubectl cluster-info
386 | ```
387 |
388 | 
389 |
390 | - In this example, the token TTL has 3 hours left (normally, a token expires in 24 hours), so we don't need to create a new one.
391 | - If the token is expired, generate a new one with the command:
392 |
393 | ```
394 | sudo kubeadm token create
395 | kubeadm token list
396 | ```
397 |
398 | - Create join command for worker nodes:
399 |
400 | ```
401 | kubeadm join \
402 |   <api_server_ip>:<port> \
403 |   --token <token> \
404 |   --discovery-token-ca-cert-hash sha256:<hash>
405 | ```
406 |
407 | - In our case, we run the following command on both workers (worker2, worker3):
408 |
409 | ```
410 | sudo kubeadm join 172.31.32.27:6443 --token 39g7sx.v589tv38nxhus74k --discovery-token-ca-cert-hash sha256:1db5d45337803e35e438cdcdd9ff77449fef3272381ee43784626f19c873d356
411 | ```
412 |
413 | 
414 |
415 | 
416 |
417 | ### 2.2 Easy Way to Get Join Command
418 | - Run on the master node:
419 | ```
420 | kubeadm token create --print-join-command
421 | ```
422 | - Copy the join command above and paste it on **ALL worker nodes**.
423 | - Then, to verify the nodes are ready, run on master:
424 |
425 | ```
426 | kubectl get nodes
427 | ```
428 |
429 | 
430 |
431 | - Ref: https://computingforgeeks.com/join-new-kubernetes-worker-node-to-existing-cluster/
432 |
433 | ## 3. IP address changes in Kubernetes Master Node
434 | - After restarting the Master Node, the master node's IP may have changed while your K8s cluster API still points to the node's old IP. In that case, you should reconfigure the K8s cluster with the new IP.
435 |
436 | - You cannot reach API when using kubectl commands:
437 |
438 | 
439 |
440 | - If you installed docker for the local docker registry, you can remove the exited containers:
441 |
442 | ```
443 | sudo docker rm $(sudo docker ps -a -f status=exited -q)
444 | ```
445 |
446 | #### On Master Node:
447 |
448 | ```
449 | sudo kubeadm reset
450 | sudo kubeadm init --pod-network-cidr=192.168.0.0/16
451 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
452 | ```
453 |
454 | - After kubeadm reset, if there is an error showing that some ports are still in use, use the following commands to kill the process, then run kubeadm init again:
455 |
456 | ```
457 | sudo netstat -lnp | grep <port>
458 | sudo kill <pid>
459 | ```
460 |
461 | 
462 |
463 | 
464 |
465 | - The output shows which command should be used to join the cluster:
466 |
467 | ```
468 | sudo kubeadm join 172.31.40.125:6443 --token 07vo3z.q2n2qz6bd07ipdnf \
469 | --discovery-token-ca-cert-hash sha256:46c7dcb092ca091e71ab39bd542e73b90b3f7bdf0c486202b857a678cd9879ba
470 | ```
471 | 
472 |
473 | 
474 |
475 |
476 | - Network Configuration with new IP:
477 |
478 | ```
479 | kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
480 | kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
481 | ```
482 |
483 | 
484 |
485 | #### On Worker Nodes:
486 |
487 | ```
488 | sudo kubeadm reset
489 | sudo kubeadm join 172.31.40.125:6443 --token 07vo3z.q2n2qz6bd07ipdnf \
490 | --discovery-token-ca-cert-hash sha256:46c7dcb092ca091e71ab39bd542e73b90b3f7bdf0c486202b857a678cd9879ba
491 | ```
492 |
493 | 
494 |
495 | 
496 |
497 | - On Master Node:
498 |
499 | - Worker1 has now joined the cluster.
500 |
501 | ```
502 | kubectl get nodes
503 | ```
504 |
505 | 
506 |
507 | ## 4. Removing the Worker Node from Cluster
508 |
509 | - Run these commands on the Master Node to remove a specific worker node:
510 |
511 | ```
512 | kubectl get nodes
513 | kubectl drain worker2
514 | kubectl delete node worker2
515 | ```
516 |
517 | 
518 |
519 | - Run on the removed node (worker2):
520 |
521 | ```
522 | sudo kubeadm reset
523 | ```
524 |
525 | 
526 |
527 | ## 5. Installing Docker on an Existing Cluster & Running a Local Registry for Storing Local Images
528 |
529 | #### 5.1 Installing Docker
530 |
531 | - Run these commands on the Master Node to install Docker on it:
532 |
533 | ```
534 | sudo apt-get update
535 | sudo apt-get install ca-certificates curl gnupg lsb-release
536 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
537 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
538 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
539 | sudo apt-get update
540 | sudo apt-get install docker-ce docker-ce-cli containerd.io
541 | sudo docker run hello-world
542 | ```
543 |
544 | **See for more information:** https://docs.docker.com/engine/install/ubuntu/
545 |
546 | 
547 |
548 | 
549 |
550 | 
551 |
552 | 
553 |
554 | 
555 |
556 | - Copy and run on all nodes to change Docker's cgroup driver to systemd:
557 |
558 | ```
559 | cd /etc/docker
560 | sudo touch daemon.json
561 | sudo nano daemon.json
562 | # in the file, paste:
563 | {
564 | "exec-opts": ["native.cgroupdriver=systemd"]
565 | }
566 | sudo systemctl restart docker
567 | sudo docker image ls
568 | kubectl get nodes
569 | ```
570 |
571 | 
572 |
573 | 
574 |
575 | 
576 |
577 | - If your cluster is behind a proxy, configure Docker's proxy settings (ref: https://docs.docker.com/config/daemon/systemd/). Copy and run on all nodes:
578 | ```
579 | sudo mkdir -p /etc/systemd/system/docker.service.d
580 | cd /etc/systemd/system/docker.service.d/
581 | sudo touch http-proxy.conf
582 | sudo nano http-proxy.conf
583 | # copy and paste in the file:
584 | [Service]
585 | Environment="HTTP_PROXY=http://:3128"
586 | Environment="HTTPS_PROXY=http://:3128"
587 | sudo systemctl daemon-reload
588 | sudo systemctl restart docker
589 | sudo systemctl show --property=Environment docker
590 | sudo docker run hello-world
591 | ```
592 |
593 | - To use the docker command without sudo:
594 |
595 | ```
596 | sudo groupadd docker
597 | sudo usermod -aG docker [non-root user]
598 | # logout and login to enable it
599 | ```
600 |
601 | #### 5.2 Running Docker Registry
602 |
603 | - Run on Master to pull the registry image:
604 |
605 | ```
606 | sudo docker image pull registry
607 | ```
608 |
609 | - Run a container from the 'registry' image (-p: port binding [hostPort]:[containerPort], -d: detached mode (run in the background), -e: set environment variables):
610 | ```
611 | sudo docker container run -d -p 5000:5000 --restart always --name localregistry -e REGISTRY_STORAGE_DELETE_ENABLED=true registry
612 | ```
613 |
614 | - Run the registry container with a bind mount (-v) and with validation disabled (REGISTRY_VALIDATION_DISABLED=true) to avoid error 500:
615 | ```
616 | sudo docker run -d -p 5000:5000 --restart=always --name registry -v /home/docker_registry:/var/lib/registry -e REGISTRY_STORAGE_DELETE_ENABLED=true -e REGISTRY_VALIDATION_DISABLED=true -e REGISTRY_HTTP_ADDR=0.0.0.0:5000 registry
617 | ```
618 |
619 | 
620 |
621 | 
622 |
623 | - Open in a browser or run a curl command:
624 | ```
625 | curl http://127.0.0.1:5000/v2/_catalog
626 | ```
627 | 
628 |
629 |
630 | ## 6. Pulling Images from the Local Docker Registry and Configuring Containerd
631 |
632 | - In this scenario, the local Docker registry is already running on the Master node (see [Section 5](#docker_registry)).
633 | - First, add insecure-registries to /etc/docker/daemon.json on **ALL nodes**:
634 |
635 | ```
636 | sudo nano /etc/docker/daemon.json
637 | # copy insecure-registries and paste it
638 | {
639 | "exec-opts": ["native.cgroupdriver=systemd"],
640 | "insecure-registries":["192.168.219.64:5000"]
641 | }
642 | sudo systemctl restart docker.service
643 | ```
644 |
645 | 
646 |
647 | - Pull an image from DockerHub, tag it for the local registry, and push it from the master node:
648 |
649 | ```
650 | sudo docker image pull nginx:latest
651 | ifconfig # to get master IP
652 | sudo docker image tag nginx:latest 192.168.219.64:5000/nginx:latest
653 | sudo docker image push 192.168.219.64:5000/nginx:latest
654 | curl http://192.168.219.64:5000/v2/_catalog
655 | sudo docker image pull 192.168.219.64:5000/nginx:latest
656 | ```
657 |
658 | - Create the docker config and get the authentication username and password base64-encoded:
659 |
660 | ```
661 | sudo docker login # this creates /root/.docker/config
662 | sudo cat /root/.docker/config.json | base64 -w0 # copy the base64 encoded key
663 | ```
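
- Alternatively, kubectl can generate the same kind of secret directly, without hand-editing YAML (username and password here are placeholders):

```
kubectl create secret docker-registry registrypullsecret \
  --docker-server=192.168.219.64:5000 \
  --docker-username=<user> \
  --docker-password=<password>
```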
664 |
665 | - Create my-secret.yaml and paste the base64 encoded key:
666 |
667 | ```
668 | apiVersion: v1
669 | kind: Secret
670 | metadata:
671 |   name: registrypullsecret
672 | data:
673 |   .dockerconfigjson: <base64-encoded-key>
674 | type: kubernetes.io/dockerconfigjson
675 | ```
676 |
677 | - Create the secret. The kubelet uses this secret to pull the image:
678 |
679 | ```
680 | kubectl create -f my-secret.yaml && kubectl get secrets
681 | ```
682 |
683 | - Create nginx_pod.yaml. The image name indicates where the image is pulled from. In addition, "imagePullSecrets" must specify which secret is used to pull the image from the local docker registry.
684 |
685 | ```
686 | apiVersion: v1
687 | kind: Pod
688 | metadata:
689 |   name: my-private-pod
690 | spec:
691 |   containers:
692 |   - name: private
693 |     image: 192.168.219.64:5000/nginx:latest
694 |   imagePullSecrets:
695 |   - name: registrypullsecret
696 | ```
697 |
698 | 
699 |
700 | - On **ALL nodes**, the registry IP and port must be defined:
701 |
702 | ```
703 | sudo nano /etc/containerd/config.toml # if containerd is used as the runtime; if Docker is used instead, add insecure-registries to /etc/docker/daemon.json as on the master
704 | # copy and paste (our IP: 192.168.219.64, change it to your IP):
705 | [plugins."io.containerd.grpc.v1.cri".registry]
706 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
707 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.219.64:5000"]
708 | endpoint = ["http://192.168.219.64:5000"]
709 | [plugins."io.containerd.grpc.v1.cri".registry.configs]
710 | [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.219.64:5000".tls]
711 | insecure_skip_verify = true
712 | # restart containerd.service
713 | sudo systemctl restart containerd.service
714 | ```
715 |
716 | 
717 |
718 |
719 | - If the registry IP and port are not defined, you will get this error: "http: server gave HTTP response to HTTPS client".
720 | - If the pod's status is ImagePullBackOff (Error), it can be inspected with the describe command:
721 |
722 | ```
723 | kubectl describe pods my-private-pod
724 | ```
725 |
726 | 
727 |
728 |
729 | - On Master:
730 |
731 | ```
732 | kubectl apply -f nginx_pod.yaml
733 | kubectl get pods -o wide
734 | ```
735 | 
736 |
737 | ## 7. NFS Server Connection for Persistent Volume
738 |
739 | - If an NFS server is required, you can create one:
740 | - if you have Windows 2019 Server: https://youtu.be/_x3vg25i7GQ
741 | - if you have Ubuntu: https://rudimartinsen.com/2022/01/05/nginx-nfs-kubernetes/
742 |
743 | - Run on ALL nodes to be able to reach the NFS server:
744 |
745 | ```
746 | sudo apt install nfs-common
747 | sudo apt install cifs-utils
748 | sudo mkdir /data # create /data directory under root and mount it to NFS
749 | sudo mount -t nfs <nfs-server-ip>:/share /data/ # the /share directory was created while setting up the NFS server
750 | sudo chmod 777 /data # give permissions to reach mounted shared area
751 | ```
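
- With the export mounted and reachable, a PersistentVolume can reference the NFS share directly (a minimal sketch; the server IP, path, and size are placeholders):

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: <nfs-server-ip>
    path: /share
```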
752 |
753 | ### Reference
754 |
755 | - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
756 | - https://github.com/aytitech/k8sfundamentals/tree/main/setup
757 | - https://multipass.run/
758 | - https://computingforgeeks.com/join-new-kubernetes-worker-node-to-existing-cluster/
759 | - https://docs.docker.com/engine/install/ubuntu/
760 | - https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
761 | - https://stackoverflow.com/questions/32726923/pulling-images-from-private-registry-in-kubernetes
762 | - https://stackoverflow.com/questions/65681045/adding-insecure-registry-in-containerd
763 |
--------------------------------------------------------------------------------