├── pod-service.yml
├── service.yml
├── name-space.yml
├── minikube-stepup-psa.md
├── eks-setup-psa.md
├── psa-Jenkins-Docker-K8S.md
└── Kubernetes-psa-notes.txt
/pod-service.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 |   name: testpod
6 |   labels:
7 |     app: demoapp
8 | spec:
9 |   containers:
10 |     - name: test
11 |       image: psait/pankajsiracademy:latest
12 |       ports:
13 |         - containerPort: 9090
14 | ...
15 |
--------------------------------------------------------------------------------
/service.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 |   name: testpod-service
6 | spec:
7 |   type: NodePort
8 |   selector:
9 |     app: demoapp        # This must match the Pod's label
10 |   ports:
11 |     - port: 80          # Exposed port for external access
12 |       targetPort: 9090  # Port on which the app is running inside the container
13 |       nodePort: 30080   # External port exposed on each node
14 | ...
--------------------------------------------------------------------------------
/name-space.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 |   name: psait-ns
6 | ---
7 | apiVersion: v1
8 | kind: Pod
9 | metadata:
10 |   name: testpod
11 |   namespace: psait-ns
12 |   labels:
13 |     app: demoapp
14 | spec:
15 |   containers:
16 |     - name: webappcontainer
17 |       image: psait/pankajsiracademy:latest
18 |       ports:
19 |         - containerPort: 9090
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 |   name: test-service
25 |   namespace: psait-ns
26 | spec:
27 |   type: NodePort
28 |   selector:
29 |     app: demoapp
30 |   ports:
31 |     - port: 80
32 |       targetPort: 9090
33 |       nodePort: 30070
34 | ...
--------------------------------------------------------------------------------
/minikube-stepup-psa.md:
--------------------------------------------------------------------------------
1 | ## Step-1 : Setup Linux VM
2 |
3 | 1) Login into AWS Cloud account
4 | 2) Create Linux VM with Ubuntu AMI (t2.medium or t3.medium)
5 | 3) Select Storage as 50 GB (Default is 8 GB only for Linux)
6 | 4) Launch the VM and connect to it using an SSH client
7 |
8 | ## Step-2 : Install Docker In Ubuntu VM
9 |
10 | ```
11 | sudo apt update
12 | curl -fsSL get.docker.com | /bin/bash
13 | sudo usermod -aG docker ubuntu
14 | exit
15 | ```
16 | ## Step-3 : Updating system packages and installing Minikube dependencies
17 |
18 | ```
19 | sudo apt update
20 | sudo apt install -y curl wget apt-transport-https
21 |
22 | ```
23 |
24 | ## Step-4 : Installing Minikube
25 |
26 | ```
27 | curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
28 |
29 | sudo install minikube-linux-amd64 /usr/local/bin/minikube
30 |
31 | minikube version
32 | ```
33 |
34 | ## Step-5 : Install Kubectl (Kubernetes Client)
35 |
36 | ```
37 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
38 | chmod +x kubectl
39 | sudo mv kubectl /usr/local/bin/
40 | kubectl version -o yaml
41 | ```
42 |
43 | ## Step-6 : Start MiniKube Server
44 |
45 | ```
46 | minikube start --driver=docker
47 | ```
48 |
49 | ## Step-7 : Check MiniKube status
50 |
51 | ```
52 | minikube status
53 | ```
54 |
55 | ## Step-8 : Access K8S Cluster
56 |
57 | ```
58 | kubectl cluster-info
59 | ```
60 |
61 | ## Step-9 : Access K8S Nodes
62 |
63 | ```
64 | kubectl get nodes
65 | ```
66 |
67 |
68 |
--------------------------------------------------------------------------------
/eks-setup-psa.md:
--------------------------------------------------------------------------------
1 | # Step - 1 : Create EKS Management Host in AWS #
2 |
3 | 1) Launch new Ubuntu VM using AWS Ec2 ( t2.micro )
4 | 2) Connect to machine and install kubectl using below commands
5 | ```
6 | curl -o kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/kubectl
7 | chmod +x ./kubectl
8 | sudo mv ./kubectl /usr/local/bin
9 | kubectl version --short --client
10 | ```
11 | 3) Install AWS CLI latest version using below commands
12 | ```
13 | sudo apt install unzip
14 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
15 | unzip awscliv2.zip
16 | sudo ./aws/install
17 | aws --version
18 | ```
19 |
20 | 4) Install eksctl using below commands
21 | ```
22 | curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
23 | sudo mv /tmp/eksctl /usr/local/bin
24 | eksctl version
25 | ```
26 | # Step - 2 : Create IAM role & attach to EKS Management Host #
27 |
28 | 1) Create New Role using IAM service ( Select Usecase - ec2 )
29 | 2) Add below permissions for the role
30 | - Administrator - access
31 |
32 | 3) Enter Role Name (eksrole)
33 | 4) Attach created role to EKS Management Host (Select EC2 => Click on Security => Modify IAM Role => attach IAM role we have created)
34 |
35 | # Step - 3 : Create EKS Cluster using eksctl #
36 | **Syntax:**
37 |
38 | eksctl create cluster --name cluster-name \
39 |        --region region-name \
40 |        --node-type instance-type \
41 |        --nodes-min 2 \
42 |        --nodes-max 2 \
43 |        --zones zone-1,zone-2
44 |
45 | ## N. Virginia:
46 | ```
47 | eksctl create cluster --name psait-cluster4 --region us-east-1 --node-type t2.medium --zones us-east-1a,us-east-1b
48 | ```
49 | ## Mumbai:
50 | ```
51 | eksctl create cluster --name psait-cluster4 --region ap-south-1 --node-type t2.medium --zones ap-south-1a,ap-south-1b
52 | ```
53 |
54 | ## After the cluster is created, we can check the nodes using the below command.
55 |
56 | ```
57 | kubectl get nodes
58 | ```
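
If kubectl cannot reach the cluster from the management host, the kubeconfig can be refreshed with the AWS CLI (cluster name and region below are taken from the Mumbai example; adjust to yours):

```
aws eks update-kubeconfig --name psait-cluster4 --region ap-south-1
kubectl get nodes
```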
59 |
60 | **Note: We should be able to see EKS cluster nodes here.**
61 |
62 | # We are done with our Setup #
63 |
64 | # Step - 4 : After your practice, delete the Cluster and other resources we have used in AWS Cloud to avoid billing #
65 |
66 | ```
67 | eksctl delete cluster --name psait-cluster4 --region ap-south-1
68 | ```
69 |
--------------------------------------------------------------------------------
/psa-Jenkins-Docker-K8S.md:
--------------------------------------------------------------------------------
1 | # Creating a CI/CD pipeline using the following tools
2 | 1) Maven
3 | 2) GitHub
4 | 3) Jenkins
5 | 4) Docker
6 | 5) Kubernetes
7 |
8 | # Step - 1 : Create EKS Management Host in AWS #
9 |
10 | 1) Launch new Ubuntu VM using AWS Ec2 ( t2.micro )
11 | 2) Connect to machine and install kubectl using below commands
12 | ```
13 | curl -o kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/kubectl
14 | chmod +x ./kubectl
15 | sudo mv ./kubectl /usr/local/bin
16 | kubectl version --short --client
17 | ```
18 | 3) Install AWS CLI latest version using below commands
19 | ```
20 | sudo apt install unzip
21 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
22 | unzip awscliv2.zip
23 | sudo ./aws/install
24 | aws --version
25 | ```
26 |
27 | 4) Install eksctl using below commands
28 | ```
29 | curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
30 | sudo mv /tmp/eksctl /usr/local/bin
31 | eksctl version
32 | ```
33 |
34 | # Step - 2 : Create IAM role & attach to EKS Management Host & Jenkins Server #
35 |
36 | 1) Create New Role using IAM service ( Select Usecase - ec2 )
37 | 2) Add below permissions for the role
38 | - IAM - fullaccess
39 | - VPC - fullaccess
40 | - EC2 - fullaccess
41 | - CloudFormation - fullaccess
42 | - Administrator - access
43 |
44 | 3) Enter Role Name (eksrole)
45 | 4) Attach created role to EKS Management Host (Select EC2 => Click on Security => Modify IAM Role => attach IAM role we have created)
46 | 5) Attach created role to Jenkins Machine (Select EC2 => Click on Security => Modify IAM Role => attach IAM role we have created)
47 |
48 | # Step - 3 : Create EKS Cluster using eksctl #
49 | **Syntax:**
50 |
51 | eksctl create cluster --name cluster-name \
52 |        --region region-name \
53 |        --node-type instance-type \
54 |        --nodes-min 2 \
55 |        --nodes-max 2 \
56 |        --zones zone-1,zone-2
57 |
58 | ```
59 | eksctl create cluster --name psait-cluster --region ap-south-1 --node-type t2.medium --zones ap-south-1a,ap-south-1b
60 | ```
61 |
62 |
63 | ```
64 | kubectl get nodes
65 | ```
66 |
67 | # Step-4 : Jenkins Server Setup in Linux VM #
68 |
69 | 1) Create Ubuntu VM using AWS EC2 (t2.medium)
70 | 2) Enable 8080 Port Number in Security Group Inbound Rules
71 | 3) Connect to VM using MobaXterm
72 | 4) Install Java
73 |
74 | ```
75 | sudo apt update
76 | sudo apt install fontconfig openjdk-17-jre
77 | java -version
78 | ```
79 |
80 | 5) Install Jenkins
81 | ```
82 | sudo wget -O /usr/share/keyrings/jenkins-keyring.asc \
83 | https://pkg.jenkins.io/debian-stable/jenkins.io-2023.key
84 | echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] \
85 | https://pkg.jenkins.io/debian-stable binary/ | sudo tee \
86 | /etc/apt/sources.list.d/jenkins.list > /dev/null
87 | sudo apt-get update
88 | sudo apt-get install jenkins
89 | ```
90 | 6) Start Jenkins
91 |
92 | ```
93 | sudo systemctl enable jenkins
94 | sudo systemctl start jenkins
95 | ```
96 |
97 | 7) Verify Jenkins
98 |
99 | ```
100 | sudo systemctl status jenkins
101 | ```
102 |
103 | 8) Open jenkins server in browser using VM public ip
104 |
105 | ```
106 | http://public-ip:8080/
107 | ```
108 |
109 | 9) Copy jenkins admin pwd
110 | ```
111 | sudo cat /var/lib/jenkins/secrets/initialAdminPassword
112 | ```
113 |
114 | 10) Create Admin Account & Install Required Plugins in Jenkins
115 |
116 |
117 | ## Step-5 : Configure Maven as Global Tool in Jenkins ##
118 | 1) Manage Jenkins -> Tools -> Maven Installation -> Add maven
119 |
120 | ## Step-6 : Setup Docker in Jenkins ##
121 | ```
122 | curl -fsSL get.docker.com | /bin/bash
123 | sudo usermod -aG docker jenkins
124 | sudo systemctl restart jenkins
125 | sudo docker version
126 | ```
127 | # Step - 7 : Install AWS CLI in JENKINS Server #
128 |
129 | URL : https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
130 |
131 | **Execute below commands to install AWS CLI**
132 | ```
133 | sudo apt install unzip
134 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
135 | unzip awscliv2.zip
136 | sudo ./aws/install
137 | aws --version
138 | ```
139 |
140 | # Step - 8 : Install Kubectl in JENKINS Server #
141 | **Execute below commands in Jenkins server to install kubectl**
142 |
143 | ```
144 | curl -o kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/kubectl
145 | chmod +x ./kubectl
146 | sudo mv ./kubectl /usr/local/bin
147 | kubectl version --short --client
148 | ```
149 |
150 | # Step - 9 : Update EKS Cluster Config File in Jenkins Server #
151 |
152 | 1) Execute the below command on the EKS Management host & copy the kube config file data
153 | $ cat .kube/config
154 |
155 | 2) Execute the below commands on the Jenkins Server and paste the kube config data (this is for the jenkins user)
156 | $ cd /var/lib/jenkins
157 | $ sudo mkdir .kube
158 | $ sudo vi .kube/config
159 |
160 | 3) Execute the below commands on the Jenkins Server and paste the kube config data for the ubuntu user as well, to check the EKS cluster info from the shell
161 | $ cd ~
162 | $ ls -la
163 | $ sudo vi .kube/config
164 |
165 | 4) Check EKS nodes
166 | $ kubectl get nodes
167 |
168 | **Note: We should be able to see EKS cluster nodes here.**
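
To confirm that the jenkins user itself can reach the cluster (a quick check, assuming the kubeconfig was copied to /var/lib/jenkins/.kube/config as above):

```
sudo -u jenkins kubectl get nodes
```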
169 |
170 | # Step - 10 : Create Jenkins CI CD Job #
171 |
172 | - **Stage-1 : Clone Git Repo**
173 | - **Stage-2 : Maven Build**
174 | - **Stage-3 : Create Docker Image**
175 | - **Stage-4 : Push Docker Image to Registry**
176 | - **Stage-5 : Deploy app in k8s eks cluster**
177 |
178 | ```
179 | pipeline {
180 |     agent any
181 |
182 |     tools {
183 |         maven "Maven-3.9.9"
184 |     }
185 |
186 |     stages {
187 |         stage('Clone Repo') {
188 |             steps {
189 |                 git branch: 'main', url: 'https://github.com/pankajmutha14/docker-test.git'
190 |             }
191 |         }
192 |         stage('Maven Build') {
193 |             steps {
194 |                 sh 'mvn clean package'
195 |             }
196 |         }
197 |         stage('Docker Image') {
198 |             steps {
199 |                 sh 'docker build -t psait/pankajsiracademy:latest .'
200 |             }
201 |         }
202 |         stage('k8s deployment') {
203 |             steps {
204 |                 sh 'kubectl apply -f k8s-deploy.yml'
205 |             }
206 |         }
207 |     }
208 | }
209 |
210 | ```
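
The stage list above mentions pushing the Docker image to a registry, but the pipeline skips that stage. A hedged sketch of such a stage, assuming a Jenkins username/password credential with the placeholder ID `dockerhub-creds` (create it first under Manage Jenkins -> Credentials):

```
stage('Push Docker Image') {
    steps {
        // 'dockerhub-creds' is a placeholder credentials ID, not part of the original notes
        withCredentials([usernamePassword(credentialsId: 'dockerhub-creds',
                                          usernameVariable: 'DOCKER_USER',
                                          passwordVariable: 'DOCKER_PASS')]) {
            sh 'echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin'
            sh 'docker push psait/pankajsiracademy:latest'
        }
    }
}
```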
211 |
212 | # Step - 11 : Access Application in Browser #
213 | - **We should be able to access our application**
214 | URL : http://LBR/context-path/
215 |
216 |
217 |
218 |
--------------------------------------------------------------------------------
/Kubernetes-psa-notes.txt:
--------------------------------------------------------------------------------
1 | ##################################
2 | Docker & Kubernetes Overview
3 | ##################################
4 |
5 | What is Docker?
6 |
7 | Docker is a free and open-source containerization software that allows you to package applications along with their dependencies into a single unit called a Docker Image. This image can run on any system that has Docker installed, making deployment easy and consistent across different environments.
8 |
9 | Why use Docker?
10 |
11 | ✅ Portability – Run the same application on any machine, regardless of OS configuration.
12 | ✅ Dependency Management – Ensures that all required software (e.g., libraries, databases, and runtimes) is included within the image.
13 | ✅ Fast Deployment – No need to manually install dependencies every time you set up a new environment.
14 | ✅ Resource Efficiency – Uses fewer resources compared to traditional virtual machines.
15 |
16 | ######################
17 | How Docker Works?
18 | ######################
19 | Create a Docker Image – Package the app code + dependencies into a lightweight container image.
20 |
21 | Run the Docker Container – Deploy this image as a container using the docker run command.
22 |
23 | Execute Anywhere – The same image runs on any machine with Docker installed.
24 |
25 | Once the image is built, it can run on any machine without requiring additional software setup.
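
A minimal sketch of that flow, reusing the image name and port that appear later in these notes (assumes a Dockerfile in the current directory):

# Build the image from the Dockerfile in the current directory
docker build -t psait/pankajsiracademy:latest .

# Run the image as a container, mapping host port 9090 to the app's port 9090
docker run -d --name demoapp -p 9090:9090 psait/pankajsiracademy:latest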
26 | ____________________________________________________________________________________________________________________
27 |
28 | ###############################################
29 | Kubernetes (Container Orchestration Software)
30 | ###############################################
31 |
32 | What is Kubernetes?
33 |
34 | Kubernetes (K8s) is a free and open-source orchestration tool developed by Google to manage containerized applications.
35 |
36 | 💡 Orchestration = Managing multiple containers efficiently
37 |
38 | Kubernetes automates key tasks like:
39 |
40 | 1. Creating, starting, and stopping containers.
41 |
42 | 2. Scaling up/down based on demand.
43 |
44 | 3. Handling failures automatically.
45 |
46 |
47 | ######################
48 | Why use Kubernetes?
49 | ######################
50 |
51 | ✅ Orchestration – Efficiently manages multiple containers across a cluster of machines.
52 | ✅ Self-Healing – If a container crashes, Kubernetes automatically replaces it.
53 | ✅ Load Balancing – Distributes traffic across multiple containers to avoid overloading.
54 | ✅ Auto Scaling – Increases or decreases the number of running containers based on traffic load.
55 | ✅ Automated Deployments – Supports CI/CD for rolling updates and version control.
56 |
57 | Kubernetes Advantages (Detailed Explanation)
58 | 1) Orchestration – Managing Containers
59 | Kubernetes helps manage multiple Docker containers across different machines (nodes) efficiently.
60 |
61 | 🔹 Instead of running docker run manually for each container, Kubernetes automates deployment.
62 | 🔹 It ensures that all containers are running smoothly and adjusts their status as needed.
63 |
64 | Note: Kubernetes ensures all these containers are running, healthy, and communicating with each other properly.
65 |
66 | 2) Self-Healing – Automatic Recovery
67 | If a container crashes due to an error or system failure, Kubernetes automatically restarts a new instance.
68 |
69 | 📌 Example:
70 |
71 | A web server container (Apache, Nginx) stops unexpectedly.
72 |
73 | Kubernetes detects the failure and starts a new container to replace it.
74 |
75 | Users never notice downtime.
76 |
77 | 3) Load Balancing – Distributes Traffic Efficiently
78 | Kubernetes distributes incoming user requests across multiple containers to avoid overloading any single instance.
79 |
80 | 📌 Example:
81 |
82 | A shopping website experiences high traffic during a sale.
83 |
84 | Kubernetes ensures that requests are evenly distributed across available backend servers.
85 |
86 | Prevents server crashes and ensures smooth performance.
87 |
88 | 4) Auto Scaling – Adjusting Resources Dynamically
89 | Kubernetes can increase or decrease the number of containers automatically based on traffic load.
90 |
91 | 📌 Example:
92 |
93 | If website traffic increases, Kubernetes adds more containers to handle the load.
94 |
95 | If traffic reduces, Kubernetes removes extra containers to save resources.
96 |
97 | Works similarly to cloud-based Auto Scaling Groups (ASG).
98 |
99 | Conclusion
100 | 🚀 Docker simplifies packaging applications into portable containers.
101 | 🚀 Kubernetes ensures these containers are orchestrated, scalable, and reliable.
102 |
103 | Together, Docker & Kubernetes enable modern cloud-native application deployment—making applications highly available, efficient, and automated.
104 |
105 | #########################################################
106 | Kubernetes (K8s) Architecture - Explained in Detail
107 | #########################################################
108 |
109 | 1) Control Plane (Master Node/Control Node)
110 | -> The control plane is responsible for managing the Kubernetes cluster. It includes the following components:
111 |
112 | 1. API Server: Receives requests from kubectl and manages cluster operations.
113 |
114 | 2. Scheduler: Identifies pending tasks in ETCD and assigns them to worker nodes.
115 |
116 | 3. Controller Manager: Ensures the cluster’s desired state matches the actual state.
117 |
118 | 4. ETCD: A distributed key-value store that acts as Kubernetes' internal database.
119 |
120 | 2) Worker Nodes (Slave Nodes)
121 | -> Worker nodes run application workloads. They include the following components:
122 |
123 | 1. Kubelet: A node agent that communicates with the control plane and manages containers.
124 |
125 | 2. Kube Proxy: Manages networking and ensures communication within the cluster.
126 |
127 | 3. Docker Engine: Runs and manages containerized applications.
128 |
129 | 4. Pod: The smallest deployable unit in Kubernetes, housing one or more containers.
130 |
131 | 5. Container: Runs inside a Pod and contains the application code.
132 |
133 | ####################################
134 | Explanation of K8s Working
135 | ####################################
136 |
137 | Step 1: To deploy an application, we interact with the control plane using the kubectl CLI.
138 |
139 | Step 2: The API Server receives the request and stores it in ETCD with a pending status.
140 |
141 | Step 3: The Scheduler identifies an available worker node to execute the task, using Kubelet for node management.
142 |
143 | Step 4: The Kubelet ensures the worker node is running the assigned workload.
144 |
145 | Step 5: The Kube Proxy manages networking for seamless cluster communication.
146 |
147 | Step 6: The Controller Manager continuously monitors the cluster to ensure tasks run correctly.
148 |
149 | ##########
150 | Note:
151 | ##########
152 |
153 | -> A cluster in Kubernetes (K8s) refers to a group of servers (nodes) that work together to run containerized applications. It consists of:
154 |
155 | a. Control Plane (Master Node) – Manages and controls the cluster.
156 |
157 | b. Worker Nodes (Slave Nodes) – Run application workloads inside containers.
158 |
159 | ###################################
160 | Kubernetes (K8s) Cluster Setup
161 | ####################################
162 |
163 | A Kubernetes Cluster is a group of servers working together to run containerized applications. It can be set up in two main ways:
164 |
165 | A Kubernetes Cluster = Control Plane + Worker Nodes + Pods + Resources + Networking + Storage
166 |
167 | 1) Self-Managed Cluster
168 | In this setup, we manually install and manage Kubernetes on our own infrastructure.
169 |
170 | a) MiniKube (Single Node)
171 | -> Runs a single-node cluster on a local machine.
172 | -> Best for learning and practicing Kubernetes concepts.
173 | -> Not suitable for production as it lacks high availability and scalability.
174 |
175 | b) Kubeadm (Multi-Node)
176 | -> A tool for setting up a multi-node cluster manually.
177 | -> Requires configuring the control plane, worker nodes, and networking.
178 | -> Gives full control over the cluster but requires deep Kubernetes expertise.
179 | -> Used for on-premise or customized Kubernetes deployments.
180 |
181 | 2) Cloud Provider-Managed Cluster (Pre-configured, ready-to-use)
182 | Cloud providers offer fully managed Kubernetes services, where they handle cluster maintenance, updates, and availability.
183 |
184 | a) AWS EKS (Elastic Kubernetes Service)
185 | -> A managed Kubernetes service on Amazon Web Services.
186 |
187 | b) Azure AKS (Azure Kubernetes Service)
188 | -> Microsoft Azure’s managed Kubernetes offering.
189 |
190 |
191 | c) GCP GKE (Google Kubernetes Engine)
192 | -> Google Cloud’s fully managed Kubernetes solution.
193 |
194 | #########################
195 | MiniKube Setup
196 | #######################
197 |
198 | Step-1 : Setup Linux VM
199 |
200 | Login into AWS Cloud account
201 | Create Linux VM with Ubuntu AMI - t2.medium
202 | Select Storage as 50 GB or more with 2 vCPU required minimum(Default is 8 GB only for Linux)
203 | Create Linux VM and connect to it using SSH Client
204 |
205 |
206 | Step-2 : Install Docker In Ubuntu VM
207 |
208 | sudo apt update
209 | curl -fsSL get.docker.com | /bin/bash
210 | sudo usermod -aG docker ubuntu
211 | exit
212 |
213 | Step-3 : Updating system packages before installing Minikube dependencies
214 |
215 | sudo apt update
216 | sudo apt install -y curl wget apt-transport-https
217 |
218 | Step-4 : Installing Minikube
219 |
220 | curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
221 | sudo install minikube-linux-amd64 /usr/local/bin/minikube
222 | minikube version
223 |
224 | Step-5 : Install Kubectl (Kubernetes Client)
225 |
226 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
227 | chmod +x kubectl
228 | sudo mv kubectl /usr/local/bin/
229 | kubectl version -o yaml
230 |
231 | Step-6 : Start MiniKube Server
232 |
233 | minikube start --driver=docker
234 |
235 | Step-7 : Check MiniKube status
236 |
237 | minikube status
238 |
239 | Step-8 : Access K8S Cluster
240 |
241 | kubectl cluster-info
242 |
243 | Step-9 : Access K8S Nodes
244 |
245 | kubectl get nodes
246 |
247 |
248 | $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Setup Completed $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
249 |
250 | #####################################
251 | 🚀 What is a POD in Kubernetes?
252 | #####################################
253 |
254 | ✅ Key Concepts Explained:
255 | -----------------------------
256 | 1. If you deploy an app, it will ultimately run inside one or more Pods. The Pod is the building block for running the apps we deploy in K8s.
257 |
258 | 2. "Applications will be deployed as PODS in k8s."
259 | Your app (e.g., a Spring Boot API) will be containerized using Docker. This container will then be wrapped inside a Pod and deployed on the cluster.
260 |
261 | 3. "To create PODS we will use Docker images."
262 | A Pod runs one or more containers (usually one), and each container uses a Docker image. You can build a Docker image of your app and then deploy it inside a Pod.
263 |
264 | 4. "To create PODS we will use Manifest YML file."
265 | A YAML manifest file defines the configuration for the Pod (or other resources like Deployments).
266 |
267 | It includes:
268 |
269 | a. The name of the Pod
270 |
271 | b. The image to use
272 |
273 | c. Ports to expose
274 |
275 | d. Environment variables, etc.
276 |
277 |
278 | 5. "Create multiple PODS."
279 | The same image (e.g., myapp:latest) can be used to create many Pods. This is how you scale your application—running multiple copies (Pods) to handle more traffic.
280 |
281 | 6. "If we run application with multiple pods then Load Balancing can be performed resulting in 99.9% uptime of the application."
282 | High Availability: If one Pod crashes, others are still running, so your app stays available. Load Balancing: Kubernetes distributes traffic across Pods using a Service (like a load balancer).
283 |
284 | 7. "PODS count will be increased and decreased based on the demand (scalability)."
285 | Kubernetes supports auto-scaling. You can scale Pods manually or automatically using a Horizontal Pod Autoscaler (HPA). Based on CPU/memory usage or custom metrics, Kubernetes will increase/decrease the number of Pods.
286 |
287 |
288 | ###############################
289 | 🚀 Kubernetes Services
290 | ###############################
291 |
292 | -> A Kubernetes Service is used to expose a group of Pods so that they can be accessed reliably. Since Pods can be created and destroyed at any time (with changing IPs), a Service gives them a stable network identity.
293 |
294 | ---------------------------
295 | 🧭 Why Do We Use Services?
296 | ---------------------------
297 |
298 | -> Pods are short-lived and can crash or restart.
299 |
300 | -> Each time a Pod is created, it gets a new IP address.
301 |
302 | -> Directly accessing Pods via IP is not reliable.
303 |
304 | -> A Service gives a static IP to a group of Pods.
305 |
306 | ---------------------------------
307 | 🌐 Types of Kubernetes Services
308 | ---------------------------------
309 |
310 | Kubernetes offers different types of services depending on how and where you want to expose your Pods:
311 |
312 | 🔹 1. ClusterIP (Default)
313 | 🔸 2. NodePort
314 | 🔹 3. LoadBalancer
315 |
316 | -------------------------------------------------------
317 | 🔐 ClusterIP Service (Internal Access Only)
318 | -------------------------------------------------------
319 |
320 | 📌 Key Points:
321 | -> Pods are short-lived objects; if one crashes, Kubernetes replaces it with a new Pod.
322 | -> Every new Pod gets a different IP address.
323 |
324 | Note: 🛑 Never rely on Pod IPs to access an application.
325 |
326 | -> A ClusterIP Service groups multiple Pods and assigns them a single static IP.
327 |
328 | -> This static IP allows other components inside the cluster to access the group of Pods reliably, even when individual Pods change.
329 |
330 | ---------------------
331 | 🚫 Access Scope:
332 | ---------------------
333 |
334 | -> Only accessible within the Kubernetes cluster.
335 |
336 | -> Not reachable from the outside world (internet or external clients).
337 |
338 | ----------------
339 | 💡 Use Case:
340 | ----------------
341 | -> Internal services such as databases, backend APIs, authentication services, etc.
342 |
343 | Example: You don’t want to expose a database Pod to the internet, so you use a ClusterIP service to allow access only from other internal Pods.
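
A minimal ClusterIP manifest sketch for that use case (the names backend-db / app: backend-db and port 5432 are illustrative placeholders, not from these notes):

---
apiVersion: v1
kind: Service
metadata:
  name: backend-db            # hypothetical internal service name
spec:
  type: ClusterIP             # default type; reachable only inside the cluster
  selector:
    app: backend-db           # must match the database Pod's label
  ports:
    - port: 5432              # port other Pods use to reach the service
      targetPort: 5432        # port the database container listens on
...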
344 |
345 | ----------------------------------------------------
346 | 🌐 What is a NodePort Service in Kubernetes?
347 | ----------------------------------------------------
348 | -> A NodePort service is a type of Kubernetes Service that exposes your Pods outside the cluster using a port on each worker node (called a "NodePort").
349 |
350 | 🧭 Why Use NodePort?
351 | ------------------------------------
352 | By default, Pods and ClusterIP services are only accessible within the cluster.
353 |
354 | NodePort makes them accessible externally by opening a static port (from 30000 to 32767) on each worker node.
355 |
356 | It allows you to access your application using:
357 |
358 | http://<node-ip>:<node-port>
359 |
360 |
361 | Note: Here all traffic is routed through the one worker node whose IP you use, which means load balancing across nodes cannot happen here.
362 |
363 | -----------------------------------------------------
364 | 🌐 What is a LoadBalancer Service in Kubernetes?
365 | -----------------------------------------------------
366 |
367 | -> It not only provides external access to your app but also handles automatic traffic distribution across the backend Pods running on different worker nodes.
368 |
369 |
370 | ##############################################
371 | 📄 What is a Kubernetes Manifest YAML?
372 | ##############################################
373 |
374 | -> Think of it as an instruction manual for Kubernetes to create and manage resources.
375 |
376 | 🧱 Main Sections of a Manifest YAML
377 | -----------------------------------
378 | Here are the 4 main parts:
379 |
380 | apiVersion: # API version to use
381 | kind: # Type of resource (Pod, Service, Deployment, etc.)
382 | metadata: # Metadata like name, labels
383 | spec: # Specification of what the resource should do
384 |
385 | -------------------------------
386 | 🧪 Example: Pod Manifest YAML
387 | Let’s look at a simple Pod definition:
388 | -------------------------------
389 |
390 | ---
391 | apiVersion: v1
392 | kind: Pod
393 | metadata:
394 |   name: testpod
395 |   labels:
396 |     app: dempapp
397 | spec:
398 |   containers:
399 |     - name: test
400 |       image: psait/pankajsiracademy:latest
401 |       ports:
402 |         - containerPort: 9090
403 | ...
404 |
405 | -------------------
406 | Explanation
407 | -------------------
408 |
409 | apiVersion: v1
410 | Tells Kubernetes to use version v1 of the API.
411 |
412 | Since you are creating a Pod, this is the correct API version.
413 |
414 | kind: Pod
415 | Defines the type of resource you want to create.
416 |
417 | In this case, it’s a Pod, which is the smallest and simplest unit in Kubernetes.
418 |
419 | metadata:
420 | Metadata gives Kubernetes basic info about your Pod.
421 |
422 | name: testpod
423 | This is the name of your Pod.
424 |
425 | You’ll use this name to check logs or status (e.g., kubectl get pod testpod).
426 |
427 | labels:
428 | Labels are key-value pairs to categorize and group Kubernetes objects.
429 |
430 | app: dempapp is a label to help identify this Pod as part of the "dempapp" application.
431 |
432 | spec:
433 | This section defines what’s inside the Pod.
434 |
435 | containers:
436 | A Pod can contain one or more containers. You're defining one container here.
437 |
438 | - name: test
439 | This is the name of the container inside the Pod (not the Pod itself).
440 |
441 | image: psait/pankajsiracademy:latest
442 | This is the Docker image used to create the container.
443 |
444 | It will pull the latest version of psait/pankajsiracademy from Docker Hub or another registry.
445 |
446 | ⚠️ Make sure the image exists and is accessible (public or with correct credentials).
447 |
448 | ports:
449 | This tells Kubernetes the container is listening on port 9090 inside the Pod.
450 |
451 | containerPort: 9090
452 | This is the internal port your application is running on.
453 |
454 | Kubernetes can use this for things like service routing, health checks, etc.
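
For a quick local test against that container port (without any Service), kubectl can forward a local port to the Pod:

# forward local port 9090 to port 9090 inside testpod, then test it
kubectl port-forward pod/testpod 9090:9090
curl http://localhost:9090/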
455 |
456 | #############
457 | Commands:
458 | ######################
459 |
460 | Note: Save the above content in a .yml file
461 |
462 | # execute manifest yml
463 | kubectl apply -f <file-name>.yml
464 |
465 | # check pods
466 | kubectl get pods
467 |
468 | # check pod logs
469 | kubectl logs <pod-name>
470 |
471 | # Describe pod
472 | kubectl describe pod <pod-name>
476 |
477 | ---------------------------------------------
478 | K8s Service Manifest YAML (for your Pod)
479 | ------------------------------------------
480 |
481 | ---
482 | apiVersion: v1
483 | kind: Service
484 | metadata:
485 |   name: testpod-service
486 | spec:
487 |   type: NodePort
488 |   selector:
489 |     app: dempapp        # This must match the Pod's label
490 |   ports:
491 |     - port: 80          # Exposed port for external access
492 |       targetPort: 9090  # Port on which the app is running inside the container
493 |       nodePort: 30080   # External port exposed on each node
494 |
495 |
496 | 💡 Explanation:
497 | ----------------------
498 | name: testpod-service – The name of the service.
499 |
500 | type: NodePort – Exposes the Pod outside the cluster.
501 |
502 | selector.app: dempapp – This matches the label of your Pod, so the service knows which Pod(s) to route to.
503 |
504 | port: 80 – The port used when calling the service. Port 80 is the default port for HTTP traffic.
505 |
506 | It is the natural choice when you're fronting a web server like Nginx, Apache, or similar.
507 |
508 | targetPort: 9090 – The port your container app actually listens on.
509 |
510 | nodePort: 30080 – External port accessible via http://<node-ip>:30080
511 |
512 | Commands
513 | ___________
514 |
515 | # 🔍 Check existing services
516 | kubectl get svc
517 |
518 | # 📦 Create the service using the YAML
519 | kubectl apply -f testpod-service.yml
520 |
521 | # 🔁 Verify that the service is created
522 | kubectl get svc
523 |
524 | # 🚪 Open service in browser (Minikube only)
525 | minikube service testpod-service
526 |
527 | # Test
528 | #Get minikube ip address
529 |
530 | Test this from the same local network
531 |
532 | curl http://<minikube-ip>:30080/
533 | curl http://192.168.49.2:30080/
534 |
535 |
536 |
537 | Part                  Meaning
538 | curl                  A tool to make HTTP requests from the command line. It's often used to test whether a URL is reachable and what it returns.
539 | http://192.168.49.2   The IP address of the Minikube VM. It's the entry point into your Kubernetes cluster from your host machine. You found this IP using minikube ip.
540 | :30080                The NodePort exposed by your Kubernetes service (testpod-service). It forwards external requests to the internal Pod's port (9090 in your case).
541 | /                     The path of the URL. Since it's just a /, it hits the root endpoint of your Spring Boot app.
542 |
543 |
544 | #################################################
545 | note: How to delete pod and services
546 |
547 | kubectl delete pod testpod
548 | kubectl delete svc testpod-service
549 |
550 | kubectl apply -f pod-01.yml
551 | kubectl apply -f service-01.yml
552 | ############################################
553 |
554 | #############
555 | Stop complete minikube
556 | #################
557 |
558 | -> minikube stop
559 | -> minikube delete
560 | -> minikube status
561 |
562 | #####################
563 | To see all resources running
564 | ####################
565 |
566 | -> kubectl get all
567 |
568 | ########################
569 | To delete all resources use
570 | ########################
571 | -> kubectl delete all --all
572 |
573 |
574 | #######################################
575 | What are name spaces in k8s?
576 | #######################################
577 |
578 | -> They help logically group and isolate resources. Just like how we create folders to isolate our work in computers.
579 |
580 | ------------------
581 | Example:
582 | -----------------
583 |
584 | database-ns = all database-related stuff
585 |
586 | backend-ns = for backend applications
587 |
588 | Note: If we do not specify a namespace, K8s will automatically use the default namespace
589 |
590 | #####################
591 | Commands
592 | ###################
593 |
594 | list all name spaces
595 | ----------------------
596 | -> kubectl get ns
597 |
598 | list all pods in given name space
599 | -----------------------------------
600 | -> kubectl get pod -n <namespace>
601 |
602 | #############################################
603 | How to create name space in k8s
604 | #############################################
605 |
606 | 1. Using kubectl command-
607 | kubectl create namespace backend-ns
608 |
609 | 2.using manifest yml file
610 |
611 |
612 | ---
613 | apiVersion: v1
614 | kind: Namespace
615 | metadata:
616 |   name: backend-ns
617 | ...
618 |
619 | # execute manifest yml
620 | kubectl apply -f <file-name>.yml
621 |
622 | # get all resources belongs to backend-ns namespace
623 | kubectl get all -n backend-ns
624 |
625 | #get all pods in kube-system
626 | kubectl get pods -n kube-system
627 |
628 | #get all worker nodes
629 | kubectl get nodes
630 |
631 | #delete namespace - all resources related to it will be deleted
632 | kubectl delete ns backend-ns
633 |
634 | #Open tunnel
635 | minikube service <service-name> -n <namespace>
636 |
637 |
638 |
639 | ############################
640 | Namespace with POD with Service creation yml file
641 | ##############################
642 | ---
643 | apiVersion: v1
644 | kind: Namespace
645 | metadata:
646 |   name: backend-ns
647 | ---
648 | apiVersion: v1
649 | kind: Pod
650 | metadata:
651 |   name: testpod
652 |   namespace: backend-ns
653 |   labels:
654 |     app: dempapp
655 | spec:
656 |   containers:
657 |     - name: test
658 |       image: psait/pankajsiracademy:latest
659 |       ports:
660 |         - containerPort: 9090
661 | ---
662 | apiVersion: v1
663 | kind: Service
664 | metadata:
665 |   name: testpod-service
666 |   namespace: backend-ns
667 | spec:
668 |   type: NodePort
669 |   selector:
670 |     app: dempapp        # This must match the Pod's label
671 |   ports:
672 |     - port: 80          # Exposed port for external access
673 |       targetPort: 9090  # Port on which the app is running inside the container
674 |       nodePort: 30080   # External port exposed on each node
675 | ...
676 |
677 |
678 | #########################################
679 | k8S Resources
680 | ########################################
681 |
682 | -> When you create a Pod directly using kind: Pod, Kubernetes does not manage its lifecycle — if it crashes or is deleted, it's gone forever unless recreated manually.
683 |
684 | -> K8s controller resources manage the Pod lifecycle
685 |
686 | -> To let Kubernetes manage, restart, and scale Pods, we use higher-level controllers like the ones listed below.
687 |
688 | 🔁 1) ReplicationController (RC)
689 | 🔁 2) ReplicaSet (RS)
690 | 🚀 3) Deployment
691 | 🛰 4) DaemonSet
692 | 💾 5) StatefulSet
693 |
694 | #####################################
695 | 📦 What is ReplicationController (RC)?
696 | A Kubernetes resource used to manage the lifecycle of Pods.
697 |
698 | Ensures a specified number of Pods are always running.
699 |
700 | Provides self-healing — if a Pod crashes or is deleted, RC will recreate it.
701 |
702 | manifest yml file
703 |
704 | ---
705 | apiVersion: v1
706 | kind: ReplicationController
707 | metadata:
708 |   name: webapp
709 | spec:
710 |   replicas: 3
711 |   selector:
712 |     app: dempapp
713 |   template:
714 |     metadata:
715 |       name: testpod
716 |       labels:
717 |         app: dempapp
718 |     spec:
719 |       containers:
720 |         - name: webappcontainer
721 |           image: psait/pankajsiracademy:latest
722 |           ports:
723 |             - containerPort: 9090
724 | ...
725 |
726 | kubectl apply -f rc.yml
727 |
728 | ########################################################
729 |
730 | kubectl get all
731 |
732 | kubectl get pods
733 |
734 | kubectl delete pod <pod-name>
735 |
736 | kubectl get pods
737 |
738 | kubectl scale rc webapp --replicas=5
739 |
740 | kubectl scale rc webapp --replicas=1
741 |
742 | ##############################################
743 |
744 | ✅ ReplicaSet in Kubernetes
745 | ===================
746 | 🔹 What is a ReplicaSet?
747 | A ReplicaSet (RS) is a Kubernetes resource that ensures a specified number of identical Pods are running at any given time.
748 |
749 | 💡 Key Features:
750 | Self-healing: If a Pod crashes or is manually deleted, the RS will automatically create a new Pod to maintain the desired number.
751 |
752 | Scaling: You can increase or decrease the number of replicas (Pods) easily.
753 |
754 | Selector-based matching: RS manages only those Pods that match its label selector.
755 |
756 | Example:
757 |
758 | # -------- Manually created Pod with label app: myapp --------
759 | apiVersion: v1
760 | kind: Pod
761 | metadata:
762 |   name: myapp-pod
763 |   labels:
764 |     app: myapp
765 | spec:
766 |   containers:
767 |     - name: myappcontainer
768 |       image: nginx
769 |       ports:
770 |         - containerPort: 80
771 |
772 | ---
773 | # -------- Manually created Pod with label app: dempapp --------
774 | apiVersion: v1
775 | kind: Pod
776 | metadata:
777 |   name: dempapp-pod
778 |   labels:
779 |     app: dempapp
780 | spec:
781 |   containers:
782 |     - name: dempappcontainer
783 |       image: nginx
784 |       ports:
785 |         - containerPort: 80
786 |
787 | ---
788 | # -------- ReplicaSet that manages both app: dempapp and app: myapp --------
789 | apiVersion: apps/v1
790 | kind: ReplicaSet
791 | metadata:
792 |   name: rs-webapp
793 | spec:
794 |   replicas: 2
795 |   selector:
796 |     matchExpressions:
797 |       - key: app
798 |         operator: In
799 |         values:
800 |           - dempapp
801 |           - myapp
802 |   template:
803 |     metadata:
804 |       labels:
805 |         app: dempapp   # Pods created by RS will have this label
806 |     spec:
807 |       containers:
808 |         - name: webappcontainer
809 |           image: nginx
810 |           ports:
811 |             - containerPort: 80
812 | -----------------------------------------------------------------------------------------------------
813 |
814 |
815 | replica-set.yml for practicals
816 | ------------------
817 |
818 | ---
819 | apiVersion: apps/v1
820 | kind: ReplicaSet
821 | metadata:
822 |   name: webapp
823 | spec:
824 |   replicas: 3
825 |   selector:
826 |     matchLabels:
827 |       app: dempapp   # Must match pod template labels
828 |   template:
829 |     metadata:
830 |       labels:
831 |         app: dempapp
832 |     spec:
833 |       containers:
834 |         - name: webappcontainer
835 |           image: psait/pankajsiracademy:latest
836 |           ports:
837 |             - containerPort: 9090
838 |
839 | ---
840 | apiVersion: v1
841 | kind: Service
842 | metadata:
843 |   name: webappservice
844 | spec:
845 |   type: NodePort
846 |   selector:
847 |     app: dempapp   # Must match pod labels
848 |   ports:
849 |     - port: 80
850 |       targetPort: 9090
851 |       nodePort: 30095
852 | ######################################
853 | ✅ Deployment in Kubernetes
854 | ######################################
855 |
856 | Feature              ReplicaSet    Deployment
857 | Manages Pods         ✅ Yes        ✅ Yes (via ReplicaSet)
858 | Rolling Updates      ❌ No         ✅ Yes
859 | Rollbacks            ❌ No         ✅ Yes
860 | YAML Kind            ReplicaSet    Deployment
861 | Use in Real World    Rare          Very Common
862 |
863 | A Deployment in Kubernetes is one of the most recommended and used resources for managing Pod lifecycles. It ensures reliable application deployment with features like zero downtime, auto-scaling, rolling updates, and rollback capabilities.
864 |
865 | 🎯 Key Advantages of Using Deployments
866 | Zero Downtime: Deployments ensure high availability by using strategies like rolling updates. Even when pods are being updated, the service remains available to users.
867 |
868 | Auto Scaling: With Kubernetes Horizontal Pod Autoscaler, you can automatically scale your Pods based on CPU or memory usage (or other custom metrics).
869 |
870 | Rolling Update & Rollback: Kubernetes allows rolling updates, which means it will gradually update Pods one by one, ensuring the application remains available throughout the process. If something goes wrong during the update, you can rollback to a previous stable version.
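
A few of the kubectl rollout commands behind this (using the Deployment name webapp from the manifest below):

kubectl rollout status deployment/webapp     # watch a rolling update finish
kubectl rollout history deployment/webapp    # list previous revisions
kubectl rollout undo deployment/webapp       # roll back to the previous revision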
871 |
872 | 👨💻 When to Choose Which Strategy?
873 | Use RollingUpdate:
874 |
875 | For most production workloads where high availability and zero downtime are required.
876 |
877 | When you are gradually releasing new versions of your application and want to avoid service disruption.
878 |
879 | Use Recreate:
880 |
881 | For non-critical applications or during maintenance windows where it's okay to have a temporary outage.
882 |
883 | When you need to clear everything and redeploy fresh Pods (e.g., clearing persistent state or major upgrades).
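
A minimal sketch of how the strategy is declared inside the Deployment spec (the maxSurge/maxUnavailable values here are illustrative, not from these notes):

spec:
  replicas: 3
  strategy:
    type: RollingUpdate          # or: Recreate
    rollingUpdate:
      maxSurge: 1                # extra Pods allowed above the desired count during an update
      maxUnavailable: 1          # Pods allowed to be unavailable during an update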
884 |
885 | ############################
886 | deployment-service.yml
887 | #########################
888 | ---
889 | apiVersion: apps/v1
890 | kind: Deployment
891 | metadata:
892 |   name: webapp
893 | spec:
894 |   replicas: 3
895 |   selector:
896 |     matchLabels:
897 |       app: dempapp   # Must match pod template labels
898 |   template:
899 |     metadata:
900 |       labels:
901 |         app: dempapp
902 |     spec:
903 |       containers:
904 |         - name: webappcontainer
905 |           image: psait/pankajsiracademy:latest
906 |           ports:
907 |             - containerPort: 9090
908 | ---
909 | apiVersion: v1
910 | kind: Service
911 | metadata:
912 |   name: webappservice
913 | spec:
914 |   type: NodePort
915 |   selector:
916 |     app: dempapp   # Must match pod labels
917 |   ports:
918 |     - port: 80
919 |       targetPort: 9090
920 |       nodePort: 30095
921 |
922 | #################################################################################################################
923 |
924 | 🔧 1. Deployment
925 |
926 | apiVersion: apps/v1
927 | Uses the apps/v1 API to define a Deployment.
928 |
929 |
930 | kind: Deployment
931 | Declares that this is a Deployment (used to manage pods).
932 |
933 |
934 | metadata:
935 |   name: webapp
936 | Assigns the Deployment a name: webapp.
937 |
938 |
939 | spec:
940 |   replicas: 3
941 | Tells Kubernetes to run 3 replicas (3 pods) of your app.
942 |
943 | selector:
944 |   matchLabels:
945 |     app: dempapp
946 | The deployment will manage pods that have the label app: dempapp.
947 |
948 |
949 | template:
950 |   metadata:
951 |     labels:
952 |       app: dempapp
953 | This is the pod template. Every pod created will be labeled app: dempapp.
954 |
955 | This must match the selector above.
956 |
957 | spec:
958 |   containers:
959 |     - name: webappcontainer
960 |       image: psait/pankajsiracademy:latest
961 |       ports:
962 |         - containerPort: 9090
963 |
964 | This is the container spec inside the pod:
965 |
966 | name: webappcontainer
967 |
968 | image: Docker image hosted at Docker Hub (psait/pankajsiracademy:latest)
969 |
970 | containerPort: The app runs on port 9090 inside the container.
971 |
972 | 🌐 2. Service
973 |
974 | apiVersion: v1
975 | Uses the core v1 API to define a Service.
976 |
977 |
978 | kind: Service
979 | Declares a Service (used to expose pods).
980 |
981 |
982 | metadata:
983 |   name: webappservice
984 | The service name is webappservice.
985 |
986 |
987 | spec:
988 |   type: NodePort
989 | NodePort type: Exposes your app externally by opening a port on every node in the cluster.
990 |
991 | Clients can access it via:
992 | http://<node-ip>:30095
993 |
994 |
995 | selector:
996 |   app: dempapp
997 | The service targets pods with label app: dempapp — which matches the Deployment.
998 |
999 |
1000 | ports:
1001 |   - port: 80
1002 |     targetPort: 9090
1003 |     nodePort: 30095
1004 | port: 80: The port the service receives traffic on (internally).
1005 |
1006 | targetPort: 9090: Forwards traffic to container's port 9090.
1007 |
1008 | nodePort: 30095: Opens this port on the Kubernetes node — used to access the service from outside the cluster.
1009 |
1010 |
1011 | ################################
1012 | Deployment with load balancer
1013 | ###############################
1014 |
1015 | ---
1016 | apiVersion: apps/v1
1017 | kind: Deployment
1018 | metadata:
1019 |   name: webapp
1020 | spec:
1021 |   replicas: 3
1022 |   selector:
1023 |     matchLabels:
1024 |       app: dempapp   # Must match pod template labels
1025 |   template:
1026 |     metadata:
1027 |       labels:
1028 |         app: dempapp
1029 |     spec:
1030 |       containers:
1031 |         - name: webappcontainer
1032 |           image: psait/pankajsiracademy:latest
1033 |           ports:
1034 |             - containerPort: 9090
1035 |
1036 | ---
1037 | apiVersion: v1
1038 | kind: Service
1039 | metadata:
1040 |   name: webappservice
1041 | spec:
1042 |   type: LoadBalancer   # ✅ Changed from NodePort to LoadBalancer
1043 |   selector:
1044 |     app: dempapp
1045 |   ports:
1046 |     - port: 80
1047 |       targetPort: 9090
1048 |
1049 |
1050 | Note:
1051 |
1052 | 🧠 First: Is Each Worker Node a Separate VM?
1053 | ✅ Yes, each worker node in EKS is an individual EC2 instance (VM).
1054 |
1055 | So, if you have 4 worker nodes, EKS will launch 4 separate EC2 instances in your account (under the hood).
1056 |
1057 | 🚀 How to Create More Worker Nodes in EKS?
1058 | There are two ways to create more worker nodes:
1059 |
1060 | ✅ Option 1: CLI Method (Add --nodes flag)
1061 | You can specify the number of nodes when creating the cluster using the --nodes flag:
1062 |
1063 | eksctl create cluster \
1064 |   --name psait-cluster4 \
1065 |   --region ap-south-1 \
1066 |   --node-type t2.medium \
1067 |   --zones ap-south-1a,ap-south-1b \
1068 |   --nodes 4 \
1069 |   --nodes-min 2 \
1070 |   --nodes-max 6
1071 | 🔍 What This Means:
1072 | --nodes 4 → Start with 4 worker nodes
1073 |
1074 | --nodes-min 2 → Minimum nodes for autoscaling
1075 |
1076 | --nodes-max 6 → Maximum nodes for autoscaling
1077 |
1078 | Nodes will be spread across the AZs you mention (load balanced)
1079 |
1080 | ✅ Option 2: YAML Config File (More Flexible)
1081 | Create a cluster.yaml like this:
1082 |
1083 | apiVersion: eksctl.io/v1alpha5
1084 | kind: ClusterConfig
1085 |
1086 | metadata:
1087 |   name: psait-cluster4
1088 |   region: ap-south-1
1089 |
1090 | availabilityZones: ["ap-south-1a", "ap-south-1b"]
1091 |
1092 | nodeGroups:
1093 |   - name: ng-1
1094 |     instanceType: t2.medium
1095 |     desiredCapacity: 4
1096 |     minSize: 2
1097 |     maxSize: 6
1098 |     volumeSize: 20
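
To create the cluster from this config file (assuming it is saved as cluster.yaml, as named above):

eksctl create cluster -f cluster.yaml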
1099 |
1100 | Note: We can also use Terraform to create the same EKS setup
1101 |
1102 |
1103 | YML file to deploy the application in EKS with a LoadBalancer
1104 | #############################################################
1105 | ---
1106 | apiVersion: apps/v1
1107 | kind: Deployment
1108 | metadata:
1109 |   name: webapp
1110 | spec:
1111 |   replicas: 2
1112 |   strategy:
1113 |     type: RollingUpdate
1114 |   selector:
1115 |     matchLabels:
1116 |       app: javawebapp
1117 |   template:
1118 |     metadata:
1119 |       name: javawebpod
1120 |       labels:
1121 |         app: javawebapp
1122 |     spec:
1123 |       containers:
1124 |         - name: webappcontainer
1125 |           image: psait/pankajsiracademy:latest
1126 |           ports:
1127 |             - containerPort: 9090
1128 | ---
1129 | apiVersion: v1
1130 | kind: Service
1131 | metadata:
1132 |   name: websvc
1133 | spec:
1134 |   type: LoadBalancer
1135 |   selector:
1136 |     app: javawebapp
1137 |   ports:
1138 |     - port: 80
1139 |       targetPort: 9090
1140 | ...
1141 |
1142 | pipeline - k8s + maven + docker + jenkins
1143 | ---------------------------------------------
1144 | pipeline {
1145 |     agent any
1146 |
1147 |     tools {
1148 |         maven "maven-3.9.9"
1149 |     }
1150 |
1151 |     stages {
1152 |         stage('Clone Repo') {
1153 |             steps {
1154 |                 git branch: 'main', url: 'https://github.com/pankajmutha14/docker-test.git'
1155 |             }
1156 |         }
1157 |         stage('Maven Build') {
1158 |             steps {
1159 |                 sh 'mvn clean package'
1160 |             }
1161 |         }
1162 |         stage('Docker Image') {
1163 |             steps {
1164 |                 sh 'docker build -t psait/pankajsiracademy:latest .'
1165 |             }
1166 |         }
1167 |         stage('k8s deployment') {
1168 |             steps {
1169 |                 sh 'kubectl apply -f k8s-deploy.yml'
1170 |             }
1171 |         }
1172 |     }
1173 | }
1174 |
1175 | ##############################
1176 | Scaling in Kubernetes
1177 | #############################
1178 |
1179 | 1. HPA - Horizontal Pod Autoscaler
1180 | ###########################################
1181 | What it does: Adds or removes pods based on CPU/memory usage, custom metrics, or external metrics.
1182 |
1183 | Use case: When traffic increases, Kubernetes spins up more pods to handle the load.
1184 |
1185 | Controlled by: The HorizontalPodAutoscaler object.
1186 |
1187 | Example: If CPU usage goes above 80%, HPA can increase the pod count from 2 to 5 automatically
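
A hedged sketch of that example as an HPA object (the target name hpa-demo-deployment is taken from the load-test commands below; metrics-server must be installed for CPU metrics to work):

---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-demo
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: hpa-demo-deployment    # Deployment the HPA scales
  minReplicas: 2
  maxReplicas: 5
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 80 # add Pods when average CPU goes above 80%
...

# Imperative equivalent:
kubectl autoscale deployment hpa-demo-deployment --cpu-percent=80 --min=2 --max=5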
1188 |
1189 | 2. VPA - Vertical Pod Autoscaler
1190 | What it does: Adjusts CPU and memory requests/limits of a pod automatically.
1191 |
1192 | Apply Load in HPA
1193 | --------------
1194 | kubectl run -i --tty load-generator --rm \
1195 |   --image=busybox --restart=Never \
1196 |   -- /bin/sh -c "while true; do wget -q -O- http://hpa-demo-deployment; sleep 0.01; done"
1197 |
1198 |
1199 |
1200 | kubectl get hpa -w
1201 |
1202 | kubectl describe deploy hpa-demo-deployment
1203 |
1204 | kubectl get hpa
1205 |
1206 | kubectl get events
1207 |
1208 |
1209 |
1210 | ----------------------------------------------------------Further notes will be added soon-----------------------------------------------------
1211 |
1212 |
--------------------------------------------------------------------------------