├── jenkinsfile
└── README.md
/jenkinsfile:
--------------------------------------------------------------------------------
1 | // Declarative CI/CD pipeline: SonarQube analysis -> dependency/FS scans ->
2 | // Docker build & push -> image scan -> container + Kubernetes deploy,
3 | // with an e-mail report (including Trivy logs) after every build.
4 | pipeline{
5 |     agent any
6 |     tools{
7 |         jdk 'jdk17'
8 |         nodejs 'node16'
9 |     }
10 |     environment {
11 |         SCANNER_HOME=tool 'sonar-scanner'
12 |         // Single source of truth for the image reference so build, tag,
13 |         // push, scan and deploy stages can never drift apart.
14 |         DOCKER_IMAGE = 'rutik/netflix'
15 |     }
16 |     stages {
17 |         stage('clean workspace'){
18 |             steps{
19 |                 cleanWs()
20 |             }
21 |         }
22 |         stage('Checkout from Git'){
23 |             steps{
24 |                 git branch: 'main', url: 'https://github.com/Aj7Ay/Netflix-clone.git'
25 |             }
26 |         }
27 |         stage('Sonarqube Analysis'){
28 |             steps{
29 |                 withSonarQubeEnv('sonar-server') {
30 |                     sh ''' $SCANNER_HOME/bin/sonar-scanner -Dsonar.projectName=Netflix \
31 |                     -Dsonar.projectKey=Netflix '''
32 |                 }
33 |             }
34 |         }
35 |         stage('quality gate'){
36 |             steps {
37 |                 script {
38 |                     // abortPipeline:false — record the gate result but keep the build going
39 |                     waitForQualityGate abortPipeline: false, credentialsId: 'Sonar-token'
40 |                 }
41 |             }
42 |         }
43 |         stage('Install Dependencies') {
44 |             steps {
45 |                 sh "npm install"
46 |             }
47 |         }
48 |         stage('OWASP FS SCAN') {
49 |             steps {
50 |                 dependencyCheck additionalArguments: '--scan ./ --disableYarnAudit --disableNodeAudit', odcInstallation: 'DP-Check'
51 |                 dependencyCheckPublisher pattern: '**/dependency-check-report.xml'
52 |             }
53 |         }
54 |         stage('TRIVY FS SCAN') {
55 |             steps {
56 |                 sh "trivy fs . > trivyfs.txt"
57 |             }
58 |         }
59 |         stage('Docker Build & Push'){
60 |             steps{
61 |                 script{
62 |                     withDockerRegistry(credentialsId: 'docker', toolName: 'docker'){
63 |                         // SECURITY NOTE(review): the TMDB API key is hard-coded in the
64 |                         // pipeline; move it into a Jenkins secret-text credential and
65 |                         // inject it via credentials() instead of committing it here.
66 |                         sh "docker build --build-arg TMDB_V3_API_KEY=AJ7AYe14eca3e76864yah319b92 -t netflix ."
67 |                         sh "docker tag netflix ${DOCKER_IMAGE}:latest"
68 |                         sh "docker push ${DOCKER_IMAGE}:latest"
69 |                     }
70 |                 }
71 |             }
72 |         }
73 |         stage('TRIVY'){
74 |             steps{
75 |                 // Scan the image that was actually pushed (was 'sevenajay/netflix',
76 |                 // which did not match the 'rutik/netflix' image built above).
77 |                 sh "trivy image ${DOCKER_IMAGE}:latest > trivyimage.txt"
78 |             }
79 |         }
80 |         stage('Deploy to container'){
81 |             steps{
82 |                 sh "docker run -d --name netflix -p 8081:80 ${DOCKER_IMAGE}:latest"
83 |             }
84 |         }
85 |         stage('Deploy to Kubernetes'){
86 |             steps{
87 |                 script{
88 |                     dir('Kubernetes') {
89 |                         withKubeConfig(caCertificate: '', clusterName: '', contextName: '', credentialsId: 'k8s', namespace: '', restrictKubeConfigAccess: false, serverUrl: '') {
90 |                             sh 'kubectl apply -f deployment.yml'
91 |                             sh 'kubectl apply -f service.yml'
92 |                         }
93 |                     }
94 |                 }
95 |             }
96 |         }
97 |     }
98 |     post {
99 |         always {
100 |             // <br/> keeps the HTML mail body on separate lines; the tags had been
101 |             // stripped, leaving broken multi-line string literals (invalid Groovy).
102 |             emailext attachLog: true,
103 |                 subject: "'${currentBuild.result}'",
104 |                 body: "Project: ${env.JOB_NAME}<br/>" +
105 |                     "Build Number: ${env.BUILD_NUMBER}<br/>" +
106 |                     "URL: ${env.BUILD_URL}<br/>",
107 |                 to: 'rutik@gmail.com',
108 |                 attachmentsPattern: 'trivyfs.txt,trivyimage.txt'
109 |         }
110 |     }
111 | }
112 |
99 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DevSecOps : Netflix Clone CI-CD with Monitoring | Email
2 | 
3 |
4 |
5 | # Project Blog link :-
6 | - https://medium.com/@rutikdevops/devsecops-netflix-clone-ci-cd-with-monitoring-email-cf86666ae9c7
7 |
8 | # Project Overview :-
9 | - We will be deploying a Netflix clone. We will be using Jenkins as a CICD tool and deploying our application on a Docker container and Kubernetes Cluster and we will monitor the Jenkins and Kubernetes metrics using Grafana, Prometheus and Node exporter. I Hope this detailed blog is useful.
10 |
11 |
12 | # Project Steps :-
13 | - Step 1 — Launch an Ubuntu(22.04) T2 Large Instance
14 | - Step 2 — Install Jenkins, Docker and Trivy. Create a Sonarqube Container using Docker.
15 | - Step 3 — Create a TMDB API Key.
16 | - Step 4 — Install Prometheus and Grafana On the new Server.
17 | - Step 5 — Install the Prometheus Plugin and Integrate it with the Prometheus server.
18 | - Step 6 — Email Integration With Jenkins and Plugin setup.
19 | - Step 7 — Install Plugins like JDK, Sonarqube Scanner, Nodejs, and OWASP Dependency Check.
20 | - Step 8 — Create a Pipeline Project in Jenkins using a Declarative Pipeline
21 | - Step 9 — Install OWASP Dependency Check Plugins
22 | - Step 10 — Docker Image Build and Push
23 | - Step 11 — Deploy the image using Docker
24 | - Step 12 — Kubernetes master and slave setup on Ubuntu (20.04)
25 | - Step 13 — Access the Netflix app on the Browser.
26 | - Step 14 — Terminate the AWS EC2 Instances.
27 |
28 | # STEP1:
29 | - Launch an Ubuntu(22.04) T2 Large Instance
30 | - Launch an AWS T2 Large Instance. Use the image as Ubuntu. You can create a new key pair or use an existing one. Enable HTTP and HTTPS settings in the Security Group and open all ports (not best case to open all ports but just for learning purposes it's okay).
31 |
32 |
33 |
34 | # Step 2 :
35 | - Install Jenkins, Docker and Trivy
36 | - 2A — To Install Jenkins
37 | - Connect to your console, and enter these commands to Install Jenkins
38 | ```bash
39 | vi jenkins.sh #make sure run in Root (or) add at userdata while ec2 launch
40 | ```
41 |
42 | ```bash
43 | #!/bin/bash
44 | sudo apt update -y
45 | #sudo apt upgrade -y
46 | wget -O - https://packages.adoptium.net/artifactory/api/gpg/key/public | tee /etc/apt/keyrings/adoptium.asc
47 | echo "deb [signed-by=/etc/apt/keyrings/adoptium.asc] https://packages.adoptium.net/artifactory/deb $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) main" | tee /etc/apt/sources.list.d/adoptium.list
48 | sudo apt update -y
49 | sudo apt install temurin-17-jdk -y
50 | /usr/bin/java --version
51 | curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io-2023.key | sudo tee \
52 | /usr/share/keyrings/jenkins-keyring.asc > /dev/null
53 | echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] \
54 | https://pkg.jenkins.io/debian-stable binary/ | sudo tee \
55 | /etc/apt/sources.list.d/jenkins.list > /dev/null
56 | sudo apt-get update -y
57 | sudo apt-get install jenkins -y
58 | sudo systemctl start jenkins
59 | sudo systemctl status jenkins
60 | ```
61 |
62 |
63 | ```bash
64 | sudo chmod 777 jenkins.sh
65 | ./jenkins.sh # this will install jenkins
66 | ```
67 |
68 | - Once Jenkins is installed, you will need to go to your AWS EC2 Security Group and open Inbound Port 8080, since Jenkins works on Port 8080.
69 | - Now, grab your Public IP Address
70 |
71 |
72 | ```bash
73 |
74 | sudo cat /var/lib/jenkins/secrets/initialAdminPassword
75 | ```
76 |
77 | - 2B — Install Docker
78 | ```bash
79 | sudo apt-get update
80 | sudo apt-get install docker.io -y
81 | sudo usermod -aG docker $USER #my case is ubuntu
82 | newgrp docker
83 | sudo chmod 777 /var/run/docker.sock
84 | ```
85 |
86 | - After the docker installation, we create a sonarqube container (Remember to add 9000 ports in the security group).
87 | ```bash
88 | docker run -d --name sonar -p 9000:9000 sonarqube:lts-community
89 | ```
90 |
91 | - 2C — Install Trivy
92 | ```bash
93 | vi trivy.sh
94 | ```
95 | ```bash
96 | sudo apt-get install wget apt-transport-https gnupg lsb-release -y
97 | wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | gpg --dearmor | sudo tee /usr/share/keyrings/trivy.gpg > /dev/null
98 | echo "deb [signed-by=/usr/share/keyrings/trivy.gpg] https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" | sudo tee -a /etc/apt/sources.list.d/trivy.list
99 | sudo apt-get update
100 | sudo apt-get install trivy -y
101 | ```
102 |
103 | # Step 3: Create a TMDB API Key
104 | - Next, we will create a TMDB API key
105 | - Open a new tab in the Browser and search for TMDB
106 | - Click on the first result, you will see this page
107 | - Click on the Login on the top right. You will get this page.
108 | - You need to create an account here, so click on "Click here". I already have an account, which is why I added my details there.
109 | - once you create an account you will see this page.
110 | - Let's create an API key, By clicking on your profile and clicking settings.
111 | - Now click on API from the left side panel.
112 | - Now click on create
113 | - Click on Developer
114 | - Now you have to accept the terms and conditions.
115 | - Provide basic details
116 | - Click on submit and you will get your API key.
117 |
118 | # Step 4 :
119 | - Install Prometheus and Grafana On the new Server
120 | - First of all, let's create a dedicated Linux user sometimes called a system account for Prometheus. Having individual users for each service serves two main purposes:
121 | - It is a security measure to reduce the impact in case of an incident with the service.
122 | - It simplifies administration as it becomes easier to track down what resources belong to which service.
123 | - To create a system user or system account, run the following command:
124 | ```bash
125 | sudo useradd \
126 | --system \
127 | --no-create-home \
128 | --shell /bin/false prometheus
129 | ```
130 |
131 | - --system - Will create a system account.
132 | - --no-create-home - We don't need a home directory for Prometheus or any other system accounts in our case.
133 | - --shell /bin/false - It prevents logging in as a Prometheus user.
134 | - Prometheus - Will create a Prometheus user and a group with the same name.
135 | - You can use the curl or wget command to download Prometheus.
136 |
137 | ```bash
138 | wget https://github.com/prometheus/prometheus/releases/download/v2.47.1/prometheus-2.47.1.linux-amd64.tar.gz
139 | ```
140 |
141 | - Then, we need to extract all Prometheus files from the archive.
142 | ```bash
143 | tar -xvf prometheus-2.47.1.linux-amd64.tar.gz
144 | ```
145 | - Usually, you would have a disk mounted to the data directory. For this tutorial, I will simply create a /data directory. Also, you need a folder for Prometheus configuration files.
146 | ```bash
147 | sudo mkdir -p /data /etc/prometheus
148 | ```
149 |
150 | - Now, let's change the directory to Prometheus and move some files.
151 | ```bash
152 | cd prometheus-2.47.1.linux-amd64/
153 | ```
154 |
155 | - First of all, let's move the Prometheus binary and a promtool to the /usr/local/bin/. promtool is used to check configuration files and Prometheus rules.
156 | ```bash
157 | sudo mv prometheus promtool /usr/local/bin/
158 | ```
159 |
160 | - Optionally, we can move console libraries to the Prometheus configuration directory. Console templates allow for the creation of arbitrary consoles using the Go templating language. You don't need to worry about it if you're just getting started.
161 | ```bash
162 | sudo mv consoles/ console_libraries/ /etc/prometheus/
163 | ```
164 |
165 | - Finally, let's move the example of the main Prometheus configuration file.
166 | ```bash
167 | sudo mv prometheus.yml /etc/prometheus/prometheus.yml
168 | ```
169 |
170 |
171 | - To avoid permission issues, you need to set the correct ownership for the /etc/prometheus/ and data directory.
172 | ```bash
173 | sudo chown -R prometheus:prometheus /etc/prometheus/ /data/
174 | ```
175 |
176 | - You can delete the archive and a Prometheus folder when you are done.
177 | ```bash
178 | cd ..
179 | rm -rf prometheus-2.47.1.linux-amd64.tar.gz
180 | ```
181 |
182 |
183 | - We're going to use some of these options in the service definition.
184 | - We're going to use Systemd, which is a system and service manager for Linux operating systems. For that, we need to create a Systemd unit configuration file.
185 | ```bash
186 | sudo vim /etc/systemd/system/prometheus.service
187 | ```
188 |
189 | - Prometheus.service
190 | ```bash
191 | [Unit]
192 | Description=Prometheus
193 | Wants=network-online.target
194 | After=network-online.target
195 |
196 | StartLimitIntervalSec=500
197 | StartLimitBurst=5
198 |
199 | [Service]
200 | User=prometheus
201 | Group=prometheus
202 | Type=simple
203 | Restart=on-failure
204 | RestartSec=5s
205 | ExecStart=/usr/local/bin/prometheus \
206 | --config.file=/etc/prometheus/prometheus.yml \
207 | --storage.tsdb.path=/data \
208 | --web.console.templates=/etc/prometheus/consoles \
209 | --web.console.libraries=/etc/prometheus/console_libraries \
210 | --web.listen-address=0.0.0.0:9090 \
211 | --web.enable-lifecycle
212 |
213 | [Install]
214 | WantedBy=multi-user.target
215 | ```
216 |
217 |
218 | - Let's go over a few of the most important options related to Systemd and Prometheus. Restart - Configures whether the service shall be restarted when the service process exits, is killed, or a timeout is reached.
219 | RestartSec - Configures the time to sleep before restarting a service.
220 | User and Group - Are Linux user and a group to start a Prometheus process.
221 | --config.file=/etc/prometheus/prometheus.yml - Path to the main Prometheus configuration file.
222 | --storage.tsdb.path=/data - Location to store Prometheus data.
223 | --web.listen-address=0.0.0.0:9090 - Configure to listen on all network interfaces. In some situations, you may have a proxy such as nginx to redirect requests to Prometheus. In that case, you would configure Prometheus to listen only on localhost.
224 | --web.enable-lifecycle -- Allows to manage Prometheus, for example, to reload configuration without restarting the service.
225 |
226 | - To automatically start the Prometheus after reboot, run enable.
227 | ```bash
228 | sudo systemctl enable prometheus
229 | sudo systemctl start prometheus
230 | sudo systemctl status prometheus
231 | ```
232 |
233 | - Suppose you encounter any issues with Prometheus or are unable to start it. The easiest way to find the problem is to use the journalctl command and search for errors.
234 | ```bash
235 | journalctl -u prometheus -f --no-pager
236 | ```
237 |
238 | - Now we can try to access it via the browser. I'm going to be using the IP address of the Ubuntu server. You need to append port 9090 to the IP.
239 | ```bash
240 |
241 | ```
242 | - If you go to targets, you should see only one - Prometheus target. It scrapes itself every 15 seconds by default.
243 |
244 |
245 | - Install Node Exporter on Ubuntu 22.04
246 | - Next, we're going to set up and configure Node Exporter to collect Linux system metrics like CPU load and disk I/O. Node Exporter will expose these as Prometheus-style metrics. Since the installation process is very similar, I'm not going to cover as deep as Prometheus.
247 | - First, let's create a system user for Node Exporter by running the following command:
248 | ```bash
249 | sudo useradd \
250 | --system \
251 | --no-create-home \
252 | --shell /bin/false node_exporter
253 | ```
254 |
255 | - Use the wget command to download the binary.
256 | ```bash
257 | wget https://github.com/prometheus/node_exporter/releases/download/v1.6.1/node_exporter-1.6.1.linux-amd64.tar.gz
258 | ```
259 |
260 | - Extract the node exporter from the archive.
261 | ```bash
262 | tar -xvf node_exporter-1.6.1.linux-amd64.tar.gz
263 | ```
264 |
265 | - Move binary to the /usr/local/bin.
266 | ```bash
267 | sudo mv \
268 | node_exporter-1.6.1.linux-amd64/node_exporter \
269 | /usr/local/bin/
270 | ```
271 |
272 | - Clean up, and delete node_exporter archive and a folder.
273 | ```bash
274 | rm -rf node_exporter*
275 | ```
276 |
277 | - Next, create a similar systemd unit file.
278 | ```bash
279 | sudo vim /etc/systemd/system/node_exporter.service
280 | ```
281 |
282 | - node_exporter.service
283 | ```bash
284 | [Unit]
285 | Description=Node Exporter
286 | Wants=network-online.target
287 | After=network-online.target
288 |
289 | StartLimitIntervalSec=500
290 | StartLimitBurst=5
291 |
292 | [Service]
293 | User=node_exporter
294 | Group=node_exporter
295 | Type=simple
296 | Restart=on-failure
297 | RestartSec=5s
298 | ExecStart=/usr/local/bin/node_exporter \
299 | --collector.logind
300 |
301 | [Install]
302 | WantedBy=multi-user.target
303 | ```
304 |
305 |
306 | - Replace Prometheus user and group to node_exporter, and update the ExecStart command.
307 | - To automatically start the Node Exporter after reboot, enable the service.
308 | ```bash
309 | sudo systemctl enable node_exporter
310 | sudo systemctl start node_exporter
311 | sudo systemctl status node_exporter
312 | ```
313 |
314 |
315 | - If you have any issues, check logs with journalctl
316 | ```bash
317 | journalctl -u node_exporter -f --no-pager
318 | ```
319 |
320 | - At this point, we have only a single target in our Prometheus. There are many different service discovery mechanisms built into Prometheus. For example, Prometheus can dynamically discover targets in AWS, GCP, and other clouds based on the labels. In the following tutorials, I'll give you a few examples of deploying Prometheus in a cloud-specific environment. For this tutorial, let's keep it simple and keep adding static targets. Also, I have a lesson on how to deploy and manage Prometheus in the Kubernetes cluster.
321 | - To create a static target, you need to add job_name with static_configs.
322 | ```bash
323 | sudo vim /etc/prometheus/prometheus.yml
324 | ```
325 |
326 | - prometheus.yml
327 | ```bash
328 | - job_name: node_export
329 | static_configs:
330 | - targets: ["localhost:9100"]
331 | ```
332 |
333 | - By default, Node Exporter will be exposed on port 9100.
334 | - Since we enabled lifecycle management via API calls, we can reload the Prometheus config without restarting the service and causing downtime.
335 | - Before restarting, check if the config is valid.
336 | ```bash
337 | promtool check config /etc/prometheus/prometheus.yml
338 | ```
339 |
340 | - Then, you can use a POST request to reload the config.
341 | ```bash
342 | curl -X POST http://localhost:9090/-/reload
343 | ```
344 |
345 | - Check the targets section
346 | ```bash
347 | http://:9090/targets
348 | ```
349 |
350 |
351 | - Install Grafana on Ubuntu 22.04
352 | - To visualize metrics we can use Grafana. There are many different data sources that Grafana supports, one of them is Prometheus.
353 | - First, let's make sure that all the dependencies are installed.
354 | ```bash
355 | sudo apt-get install -y apt-transport-https software-properties-common
356 | ```
357 |
358 | - Next, add the GPG key.
359 | ```bash
360 | wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -
361 | ```
362 |
363 | - Add this repository for stable releases.
364 | ```bash
365 | echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list
366 | ```
367 |
368 |
369 | - After you add the repository, update and install Grafana.
370 | ```bash
371 | sudo apt-get update
372 | sudo apt-get -y install grafana
373 | ```
374 |
375 | - To automatically start the Grafana after reboot, enable the service.
376 | ```bash
377 | sudo systemctl enable grafana-server
378 | sudo systemctl start grafana-server
379 | sudo systemctl status grafana-server
380 | ```
381 |
382 | - Go to http://&lt;your-server-ip&gt;:3000 and log in to Grafana using the default credentials. The username is admin, and the password is admin as well.
383 | ```bash
384 | username admin
385 | password admin
386 | ```
387 |
388 | - To visualize metrics, you need to add a data source first.
389 | - Click Add data source and select Prometheus.
390 | - For the URL, enter localhost:9090 and click Save and test. You can see Data source is working.
391 | - Let's add Dashboard for a better view
392 | - Click on Import Dashboard paste this code 1860 and click on load
393 | - Select the Datasource and click on Import
394 | - You will see this output
395 |
396 |
397 | # Step 5 :
398 | - Install the Prometheus Plugin and Integrate it with the Prometheus server
399 | - Let's Monitor JENKINS SYSTEM
400 | - Need Jenkins up and running machine
401 | - Goto Manage Jenkins --> Plugins --> Available Plugins
402 | - Search for Prometheus and install it
403 | - Once that is done, you will see that the Prometheus metrics path is set to /prometheus in the system configuration
404 | - Nothing to change click on apply and save
405 | - To create a static target, you need to add job_name with static_configs. go to Prometheus server
406 |
407 | ```bash
408 | sudo vim /etc/prometheus/prometheus.yml
409 | ```
410 |
411 | - Paste below code
412 | ```bash
413 | - job_name: 'jenkins'
414 | metrics_path: '/prometheus'
415 | static_configs:
416 | - targets: [':8080']
417 | ```
418 |
419 | - Before restarting, check if the config is valid.
420 | ```bash
421 | promtool check config /etc/prometheus/prometheus.yml
422 | ```
423 |
424 | - Then, you can use a POST request to reload the config.
425 | ```bash
426 | curl -X POST http://localhost:9090/-/reload
427 | ```
428 |
429 | - Check the targets section
430 | ```bash
431 | http://:9090/targets
432 | ```
433 |
434 | - You will see Jenkins is added to it
435 | - Let's add Dashboard for a better view in Grafana
436 | - Click On Dashboard --> + symbol --> Import Dashboard
437 | - Use Id 9964 and click on load
438 | - Select the data source and click on Import
439 | - Now you will see the Detailed overview of Jenkins
440 |
441 |
442 | # Step 6 :
443 | - Email Integration With Jenkins and Plugin Setup
444 | - Install Email Extension Plugin in Jenkins
445 | - Go to your Gmail and click on your profile
446 | - Then click on Manage Your Google Account --> click on the security tab on the left side panel you will get this page(provide mail password).
447 | - 2-step verification should be enabled.
448 | - Search for the app in the search bar you will get app passwords like the below image
449 | - Click on other and provide your name and click on Generate and copy the password
450 | - In the new update, you will get a password like this
451 | - Once the plugin is installed in Jenkins, click on manage Jenkins --> configure system there under the E-mail Notification section configure the details as shown in the below image
452 | - Click on Apply and save.
453 | - Click on Manage Jenkins--> credentials and add your mail username and generated password
454 | - This is to just verify the mail configuration
455 | - Now under the Extended E-mail Notification section configure the details as shown in the below images
456 | - Click on Apply and save.
457 |
458 | ```bash
459 | post {
460 | always {
461 | emailext attachLog: true,
462 | subject: "'${currentBuild.result}'",
463 | body: "Project: ${env.JOB_NAME}&lt;br/&gt;" +
464 | "Build Number: ${env.BUILD_NUMBER}&lt;br/&gt;" +
465 | "URL: ${env.BUILD_URL}&lt;br/&gt;",
466 | to: 'rutik@gmail.com', // change to your mail
467 | attachmentsPattern: 'trivyfs.txt,trivyimage.txt'
468 | }
469 | }
470 | ```
471 |
472 | - Next, we will log in to Jenkins and start to configure our Pipeline in Jenkins
473 |
474 | # Step 7 :
475 | - Install Plugins like JDK, Sonarqube Scanner, NodeJs, OWASP Dependency Check
476 | - 7A — Install Plugin
477 | - Goto Manage Jenkins →Plugins → Available Plugins →
478 |
479 | - Install below plugins
480 | - 1 → Eclipse Temurin Installer (Install without restart)
481 | - 2 → SonarQube Scanner (Install without restart)
482 | - 3 → NodeJs Plugin (Install Without restart)
483 |
484 |
485 | - 7B — Configure Java and Nodejs in Global Tool Configuration
486 | - Goto Manage Jenkins → Tools → Install JDK(17) and NodeJs(16)→ Click on Apply and Save
487 |
488 | - 7C — Create a Job
489 | - create a job as Netflix Name, select pipeline and click on ok.
490 |
491 | # Step 8 :
492 | - Configure Sonar Server in Manage Jenkins
493 | - Grab the Public IP Address of your EC2 Instance. Sonarqube works on Port 9000, so open &lt;Public-IP&gt;:9000. Goto your Sonarqube Server. Click on Administration → Security → Users → Click on Tokens and Update Token → Give it a name → and click on Generate Token
494 | - click on update Token
495 | - Create a token with a name and generate
496 | - copy Token
497 | - Goto Jenkins Dashboard → Manage Jenkins → Credentials → Add Secret Text. It should look like this
498 | - You will this page once you click on create
499 | - Now, go to Dashboard → Manage Jenkins → System and Add like the below image.
500 | - Click on Apply and Save
501 | - The Configure System option is used in Jenkins to configure different server
502 | - Global Tool Configuration is used to configure different tools that we install using Plugins
503 | - We will install a sonar scanner in the tools.
504 | - In the Sonarqube Dashboard add a quality gate also
505 | - Administration--> Configuration-->Webhooks
506 | - Click on Create
507 | - Add details
508 |
509 | ```bash
510 | #in url section of quality gate
511 | /sonarqube-webhook/
512 | ```
513 | - Let's go to our Pipeline and add the script in our Pipeline Script.
514 | ```bash
515 | pipeline{
516 | agent any
517 | tools{
518 | jdk 'jdk17'
519 | nodejs 'node16'
520 | }
521 | environment {
522 | SCANNER_HOME=tool 'sonar-scanner'
523 | }
524 | stages {
525 | stage('clean workspace'){
526 | steps{
527 | cleanWs()
528 | }
529 | }
530 | stage('Checkout from Git'){
531 | steps{
532 | git branch: 'main', url: 'https://github.com/Aj7Ay/Netflix-clone.git'
533 | }
534 | }
535 | stage("Sonarqube Analysis "){
536 | steps{
537 | withSonarQubeEnv('sonar-server') {
538 | sh ''' $SCANNER_HOME/bin/sonar-scanner -Dsonar.projectName=Netflix \
539 | -Dsonar.projectKey=Netflix '''
540 | }
541 | }
542 | }
543 | stage("quality gate"){
544 | steps {
545 | script {
546 | waitForQualityGate abortPipeline: false, credentialsId: 'Sonar-token'
547 | }
548 | }
549 | }
550 | stage('Install Dependencies') {
551 | steps {
552 | sh "npm install"
553 | }
554 | }
555 | }
556 | post {
557 | always {
558 | emailext attachLog: true,
559 | subject: "'${currentBuild.result}'",
560 | body: "Project: ${env.JOB_NAME}&lt;br/&gt;" +
561 | "Build Number: ${env.BUILD_NUMBER}&lt;br/&gt;" +
562 | "URL: ${env.BUILD_URL}&lt;br/&gt;",
563 | to: 'rutik@gmail.com',
564 | attachmentsPattern: 'trivyfs.txt,trivyimage.txt'
565 | }
566 | }
567 | }
568 |
569 | ```
570 |
571 | - Click on Build now, you will see the stage view like this
572 | - To see the report, you can go to Sonarqube Server and go to Projects.
573 | - You can see the report has been generated and the status shows as passed. You can see that there are 3.2k lines it scanned. To see a detailed report, you can go to issues.
574 |
575 | # Step 9 :
576 | - Install OWASP Dependency Check Plugins
577 | - GotoDashboard → Manage Jenkins → Plugins → OWASP Dependency-Check. Click on it and install it without restart.
578 | - First, we configured the Plugin and next, we had to configure the Tool
579 | - Goto Dashboard → Manage Jenkins → Tools →
580 | - Click on Apply and Save here.
581 | - Now go configure → Pipeline and add this stage to your pipeline and build.
582 |
583 | ```bash
584 | stage('OWASP FS SCAN') {
585 | steps {
586 | dependencyCheck additionalArguments: '--scan ./ --disableYarnAudit --disableNodeAudit', odcInstallation: 'DP-Check'
587 | dependencyCheckPublisher pattern: '**/dependency-check-report.xml'
588 | }
589 | }
590 | stage('TRIVY FS SCAN') {
591 | steps {
592 | sh "trivy fs . > trivyfs.txt"
593 | }
594 | }
595 | ```
596 |
597 |
598 |
599 | # Step 10 :
600 | - Docker Image Build and Push
601 | - We need to install the Docker tool in our system, Goto Dashboard → Manage Plugins → Available plugins → Search for Docker and install these plugins
602 | - Docker, Docker Commons, Docker Pipeline, Docker API, docker-build-step
603 | - and click on install without restart
604 | - Now, goto Dashboard → Manage Jenkins → Tools →
605 | - Add DockerHub Username and Password under Global Credentials
606 | - Add this stage to Pipeline Script
607 |
608 | ```bash
609 | stage("Docker Build & Push"){
610 | steps{
611 | script{
612 | withDockerRegistry(credentialsId: 'docker', toolName: 'docker'){
613 | sh "docker build --build-arg TMDB_V3_API_KEY=Aj7ay86fe14eca3e76869b92 -t netflix ."
614 | sh "docker tag netflix sevenajay/netflix:latest "
615 | sh "docker push sevenajay/netflix:latest "
616 | }
617 | }
618 | }
619 | }
620 | stage("TRIVY"){
621 | steps{
622 | sh "trivy image sevenajay/netflix:latest > trivyimage.txt"
623 | }
624 | }
625 | ```
626 |
627 |
628 | - When you log in to Dockerhub, you will see a new image is created
629 | - Now run the container to see if the app is coming up or not by adding the below stage
630 |
631 | ```bash
632 | stage('Deploy to container'){
633 | steps{
634 | sh 'docker run -d --name netflix -p 8081:80 sevenajay/netflix:latest'
635 | }
636 | }
637 | ```
638 |
639 |
640 |
641 | # Step 11 :
642 | - Kubernetes Setup
643 | - Connect your machines to PuTTY or MobaXterm
644 | - Take-Two Ubuntu 20.04 instances one for k8s master and the other one for worker.
645 | - Install Kubectl on Jenkins machine also.
646 | - Kubectl is to be installed on Jenkins also
647 |
648 | ```bash
649 | sudo apt update
650 | sudo apt install curl
651 | curl -LO https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl
652 | sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
653 | kubectl version --client
654 | ```
655 |
656 |
657 | - Part 1 ----------Master Node------------
658 | ```bash
659 | sudo hostnamectl set-hostname K8s-Master
660 | ```
661 |
662 | - ----------Worker Node------------
663 | ```bash
664 | sudo hostnamectl set-hostname K8s-Worker
665 | ```
666 |
667 | - Part 2 ------------Both Master & Node ------------
668 | ```bash
669 | sudo apt-get update
670 |
671 | sudo apt-get install -y docker.io
672 | sudo usermod -aG docker ubuntu
673 | newgrp docker
674 | sudo chmod 777 /var/run/docker.sock
675 |
676 | sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
677 |
678 | sudo tee /etc/apt/sources.list.d/kubernetes.list <<EOF
679 | deb https://apt.kubernetes.io/ kubernetes-xenial main
680 | EOF
681 |
682 | sudo apt-get update
683 | sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
684 | ```
685 |
686 | - Part 3 ------------Master Node------------
687 | ```bash
688 | sudo kubeadm init --pod-network-cidr=10.244.0.0/16
689 |
690 | # in case you are root, exit from it and run the below commands
691 | mkdir -p $HOME/.kube
692 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
693 | sudo chown $(id -u):$(id -g) $HOME/.kube/config
694 |
695 | # install the flannel network plugin
696 | kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
697 | ```
698 |
699 | - ------------Worker Node------------
700 | ```bash
701 | sudo kubeadm join <master-node-ip>:<port> --token <token> --discovery-token-ca-cert-hash <hash>
702 | ```
703 |
704 | - Copy the config file to Jenkins master or the local file manager and save it
705 | - copy it and save it in documents or another folder save it as secret-file.txt
706 | - Note: create a secret-file.txt in your file explorer save the config in it and use this at the kubernetes credential section.
707 | - Install Kubernetes Plugin, Once it's installed successfully
708 | - goto manage Jenkins --> manage credentials --> Click on Jenkins global --> add credentials
709 |
710 |
711 | - Install Node_exporter on both master and worker
712 | - Let's add Node_exporter on Master and Worker to monitor the metrics
713 | - First, let's create a system user for Node Exporter by running the following command:
714 | ```bash
715 | sudo useradd \
716 | --system \
717 | --no-create-home \
718 | --shell /bin/false node_exporter
719 | ```
720 |
721 |
722 | - Use the wget command to download the binary.
723 | ```bash
724 | wget https://github.com/prometheus/node_exporter/releases/download/v1.6.1/node_exporter-1.6.1.linux-amd64.tar.gz
725 | ```
726 |
727 | - Extract the node exporter from the archive.
728 | ```bash
729 | tar -xvf node_exporter-1.6.1.linux-amd64.tar.gz
730 | ```
731 | - Move binary to the /usr/local/bin.
732 | ```bash
733 | sudo mv \
734 | node_exporter-1.6.1.linux-amd64/node_exporter \
735 | /usr/local/bin/
736 | ```
737 |
738 | - Clean up, and delete node_exporter archive and a folder.
739 | ```bash
740 | rm -rf node_exporter*
741 | ```
742 |
743 | - Next, create a similar systemd unit file.
744 | ```bash
745 | sudo vim /etc/systemd/system/node_exporter.service
746 | ```
747 |
748 | - node_exporter.service
749 | ```bash
750 | [Unit]
751 | Description=Node Exporter
752 | Wants=network-online.target
753 | After=network-online.target
754 |
755 | StartLimitIntervalSec=500
756 | StartLimitBurst=5
757 |
758 | [Service]
759 | User=node_exporter
760 | Group=node_exporter
761 | Type=simple
762 | Restart=on-failure
763 | RestartSec=5s
764 | ExecStart=/usr/local/bin/node_exporter \
765 | --collector.logind
766 |
767 | [Install]
768 | WantedBy=multi-user.target
769 |
770 | ```
771 |
772 | - Replace Prometheus user and group to node_exporter, and update the ExecStart command.
773 | - To automatically start the Node Exporter after reboot, enable the service.
774 | ```bash
775 | sudo systemctl enable node_exporter
776 | sudo systemctl start node_exporter
777 | sudo systemctl status node_exporter
778 | ```
779 |
780 | - If you have any issues, check logs with journalctl
781 | ```bash
782 | journalctl -u node_exporter -f --no-pager
783 | ```
784 |
785 | - At this point, we have only a single target in our Prometheus. There are many different service discovery mechanisms built into Prometheus. For example, Prometheus can dynamically discover targets in AWS, GCP, and other clouds based on the labels. In the following tutorials, I'll give you a few examples of deploying Prometheus in a cloud-specific environment. For this tutorial, let's keep it simple and keep adding static targets. Also, I have a lesson on how to deploy and manage Prometheus in the Kubernetes cluster.
786 | - To create a static target, you need to add job_name with static_configs. Go to Prometheus server
787 | ```bash
788 | sudo vim /etc/prometheus/prometheus.yml
789 | ```
790 |
791 | - prometheus.yml
792 | ```bash
793 | - job_name: node_export_masterk8s
794 | static_configs:
795 | - targets: [":9100"]
796 |
797 | - job_name: node_export_workerk8s
798 | static_configs:
799 | - targets: [":9100"]
800 |
801 | ```
802 | - By default, Node Exporter will be exposed on port 9100.
803 | - Since we enabled lifecycle management via API calls, we can reload the Prometheus config without restarting the service and causing downtime.
804 | - Before restarting, check if the config is valid.
805 | ```bash
806 | promtool check config /etc/prometheus/prometheus.yml
807 | ```
808 |
809 | - Then, you can use a POST request to reload the config.
810 | ```bash
811 | curl -X POST http://localhost:9090/-/reload
812 | ```
813 |
814 | - Check the targets section
815 | ```bash
816 | http://:9090/targets
817 | ```
818 |
819 | - final step to deploy on the Kubernetes cluster
820 | ```bash
821 | stage('Deploy to kubernets'){
822 | steps{
823 | script{
824 | dir('Kubernetes') {
825 | withKubeConfig(caCertificate: '', clusterName: '', contextName: '', credentialsId: 'k8s', namespace: '', restrictKubeConfigAccess: false, serverUrl: '') {
826 | sh 'kubectl apply -f deployment.yml'
827 | sh 'kubectl apply -f service.yml'
828 | }
829 | }
830 | }
831 | }
832 | }
833 | ```
834 |
835 | - In the Kubernetes cluster(master) give this command
836 | ```bash
837 | kubectl get all
838 | kubectl get svc #use anyone
839 | ```
840 |
841 |
842 |
843 | # STEP 12:
844 | - Access the application from a web browser with &lt;public-ip&gt;:&lt;node-port&gt; (the NodePort shown by `kubectl get svc`)
845 |
846 |
847 | # output:
848 | - 
849 | - 
850 | - 
851 | - 
852 | - 
853 | - 
854 | - 
855 | - 
856 | - 
857 |
858 |
859 |
860 | # Step 13: Terminate instances.
861 |
862 |
863 |
864 | # Project Reference :
865 | - https://youtu.be/pbGA-B_SCVk?feature=shared
866 |
867 |
868 |
869 |
870 |
871 |
872 |
873 |
874 |
875 |
876 |
877 |
878 |
879 |
880 |
--------------------------------------------------------------------------------