├── 1.2 DockerStackCommands.txt ├── Ansible.pdf ├── Ansible ├── Ansible Assignments │ └── Assignment_1 ├── Ansible.pdf ├── Ansible_Commands_StepByStep ├── Ansible_Modules │ ├── Loops │ ├── Vault │ └── file Modules ├── Ansible_assignment └── Roles ├── Chef ├── 1.what-is-a-resource.pdf ├── 2.lab-setup-recipe.pdf ├── 3.Test-and-Repair.pdf ├── 4. cookbooks-overview.pdf ├── 5. cookbook-components.pdf ├── 6. lab-set-up-a-webserver.pdf ├── 7. applying-recipes-and-cookbooks.pdf ├── 8. add-a-template-to-the-recipe.pdf ├── AWS-Setup.pdf ├── BootStrapNode.pdf ├── Chef.txt ├── chef.txt └── template-files-and-ERB.pdf ├── DevOpsInterview_ResumeSample-master ├── Amador_Johandry-Resume.pdf ├── DevOpsInterviewQuestions_Answers.pdf ├── devops sample res.pdf └── devops sample resume.docx ├── DockerCommands ├── 01_Services │ └── files │ │ ├── 1.1 Service_Create_Inspect_logs_ls.txt │ │ ├── 1.10 ServiceUpdateAndRollback.txt │ │ ├── 1.2_Service ps.txt │ │ ├── 1.3_DockerSwarmVisualizer.txt │ │ ├── 1.4Service scale.txt │ │ ├── 1.5 Service Port Mapping.txt │ │ ├── 1.6 Service global mode.txt │ │ ├── 1.7 Service Constraint.txt │ │ ├── 1.8 Service Labels.txt │ │ └── 1.9 Node Availability.txt ├── 1.1 Swarm Backup and Restore.txt ├── 1.1_Task1.txt ├── 1.2 DockerStackCommands.txt ├── 1.2_Docker Secret Commands.txt ├── 1.2_Task2.txt ├── 1.3_Task3.txt ├── Docker Assignment.docx ├── Docker Swarm.pdf ├── docker ├── docker commands.docx └── docker registry.txt ├── DockerService.txt ├── ELK Stack.pptx ├── Final-Project.docx ├── Git ├── Git and GitHub Assignment.docx ├── Version Control with Git.pdf └── gitCommands.txt ├── Jenkins ├── Jenkins.pptx ├── JenkinsCodeFile.pptx ├── JenkinsInstallationSteps.txt ├── Jenkinsfile ├── Pipeline.txt └── scriptPipeline ├── Jenkinsfile ├── Kubernetes ├── 01_Kubernetes-Introduction.pdf ├── 02_Kubernetes-Installation.pdf ├── 05_Kubernetes-Namespaces.pdf ├── 05_Kubernetes-Volumes .pdf ├── Docker and Kubernetes Case Studies.docx ├── Exercises.xlsx ├── K8sAssignment.txt ├── Kubernetes Demo Files │ ├── 1.Pods.txt │ ├── 10.Load-Balancer.txt │ ├── 11.ClusterIP.txt │ ├── 12.emptyDir.txt │ ├── 13.HostPath.txt │ ├── 14.Dynamic-Provisioning.txt │ ├── 2.ConfigMap.txt │ ├── 3.Secrets.txt │ ├── 4.Replication-Controller.txt │ ├── 5.ReplicaSet.txt │ ├── 6. 
Deployment.txt │ ├── 7-pod-quota-mem-exceed.yaml │ ├── 7-pod-quota-mem.yaml │ ├── 7-quota-count.yaml │ ├── 7-quota-limitrange.yaml │ ├── 7-quota-mem.yaml │ ├── 7.DaemonSet.txt │ ├── 8.Jobs.txt │ ├── 9.NodePort.txt │ ├── Kubernetes_1.pdf │ ├── Kubernetes_2.pdf │ ├── UI.txt │ └── rbac.txt ├── Kubernetes Introduction and Installation.pdf ├── Kubernetes-Cheat-Sheet_07182019.pdf ├── KubernetesDashBoard.txt ├── Kubernetes_1.pdf ├── Kubernetes_2.pdf └── Run a Pod on Specific node.docx ├── Maven.pdf ├── Nagios ├── NRPE Installation ├── Nagios.pdf └── nagiosInstallationSteps.txt ├── Puppet installation ├── Puppet ├── Configuration Management.pdf ├── Install Puppet6.txt ├── Modules.txt ├── Puppet Architecture.pdf ├── classExample.txt └── puppetInstallation ├── Puppet_Intro_Installation.txt ├── SaltStack_Notes.txt ├── Splunk.pptx ├── Sunlife_Notes ├── docker-compose.yml ├── examplesplunk.txt ├── javaprogs ├── Testing │ ├── pom.xml │ ├── src │ │ └── test │ │ │ └── java │ │ │ └── com │ │ │ └── cognixia │ │ │ ├── AllTests.java │ │ │ ├── unitest.java │ │ │ └── unitest2.java │ └── target │ │ ├── classes │ │ └── META-INF │ │ │ ├── MANIFEST.MF │ │ │ └── maven │ │ │ └── com.cognixia │ │ │ └── Testing │ │ │ ├── pom.properties │ │ │ └── pom.xml │ │ └── test-classes │ │ └── com │ │ └── cognixia │ │ ├── AllTests.class │ │ ├── unitest.class │ │ └── unitest2.class └── mockito-example │ ├── pom.xml │ ├── src │ ├── main │ │ └── java │ │ │ └── com │ │ │ └── raman │ │ │ ├── business │ │ │ └── TodoBusinessImpl.java │ │ │ └── data │ │ │ └── api │ │ │ └── TodoService.java │ └── test │ │ └── java │ │ └── com │ │ └── raman │ │ ├── FirstTest.java │ │ ├── business │ │ └── TodoBusinessImplStubTest.java │ │ └── data │ │ └── stub │ │ └── TodoServiceStub.java │ └── target │ ├── classes │ ├── META-INF │ │ ├── MANIFEST.MF │ │ └── maven │ │ │ └── com.raman │ │ │ └── mockito-example │ │ │ ├── pom.properties │ │ │ └── pom.xml │ └── com │ │ └── raman │ │ ├── business │ │ └── TodoBusinessImpl.class │ │ └── data │ │ └── api │ │ └── TodoService.class │ └── test-classes │ └── com │ └── raman │ ├── FirstTest.class │ ├── business │ └── TodoBusinessImplStubTest.class │ └── data │ └── stub │ └── TodoServiceStub.class ├── project.txt ├── splunk installation.txt ├── test └── tutorialdata.zip /1.2 DockerStackCommands.txt: -------------------------------------------------------------------------------- 1 | sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 2 | chmod +x /usr/local/bin/docker-compose 3 | docker-compose --version 4 | 5 | #docker-compose.yml 6 | version: '3.3' 7 | 8 | services: 9 | db: 10 | image: mysql:5.7 11 | volumes: 12 | - db_data:/var/lib/mysql 13 | restart: always 14 | environment: 15 | MYSQL_ROOT_PASSWORD: somewordpress 16 | MYSQL_DATABASE: wordpress 17 | MYSQL_USER: wordpress 18 | MYSQL_PASSWORD: wordpress 19 | 20 | wordpress: 21 | depends_on: 22 | - db 23 | image: wordpress:latest 24 | ports: 25 | - "8000:80" 26 | restart: always 27 | environment: 28 | WORDPRESS_DB_HOST: db:3306 29 | WORDPRESS_DB_USER: wordpress 30 | WORDPRESS_DB_PASSWORD: wordpress 31 | WORDPRESS_DB_NAME: wordpress 32 | volumes: 33 | db_data: {} 34 | 35 | 36 | docker-compose up -d 37 | docker container ls 38 | docker-compose scale db=3 39 | docker container ls 40 | docker-compose down 41 | docker container ls 42 | cp docker-compose.yml stack.yml 43 | docker stack --help 44 | docker stack deploy -c stack.yml mystack 45 | docker stack ls 46 | docker stack services 
mystack 47 | docker service ps mystack_db 48 | docker service ps mystack_wordpress 49 | docker network ls 50 | docker stack ls 51 | docker stack rm mystack 52 | -------------------------------------------------------------------------------- /Ansible.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Ansible.pdf -------------------------------------------------------------------------------- /Ansible/Ansible Assignments/Assignment_1: -------------------------------------------------------------------------------- 1 | #---1 Create a Play book to install apache server on the destination server with yum module install only if the OS is 'Redhat' or 'CentOS' and version 7 2 | # Hint:- ansible_distribution is the variable which can have possible values 'RedHat','CentOS','Ubuntu','Debian' 3 | # ansible_distribution_version variable can have the value which can have the version of the linux 4 | 5 | 6 | #--2. Create a play book to install apache server on RedHat and Ubuntu server 7 | # Hint:- Create an Ubuntu machine and install python on it, apt module is used instead of yum in Ubuntu and name of software is apache2 not httpd 8 | 9 | #--3 . Create a play book to create testuser1 and testuser2 on all the hosts (use loop) 10 | Hint:- use "user" module 11 | -------------------------------------------------------------------------------- /Ansible/Ansible.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Ansible/Ansible.pdf -------------------------------------------------------------------------------- /Ansible/Ansible_Commands_StepByStep: -------------------------------------------------------------------------------- 1 | 2 | ----------------------------Installation of Ansible--------------------------- 3 | --BELOW STEPS ARE TO INSTALL ANSIBLE ON UBUNTU 4 | 5 | RUN BELOW COMMANDS ON MASTER ( LOGIN WITH ROOT USER) 6 | 7 | 1. ssh-keygen 8 | 2. cat /root/.ssh/id_rsa.pub 9 | copy the content of this file 10 | 11 | GOTO AGENT MACHINE 12 | 13 | 1. open /root/.ssh/authorized_keys file and copy master ssh key at the end of this file 14 | 15 | RUN BELOW COMMANDS ON MASTER 16 | 17 | 1. ssh <> 18 | 2. Before installing ansible package add ansible repository to your system 19 | sudo apt-add-repository ppa:ansible/ansible 20 | 3. Run the update command before installing to update existing packages 21 | sudo apt-get update 22 | 4. Now install the ansible package 23 | sudo apt-get install ansible 24 | 5. You can check if you’re on the latest version of ansible by running the version command 25 | sudo ansible --version 26 | 27 | SETUP HOST MACHINE ON MASTER 28 | 29 | 1. To set up hosts you need to edit the hosts file in the ansible directory 30 | cd /etc/ansible 31 | vi hosts 32 | 33 | ---BELOW STEPS ARE TO INSTALL ANSIBLE ON CENTOS SYSTEM 34 | 35 | RUN BELOW COMMANDS ON MASTER ( LOGIN WITH ROOT USER) 36 | 37 | 1. ssh-keygen 38 | 2. cat /root/.ssh/id_rsa.pub 39 | copy the content of this file 40 | GOTO AGENT MACHINE 41 | 42 | 1. 
open /root/.ssh/authorized_keys file and copy the master ssh key at the end of this file

--BELOW STEPS ARE TO INSTALL ANSIBLE
yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum install ansible
ansible --version

SETUP HOST MACHINE ON MASTER

1. To set up hosts you need to edit the hosts file in the ansible directory
cd /etc/ansible
vi hosts

--------------------Ansible Adhoc Commands---------------------------------

--To find the documentation of the modules
ansible-doc -l | more
ansible-doc -s yum
--To run the ping command for all hosts
ansible all -m ping
--To run the ping command against a specific slave
ansible -i hosts <> -m ping
--To run any command on the slave machine
ansible -i hosts <> -m shell -a 'ls /home'
--Install a package on the client machine

--I have added the below clients in the hosts file
172.31.27.53
172.31.19.90
172.31.31.119
------------------yum Module---------------
--Let's install the apache server on the 172.31.27.53 server (if it is already installed, first run yum remove httpd)
--Run the command in the /etc/ansible folder because the hosts file is available there
ansible 172.31.27.53 -m yum -a "name=httpd state=present"
--Let's verify it on the slave machine (it should be available)
service httpd status
------------------service module-------------
--Now the service is present but it is not in the started state on the slave machine, so let's start the service on the client using ansible from the master machine
ansible 172.31.27.53 -m service -a "name=httpd state=started"
--Notice that the state is changed=true
--Check the status on the client machine (it should be in the active state)
service httpd status
--Let's start the httpd service again from the master machine
ansible 172.31.27.53 -m service -a "name=httpd state=started"
--Notice that the state is changed=false because it is already started
--Please try the same commands with state=stopped,restarted

--Repeat the steps to install the nmap package

--------------------Copy module-------------
--Create a file on the master server and let's copy it to all or some of the slave servers
Step1:-> Create a file on the master machine (/tmp/testfile)
touch /tmp/testfile
echo "test for ansible copy module" > /tmp/testfile
Step2:-> check on 172.31.27.53 that there is no file /tmp/testfile
Step3:-> run the following command on the master machine
ansible 172.31.27.53 -m copy -a "src=/tmp/testfile dest=/tmp/testfile"
Step4:-> check on the slave machine; it should have the file in /tmp/testfile
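
--The ad-hoc commands above all resolve hosts through the /etc/ansible/hosts inventory. Ansible also accepts an inventory written in YAML; a minimal sketch equivalent to the three IPs listed above (the group name "webservers" is a hypothetical example, not from these notes):

all:
  children:
    webservers:
      hosts:
        172.31.27.53:
        172.31.19.90:
        172.31.31.119:

--Grouped hosts can then be targeted together, e.g. ansible webservers -m ping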

-----------------------------------------------------------------------------------------------------
Ansible Playbook - YAML ("YAML Ain't Markup Language") - .yaml or .yml
-------------------------------------------------------------------------------------------------------
1. It is a data serialization language designed to be directly writable and readable by humans
2. It is commonly used for configuration management.
3. Strictly speaking, YAML is a superset of JSON, with additional features such as comments and indentation-based structure
4. It is case-sensitive.

Q:-> What is a key?
Ans:- A key represents a variable or column name for a value, e.g. in name: httpd, name is the key and httpd is the value
Q:-> What are data types?
Ans:-> A data type represents the type of value we are storing in the key
eg x: 25        # it is an integer
x: "Ansible"    # it is a string
x: 2.3          # it is a float
x: true         # it is a Boolean
x: null         # it is null
Q:- What is a data collection?
Ans:- Generally, when we represent data as a single key-value pair, it is called a scalar representation of data. If we use multiple values for a single key, or multiple key-value pairs, it is called a data collection.
Q:- What are the different types of data collection?
Ans:- There are 2 types
Sequence data collection:- it is like an array in other programming languages
tasks:
  - name:
  - debug:
Map data collection:- it is equivalent to a dictionary in Python
tasks:
  - name: home dir
    debug: true
A map data collection can contain a sequence data collection
tasks:
  - name: home dir
    debug:
      var: result.stdout

Q:- What is an Ansible playbook?
Ans:- An Ansible playbook is a YAML script. It sends commands to the remote server in a scripted way, instead of using individual Ansible commands to configure the remote server from the command line.
Q:- What is the Ansible playbook structure?
Ans:- Each playbook is an aggregation of one or more plays. Playbooks are structured using plays, and there can be more than one play inside a playbook. The function of a play is to map a set of instructions against a particular host.
YAML is strictly typed, so extra care needs to be taken while writing YAML files.
There are different YAML editors, but we will prefer a simple editor like notepad++. Just open notepad++, copy and paste the YAML below, and change the language to YAML (Language → YAML).
E.g.
Task:- Install an apache server on the remote machine
Play:- Consists of one or more tasks, like install the apache server and start the service
Playbook:- Composed of one or more plays

----------Create a Playbook----------
A YAML file starts with --- (3 hyphens)

Syntax
---
- name: install and configure DB
  hosts: testServer

  vars:
    oracle_db_port_value: 1521

  tasks:
    - name: Install the Oracle DB
      yum:

    - name: Ensure the installed service is enabled and running
      service:
        name:

Q:- What are the different YAML tags in a playbook?
Ans:-
name
This tag specifies the name of the Ansible play, i.e. what this play will be doing. Any logical name can be given to the play.

hosts
This tag specifies the list of hosts or host groups against which we want to run the tasks. The hosts field/tag is mandatory. It tells Ansible on which hosts to run the listed tasks. The tasks can be run on the same machine or on a remote machine. One can run the tasks on multiple machines, so the hosts tag can also take a group of hosts as its entry.

vars
The vars tag lets you define the variables which you can use in your playbook. Usage is similar to variables in any programming language.

tasks
All playbooks should contain tasks, i.e. a list of actions to be performed. A task entry contains the name of the task, which works as help text for the user; it is not mandatory, but proves useful when debugging the playbook. Each task internally links to a piece of code called a module — the module that should be executed, and the arguments that the module requires.
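
--Pulling these tags together, a complete, runnable play under the same conventions (the host group testServer comes from the skeleton above; the package name httpd is just an example):

---
- name: install and configure a web server
  hosts: testServer
  vars:
    pkg: httpd
  tasks:
    - name: Install the package
      yum:
        name: "{{ pkg }}"
        state: present
    - name: Ensure the service is enabled and running
      service:
        name: "{{ pkg }}"
        state: started
        enabled: yes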

Q:- What are the basic steps of a YAML script?
Step 1:- start with ---
Step 2:- Target section list (hosts, user, etc.)
Step 3:- Variable list (optional)
Step 4:- Task list
List all the modules that you run, in order
Step 5:- Save the file with a .yaml or .yml extension
Assignment:- Run the following commands
on host1 and host2 :- execute the sh file (date.sh)
on host1 :- find files and folders of /etc
on host2 :- run the date command
on master :- find files and folders of /home

--------------------Variables--------------------------
Q: What do you mean by creating valid variable names?
Ans: Variable names should contain only letters, numbers, and underscores, and should always start with a letter, e.g. a1, a1_, file.
Variable names should not be reserved Ansible keywords.
foo_port is a great variable. foo5 is fine too.
foo-port, foo port, foo.port and 12 are not valid variable names.

YAML also supports dictionaries which map keys to values. For instance:
foo:
  field1: one
  field2: two
You can then reference a specific field in the dictionary using either bracket notation or dot notation:

foo['field1']
foo.field1
Q: Give a simple example of variables in a YAML script

---
- hosts: 172.31.27.53
  name: play1
  vars:
    cmd1: sh /home/date.sh
  tasks:
    - name: run the script
      command: "{{ cmd1 }}"
      register: output
    - debug:
        var: output.stdout

#one more example
---
- hosts: 172.31.27.53
  vars:
    myvars: "This is my content"
  tasks:
    - copy:
        dest: /tmp/var_file.txt
        content: "{{ myvars }}"
    - name: opening the file
      command: cat /tmp/var_file.txt
      register: output
    - debug:
        var: output.stdout

Q: what is vars_prompt?
Ans
vars_prompt is used to read the value of a variable at execution time

---
- name: This playbook is for vars_prompt
  hosts: 172.31.27.53
  vars_prompt:
    - name: var1
      prompt: Enter the value
  tasks:
    - name: output
      debug:
        msg: "This is the value of var1= {{ var1 }}"

#------------Run the date command only if /tmp/test.txt does not exist (creates)
---
- hosts: 172.31.27.53
  vars:
    test: "Testing"
  tasks:
    - name: Run only if the file does not exist
      command: date
      register: output
      args:
        creates: /tmp/test.txt
    - debug:
        var: output.stdout

#-----------------------------------------------

#-----------------------Run the command only if the file exists (removes)
---
- hosts: 172.31.27.53
  tasks:
    - name: testing
      command: 'touch /tmp/test'
      args:
        removes: /tmp/test
#------------------------------------------------

#-----------------Conditional statement

# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
---
- hosts: 172.31.27.53
  tasks:
    - stat:
        path: /tmp/test
      register: p
    - debug:
        msg: "Path exists and is a directory {{ p }} "
      when: p.stat.isdir is defined and p.stat.isdir

---
- hosts: 172.31.27.53
  vars:
    test: "True"
    cont: "Hi from ansible"
  tasks:
    - copy:
        dest: /tmp/test1.txt
        content: "{{ cont }}"
      when: ansible_facts['distribution'] == 'CentOS'

---
- hosts: 172.31.27.53
  vars_prompt:
    - name: myvars
      prompt: Enter the value
  tasks:
    - copy:
        dest: /tmp/var_file.txt
        content: "{{ myvars }}"
      when: myvars == "test"
    - name: opening the file
      command: cat /tmp/var_file.txt
      when: myvars == "test"
      register: output
    - debug:
        var: output.stdout
      when: myvars == "test"

---
- hosts: 172.31.27.53
  vars_prompt:
    - name: myvars
      prompt: Enter the value
  tasks:
    - copy:
        dest: /tmp/var_file.txt
        content: "{{ myvars }}"
      when: myvars == "test"
    - name: opening the file
      command: cat /tmp/var_file.txt
      ignore_errors: True
      register: output
    - debug:
        var: output.stdout
      when: myvars == "test"

# Tasks can also be guarded on whether a variable is defined at all:
---
- hosts: 172.31.27.53
  vars_prompt:
    - name: myvars
      prompt: Enter the value
  tasks:
    - copy:
        dest: /tmp/var_file.txt
        content: "{{ myvars }}"
      when: myvars == "test"
    - name: opening the file
      command: cat /tmp/var_file.txt
      ignore_errors: True
      register: output
    - debug:
        var: output.stdout
      when: myvars == "test"
    - shell: echo "I've got '{{ foo }}' and am not afraid to use it!"
      when: foo is defined

    - fail: msg="Bailing out. this play requires 'bar'"
      when: bar is undefined

#-------------------multiple conditions
---
- hosts: 172.31.27.53
  tasks:
    - command: /tmp/test.sh
      register: result
      ignore_errors: True

    - command: date
      when: result is failed

    - command: ls -l /home
      when: result is succeeded

    - command: ls -l /home/ec2-user
      when: result is skipped

---
- hosts: 172.31.27.53
  vars:
    var1: 1
    var2: 2
  tasks:
    - name: This is for condition1 (OR)
      command: date
      when: var1 == 1 or var2 == 2
    - name: This is for condition2 (a list of conditions is ANDed)
      command: date
      when:
        - var1 == 1
        - var2 == 2

----------------------------------------------Loops-----------------------

Q:- What is a loop in YAML?
Ans:- It is the repetition of tasks

example:- If you want to create 3 directories on the host machine

- hosts: 172.31.27.53
  tasks:
    - name: Create dir1
      command: mkdir /tmp/dir1
    - name: Create dir2
      command: mkdir /tmp/dir2
    - name: Create dir3
      command: mkdir /tmp/dir3

Now let's create the above tasks with loops

---
- hosts: 172.31.27.53
  tasks:
    - name: Create a dir
      command: mkdir /tmp/"{{ item }}"
      with_items:
        - new_dir1
        - new_dir2
        - new_dir3

----------Create users on the host machine

---
- hosts: 172.31.27.53
  tasks:
    - name: add several users
      user:
        name: "{{ item }}"
        state: present
        groups: "wheel"
      with_items:
        - testuser1
        - testuser2

Ansible Playbooks
An organised unit of scripts; it defines work for server configuration and is written in YAML

Playbooks have a number of plays
A play contains tasks
Tasks call core or custom modules
Handlers get triggered by notify and are executed at the end, only once
Playbooks
---------------------------------------------------------
|                        |                      |
Play                     Play                   Play

Tasks                    Tasks                  Tasks

Module                   Notify                 Handlers

demo
---
- hosts: 172.31.15.149
  name: play1
  tasks:
    - name: home dir
      command: ls -l /home
      register: output
    - debug:
        var: output.stdout
    - name: execute date
      command: date
      register: result
    - debug:
        var: result.stdout

- hosts: 172.31.27.6
  name: play2
  tasks:
    - name: installing Apache
      yum: name=httpd state=latest
    - name: start apache service
      service: name=httpd state=started

----------------Lookup
Q: How to read a file from Ansible and store it in a variable?
Ans: We use the lookup plugin, which reads data from an external source such as a file.

Eg:- Read the content of the test.csv file

---
- hosts: all
  vars:
    contents: "{{ lookup('file', 'test.csv') }}"
  tasks:
    - debug: msg="the value of test.csv is {{ contents }}"
-------------------Rolling updates
Q:- How to define the batch size while executing an action?
Ans: By default, Ansible will try to manage all of the machines referenced in a play in parallel. For a rolling update use case, you can define how many hosts Ansible should manage at a single time by using the serial keyword:

---
- hosts: all
  serial: 2
  name: play1
  tasks:
    - name: This is the testing
      command: ls /home
In the above example, if we had 4 hosts, 2 would complete the play entirely before moving on to the next 2 hosts.
If the number of hosts does not divide equally into the number of passes, the final pass will contain the remainder.

- name: test play
  hosts: all
  serial:
    - 1
    - 5
    - 10

In the above example, the first batch would contain a single host, the next would contain 5 hosts, and (if there are any hosts left), every following batch would contain 10 hosts until all available hosts are used.
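
--serial also accepts a percentage of the total number of hosts instead of a fixed count — a small sketch (the 30% figure is an arbitrary example, not from these notes):

---
- name: rolling update in percentage-sized batches
  hosts: all
  serial: "30%"
  tasks:
    - name: placeholder task
      command: ls /home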

------------------Maximum Failure

- hosts: webservers
  max_fail_percentage: 30
  serial: 10
  tasks:
    - command: date   # any tasks for the rolling batches go here
In the above example, if more than 3 of the 10 servers in the group were to fail, the rest of the play would be aborted.

--------------delegate_to

---
- hosts: 172.31.27.53
  tasks:
    - name: create dir1
      command: mkdir /tmp/"{{ item }}"
      delegate_to: 127.0.0.1
      loop:
        - new_dir11
        - new_dir21
        - new_dir31

---
- hosts: 172.31.27.53
  tasks:
    - name: create dir1
      local_action: command mkdir /tmp/"{{ item }}"
      loop:
        - new_dir11
        - new_dir21
        - new_dir31

ansible-playbook -i hosts demo1.yaml

---
- name: play1
  hosts: 172.31.27.53
  tasks:
    - stat:
        path: /tmp/test
      register: p
    - debug:
        msg: "Path exists and is a directory {{ p.stat.isdir }} "
      when: p.stat.isdir is defined and p.stat.isdir
    - name: "command to be executed"
      command: ls -l /home
      when: p.stat.isdir is defined and not p.stat.isdir
- name: play2
  hosts: 172.31.27.53
  tasks:
    - copy:
        dest: /tmp/test3.txt
        content: "Test"
- name: play3
  hosts: 172.31.27.53
  tasks:
    - name: read the file
      command: cat /tmp/test3.txt
      register: p
    - name: create the directory
      command: mkdir /tmp/Testing
      when: p.stdout == "Test"

---------------------Ansible Roles-----------------
Roles simplify writing complex playbooks
Roles allow you to reuse common configuration steps between different projects
Roles are flexible and easily modified.

--------------The structure of an Ansible role consists of the following components
Defaults:- Stores data about the role and default variables.
Files:- Stores files which are to be pushed to the remote machine.
Handlers:- Tasks that get triggered by some action.
Meta:- Information about the author, supported platforms and dependencies.
Tasks:- Contains the main list of tasks to be executed by the role.
Templates:- Contains templates which can be deployed via the role.
Vars:- Stores variables with a higher priority than default variables.
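
--Handlers, listed above as a role component and mentioned earlier in the playbook anatomy, respond to notify events and run once at the end of the play. A minimal hedged sketch (the config file path is a made-up example):

---
- hosts: 172.31.27.53
  tasks:
    - name: deploy httpd config
      copy:
        src: httpd.conf
        dest: /etc/httpd/conf/httpd.conf
      notify: restart httpd
  handlers:
    - name: restart httpd
      service:
        name: httpd
        state: restarted

--The handler runs only if the copy task reports changed=true, and only once even if several tasks notify it.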

Go to the roles folder and create the following folders
defaults files handlers meta tasks templates tests vars
--------------------------------------------------------------------------------
/Ansible/Ansible_Modules/Loops:
--------------------------------------------------------------------------------
----------------Loops------------------

- name: add several users
  user:
    name: "{{ item.name }}"
    state: present
    groups: "{{ item.groups }}"
  with_items:
    - { name: 'testuser1', groups: 'wheel' }
    - { name: 'testuser2', groups: 'root' }

----------Loop with list

# This will run debug three times since the list is flattened
- debug:
    msg: "{{ item }}"
  vars:
    nested_list:
      - - one
        - two
        - three
  with_items: "{{ nested_list }}"

# This will run debug once with the three items
- debug:
    msg: "{{ item }}"
  vars:
    nested_list:
      - - one
        - two
        - three
  with_items:
    - "{{ nested_list }}"

----------Nested Loop

- name: give users access to multiple databases
  mysql_user:
    name: "{{ item[0] }}"
    priv: "{{ item[1] }}.*:ALL"
    append_privs: yes
    password: "foo"
  with_nested:
    - [ 'alice', 'bob' ]
    - [ 'clientdb', 'employeedb', 'providerdb' ]

# In the below script there is a nested loop: the task runs for every combination of the user1 values (1, 2) with (3, 4)
---
- hosts: 172.31.27.53
  vars:
    user1:
      - 1
      - 2

  tasks:
    - name: Creating multiple users
      debug:
        msg: "{{ item }}"
      with_nested:
        - "{{ user1 }}"
        - [ 3 , 4 ]

# You can loop through the elements of a hash using with_dict like below:
---
- hosts: 172.31.27.53
  vars:
    users:
      raman:
        name: raman
        age: 40
        Add:
          city: Bangalore
      test:
        name: raj
        age: 35
        Add:
          city: Delhi

  tasks:
    - name: Creating multiple users
      debug:
        msg: "{{ item.value.Add.city }} {{ item.value.name }}"
      with_dict: "{{ users }}"

# reading local files using with_file

---
- hosts: 172.31.27.53
  tasks:
    - name: reading the content of the files
      debug:
        msg: "{{ item }}"
      with_file:
        - /tmp/1.txt
        - /tmp/2.txt
--------------------------------------------------------------------------------
/Ansible/Ansible_Modules/Vault:
--------------------------------------------------------------------------------
------------Ansible Vault

Ansible Vault is a feature of Ansible that allows you to keep sensitive data such as passwords or keys in encrypted files, rather than as plaintext in playbooks or roles. These vault files can then be distributed or placed in source control.
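
--In practice a vaulted file usually holds variables that a playbook pulls in with vars_files — a short sketch (secrets.yml and db_password are hypothetical names; the playbook is then run with --ask-vault-pass as shown in the commands below):

---
- hosts: all
  vars_files:
    - secrets.yml        # created with: ansible-vault create secrets.yml
  tasks:
    - name: use the vaulted value without printing it
      debug:
        msg: "db_password is {{ db_password | length }} characters long"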

ansible-vault create t.yaml    # To create an encrypted file; it will ask for an encryption password
cat t.yaml                     # it will show encrypted data
ansible-vault edit t.yaml      # To edit the vault file
ansible-vault decrypt t.yaml   # To decrypt the file
ansible-vault encrypt t.yaml   # To encrypt the file
ansible-playbook t.yaml --ask-vault-pass
--------------------------------------------------------------------------------
/Ansible/Ansible_Modules/file Modules:
--------------------------------------------------------------------------------
#--------------------------File Module
# change file ownership, group and mode
- file:
    path: /etc/foo.conf
    owner: foo
    group: foo
    # when specifying mode using octal numbers, add a leading 0
    mode: 0644
- file:
    path: /work
    owner: root
    group: root
    mode: 01777
- file:
    src: /file/to/link/to
    dest: /path/to/symlink
    owner: foo
    group: foo
    state: link
- file:
    src: '/tmp/{{ item.src }}'
    dest: '{{ item.dest }}'
    state: link
  with_items:
    - { src: 'x', dest: 'y' }
    - { src: 'z', dest: 'k' }

# touch a file, using symbolic modes to set the permissions (equivalent to 0644)
- file:
    path: /etc/foo.conf
    state: touch
    mode: "u=rw,g=r,o=r"

# touch the same file, but add/remove some permissions
- file:
    path: /etc/foo.conf
    state: touch
    mode: "u+rw,g-wx,o-rwx"

# touch the same file again, but don't change times
# this makes the task idempotent
- file:
    path: /etc/foo.conf
    state: touch
    mode: "u+rw,g-wx,o-rwx"
    modification_time: "preserve"
    access_time: "preserve"

# create a directory if it doesn't exist
- file:
    path: /etc/some_directory
    state: directory
    mode: 0755

# update the modification and access time of a given file
- file:
    path: /etc/some_file
    state: file
    mode: 0755
    modification_time: now
--------------------------------------------------------------------------------
/Ansible/Ansible_assignment:
--------------------------------------------------------------------------------
Assignment for Ansible

Q1:- Create a playbook which has 3 plays
Play1:- it will run on host1 (it can be any host which is configured in your hosts file);
it will check the status of a path (say /tmp/test) and check whether it is a directory or not;
if it is a directory, then create the /tmp/test directory on the slave machine
Play2:- Create a file /tmp/test3.txt and write the content TEST into it (use the copy module)
Play3:- Read the content of the /tmp/test3.txt file; if it is TEST, then create a directory /tmp/testing, otherwise do not create the directory

--------------------------------------------------------------------------------
/Ansible/Roles:
--------------------------------------------------------------------------------
--Ansible Roles
Roles in Ansible are the next level of abstraction over Ansible playbooks
--Benefits of Ansible Roles
the idea is to include files and combine them to form a clean and reusable abstraction
Easier to maintain and troubleshoot the playbooks

--Structure of Roles

files: contains the regular files that need to be copied to the target machine
handlers: Event handlers
meta: Role dependencies
templates: similar to files but contains dynamic data
tasks: playbook tasks
vars/group_vars: variable definitions

ansible-galaxy search apache
galaxy.ansible.com

ansible-galaxy init apache --offline

main.yml
---
# tasks file for apache
- include: install.yml
- include: configure.yml

#install.yml

---
- name: installing apache2
  apt:
    name: apache2
    state: present

#configure.yml
---
- name: status
  copy: src=status.txt dest=/tmp/status.txt
  notify:
    - restart apache service
- name: send the file
  copy: src=test.html dest=/var/www/html/test.html

# copy the configuration file to the files/ folder
cp /etc/httpd/conf/httpd.conf .

# under the handlers folder/main.yml

---
# handlers file for apache
- name: restart apache service
  service: name=apache2 state=restarted

#Call the role in the main yaml file
---
- hosts: webservers
  roles:
    - apache

--------------------------------------------------------------------------------
/Chef/1.what-is-a-resource.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/1.what-is-a-resource.pdf
--------------------------------------------------------------------------------
/Chef/2.lab-setup-recipe.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/2.lab-setup-recipe.pdf
--------------------------------------------------------------------------------
/Chef/3.Test-and-Repair.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/3.Test-and-Repair.pdf
--------------------------------------------------------------------------------
/Chef/4. cookbooks-overview.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/4. cookbooks-overview.pdf
--------------------------------------------------------------------------------
/Chef/5. cookbook-components.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/5. cookbook-components.pdf
--------------------------------------------------------------------------------
/Chef/6. lab-set-up-a-webserver.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/6. lab-set-up-a-webserver.pdf
--------------------------------------------------------------------------------
/Chef/7. applying-recipes-and-cookbooks.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/7. applying-recipes-and-cookbooks.pdf
--------------------------------------------------------------------------------
/Chef/8. add-a-template-to-the-recipe.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/8. add-a-template-to-the-recipe.pdf
--------------------------------------------------------------------------------
/Chef/AWS-Setup.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/AWS-Setup.pdf
--------------------------------------------------------------------------------
/Chef/BootStrapNode.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/BootStrapNode.pdf
--------------------------------------------------------------------------------
/Chef/Chef.txt:
--------------------------------------------------------------------------------
--Install Chefdk
curl https://omnitruck.chef.io/install.sh | sudo bash -s -- -P chefdk -c stable -v 0.18.30
--Check the Chefdk version
chef --version
which chef

------------------------First Recipe---------------
Prog1: Create a Chef recipe which creates a file /hello.txt with the content Hello World

Create a file hello.rb and write the following code

file '/hello.txt' do
  content 'Hello World'
end

--Execute the above code
sudo chef-client --local-mode hello.rb

--Result:- Check the /hello.txt file; it should exist with the content Hello World

-----------------Resource Examples

package 'httpd' do
  action :install
end

service 'httpd' do
  action [ :enable, :start ]
end

--------Chef Cookbook Examples

chef --help
chef generate --help
chef generate cookbook --help

Let's create a dir called cookbooks

---Create a cookbook called workstation
chef generate cookbook cookbooks/workstation

Create some recipe in workstation

chef-client -z --runlist "workstation::setup"
chef-client -z -r "recipe[workstation::setup]"

----How to include a recipe in another recipe
include_recipe 'workstation::setup'
chef-client -z -r "recipe[workstation]"

--------------Template Example
chef generate template cookbooks/workstation/ motd

--------------------------------------------------------------------------------
/Chef/chef.txt:
--------------------------------------------------------------------------------
wget https://packages.chef.io/files/stable/chef-workstation/21.2.278/ubuntu/20.04/chef-workstation_21.2.278-1_amd64.deb
dpkg -i chef-workstation_21.2.278-1_amd64.deb
-----Verify the Installation
chef -v
--------Uninstall
sudo dpkg -P chef-workstation

-------------------------------------------------------------------
Create the first recipe
-------------------------------------------------------------------
- Create file hello.rb

file '/tmp/hello.txt' do
  content 'testing'
end

---Execute the script
chef-client -z hello.rb
-------------------------------------------------------------------------

- Install apache (package.rb)
package 'apache2' do
  action :install
end
----Execute Script
chef-client -z package.rb 29 | -------------------------------------------------------------------------- 30 | - UnInstall apache (package.rb) 31 | package 'apache2' do 32 | action :purge 33 | end 34 | ----Execute Script 35 | chef-client -z package.rb 36 | -------------------------------------------------------------------------- 37 | - setup.rb 38 | 39 | package 'tree' do 40 | action :install 41 | end 42 | 43 | package 'ntp' 44 | 45 | file '/etc/motd' do 46 | content 'This server is the property of ...' 47 | end 48 | 49 | service 'ntp' do 50 | action [:enable, :start] 51 | end 52 | 53 | ----Execute Script 54 | chef-client -z setup.rb 55 | --------------------------------------------------------------------------- 56 | 57 | --------Chef Cookbook Examples 58 | 59 | chef --help 60 | chef generate --help 61 | chef generate cookbook --help 62 | mkdir cookbooks 63 | chef generate cookbook cookbooks/workstation 64 | cp setup.rb cookbooks/workstation/recipes/ 65 | chef-client -z --runlist "workstation::setup" 66 | chef-client -z -r "recipe[workstation::setup]" 67 | 68 | ----How to include a recipe in another recipe 69 | open default.rb file and add following code 70 | 71 | include_recipe 'workstation::setup' 72 | 73 | To Execute it run the following command 74 | chef-client -z -r "recipe[workstation]" 75 | 76 | 77 | -------------------------------------------------------------------------------- /Chef/template-files-and-ERB.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Chef/template-files-and-ERB.pdf -------------------------------------------------------------------------------- /DevOpsInterview_ResumeSample-master/Amador_Johandry-Resume.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/DevOpsInterview_ResumeSample-master/Amador_Johandry-Resume.pdf -------------------------------------------------------------------------------- /DevOpsInterview_ResumeSample-master/DevOpsInterviewQuestions_Answers.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/DevOpsInterview_ResumeSample-master/DevOpsInterviewQuestions_Answers.pdf -------------------------------------------------------------------------------- /DevOpsInterview_ResumeSample-master/devops sample res.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/DevOpsInterview_ResumeSample-master/devops sample res.pdf -------------------------------------------------------------------------------- /DevOpsInterview_ResumeSample-master/devops sample resume.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/DevOpsInterview_ResumeSample-master/devops sample resume.docx -------------------------------------------------------------------------------- /DockerCommands/01_Services/files/1.1 Service_Create_Inspect_logs_ls.txt: -------------------------------------------------------------------------------- 1 | docker run -it alpine ping 172.31.15.233 2 | docker service 
--help
docker service create --name myservice -d alpine ping 172.31.15.233
docker service inspect <> | less
docker service logs <>
--------------------------------------------------------------------------------
/DockerCommands/01_Services/files/1.10 ServiceUpdateAndRollback.txt:
--------------------------------------------------------------------------------
docker service create --name redis --replicas 5 --update-delay 10s redis:3.0.6
docker service ls
docker service ps redis
docker service update redis --image redis:3.0.7
docker service update redis --image redis:21
docker service ls
docker service rollback redis
--------------------------------------------------------------------------------
/DockerCommands/01_Services/files/1.2_Service ps.txt:
--------------------------------------------------------------------------------
docker service create --name myservice -d --replicas 4 alpine ping <>
docker service ps myservice
remove the containers on one of the worker nodes and check the status of the service
--------------------------------------------------------------------------------
/DockerCommands/01_Services/files/1.3_DockerSwarmVisualizer.txt:
--------------------------------------------------------------------------------
docker service create \
  --name=viz \
  --publish=8080:8080/tcp \
  --constraint=node.role==manager \
  --mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
  dockersamples/visualizer
--------------------------------------------------------------------------------
/DockerCommands/01_Services/files/1.4Service scale.txt:
--------------------------------------------------------------------------------
docker service scale myservice=2
docker service scale myservice=5
docker service rm myservice
--------------------------------------------------------------------------------
/DockerCommands/01_Services/files/1.5 Service Port Mapping.txt:
--------------------------------------------------------------------------------
docker service create --name webservice -d -p 80:80 nginx
--------------------------------------------------------------------------------
/DockerCommands/01_Services/files/1.6 Service global mode.txt:
--------------------------------------------------------------------------------
docker service create --name webservice -d --mode=global --publish=80:80 nginx
--------------------------------------------------------------------------------
/DockerCommands/01_Services/files/1.7 Service Constraint.txt:
--------------------------------------------------------------------------------
docker service create --name webservice -d --constraint="node.role==manager" --publish=80:80 nginx
docker service scale webservice=2

Check the visualizer

docker service create --name webservice -d --constraint="node.role==worker" --publish 80:80 nginx

--------------------------------------------------------------------------------
/DockerCommands/01_Services/files/1.8 Service Labels.txt:
--------------------------------------------------------------------------------
docker node update --label-add="webserver=true" worker01

docker service create --name webservice -d --constraint="node.labels.webserver==true" --publish 80:80 nginx

vi /etc/docker/daemon.json

{
  "labels": ["name=testserver"]
}

create a service on the labelled server
docker service create --name webservice1 -d
--constraint="engine.labels.name==testserver" --publish 84:80 nginx -------------------------------------------------------------------------------- /DockerCommands/01_Services/files/1.9 Node Availability.txt: -------------------------------------------------------------------------------- 1 | docker node update --availability=pause worker02 2 | 3 | docker node update --availability=active worker02 4 | 5 | docker node update --availability=drain worker02 -------------------------------------------------------------------------------- /DockerCommands/1.1 Swarm Backup and Restore.txt: -------------------------------------------------------------------------------- 1 | systemctl stop docker 2 | tar -zcvf swarm.tar.gz swarm/ 3 | systemctl start docker 4 | docker node ls 5 | systemctl stop docker 6 | rm -fr swarm 7 | tar -xvzf swarm.tar.gz 8 | systemctl start docker -------------------------------------------------------------------------------- /DockerCommands/1.1_Task1.txt: -------------------------------------------------------------------------------- 1 | ---Task:-1 2 | 3 | Create two containers on bridge network which uses front-end application as a docker image "whizlabs/webapp" and another container which refers to "whizlabs/mysql" docker image. Modify the code of webapp such that it uses the mysql connectionstring/database/tables from other running container. 4 | 5 | ---Solution 6 | 7 | docker run --name webapp -it -p 80:80 -d whizlabs/webapp 8 | docker run --name db -it -d whizlabs/mysql -------------------------------------------------------------------------------- /DockerCommands/1.2 DockerStackCommands.txt: -------------------------------------------------------------------------------- 1 | sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 2 | chmod +x /usr/local/bin/docker-compose 3 | docker-compose --version 4 | 5 | #docker-compose.yml 6 | version: '3.3' 7 | 8 | services: 9 | db: 10 | image: mysql:5.7 11 | volumes: 12 | - db_data:/var/lib/mysql 13 | restart: always 14 | environment: 15 | MYSQL_ROOT_PASSWORD: somewordpress 16 | MYSQL_DATABASE: wordpress 17 | MYSQL_USER: wordpress 18 | MYSQL_PASSWORD: wordpress 19 | 20 | wordpress: 21 | depends_on: 22 | - db 23 | image: wordpress:latest 24 | ports: 25 | - "8000:80" 26 | restart: always 27 | environment: 28 | WORDPRESS_DB_HOST: db:3306 29 | WORDPRESS_DB_USER: wordpress 30 | WORDPRESS_DB_PASSWORD: wordpress 31 | WORDPRESS_DB_NAME: wordpress 32 | volumes: 33 | db_data: {} 34 | 35 | 36 | docker-compose up -d 37 | docker container ls 38 | docker-compose scale db=3 39 | docker container ls 40 | docker-compose down 41 | docker container ls 42 | cp docker-compose.yml stack.yml 43 | docker stack --help 44 | docker stack deploy -c stack.yml mystack 45 | docker stack ls 46 | docker stack services mystack 47 | docker service ps mystack_db 48 | docker service ps mystack_wordpress 49 | docker network ls 50 | docker stack ls 51 | docker stack rm mystack 52 | -------------------------------------------------------------------------------- /DockerCommands/1.2_Docker Secret Commands.txt: -------------------------------------------------------------------------------- 1 | printf "password" | docker secret create my_secret_data - 2 | docker service create --name redis --secret my_secret_data redis:alpine 3 | docker service rm redis 4 | docker service create --name redis --secret my_secret_data redis:alpine 5 | docker service ps redis 6 | 7 | docker service 
create --name dbpass --secret my_secret_data -d -e MYSQL_ROOT_PASSWORD_FILE=/run/secrets/my_secret_data mysql
--------------------------------------------------------------------------------
/DockerCommands/1.2_Task2.txt:
--------------------------------------------------------------------------------
---Task:-2

Create a docker service called webapp which refers to whizlabs/webapp (a custom php application) for the front-end, and another service called db (a mysql image) on which webapp depends, using a docker compose file.

---Solution
create a docker-compose.yml file with the following code

version: '3.3'

services:
  db:
    image: whizlabs/mysql

  webapp:
    depends_on:
      - db
    image: whizlabs/webapp
    ports:
      - "80:80"

Now run docker compose with the following command

docker-compose up -d
--------------------------------------------------------------------------------
/DockerCommands/1.3_Task3.txt:
--------------------------------------------------------------------------------
Task:-3

Create docker swarm services using docker stack to implement the web app (whizlabs/webapp) and db app (whizlabs/mysql) on a docker swarm cluster.

Solution:--
cp docker-compose.yml mystack.yml
docker stack deploy -c mystack.yml mystack
--------------------------------------------------------------------------------
/DockerCommands/Docker Assignment.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/DockerCommands/Docker Assignment.docx
--------------------------------------------------------------------------------
/DockerCommands/Docker Swarm.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/DockerCommands/Docker Swarm.pdf
--------------------------------------------------------------------------------
/DockerCommands/docker:
--------------------------------------------------------------------------------
Docker:- Containers

Installation Steps on Ubuntu
1 apt update
2 apt install docker.io -y
3 docker --version
4 docker info
5 systemctl status docker

Installation Steps on CentOS
1 yum update -y
2 yum install -y yum-utils
3 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
4 yum install docker-ce docker-ce-cli containerd.io
5 systemctl status docker
6 systemctl start docker
7 systemctl status docker
8 docker --version
9 docker info
10 docker version

docker --version
docker version
docker info
docker
docker hub ---> docker engine --> Docker images ---> run, stop, delete

docker pull ubuntu
docker images                 //list all the images downloaded on your system
docker run -it -d ubuntu      //-i interactive, -d daemon (detached); a running image is called a container
sudo docker run -m 4m -dit --name web1 nginx    # running the container with a memory limit of 4mb
sudo docker run -c 614 -dit --name db postgres /postgres.sh
sudo docker run -c 410 -dit --name web nginx /nginx.sh
#Will give 60% of CPU shares to the db container (614 is 60% of 1024) and 40% to the web container
docker ps                     // list running containers
docker stop <>                // stop the container with the given container id
docker ps -a                  //list all the containers
docker kill <>                // stop/kill a container forcefully
docker rm <>                  // remove the container
docker exec -it d9a77afafa3b bash
docker run -it --name test ubuntu
create a user using adduser
docker exec -it -u raman test bash
docker rmi 47b19964fb50       //remove the image

Now run commands in the container, which are independent of the commands of the host OS
exit //to exit from the container

Create a Docker Hub account (https://hub.docker.com)

create a user-defined image
docker commit <> <>           // for example, we have created the test image
docker images
run the new docker image and check that the changes are available in it

To remove all the docker containers
docker rm -f $( docker ps -a -q)

install apache2 in the container
apt-get install apache2
service apache2 status
service apache2 start
exit
docker commit 99f528fc4261 ramansharma95/apache

docker run -it -p 82:80 -d ramansharma95/apache
docker exec -it <>
service apache2 start
open the web page; it should show the apache2 web page

push the image to docker hub
docker login
docker push ramansharma95/apache
check in the docker hub

---docker save and load commands

docker save mywebserver > mywebserver.tar
docker load < mywebserver.tar

-----------Create a Local Docker registry

docker container run -d -p 5000:5000 --name local_registry registry
http://:5000/v2/_catalog
docker container inspect local_registry
docker image tag ubuntu localhost:5000/ubuntu:latest
docker image push localhost:5000/ubuntu
docker image pull localhost:5000/ubuntu

--------Docker file

FROM :- defines the base image on which we are building, e.g. FROM ubuntu
ADD :- is used to add files to the container being built, e.g.
ADD . /var/www/html
RUN :- is used to add layers to the base image by installing components. Each RUN statement adds a new layer to the docker image
RUN apt-get update
RUN apt-get -y install apache2
CMD :- is used to run a command on start of the container. These commands run when there is no argument specified while running the container
CMD apachectl -D FOREGROUND
ENTRYPOINT :- is used to strictly run the commands the moment the container initializes. The difference between CMD and ENTRYPOINT is that ENTRYPOINT runs regardless of whether an argument is specified
ENTRYPOINT apachectl -D FOREGROUND
ENV :- is used to define environment variables in the container
ENV name Devops
Create a docker file named Dockerfile

FROM ubuntu
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update
RUN apt-get -y install apache2
ADD . /var/www/html
ENTRYPOINT apachectl -D FOREGROUND
ENV name DEVOPS

# Base image is CentOS 7
FROM centos:7
# Add a new user "raman" with user id 8877
RUN useradd -u 8877 raman
# Change to non-root privilege
USER raman

sudo docker build -t nonrootimage .
127 | docker exec -it test2 bash
128 |
129 | # Example of COPY and ADD
130 | FROM centos:7.4.1708
131 | RUN mkdir /mydata
132 | COPY myfiles /mydata/myfiles
133 | ADD myfiles2 /mydata/myfile2
134 | ADD https://xxx/pip-18.1.tar.gz /mydata
135 | ADD pip-18.1.tar.gz /mydata/pipunpack
136 |
137 | # CMD and ENTRYPOINT
138 | FROM ubuntu
139 | CMD echo "Hello World"
140 |
141 | docker build . -t img1 # create the image from the above Dockerfile
142 | docker run -it img1 # it will print Hello World
143 | docker run -it img1 echo "Hello India" # the argument overwrites CMD and prints Hello India
144 |
145 | FROM ubuntu
146 | ENTRYPOINT ["echo","Hello World"]
147 | docker build . -t img1 # create the image from the above Dockerfile
148 | docker run -it img1 # it will print Hello World
149 | docker run -it img1 echo "Hello India" # the argument does not overwrite ENTRYPOINT; it is appended, so it prints: Hello World echo Hello India
150 |
151 | FROM ubuntu
152 | ENTRYPOINT ["echo"]
153 | CMD ["Hello World"]
154 | docker build . -t img1 # create the image from the above Dockerfile
155 | docker run -it img1 # it will print Hello World (CMD supplies the default argument to ENTRYPOINT)
156 |
157 | docker build . -f abc -t img8 # abc is the file that contains the Dockerfile contents
158 |
159 | Create an html file in the current directory (index.html)
160 | Build the docker file
161 | docker build . -t new_dockerfile
162 | check the docker images
163 | run the new docker image: docker run -it -p 84:80 -d new_dockerfile
164 | docker ps
165 |
166 | Docker Storage
167 | -------------------
168 | Data is removed when a container is deleted, so Docker storage is used to keep the data even if the container is deleted
169 | Types of Docker storage
170 | Docker Volume :- a mountable entity which can be used to store data in the Docker file system
171 | docker volume create my-vol
172 | it is a software construct, not a hardware component; it can be attached and detached
173 | docker volume create demo-vol
174 | docker volume ls // to list the volumes
175 |
176 | docker run -it --mount source=demo-vol,destination=/app -d ubuntu
177 | docker run -it --mount source=demo-vol,destination=/test --mount source=demo-vol1,destination=/test1 -d ubuntu
178 | remove the container
179 | attach the volume to another container
180 | Bind Mounts :- mount a directory from the host machine into the container
181 | To mount a directory from the host machine into the container:
182 | docker run -it -v /home/ubuntu/mount:/demo -d ubuntu
183 |
184 | Linking docker containers
185 | run containers with names
186 | docker run -it --name container1 -d ubuntu
187 | docker run -it --name container3 --link container1 -d ubuntu
188 |
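A quick way to see what --link actually did (a sketch; assumes container1 and container3 from above are still running):

docker exec -it container3 cat /etc/hosts   # should show an entry mapping container1's IP to the name container1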
189 | Monolithic Application
190 | is a single-tiered software application in which different components are combined into a single program that runs on a single platform,
191 | e.g. a single application contains Notification, Payments, Customer Service, etc.
192 | Microservices :- loosely coupled services
193 |
194 | Compose file
195 | sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
196 | chmod +x /usr/local/bin/docker-compose
197 | docker-compose --version
198 |
199 |
200 | YAML
201 | It is a superset of JSON.
202 | There are 2 types of components you need to learn
203 | Map :- key-value pair, e.g. Name: Devops
204 | List :- sequence of objects, e.g. args:
205 |   - sleep
206 |   - "1000"
207 | --------------------------Create a compose file to run the db and web app (the file name should be docker-compose.yml)
208 | version: '3.3'
209 |
210 | services:
211 |   db:
212 |     image: ramansharma95/mysql
213 |   web:
214 |     depends_on:
215 |       - db
216 |     image: ramansharma95/webapp
217 |
218 | To execute the compose file
219 | docker-compose up -d
220 | To remove the services of the docker compose
221 | docker-compose down
222 |
223 |
224 |
225 | Container Orchestration
226 | Applications are typically made up of individually containerized components (microservices) that must be organised at the networking level in order
227 | for the application to run as intended. The process of organising multiple containers in this manner is called container orchestration.
228 |
229 | Docker Swarm
230 | a clustering and scheduling tool for docker containers. With Swarm, IT administrators and developers can establish and manage a cluster of Docker
231 | nodes as a single virtual system.
232 |
233 | Creating a Docker Swarm Cluster
234 |
235 | Docker Swarm is installed along with Docker
236 |
237 | on the master node
238 | docker swarm init --advertise-addr=13.234.113.221
239 |
240 | on the slave machine
241 | docker swarm join --token SWMTKN-1-5puumvk4v75hoyohiu6q9x0xoqneybjzeew851iohy0eq5i8v9-1ee7cfdu6udpjqvz8sxtlncpf 13.234.113.221:2377
242 |
243 | on the master
244 | docker node ls # to view all the joined nodes
245 |
246 |
247 | Service in Docker Swarm
248 | it is an additional layer with the networking part of the container
249 | Containers on the cluster are deployed using services on Docker Swarm. A service is a long-running docker container that can be deployed to any worker node
250 |
251 | docker service create --name nginx --replicas 3 -p 80:80 nginx
252 |
253 | docker service ls
254 |
255 | check the running containers with the docker ps command.
256 | docker service
257 |
258 | Docker Network
259 | bridge (default) :- when the application runs in standalone containers that need to communicate
260 | host :- for standalone containers, removes network isolation between the container and the docker host and uses the host's networking directly (no port forwarding)
261 | overlay :- connects multiple docker daemons and enables swarm services to communicate with each other
262 | Macvlan :- allows you to assign a MAC address to a container so it appears as a physical machine
263 | none :- disables networking
264 |
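For comparison with the default bridge, a minimal user-defined bridge sketch (the names my-bridge and web1 are just examples); a user-defined bridge gives containers DNS resolution by name, which the default bridge does not:

docker network create -d bridge my-bridge
docker run -d --name web1 --network my-bridge nginx
docker run -it --rm --network my-bridge alpine ping -c 2 web1   # resolves web1 by name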
265 | List networks
266 | docker network ls
267 | Create a network
268 | 1. docker network create -d overlay my-overlay
269 | # docker service create --name website --replicas 3 --network my-overlay --publish 80:80 hshar/webapp
270 | docker service create --name website --replicas 3 --network my-overlay --publish 84:80 hshar/webapp
271 |
272 | docker service ls
273 | create the db service
274 | docker service create --name db --replicas 1 --network my-overlay hshar/mysql:5.6
275 | go inside the db container
276 | mysql -u root -p
277 | intelli (the root password for this image)
278 |
279 | Create database docker;
280 | Use docker;
281 |
282 | mysql> create table emp(name varchar(30), phone varchar(30))
283 |     -> ;
284 | Query OK, 0 rows affected (0.01 sec)
285 |
286 | mysql> select * from emp;
287 | Empty set (0.00 sec)
288 |
289 | mysql> select * from emp;
290 | +------+---------+
291 | | name | phone   |
292 | +------+---------+
293 | | xyz  | 5555555 |
294 | +------+---------+
295 | 1 row in set (0.00 sec)
296 |
297 | ----------------------
298 | Stack and Compose
299 | sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
300 | chmod +x /usr/local/bin/docker-compose
301 | docker-compose --version
302 |
303 | #docker-compose.yml
304 | version: '3.3'
305 |
306 | services:
307 |   db:
308 |     image: mysql:5.7
309 |     volumes:
310 |       - db_data:/var/lib/mysql
311 |     restart: always
312 |     environment:
313 |       MYSQL_ROOT_PASSWORD: somewordpress
314 |       MYSQL_DATABASE: wordpress
315 |       MYSQL_USER: wordpress
316 |       MYSQL_PASSWORD: wordpress
317 |
318 |   wordpress:
319 |     depends_on:
320 |       - db
321 |     image: wordpress:latest
322 |     ports:
323 |       - "8000:80"
324 |     restart: always
325 |     environment:
326 |       WORDPRESS_DB_HOST: db:3306
327 |       WORDPRESS_DB_USER: wordpress
328 |       WORDPRESS_DB_PASSWORD: wordpress
329 |       WORDPRESS_DB_NAME: wordpress
330 | volumes:
331 |   db_data: {}
332 |
333 |
334 | docker-compose up -d
335 | docker container ls
336 | docker-compose scale db=3
337 | docker container ls
338 | docker-compose down
339 | docker container ls
340 | cp docker-compose.yml stack.yml
341 | docker stack --help
342 | docker stack deploy -c stack.yml mystack
343 | docker stack ls
344 | docker stack services mystack
345 | docker service ps mystack_db
346 | docker service ps mystack_wordpress
347 | docker network ls
348 | docker stack ls
349 | docker stack rm mystack
350 |
351 | ---------------------service update
352 |
353 | docker service create --name redis --replicas 5 --update-delay 10s redis:3.0.6
354 | docker service ls
355 | docker service ps redis
356 | docker service update redis --image redis:3.0.7
357 | docker service update redis --image redis:21 # a non-existent tag, to demonstrate rollback
358 | docker service ls
359 | docker service rollback redis
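To watch how an update or rollback is progressing (a sketch; assumes the redis service created above and that an update has been started):

docker service inspect --format '{{json .UpdateStatus}}' redis
docker service ps redis   # shows the old tasks shutting down and the new ones starting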
360 |
361 |
362 |
363 |
364 |
-------------------------------------------------------------------------------- /DockerCommands/docker commands.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/DockerCommands/docker commands.docx
-------------------------------------------------------------------------------- /DockerCommands/docker registry.txt: --------------------------------------------------------------------------------
1 | mkdir certs
2 | cd certs
3 | openssl req -newkey rsa:4096 -nodes -sha256 -keyout domain.key -x509 -days 365 -out domain.crt
4 | cd /etc/docker/certs.d
5 | mkdir repo.docker.local:5000
6 | cd repo.docker.local\:5000/
7 | cp /home/ubuntu/certs/domain.crt ca.crt
8 | systemctl restart docker
9 | docker container run -d --name secure_registry -p 5000:5000 -v /home/ubuntu/certs/:/certs -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key registry
10 |
11 | create an image tagged with repo.docker.local:5000 and push it
12 |
13 | Basic Authentication
14 |
15 | mkdir auth
16 | docker container run --entrypoint htpasswd registry:2.7.0 -bnB raman password > auth/htpasswd
17 |
18 |
19 | docker run -d \
20 |   -p 5000:5000 \
21 |   --restart=always \
22 |   --name registry_basic \
23 |   -v /home/ubuntu/auth:/auth \
24 |   -v /home/ubuntu/certs:/certs \
25 |   -e "REGISTRY_AUTH=htpasswd" \
26 |   -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
27 |   -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
28 |   -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
29 |   -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
30 |   registry:2.7.0
31 |
32 | docker login repo.docker.local:5000
33 |
-------------------------------------------------------------------------------- /DockerService.txt: --------------------------------------------------------------------------------
1 | Service
2 | --1
3 | docker run -it alpine ping 172.31.15.233
4 | docker service --help
5 | docker service create --name myservice -d alpine ping 172.31.15.233
6 | docker service inspect <service id> | less
7 | docker service logs <service id>
8 |
9 |
10 | --2
11 | docker service create --name myservice -d --replicas 4 alpine ping 172.31.15.233
12 | docker service ps myservice
13 | remove the containers on one of the worker nodes and check the status of the service
14 |
15 | --3
16 | docker service create \
17 |   --name=viz \
18 |   --publish=8080:8080/tcp \
19 |   --constraint=node.role==manager \
20 |   --mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
21 |   dockersamples/visualizer
22 |
23 | --4
24 | docker service scale myservice=2
25 | docker service scale myservice=5
26 | docker service rm myservice
27 | --5
28 | docker service create --name webservice -d -p 80:80 nginx
29 | now you can access it on any worker node
30 | --6
31 | docker service create --name webservice -d --mode=global --publish=80:80 nginx
32 | remove a worker node and check the visualizer
33 | add it again and you will find that one replica is created on the worker node
34 | --7 labels and constraints
35 |
36 | docker service create --name webservice -d --constraint="node.role==manager" --publish=80:80 nginx
37 | docker service scale webservice=2
38 | Check the visualizer
39 | docker service create --name webservice -d --constraint="node.role==worker" --publish 80:80 nginx
40 |
41 | docker node update --label-add="webserver=true" worker01
42 | docker service create --name webservice -d --constraint="node.labels.webserver==true" --publish 80:80 nginx
43 | if we add the same label to worker02, will the load shift to worker02 or not? It will not, because constraints are only evaluated when tasks are scheduled; you have to set the labels upfront
44 |
45 | now create the labels at the engine level
46 |
47 | go to worker node 2
48 | vi /etc/docker/daemon.json
49 |
50 | {
51 |   "labels": ["name=testserver"]
52 | }
53 |
54 | create a service on the labelled server
55 | docker service create --name webservice1 -d --constraint="engine.labels.name==testserver" --publish 84:80 nginx
56 |
57 | --8 node availability
58 | docker node update --availability=pause worker02
59 | now new containers will not be created on worker02
60 | docker node update --availability=active worker02
61 |
62 | docker node update --availability=drain worker02 # now its tasks are moved to other worker nodes or manager nodes
63 |
64 |
65 |
66 |
67 |
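To verify the drain actually took effect (a quick check, assuming the nodes and webservice from above):

docker node ls                 # worker02 should show AVAILABILITY = Drain
docker service ps webservice   # its tasks should now be running on the remaining nodes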
-------------------------------------------------------------------------------- /ELK Stack.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/ELK Stack.pptx
-------------------------------------------------------------------------------- /Final-Project.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Final-Project.docx
-------------------------------------------------------------------------------- /Git/Git and GitHub Assignment.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Git/Git and GitHub Assignment.docx
-------------------------------------------------------------------------------- /Git/Version Control with Git.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Git/Version Control with Git.pdf
-------------------------------------------------------------------------------- /Git/gitCommands.txt: --------------------------------------------------------------------------------
1 | git --version
2 |
3 | git init
4 | git add .
5 |
6 | git remote add origin https://github.com/ramansharma95/MorningDevops.git
7 | git push origin master
8 | ------Revert back the changes
9 | 1. git checkout -- <file> reverts the changes of that file in the working directory
10 | e.g. make changes to a file, say 1.txt; it will show as a modified, unstaged file. To revert these changes, run the command
11 | git checkout -- 1.txt
12 | git checkout . reverts the changes of all modified, unstaged files
13 | 2. git reset HEAD 1.txt This command will unstage 1.txt from the staging area
14 | git reset HEAD * This command will unstage all the files from the staging area
15 |
16 |
17 |
18 | 1. Create a directory devops and change to directory devops
19 | 2. Create files 1.txt, 2.txt, 3.txt
20 | 3. Create a git repository with git init
21 | 4. Check the status with the git status command
22 | 5. Add 1.txt to git staging: git add 1.txt
23 | 6. Set the email and user: git config --global user.email "ramansharma95@gmail.com" git config --global user.name "ramansharma95@gmail.com"
24 | 7. Check the status; 1.txt should be in staging
25 | 8. Add all the files to staging: git add .
26 | 9. Commit all the changes: git commit -m "Changes are to be saved"
27 | 10. Create an account on http://GitHub.com and create a repository
28 |
29 |
30 | 11. Connect to the remote repository: git remote add origin <>
31 | 12. Push all the files (1.txt, 2.txt, 3.txt) to the central repository: git push origin master
32 | 13. Check the files on GitHub
33 | 14. Modify 2.txt, commit the changes, push it to GitHub, and check on GitHub
34 | 15. Another developer wants to work on this and wants to download all the files from the central repository;
35 | for that, create a directory called test and clone the project into it: git clone <>
36 | 16. Check the files and folders in the test directory; it should have the remote directory
37 | 17. Change to the remotedir folder, create a file 4.txt, and push it to the remote repository (no need to add a remote origin)
38 | 18. Create a directory called newTest, use git init, add some files, and run the git push command <>
39 | 19. Change to dir devops and check the files (it should not have 4.txt)
40 | 20. Update the devops dir from the central repository: git pull origin master; after this, check the files (it should have 4.txt)
41 | 21. Create a 5.txt file in the devops folder and push it to GitHub; the cloned directory will not have 5.txt until we use the pull command
42 | 22. For parallel development, create a new branch feature1: git branch feature1
43 | 23. List all the branches: git branch (the * prefix means the current branch)
44 | 24. Delete a branch: git branch -D feature1
45 | 25. Create branch feature1 again; the last commit of your master becomes the first commit of your feature1
46 |
47 | 26. Switch to the feature1 branch, create a file 6.txt, do the git commit, and run the ls command (you can see 6.txt)
48 | 27. Switch to the master branch (git checkout master) and run the ls command; you will not see 6.txt because it is under the feature1 branch
49 | 28. Switch to the feature1 branch and push the changes to the remote repository; observe that a new branch gets created on GitHub
50 | 29. Check the history of all the commits: git log (it shows the log of the current branch)
51 | 30. Switch to branch feature1, create file 7.txt, modify file 5.txt, and add them to staging; then go to the master branch and you can still see the
52 | file 7.txt and the modified 5.txt in the master branch. To solve this problem, use the git stash command in feature1, and git stash pop to bring the changes back
53 | 31. Create a file called 10.txt and commit the changes; then modify the file, do not commit it, and to discard the new changes use git checkout -- 10.txt
54 | git checkout -- . will discard the changes of all uncommitted files
55 |
56 | 32. Modify the 10.txt file again and commit the changes; to revert these changes use git revert <>; the git revert -n command does not commit the revert
57 | 33. Reset to a particular commit: git reset --hard <>
58 | 34. Find the difference between two commits: git diff <> <>
59 | 35. To find the uncommitted changes versus the committed changes: git diff HEAD .
60 |
61 | 36. Create a directory called merge_test and initialize git in it; then create 1.txt and commit, and after that create 2.txt and commit that as well.
62 | 37. Run the following commands: git log, git log --graph, git log --graph --pretty=oneline
63 | 38. Create a branch test and checkout to the test branch.
64 | 39. Create a file 3.txt in the test branch and commit it. Create 4.txt and commit it
65 | 40. Checkout to branch master, create a file 5.txt, and commit the changes.
66 | 41. Merge the master branch with the test branch using git merge test; check the git log
67 | 42. Create a directory rebase-merge, initialize git, create 1.txt, and commit it.
68 | 43. Create a branch test, create file 2.txt, and commit it in the test branch.
69 | 44. Create a file 3.txt in the master branch and commit it
70 | 45. Checkout to test and now merge the data of master into test using git rebase master
71 | 46. Checkout to master and merge with the test branch using git merge test.
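A condensed, runnable sketch of steps 42-46 (assumes an empty directory and that the default branch is named master, as elsewhere in these notes; file contents and commit messages are placeholders):

mkdir rebase-merge && cd rebase-merge && git init
echo a > 1.txt && git add . && git commit -m "c1"
git branch test && git checkout test
echo b > 2.txt && git add . && git commit -m "c2"
git checkout master
echo c > 3.txt && git add . && git commit -m "c3"
git checkout test
git rebase master                  # replays c2 on top of c3, so test's history becomes linear
git checkout master
git merge test                     # now a fast-forward: no merge commit is created
git log --graph --pretty=oneline   # compare with the merge-only flow of steps 36-41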
72 |
-------------------------------------------------------------------------------- /Jenkins/Jenkins.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Jenkins/Jenkins.pptx
-------------------------------------------------------------------------------- /Jenkins/JenkinsCodeFile.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Jenkins/JenkinsCodeFile.pptx
-------------------------------------------------------------------------------- /Jenkins/JenkinsInstallationSteps.txt: --------------------------------------------------------------------------------
1 | sudo apt update
2 | sudo apt install openjdk-8-jdk
3 |
4 | 1 apt install openjdk-8-jdk -y
5 |
6 | 2 wget -q -O - https://pkg.jenkins.io/debian/jenkins.io.key | sudo apt-key add -
7 | 3 sudo sh -c 'echo deb http://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list'
8 | XX 4 sudo add-apt-repository universe
9 | XX 5 sudo gpg --keyserver http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key --recv-keys https://pkg.jenkins.io/debian/jenkins.io.key
10 | 6 apt-get update
11 | 7 sudo apt-get install jenkins -y
12 |
13 | systemctl status jenkins
14 |
15 | sudo ufw allow 8080
16 |
17 | give sudo permission to the jenkins user
18 |
19 | vi /etc/sudoers
20 | and add the below entry
21 | jenkins ALL=(ALL) NOPASSWD: ALL
22 | and save the file
23 |
-------------------------------------------------------------------------------- /Jenkins/Jenkinsfile: --------------------------------------------------------------------------------
1 | pipeline {
2 |     agent any
3 |     stages {
4 |         stage('One') {
5 |             steps {
6 |                 echo 'Hi, this is Zulaikha from devops'
7 |             }
8 |         }
9 |         stage('Two') {
10 |             steps {
11 |                 input('Do you want to proceed?')
12 |             }
13 |         }
14 |         stage('Three') {
15 |             when {
16 |                 not {
17 |                     branch "master"
18 |                 }
19 |             }
20 |             steps {
21 |                 echo "Hello"
22 |             }
23 |         }
24 |         stage('Four') {
25 |             parallel {
26 |                 stage('Unit Test') {
27 |                     steps {
28 |                         echo "Running the unit test..."
29 |                     }
30 |                 }
31 |                 stage('Integration test') {
32 |                     agent {
33 |                         docker {
34 |                             reuseNode true
35 |                             image 'ubuntu'
36 |                         }
37 |                     }
38 |                     steps {
39 |                         echo "Running the integration test..."
40 |                     }
41 |                 }
42 |             }
43 |         }
44 |     }
45 | }
-------------------------------------------------------------------------------- /Jenkins/Pipeline.txt: --------------------------------------------------------------------------------
1 | pipeline {
2 |     agent any
3 |     stages {
4 |         stage('One') {
5 |             steps {
6 |                 echo 'Hi, this is Zulaikha from devops'
7 |             }
8 |         }
9 |         stage('Two') {
10 |             steps {
11 |                 input('Do you want to proceed?')
12 |             }
13 |         }
14 |         stage('Three') {
15 |             when {
16 |                 not {
17 |                     branch "master"
18 |                 }
19 |             }
20 |             steps {
21 |                 echo "Hello"
22 |             }
23 |         }
24 |         stage('Four') {
25 |             parallel {
26 |                 stage('Unit Test') {
27 |                     steps {
28 |                         echo "Running the unit test..."
29 |                     }
30 |                 }
31 |                 stage('Integration test') {
32 |                     agent {
33 |                         docker {
34 |                             reuseNode true
35 |                             image 'ubuntu'
36 |                         }
37 |                     }
38 |                     steps {
39 |                         echo "Running the integration test..."
40 | } 41 | } 42 | } 43 | } 44 | } 45 | } -------------------------------------------------------------------------------- /Jenkins/scriptPipeline: -------------------------------------------------------------------------------- 1 | node { 2 | for (i=0; i<2; i++) { 3 | stage "Stage #"+i 4 | print 'Hello, world !' 5 | if (i==0) 6 | { 7 | git "https://github.com/onlineTrainingguy/MyProj.git" 8 | echo 'Running on Stage #0' 9 | } 10 | else { 11 | echo 'Declarative pipeline' 12 | echo 'Running on Stage #1' 13 | } 14 | } 15 | } 16 | 17 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent any 3 | stages { 4 | stage('One') { 5 | steps { 6 | echo 'Hi, this is Zulaikha from devops' 7 | } 8 | } 9 | stage('Two') { 10 | steps { 11 | input('Do you want to proceed?') 12 | } 13 | } 14 | stage('Three') { 15 | 16 | steps { 17 | echo "Hello 3" 18 | } 19 | 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /Kubernetes/01_Kubernetes-Introduction.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/01_Kubernetes-Introduction.pdf -------------------------------------------------------------------------------- /Kubernetes/02_Kubernetes-Installation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/02_Kubernetes-Installation.pdf -------------------------------------------------------------------------------- /Kubernetes/05_Kubernetes-Namespaces.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/05_Kubernetes-Namespaces.pdf -------------------------------------------------------------------------------- /Kubernetes/05_Kubernetes-Volumes .pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/05_Kubernetes-Volumes .pdf -------------------------------------------------------------------------------- /Kubernetes/Docker and Kubernetes Case Studies.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Docker and Kubernetes Case Studies.docx -------------------------------------------------------------------------------- /Kubernetes/Exercises.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Exercises.xlsx -------------------------------------------------------------------------------- /Kubernetes/K8sAssignment.txt: -------------------------------------------------------------------------------- 1 | 1. Create a 3 node setup of Kubernetes Master and Slave 2 | 2. Use the docker image which you created Docker 1 assignment 3 | 3. 
Deploy 2 pods with the same container but different index.html content; modify the content from the above GitHub repo to the following:
4 | Pod 1: “Welcome to Pod 1”
5 | Pod 2: “Welcome to Pod 2”
6 | 4. Each pod should have 2 replicas
7 | 5. Create the desired services for these pods
8 | 6. Set up path-based routing on these services, which can be accessed from the outside
9 | “/pod1” -> service 1
10 | “/pod2” -> service
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/1.Pods.txt: --------------------------------------------------------------------------------
1 | *******************************************************************
2 | .
3 | . Demo: POD | Raman Sharma
4 | .
5 |
6 | *******************************************************************
7 |
8 | # 1. https://labs.play-with-k8s.com/
9 |
10 | # nginx-pod.yaml
11 | apiVersion: v1
12 | kind: Pod
13 | metadata:
14 |   name: nginx-pod
15 |   labels:
16 |     app: nginx
17 |     tier: dev
18 | spec:
19 |   containers:
20 |   - name: nginx-container
21 |     image: nginx
22 |
23 | *******************************************************************
24 |
25 | 2. Create and display Pods
26 |
27 | # Create and display PODs
28 | kubectl create -f nginx-pod.yaml
29 | kubectl get pod
30 | kubectl get pod -o wide
31 | kubectl get pod nginx-pod -o yaml
32 | kubectl describe pod nginx-pod
33 |
34 |
35 | *******************************************************************
36 |
37 | 3. Test & Delete
38 |
39 | # To get inside the pod
40 | kubectl exec -it nginx-pod -- /bin/sh
41 |
42 | # Create test HTML page
43 | cat <<EOF > /usr/share/nginx/html/test.html
44 | <html>
45 | <head>
46 | <title>Testing..</title>
47 | </head>
48 | <body>
49 | <h1>Hello, Kubernetes...!</h1>
50 |
51 | <p>Congratulations, you passed :-)</p>
52 | </body>
53 | </html>
54 | EOF
55 | exit
56 |
57 | # Expose PODS using NodePort service
58 | kubectl expose pod nginx-pod --type=NodePort --port=80
59 |
60 | # Display Service and find NodePort
61 | kubectl describe svc nginx-pod
62 | kubectl get svc
63 | # Open Web-browser and access webpage using
64 | http://nodeip:nodeport/test.html
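If you just want the assigned NodePort number rather than hunting through the describe output, jsonpath works too (a sketch, for the nginx-pod service created above):

kubectl get svc nginx-pod -o jsonpath='{.spec.ports[0].nodePort}'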
65 |
66 | # Delete pod & svc
67 | kubectl delete svc nginx-pod
68 | kubectl delete pod nginx-pod
69 |
70 |
71 | *******************************************************************
72 |
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/10.Load-Balancer.txt: --------------------------------------------------------------------------------
1 |
2 | *******************************************************************
3 | *
4 | * Demo: Load Balancer Service | Raman
5 | *
6 | *
7 | *******************************************************************
8 |
9 | # 1. YAML: Deployment & Load Balancer Service
10 |
11 | # Deployment
12 | # controllers/nginx-deploy.yaml
13 | apiVersion: apps/v1
14 | kind: Deployment
15 | metadata:
16 |   name: nginx-deployment
17 |   labels:
18 |     app: nginx-app
19 | spec:
20 |   replicas: 1
21 |   selector:
22 |     matchLabels:
23 |       app: nginx-app
24 |   template:
25 |     metadata:
26 |       labels:
27 |         app: nginx-app
28 |     spec:
29 |       containers:
30 |       - name: nginx-container
31 |         image: nginx:1.7.9
32 |         ports:
33 |         - containerPort: 80
34 |
35 | ------------------------------------
36 |
37 | # Service - LoadBalancer
38 | # lb.yaml
39 | apiVersion: v1
40 | kind: Service
41 | metadata:
42 |   name: my-service
43 |   labels:
44 |     app: nginx-app
45 | spec:
46 |   selector:
47 |     app: nginx-app
48 |   type: LoadBalancer
49 |   ports:
50 |   - nodePort: 31000
51 |     port: 80
52 |     targetPort: 80
53 |
54 |
55 | *******************************************************************
56 | 2. Create & Display: Deployment & Load Balancer Service
57 |
58 | kubectl create -f nginx-deploy.yaml
59 | kubectl create -f lb.yaml
60 | kubectl get pod -l app=nginx-app
61 | kubectl get deploy -l app=nginx-app
62 | kubectl get service -l app=nginx-app
63 | kubectl describe service my-service
64 |
65 | *******************************************************************
66 | 3. Testing Load Balancer Service
67 |
68 | # To get inside the pod
69 | kubectl exec -it [pod-name] -- /bin/sh
70 |
71 | # Create test HTML page
72 | cat <<EOF > /usr/share/nginx/html/test.html
73 | <html>
74 | <head>
75 | <title>Testing..</title>
76 | </head>
77 | <body>
78 | <h1>Hello, Kubernetes...!</h1>
79 |
80 | <p>Load Balancer is working successfully. Congratulations, you passed :-)</p>
81 | </body>
82 | </html>
83 | EOF
84 | exit
85 |
86 | # Test using load-balancer-ip
87 | http://load-balancer-ip
88 | http://load-balancer-ip/test.html
89 |
90 | # Testing using nodePort
91 | http://nodeip:nodeport
92 | http://nodeip:nodeport/test.html
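The same check from a shell, without a browser (a sketch; replace <node-ip> with a real node address, and note that 31000 is the nodePort set in lb.yaml above):

curl http://<node-ip>:31000/test.html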
93 |
94 |
95 | *******************************************************************
96 | 4. Cleanup
97 |
98 | kubectl delete -f nginx-deploy.yaml
99 | kubectl delete -f lb.yaml
100 | kubectl get pod
101 | kubectl get deploy
102 | kubectl get service
103 |
104 |
105 | *******************************************************************
106 |
107 |
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/11.ClusterIP.txt: --------------------------------------------------------------------------------
1 |
2 | *************************************************************************************************************************************************
3 | *
4 | * Demo: ClusterIP Service | Raman
5 | *
6 | *************************************************************************************************************************************************
7 |
8 | OVERVIEW:
9 | ~~~~~~~~~
10 |
11 | Step-1: Set up a "Redis master"
12 |
13 | 1a. Create redis-master "deployment"
14 | 1b. Create redis-master "service"
15 |
16 | --------------------------------------------
17 |
18 | Step-2: Set up a "Redis slave"
19 |
20 | 2a. Create redis-slave "deployment"
21 | 2b. Create redis-slave "service"
22 |
23 | --------------------------------------------
24 |
25 | Step 3: Set up the "guestbook web frontend"
26 |
27 | 3a. Create guestbook web frontend "deployment"
28 | 3b. Expose frontend on an external IP address (LoadBalancer)
29 |
30 | *************************************************************************************************************************************************
31 |
32 | Step-1: Set up a "Redis master"
33 |
34 | a. Create redis-master "deployment"
35 |
36 | # redis-master-deployment.yaml
37 | apiVersion: extensions/v1beta1
38 | kind: Deployment
39 | metadata:
40 |   name: redis-master
41 | spec:
42 |   replicas: 1
43 |   template:
44 |     metadata:
45 |       labels:
46 |         app: redis
47 |         role: master
48 |         tier: backend
49 |     spec:
50 |       containers:
51 |       - name: master
52 |         image: k8s.gcr.io/redis:e2e  # or just image: redis
53 |         resources:
54 |           requests:
55 |             cpu: 100m
56 |             memory: 100Mi
57 |         ports:
58 |         - containerPort: 6379
59 |
60 | -------------------------------------
61 |
62 | b. Create redis-master "service"
63 |
64 | # redis-master-service.yaml
65 | apiVersion: v1
66 | kind: Service
67 | metadata:
68 |   name: redis-master
69 |   labels:
70 |     app: redis
71 |     role: master
72 |     tier: backend
73 | spec:
74 |   type: ClusterIP
75 |   ports:
76 |   - port: 6379
77 |     targetPort: 6379
78 |   selector:
79 |     app: redis
80 |     role: master
81 |     tier: backend
82 |
83 | -------------------------------------
84 |
85 | kubectl create -f redis-master-deployment.yaml
86 | kubectl create -f redis-master-service.yaml
87 | kubectl get deploy
88 | kubectl get svc
89 | kubectl get pods
90 | kubectl logs -f [pod-name]
91 |
92 |
93 | *************************************************************************************************************************************************
94 |
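The point of the ClusterIP service is that other pods reach the master by the DNS name redis-master. A quick check (a sketch; spins up a throwaway busybox pod):

kubectl run -it --rm dns-test --image=busybox --restart=Never -- nslookup redis-master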
95 | Step 2: Set up Redis "slaves"
96 |
97 | 2a. Create redis-slave "deployment"
98 |
99 | # redis-slave-deployment.yaml
100 | apiVersion: extensions/v1beta1
101 | kind: Deployment
102 | metadata:
103 |   name: redis-slave
104 | spec:
105 |   replicas: 2
106 |   template:
107 |     metadata:
108 |       labels:
109 |         app: redis
110 |         role: slave
111 |         tier: backend
112 |     spec:
113 |       containers:
114 |       - name: slave
115 |         image: gcr.io/google_samples/gb-redisslave:v1
116 |         resources:
117 |           requests:
118 |             cpu: 100m
119 |             memory: 100Mi
120 |         env:
121 |         - name: GET_HOSTS_FROM
122 |           value: dns
123 |           # If your cluster config does not include a dns service, then to
124 |           # instead access an environment variable to find the master
125 |           # service's host, comment out the 'value: dns' line above, and
126 |           # uncomment the line below:
127 |           # value: env
128 |         ports:
129 |         - containerPort: 6379
130 |
131 |
132 | -----------------------------------------------
133 |
134 | 2b. Create redis-slave "service"
135 |
136 | # redis-slave-service.yaml
137 | apiVersion: v1
138 | kind: Service
139 | metadata:
140 |   name: redis-slave
141 |   labels:
142 |     app: redis
143 |     role: slave
144 |     tier: backend
145 | spec:
146 |   ports:
147 |   - port: 6379
148 |   selector:
149 |     app: redis
150 |     role: slave
151 |     tier: backend
152 |
153 | --------------------------------------------------
154 |
155 | kubectl create -f redis-slave-deployment.yaml
156 | kubectl create -f redis-slave-service.yaml
157 | kubectl get deploy
158 | kubectl get svc
159 | kubectl get pods
160 |
161 |
162 | *************************************************************************************************************************************************
163 |
164 | Step 3: Set up the guestbook web frontend
165 |
166 | 3a. Create frontend "deployment"
167 |
168 | # frontend-deployment.yaml
169 | apiVersion: extensions/v1beta1
170 | kind: Deployment
171 | metadata:
172 |   name: frontend
173 | spec:
174 |   replicas: 2
175 |   template:
176 |     metadata:
177 |       labels:
178 |         app: guestbook
179 |         tier: frontend
180 |     spec:
181 |       containers:
182 |       - name: php-redis
183 |         image: gcr.io/google-samples/gb-frontend:v4
184 |         resources:
185 |           requests:
186 |             cpu: 100m
187 |             memory: 100Mi
188 |         env:
189 |         - name: GET_HOSTS_FROM
190 |           value: dns
191 |           # If your cluster config does not include a dns service, then to
192 |           # instead access environment variables to find service host
193 |           # info, comment out the 'value: dns' line above, and uncomment the
194 |           # line below:
195 |           # value: env
196 |         ports:
197 |         - containerPort: 80
198 |
199 | --------------------------------------------------
200 |
201 | 3b. Expose frontend on an external IP address
202 |
203 | # frontend-service.yaml
204 | apiVersion: v1
205 | kind: Service
206 | metadata:
207 |   name: frontend
208 |   labels:
209 |     app: guestbook
210 |     tier: frontend
211 | spec:
212 |   # if your cluster supports it, uncomment the following to automatically create
213 |   # an external load-balanced IP for the frontend service.
214 | type: LoadBalancer 215 | ports: 216 | - port: 80 217 | selector: 218 | app: guestbook 219 | tier: frontend 220 | 221 | ---------------------------------------------- 222 | 223 | Create & display "frontend" deployment and service 224 | 225 | kubectl create -f frontend-deployment.yaml 226 | kubectl create -f frontend-service.yaml 227 | kubectl get deploy 228 | kubectl get svc 229 | kubectl get pods 230 | kubectl get service frontend 231 | 232 | [Web-browser] - http://[LB-IP] 233 | 234 | 235 | ******************************************************************* 236 | 237 | 4. Cleanup: 238 | 239 | # Delete redis-master 240 | kubectl delete -f redis-master-deployment.yaml 241 | kubectl delete -f redis-master-service.yaml 242 | 243 | # Delete redis-slave 244 | kubectl delete -f redis-slave-deployment.yaml 245 | kubectl delete -f redis-slave-service.yaml 246 | 247 | # Delete frontend-app 248 | kubectl delete -f frontend-deployment.yaml 249 | kubectl delete -f frontend-service.yaml 250 | 251 | # Display 252 | kubectl get deploy 253 | kubectl get svc 254 | kubectl get pods 255 | 256 | ******************************************************************* 257 | 258 | -------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/12.emptyDir.txt: -------------------------------------------------------------------------------- 1 | ******************************************************************* 2 | . 3 | . Demo: emptyDir | Raman 4 | . 5 | ******************************************************************* 6 | 1. Pod with emptyDir Volume YAML (example) 7 | 8 | # nginx-emptydir.yaml 9 | apiVersion: v1 10 | kind: Pod 11 | metadata: 12 | name: nginx-emptydir 13 | spec: 14 | containers: 15 | - name: nginx-container 16 | image: nginx 17 | volumeMounts: 18 | - name: test-vol 19 | mountPath: /test-mnt 20 | volumes: 21 | - name: test-vol 22 | emptyDir: {} 23 | 24 | ******************************************************************* 25 | 2. Create & Display Pod with emptyDir volume 26 | 27 | kubectl create -f nginx-emptydir.yaml 28 | kubectl get po -o wide 29 | kubectl exec nginx-emptydir df /test-mnt 30 | kubectl describe pod nginx-emptydir 31 | 32 | ******************************************************************* 33 | 3. Cleanup 34 | 35 | kubectl delete po nginx-emptydir 36 | 37 | ******************************************************************* 38 | -------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/13.HostPath.txt: -------------------------------------------------------------------------------- 1 | 2 | ******************************************************************* 3 | * 4 | * Demo: HostPath | Raman Sharma 5 | * 6 | * 7 | ******************************************************************* 8 | 9 | # 1. HostPath YAML file 10 | 11 | apiVersion: v1 12 | kind: Pod 13 | metadata: 14 | name: nginx-hostpath 15 | spec: 16 | containers: 17 | - name: nginx-container 18 | image: nginx 19 | volumeMounts: 20 | - mountPath: /test-mnt 21 | name: test-vol 22 | volumes: 23 | - name: test-vol 24 | hostPath: 25 | path: /test-vol 26 | 27 | 28 | ******************************************************************* 29 | # 2. Create and Display HostPath 30 | 31 | kubectl create -f nginx-hostpath.yaml 32 | kubectl get po 33 | kubectl exec nginx-hostpath df /test-mnt 34 | 35 | ******************************************************************* 36 | 3. 
Test: Creating a "test" file on the underlying host dir & accessing it from the pod
37 |
38 | From HOST:
39 | ~~~~~~~~~~
40 | cd /test-vol
41 | echo "From Host" > from-host.txt
42 | cat from-host.txt
43 |
44 | From POD:
45 | ~~~~~~~~
46 | kubectl exec nginx-hostpath cat /test-mnt/from-host.txt
47 |
48 |
49 | *******************************************************************
50 | 4. Test: Creating a "test" file inside the POD & accessing it from the underlying host dir
51 |
52 | From POD:
53 | ~~~~~~~~~
54 | kubectl exec nginx-hostpath -it -- /bin/sh
55 | cd /test-mnt
56 | echo "From Pod" > from-pod.txt
57 | cat from-pod.txt
58 |
59 | From Host:
60 | ~~~~~~~~~~
61 | cd /test-vol
62 | ls
63 | cat from-pod.txt
64 |
65 |
66 | *******************************************************************
67 | 5. Clean up
68 |
69 | kubectl delete po nginx-hostpath
70 | kubectl get po
71 | ls /test-vol
72 |
73 |
74 | *******************************************************************
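One caveat worth a sketch: hostPath data stays on whichever node happened to run the pod. To make the test above repeatable you can pin the pod to a node with nodeName (worker01 below is a placeholder; pick a real name from kubectl get nodes):

apiVersion: v1
kind: Pod
metadata:
  name: nginx-hostpath
spec:
  nodeName: worker01        # placeholder node name, pins the pod so /test-vol is predictable
  containers:
  - name: nginx-container
    image: nginx
    volumeMounts:
    - mountPath: /test-mnt
      name: test-vol
  volumes:
  - name: test-vol
    hostPath:
      path: /test-vol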
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/14.Dynamic-Provisioning.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Kubernetes Demo Files/14.Dynamic-Provisioning.txt
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/2.ConfigMap.txt: --------------------------------------------------------------------------------
1 |
2 | *************************************************************************************************************************************************
3 | .
4 | . Demo: ConfigMaps | Raman Sharma
5 | .
6 | *************************************************************************************************************************************************
7 | First check that no pods and no ConfigMaps are present (kubectl get configmaps)
8 | Overview:
9 | ---------
10 | 1. Creating Configmap from "multiple files" & Consuming it inside Pod from "volumes"
11 |
12 | 1a. Create Configmap "nginx-configmap-vol" from "multiple files"
13 | 1b. Consume "nginx-configmap-vol" configmap inside Pod from "volumes"
14 | 1c. Create | Display | Validate
15 |
16 | 2. Creating Configmap from "literal values" & Consuming it inside Pod from "environment variables"
17 |
18 | 2a. Create configmap “redis-configmap-env” from "literal values"
19 | 2b. Consume “redis-configmap-env” configmap from “Environment Variables” inside the pod
20 | 2c. Create | Display | Validate
21 |
22 | 3. Cleanup
23 |
24 | 3a. Delete configmaps
25 | 3b. Delete pods
26 | 3c. Validate
27 |
28 | *************************************************************************************************************************************************
29 |
30 | 1. Creating Configmap from "multiple files" & Consuming it inside Pod from "volumes"
31 |
32 |
33 | 1a. Create Configmap "nginx-configmap-vol" from "multiple files":
34 | ------------------------------------------------------------------
35 | echo -n 'Non-sensitive data inside file-1' > file-1.txt
36 | echo -n 'Non-sensitive data inside file-2' > file-2.txt
37 |
38 | kubectl create configmap nginx-configmap-vol --from-file=file-1.txt --from-file=file-2.txt
39 | # rm -f file-1.txt file-2.txt
40 |
41 | kubectl get configmaps
42 | kubectl get configmaps nginx-configmap-vol -o yaml
43 | kubectl describe configmaps nginx-configmap-vol
44 | kubectl create configmap nginx-configmap-vol --from-file=file-1.txt -o yaml --dry-run | kubectl replace -f -
45 | ==========================================================
46 |
47 | 1b. Consume the above "nginx-configmap-vol" configmap inside a Pod from "volumes"
48 | ---------------------------------------------------------------------------
49 |
50 | # nginx-pod-configmap-vol.yaml
51 | apiVersion: v1
52 | kind: Pod
53 | metadata:
54 |   name: nginx-pod-configmap-vol
55 | spec:
56 |   containers:
57 |   - name: nginx-container
58 |     image: nginx
59 |     volumeMounts:
60 |     - name: test-vol
61 |       mountPath: "/etc/non-sensitive-data"
62 |       readOnly: true
63 |   volumes:
64 |   - name: test-vol
65 |     configMap:
66 |       name: nginx-configmap-vol
67 |       items:
68 |       - key: file-1.txt
69 |         path: file-a.txt
70 |       - key: file-2.txt
71 |         path: file-b.txt
72 |
73 |
74 |
75 | ==========================================================
76 |
77 | 1c. Create | Display | Validate:
78 | --------------------------------
79 |
80 | # Create
81 | kubectl create -f nginx-pod-configmap-vol.yaml
82 |
83 | # Display
84 | kubectl get po
85 | kubectl get configmaps
86 | kubectl describe pod nginx-pod-configmap-vol
87 |
88 | # Validate from "inside" the pod
89 | kubectl exec nginx-pod-configmap-vol -it /bin/sh
90 | cd /etc/non-sensitive-data
91 | ls
92 | cat file-a.txt  # prints: Non-sensitive data inside file-1
93 | cat file-b.txt  # prints: Non-sensitive data inside file-2
94 | exit
95 |
96 | (OR)
97 |
98 | # Validate from "outside" the pod
99 | kubectl exec nginx-pod-configmap-vol ls /etc/non-sensitive-data
100 | kubectl exec nginx-pod-configmap-vol cat /etc/non-sensitive-data/file-a.txt
101 | kubectl exec nginx-pod-configmap-vol cat /etc/non-sensitive-data/file-b.txt
102 |
103 |
104 | *************************************************************************************************************************************************
105 |
106 | 2. Creating Configmap from "literal values" & Consuming it inside Pod from "environment variables"
107 |
108 |
109 | 2a. Create configmap “redis-configmap-env” from "literal values"
110 | -----------------------------------------------------------------
111 |
112 | kubectl create configmap redis-configmap-env --from-literal=file.1=file.a --from-literal=file.2=file.b
113 |
114 | kubectl get configmap
115 | kubectl describe configmap redis-configmap-env
116 |
117 | ===============================================================================
118 |
119 | 2b.
Consume “redis-configmap-env” configmap inside pod from “Environment Variables” inside pod 120 | ----------------------------------------------------------------------------------------------- 121 | 122 | # redis-pod-configmap-env.yaml 123 | apiVersion: v1 124 | kind: Pod 125 | metadata: 126 | name: redis-pod-configmap-env 127 | spec: 128 | containers: 129 | - name: redis-container 130 | image: redis 131 | env: 132 | - name: FILE_1 133 | valueFrom: 134 | configMapKeyRef: 135 | name: redis-configmap-env 136 | key: file.1 137 | - name: FILE_2 138 | valueFrom: 139 | configMapKeyRef: 140 | name: redis-configmap-env 141 | key: file.2 142 | restartPolicy: Never 143 | 144 | =============================================================================== 145 | 146 | 2c. Create | Display | Validate: 147 | 148 | # Create 149 | kubectl create -f redis-pod-configmap-env.yaml 150 | 151 | # Display 152 | kubectl get pods 153 | kubectl get configmaps 154 | kubectl describe pod redis-pod-configmap-env 155 | 156 | 157 | # Validate from "inside" the pod 158 | kubectl exec redis-pod-configmap-env -it /bin/sh 159 | env | grep FILE 160 | exit 161 | 162 | (OR) 163 | 164 | # Validate from "outside" the pod 165 | kubectl exec redis-pod-configmap-env env | grep FILE 166 | 167 | ******************************************************************************************************************************************* 168 | #cm.yaml 169 | apiVersion: v1 170 | kind: ConfigMap 171 | metadata: 172 | name: game-demo 173 | data: 174 | # property-like keys; each key maps to a simple value 175 | player_initial_lives: "3" 176 | ui_properties_file_name: "user-interface.properties" 177 | # 178 | # file-like keys 179 | game.properties: | 180 | enemy.types=aliens,monsters 181 | player.maximum-lives=5 182 | user-interface.properties: | 183 | color.good=purple 184 | color.bad=yellow 185 | allow.textmode=true 186 | 187 | kubectl create -f cm.yaml 188 | kubectl get cm 189 | 190 | # To consume the Config map create below pod 191 | #podcm.yaml 192 | apiVersion: v1 193 | kind: Pod 194 | metadata: 195 | name: configmap-demo-pod 196 | spec: 197 | containers: 198 | - name: demo 199 | image: nginx 200 | env: 201 | # Define the environment variable 202 | - name: PLAYER_INITIAL_LIVES # Notice that the case is different here 203 | # from the key name in the ConfigMap. 204 | valueFrom: 205 | configMapKeyRef: 206 | name: game-demo # The ConfigMap this value comes from. 207 | key: player_initial_lives # The key to fetch. 208 | - name: UI_PROPERTIES_FILE_NAME 209 | valueFrom: 210 | configMapKeyRef: 211 | name: game-demo 212 | key: ui_properties_file_name 213 | volumeMounts: 214 | - name: config 215 | mountPath: "/config" 216 | readOnly: true 217 | volumes: 218 | # You set volumes at the Pod level, then mount them into containers inside that Pod 219 | - name: config 220 | configMap: 221 | # Provide the name of the ConfigMap you want to mount. 222 | name: game-demo 223 | # An array of keys from the ConfigMap to create as files 224 | items: 225 | - key: "game.properties" 226 | path: "game.properties" 227 | - key: "user-interface.properties" 228 | path: "user-interface.properties" 229 | 230 | kubectl create -f podcm.yaml 231 | 232 | ************************************************************************************************************************************************* 233 | 234 | 3. 
Cleanup 235 | 236 | # Delete configmaps 237 | kubectl delete configmaps nginx-configmap-vol redis-configmap-env 238 | 239 | # Delete pods 240 | kubectl delete pods nginx-pod-configmap-vol redis-pod-configmap-env 241 | 242 | # Validate 243 | kubectl get pods 244 | kubectl get configmaps 245 | 246 | 247 | ************************************************************************************************************************************************* 248 | 249 | 250 | -------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/3.Secrets.txt: -------------------------------------------------------------------------------- 1 | 2 | ************************************************************************************************************************************************* 3 | . 4 | . Demo: Secrets | Raman Sharma 5 | . 6 | ************************************************************************************************************************************************* 7 | 8 | Overview: 9 | --------- 10 | 1. Create Secret using "kubectl" & Consuming it from "volumes" inside Pod 11 | 12 | 1a. Create secret "nginx-secret-vol" using "Kubectl" 13 | 1b. Consume "nginx-secret-vol" from "volumes" inside Pod 14 | 1c. Create | Display | Validate 15 | 16 | 2. Create Secret "manually" using YAML file & Consuming it from "environment variables" inside Pod 17 | 18 | 2a. Create secret “redis-secret-env” using YAML file: 19 | 2b. Consume “redis-secret-env” secret from “Environment Variables” inside pod 20 | 2c. Create | Display | Validate 21 | 22 | 3. Cleanup 23 | 24 | 3a. Delete secrets 25 | 3b. Delete pods 26 | 3c. Validate 27 | 28 | ************************************************************************************************************************************************* 29 | 30 | # 1. Creating Secret using Kubectl & Consuming it from "volumes" inside Pod 31 | 32 | 33 | 1a. Creating secret using "Kubectl": 34 | ------------------------------------ 35 | echo -n 'admin' > username.txt 36 | echo -n 'pa$$w00rd' > password.txt 37 | 38 | kubectl create secret generic nginx-secret-vol --from-file=username.txt --from-file=password.txt 39 | 40 | # rm -f username.txt password.txt 41 | 42 | kubectl get secrets 43 | kubectl describe secrets nginx-secret-vol 44 | 45 | ========================================================== 46 | 47 | 1b. Consuming "nginx-secret-vol" from "volumes" inside Pod 48 | -------------------------------------------------------- 49 | 50 | #nginx-pod-secret-vol.yaml 51 | apiVersion: v1 52 | kind: Pod 53 | metadata: 54 | name: nginx-pod-secret-vol 55 | spec: 56 | containers: 57 | - name: nginx-container 58 | image: nginx 59 | volumeMounts: 60 | - name: test-vol 61 | mountPath: "/etc/confidential" 62 | readOnly: true 63 | volumes: 64 | - name: test-vol 65 | secret: 66 | secretName: nginx-secret-vol 67 | 68 | ========================================================== 69 | 70 | 1c. 
Create | Display | Validate: 71 | -------------------------------- 72 | 73 | # Create 74 | kubectl create -f nginx-pod-secret-vol.yaml 75 | 76 | # Display 77 | kubectl get po 78 | kubectl get secrets 79 | kubectl describe pod nginx-pod-secret-vol 80 | 81 | # Validate from "inside" the pod 82 | kubectl exec nginx-pod-secret-vol -it /bin/sh 83 | cd /etc/confidential 84 | ls 85 | cat username.txt 86 | cat password.txt 87 | exit 88 | 89 | (OR) 90 | 91 | # Validate from "outside" the pod 92 | kubectl exec nginx-pod-secret-vol ls /etc/confidential 93 | kubectl exec nginx-pod-secret-vol cat /etc/confidential/username.txt 94 | kubectl exec nginx-pod-secret-vol cat /etc/confidential/password.txt 95 | 96 | 97 | ************************************************************************************************************************************************* 98 | 99 | 2. Creating Secret "manually" using YAML file & Consuming it from "environment variables" inside Pod 100 | 101 | 102 | 2a. Creating Secret using YAML file: 103 | ------------------------------------- 104 | 105 | # Encoding secret 106 | echo -n 'admin' | base64 107 | echo -n 'pa$$w00rd' | base64 108 | 109 | # YAML file 110 | # redis-secret-env.yaml 111 | apiVersion: v1 112 | kind: Secret 113 | metadata: 114 | name: redis-secret-env 115 | type: Opaque 116 | data: 117 | username: YWRtaW4= 118 | password: cGEkJHcwMHJk 119 | 120 | kubectl create -f redis-secret-env.yaml 121 | kubectl get secret 122 | kubectl describe secret redis-secret-env 123 | 124 | =============================================================================== 125 | 126 | 2b. Consuming “redis-secret-env” secret from “Environment Variables” inside pod 127 | -------------------------------------------------------------------------------- 128 | 129 | # redis-pod-secret-env.yaml 130 | apiVersion: v1 131 | kind: Pod 132 | metadata: 133 | name: redis-pod-secret-env 134 | spec: 135 | containers: 136 | - name: redis-container 137 | image: redis 138 | env: 139 | - name: SECRET_USERNAME 140 | valueFrom: 141 | secretKeyRef: 142 | name: redis-secret-env 143 | key: username 144 | - name: SECRET_PASSWORD 145 | valueFrom: 146 | secretKeyRef: 147 | name: redis-secret-env 148 | key: password 149 | restartPolicy: Never 150 | 151 | =============================================================================== 152 | 153 | 2c. Create | Display | Validate: 154 | 155 | # Create 156 | kubectl create -f redis-pod-secret-env.yaml 157 | 158 | # Display 159 | kubectl get pods 160 | kubectl get secrets 161 | kubectl describe pod redis-pod-secret-env 162 | 163 | 164 | # Validate from "inside" the pod 165 | kubectl exec redis-pod-secret-env -it /bin/sh 166 | env | grep SECRET 167 | exit 168 | 169 | (OR) 170 | 171 | # Validate from "outside" the pod 172 | kubectl exec redis-pod-secret-env env | grep SECRET 173 | 174 | *************************************************************************** 175 | #Decode the secrets 176 | 177 | kubectl get secret redis-secret-env -o yaml 178 | echo 'cGEkJHcwMHJk' | base64 --decode 179 | ************************************************************************************************************************************************* 180 | 181 | 3. 
Cleanup
182 |
183 | # Delete secrets
184 | kubectl delete secrets nginx-secret-vol redis-secret-env
185 |
186 | # Delete pods
187 | kubectl delete pods nginx-pod-secret-vol redis-pod-secret-env
188 |
189 | # Validate
190 | kubectl get pods
191 | kubectl get secrets
192 |
193 |
194 | *************************************************************************************************************************************************
195 |
196 |
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/4.Replication-Controller.txt: --------------------------------------------------------------------------------
1 |
2 | *******************************************************************
3 | .
4 | . Demo: Replication Controller | Raman Sharma
5 | .
6 |
7 | *******************************************************************
8 |
9 | 1. Replication Controller YAML file
10 |
11 | # nginx-rc.yaml
12 | apiVersion: v1
13 | kind: ReplicationController
14 | metadata:
15 |   name: nginx-rc
16 | spec:
17 |   replicas: 3
18 |   template:
19 |     metadata:
20 |       name: nginx-pod
21 |       labels:
22 |         app: nginx-app
23 |     spec:
24 |       containers:
25 |       - name: nginx-container
26 |         image: nginx
27 |         ports:
28 |         - containerPort: 80
29 |   selector:
30 |     app: nginx-app
31 |
32 |
33 | *******************************************************************
34 | # 2. Create and display
35 |
36 | kubectl create -f nginx-rc.yaml
37 | kubectl get po -o wide
38 | kubectl get po -l app=nginx-app
39 | kubectl get rc nginx-rc
40 | kubectl describe rc nginx-rc
41 |
42 | *******************************************************************
43 | # 3. Reschedule
44 |
45 | kubectl get po -o wide --watch
46 | kubectl get po -o wide
47 | kubectl get nodes
48 |
49 | *******************************************************************
50 | # 4. Scaling up the cluster
51 |
52 | kubectl scale rc nginx-rc --replicas=5
53 | kubectl get rc nginx-rc
54 | kubectl get po -o wide
55 |
56 | *******************************************************************
57 | # 5. Scaling down
58 |
59 | kubectl scale rc nginx-rc --replicas=3
60 | kubectl get rc nginx-rc
61 | kubectl get po -o wide
62 |
63 | *******************************************************************
64 | # 6. Cleanup
65 |
66 | kubectl delete -f nginx-rc.yaml
67 | kubectl get rc
68 | kubectl get po -l app=nginx-app
69 |
70 | *******************************************************************
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/5.ReplicaSet.txt: --------------------------------------------------------------------------------
1 |
2 | *******************************************************************
3 | .
4 | . Demo: ReplicaSet | Raman Sharma
5 | .
6 |
7 | *******************************************************************
8 | ReplicaSet YAML file
9 |
10 | # nginx-rs.yaml
11 | apiVersion: apps/v1
12 | kind: ReplicaSet
13 | metadata:
14 |   name: nginx-rs
15 | spec:
16 |   replicas: 3
17 |   template:
18 |     metadata:
19 |       name: nginx-pod
20 |       labels:
21 |         app: nginx-app
22 |         tier: frontend
23 |     spec:
24 |       containers:
25 |       - name: nginx-container
26 |         image: nginx
27 |         ports:
28 |         - containerPort: 80
29 |   selector:
30 |     matchLabels:
31 |       app: nginx-app
32 |     matchExpressions:
33 |     - {key: tier, operator: In, values: [frontend]}
34 |
35 |
36 | *******************************************************************
37 | # 2.
*******************************************************************
# 2. Create and display the ReplicaSet

kubectl create -f nginx-rs.yaml
kubectl get po -o wide
kubectl get po -l app=nginx-app
kubectl get rs nginx-rs -o wide
kubectl describe rs nginx-rs
kubectl get po -l 'tier in (frontend)'
*******************************************************************
# 3. Automatic Pod Reschedule

kubectl get po -o wide --watch
kubectl delete po [POD_NAME]        # delete one pod; the ReplicaSet replaces it
kubectl get po -o wide
kubectl get nodes

*******************************************************************
# 4. Scale up pods

kubectl scale rs nginx-rs --replicas=5
kubectl get rs nginx-rs -o wide
kubectl get po -o wide

*******************************************************************
# 5. Scale down pods

kubectl scale rs nginx-rs --replicas=3
kubectl get rs nginx-rs -o wide
kubectl get po -o wide

*******************************************************************
# 6. Cleanup

kubectl delete -f nginx-rs.yaml
kubectl get rs
kubectl get po -l app=nginx-app

*******************************************************************

-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/6. Deployment.txt: --------------------------------------------------------------------------------

*******************************************************************
.
.       Demo: Deployment | Raman Sharma
.

*******************************************************************

# 1. Deployment YAML file

# nginx-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  labels:
    app: nginx-app
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx-app
    spec:
      containers:
      - name: nginx-container
        image: nginx:1.7.9
        ports:
        - containerPort: 80
  selector:
    matchLabels:
      app: nginx-app

*******************************************************************
# 2. Create and Display Deployment

kubectl create -f nginx-deploy.yaml
kubectl get deploy -l app=nginx-app
kubectl get rs -l app=nginx-app
kubectl get po -l app=nginx-app
kubectl describe deploy nginx-deploy

*******************************************************************
# 3. Testing: Rollback update

# nginx:1.91 is an intentionally broken tag, used to force a failed rollout
kubectl set image deploy nginx-deploy nginx-container=nginx:1.91 --record
kubectl rollout status deployment/nginx-deploy
kubectl rollout history deployment/nginx-deploy
kubectl rollout undo deployment/nginx-deploy
kubectl rollout status deployment/nginx-deploy
kubectl describe deploy nginx-deploy | grep -i image

*******************************************************************
# 4. Testing: Update version "nginx:1.7.9" to "nginx:1.9.1"

kubectl set image deploy nginx-deploy nginx-container=nginx:1.9.1
kubectl edit deploy nginx-deploy
kubectl rollout status deployment/nginx-deploy
kubectl get deploy

*******************************************************************
# 5. Testing: Scale UP

kubectl scale deployment nginx-deploy --replicas=5
kubectl get deploy
kubectl get po -o wide

*******************************************************************
# 6. Testing: Scale DOWN

kubectl scale deployment nginx-deploy --replicas=3
kubectl get deploy
kubectl get po -o wide

*******************************************************************

# 7. Cleanup

kubectl delete -f nginx-deploy.yaml
kubectl get deploy
kubectl get rs
kubectl get po

*******************************************************************

-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/7-pod-quota-mem-exceed.yaml: --------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: mem-limit
  namespace: quota-demo-ns
spec:
  containers:
  - name: memlimit
    image: polinux/stress
    resources:
      limits:
        memory: "200Mi"
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/7-pod-quota-mem.yaml: --------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: quota-demo-ns
spec:
  containers:
  - image: nginx
    name: nginx
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/7-quota-count.yaml: --------------------------------------------------------------------------------
apiVersion: v1
kind: ResourceQuota
metadata:
  name: quota-demo1
  namespace: quota-demo-ns
spec:
  hard:
    pods: "2"
    configmaps: "1"
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/7-quota-limitrange.yaml: --------------------------------------------------------------------------------
apiVersion: v1
kind: LimitRange
metadata:
  name: mem-limitrange
  namespace: quota-demo-ns
spec:
  limits:
  - default:
      memory: 300Mi
    defaultRequest:
      memory: 50Mi
    type: Container
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/7-quota-mem.yaml: --------------------------------------------------------------------------------
apiVersion: v1
kind: ResourceQuota
metadata:
  name: quota-demo-mem
  namespace: quota-demo-ns
spec:
  hard:
    limits.memory: "500Mi"
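# A short usage sketch for the quota files above. The namespace name
# quota-demo-ns comes from the manifests themselves; the commands assume
# the files sit in the current directory:
kubectl create namespace quota-demo-ns
kubectl create -f 7-quota-mem.yaml
kubectl create -f 7-quota-limitrange.yaml
kubectl describe quota quota-demo-mem -n quota-demo-ns
kubectl create -f 7-pod-quota-mem.yaml
kubectl describe quota quota-demo-mem -n quota-demo-ns   # usage should now show the pod's defaulted memory limit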
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/7.DaemonSet.txt: --------------------------------------------------------------------------------

*************************************************************************************************************************************************
*
*       Demo: DaemonSet | Raman Sharma
*
*************************************************************************************************************************************************

Overview:
~~~~~~~~~~

1. Deploy Pod on "all" worker nodes inside k8s cluster using DaemonSet

   1a. fluentd DaemonSet manifest file
   1b. Create | Display | Validate

2. Deploy Pod on a "subset" of worker nodes inside k8s cluster using DaemonSet

   2a. Attach label to the nodes
   2b. nginx DaemonSet manifest file review
   2c. Create | Display | Validate

3. Cleanup

*************************************************************************************************************************************************


1. Deploy Pod on "all" worker nodes inside k8s cluster using DaemonSet

1a. YAML File:
--------------
# fluentd-ds-allnodes.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-ds
spec:
  template:
    metadata:
      labels:
        name: fluentd
    spec:
      containers:
      - name: fluentd
        image: gcr.io/google-containers/fluentd-elasticsearch:1.20
  selector:
    matchLabels:
      name: fluentd

---------------------------------------------------------------------

1b. Create | Display | Validate

kubectl create -f fluentd-ds-allnodes.yaml
kubectl get po -o wide
kubectl get ds
kubectl describe ds fluentd-ds

*************************************************************************************************************************************************

2. Deploy Pod on a "subset" of worker nodes inside k8s cluster using DaemonSet

----------------------------------------

2a. Attach label to the nodes

kubectl get nodes
kubectl label nodes worker1 worker2 disktype=ssd
kubectl get nodes --show-labels

----------------------------------------

2b. YAML

# nginx-ds-subsetnodes.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
      - name: nginx-container
        image: nginx
      nodeSelector:
        disktype: ssd
  selector:
    matchLabels:
      name: nginx


----------------------------------------

2c. Create | Display | Validate

kubectl create -f nginx-ds-subsetnodes.yaml
kubectl get po -o wide
kubectl get ds
kubectl describe ds nginx-ds

*************************************************************************************************************************************************

3. Cleanup

kubectl delete ds fluentd-ds
kubectl delete ds nginx-ds
kubectl get po

*************************************************************************************************************************************************
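# One cleanup step the demo above leaves out: removing the disktype label that
# was attached in 2a. A trailing dash removes a label:
kubectl label nodes worker1 worker2 disktype-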
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/8.Jobs.txt: --------------------------------------------------------------------------------

*************************************************************************************************************************************************
*
*       Demo: Job | Raman
*
*
*************************************************************************************************************************************************
1. Job manifest file:

# countdown-jobs.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: countdown
spec:
  template:
    metadata:
      name: countdown
    spec:
      containers:
      - name: counter
        image: centos:7
        command:
        - "/bin/bash"
        - "-c"
        - "for i in 9 8 7 6 5 4 3 2 1 ; do echo $i ; done"
      restartPolicy: Never


*************************************************************************************************************************************************
2. Create & Display

kubectl create -f countdown-jobs.yaml
kubectl get jobs
kubectl get po
kubectl describe jobs countdown


*************************************************************************************************************************************************
3. Test

kubectl logs [POD_NAME]


*************************************************************************************************************************************************
4. Cleanup

kubectl delete jobs countdown
kubectl get po


*************************************************************************************************************************************************
************** Cron Job ***********************
# cronjob.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: cronjob
spec:
  schedule: "* * * * *"
  successfulJobsHistoryLimit: 2
  failedJobsHistoryLimit: 1
  suspend: true
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: busybox
            image: busybox
            command: ["echo", "Hello world"]
          restartPolicy: Never

kubectl create -f cronjob.yaml

kubectl patch cronjob cronjob -p '{"spec":{"suspend":false}}'

concurrencyPolicy: Allow | Forbid | Replace
  Allow   - lets multiple runs of the job execute at the same time
  Forbid  - waits for a running job to finish before starting another instance
  Replace - replaces the currently running job with the new one
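# Since the CronJob above starts suspended, a quick way to test it without
# waiting for the schedule (assuming a reasonably recent kubectl) is to
# create a one-off Job from its template:
kubectl create job manual-run --from=cronjob/cronjob
kubectl logs job/manual-run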
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/9.NodePort.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Kubernetes Demo Files/9.NodePort.txt -------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/Kubernetes_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Kubernetes Demo Files/Kubernetes_1.pdf -------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/Kubernetes_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Kubernetes Demo Files/Kubernetes_2.pdf -------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/UI.txt: --------------------------------------------------------------------------------
kubectl create serviceaccount cluster-admin-dashboard-sa
kubectl create clusterrolebinding cluster-admin-dashboard-sa \
  --clusterrole=cluster-admin \
  --serviceaccount=default:cluster-admin-dashboard-sa
TOKEN=$(kubectl describe secret $(kubectl -n kube-system get secret | awk '/^cluster-admin-dashboard-sa-token-/{print $1}') | awk '$1=="token:"{print $2}')
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes Demo Files/rbac.txt: --------------------------------------------------------------------------------
kubectl create ns finance
openssl genrsa -out john.key 2048   # creates a private key
openssl req -new -key john.key -out john.csr -subj "/CN=john/O=javadeveloper"

openssl x509 -req -in john.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -out john.crt -days 500

# Create a role for namespace finance with resource permissions
# role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: finance
  name: deployment-manager
rules:
- apiGroups: ["", "extensions", "apps"]
  # at the HTTP level, resources are addressed by their lowercase plural
  # names, e.g. "deployments", "replicasets", "pods"
  resources: ["deployments", "replicasets", "pods"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]

kubectl create -f role.yaml
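# A quick sanity check of the role before wiring up certificates — kubectl
# auth can-i supports impersonation (user john only becomes meaningful once
# the binding below exists):
kubectl auth can-i create deployments --as=john -n finance
kubectl auth can-i create secrets --as=john -n finance   # expect "no": the role grants nothing on secrets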

# rolebinding.yaml

apiVersion: rbac.authorization.k8s.io/v1
# This role binding allows "john" to manage deployments, replicasets and
# pods in the "finance" namespace.
# It requires the Role named "deployment-manager" created above.
kind: RoleBinding
metadata:
  name: deployment-manager-binding
  namespace: finance
subjects:
# You can specify more than one "subject"
- kind: User
  name: john
  apiGroup: ""
roleRef:
  # "roleRef" specifies the binding to a Role / ClusterRole
  kind: Role   # this must be Role or ClusterRole
  name: deployment-manager   # this must match the name of the Role or ClusterRole you wish to bind to
  apiGroup: ""

kubectl create -f rolebinding.yaml

kubectl config set-credentials john --client-certificate=/home/ubuntu/temp/john.crt --client-key=/home/ubuntu/temp/john.key

kubectl config set-context developer-context --cluster=kubernetes --namespace=finance --user=john

---- Install client
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl

chmod +x ./kubectl

sudo mv ./kubectl /usr/local/bin/kubectl
kubectl version --client


ls ./kube
kubectl --kubeconfig config cluster-info
kubectl --kubeconfig config config view
kubectl --kubeconfig config config view -o jsonpath='{.contexts[*].name}'

kubectl --kubeconfig config get pods -n finance
kubectl --kubeconfig config run nginx-pod --image=nginx -n finance
kubectl --kubeconfig config get pods -n finance
-------------------------------------------------------------------------------- /Kubernetes/KubernetesDashBoard.txt: --------------------------------------------------------------------------------
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.1/aio/deploy/recommended.yaml
kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
kubectl get pods -n kubernetes-dashboard -o wide

--
# Create service account
kubectl create serviceaccount cluster-admin-dashboard-sa
# Bind the cluster-admin role to the service account
kubectl create clusterrolebinding cluster-admin-dashboard-sa \
  --clusterrole=cluster-admin \
  --serviceaccount=default:cluster-admin-dashboard-sa
# Parse the token
TOKEN=$(kubectl describe secret $(kubectl -n kube-system get secret | awk '/^cluster-admin-dashboard-sa-token-/{print $1}') | awk '$1=="token:"{print $2}')

echo $TOKEN
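# If the dashboard Service is left as ClusterIP, one hedged way to reach it
# from a workstation is kubectl proxy (URL path assumes the standard
# kubernetes-dashboard namespace and service name used above):
kubectl proxy
# then browse to:
# http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
# and paste the token printed by "echo $TOKEN" at the login screen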
-------------------------------------------------------------------------------- /Kubernetes/Kubernetes_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Kubernetes_1.pdf -------------------------------------------------------------------------------- /Kubernetes/Kubernetes_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Kubernetes_2.pdf -------------------------------------------------------------------------------- /Kubernetes/Run a Pod on Specific node.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Kubernetes/Run a Pod on Specific node.docx -------------------------------------------------------------------------------- /Maven.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Maven.pdf -------------------------------------------------------------------------------- /Nagios/NRPE Installation: --------------------------------------------------------------------------------
sudo apt update
sudo apt install nagios-nrpe-server nagios-plugins
sudo nano /etc/nagios/nrpe.cfg


TCP connection check (here against the SSH port):
/usr/local/nagios/libexec/check_tcp -H localhost -p 22
-------------------------------------------------------------------------------- /Nagios/Nagios.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Nagios/Nagios.pdf -------------------------------------------------------------------------------- /Nagios/nagiosInstallationSteps.txt: --------------------------------------------------------------------------------
* Starting with the installation
Installing the prerequisites:
$ sudo apt install wget build-essential apache2 php libapache2-mod-php7.2 php-gd libgd-dev unzip postfix

User and group configuration
$ useradd nagios
$ groupadd nagcmd
$ usermod -a -G nagcmd nagios
$ usermod -a -G nagios,nagcmd www-data

Download and extract the Nagios core
$ wget https://assets.nagios.com/downloads/nagioscore/releases/nagios-4.2.0.tar.gz
$ tar -xzf nagios*.tar.gz

hostname nagios-server
vi /etc/hostname
bash

Configure the build with the user and the group created earlier
$ ./configure --with-nagios-group=nagios --with-command-group=nagcmd
$ make all
$ make install
$ make install-commandmode
$ make install-init
$ make install-config
$ /usr/bin/install -c -m 644 sample-config/httpd.conf /etc/apache2/sites-available/nagios.conf

Copy the eventhandlers directory to the nagios directory
$ cp -R contrib/eventhandlers/ /usr/local/nagios/libexec/
$ chown -R nagios:nagios /usr/local/nagios/libexec/eventhandlers

Install the Nagios Plugins
$ wget https://nagios-plugins.org/download/nagios-plugins-2.2.1.tar.gz
$ tar -xzf nagios-plugins*.tar.gz

Install the Nagios plugins with the commands below
$ ./configure --with-nagios-user=nagios --with-nagios-group=nagios --with-openssl
$ make
$ make install

You can find the default configuration of Nagios in /usr/local/nagios/. We will configure Nagios and the Nagios contact. Edit the default nagios configuration with nano:

$ nano -c /usr/local/nagios/etc/nagios.cfg
Uncomment line 51 for the host monitor configuration.
Save and exit.

Add a new folder named servers.
$ mkdir -p /usr/local/nagios/etc/servers

Change the user and group for the new folder to nagios:
$ chown nagios:nagios /usr/local/nagios/etc/servers

Enable Apache modules
$ sudo a2enmod rewrite
$ sudo a2enmod cgi

You can use the htpasswd command to configure a user nagiosadmin for the nagios web interface

$ sudo htpasswd -c /usr/local/nagios/etc/htpasswd.users nagiosadmin

Enable the Nagios virtualhost
$ sudo ln -s /etc/apache2/sites-available/nagios.conf /etc/apache2/sites-enabled/

Start Apache
$ service apache2 restart

When Nagios starts, you may see the following error
Starting nagios (via systemctl): nagios.serviceFailed
DON'T WORRY, WE CAN FIX IT
FOLLOW THE COMMANDS
$ cd /etc/init.d/
If you cannot find the nagios file in that folder:
$ cp /etc/init.d/skeleton /etc/init.d/nagios
$ nano /etc/init.d/nagios
Paste this code at the end of the file
----------------------------------------
DESC="Nagios"
NAME=nagios
DAEMON=/usr/local/nagios/bin/$NAME
DAEMON_ARGS="-d /usr/local/nagios/etc/nagios.cfg"
PIDFILE=/usr/local/nagios/var/$NAME.lock
-------------------------------------------------------------------
Make it executable and start Nagios
$ chmod +x /etc/init.d/nagios
$ service apache2 restart
WE DIDN'T FINISH YET
First we are going to create/change the nagios.service
$ nano /etc/systemd/system/nagios.service
Paste the following code into the file
---------------------------------------------------------------
[Unit]
Description=Nagios
BindTo=network.target

[Install]
WantedBy=multi-user.target

[Service]
User=nagios
Group=nagios
Type=simple
ExecStart=/usr/local/nagios/bin/nagios /usr/local/nagios/etc/nagios.cfg
------------------------------------------------------------------
We need to enable the created nagios.service config
$ systemctl enable /etc/systemd/system/nagios.service
$ service nagios start
---------------------------------------------------------------------------
To check the service is working
$ service nagios status
---------------------------------------------------------------------------
Don't forget to install htop to monitor your memory
$ apt install htop
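Before (re)starting Nagios it is worth validating the configuration first; the standard self-check shipped with Nagios core is:
$ /usr/local/nagios/bin/nagios -v /usr/local/nagios/etc/nagios.cfg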
-------------------------------------------------------------------------------- /Puppet installation: --------------------------------------------------------------------------------
PUPPET INSTALLATION

PUPPET INSTALLATION ON UBUNTU

Installing Puppet Master
Step 1: Run the following commands for installing Puppet Master
$ sudo apt-get update
$ sudo apt-get install wget
$ wget https://apt.puppetlabs.com/puppet-release-bionic.deb
$ sudo dpkg -i puppet-release-bionic.deb
$ sudo apt-get update

$ sudo apt-get install puppet-master
$ sudo systemctl status puppet-master.service

Add the following line to the puppet-master configuration file, then open port 8140 on the Puppet Master's firewall
$ sudo nano /etc/default/puppet-master
JAVA_ARGS="-Xms512m -Xmx512m"
$ sudo systemctl restart puppet-master
$ sudo ufw allow 8140/tcp

Installing Puppet Agent

Step 2: Run the following commands for installing Puppet Agent

$ sudo apt-get update
$ sudo apt-get install wget
$ wget https://apt.puppetlabs.com/puppet-release-bionic.deb
$ sudo dpkg -i puppet-release-bionic.deb
$ sudo apt-get install puppet
$ sudo nano /etc/hosts
add the IP address of the master
$ sudo systemctl start puppet
$ sudo systemctl enable puppet


Step 3: Make changes to the hosts file which exists in /etc/hosts, and add the Puppet
Master IP address along with the name "puppet"

$ sudo nano /etc/hosts

Step 4: Create the following directory path:

$ sudo mkdir -p /etc/puppet/code/environments/production/manifests


Configuring Puppet Slave

Step 1: Add the entry for Puppet Master in /etc/hosts

Step 2: Finally start the Puppet agent by using the following command. Also, enable the
service, so that it starts when the computer starts

$ sudo systemctl start puppet
$ sudo systemctl enable puppet


On Master

$ sudo puppet cert list

Step 2: Finally, sign the listed certificate using the following command:
$ sudo puppet cert sign --all

On the master machine create /etc/puppet/code/environments/production/manifests/site.pp

node default{

  package {'nginx':
    ensure => installed,
  }

  file { '/tmp/status.txt':

    content => 'installed',
    mode => '0644',
  }
}

Go to the client machine and run the command
puppet agent --test


--- Variable

node default{

  package {'nginx':
    ensure => installed,
  }

  $test = "ok"

  file { '/tmp/status.txt':

    content => $test,
    mode => '0644',
  }
}


---------------
Loops
-------------
node default{

  $packages = ['apache2','mysql-server']

  package {$packages:
    ensure => installed,

  }
}
---- Create user
node default{
  user { 'raman':
    ensure => present,
    uid => '1101',
    shell => '/bin/bash',
  }
}
-- Create user with a class
class user {
  user { 'test':
    ensure => present,
  }
}
node default{
  class {user:}
}

--- Create user with a parameter
class user_account ($username){
  user { $username:
    ensure => present,
    uid => '1011',
    shell => '/bin/bash',
    home => "/home/$username",
  }
}
node default {
  class { user_account:
    username => "raman",
  }
}
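Note: the puppet cert list / puppet cert sign commands above are the pre-Puppet-6 workflow; on Puppet 6 and newer they were replaced by the puppetserver ca commands used in the Install Puppet6 notes below:
/opt/puppetlabs/bin/puppetserver ca list
/opt/puppetlabs/bin/puppetserver ca sign --all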
-------------------------------------------------------------------------------- /Puppet/Configuration Management.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Puppet/Configuration Management.pdf -------------------------------------------------------------------------------- /Puppet/Install Puppet6.txt: --------------------------------------------------------------------------------

Step 1
Install the ntp and ntpdate packages to sync date and time on your servers

sudo apt update
sudo apt install -y ntp ntpdate
sudo ntpdate -u 0.ubuntu.pool.ntp.org

Step 2

On the master, edit the hosts file and add the IP address of the master itself, naming it puppet

Step 3

On the agent machines add the master IP as puppet and the agent's IP address as puppet-agent

Step 4

Follow these steps on both the master and the slave machines.
Puppet uses port 8140 to communicate through SSL. Open the port using the ufw command:

ufw enable
ufw allow 8140

Step 5 (execute the commands below on master and slave machines)
Add the puppet6 repository on all the machines

apt update
wget https://apt.puppetlabs.com/puppet6-release-bionic.deb
dpkg -i puppet6-release-bionic.deb
apt update

Step 6 (execute command on Master Server)

apt install -y puppetserver

Step 7 (Master Server)

After the installation, change the memory allocation for Puppet Server. The default setting is 2GB. Set it to 1GB or 512MB according to the memory allocated to your VM. Open the default puppet server file and change the following line

vi /etc/default/puppetserver
Change
JAVA_ARGS="-Xms2g -Xmx2g"

to

JAVA_ARGS="-Xms512m -Xmx512m"

and save the changes

Step 8 (Master Server)

Edit the puppet.conf file and add the following lines to it

vi /etc/puppetlabs/puppet/puppet.conf

[main]
certname = puppet
server = puppet
environment = production
runinterval = 15m

Step 9 (Master Server)

Now, set up the puppetserver certificate.

/opt/puppetlabs/bin/puppetserver ca setup

Step 10 (Master Server)

Start and enable the puppet server
Syntax:

systemctl start puppetserver
systemctl enable puppetserver

Step 11 (Agent Node)

Install Puppet Agent
apt install -y puppet-agent

Step 12 (Agent Node)

Edit the puppet.conf file on the agent machine and add the following lines to it
Syntax:

vi /etc/puppetlabs/puppet/puppet.conf

[main]
certname = puppetagent
server = puppet
environment = production
runinterval = 15m

Step 13 (Agent Node)

Start and enable the puppet agent on the agent machine
Syntax:

/opt/puppetlabs/bin/puppet resource service puppet ensure=running enable=true


Step 14 (Master Node)

First list all the certificates waiting to be signed on the master machine

Syntax: /opt/puppetlabs/bin/puppetserver ca list

You can choose to either sign the certificates individually or sign them all together
Syntax:
/opt/puppetlabs/bin/puppetserver ca sign

or

/opt/puppetlabs/bin/puppetserver ca sign --all

Step 15 (Agent Node)

To verify, open the agent and use the test command
Syntax:

/opt/puppetlabs/bin/puppet agent --test
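A hedged quick check once the agent is signed — Facter ships with the agent package and shows the facts the master will see:
/opt/puppetlabs/bin/facter os.family
/opt/puppetlabs/bin/facter networking.ip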
-------------------------------------------------------------------------------- /Puppet/Modules.txt: --------------------------------------------------------------------------------
puppet module generate rp-createuser
# init.pp
class createuser {
  user { 'user1':
    ensure => present,
  }

}


# site.pp
node default {
  include 'createuser'
}

apt install pdk
pdk new module module1
cd module1
cd manifests

vi init.pp
class module1 {

  package { 'apache2':
    ensure => present,
  }
}

pdk build module1
/opt/puppetlabs/bin/puppet module install /etc/puppetlabs/code/environments/production/module1/pkg/ubuntu-module1-0.1.0.tar.gz

# site.pp
class { 'module1': }

-------------------------------------------------------------------------------- /Puppet/Puppet Architecture.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Puppet/Puppet Architecture.pdf -------------------------------------------------------------------------------- /Puppet/classExample.txt: --------------------------------------------------------------------------------
# A class with no parameters


class puppet_user{
  user { 'puppet_user':
    ensure => present,
    uid => 1001,
    home => '/home/puppet_user'
  }
}
node default{
  include puppet_user
}


# A class with parameters


class puppet_user($username){
  user { $username:
    ensure => present,
    uid => 1001,
    home => "/home/${username}"   # double quotes so the variable interpolates
  }
}
# invoke the class
node default{
  class { puppet_user:
    username => 'puppet_user',
  }
}

# Class example with an if condition and multiple parameters
class user($username, $test) {

  user { $username:
    ensure => present,
    managehome => true,
  }
  if $test == 'testing' {
    file { '/tmp/2.txt':
      content => $test,
    }
  }
  else
  {
    file { '/tmp/3.txt':
      content => $test,
    }

  }
}

node default {
  class { 'user':
    username => 'raman',
    test => 'notesting' ,
  }

}

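# One thing the class examples above do not show is resource ordering. A
# minimal sketch using the require/notify metaparameters (package, file and
# service names are illustrative, not from the course files):
class webserver {
  package { 'nginx':
    ensure => installed,
  }
  file { '/etc/nginx/nginx.conf':
    ensure  => file,
    require => Package['nginx'],   # install the package before managing the file
    notify  => Service['nginx'],   # restart nginx whenever this file changes
  }
  service { 'nginx':
    ensure => running,
    enable => true,
  }
}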
-------------------------------------------------------------------------------- /Puppet/puppetInstallation: --------------------------------------------------------------------------------
Puppet
---> It is a configuration management tool available as open-source and Enterprise versions. It runs on Unix-like systems and Windows systems.
---> Puppet is produced by Puppet Labs, founded by Luke Kanies in 2005.
---> It is written in Ruby and released as free software under the GNU GENERAL PUBLIC LICENSE until version 2.7.0 and the Apache License after that.
---> Puppet is designed to manage the configuration of Unix-like and Microsoft Windows systems.

How Puppet works
The information is stored in files called "Puppet manifests" with the extension ".pp". Puppet discovers the system information via a utility called Facter and compiles the Puppet manifests into a system-specific catalog containing resources and resource dependencies, which are applied against the target system. Any actions taken by Puppet are then reported.

Puppet Master
It is the service that runs on the main server and manages all the clients to deploy, configure and maintain the infrastructure.
Puppet Agent
Puppet agent is a service that runs on the client. It requests the catalog from the Puppet master and applies it by checking each resource the catalog describes. If it finds that any resource is not in the desired state, it makes the changes necessary to correct it. After applying the catalog, the agent submits a report to the Puppet Master.
Catalog
It is a document that describes the desired state for one specific server. It lists all the resources that need to be managed, as well as dependencies between them.

Puppet agent nodes and the Puppet master communicate via HTTPS with client verification. The Puppet master provides an HTTP interface with various endpoints available. When requesting or submitting anything to the master, the agent makes an HTTPS request to one of the endpoints.

Manifests
are the files with extension ".pp" where we declare all resources to be checked or to be changed. Resources may be files, packages etc.

Resource Types
A type of package or service or file or user or mount

Syntax:
type{ 'title':
  argument => value,
  otherarg => value
}

node default {
  $mypackages = [ 'apache2', 'sudo', 'screen' ]

  package { $mypackages: ensure => 'installed' }
}

node default {

  # creating the directory called test
  file{ '/tmp/test':
    ensure => 'directory'
  }

}


node default {

  # creating the directory called test1
  file{ '/tmp/test1':
    ensure => 'directory',
    owner => 'root',
    group => 'root',
    mode => '0777',
  }

}

node default {

  # remove the given file
  tidy { '/tmp/3.txt': }
}

eg:
Ex1 Verify nginx is installed

package{ 'nginx':
  ensure => present,
}
Ex2 Create a file /tmp/file1.txt

file{'file1':
  path=>'/tmp/file1.txt'
}
Ex3 Start a service

service{ 'httpd':
  ensure=>running,
  enable=>true
}
----

Classes
Classes are groups of different resources.

class directories {

  # create a directory
  file { '/etc/site-conf':
    ensure => 'directory',
  }

  # a fuller example, including permissions and ownership
  file { '/var/log/admin-app-log':
    ensure => 'directory',
    owner => 'root',
    group => 'wheel',
    mode => '0750',
  }

  # this example creates a file
  file { '/etc/site-conf/':
    ensure => 'present',
  }
}
Resource type Reference
puppet describe file
puppet describe --list



------------------ PUPPET INSTALLATION ON UBUNTU ------------

Installing Puppet Master
Step 1: Run the following commands for installing Puppet Master
$ sudo apt-get update
$ sudo apt-get install wget
$ wget https://apt.puppetlabs.com/puppet-release-bionic.deb
$ sudo dpkg -i puppet-release-bionic.deb
$ sudo apt-get update

$ sudo apt-get install puppet-master
$ sudo systemctl status puppet-master.service

Add the following line to the puppet-master configuration file, then open port 8140 on the Puppet Master's firewall
$ sudo nano /etc/default/puppet-master
JAVA_ARGS="-Xms512m -Xmx512m"
$ sudo systemctl restart puppet-master
$ sudo ufw allow 8140/tcp

Installing Puppet Agent

Step 2: Run the following commands for installing Puppet Agent

$ sudo apt-get update
$ sudo apt-get install wget
$ wget https://apt.puppetlabs.com/puppet-release-bionic.deb
$ sudo dpkg -i puppet-release-bionic.deb
$ sudo apt-get install puppet
$ sudo nano /etc/hosts
add the IP address of the master
$ sudo systemctl start puppet
$ sudo systemctl enable puppet

Step 3: Make changes to the hosts file which exists in /etc/hosts, and add the Puppet
Master IP address along with the name "puppet", e.g. (the IP address is illustrative):
192.168.1.10    puppet

$ sudo nano /etc/hosts

Step 4: Create the following directory path:

$ sudo mkdir -p /etc/puppet/code/environments/production/manifests


Configuring Puppet Slave

Step 1: Add the entry for Puppet Master in /etc/hosts

Step 2: Finally start the Puppet agent by using the following command. Also, enable the
service, so that it starts when the computer starts

$ sudo systemctl start puppet
$ sudo systemctl enable puppet


On Master

$ sudo puppet cert list

Step 2: Finally, sign the listed certificate using the following command:
$ sudo puppet cert sign --all

On the master machine create /etc/puppet/code/environments/production/manifests/site.pp

node default{

  package {'nginx':
    ensure => installed,
  }

  file { '/tmp/status.txt':

    content => 'installed',
    mode => '0644',
  }
}

Go to the client machine and run the command
puppet agent --test

----------- Find the resource types
puppet resource --types
puppet resource file /tmp/1.txt

---- To set the running interval on the client machine
By default the running interval on the client is 30 mins, but we can change it by editing /etc/puppet/puppet.conf
[agent]
server=puppet
runinterval=1m

Classes

in site.pp
#--- Create testuser
class addusers
{
  user { 'testuser':
    ensure=>present
  }
}
node default {
  class { addusers:}
}
--------------
Facts
node default

{
  $message=$facts['os']['family'] ? {
    'RedHat'=> 'running redhat',
    default=> 'running somewhere',
  }
  notify { $message: }
}

------
mkdir -p /etc/facter/facts.d
cd /etc/facter/facts.d/
create a file customfact.txt and add the content
customfact="hello world"

Now run the following command
facter customfact
---------
Templates
# Create file test.epp

<% | String $text, Boolean $bool | -%>
Text value <%= $text %>

<% if $bool { -%>
Bool has true value
<% } -%>

To validate the syntax
puppet epp validate test.epp
To run the template
puppet epp render test.epp --values '{ text => "Hello world", bool => true }'

Modules:-
A module is a collection of manifests with data (such as facts, files and templates), organized in a specific directory structure. Modules are useful to organize Puppet code because they allow you to split the code into multiple manifests. It is considered best practice to use modules to organize manifests.
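Before letting agents pull a new site.pp, the syntax can be checked on the master; assuming the paths used above:
puppet parser validate /etc/puppet/code/environments/production/manifests/site.pp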
-------------------------------------------------------------------------------- /Puppet_Intro_Installation.txt: --------------------------------------------------------------------------------
Puppet
---> It is a configuration management tool available as open-source and Enterprise versions. It runs on Unix-like systems and Windows systems.
---> Puppet is produced by Puppet Labs, founded by Luke Kanies in 2005.
---> It is written in Ruby and released as free software under the GNU GENERAL PUBLIC LICENSE until version 2.7.0 and the Apache License after that.
---> Puppet is designed to manage the configuration of Unix-like and Microsoft Windows systems.

How Puppet works
The information is stored in files called "Puppet manifests" with the extension ".pp". Puppet discovers the system information via a utility called Facter and compiles the Puppet manifests into a system-specific catalog containing resources and resource dependencies, which are applied against the target system. Any actions taken by Puppet are then reported.

Puppet Master
It is the service that runs on the main server and manages all the clients to deploy, configure and maintain the infrastructure.
Puppet Agent
Puppet agent is a service that runs on the client. It requests the catalog from the Puppet master and applies it by checking each resource the catalog describes. If it finds that any resource is not in the desired state, it makes the changes necessary to correct it. After applying the catalog, the agent submits a report to the Puppet Master.
Catalog
It is a document that describes the desired state for one specific server. It lists all the resources that need to be managed, as well as dependencies between them.

Puppet agent nodes and the Puppet master communicate via HTTPS with client verification. The Puppet master provides an HTTP interface with various endpoints available. When requesting or submitting anything to the master, the agent makes an HTTPS request to one of the endpoints.

Manifests
are the files with extension ".pp" where we declare all resources to be checked or to be changed. Resources may be files, packages etc.

Resource Types
A type of package or service or file or user or mount

Syntax:
type{ 'title':
  argument => value,
  otherarg => value
}

node default {
  $mypackages = [ 'apache2', 'sudo', 'screen' ]

  package { $mypackages: ensure => 'installed' }
}

node default {

  # creating the directory called test
  file{ '/tmp/test':
    ensure => 'directory'
  }

}


node default {

  # creating the directory called test1
  file{ '/tmp/test1':
    ensure => 'directory',
    owner => 'root',
    group => 'root',
    mode => '0777',
  }

}

node default {

  # remove the given file
  tidy { '/tmp/3.txt': }
}

eg:
Ex1 Verify nginx is installed

package{ 'nginx':
  ensure => present,
}
Ex2 Create a file /tmp/file1.txt

file{'file1':
  path=>'/tmp/file1.txt'
}
Ex3 Start a service

service{ 'httpd':
  ensure=>running,
  enable=>true
}
----

Classes
Classes are groups of different resources.

class directories {

  # create a directory
  file { '/etc/site-conf':
    ensure => 'directory',
  }

  # a fuller example, including permissions and ownership
  file { '/var/log/admin-app-log':
    ensure => 'directory',
    owner => 'root',
    group => 'wheel',
    mode => '0750',
  }

  # this example creates a file
  file { '/etc/site-conf/':
    ensure => 'present',
  }
}
Resource type Reference
puppet describe file
puppet describe --list



------------------ PUPPET INSTALLATION ON UBUNTU ------------

Installing Puppet Master
Step 1: Run the following commands for installing Puppet Master
$ sudo apt-get update
$ sudo apt-get install wget
$ wget https://apt.puppetlabs.com/puppet-release-bionic.deb
$ sudo dpkg -i puppet-release-bionic.deb
$ sudo apt-get update

$ sudo apt-get install puppet-master
$ sudo systemctl status puppet-master.service

Add the following line to the puppet-master configuration file, then open port 8140 on the Puppet Master's firewall
$ sudo nano /etc/default/puppet-master
JAVA_ARGS="-Xms512m -Xmx512m"
$ sudo systemctl restart puppet-master
$ sudo ufw allow 8140/tcp

Installing Puppet Agent

Step 2: Run the following commands for installing Puppet Agent

$ sudo apt-get update
$ sudo apt-get install wget
$ wget https://apt.puppetlabs.com/puppet-release-bionic.deb
$ sudo dpkg -i puppet-release-bionic.deb
$ sudo apt-get install puppet
$ sudo nano /etc/hosts
add the IP address of the master
$ sudo systemctl start puppet
$ sudo systemctl enable puppet


Step 3: Make changes to the hosts file which exists in /etc/hosts, and add the Puppet
Master IP address along with the name "puppet"

$ sudo nano /etc/hosts

Step 4: Create the following directory path:

$ sudo mkdir -p /etc/puppet/code/environments/production/manifests


Configuring Puppet Slave

Step 1: Add the entry for Puppet Master in /etc/hosts

Step 2: Finally start the Puppet agent by using the following command. Also, enable the
service, so that it starts when the computer starts

$ sudo systemctl start puppet
$ sudo systemctl enable puppet


On Master

$ sudo puppet cert list

Step 2: Finally, sign the listed certificate using the following command:
$ sudo puppet cert sign --all

On the master machine create /etc/puppet/code/environments/production/manifests/site.pp

node default{

  package {'nginx':
    ensure => installed,
  }

  file { '/tmp/status.txt':

    content => 'installed',
    mode => '0644',
  }
}

Go to the client machine and run the command
puppet agent --test

----------- Find the resource types
puppet resource --types
puppet resource file /tmp/1.txt

---- To set the running interval on the client machine
By default the running interval on the client is 30 mins, but we can change it by editing /etc/puppet/puppet.conf
[agent]
server=puppet
runinterval=1m

Classes

in site.pp
#--- Create testuser
class addusers
{
  user { 'testuser':
    ensure=>present
  }
}
node default {
  class { addusers:}
}
--------------
Facts
node default

{
  $message=$facts['os']['family'] ? {
    'RedHat'=> 'running redhat',
    default=> 'running somewhere',
  }
  notify { $message: }
}

------
mkdir -p /etc/facter/facts.d
cd /etc/facter/facts.d/
create a file customfact.txt and add the content
customfact="hello world"

Now run the following command
facter customfact
---------
Templates
# Create file test.epp

<% | String $text, Boolean $bool | -%>
Text value <%= $text %>

<% if $bool { -%>
Bool has true value
<% } -%>

To validate the syntax
puppet epp validate test.epp
To run the template
puppet epp render test.epp --values '{ text => "Hello world", bool => true }'

Modules:-
A module is a collection of manifests with data (such as facts, files and templates), organized in a specific directory structure. Modules are useful to organize Puppet code because they allow you to split the code into multiple manifests. It is considered best practice to use modules to organize manifests.
-------------------------------------------------------------------------------- /SaltStack_Notes.txt: --------------------------------------------------------------------------------
----------------- SALT STACK ---------
It is a configuration management tool:
a Python-based, open-source remote execution tool.
Remote execution means running commands on various machines in parallel, with a flexible targeting system.

It establishes the client-server model quickly, easily and securely within a given policy.

-------- SALT STACK Components -----------

Master
Minion -- client (OS: Red Hat, Ubuntu, ...)
Grains -- static information about a minion (OS, memory, serial number etc.)
Execution Modules -- ad-hoc commands executed from the command line against one or more target minions

----- Comparison
Scalability: Puppet, SaltStack, Ansible and Chef are all highly scalable.
Ease of setup: Puppet, SaltStack and Chef have master and agent components, but Ansible needs only the master node.
Availability: Puppet has a multi-master architecture.
    SaltStack also supports multi-master.
    Ansible runs with a single active node, called the primary instance; if the primary instance is down, there
    is a secondary instance to take its place.
    Chef: primary and backup server.
Management: Puppet: Puppet DSL.
    SaltStack: easy to learn.
    Ansible: YAML syntax to learn Ansible.
    Chef: a little difficult compared to the other CM tools.
Interoperability:
    Puppet: Linux, Windows.
    SaltStack: the master runs on Linux but clients can be Windows/Linux.
    Ansible: supports Windows but the Ansible server has to be Linux.
    Chef: the primary server runs on Linux and clients can be Windows.
Configuration Language:
    Puppet: DSL
    SaltStack: YAML
    Ansible: YAML
    Chef: DSL (Ruby)


The highest score is given to Ansible.

Install Salt Stack

Master:

apt install python-pip
pip install salt
To verify

salt --version

Minion:

apt install python-pip
pip install salt

salt --version

write the following configuration in the /etc/salt/minion file
master: <master ip>
id: min1


----- On the master machine
To run the master component
salt-master -d
salt-master -l debug
Create a new window to run salt stack ad-hoc commands
salt-key -L    # this will show the id of the minion under unaccepted keys
salt-key -a min1    # to accept the key

salt 'min*' test.ping
salt 'min*' cmd.run 'pwd'
salt 'min*' file.touch '/tmp/1.txt'
salt 'min*' file.write '/tmp/1.txt' 'salt stack'
salt 'min1' grains.items




--- On the minion
salt-minion -l debug
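# Beyond name globs, minions can also be targeted by the grains shown with
# grains.items; a short sketch (the grain value is illustrative):
salt -G 'os:Ubuntu' test.ping
salt -G 'os:Ubuntu' pkg.install htop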
-------------------------------------------------------------------------------- /Splunk.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/Splunk.pptx -------------------------------------------------------------------------------- /docker-compose.yml: --------------------------------------------------------------------------------
version: '2.2'

services:

  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.5.4
    container_name: elasticsearch
    environment:
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - esdata1:/usr/share/elasticsearch/data
    ports:
      - 9200:9200

  kibana:
    image: docker.elastic.co/kibana/kibana:6.5.4
    container_name: kibana
    environment:
      ELASTICSEARCH_URL: "http://elasticsearch:9200"
    ports:
      - 5601:5601
    depends_on:
      - elasticsearch

volumes:
  esdata1:
    driver: local
-------------------------------------------------------------------------------- /examplesplunk.txt: --------------------------------------------------------------------------------
index="history"| stats count by student,maths,physics,chemistry,computer | fields - count | addtotals | addcoltotals
-------------------------------------------------------------------------------- /javaprogs/Testing/pom.xml: --------------------------------------------------------------------------------
<project xmlns="http://maven.apache.org/POM/4.0.0">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.cognixia</groupId>
  <artifactId>Testing</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.12</version>
      <scope>test</scope>
    </dependency>
  </dependencies>
</project>
-------------------------------------------------------------------------------- /javaprogs/Testing/src/test/java/com/cognixia/AllTests.java: --------------------------------------------------------------------------------
package com.cognixia;

import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.junit.runners.Suite.SuiteClasses;

@RunWith(Suite.class)
@SuiteClasses({ unitest.class, unitest2.class })
public class AllTests {

}
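// With the pom above, the suite runs from Maven inside the javaprogs/Testing
// directory (surefire picks up the AllTests suite class automatically):
//   mvn test
//   mvn -Dtest=AllTests test    # run just the suite class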
-------------------------------------------------------------------------------- /javaprogs/Testing/src/test/java/com/cognixia/unitest.java: --------------------------------------------------------------------------------
package com.cognixia;

import static org.junit.Assert.*;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class unitest {


	@Test
	public void test() {
		int actual = 1;
		int expected = 1;

		assertEquals(expected, actual);
	}

	@Test
	public void test1() {
		int actual = 1;
		int expected = 2;

		// intentionally failing assertion: 2 != 1
		assertEquals(expected, actual);
	}
}
-------------------------------------------------------------------------------- /javaprogs/Testing/src/test/java/com/cognixia/unitest2.java: --------------------------------------------------------------------------------
package com.cognixia;

import static org.junit.Assert.*;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class unitest2 {



	@Test
	public void test() {
		int actual = 1;
		int expected = 1;

		assertEquals(expected, actual);
	}

	@Test
	public void test1() {
		int actual = 1;
		int expected = 2;

		// intentionally failing assertion: 2 != 1
		assertEquals(expected, actual);
	}
}
-------------------------------------------------------------------------------- /javaprogs/Testing/target/classes/META-INF/MANIFEST.MF: --------------------------------------------------------------------------------
Manifest-Version: 1.0
Built-By: raman
Build-Jdk: 11.0.10
Created-By: Maven Integration for Eclipse

-------------------------------------------------------------------------------- /javaprogs/Testing/target/classes/META-INF/maven/com.cognixia/Testing/pom.properties: --------------------------------------------------------------------------------
#Generated by Maven Integration for Eclipse
#Sat Mar 06 12:14:34 IST 2021
m2e.projectLocation=E\:\\javaprogs\\Testing
m2e.projectName=Testing
groupId=com.cognixia
artifactId=Testing
version=0.0.1-SNAPSHOT
-------------------------------------------------------------------------------- /javaprogs/Testing/target/classes/META-INF/maven/com.cognixia/Testing/pom.xml: --------------------------------------------------------------------------------
<project xmlns="http://maven.apache.org/POM/4.0.0">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.cognixia</groupId>
  <artifactId>Testing</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.12</version>
      <scope>test</scope>
    </dependency>
  </dependencies>
</project>
-------------------------------------------------------------------------------- /javaprogs/Testing/target/test-classes/com/cognixia/AllTests.class: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/Testing/target/test-classes/com/cognixia/AllTests.class -------------------------------------------------------------------------------- /javaprogs/Testing/target/test-classes/com/cognixia/unitest.class: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/Testing/target/test-classes/com/cognixia/unitest.class -------------------------------------------------------------------------------- /javaprogs/Testing/target/test-classes/com/cognixia/unitest2.class: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/Testing/target/test-classes/com/cognixia/unitest2.class
-------------------------------------------------------------------------------- /javaprogs/Testing/target/classes/META-INF/MANIFEST.MF: --------------------------------------------------------------------------------
1 | Manifest-Version: 1.0
2 | Built-By: raman
3 | Build-Jdk: 11.0.10
4 | Created-By: Maven Integration for Eclipse
5 | 
-------------------------------------------------------------------------------- /javaprogs/Testing/target/classes/META-INF/maven/com.cognixia/Testing/pom.properties: --------------------------------------------------------------------------------
1 | #Generated by Maven Integration for Eclipse
2 | #Sat Mar 06 12:14:34 IST 2021
3 | m2e.projectLocation=E\:\\javaprogs\\Testing
4 | m2e.projectName=Testing
5 | groupId=com.cognixia
6 | artifactId=Testing
7 | version=0.0.1-SNAPSHOT
-------------------------------------------------------------------------------- /javaprogs/Testing/target/classes/META-INF/maven/com.cognixia/Testing/pom.xml: --------------------------------------------------------------------------------
1 | <project xmlns="http://maven.apache.org/POM/4.0.0">
2 |   <modelVersion>4.0.0</modelVersion>
3 |   <groupId>com.cognixia</groupId>
4 |   <artifactId>Testing</artifactId>
5 |   <version>0.0.1-SNAPSHOT</version>
6 |   <dependencies>
7 |     <dependency>
8 |       <groupId>junit</groupId>
9 |       <artifactId>junit</artifactId>
10 |       <version>4.12</version>
11 |       <scope>test</scope>
12 |     </dependency>
13 |   </dependencies>
14 | </project>
-------------------------------------------------------------------------------- /javaprogs/Testing/target/test-classes/com/cognixia/AllTests.class: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/Testing/target/test-classes/com/cognixia/AllTests.class
-------------------------------------------------------------------------------- /javaprogs/Testing/target/test-classes/com/cognixia/unitest.class: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/Testing/target/test-classes/com/cognixia/unitest.class
-------------------------------------------------------------------------------- /javaprogs/Testing/target/test-classes/com/cognixia/unitest2.class: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/Testing/target/test-classes/com/cognixia/unitest2.class
-------------------------------------------------------------------------------- /javaprogs/mockito-example/pom.xml: --------------------------------------------------------------------------------
1 | <project xmlns="http://maven.apache.org/POM/4.0.0">
2 |   <modelVersion>4.0.0</modelVersion>
3 |   <groupId>com.raman</groupId>
4 |   <artifactId>mockito-example</artifactId>
5 |   <version>0.0.1-SNAPSHOT</version>
6 |   <dependencies>
7 |     <dependency>
8 |       <groupId>junit</groupId>
9 |       <artifactId>junit</artifactId>
10 |       <version>4.12</version>
11 |       <scope>test</scope>
12 |     </dependency>
13 |     <dependency>
14 |       <groupId>org.mockito</groupId>
15 |       <artifactId>mockito-all</artifactId>
16 |       <version>1.10.19</version>
17 |       <scope>test</scope>
18 |     </dependency>
19 |   </dependencies>
20 | </project>
-------------------------------------------------------------------------------- /javaprogs/mockito-example/src/main/java/com/raman/business/TodoBusinessImpl.java: --------------------------------------------------------------------------------
1 | package com.raman.business;
2 | 
3 | import java.util.ArrayList;
4 | import java.util.List;
5 | 
6 | import com.raman.data.api.TodoService;
7 | 
8 | public class TodoBusinessImpl {
9 | 	private TodoService todoService;
10 | 
11 | 	TodoBusinessImpl(TodoService todoService) {
12 | 		this.todoService = todoService;
13 | 	}
14 | 
15 | 	public List<String> retrieveTodosRelatedToSpring(String user) {
16 | 		List<String> filteredTodos = new ArrayList<String>();
17 | 		List<String> allTodos = todoService.retrieveTodos(user);
18 | 		for (String todo : allTodos) {
19 | 			if (todo.contains("Spring")) {
20 | 				filteredTodos.add(todo);
21 | 			}
22 | 		}
23 | 		return filteredTodos;
24 | 	}
25 | }
-------------------------------------------------------------------------------- /javaprogs/mockito-example/src/main/java/com/raman/data/api/TodoService.java: --------------------------------------------------------------------------------
1 | package com.raman.data.api;
2 | 
3 | import java.util.List;
4 | 
5 | public interface TodoService {
6 | 	public List<String> retrieveTodos(String user);
7 | }
-------------------------------------------------------------------------------- /javaprogs/mockito-example/src/test/java/com/raman/FirstTest.java: --------------------------------------------------------------------------------
1 | package com.raman;
2 | 
3 | import static org.junit.Assert.*;
4 | 
5 | import org.junit.Test;
6 | 
7 | public class FirstTest {
8 | 
9 | 	@Test
10 | 	public void test() {
11 | 		assertTrue(true);
12 | 	}
13 | 
14 | }
-------------------------------------------------------------------------------- /javaprogs/mockito-example/src/test/java/com/raman/business/TodoBusinessImplStubTest.java: --------------------------------------------------------------------------------
1 | package com.raman.business;
2 | 
3 | import static org.junit.Assert.assertEquals;
4 | import static org.mockito.Mockito.mock;
5 | import static org.mockito.Mockito.when;
6 | 
7 | import java.util.Arrays;
8 | import java.util.List;
9 | 
10 | import org.junit.Test;
11 | 
12 | import com.raman.data.api.TodoService;
13 | import com.raman.data.stub.TodoServiceStub;
14 | 
15 | public class TodoBusinessImplStubTest {
16 | 
17 | 	@Test
18 | 	public void test() {
19 | 		TodoService todoService = new TodoServiceStub();
20 | 		TodoBusinessImpl todoBusinessImpl = new TodoBusinessImpl(todoService);
21 | 		List<String> todos = todoBusinessImpl.retrieveTodosRelatedToSpring("dummy");
22 | 		assertEquals(2, todos.size());   // two of the three canned todos mention Spring
23 | 	}
24 | 
25 | 	@Test
26 | 	public void usingMockito() {
27 | 		TodoService todoService = mock(TodoService.class);
28 | 		List<String> allTodos = Arrays.asList("Learn Spring MVC",
29 | 				"Learn Spring", "Learn to Dance");
30 | 		when(todoService.retrieveTodos("dummy")).thenReturn(allTodos);
31 | 		TodoBusinessImpl todoBusinessImpl = new TodoBusinessImpl(todoService);
32 | 		List<String> todos = todoBusinessImpl.retrieveTodosRelatedToSpring("dummy");
33 | 		assertEquals(2, todos.size());
34 | 	}
35 | }
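36 | 
37 | // A follow-up sketch (the class name is illustrative, not from the original
38 | // notes): the same mock-based test extended with a verify() call, which checks
39 | // that the collaborator was actually queried.
40 | class TodoBusinessImplVerifyTest {
41 | 
42 | 	@Test
43 | 	public void retrieveTodos_queriesTheService() {
44 | 		TodoService todoService = mock(TodoService.class);
45 | 		when(todoService.retrieveTodos("dummy")).thenReturn(Arrays.asList("Learn Spring"));
46 | 		new TodoBusinessImpl(todoService).retrieveTodosRelatedToSpring("dummy");
47 | 		// passes only if the business method called retrieveTodos("dummy")
48 | 		org.mockito.Mockito.verify(todoService).retrieveTodos("dummy");
49 | 	}
50 | }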
-------------------------------------------------------------------------------- /javaprogs/mockito-example/src/test/java/com/raman/data/stub/TodoServiceStub.java: --------------------------------------------------------------------------------
1 | package com.raman.data.stub;
2 | 
3 | import java.util.Arrays;
4 | import java.util.List;
5 | 
6 | import com.raman.data.api.TodoService;
7 | 
8 | public class TodoServiceStub implements TodoService {
9 | 	public List<String> retrieveTodos(String user) {
10 | 		return Arrays.asList("Learn Spring MVC", "Learn Spring",
11 | 				"Learn to Dance");
12 | 	}
13 | }
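14 | 
15 | // A second canned stub, sketched as an assumption (the class name is
16 | // illustrative, not from the original notes): returning an empty list lets a
17 | // test cover the "no todos" path without Mockito.
18 | class EmptyTodoServiceStub implements TodoService {
19 | 	public List<String> retrieveTodos(String user) {
20 | 		return Arrays.asList();   // no todos for any user
21 | 	}
22 | }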
-------------------------------------------------------------------------------- /javaprogs/mockito-example/target/classes/META-INF/MANIFEST.MF: --------------------------------------------------------------------------------
1 | Manifest-Version: 1.0
2 | Built-By: raman
3 | Build-Jdk: 11.0.10
4 | Created-By: Maven Integration for Eclipse
5 | 
-------------------------------------------------------------------------------- /javaprogs/mockito-example/target/classes/META-INF/maven/com.raman/mockito-example/pom.properties: --------------------------------------------------------------------------------
1 | #Generated by Maven Integration for Eclipse
2 | #Fri Mar 05 21:01:59 IST 2021
3 | m2e.projectLocation=E\:\\javaprogs\\mockito-example
4 | m2e.projectName=mockito-example
5 | groupId=com.raman
6 | artifactId=mockito-example
7 | version=0.0.1-SNAPSHOT
-------------------------------------------------------------------------------- /javaprogs/mockito-example/target/classes/META-INF/maven/com.raman/mockito-example/pom.xml: --------------------------------------------------------------------------------
1 | <project xmlns="http://maven.apache.org/POM/4.0.0">
2 |   <modelVersion>4.0.0</modelVersion>
3 |   <groupId>com.raman</groupId>
4 |   <artifactId>mockito-example</artifactId>
5 |   <version>0.0.1-SNAPSHOT</version>
6 |   <dependencies>
7 |     <dependency>
8 |       <groupId>junit</groupId>
9 |       <artifactId>junit</artifactId>
10 |       <version>4.12</version>
11 |       <scope>test</scope>
12 |     </dependency>
13 |     <dependency>
14 |       <groupId>org.mockito</groupId>
15 |       <artifactId>mockito-all</artifactId>
16 |       <version>1.10.19</version>
17 |       <scope>test</scope>
18 |     </dependency>
19 |   </dependencies>
20 | </project>
-------------------------------------------------------------------------------- /javaprogs/mockito-example/target/classes/com/raman/business/TodoBusinessImpl.class: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/mockito-example/target/classes/com/raman/business/TodoBusinessImpl.class
-------------------------------------------------------------------------------- /javaprogs/mockito-example/target/classes/com/raman/data/api/TodoService.class: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/mockito-example/target/classes/com/raman/data/api/TodoService.class
-------------------------------------------------------------------------------- /javaprogs/mockito-example/target/test-classes/com/raman/FirstTest.class: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/mockito-example/target/test-classes/com/raman/FirstTest.class
-------------------------------------------------------------------------------- /javaprogs/mockito-example/target/test-classes/com/raman/business/TodoBusinessImplStubTest.class: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/mockito-example/target/test-classes/com/raman/business/TodoBusinessImplStubTest.class
-------------------------------------------------------------------------------- /javaprogs/mockito-example/target/test-classes/com/raman/data/stub/TodoServiceStub.class: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/javaprogs/mockito-example/target/test-classes/com/raman/data/stub/TodoServiceStub.class
-------------------------------------------------------------------------------- /project.txt: --------------------------------------------------------------------------------
1 | A banking system has a website developed in Java Spring Boot and hosted on a Tomcat server with the configuration below:
2 | 
3 | 1. Application Name :- Banking Mortgage App
4 | 2. Front End :- Java Spring Boot
5 | 3. Back End :- MySQL
6 | 4. Process used :- DevOps
7 | 5. DevOps Tools used :- Git/GitHub, Maven, Selenium, Docker, Kubernetes, Ansible, Jenkins, Nagios
8 | 
9 | Project Description
10 | 
11 | Application Overview
12 | The Mortgage App is used either by purchasers of real property to raise funds to buy real estate, or by existing property owners to raise funds for any purpose while putting a lien on the property being mortgaged. The loan is "secured" on the borrower's property through a process known as mortgage origination.
13 | 
14 | DevOps Automation Process
15 | 
16 | The DevOps process used to maintain the flow is all about agility and automation of the application life cycle. Each phase in the process focuses on closing the loop between development and operations and driving production through continuous development, integration, testing, monitoring and feedback, delivery, and deployment.
17 | 
18 | Continuous Development (Git and GitHub)
19 | Git and GitHub are configured with Eclipse for each member of the development team so that they can maintain version control of the source code; once the code is in GitHub, it is considered the latest copy. The GitHub repository is also used to create separate branches for different developers.
20 | 
21 | Continuous Integration (Maven, Docker, Selenium, Jenkins, Ansible)
22 | In this practice the GitHub repository is webhooked to Jenkins, and a CI pipeline is created in the form of Jenkins jobs. The application source code is automatically built with Maven, and a Docker container is built from a Dockerfile to configure the test environment (install JDK 8 and configure the Selenium scripts). Because there are several test environments (Ubuntu, Red Hat, Kali), these are configured with Ansible scripts, and the application's published code is copied to the test web server (a Docker container). A Jenkinsfile sketch follows at the end of this document.
23 | 
24 | Continuous Deployment (Jenkins, Kubernetes)
25 | Once testing completes successfully (checked against agreed parameters), container images are created and pushed to the Docker registry. These images are configured and run on the web servers and DB servers under a replication and deployment policy managed by Kubernetes.
26 | 
27 | Continuous Monitoring (Nagios)
28 | All host servers and applications are monitored with Nagios continuous monitoring, and the operations team is notified whenever there is an issue, either in the infrastructure or at the application level.
29 | 
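30 | A minimal sketch of how the continuous integration part could look as a declarative Jenkinsfile (the stage names, the banking-app image tag, and the Maven goals are illustrative assumptions, not project specifics):
31 | 
32 | pipeline {
33 |     agent any
34 |     stages {
35 |         stage('Build') {
36 |             steps { sh 'mvn -B clean package' }                           // compile and package the app
37 |         }
38 |         stage('Test') {
39 |             steps { sh 'mvn test' }                                       // run the automated test suites
40 |         }
41 |         stage('Image') {
42 |             steps { sh 'docker build -t banking-app:${BUILD_NUMBER} .' }  // bake the container image for deployment
43 |         }
44 |     }
45 | }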
-------------------------------------------------------------------------------- /splunk installation.txt: --------------------------------------------------------------------------------
1 | -----Splunk Installation on CentOS
2 | 1. wget -O splunk-8.1.2-545206cc9f70-Linux-x86_64.tgz 'https://www.splunk.com/bin/splunk/DownloadActivityServlet?architecture=x86_64&platform=linux&version=8.1.2&product=splunk&filename=splunk-8.1.2-545206cc9f70-Linux-x86_64.tgz&wget=true'
3 | 2. groupadd splunk
4 | 3. useradd -d /opt/splunk -m -g splunk splunk
5 | 4. tar -xvzf splunk-8.1.2-545206cc9f70-Linux-x86_64.tgz -C /opt
6 | 5. chown -R splunk: /opt/splunk
7 | 6. su - splunk
8 | 7. cd /opt/splunk/bin
9 | 8. ./splunk start --accept-license
10 | 9. Open localhost:8000 in a browser to access Splunk
-------------------------------------------------------------------------------- /test: --------------------------------------------------------------------------------
1 | 
2 | 
-------------------------------------------------------------------------------- /tutorialdata.zip: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlineTrainingguy/DevOpsNotes/a04ab0c9bd9b27beadbae58a87352c2e3faa284d/tutorialdata.zip
--------------------------------------------------------------------------------