├── .gitignore
├── Ansible Tutorial.pdf
├── Vagrantfile
├── keys
│   ├── key
│   └── key.pub
├── lab-01
│   └── install-ansible.sh
├── lab-02
│   ├── ad-hoc-commands-on-inventory.sh
│   ├── disable-host-key-checking.sh
│   └── hosts
├── lab-03
│   ├── index.html
│   └── modules-as-ad-hoc-commands.sh
├── lab-04
│   ├── install-nginx-and-jdk.yml
│   └── install-nginx.yml
├── lab-05
│   ├── environments
│   │   ├── dev.yml
│   │   ├── prod.yml
│   │   └── test.yml
│   ├── roles
│   │   ├── deploy_static_content
│   │   │   ├── files
│   │   │   │   └── static_content.zip
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nginx
│   │   │   ├── files
│   │   │   │   └── default.conf
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   └── ntp
│   │       └── tasks
│   │           └── main.yml
│   └── site.yml
├── lab-06
│   ├── environments
│   │   ├── dev.yml
│   │   ├── prod.yml
│   │   └── test.yml
│   ├── roles
│   │   ├── deploy_application
│   │   │   ├── files
│   │   │   │   ├── greeting.service
│   │   │   │   └── gs-rest-service-0.1.0.jar
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── deploy_static_content
│   │   │   ├── files
│   │   │   │   └── static_content.zip
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── java
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   └── nginx
│   │       ├── files
│   │       │   └── default.conf
│   │       ├── handlers
│   │       │   └── main.yml
│   │       └── tasks
│   │           └── main.yml
│   └── site.yml
├── lab-07
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── bin
│   │   ├── aws-create-image.sh
│   │   ├── aws-init.sh
│   │   └── aws-update.sh
│   ├── packer.json
│   ├── packer
│   │   ├── ami.json
│   │   └── vagrant.json
│   ├── provision
│   │   ├── ami-base.yml
│   │   ├── ami.yml
│   │   ├── aws-init.yml
│   │   ├── aws-update.yml
│   │   ├── environments
│   │   │   ├── aws.yml
│   │   │   ├── dev.yml
│   │   │   ├── prod.yml
│   │   │   └── test.yml
│   │   ├── roles
│   │   │   ├── deploy_application
│   │   │   │   ├── files
│   │   │   │   │   ├── greeting.service
│   │   │   │   │   └── gs-rest-service-0.1.0.jar
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── deploy_static_content
│   │   │   │   ├── files
│   │   │   │   │   └── static_content.zip
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── ec2-auto-scale
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── ec2-load-balancer
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── firewall
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── jdk
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── mariadb
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── nginx
│   │   │   │   ├── files
│   │   │   │   │   ├── default.conf
│   │   │   │   │   └── nginx.conf
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── selinux
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   └── site.yml
│   ├── scripts
│   │   ├── cleanup.sh
│   │   ├── prepare.sh
│   │   └── update.sh
│   └── test
│       ├── ami.yml
│       ├── roles
│       │   ├── app
│       │   │   └── tasks
│       │   │       └── main.yml
│       │   └── nginx
│       │       └── tasks
│       │           └── main.yml
│       └── site.yml
└── lab-08
    ├── roles
    │   ├── docker-redis
    │   │   └── tasks
    │   │       └── main.yml
    │   └── docker
    │       ├── files
    │       │   └── index.html
    │       └── tasks
    │           └── main.yml
    └── site.yml
/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | .DS_Store
3 | site.retry
4 |
--------------------------------------------------------------------------------
/Ansible Tutorial.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maaydin/ansible-tutorial/2bd59f54a247e84188279aaa5291dffe0f712387/Ansible Tutorial.pdf
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
8 |
9 | Vagrant.configure("2") do |config|
10 | config.vm.define "control" do |control|
11 | control.vm.box = "ubuntu/trusty64"
12 | control.vm.hostname = 'control'
13 | control.vm.box = "ubuntu/trusty64"
14 |
15 | control.vm.network :private_network, ip: "192.168.35.1"
16 |
17 | control.vm.provider :virtualbox do |v|
18 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
19 | v.customize ["modifyvm", :id, "--memory", 512]
20 | v.customize ["modifyvm", :id, "--name", "control"]
21 | end
22 | end
23 |
24 | config.vm.define "web1" do |web1|
25 | web1.vm.box = "ubuntu/trusty64"
26 | web1.vm.hostname = 'web1'
27 | web1.vm.box = "ubuntu/trusty64"
28 |
29 | web1.vm.network :private_network, ip: "192.168.35.101"
30 |
31 | web1.vm.provider :virtualbox do |v|
32 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
33 | v.customize ["modifyvm", :id, "--memory", 256]
34 | v.customize ["modifyvm", :id, "--name", "web1"]
35 | end
36 |
37 | web1.ssh.private_key_path = ["keys/key", "~/.vagrant.d/insecure_private_key"]
38 | web1.ssh.insert_key = false
39 | web1.vm.provision "file", source: "keys/key.pub", destination: "~/.ssh/authorized_keys"
40 | end
41 |
42 | config.vm.define "web2" do |web2|
43 | web2.vm.box = "ubuntu/trusty64"
44 | web2.vm.hostname = 'web2'
45 | web2.vm.box = "ubuntu/trusty64"
46 |
47 | web2.vm.network :private_network, ip: "192.168.35.102"
48 |
49 | web2.vm.provider :virtualbox do |v|
50 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
51 | v.customize ["modifyvm", :id, "--memory", 256]
52 | v.customize ["modifyvm", :id, "--name", "web2"]
53 | end
54 |
55 | web2.ssh.private_key_path = ["keys/key", "~/.vagrant.d/insecure_private_key"]
56 | web2.ssh.insert_key = false
57 | web2.vm.provision "file", source: "keys/key.pub", destination: "~/.ssh/authorized_keys"
58 | end
59 |
60 | config.vm.define "app" do |app|
61 | app.vm.box = "ubuntu/trusty64"
62 | app.vm.hostname = 'app'
63 | app.vm.box = "ubuntu/trusty64"
64 |
65 | app.vm.network :private_network, ip: "192.168.35.103"
66 |
67 | app.vm.provider :virtualbox do |v|
68 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
69 | v.customize ["modifyvm", :id, "--memory", 256]
70 | v.customize ["modifyvm", :id, "--name", "app"]
71 | end
72 |
73 | app.ssh.private_key_path = ["keys/key", "~/.vagrant.d/insecure_private_key"]
74 | app.ssh.insert_key = false
75 | app.vm.provision "file", source: "keys/key.pub", destination: "~/.ssh/authorized_keys"
76 | end
77 |
78 | config.vm.define "db" do |db|
79 | db.vm.box = "ubuntu/trusty64"
80 | db.vm.hostname = 'db'
81 | db.vm.box = "ubuntu/trusty64"
82 |
83 | db.vm.network :private_network, ip: "192.168.35.104"
84 |
85 | db.vm.provider :virtualbox do |v|
86 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
87 | v.customize ["modifyvm", :id, "--memory", 256]
88 | v.customize ["modifyvm", :id, "--name", "db"]
89 | end
90 |
91 | db.ssh.private_key_path = ["keys/key", "~/.vagrant.d/insecure_private_key"]
92 | db.ssh.insert_key = false
93 | db.vm.provision "file", source: "keys/key.pub", destination: "~/.ssh/authorized_keys"
94 | end
95 | end
--------------------------------------------------------------------------------
/keys/key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIJKQIBAAKCAgEA542xEpvO43lCmAL0i52z6FmJ2mjy3V0YE9egjr2ICv2d8a+T
3 | 3XHeMnQlwuuiRPLHyoaTn6PrSYaWEKDFAYdoQIg2W++gQjnxIV/2WUnmNjmho+uq
4 | Xgwv2TMNTHSwxZUewPrTZuDEwYujXIaxQyjUj2aVlsVc6SPc5ktmsMK3Ufa+34pd
5 | MgjNOTAY8Eq9i0C40fiJsquUHlVUE0JmVXpj8v9FiyOlhrBOpl9KZxsi9XXeszIZ
6 | FcsxG4vg3/+3bAur1UkQL39oVVNwn8IyWFZkh+D/ex1YXFCOI0nQ5y897mnItk2S
7 | lo5gv7AJskiSr6EGE1uVuO24CkPs1U1Uv94UIqyhPV3Dde8slf9LUgi8oTxzvoNC
8 | P6vDz6JrzSjqxG8gT/iK/XJeGxFPN1N8cFmlmZW3qhtQWrFqHPJj4XaEoZxvpR6Q
9 | im1HqVpqf2itDeLpum8t4eeJvcobdXVDUcgmW7Eu7w4MOCgQuAk41EeriWEm6ts3
10 | po7477/DFQ+JGb0Mx+USC/LUrJxjmbzQ/+ZsARS7PH9WCrV2OhOcrfz8n0cT6z/t
11 | KaTqoZYS9sC2i31b7Dc+J/esO1580gHjdxIBLQOD/VQ6IACnU8A8SbPPoPtdEBHH
12 | KRGgm7BUDxvsqcjPoqnlu/mqBP3vbvGXc7tgRISMNzVIx6XMeq7u9BWVF8UCAwEA
13 | AQKCAgAcHCNm8utGyi2/bnxphpeUmWoWza7OGMMXhZHEQlYFn4xRDtOQioZC8b6z
14 | zw8/94kxmOzK8Idbwa7uDdZcvSd2zhJrTsfRwl73gkLRzBraclyhHbbs++XDJGN0
15 | 6KWdFuFI5CpNzvOT23PVg7R5RrenQPjf+M2yr9vdSUIKK1k95gp98LonnNHtI12j
16 | ri8cfJrzQozU1+d/VzSBsgniTp1bnom+vnEVzMl/gQnzQmQYEFscvfQ1pRktEuoV
17 | x2FpCHGvztLcMUdlNF/zxQt/ld1WcYYSWBml+1GGDywBJMcL7mOjXf9xr48nNiO3
18 | NQA5uf6W0wN2E8XH2T8jFeQ39qnS4KKBsU1pBRU5WMy6vu4Ap5aejzowHXJeCEa0
19 | ChjohPXfbQmb3akEjWc4VbCOkBqjh1kavqd3Gj/44iFZnNIRv0X3f5VEH9at0X+g
20 | +bGpGjb7Xc7J2p3t/ZU+J228+uiBkhn/40xqbksbL+HWrNMuO7+/mxevK+YYG08D
21 | yB0l7ICZh47DU4ewZqF594KaKnB1/o99dgXAKSFJAnjheN2c6tLWmNmvAR1LAm3+
22 | OIgPEHniuNZTLboLs/iYv9llXxmERrVfE/wQh5IxU+mLNDhYC8LPi7Fqkvzo7mrc
23 | TY3bpwNq5rHl0fhKyET4eYLrD46kQQ5dF7i1HrTWMwVVZpMM8QKCAQEA/K7SZDwy
24 | ssedsUR3a9H6P6GlW71uRW9iyE2986RBuq/zN5zdCeap0aSFRi9MjEA8dwaiHze0
25 | 8xsfTk5fwy+Z8Jc8CgUVCwtEx7k36DPgzrrBK7ybGNk3ItzuBwaKDJKAtIimjscv
26 | qN95SBzhyG0J3g+gHJRCLr9UODRai0cx3EQHQJ//GWRC/eR7j5CTkZZSpespmG9r
27 | gltY4S8HoolP+nw4VjXMRgyuxIM6fDhFBFFOJNnSKrCaBoYXEjqHE55ANx9yBd4m
28 | jqIj0PhFbY4FO42GkA6hKha6HMGf25LbdwomK2MlQwUI7YOB8N11Wek8L0u9ZAGf
29 | 9PVu4mvHkHP8nwKCAQEA6pfcgFB8sCjJAP4dzzeiC6U9IhbLO8a8DQ/kMaQgLuRQ
30 | 0aEAYiGm14ymkVOyIconYoAOCrKnfXg0iBUg+JZUuV5vVko2ieqcK0siZDOVdwdH
31 | 4OlfN3b5WQO3zZ0KYfL0hBBnN3Dd76Iz+bUn9C4NxMwEw881aTGgofiOROc612+f
32 | C2cEaNn0jIu2RxqHdIqAhAB3oa5vWzcgZn6DKO1lbEM4hZKPFyrIe41l0sVVxZeU
33 | Et+CExbezRQNbHACYgrHuSmJ2L9y9RYE4OL/13QNpwHBagHzO9ve5S30c2DYlogH
34 | 8hBogvbyhY3AhMzlM/nvPqTMgnNnSpU/SV/EAx6tGwKCAQEAk7/hk+zDNFiWc532
35 | OlyIeIabex40CXPNDhSvOnTUXkYd8GnPscdniwcpStXDa8rv0CVXzOs06mX0k0Qu
36 | qKH/4Hd+CtnX6SJIsIQ0FZmoHRmEdH+PLSNuIvuOTxtaw2kyyt2QjrIXwyYQ88K3
37 | nwtrFqaKV8u1O0JzC35JZ2FU3VgF6ZrkfkNrDKx6N4MxVpLQduYf51tXQN0LBnkK
38 | fNbPpkbWZkqEzH0EADI/k30Pliu4hsG7K20iUyg6+1SvwR/qqpC/ioDQiNx0Fu/l
39 | IN9HXgAuK7PyNvsdDk2FnM2teFi94ubRVfPuuXsJEv9bbBx+LaWa84wjrQeKM+kT
40 | wvqfPwKCAQBKKi6Exm1VB9/cF3p3G2B4jAnSsCAaSs8C1iZPZ1LhYD8Itvy3CWqE
41 | tKuoQCyNsCpZYqCN00NOUGVTLFLbJDOu0uMiBZ5OZcRvj3ZU9VXoZn3b5gu7LjrQ
42 | YHOIZoB0zOluovzusxkG/GGwLQxvjuu9br6G5Qg+tHjTHBBmSi/5EgnXAf+L4nQE
43 | xtqeig2O04ajtL4deFcuIDR3EsIGe17YJjxFRjtEmLr/DInHY6mlKTGf8ddej5lq
44 | fk9sG9E1C7FuoWo6AvSl9QsfycQWalMynER4bhtmrjE+hRPGMjTnCiRw660O0mGD
45 | xXe6xEgl8Xsoc9NP3oomMx7p5IPEdLtHAoIBAQDJt2oQc4/dZBqgCT8hA2tngUVx
46 | VCNiuCsT6pwLf+g7PsrQd33/1MpFAzSjl3tdIhw93qtFlhEO3lfW9YUcExG9A+vb
47 | 3sv937HgsItT2+lJo0yI9KbjKFzGZ+Cmtv9QFo4ps/2haXrjbIu43/nzdzPdlQJY
48 | szJTWLhfJWkr4LGyjTmvmPMbNNTbzi4j45EncL91G95rTm/ssUxLGPWRZ2cD6LxC
49 | FalWuK+/caQAB0GBWeNg6ud0w3iFYaUQTV/W+fmFS7+hMykYb88Ra+KvQH8xhEuK
50 | n6h1FhwrsIIcVvY27gX6hTAs2Ul8NzTGjjoKTT7H+3hCVjlRh/64ZaAzka38
51 | -----END RSA PRIVATE KEY-----
52 |
--------------------------------------------------------------------------------
/keys/key.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDnjbESm87jeUKYAvSLnbPoWYnaaPLdXRgT16COvYgK/Z3xr5Pdcd4ydCXC66JE8sfKhpOfo+tJhpYQoMUBh2hAiDZb76BCOfEhX/ZZSeY2OaGj66peDC/ZMw1MdLDFlR7A+tNm4MTBi6NchrFDKNSPZpWWxVzpI9zmS2awwrdR9r7fil0yCM05MBjwSr2LQLjR+Imyq5QeVVQTQmZVemPy/0WLI6WGsE6mX0pnGyL1dd6zMhkVyzEbi+Df/7dsC6vVSRAvf2hVU3CfwjJYVmSH4P97HVhcUI4jSdDnLz3uaci2TZKWjmC/sAmySJKvoQYTW5W47bgKQ+zVTVS/3hQirKE9XcN17yyV/0tSCLyhPHO+g0I/q8PPomvNKOrEbyBP+Ir9cl4bEU83U3xwWaWZlbeqG1BasWoc8mPhdoShnG+lHpCKbUepWmp/aK0N4um6by3h54m9yht1dUNRyCZbsS7vDgw4KBC4CTjUR6uJYSbq2zemjvjvv8MVD4kZvQzH5RIL8tSsnGOZvND/5mwBFLs8f1YKtXY6E5yt/PyfRxPrP+0ppOqhlhL2wLaLfVvsNz4n96w7XnzSAeN3EgEtA4P9VDogAKdTwDxJs8+g+10QEccpEaCbsFQPG+ypyM+iqeW7+aoE/e9u8Zdzu2BEhIw3NUjHpcx6ru70FZUXxQ== mehmet.aydin@Mehmet-MBP
2 |
--------------------------------------------------------------------------------
/lab-01/install-ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | sudo apt-get install software-properties-common
3 | sudo apt-add-repository ppa:ansible/ansible
4 | sudo apt-get update
5 | sudo apt-get install ansible
6 | ansible --version
7 | ansible -m ping localhost
--------------------------------------------------------------------------------
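A minimal way to exercise this installer from the Vagrant setup above (a sketch; it assumes the default `/vagrant` synced folder is available inside the `control` box, which is not stated in the script itself):

```sh
# From the repository root on the host: open a shell on the control box
# and run the installer from the default /vagrant synced folder.
vagrant ssh control -c "sh /vagrant/lab-01/install-ansible.sh"
```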
/lab-02/ad-hoc-commands-on-inventory.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | eval `ssh-agent`
3 | ssh-add /vagrant/keys/key
4 |
5 | ansible -m ping web1
6 | ansible -m ping app
7 | ansible -m ping webservers
8 | ansible -m ping dc
9 | ansible -m shell -a 'ls -al' web1
10 | ansible -m shell -a 'whoami' app
11 | ansible -m shell -a 'ifconfig' webservers
12 | ansible -m shell -a 'hostname' dc
13 | ansible -m ping all
14 | ansible -m ping web*
15 | ansible -m ping 'appservers:dbservers'
16 | ansible -m ping 'dc:!webservers'
17 | ansible -m ping 'dc:&webservers'
--------------------------------------------------------------------------------
/lab-02/disable-host-key-checking.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | sudo sed -i 's/#host_key_checking/host_key_checking/g' /etc/ansible/ansible.cfg
--------------------------------------------------------------------------------
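A session-only alternative to editing `/etc/ansible/ansible.cfg` (a sketch; the lab itself uses the sed approach above):

```sh
# Disable SSH host key checking for the current shell session only.
export ANSIBLE_HOST_KEY_CHECKING=False
```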
/lab-02/hosts:
--------------------------------------------------------------------------------
1 | web1 ansible_host=192.168.35.101
2 | web2 ansible_host=192.168.35.102
3 | app ansible_host=192.168.35.103
4 | db ansible_host=192.168.35.104
5 |
6 | [webservers]
7 | web1
8 | web2
9 |
10 | [appservers]
11 | app
12 |
13 | [dbservers]
14 | db
15 |
16 | [dc:children]
17 | webservers
18 | appservers
19 | dbservers
--------------------------------------------------------------------------------
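If this inventory has not been installed as `/etc/ansible/hosts`, the lab-02 ad-hoc commands can point at it explicitly with `-i` (a sketch, assuming the repository is mounted at `/vagrant` on the control box):

```sh
ansible -i /vagrant/lab-02/hosts -m ping all
ansible -i /vagrant/lab-02/hosts -m ping 'dc:&webservers'
```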
/lab-03/index.html:
--------------------------------------------------------------------------------
1 | Hello, World!
--------------------------------------------------------------------------------
/lab-03/modules-as-ad-hoc-commands.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | eval `ssh-agent`
3 | ssh-add /vagrant/keys/key
4 |
5 | ansible -m apt -a "name=nginx state=present update_cache=yes" webservers --become
6 | ansible -m service -a "name=nginx state=started enabled=yes" webservers --become
7 | ansible -m file -a "path=/usr/share/nginx/html state=directory" webservers --become
8 | ansible -m copy -a "src=/vagrant/lab-03/index.html dest=/usr/share/nginx/html/index.html" webservers --become
--------------------------------------------------------------------------------
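A quick verification sketch (assumption: the command is run from a machine on the 192.168.35.0/24 private network, e.g. the control box). nginx on web1 should now serve the copied page:

```sh
curl http://192.168.35.101/
# Expected output: Hello, World!
```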
/lab-04/install-nginx-and-jdk.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: webservers
3 | become: true
4 | tasks:
5 | - name: Install nginx
6 | apt: name=nginx state=present
7 | - name: Start nginx
8 | service: name=nginx state=started enabled=yes
9 | - hosts: appservers
10 | become: true
11 | tasks:
12 | - name: Install default-jdk
13 | apt: name=default-jdk state=present
--------------------------------------------------------------------------------
/lab-04/install-nginx.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: webservers
3 | become: true
4 | tasks:
5 | - name: Install nginx
6 | apt: name=nginx state=present
7 | - name: Start nginx
8 | service: name=nginx state=started enabled=yes
--------------------------------------------------------------------------------
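Both lab-04 playbooks can be run against the lab-02 inventory (a sketch; the `-i` flag is only needed if that inventory is not already configured as the default):

```sh
ansible-playbook -i /vagrant/lab-02/hosts /vagrant/lab-04/install-nginx.yml
ansible-playbook -i /vagrant/lab-02/hosts /vagrant/lab-04/install-nginx-and-jdk.yml
```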
/lab-05/environments/dev.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
--------------------------------------------------------------------------------
/lab-05/environments/prod.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
--------------------------------------------------------------------------------
/lab-05/environments/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
--------------------------------------------------------------------------------
/lab-05/roles/deploy_static_content/files/static_content.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maaydin/ansible-tutorial/2bd59f54a247e84188279aaa5291dffe0f712387/lab-05/roles/deploy_static_content/files/static_content.zip
--------------------------------------------------------------------------------
/lab-05/roles/deploy_static_content/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install unzip
3 | apt: name=unzip state=present
4 |
5 | - name: Ensure static content directory exists
6 | file: path=/opt/html state=directory
7 |
8 | - name: Get static content
9 | shell: ls -1 /opt/html
10 | register: content
11 |
12 | - name: Undeploy previous version of static content
13 | file: path=/opt/html/{{item}} state=absent
14 | with_items: "{{content.stdout_lines}}"
15 |
16 | - name: Deploy static content
17 | unarchive: src={{static_content_file_name}} dest=/opt/html/
--------------------------------------------------------------------------------
/lab-05/roles/nginx/files/default.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80 default_server;
3 | listen [::]:80 default_server ipv6only=on;
4 |
5 | root /opt/html;
6 | index index.html index.htm;
7 |
8 | server_name localhost;
9 |
10 | location / {
11 | try_files $uri $uri/ =404;
12 | }
13 | }
--------------------------------------------------------------------------------
/lab-05/roles/nginx/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Test nginx configuration
3 | command: nginx -t
4 |
5 | - name: Reload nginx configuration
6 | command: nginx -s reload
--------------------------------------------------------------------------------
/lab-05/roles/nginx/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install nginx server
3 | apt: name=nginx state=present
4 |
5 | - name: Create static content directory
6 | file: path=/opt/html state=directory owner=www-data group=www-data
7 |
8 | - name: Start nginx
9 | service: name=nginx state=started enabled=yes
10 |
11 | - name: Update nginx default config
12 | copy: src=default.conf dest=/etc/nginx/sites-enabled/default
13 | notify:
14 | - Test nginx configuration
15 | - Reload nginx configuration
16 |
17 | - name: Flush handlers
18 | meta: flush_handlers
19 |
20 |
--------------------------------------------------------------------------------
/lab-05/roles/ntp/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install ntp service
3 | apt: name=ntp state=present
4 |
5 | - name: Start ntp service
6 | service: name=ntp state=started enabled=yes
--------------------------------------------------------------------------------
/lab-05/site.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: webservers
3 | become: true
4 |
5 | vars_files:
6 | - "environments/dev.yml"
7 |
8 | roles:
9 | - ntp
10 | - nginx
11 | - deploy_static_content
--------------------------------------------------------------------------------
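A usage sketch for the lab-05 playbook (same inventory assumption as the earlier labs); to target another environment, change the `vars_files` entry in `site.yml` to `environments/test.yml` or `environments/prod.yml`:

```sh
ansible-playbook -i /vagrant/lab-02/hosts /vagrant/lab-05/site.yml
```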
/lab-06/environments/dev.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
3 | app_file_name: gs-rest-service-0.1.0.jar
--------------------------------------------------------------------------------
/lab-06/environments/prod.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
3 | app_file_name: gs-rest-service-0.1.0.jar
--------------------------------------------------------------------------------
/lab-06/environments/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
3 | app_file_name: gs-rest-service-0.1.0.jar
--------------------------------------------------------------------------------
/lab-06/roles/deploy_application/files/greeting.service:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # greeting
4 | #
5 | # chkconfig:
6 | # description: Start up the Greeting Rest Service.
7 |
8 | RETVAL=$?
9 |
10 | killtree() {
11 | local _pid=$1
12 | for _child in $(ps -o pid --no-headers --ppid ${_pid}); do
13 | killtree ${_child}
14 | done
15 | kill -9 ${_pid}
16 | }
17 |
18 | case "$1" in
19 | start)
20 | echo $"Starting Greeting Service"
21 | /bin/su greeting -c "java -jar /opt/greeting/greeting.jar > /var/log/greeting.log 2> /var/log/greeting-error.log" &
22 | echo $! > /var/run/greeting.pid
23 | ;;
24 | stop)
25 | killtree `cat /var/run/greeting.pid`
26 | rm -f /var/run/greeting.pid
27 | ;;
28 | *)
29 | echo $"Usage: $0 {start|stop}"
30 | exit 1
31 | ;;
32 | esac
33 |
34 | exit $RETVAL
--------------------------------------------------------------------------------
/lab-06/roles/deploy_application/files/gs-rest-service-0.1.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maaydin/ansible-tutorial/2bd59f54a247e84188279aaa5291dffe0f712387/lab-06/roles/deploy_application/files/gs-rest-service-0.1.0.jar
--------------------------------------------------------------------------------
/lab-06/roles/deploy_application/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Wait for application to start
3 | wait_for: port={{app_port}}
4 |
--------------------------------------------------------------------------------
/lab-06/roles/deploy_application/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: add group "greeting"
4 | group: name=greeting
5 |
6 | - name: add user "greeting"
7 | user: name=greeting group=greeting home=/home/greeting createhome=yes
8 |
9 | - name: Create stdout log file
10 | file: path=/var/log/greeting.log state=touch owner=greeting group=greeting mode=0644
11 |
12 | - name: Create stderr log file
13 | file: path=/var/log/greeting-error.log state=touch owner=greeting group=greeting mode=0644
14 |
15 | - name: Install greeting service script
16 | copy: src=greeting.service dest=/etc/init.d/greeting mode=0755
17 |
18 | - name: Create application directory
19 | file: path=/opt/greeting state=directory
20 |
21 | - name: Stop greeting
22 | service: name=greeting state=stopped
23 |
24 | - name: Get application content
25 | shell: ls -1 /opt/greeting
26 | register: content
27 |
28 | - name: Undeploy previous version of application
29 | file: path=/opt/greeting/{{item}} state=absent
30 | with_items: "{{content.stdout_lines}}"
31 |
32 | - name: Deploy application
33 | copy: src={{app_file_name}} dest=/opt/greeting/greeting.jar mode=0555
34 |
35 | - name: Start greeting
36 | service: name=greeting state=started enabled=yes
37 |
--------------------------------------------------------------------------------
/lab-06/roles/deploy_static_content/files/static_content.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maaydin/ansible-tutorial/2bd59f54a247e84188279aaa5291dffe0f712387/lab-06/roles/deploy_static_content/files/static_content.zip
--------------------------------------------------------------------------------
/lab-06/roles/deploy_static_content/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install unzip
3 | apt: name=unzip state=present
4 |
5 | - name: Ensure static content directory exists
6 | file: path=/opt/html state=directory
7 |
8 | - name: Get static content
9 | shell: ls -1 /opt/html
10 | register: content
11 |
12 | - name: Undeploy previous version of static content
13 | file: path=/opt/html/{{item}} state=absent
14 | with_items: "{{content.stdout_lines}}"
15 |
16 | - name: Deploy static content
17 | unarchive: src={{static_content_file_name}} dest=/opt/html/
--------------------------------------------------------------------------------
/lab-06/roles/java/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install openjdk repository
3 | apt_repository: repo='ppa:openjdk-r/ppa'
4 | - name: Install openjdk
5 | apt: name=openjdk-8-jdk state=present
--------------------------------------------------------------------------------
/lab-06/roles/nginx/files/default.conf:
--------------------------------------------------------------------------------
1 | upstream api {
2 | server 192.168.35.103:8080;
3 | }
4 |
5 | server {
6 | listen 80 default_server;
7 | listen [::]:80 default_server ipv6only=on;
8 |
9 | root /opt/html;
10 | index index.html index.htm;
11 |
12 | server_name localhost;
13 |
14 | location / {
15 | try_files $uri $uri/ =404;
16 | }
17 |
18 | location /greeting {
19 | client_max_body_size 20M;
20 |
21 | proxy_set_header Host $http_host;
22 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
23 | proxy_set_header X-Real-IP $remote_addr;
24 |
25 | proxy_pass http://api;
26 | }
27 | }
--------------------------------------------------------------------------------
/lab-06/roles/nginx/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Test nginx configuration
3 | command: nginx -t
4 |
5 | - name: Reload nginx configuration
6 | command: nginx -s reload
--------------------------------------------------------------------------------
/lab-06/roles/nginx/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install nginx server
3 | apt: name=nginx state=present
4 |
5 | - name: Create static content directory
6 | file: path=/opt/html state=directory owner=www-data group=www-data
7 |
8 | - name: Start nginx
9 | service: name=nginx state=started enabled=yes
10 |
11 | - name: Update nginx default config
12 | copy: src=default.conf dest=/etc/nginx/sites-enabled/default
13 | notify:
14 | - Test nginx configuration
15 | - Reload nginx configuration
16 |
17 | - name: Flush handlers
18 | meta: flush_handlers
19 |
20 |
--------------------------------------------------------------------------------
/lab-06/site.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: webservers
3 | become: true
4 |
5 | vars_files:
6 | - "environments/dev.yml"
7 |
8 | roles:
9 | - nginx
10 | - deploy_static_content
11 |
12 | - hosts: appservers
13 | become: true
14 |
15 | vars_files:
16 | - "environments/dev.yml"
17 |
18 | roles:
19 | - java
20 | - deploy_application
--------------------------------------------------------------------------------
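A usage and verification sketch for lab-06 (assumptions as in lab-05; the `/greeting` path on web1 is proxied by nginx to the application on 192.168.35.103:8080):

```sh
ansible-playbook -i /vagrant/lab-02/hosts /vagrant/lab-06/site.yml
# Static content served by nginx, then the REST endpoint through the /greeting proxy.
curl http://192.168.35.101/
curl http://192.168.35.101/greeting
```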
/lab-07/README.md:
--------------------------------------------------------------------------------
1 | # Infrastructure
2 |
3 | The provision scripts create the dev and production environments for the greeting application.
4 |
5 | ---
6 |
7 | ## Building Environments
8 |
9 | **1. Building Training Environment**
10 |
11 | > **Notes:**
12 | >
13 | > - Ensure [Python](https://www.python.org/ftp/python/2.7.5/) (2.7.5 recommended) is installed.
14 | > - Ensure [VirtualBox](https://www.virtualbox.org/wiki/Downloads) (5.1.6 recommended) is installed.
15 | > - Ensure [Vagrant](https://www.vagrantup.com/downloads.html) (1.8.6 recommended) is installed.
16 | > - Ensure [Ansible](http://docs.ansible.com/ansible/intro_installation.html) (2.1.1 recommended) is installed.
17 | > - Windows isn’t supported
18 |
19 | * Ensure the *static.zip* artifact is present at *provision/roles/deploy_static_content/files*
20 | * Ensure the *greeting-version.jar* artifact is present at *provision/roles/deploy_application/files*
21 | * Run the command below in your terminal to initialize your training environment
22 | ```sh
23 | cd /path/to/project
24 | vagrant up
25 | ```
26 | * Run the command below to roll out updates
27 | ```sh
28 | vagrant provision
29 | ```
30 |
31 | **2. Building Production Environment**
32 |
33 | > **Notes:**
34 | >
35 | > - Ensure [Python](https://www.python.org/ftp/python/2.7.5/) (2.7.5 recommended) is installed.
36 | > - Ensure [Python-boto](https://github.com/boto/boto) (2.38.0 recommended) is installed.
37 | > - Ensure [Ansible](http://docs.ansible.com/ansible/intro_installation.html) (2.1.1 recommended) is installed.
38 | > - Ensure [Packer](https://www.packer.io/downloads.html) (0.10.2 recommended) is installed.
39 | > - Create an IAM user with the AmazonEC2FullAccess policy and create an access key for this user. Visit [Creating an IAM User in Your AWS Account](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) for help on creating a user.
40 | > - Ensure *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* are set as environment variables or directly in the build scripts. Visit [Getting Your Access Key ID and Secret Access Key](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html) for help on creating keys.
41 | > - Create 2 subnets in the *eu-west-1* (Ireland) region in different availability zones on the [AWS Console](https://aws.amazon.com/) and edit the subnet information in the configuration file at *provision/environments/aws.yml*
42 |
43 | **2.1 Creating Amazon Machine Image (AMI)**
44 |
45 | * Ensure the *static.zip* artifact is present at *provision/roles/deploy_static_content/files*
46 | * Ensure the *greeting-version.jar* artifact is present at *provision/roles/deploy_application/files*
47 | * Run the command below to create an image
48 | ```sh
49 | cd /path/to/project
50 | bash bin/aws-create-image.sh
51 | ```
52 | * Note the AMI image id printed on Packer's standard output; it is used in the next steps
53 |
54 | **2.2 Creating Infrastructure on AWS for the First Time**
55 | > **Notes:**
56 | >
57 | > - The command below will create the following resources in your AWS account:
58 | > - A VPC Security Group that allows traffic on port TCP(80) from any address to any address for the Elastic Load Balancer
59 | > - An Elastic Load Balancer that listens on port TCP(80)
60 | > - A VPC Security Group that allows traffic on port TCP(80) from any -internal- address to the instances
61 | > - A Launch Configuration for given image
62 | > - An Auto Scaling Group with Scale Up & Down policies
63 | > - 2 new EC2 instances will also be created via Auto Scaling
64 |
65 | * Ensure the configuration in *provision/environments/aws.yml* is correct
66 | * Run the command below to initialize the infrastructure
67 | ```sh
68 | cd /path/to/project
69 | bash bin/aws-init.sh <image-id>
70 | ```
71 | * Get the DNS name of the Elastic Load Balancer, such as *greeting-lb-prod-123456789.eu-west-1.elb.amazonaws.com*, from the command's standard output.
72 | * Browse to the DNS name to test the application
73 |
74 | **2.3 Rolling Updates on an Already Created Infrastructure on AWS**
75 | > **Notes:**
76 | >
77 | > - The command below will create the following resources in your AWS account:
78 | > - A VPC Security Group that allows traffic on port TCP(80) from any -internal- address to the instances
79 | > - A Launch Configuration for given image
80 | > - An Auto Scaling Group with Scale Up & Down policies
81 | > - The current EC2 instances will also be replaced
82 |
83 | * Ensure the configuration in *provision/environments/aws.yml* is correct
84 | * Run the command below to apply the rolling update
85 | ```sh
86 | cd /path/to/project
87 | bash bin/aws-update.sh <image-id>
88 | ```
89 | * The DNS name of the Elastic Load Balancer does not change in this step. You can still get the DNS name, such as *greeting-lb-prod-123456789.eu-west-1.elb.amazonaws.com*, from the command's standard output.
90 | * Browse to the DNS name to test the application
91 |
92 | ---
93 |
94 | ## Images Needed to Build Environments
95 | Below is the list of images used to create the environments. You don't need to download the images manually.
96 |
97 | * [Production Env. Image](http://aws.amazon.com/marketplace/pp?sku=aw0evgkw8e5c1q413zgy5pjce)
98 | * [Dev Env. Image](https://atlas.hashicorp.com/puppetlabs/boxes/centos-7.0-64-nocm)
99 |
100 | ## Scaling Plan
101 |
102 | The production deployment scripts create an auto scaling plan on AWS. The plan starts with 2 instances and can automatically scale up / down between 2 and 8 instances.
103 |
104 | Auto scaling scales instances up / down based on CPU utilization. When average CPU utilization rises above 50 percent, the number of instances in the cluster is automatically increased by 1. When average CPU utilization drops below 20 percent, the number of instances is automatically decreased by 1.
105 |
106 | The Auto Scaling configuration can be updated in the configuration file at *provision/environments/aws.yml* and applied to production as described in step **2.3 Rolling Updates on an Already Created Infrastructure on AWS**.
107 |
108 | Visit [Auto Scaling Documentation](http://aws.amazon.com/documentation/autoscaling/) for detailed information.
--------------------------------------------------------------------------------
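A sketch of exporting the AWS credentials mentioned in the README notes before running the `bin/` scripts (the values below are placeholders, not real credentials):

```sh
export AWS_ACCESS_KEY_ID="<your-access-key-id>"
export AWS_SECRET_ACCESS_KEY="<your-secret-access-key>"
bash bin/aws-create-image.sh
```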
/lab-07/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | Vagrant.configure(2) do |config|
5 |
6 | config.vm.define "yollando" do |yollando|
7 | yollando.vm.box = "puppetlabs/centos-7.0-64-nocm"
8 | end
9 |
10 | config.vm.provider "virtualbox" do |vb|
11 | vb.memory = "1024"
12 | vb.cpus = "2"
13 | end
14 |
15 | config.vm.provision "ansible" do |ansible|
16 | ansible.playbook = "provision/site.yml"
17 | ansible.groups = {
18 | "data-tier" => ["yollando"],
19 | "web-tier" => ["yollando"],
20 | "application-tier" => ["yollando"],
21 | }
22 | ansible.extra_vars = {
23 | env_name: "dev",
24 | }
25 | end
26 |
27 | config.vm.provision "ansible" do |test|
28 | test.playbook = "test/site.yml"
29 | test.groups = {
30 | "web-tier" => ["yollando"],
31 | "application-tier" => ["yollando"],
32 | }
33 | end
34 |
35 | config.vm.network "forwarded_port", guest: 80, host: 8080
36 | config.vm.network "forwarded_port", guest: 8090, host: 8090
37 |
38 | end
39 |
--------------------------------------------------------------------------------
/lab-07/ansible.cfg:
--------------------------------------------------------------------------------
1 | [ssh_connection]
2 | scp_if_ssh=True
--------------------------------------------------------------------------------
/lab-07/bin/aws-create-image.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #export AWS_ACCESS_KEY_ID=
3 | #export AWS_SECRET_ACCESS_KEY=
4 | packer build packer/ami.json
5 |
--------------------------------------------------------------------------------
/lab-07/bin/aws-init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #export AWS_ACCESS_KEY_ID=
3 | #export AWS_SECRET_ACCESS_KEY=
4 | usage(){
5 | echo "Usage: $0 <image-id>"
6 | exit 1
7 | }
8 | [[ $# -ne 1 ]] && usage
9 |
10 | ansible-playbook provision/aws-init.yml --extra-vars "env_name=aws image_id=$1"
11 |
--------------------------------------------------------------------------------
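A usage sketch (the AMI id below is a hypothetical placeholder; use the image id printed by `bin/aws-create-image.sh`):

```sh
bash bin/aws-init.sh ami-0123456789abcdef0
```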
/lab-07/bin/aws-update.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #export AWS_ACCESS_KEY_ID=
3 | #export AWS_SECRET_ACCESS_KEY=
4 | usage(){
5 | echo "Usage: $0 <image-id>"
6 | exit 1
7 | }
8 | [[ $# -ne 1 ]] && usage
9 |
10 | ansible-playbook provision/aws-update.yml --extra-vars "env_name=aws image_id=$1"
11 |
--------------------------------------------------------------------------------
/lab-07/packer.json:
--------------------------------------------------------------------------------
1 | {
2 | "provisioners": [
3 | {
4 | "type": "shell",
5 | "execute_command": "echo 'centos' | {{.Vars}} sudo -S -E bash '{{.Path}}'",
6 | "script": "scripts/prepare.sh"
7 | },
8 | {
9 | "type": "ansible-local",
10 | "playbook_file": "provision/ami.yml",
11 | "playbook_dir": "provision",
12 | "extra_arguments" : [
13 | "--extra-vars",
14 | "env_name=prod"
15 | ]
16 | },
17 | {
18 | "type": "ansible-local",
19 | "playbook_file": "test/ami.yml",
20 | "playbook_dir": "test"
21 | },
22 | {
23 | "type": "ansible-local",
24 | "playbook_file": "provision/ami.yml",
25 | "playbook_dir": "provision",
26 | "extra_arguments" : [
27 | "--extra-vars",
28 | "env_name=prod"
29 | ]
30 | },
31 | {
32 | "type": "shell",
33 | "execute_command": "echo 'centos' | {{.Vars}} sudo -S -E bash '{{.Path}}'",
34 | "script": "scripts/cleanup.sh"
35 | }
36 | ],
37 | "builders": [
38 | {
39 | "name": "aws",
40 | "type": "amazon-ebs",
41 | "region": "eu-west-1",
42 | "source_ami": "ami-7cc4f661",
43 | "instance_type": "t2.small",
44 | "ssh_username": "centos",
45 | "ssh_pty": "true",
46 | "ami_name": "yollando-centos7-{{timestamp}}"
47 | }
48 | ]
49 | }
50 |
--------------------------------------------------------------------------------
/lab-07/packer/ami.json:
--------------------------------------------------------------------------------
1 | {
2 | "provisioners": [
3 | {
4 | "type": "shell",
5 | "execute_command": "echo 'centos' | {{.Vars}} sudo -S -E bash '{{.Path}}'",
6 | "script": "scripts/prepare.sh"
7 | },
8 | {
9 | "type": "ansible-local",
10 | "playbook_file": "provision/ami.yml",
11 | "playbook_dir": "provision",
12 | "extra_arguments" : [
13 | "--extra-vars",
14 | "env_name=test"
15 | ]
16 | },
17 | {
18 | "type": "shell",
19 | "execute_command": "echo 'centos' | {{.Vars}} sudo -S -E bash '{{.Path}}'",
20 | "script": "scripts/cleanup.sh"
21 | }
22 | ],
23 | "builders": [
24 | {
25 | "name": "aws",
26 | "type": "amazon-ebs",
27 | "region": "eu-west-1",
28 | "vpc_id": "vpc-0c2b4c68",
29 | "subnet_id": "subnet-54cab130",
30 | "associate_public_ip_address": "true",
31 | "source_ami": "ami-7abd0209",
32 | "instance_type": "t2.micro",
33 | "ssh_username": "centos",
34 | "ssh_pty": "true",
35 | "ami_name": "greeting-centos7-{{timestamp}}"
36 | }
37 | ]
38 | }
39 |
--------------------------------------------------------------------------------
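The template can be checked before building with Packer's validator (a sketch; run from the `lab-07` directory so the relative `scripts/` and `provision/` paths resolve):

```sh
packer validate packer/ami.json
packer build packer/ami.json
```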
/lab-07/packer/vagrant.json:
--------------------------------------------------------------------------------
1 | {
2 | "provisioners": [
3 | {
4 | "type": "shell",
5 | "execute_command": "echo 'vagrant' | {{.Vars}} sudo -S -E bash '{{.Path}}'",
6 | "script": "scripts/prepare.sh"
7 | },
8 | {
9 | "type": "ansible-local",
10 | "playbook_file": "provision/vagrant.yml",
11 | "playbook_dir": "provision",
12 | "extra_arguments" : [
13 | "--extra-vars",
14 | "env_name=dev"
15 | ]
16 | },
17 | {
18 | "type": "shell",
19 | "execute_command": "echo 'vagrant' | {{.Vars}} sudo -S -E bash '{{.Path}}'",
20 | "script": "scripts/cleanup.sh"
21 | }
22 | ],
23 | "builders": [
24 | {
25 | "type": "virtualbox-iso",
26 | "boot_command": [
27 | " text ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ks.cfg"
28 | ],
29 | "boot_wait": "10s",
30 | "disk_size": 20480,
31 | "guest_os_type": "RedHat_64",
32 | "headless": false,
33 | "http_directory": "scripts",
34 | "iso_urls": [
35 | "iso/CentOS-7-x86_64-Minimal-1503-01.iso",
36 | "http://centos.mirrors.hoobly.com/7.1.1503/isos/x86_64/CentOS-7-x86_64-Minimal-1503-01.iso"
37 | ],
38 | "iso_checksum_type": "md5",
39 | "iso_checksum": "d07ab3e615c66a8b2e9a50f4852e6a77",
40 | "ssh_username": "vagrant",
41 | "ssh_password": "vagrant",
42 | "ssh_port": 22,
43 | "ssh_wait_timeout": "10000s",
44 | "shutdown_command": "echo 'vagrant'|sudo -S /sbin/halt -h -p",
45 | "guest_additions_path": "VBoxGuestAdditions_{{.Version}}.iso",
46 | "virtualbox_version_file": ".vbox_version",
47 | "vm_name": "greeting-centos-7.1-x86_64",
48 | "vboxmanage": [
49 | [
50 | "modifyvm",
51 | "{{.Name}}",
52 | "--memory",
53 | "512"
54 | ],
55 | [
56 | "modifyvm",
57 | "{{.Name}}",
58 | "--cpus",
59 | "1"
60 | ]
61 | ]
62 | }
63 | ],
64 | "post-processors": [
65 | {
66 | "output": "builds/greeting-centos71.box",
67 | "type": "vagrant"
68 | }
69 | ]
70 | }
71 |
--------------------------------------------------------------------------------
/lab-07/provision/ami-base.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook deploys greeting application for amazon image creation
3 |
4 | - hosts: localhost
5 | connection: local
6 | become: true
7 |
8 | vars_files:
9 | - "environments/{{env_name}}.yml"
10 |
11 | roles:
12 | - selinux
13 | - nginx
14 | - jdk
--------------------------------------------------------------------------------
/lab-07/provision/ami.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook deploys greeting application for amazon image creation
3 |
4 | - hosts: localhost
5 | connection: local
6 | become: true
7 |
8 | vars_files:
9 | - "environments/{{env_name}}.yml"
10 |
11 | roles:
12 | - deploy_static_content
13 | - deploy_application
--------------------------------------------------------------------------------
/lab-07/provision/aws-init.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook deploys greeting application on AWS for first time.
3 |
4 | - hosts: localhost
5 | connection: local
6 |
7 | vars_files:
8 | - "environments/{{env_name}}.yml"
9 |
10 | roles:
11 | - ec2-load-balancer
12 | - ec2-auto-scale
--------------------------------------------------------------------------------
/lab-07/provision/aws-update.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook deploys greeting application with rolling updates
3 |
4 | - hosts: localhost
5 | connection: local
6 |
7 | vars_files:
8 | - "environments/{{env_name}}.yml"
9 |
10 | roles:
11 | - ec2-auto-scale
--------------------------------------------------------------------------------
/lab-07/provision/environments/aws.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ec2_region: eu-west-1
3 | instance_type: m3.medium
4 | suffix: test
5 | # You should create subnets in different availability zones manually, as the ec2_vpc_subnet module is not provided with Ansible <= 2.0
6 | vpc_id: vpc-0c2b4c68
7 | subnet_az_a: subnet-93cbb0f7
8 | subnet_az_b: subnet-e5582993
9 | az_a: eu-west-1a
10 | az_b: eu-west-1b
11 | ssh_key_name: ansible-traning
12 | # Autoscaling Group min, max and desired numbers of instances
13 | min_size: 1
14 | max_size: 4
15 | desired_capacity: 1
--------------------------------------------------------------------------------
/lab-07/provision/environments/dev.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
3 | oracle_jdk_version: 8u111
4 | oracle_jdk_build_number: b14
5 | app_file_name: greeting.jar
6 | app_config_file_name: config.yaml
7 | app_port: 8090
--------------------------------------------------------------------------------
/lab-07/provision/environments/prod.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
3 | oracle_jdk_version: 8u111
4 | oracle_jdk_build_number: b14
5 | app_file_name: greeting.jar
6 | app_config_file_name: config.yaml
7 | app_port: 8090
--------------------------------------------------------------------------------
/lab-07/provision/environments/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_content_file_name: static_content.zip
3 | oracle_jdk_version: 8u111
4 | oracle_jdk_build_number: b14
5 | app_file_name: greeting.jar
6 | app_config_file_name: config.yaml
7 | app_port: 8090
--------------------------------------------------------------------------------
/lab-07/provision/roles/deploy_application/files/greeting.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Greeting Service
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | User=greeting
8 | ExecStart=/bin/java -Xmx2g -jar /opt/greeting/greeting.jar
9 | Restart=on-abort
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/deploy_application/files/gs-rest-service-0.1.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maaydin/ansible-tutorial/2bd59f54a247e84188279aaa5291dffe0f712387/lab-07/provision/roles/deploy_application/files/gs-rest-service-0.1.0.jar
--------------------------------------------------------------------------------
/lab-07/provision/roles/deploy_application/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Wait for application to start
3 | wait_for: port={{app_port}}
4 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/deploy_application/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: add group "greeting"
4 | group: name=greeting
5 |
6 | - name: add user "greeting"
7 | user: name=greeting group=greeting home=/home/greeting createhome=yes
8 |
9 | - name: Create stdout log file
10 | file: path=/var/log/greeting.log state=touch owner=greeting group=greeting mode=0644
11 |
12 | - name: Create stderr log file
13 | file: path=/var/log/greeting-error.log state=touch owner=greeting group=greeting mode=0644
14 |
15 | - name: Install greeting service script
16 | copy: src=greeting.service dest=/etc/systemd/system/greeting.service mode=0755
17 |
18 | - name: Create application directory
19 | file: path=/opt/greeting state=directory
20 |
21 | - name: Stop greeting
22 | service: name=greeting state=stopped
23 |
24 | - name: Get application content
25 | shell: ls -1 /opt/greeting
26 | register: content
27 |
28 | - name: Undeploy previous version of application
29 | file: path=/opt/greeting/{{item}} state=absent
30 | with_items: "{{content.stdout_lines}}"
31 |
32 | - name: Deploy application
33 | copy: src={{app_file_name}} dest=/opt/greeting/greeting.jar mode=0555
34 |
35 | - name: Start greeting
36 | service: name=greeting state=started enabled=yes
--------------------------------------------------------------------------------
/lab-07/provision/roles/deploy_static_content/files/static_content.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maaydin/ansible-tutorial/2bd59f54a247e84188279aaa5291dffe0f712387/lab-07/provision/roles/deploy_static_content/files/static_content.zip
--------------------------------------------------------------------------------
/lab-07/provision/roles/deploy_static_content/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Install unzip
4 | yum: name=unzip state=present
5 |
6 | - name: Ensure static content directory exists
7 | file: path=/var/www/html state=directory
8 |
9 | - name: Get static content
10 | shell: ls -1 /var/www/html
11 | register: content
12 |
13 | - name: Undeploy previous version of static content
14 | file: path=/var/www/html/{{item}} state=absent
15 | with_items: "{{content.stdout_lines}}"
16 |
17 | - name: Deploy static content
18 | unarchive: src={{static_content_file_name}} dest=/var/www/html/
19 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/ec2-auto-scale/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set a random id for launch configuration updates
3 | set_fact:
4 | lc_rand: "{{ 10000 | random }}"
5 | run_once: yes
6 |
7 | - name: Configure Launch Configuration Security Group
8 | ec2_group:
9 | name: y-lc-sg-{{suffix}}
10 | description: Greeting Launch Configuration Security Group - {{suffix}}
11 | vpc_id: "{{vpc_id}}"
12 | region: "{{ec2_region}}"
13 | rules:
14 | - proto: tcp
15 | from_port: 80
16 | to_port: 80
17 | cidr_ip: 0.0.0.0/0
18 | - proto: tcp
19 | from_port: 81
20 | to_port: 81
21 | cidr_ip: 0.0.0.0/0
22 | - proto: tcp
23 | from_port: 22
24 | to_port: 22
25 | cidr_ip: 0.0.0.0/0
26 | register: lc_security_group
27 |
28 | - debug: msg="Launch Configuration Security Group id={{lc_security_group.group_id}}"
29 |
30 | - name: Configure Launch Configuration
31 | ec2_lc:
32 | name: y-lc-{{suffix}}-{{lc_rand}}
33 | region: "{{ec2_region}}"
34 | image_id: "{{image_id}}"
35 | assign_public_ip: yes
36 | key_name: "{{ssh_key_name}}"
37 | security_groups: ["{{lc_security_group.group_id}}"]
38 | instance_type: "{{instance_type}}"
39 | state: present
40 | register: launch_config
41 |
42 | - debug: msg="Launch Configuration name= {{launch_config.name}}"
43 |
44 | - name: Configure Autoscaling Group
45 | ec2_asg:
46 | name: y_asg_{{suffix}}
47 | region: "{{ec2_region}}"
48 | launch_config_name: "{{launch_config.name}}"
49 | load_balancers: "y-lb-{{suffix}}"
50 | availability_zones: "{{az_a}},{{az_b}}"
51 | health_check_period: 60
52 | health_check_type: ELB
53 | replace_all_instances: yes
54 | min_size: "{{min_size}}"
55 | max_size: "{{max_size}}"
56 | desired_capacity: "{{desired_capacity}}"
57 | vpc_zone_identifier: "{{subnet_az_a}},{{subnet_az_b}}"
58 | wait_timeout: 600
59 | register: autoscaling_group
60 |
61 | - debug: msg="Autoscaling Group name= {{autoscaling_group.name}}"
62 | - debug: msg="Instance(s) will be created immediately with id(s)= {{autoscaling_group.instances}}"
63 |
64 | - name: Configure Scaling Policies
65 | ec2_scaling_policy:
66 | region: "{{ec2_region}}"
67 | name: "{{item.name}}"
68 | asg_name: "{{autoscaling_group.name}}"
69 | state: present
70 | adjustment_type: "ChangeInCapacity"
71 | min_adjustment_step: 1
72 | scaling_adjustment: "{{item.scaling_adjustment}}"
73 | cooldown: "{{item.cooldown}}"
74 | with_items:
75 | - name: y_sp_sc_up_{{suffix}}
76 | scaling_adjustment: +1
77 | cooldown: 180
78 | - name: y_sp_sc_dw_{{suffix}}
79 | scaling_adjustment: -1
80 | cooldown: 300
81 | register: scaling_policies
82 |
83 | - name: Configure Metric Alarms
84 | ec2_metric_alarm:
85 | region: "{{ec2_region}}"
86 | name: "{{item.name}}"
87 | state: present
88 | metric: "CPUUtilization"
89 | namespace: "AWS/EC2"
90 | statistic: "Average"
91 | comparison: "{{item.comparison}}"
92 | threshold: "{{item.threshold}}"
93 | period: 60
94 | evaluation_periods: 5
95 | unit: "Percent"
96 | dimensions:
97 | AutoScalingGroupName: "{{autoscaling_group.name}}"
98 | alarm_actions: "{{item.alarm_actions}}"
99 | with_items:
100 | - name: "y_asg_{{suffix}}-up-m"
101 | comparison: ">="
102 | threshold: 50.0
103 | alarm_actions:
104 | - "{{ scaling_policies.results[0].arn }}"
105 | - name: "y_asg_{{suffix}}-down-m"
106 | comparison: "<="
107 | threshold: 20.0
108 | alarm_actions:
109 | - "{{ scaling_policies.results[1].arn }}"
110 | register: metric_alarms
111 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/ec2-load-balancer/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Configure Load Balancer Security Group
3 | ec2_group:
4 | name: y-lb-sg-{{suffix}}
5 | vpc_id: "{{vpc_id}}"
6 | description: Greeting Load Balancer Security Group - {{suffix}}
7 | region: "{{ec2_region}}"
8 | rules:
9 | - proto: tcp
10 | from_port: 80
11 | to_port: 80
12 | cidr_ip: 0.0.0.0/0
13 | - proto: tcp
14 | from_port: 443
15 | to_port: 443
16 | cidr_ip: 0.0.0.0/0
17 | rules_egress:
18 | - proto: tcp
19 | from_port: 80
20 | to_port: 80
21 | cidr_ip: 0.0.0.0/0
22 | - proto: tcp
23 | from_port: 81
24 | to_port: 81
25 | cidr_ip: 0.0.0.0/0
26 | register: lb_security_group
27 |
28 | - debug: msg="Load Balancer Security Group id={{lb_security_group.group_id}}"
29 |
30 | - name: Configure Load Balancer
31 | ec2_elb_lb:
32 | name: y-lb-{{suffix}}
33 | state: present
34 | region: "{{ec2_region}}"
35 | connection_draining_timeout: 60
36 | cross_az_load_balancing: yes
37 | security_group_ids: "{{lb_security_group.group_id}}"
38 | subnets: "{{subnet_az_a}},{{subnet_az_b}}"
39 | listeners:
40 | - protocol: http
41 | load_balancer_port: 80
42 | instance_port: 80
43 | health_check:
44 | ping_protocol: http
45 | ping_port: 80
46 | ping_path: "/"
47 | response_timeout: 30
48 | interval: 60
49 | unhealthy_threshold: 2
50 | healthy_threshold: 2
51 | register: load_balancer
52 |
53 | - debug: msg="Elastic load balancer name= {{load_balancer.elb.name}}"
54 | - debug: msg="Elastic load balancer dns name= {{load_balancer.elb.dns_name}}"
55 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/firewall/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Disable firewall
3 | service: name=firewalld state=stopped enabled=no
--------------------------------------------------------------------------------
/lab-07/provision/roles/jdk/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Download Oracle JDK
3 | get_url:
4 | url: http://download.oracle.com/otn-pub/java/jdk/{{oracle_jdk_version}}-{{oracle_jdk_build_number}}/jdk-{{oracle_jdk_version}}-linux-x64.rpm
5 | dest: /tmp/jdk-{{oracle_jdk_version}}-linux-x64.rpm
6 | headers: 'Cookie:oraclelicense=accept-securebackup-cookie'
7 |
8 | - name: Install Oracle JDK
9 | yum: name=/tmp/jdk-{{oracle_jdk_version}}-linux-x64.rpm state=present
10 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/mariadb/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install MariaDB
3 | yum: name={{ item }} state=present
4 | with_items:
5 | - mariadb-server
6 | - mariadb
7 |
8 | - name: Start MariaDB
9 | service: name=mariadb state=started enabled=yes
10 |
11 | - name: Create sample db
12 | command: mysql -u root -e 'CREATE DATABASE IF NOT EXISTS sampledb;'
13 |
14 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/nginx/files/default.conf:
--------------------------------------------------------------------------------
1 | upstream api {
2 | server 127.0.0.1:8090;
3 | }
4 |
5 | server {
6 | listen 81 default_server;
7 | listen [::]:81 default_server;
8 | server_name _;
9 | return 301 https://$host$request_uri;
10 | }
11 |
12 | server {
13 | listen 80 default_server;
14 | listen [::]:80 default_server;
15 | server_name _;
16 |
17 | location / {
18 | root /var/www/html;
19 | }
20 |
21 | location /greeting {
22 | client_max_body_size 20M;
23 |
24 | proxy_set_header Host $http_host;
25 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
26 | proxy_set_header X-Real-IP $remote_addr;
27 |
28 | proxy_pass http://api;
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/nginx/files/nginx.conf:
--------------------------------------------------------------------------------
1 | # For more information on configuration, see:
2 | # * Official English Documentation: http://nginx.org/en/docs/
3 | # * Official Russian Documentation: http://nginx.org/ru/docs/
4 |
5 | user nginx;
6 | worker_processes auto;
7 | error_log /var/log/nginx/error.log;
8 | pid /run/nginx.pid;
9 |
10 | events {
11 | worker_connections 1024;
12 | }
13 |
14 | http {
15 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
16 | '$status $body_bytes_sent "$http_referer" '
17 | '"$http_user_agent" "$http_x_forwarded_for"';
18 |
19 | access_log /var/log/nginx/access.log main;
20 |
21 | sendfile on;
22 | tcp_nopush on;
23 | tcp_nodelay on;
24 | keepalive_timeout 65;
25 | types_hash_max_size 2048;
26 |
27 | include /etc/nginx/mime.types;
28 | default_type application/octet-stream;
29 |
30 | # Load modular configuration files from the /etc/nginx/conf.d directory.
31 | # See http://nginx.org/en/docs/ngx_core_module.html#include
32 | # for more information.
33 | include /etc/nginx/conf.d/*.conf;
34 | }
35 |
--------------------------------------------------------------------------------
/lab-07/provision/roles/nginx/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Test nginx configuration
3 | command: nginx -t
4 |
5 | - name: Reload nginx configuration
6 | command: nginx -s reload
--------------------------------------------------------------------------------
/lab-07/provision/roles/nginx/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install EPEL repo
3 | yum: name=epel-release state=present
4 |
5 | - name: Install nginx server
6 | yum: name=nginx enablerepo=epel state=present
7 |
8 | - name: Start nginx
9 | service: name=nginx state=started enabled=yes
10 |
11 | - name: Update nginx default config
12 | copy: src=nginx.conf dest=/etc/nginx/nginx.conf
13 | notify:
14 | - Test nginx configuration
15 | - Reload nginx configuration
16 |
17 | - name: Update nginx site config
18 | copy: src=default.conf dest=/etc/nginx/conf.d/default.conf
19 | notify:
20 | - Test nginx configuration
21 | - Reload nginx configuration
22 |
23 | - name: Flush handlers
24 | meta: flush_handlers
25 |
26 |
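Because the notified handlers only test the configuration after it has already been copied into place, a broken nginx.conf would briefly replace the working one. The copy module's validate option catches this before the file is swapped in; a hedged sketch for the main config (per-site snippets under conf.d cannot be validated standalone this way):

- name: Update nginx default config
  copy:
    src: nginx.conf
    dest: /etc/nginx/nginx.conf
    validate: nginx -t -c %s
  notify:
    - Reload nginx configuration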
--------------------------------------------------------------------------------
/lab-07/provision/roles/selinux/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Disable selinux
3 | selinux: state=disabled
--------------------------------------------------------------------------------
/lab-07/provision/site.yml:
--------------------------------------------------------------------------------
1 | ---
 2 | # This playbook deploys the greeting application
3 |
4 | - hosts: data-tier
5 | become: true
6 |
7 | vars_files:
8 | - "environments/{{env_name}}.yml"
9 |
10 | roles:
11 | - selinux
12 | - firewall
13 | - mariadb
14 |
15 | - hosts: web-tier
16 | become: true
17 |
18 | vars_files:
19 | - "environments/{{env_name}}.yml"
20 |
21 | roles:
22 | - selinux
23 | - firewall
24 | - nginx
25 | - deploy_static_content
26 |
27 | - hosts: application-tier
28 | become: true
29 |
30 | vars_files:
31 | - "environments/{{env_name}}.yml"
32 |
33 | roles:
34 | - selinux
35 | - firewall
36 | - deploy_application
37 |
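Each play expects env_name to be supplied from outside, typically as an extra var (for example -e env_name=dev). If falling back to a default environment is acceptable, the default filter keeps the playbook runnable without that flag; a sketch for one of the plays:

- hosts: web-tier
  become: true

  vars_files:
    - "environments/{{ env_name | default('dev') }}.yml"

  roles:
    - selinux
    - firewall
    - nginx
    - deploy_static_content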
--------------------------------------------------------------------------------
/lab-07/scripts/cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Remove Ansible.
4 | sudo yum -y remove ansible
5 |
6 | # Clean up temporary files and directories
7 | sudo rm -rf /tmp/*
--------------------------------------------------------------------------------
/lab-07/scripts/prepare.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Update System
4 | sudo yum -y update
5 | # Install EPEL repository.
6 | sudo yum -y install epel-release
7 | # Install Ansible.
8 | sudo yum -y install ansible --enablerepo=epel
--------------------------------------------------------------------------------
/lab-07/scripts/update.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Update System
4 | sudo yum -y update
--------------------------------------------------------------------------------
/lab-07/test/ami.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook tests the yollando application for Amazon image (AMI) creation
3 |
4 | - hosts: localhost
5 | connection: local
6 |
7 | roles:
8 | - nginx
9 | - app
--------------------------------------------------------------------------------
/lab-07/test/roles/app/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Port 8090 should be listening
3 | wait_for: port=8090
4 |
--------------------------------------------------------------------------------
/lab-07/test/roles/nginx/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Port 80 should be listening
3 | wait_for: port=80
4 |
--------------------------------------------------------------------------------
/lab-07/test/site.yml:
--------------------------------------------------------------------------------
1 | ---
 2 | # This playbook tests the yollando application deployment
3 |
4 | - hosts: web-tier
5 |
6 | roles:
7 | - nginx
8 |
9 | - hosts: application-tier
10 |
11 | roles:
12 | - app
13 |
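Beyond checking that the ports are open, a test role could also assert that nginx actually proxies requests to the application. A hedged sketch using the uri module, assuming the /greeting location configured in the nginx default.conf:

- name: Greeting endpoint should respond through nginx
  uri:
    url: http://localhost/greeting
    status_code: 200
    return_content: yes
  register: greeting_response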
--------------------------------------------------------------------------------
/lab-08/roles/docker-redis/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create a redis container
3 | docker_container:
4 | name: myredis
5 | image: redis
6 | command: redis-server --appendonly yes
7 | state: present
8 | exposed_ports:
9 | - 6379
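With state: present the container is only created, not started, and the exposed port is not published to the host. A hedged sketch that runs the container and maps the port (the host data path is purely illustrative):

- name: Run the redis container and publish its port
  docker_container:
    name: myredis
    image: redis
    command: redis-server --appendonly yes
    state: started
    published_ports:
      - "6379:6379"
    volumes:
      - /opt/redis-data:/data   # hypothetical host path for the append-only file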
--------------------------------------------------------------------------------
/lab-08/roles/docker/files/index.html:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 | #
4 | # This script is meant for quick & easy install via:
5 | # 'curl -sSL https://get.docker.com/ | sh'
6 | # or:
7 | # 'wget -qO- https://get.docker.com/ | sh'
8 | #
9 | # For test builds (ie. release candidates):
10 | # 'curl -fsSL https://test.docker.com/ | sh'
11 | # or:
12 | # 'wget -qO- https://test.docker.com/ | sh'
13 | #
14 | # For experimental builds:
15 | # 'curl -fsSL https://experimental.docker.com/ | sh'
16 | # or:
17 | # 'wget -qO- https://experimental.docker.com/ | sh'
18 | #
19 | # Docker Maintainers:
20 | # To update this script on https://get.docker.com,
21 | # use hack/release.sh during a normal release,
22 | # or the following one-liner for script hotfixes:
23 | # aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index
24 | #
25 |
26 | url="https://get.docker.com/"
27 | apt_url="https://apt.dockerproject.org"
28 | yum_url="https://yum.dockerproject.org"
29 | gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D"
30 |
31 | key_servers="
32 | ha.pool.sks-keyservers.net
33 | pgp.mit.edu
34 | keyserver.ubuntu.com
35 | "
36 |
37 | command_exists() {
38 | command -v "$@" > /dev/null 2>&1
39 | }
40 |
41 | echo_docker_as_nonroot() {
42 | if command_exists docker && [ -e /var/run/docker.sock ]; then
43 | (
44 | set -x
45 | $sh_c 'docker version'
46 | ) || true
47 | fi
48 | your_user=your-user
49 | [ "$user" != 'root' ] && your_user="$user"
50 | # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
51 | cat <<-EOF
52 |
53 | If you would like to use Docker as a non-root user, you should now consider
54 | adding your user to the "docker" group with something like:
55 |
56 | sudo usermod -aG docker $your_user
57 |
58 | Remember that you will have to log out and back in for this to take effect!
59 |
60 | EOF
61 | }
62 |
63 | # Check if this is a forked Linux distro
64 | check_forked() {
65 |
66 | # Check for lsb_release command existence, it usually exists in forked distros
67 | if command_exists lsb_release; then
68 | # Check if the `-u` option is supported
69 | set +e
70 | lsb_release -a -u > /dev/null 2>&1
71 | lsb_release_exit_code=$?
72 | set -e
73 |
74 | # Check if the command has exited successfully, it means we're in a forked distro
75 | if [ "$lsb_release_exit_code" = "0" ]; then
76 | # Print info about current distro
77 | cat <<-EOF
78 | You're using '$lsb_dist' version '$dist_version'.
79 | EOF
80 |
81 | # Get the upstream release info
82 | lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]')
83 | dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]')
84 |
85 | # Print info about upstream distro
86 | cat <<-EOF
87 | Upstream release is '$lsb_dist' version '$dist_version'.
88 | EOF
89 | else
90 | if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
91 | # We're Debian and don't even know it!
92 | lsb_dist=debian
93 | dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
94 | case "$dist_version" in
95 | 8|'Kali Linux 2')
96 | dist_version="jessie"
97 | ;;
98 | 7)
99 | dist_version="wheezy"
100 | ;;
101 | esac
102 | fi
103 | fi
104 | fi
105 | }
106 |
107 | rpm_import_repository_key() {
108 | local key=$1; shift
109 | local tmpdir=$(mktemp -d)
110 | chmod 600 "$tmpdir"
111 | for key_server in $key_servers ; do
112 | gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break
113 | done
114 | gpg --homedir "$tmpdir" -k "$key" >/dev/null
115 | gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key
116 | rpm --import "$tmpdir"/repo.key
117 | rm -rf "$tmpdir"
118 | }
119 |
120 | semverParse() {
121 | major="${1%%.*}"
122 | minor="${1#$major.}"
123 | minor="${minor%%.*}"
124 | patch="${1#$major.$minor.}"
125 | patch="${patch%%[-.]*}"
126 | }
127 |
128 | do_install() {
129 | case "$(uname -m)" in
130 | *64)
131 | ;;
132 | armv6l|armv7l)
133 | ;;
134 | *)
135 | cat >&2 <<-'EOF'
136 | Error: you are not using a 64bit platform or a Raspberry Pi (armv6l/armv7l).
137 | Docker currently only supports 64bit platforms or a Raspberry Pi (armv6l/armv7l).
138 | EOF
139 | exit 1
140 | ;;
141 | esac
142 |
143 | if command_exists docker; then
144 | version="$(docker -v | awk -F '[ ,]+' '{ print $3 }')"
145 | MAJOR_W=1
146 | MINOR_W=10
147 |
148 | semverParse $version
149 |
150 | shouldWarn=0
151 | if [ $major -lt $MAJOR_W ]; then
152 | shouldWarn=1
153 | fi
154 |
155 | if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then
156 | shouldWarn=1
157 | fi
158 |
159 | cat >&2 <<-'EOF'
160 | Warning: the "docker" command appears to already exist on this system.
161 |
162 | If you already have Docker installed, this script can cause trouble, which is
163 | why we're displaying this warning and provide the opportunity to cancel the
164 | installation.
165 |
166 | If you installed the current Docker package using this script and are using it
167 | EOF
168 |
169 | if [ $shouldWarn -eq 1 ]; then
170 | cat >&2 <<-'EOF'
171 | again to update Docker, we urge you to migrate your image store before upgrading
172 | to v1.10+.
173 |
174 | You can find instructions for this here:
175 | https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
176 | EOF
177 | else
178 | cat >&2 <<-'EOF'
179 | again to update Docker, you can safely ignore this message.
180 | EOF
181 | fi
182 |
183 | cat >&2 <<-'EOF'
184 |
185 | You may press Ctrl+C now to abort this script.
186 | EOF
187 | ( set -x; sleep 20 )
188 | fi
189 |
190 | user="$(id -un 2>/dev/null || true)"
191 |
192 | sh_c='sh -c'
193 | if [ "$user" != 'root' ]; then
194 | if command_exists sudo; then
195 | sh_c='sudo -E sh -c'
196 | elif command_exists su; then
197 | sh_c='su -c'
198 | else
199 | cat >&2 <<-'EOF'
200 | Error: this installer needs the ability to run commands as root.
201 | We are unable to find either "sudo" or "su" available to make this happen.
202 | EOF
203 | exit 1
204 | fi
205 | fi
206 |
207 | curl=''
208 | if command_exists curl; then
209 | curl='curl -sSL'
210 | elif command_exists wget; then
211 | curl='wget -qO-'
212 | elif command_exists busybox && busybox --list-modules | grep -q wget; then
213 | curl='busybox wget -qO-'
214 | fi
215 |
216 | # check to see which repo they are trying to install from
217 | if [ -z "$repo" ]; then
218 | repo='main'
219 | if [ "https://test.docker.com/" = "$url" ]; then
220 | repo='testing'
221 | elif [ "https://experimental.docker.com/" = "$url" ]; then
222 | repo='experimental'
223 | fi
224 | fi
225 |
226 | # perform some very rudimentary platform detection
227 | lsb_dist=''
228 | dist_version=''
229 | if command_exists lsb_release; then
230 | lsb_dist="$(lsb_release -si)"
231 | fi
232 | if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
233 | lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
234 | fi
235 | if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
236 | lsb_dist='debian'
237 | fi
238 | if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
239 | lsb_dist='fedora'
240 | fi
241 | if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
242 | lsb_dist='oracleserver'
243 | fi
244 | if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
245 | lsb_dist='centos'
246 | fi
247 | if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
248 | lsb_dist='redhat'
249 | fi
250 | if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
251 | lsb_dist="$(. /etc/os-release && echo "$ID")"
252 | fi
253 |
254 | lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
255 |
256 | # Special case redhatenterpriseserver
257 | if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then
258 | # Set it to redhat, it will be changed to centos below anyways
259 | lsb_dist='redhat'
260 | fi
261 |
262 | case "$lsb_dist" in
263 |
264 | ubuntu)
265 | if command_exists lsb_release; then
266 | dist_version="$(lsb_release --codename | cut -f2)"
267 | fi
268 | if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
269 | dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
270 | fi
271 | ;;
272 |
273 | debian|raspbian)
274 | dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
275 | case "$dist_version" in
276 | 8)
277 | dist_version="jessie"
278 | ;;
279 | 7)
280 | dist_version="wheezy"
281 | ;;
282 | esac
283 | ;;
284 |
285 | oracleserver)
286 | # need to switch lsb_dist to match yum repo URL
287 | lsb_dist="oraclelinux"
288 | dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')"
289 | ;;
290 |
291 | fedora|centos|redhat)
292 | dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)"
293 | ;;
294 |
295 | *)
296 | if command_exists lsb_release; then
297 | dist_version="$(lsb_release --codename | cut -f2)"
298 | fi
299 | if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
300 | dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
301 | fi
302 | ;;
303 |
304 |
305 | esac
306 |
307 | # Check if this is a forked Linux distro
308 | check_forked
309 |
310 | # Run setup for each distro accordingly
311 | case "$lsb_dist" in
312 | amzn)
313 | (
314 | set -x
315 | $sh_c 'sleep 3; yum -y -q install docker'
316 | )
317 | echo_docker_as_nonroot
318 | exit 0
319 | ;;
320 |
321 | 'opensuse project'|opensuse)
322 | echo 'Going to perform the following operations:'
323 | if [ "$repo" != 'main' ]; then
324 | echo ' * add repository obs://Virtualization:containers'
325 | fi
326 | echo ' * install Docker'
327 | $sh_c 'echo "Press CTRL-C to abort"; sleep 3'
328 |
329 | if [ "$repo" != 'main' ]; then
330 | # install experimental packages from OBS://Virtualization:containers
331 | (
332 | set -x
333 | zypper -n ar -f obs://Virtualization:containers Virtualization:containers
334 | rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
335 | )
336 | fi
337 | (
338 | set -x
339 | zypper -n install docker
340 | )
341 | echo_docker_as_nonroot
342 | exit 0
343 | ;;
344 | 'suse linux'|sle[sd])
345 | echo 'Going to perform the following operations:'
346 | if [ "$repo" != 'main' ]; then
347 | echo ' * add repository obs://Virtualization:containers'
348 | echo ' * install experimental Docker using packages NOT supported by SUSE'
349 | else
350 | echo ' * add the "Containers" module'
351 | echo ' * install Docker using packages supported by SUSE'
352 | fi
353 | $sh_c 'echo "Press CTRL-C to abort"; sleep 3'
354 |
355 | if [ "$repo" != 'main' ]; then
356 | # install experimental packages from OBS://Virtualization:containers
357 | echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE'
358 | (
359 | set -x
360 | zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers
361 | rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
362 | )
363 | else
364 | # Add the containers module
365 | # Note well-1: the SLE machine must already be registered against SUSE Customer Center
366 | # Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect
367 | (
368 | set -x
369 | SUSEConnect -p sle-module-containers/12/x86_64 -r ""
370 | )
371 | fi
372 | (
373 | set -x
374 | zypper -n install docker
375 | )
376 | echo_docker_as_nonroot
377 | exit 0
378 | ;;
379 |
380 | ubuntu|debian|raspbian)
381 | export DEBIAN_FRONTEND=noninteractive
382 |
383 | did_apt_get_update=
384 | apt_get_update() {
385 | if [ -z "$did_apt_get_update" ]; then
386 | ( set -x; $sh_c 'sleep 3; apt-get update' )
387 | did_apt_get_update=1
388 | fi
389 | }
390 |
391 | if [ "$lsb_dist" = "raspbian" ]; then
392 | # Create Raspbian specific systemd drop-in file, use overlay by default
393 | ( set -x; $sh_c "mkdir -p /etc/systemd/system/docker.service.d" )
394 | ( set -x; $sh_c "echo '[Service]\nExecStart=\nExecStart=/usr/bin/dockerd --storage-driver overlay -H fd://' > /etc/systemd/system/docker.service.d/overlay.conf" )
395 | else
396 | # aufs is preferred over devicemapper; try to ensure the driver is available.
397 | if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
398 | if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then
399 | kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual"
400 |
401 | apt_get_update
402 | ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
403 |
404 | if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
405 | echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
406 | echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!'
407 | ( set -x; sleep 10 )
408 | fi
409 | else
410 | echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual'
411 | echo >&2 ' package. We have no AUFS support. Consider installing the packages'
412 | echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.'
413 | ( set -x; sleep 10 )
414 | fi
415 | fi
416 | fi
417 |
418 | # install apparmor utils if they're missing and apparmor is enabled in the kernel
419 | # otherwise Docker will fail to start
420 | if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
421 | if command -v apparmor_parser >/dev/null 2>&1; then
422 | echo 'apparmor is enabled in the kernel and apparmor utils were already installed'
423 | else
424 | echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..'
425 | apt_get_update
426 | ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' )
427 | fi
428 | fi
429 |
430 | if [ ! -e /usr/lib/apt/methods/https ]; then
431 | apt_get_update
432 | ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
433 | fi
434 | if [ -z "$curl" ]; then
435 | apt_get_update
436 | ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
437 | curl='curl -sSL'
438 | fi
439 | if [ ! -e /usr/bin/gpg ]; then
440 | apt_get_update
441 | ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' )
442 | fi
443 |
444 | (
445 | set -x
446 | for key_server in $key_servers ; do
447 | $sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break
448 | done
449 | $sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null"
450 | $sh_c "mkdir -p /etc/apt/sources.list.d"
451 | $sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list"
452 | $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine'
453 | )
454 | echo_docker_as_nonroot
455 | exit 0
456 | ;;
457 |
458 | fedora|centos|redhat|oraclelinux)
459 | if [ "${lsb_dist}" = "redhat" ]; then
460 | # we use the centos repository for both redhat and centos releases
461 | lsb_dist='centos'
462 | fi
463 | $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF
464 | [docker-${repo}-repo]
465 | name=Docker ${repo} Repository
466 | baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version}
467 | enabled=1
468 | gpgcheck=1
469 | gpgkey=${yum_url}/gpg
470 | EOF
471 | if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then
472 | (
473 | set -x
474 | $sh_c 'sleep 3; dnf -y -q install docker-engine'
475 | )
476 | else
477 | (
478 | set -x
479 | $sh_c 'sleep 3; yum -y -q install docker-engine'
480 | )
481 | fi
482 | echo_docker_as_nonroot
483 | exit 0
484 | ;;
485 | gentoo)
486 | if [ "$url" = "https://test.docker.com/" ]; then
487 | # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
488 | cat >&2 <<-'EOF'
489 |
490 | You appear to be trying to install the latest nightly build in Gentoo.'
491 | The portage tree should contain the latest stable release of Docker, but'
492 | if you want something more recent, you can always use the live ebuild'
493 | provided in the "docker" overlay available via layman. For more'
494 | instructions, please see the following URL:'
495 |
496 | https://github.com/tianon/docker-overlay#using-this-overlay'
497 |
498 | After adding the "docker" overlay, you should be able to:'
499 |
500 | emerge -av =app-emulation/docker-9999'
501 |
502 | EOF
503 | exit 1
504 | fi
505 |
506 | (
507 | set -x
508 | $sh_c 'sleep 3; emerge app-emulation/docker'
509 | )
510 | exit 0
511 | ;;
512 | esac
513 |
514 | # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
515 | cat >&2 <<-'EOF'
516 |
517 | Either your platform is not easily detectable, is not supported by this
518 | installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have
519 | a package for Docker. Please visit the following URL for more detailed
520 | installation instructions:
521 |
522 | https://docs.docker.com/engine/installation/
523 |
524 | EOF
525 | exit 1
526 | }
527 |
528 | # wrapped up in a function so that we have some protection against only getting
529 | # half the file during "curl | sh"
530 | do_install
531 |
--------------------------------------------------------------------------------
/lab-08/roles/docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
 2 | # Switched to shell because the Python version on the target is lower than 2.7.9
3 | #
4 | #- name: Download Installation Script
5 | # get_url:
6 | # url: https://get.docker.com/
7 | # dest: /tmp/docker.sh
8 | # mode: 0755
9 |
10 | - name: Download docker install script
11 | shell: curl -o /tmp/docker.sh https://get.docker.com/
12 | args:
13 | creates: /tmp/docker.sh
14 |
15 | - name: Install docker
16 | shell: sh /tmp/docker.sh
17 | args:
18 | creates: /usr/bin/docker
19 |
20 | - name: Install pip
21 | apt:
22 | name: python-pip
23 | state: present
24 |
25 | - name: Install docker-py
26 | pip:
27 | name: docker-py
28 |
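A quick post-install check can confirm the Docker daemon is actually reachable before the docker-redis role runs; a minimal sketch:

- name: Verify the docker daemon responds
  command: docker info
  register: docker_info
  changed_when: false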
--------------------------------------------------------------------------------
/lab-08/site.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | become: true
4 |
5 | roles:
6 | - docker
7 | - docker-redis
--------------------------------------------------------------------------------