├── 001-Networking-DHCP
├── Vagrantfile
├── dhcp.conf
├── readme.md
└── tcpdump.pcap
├── 002-Networking-DNS
├── Vagrantfile
├── install-dns.sh
├── install-tools.sh
├── readme.md
└── zones
│ ├── named.conf
│ ├── named.master.conf
│ ├── named.slave.conf
│ ├── vanilla.master.fwd.zone
│ ├── vanilla.master.rev.zone
│ ├── vanilla.slave.fwd.zone
│ ├── vanilla.slave.rev.zone
│ ├── zones.master.conf
│ └── zones.slave.conf
├── 003-Docker-Cron-n-Apache
├── Dockerfile
├── httpd.conf
├── readme.md
└── supervisord.conf
├── 004-Docker-Compose
├── docker-compose.yml
├── dockerfiles
│ ├── memcached
│ │ ├── Dockerfile
│ │ ├── memcached.sh
│ │ └── rootfs.tar.xz
│ ├── nginx
│ │ └── Dockerfile
│ └── php
│ │ ├── Dockerfile
│ │ ├── php-cacert.pem
│ │ ├── php-fpm-www.conf
│ │ └── php.ini
├── nginx.conf
├── readme.md
└── www-root
│ └── index.php
├── 005-Ansible-3-Tier-App
├── Vagrantfile
├── hosts
├── readme.md
├── roles
│ ├── all-in-one.yml
│ ├── application
│ │ ├── files
│ │ │ ├── .htaccess
│ │ │ └── httpd-root.conf
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── index.php.j2
│ │ └── vars
│ │ │ └── main.yml
│ ├── balancer
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── nginx.conf.j2
│ ├── common
│ │ └── tasks
│ │ │ └── main.yml
│ └── database
│ │ ├── files
│ │ └── table.sql
│ │ ├── handlers
│ │ └── main.yml
│ │ ├── tasks
│ │ └── main.yml
│ │ ├── templates
│ │ └── my.cnf.j2
│ │ └── vars
│ │ └── main.yml
└── vagrant-ansible-hosts.sh
├── 006-Terraform-AWS
├── data.tf
├── data
│ ├── index.php
│ ├── nginx.conf
│ ├── php-www.conf
│ └── php.ini
├── provider.tf
├── readme.md
├── resource-ec2.tf
├── resource-eip.tf
├── resource-elb.tf
├── resource-internet-gateway.tf
├── resource-nat.tf
├── resource-network-acl.tf
├── resource-route-table-association.tf
├── resource-route-tables.tf
├── resource-security-group.tf
├── resource-subnets.tf
├── resource-vpc.tf
├── terraform.tfvars
└── variables.tf
├── 007-K8S-Hello-World
├── Dockerfile
├── Jenkinsfile
├── index.html
├── k8s-services.yaml
└── readme.md
├── 008-K8S-MySQL
├── Dockerfile
├── Jenkinsfile
├── k8s-services.yaml
├── readme.md
└── script.sql
├── 009-K8S-Accessing-Pods-In-Cluster
├── Dockerfile
├── Jenkinsfile
├── k8s-services.yaml
└── readme.md
├── 010-Jenkins-Teraform-Ansible
├── .gitignore
├── Dockerfile
├── Jenkinsfile
├── mvnw
├── mvnw.cmd
├── pom.xml
├── readme.md
├── terraform
│ ├── ec2-artifactory.sh
│ ├── ec2-artifactory.tf
│ ├── ec2-ci.sh
│ ├── ec2-ci.tf
│ ├── ec2-host.tf
│ ├── ec2-registry.sh
│ ├── ec2-registry.tf
│ ├── main.tf
│ ├── readme.md
│ ├── shared
│ │ ├── .ssh
│ │ │ ├── ssh-private-key
│ │ │ └── ssh-private-key.pub
│ │ ├── ansible
│ │ │ ├── common
│ │ │ │ ├── tasks
│ │ │ │ │ └── main.yml
│ │ │ │ ├── templates
│ │ │ │ │ └── docker-compose.yml.j2
│ │ │ │ └── vars
│ │ │ │ │ └── main.yml
│ │ │ ├── inventory
│ │ │ └── playbook.yml
│ │ ├── jenkins
│ │ │ ├── install.sh
│ │ │ └── plugins.txt
│ │ ├── nginx
│ │ │ └── nginx.conf
│ │ └── registry
│ │ │ ├── docker-compose.yml
│ │ │ └── secrets
│ │ │ ├── fullchain.pem
│ │ │ ├── htpasswd
│ │ │ └── privkey.pem
│ ├── vars.tf
│ └── www-data
│ │ ├── artifactory-cert.pem
│ │ ├── artifactory-privkey.pem
│ │ ├── ci-cert.pem
│ │ ├── ci-privkey.pem
│ │ ├── dh-params.pem
│ │ └── html
│ │ └── index.html
├── vagrant
│ ├── Vagrantfile
│ ├── readme.md
│ └── shared
│ │ ├── .ssh
│ │ ├── ssh-private-key
│ │ └── ssh-private-key.pub
│ │ ├── ansible
│ │ ├── common
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ ├── templates
│ │ │ │ └── docker-compose.yml.j2
│ │ │ └── vars
│ │ │ │ └── main.yml
│ │ ├── inventory
│ │ └── playbook.yml
│ │ ├── jenkins
│ │ ├── install.sh
│ │ └── plugins.txt
│ │ ├── master.sh
│ │ ├── registry
│ │ ├── docker-compose.yml
│ │ └── secrets
│ │ │ ├── fullchain.pem
│ │ │ ├── htpasswd
│ │ │ └── privkey.pem
│ │ └── ssh-keys.sh
└── version.md
├── 011-Ansible-3-Tier-App-v2
├── .ssh
│ ├── id_rsa
│ ├── id_rsa.pub
│ └── ssh.sh
├── Vagrantfile
├── ansible.cfg
├── hosts
├── inventory.sh
├── play-books
│ ├── files
│ │ ├── apache.crt
│ │ ├── apache.key
│ │ ├── made.com.ua.crt
│ │ ├── made.com.ua.dh.pem
│ │ └── made.com.ua.key
│ ├── play.yml
│ ├── roles
│ │ ├── app
│ │ │ ├── README.md
│ │ │ ├── defaults
│ │ │ │ └── main.yml
│ │ │ ├── handlers
│ │ │ │ └── main.yml
│ │ │ ├── meta
│ │ │ │ └── main.yml
│ │ │ ├── tasks
│ │ │ │ ├── deployment.yml
│ │ │ │ ├── install-centos.yml
│ │ │ │ ├── install-ubuntu.yml
│ │ │ │ ├── main.yml
│ │ │ │ └── settings.yml
│ │ │ ├── templates
│ │ │ │ ├── httpd.conf.j2
│ │ │ │ └── index.php.j2
│ │ │ └── vars
│ │ │ │ ├── centos.yml
│ │ │ │ ├── main.yml
│ │ │ │ └── ubuntu.yml
│ │ ├── db
│ │ │ ├── README.md
│ │ │ ├── defaults
│ │ │ │ └── main.yml
│ │ │ ├── files
│ │ │ │ └── tables.sql
│ │ │ ├── handlers
│ │ │ │ └── main.yml
│ │ │ ├── meta
│ │ │ │ └── main.yml
│ │ │ ├── tasks
│ │ │ │ ├── create-table-and-users.yml
│ │ │ │ ├── install-centos.yml
│ │ │ │ ├── install-ubuntu.yml
│ │ │ │ ├── main.yml
│ │ │ │ ├── replication-master.yml
│ │ │ │ ├── replication-slave.yml
│ │ │ │ ├── replication.yml
│ │ │ │ └── settings.yml
│ │ │ ├── templates
│ │ │ │ └── my.cnf.j2
│ │ │ ├── tests
│ │ │ │ ├── inventory
│ │ │ │ └── test.yml
│ │ │ └── vars
│ │ │ │ ├── centos.yml
│ │ │ │ ├── main.yml
│ │ │ │ └── ubuntu.yml
│ │ ├── epel
│ │ │ ├── README.md
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── ip
│ │ │ ├── readme.md
│ │ │ └── tasks
│ │ │ │ ├── generate_ip_fact.yml
│ │ │ │ └── main.yml
│ │ └── lb
│ │ │ ├── README.md
│ │ │ ├── handlers
│ │ │ └── main.yml
│ │ │ ├── meta
│ │ │ └── main.yml
│ │ │ ├── tasks
│ │ │ ├── configuration.yml
│ │ │ ├── install-centos.yml
│ │ │ ├── install-ubuntu.yml
│ │ │ └── main.yml
│ │ │ ├── templates
│ │ │ ├── nginx-ssl.conf.j2
│ │ │ └── nginx.conf.j2
│ │ │ └── vars
│ │ │ ├── Ubuntu.yml
│ │ │ ├── centos.yml
│ │ │ └── main.yml
│ └── vars
│ │ ├── defaults.yml
│ │ ├── deployment-production.yml
│ │ └── deployment-staging.yml
└── readme.md
├── 012-Python-Flask-MySQL
├── app
│ ├── app.py
│ ├── static
│ │ └── style.css
│ └── templates
│ │ ├── add.tpl.j2
│ │ ├── index.tpl.j2
│ │ └── list.tpl.j2
├── data
│ └── schema.sql
├── readme.md
├── requirements.txt
└── test.py
├── 013-Python-Network-Checker
├── .gitignore
├── app.py
├── async.py
├── readme.md
├── requirments.txt
├── server.py
└── test.csv
├── 014-aws-cloudformation-lambda
├── .number
├── deployment.sh
├── ec2amis_backup.yaml
├── lambda.py
└── readme.md
├── 015-serverless-apigateway-lambda
├── lambda.py
├── mappings-values.yml
├── readme.md
└── serverless.yml
└── readme.md
/001-Networking-DHCP/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 |
3 | config.vm.box = "centos/7"
4 | config.vm.box_check_update = false
5 |
6 | config.vm.provider 'virtualbox' do |box|
7 | box.cpus = 1
8 | box.memory = "512"
9 | end
10 |
11 | config.vm.provision :shell,
12 | :preserve_order => true,
13 | :inline => "echo $(hostname -a); echo $(hostname -I)"
14 |
15 | config.vm.define :server do |box|
16 | box.vm.hostname = "dhcp"
17 |
18 | box.vm.network :private_network,
19 | :ip => "192.168.60.4",
20 | :virtualbox__intnet => true
21 |
22 | box.vm.provision :shell,
23 | :preserve_order => true,
24 | :inline => <<-SCRIPT
25 | sudo yum install -y dhcp tcpdump > /dev/null 2>&1
26 | sudo cp /vagrant/dhcp.conf /etc/dhcp/dhcpd.conf
27 | sudo systemctl start dhcpd
28 | SCRIPT
29 | end
30 |
31 |
32 | config.vm.define :client_mac do |box|
33 | box.vm.hostname = "dhcp-client-mac"
34 |
35 | box.vm.network :private_network,
36 | :type => "dhcp",
37 | :mac => "5CA1AB1E0001",
38 | :virtualbox__intnet => true
39 | end
40 |
41 |
42 | config.vm.define :client_dyn_ip do |box|
43 | box.vm.hostname = "dhcp-client-ip"
44 |
45 | box.vm.network :private_network,
46 | :type => "dhcp",
47 | :virtualbox__intnet => true
48 | end
49 |
50 | end
51 |
--------------------------------------------------------------------------------
/001-Networking-DHCP/dhcp.conf:
--------------------------------------------------------------------------------
1 |
2 | # DHCP Server Configuration file.
3 | subnet 10.0.2.0 netmask 255.255.255.0 {
4 | option routers 10.0.2.15;
5 | option subnet-mask 255.255.255.0;
6 | option broadcast-address 10.0.2.255;
7 | option domain-name-servers 8.8.8.8, 8.8.4.4;
8 | range 10.0.2.100 10.0.2.120;
9 | default-lease-time 60;
10 | max-lease-time 7200;
11 | }
12 |
13 | # Our Network
14 | subnet 192.168.60.0 netmask 255.255.255.0 {
15 | option routers 192.168.60.1;
16 | option subnet-mask 255.255.255.0;
17 | option broadcast-address 192.168.60.255;
18 | option domain-name-servers 8.8.8.8, 8.8.4.4;
19 | option dhcp-server-identifier 192.168.60.4;
20 | range 192.168.60.100 192.168.60.120;
21 | default-lease-time 60;
22 | max-lease-time 7200;
23 |
24 | host macburger {
25 | hardware ethernet 5c:a1:ab:1e:00:01;
26 | fixed-address 192.168.60.201;
27 | }
28 | }
29 |
30 |
--------------------------------------------------------------------------------
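A quick way to see the effect of the `range` / `fixed-address` split above (not part of the repo; assumes the stock CentOS lease-file path):

```bash
# dynamic leases from 192.168.60.100-120 land in the lease database;
# the MAC-pinned host "macburger" gets 192.168.60.201 and is handled separately.
vagrant ssh server -c "sudo cat /var/lib/dhcpd/dhcpd.leases"
```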
/001-Networking-DHCP/readme.md:
--------------------------------------------------------------------------------
1 | # DHCP
2 |
3 | Using Vagrant, set up a DHCP server and clients:
4 | * One DHCP Server
5 | * One client gets an IP by MAC address
6 | * One client gets an IP automatically
7 |
8 | After the clients are ready, capture some traffic with `tcpdump`.
9 |
10 |
11 |
12 | ### Starting Up and Test
13 |
14 | ```bash
15 | # Preconfigured DHCP config will be copied during vagrant startup provisioning.
16 | # expected ips
17 | # server -> 192.168.60.4 (hardcoded in vagrantfile)
18 | # client_mac -> 192.168.60.201 (mac address based ip)
19 | # client_dyn_ip -> expected to be in range from 192.168.60.100 to 192.168.60.120
20 | vagrant up
21 |
22 | # tcpdump part
23 | vagrant ssh server
24 | # run it and ctrl+c to break.
25 | [vagrant@dhcp] > sudo tcpdump -w /vagrant/tcpdump.pcap -i eth1
26 |
27 | # back to host
28 | scp -i .vagrant/machines/server/virtualbox/private_key -P 2222 vagrant@127.0.0.1:/vagrant/tcpdump.pcap .
29 |
30 | # reading dump
31 | tcpdump -r tcpdump.pcap
32 | ```
33 |
34 |
35 |
--------------------------------------------------------------------------------
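An optional extra step while reading the capture (assuming the `tcpdump.pcap` produced above): restrict the output to DHCP traffic only.

```bash
# BPF filter for the DHCP/BOOTP ports; -n skips name resolution
tcpdump -n -r tcpdump.pcap port 67 or port 68
```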
/001-Networking-DHCP/tcpdump.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/butuzov/sandbox-devops/115e2291a5c60f768a7c3e0e6d9e10b334d2b314/001-Networking-DHCP/tcpdump.pcap
--------------------------------------------------------------------------------
/002-Networking-DNS/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 |
3 | config.vm.box = "centos/7"
4 | config.vm.box_check_update = false
5 |
6 | config.vm.provider 'virtualbox' do |box|
7 | box.cpus = 1
8 | box.memory = "512"
9 | end
10 |
11 | config.vm.provision "shell",
12 | :path => "install-tools.sh"
13 |
14 | config.vm.define :master do |box|
15 | box.vm.hostname = "master"
16 |
17 | box.vm.network :private_network,
18 | :ip => "192.168.0.17",
19 | :type => "dhcp"
20 |
21 | box.vm.provision :shell,
22 | :path => "install-dns.sh",
23 | :args => "master",
24 | :preserve_order => true
25 | end
26 |
27 |
28 | config.vm.define :puppet do |box|
29 | box.vm.hostname = "puppet"
30 | box.vm.network :private_network,
31 | :ip => "192.168.0.18",
32 | :type => "dhcp"
33 |
34 | box.vm.provision :shell,
35 | :path => "install-dns.sh",
36 | :args => "slave",
37 | :preserve_order => true
38 | end
39 |
40 |
41 | end
42 |
--------------------------------------------------------------------------------
/002-Networking-DNS/install-dns.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | TYPE=$1
4 |
5 | MASTER_IP=192.168.0.17
6 | PUPPET_IP=192.168.0.18
7 |
8 | IP=$([ $TYPE == "master" ] && echo $MASTER_IP || echo $PUPPET_IP)
9 |
10 | echo "Setting up Network"
11 | ifconfig eth1 $IP netmask 255.255.255.0 up
12 | IP=$( ifconfig | grep "inet" | grep -v "inet6" | awk 'NR==2{print $2}')
13 | sudo -s
14 |
15 | echo "Configuring bind"
16 | MASK=$(echo $MASTER_IP | sed -e "s/[0-9]\{1,\}$/0/g")
17 |
18 | sudo -s
19 | systemctl stop named
20 |
21 | # Actual Installation
22 | cd /vagrant/zones
23 |
24 | NAMED=named.$TYPE.conf
25 | cat named.conf > $NAMED
26 |
27 | sed -i.bac -E "s/(listen-on port 53[[:space:]]+)\{(.*)}/\1{\2 $IP;} /g" $NAMED
28 | sed -i.bac -E "s/(allow-query[[:space:]]+)\{ (localhost)/\1{ \2; $MASK\/24/g" $NAMED
29 |
30 | if [[ $TYPE == "slave" ]]; then
31 | sed -i.bac -E "s/(allow-transfer[[:space:]]+)\{.*\};//g" $NAMED
32 | fi
33 |
34 | # copy changed configuration
35 | cat zones.$TYPE.conf >> $NAMED
36 | cat $NAMED > /etc/named.conf
37 | sudo chgrp named /etc/named.conf
38 | sudo chown root /etc/named.conf
39 |
40 | sudo cp vanilla.$TYPE.fwd.zone /var/named/vanilla.fwd.zone
41 | sudo cp vanilla.$TYPE.rev.zone /var/named/vanilla.rev.zone
42 | sudo chgrp named /var/named/vanilla*
43 | sudo chown root /var/named/vanilla*
44 |
45 | named-checkzone vanilla.com /var/named/vanilla.fwd.zone
46 | named-checkzone vanilla.com /var/named/vanilla.rev.zone
47 | named-checkconf -z /etc/named.conf
48 |
49 | systemctl start named
50 | systemctl status named.service
51 |
52 | echo "search vanilla.com" > /etc/resolv.conf
53 | echo "nameserver $MASTER_IP" >> /etc/resolv.conf
54 | echo "nameserver $PUPPET_IP" >> /etc/resolv.conf
55 |
56 | dig vanilla.com
57 |
58 |
--------------------------------------------------------------------------------
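A small sanity check of what the two `sed` edits above should leave in `/etc/named.conf` (compare with `zones/named.master.conf`); run it inside the VM after provisioning:

```bash
# expected on the master: listen-on gains the host IP, allow-query gains the /24
grep -E "listen-on port 53|allow-query" /etc/named.conf
```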
/002-Networking-DNS/install-tools.sh:
--------------------------------------------------------------------------------
1 | echo "Installing Software"
2 | sudo yum install -y bind* > /dev/null 2>&1
3 | sudo yum install -y net-tools > /dev/null 2>&1
4 | sudo yum install -y iptables-services > /dev/null 2>&1
5 | sudo systemctl stop firewalld > /dev/null 2>&1
6 | sudo systemctl mask firewalld > /dev/null 2>&1
7 | sudo systemctl enable named > /dev/null 2>&1
8 | sudo systemctl start named > /dev/null 2>&1
--------------------------------------------------------------------------------
/002-Networking-DNS/readme.md:
--------------------------------------------------------------------------------
1 | # Working with DNS
2 |
3 | ### Goal
4 | `Do some basic DNS setup (master <-> slave)`
5 |
6 | ### Starting Up and Test
7 |
8 | ```bash
9 | # Start it up and checkout provision output.
10 | vagrant up
11 |
12 | # you also can do:
13 |
14 | vagrant ssh master
15 | [vagrant@master] > dig vanilla.com
16 | [vagrant@master] > exit
17 |
18 | vagrant ssh puppet
19 | [vagrant@puppet] > dig vanilla.com
20 | [vagrant@puppet] > exit
21 | ```
22 |
--------------------------------------------------------------------------------
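A few more checks against the zone data defined below, in case `dig vanilla.com` alone is not convincing:

```bash
# forward lookups against each server, plus one reverse (PTR) lookup
dig @192.168.0.17 master.vanilla.com +short
dig @192.168.0.18 puppet.vanilla.com +short
dig @192.168.0.17 -x 192.168.0.18 +short
```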
/002-Networking-DNS/zones/named.conf:
--------------------------------------------------------------------------------
1 | //
2 | // named.conf
3 | //
4 | // Provided by Red Hat bind package to configure the ISC BIND named(8) DNS
5 | // server as a caching only nameserver (as a localhost DNS resolver only).
6 | //
7 | // See /usr/share/doc/bind*/sample/ for example named configuration files.
8 | //
9 | // See the BIND Administrator's Reference Manual (ARM) for details about the
10 | // configuration located in /usr/share/doc/bind-{version}/Bv9ARM.html
11 |
12 | options {
13 | listen-on port 53 { 127.0.0.1; };
14 | listen-on-v6 port 53 { ::1; };
15 | directory "/var/named";
16 | dump-file "/var/named/data/cache_dump.db";
17 | statistics-file "/var/named/data/named_stats.txt";
18 | memstatistics-file "/var/named/data/named_mem_stats.txt";
19 | allow-query { localhost; };
20 | allow-transfer { localhost; };
21 |
22 | recursion no;
23 |
24 | dnssec-enable yes;
25 | dnssec-validation yes;
26 |
27 | bindkeys-file "/etc/named.iscdlv.key";
28 |
29 | managed-keys-directory "/var/named/dynamic";
30 |
31 | pid-file "/run/named/named.pid";
32 | session-keyfile "/run/named/session.key";
33 | };
34 |
35 | logging {
36 | channel default_debug {
37 | file "data/named.run";
38 | severity dynamic;
39 | };
40 | };
41 |
42 | zone "." IN {
43 | type hint;
44 | file "named.ca";
45 | };
46 |
47 | include "/etc/named.rfc1912.zones";
48 | include "/etc/named.root.key";
--------------------------------------------------------------------------------
/002-Networking-DNS/zones/named.master.conf:
--------------------------------------------------------------------------------
1 | //
2 | // named.conf
3 | //
4 | // Provided by Red Hat bind package to configure the ISC BIND named(8) DNS
5 | // server as a caching only nameserver (as a localhost DNS resolver only).
6 | //
7 | // See /usr/share/doc/bind*/sample/ for example named configuration files.
8 | //
9 | // See the BIND Administrator's Reference Manual (ARM) for details about the
10 | // configuration located in /usr/share/doc/bind-{version}/Bv9ARM.html
11 |
12 | options {
13 | listen-on port 53 { 127.0.0.1; 192.168.0.17; 192.168.0.17;} ;
14 | listen-on-v6 port 53 { ::1; };
15 | directory "/var/named";
16 | dump-file "/var/named/data/cache_dump.db";
17 | statistics-file "/var/named/data/named_stats.txt";
18 | memstatistics-file "/var/named/data/named_mem_stats.txt";
19 | allow-query { localhost; 192.168.0.0/24; 192.168.0.0/24; };
20 |
21 |
22 | recursion no;
23 |
24 | dnssec-enable yes;
25 | dnssec-validation yes;
26 |
27 | bindkeys-file "/etc/named.iscdlv.key";
28 |
29 | managed-keys-directory "/var/named/dynamic";
30 |
31 | pid-file "/run/named/named.pid";
32 | session-keyfile "/run/named/session.key";
33 | };
34 |
35 | logging {
36 | channel default_debug {
37 | file "data/named.run";
38 | severity dynamic;
39 | };
40 | };
41 |
42 | zone "." IN {
43 | type hint;
44 | file "named.ca";
45 | };
46 |
47 | include "/etc/named.rfc1912.zones";
48 | include "/etc/named.root.key";
49 |
--------------------------------------------------------------------------------
/002-Networking-DNS/zones/named.slave.conf:
--------------------------------------------------------------------------------
1 | //
2 | // named.conf
3 | //
4 | // Provided by Red Hat bind package to configure the ISC BIND named(8) DNS
5 | // server as a caching only nameserver (as a localhost DNS resolver only).
6 | //
7 | // See /usr/share/doc/bind*/sample/ for example named configuration files.
8 | //
9 | // See the BIND Reference Manual for Administrators (ARM) for details about the
10 | // configuration located in /usr/share/doc/bind-{version}/Bv9ARM.html
11 |
12 | options {
13 | listen-on port 53 { 127.0.0.1; 192.168.0.18; 192.168.0.18;} ;
14 | listen-on-v6 port 53 { ::1; };
15 | directory "/var/named";
16 | dump-file "/var/named/data/cache_dump.db";
17 | statistics-file "/var/named/data/named_stats.txt";
18 | memstatistics-file "/var/named/data/named_mem_stats.txt";
19 | allow-query { localhost; 192.168.0.0/24; };
20 | allow-transfer { localhost; };
21 |
22 | recursion no;
23 |
24 | dnssec-enable yes;
25 | dnssec-validation yes;
26 |
27 | bindkeys-file "/etc/named.iscdlv.key";
28 |
29 | managed-keys-directory "/var/named/dynamic";
30 |
31 | pid-file "/run/named/named.pid";
32 | session-keyfile "/run/named/session.key";
33 | };
34 |
35 | logging {
36 | channel default_debug {
37 | file "data/named.run";
38 | severity dynamic;
39 | };
40 | };
41 |
42 | zone "." IN {
43 | type hint;
44 | file "named.ca";
45 | };
46 |
47 | include "/etc/named.rfc1912.zones";
48 | include "/etc/named.root.key";
49 |
--------------------------------------------------------------------------------
/002-Networking-DNS/zones/vanilla.master.fwd.zone:
--------------------------------------------------------------------------------
1 | $TTL 5M
2 | @ IN SOA master.vanilla.com. abuse.vanilla.com. (
3 | 20080423 ; serial
4 | 1D ; refresh
5 | 1H ; retry
6 | 1W ; expire
7 | 3H ) ; minimum
8 |
9 | @ IN NS master.vanilla.com.
10 | @ IN NS puppet.vanilla.com.
11 | @ IN PTR vanilla.com.
12 |
13 | ; Name server hostname to IP resolve.
14 | @ IN A 192.168.0.17
15 | @ IN A 192.168.0.18
16 |
17 | ; Hosts in this Domain
18 | master IN A 192.168.0.17
19 | puppet IN A 192.168.0.18
20 |
--------------------------------------------------------------------------------
/002-Networking-DNS/zones/vanilla.master.rev.zone:
--------------------------------------------------------------------------------
1 | $TTL 5M
2 | @ IN SOA vanilla.com. abuse.vanilla.com. (
3 | 20080423 ; serial
4 | 1D ; refresh
5 | 1H ; retry
6 | 1W ; expire
7 | 3H ) ; minimum
8 | ; Name server's
9 | @ IN NS master.vanilla.com.
10 | @ IN NS puppet.vanilla.com.
11 | @ IN PTR vanilla.com.
12 |
13 | ; Name server hostname to IP resolve.
14 | master IN A 192.168.0.17
15 | puppet IN A 192.168.0.18
16 |
17 | ;Hosts in Domain
18 | 17 IN PTR master.vanilla.com.
19 | 18 IN PTR puppet.vanilla.com.
20 |
--------------------------------------------------------------------------------
/002-Networking-DNS/zones/vanilla.slave.fwd.zone:
--------------------------------------------------------------------------------
1 | $ORIGIN .
2 | $TTL 5M
3 | vanilla.com IN SOA vanilla.com. abuse.vanilla.com. (
4 | 20080423 ; serial
5 | 1D ; refresh
6 | 1H ; retry
7 | 1W ; expire
8 | 3H ) ; minimum
9 |
10 | NS master.vanilla.com.
11 | NS puppet.vanilla.com.
12 | A 192.168.0.17
13 | A 192.168.0.18
14 |
15 | $ORIGIN vanilla.com.
16 | ; Hosts in this Domain
17 | master IN A 192.168.0.17
18 | puppet IN A 192.168.0.18
19 |
--------------------------------------------------------------------------------
/002-Networking-DNS/zones/vanilla.slave.rev.zone:
--------------------------------------------------------------------------------
1 | $ORIGIN .
2 | $TTL 5M
3 | 0.168.192.in-addr.arpa IN SOA vanilla.com. abuse.vanilla.com. (
4 | 20080423 ; serial
5 | 1D ; refresh
6 | 1H ; retry
7 | 1W ; expire
8 | 3H ) ; minimum
9 |
10 | NS master.vanilla.com.
11 | NS puppet.vanilla.com.
12 | PTR vanilla.com.
13 |
14 | $ORIGIN 0.168.192.in-addr.arpa.
15 | ; Hosts in this Domain
16 | 17 PTR master.vanilla.com.
17 | 18 PTR puppet.vanilla.com.
18 | master A 192.168.0.17
19 | puppet A 192.168.0.18
20 |
--------------------------------------------------------------------------------
/002-Networking-DNS/zones/zones.master.conf:
--------------------------------------------------------------------------------
1 | zone "vanilla.com" IN {
2 | type master;
3 | file "vanilla.fwd.zone";
4 | allow-update { none; };
5 | notify yes;
6 | };
7 |
8 | zone "0.168.192.in-addr.arpa" IN {
9 | type master;
10 | file "vanilla.rev.zone";
11 | allow-update { none; };
12 | notify yes;
13 | };
14 |
--------------------------------------------------------------------------------
/002-Networking-DNS/zones/zones.slave.conf:
--------------------------------------------------------------------------------
1 | zone "vanilla.com" IN {
2 | type slave;
3 | file "vanilla.fwd.zone";
4 | masterfile-format text;
5 | masters { 192.168.0.17; };
6 | };
7 |
8 | zone "0.168.192.in-addr.arpa" IN {
9 | type slave;
10 | file "vanilla.rev.zone";
11 | masterfile-format text;
12 | masters { 192.168.0.17; };
13 | };
14 |
--------------------------------------------------------------------------------
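One way to confirm the slave actually pulled the zone from the master (a sketch, assuming both `named` instances are up): the SOA serial should be identical on both servers.

```bash
dig @192.168.0.17 vanilla.com SOA +short
dig @192.168.0.18 vanilla.com SOA +short
```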
/003-Docker-Cron-n-Apache/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:7
2 |
3 | ENV container docker
4 |
5 | RUN yum -y update && yum clean all
6 | RUN yum -y install initscripts httpd links systemd python-setuptools cronie
7 | RUN easy_install supervisor
8 |
9 | RUN touch /etc/cron.d/hello-cron \
10 | && echo '* * * * * root echo "Hello world" >> /var/log/cron.log 2>&1' > /etc/cron.d/hello-cron
11 |
12 | # RUN echo 'root:passw0rd' | chpasswd
13 |
14 | COPY httpd.conf /etc/httpd/conf/
15 |
16 | EXPOSE 8080
17 |
18 | COPY supervisord.conf /etc/supervisor/supervisord.conf
19 | CMD ["/usr/bin/supervisord"]
20 |
21 |
22 |
--------------------------------------------------------------------------------
/003-Docker-Cron-n-Apache/httpd.conf:
--------------------------------------------------------------------------------
1 | ServerRoot "/etc/httpd"
2 |
3 | Listen 8080
4 | Listen 8430
5 |
6 | Include conf.modules.d/*.conf
7 |
8 | User apache
9 | Group apache
10 |
11 | ServerAdmin root@localhost
12 | ServerName localhost
13 |
14 | <Directory />
15 | AllowOverride none
16 | Require all denied
17 | </Directory>
18 |
19 | DocumentRoot "/var/www/html"
20 |
21 | <Directory "/var/www">
22 | AllowOverride None
23 | Require all granted
24 | </Directory>
25 |
26 | <Directory "/var/www/html">
27 | Options +Indexes +FollowSymLinks
28 | AllowOverride All
29 | Require all granted
30 | </Directory>
31 |
32 | <IfModule dir_module>
33 | DirectoryIndex index.html
34 | </IfModule>
35 |
36 | <Files ".ht*">
37 | Require all denied
38 | </Files>
39 |
40 | ErrorLog "/var/log/httpd-error.log"
41 |
42 | LogLevel warn
43 |
44 | <IfModule log_config_module>
45 |
46 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
47 | LogFormat "%h %l %u %t \"%r\" %>s %b" common
48 |
49 | <IfModule logio_module>
50 | # You need to enable mod_logio.c to use %I and %O
51 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
52 | </IfModule>
53 |
54 | CustomLog "/var/log/httpd-access.log" combined
55 | </IfModule>
56 |
57 |
58 |
59 |
60 | <IfModule mime_module>
61 | TypesConfig /etc/mime.types
62 | AddType application/x-compress .Z
63 | AddType application/x-gzip .gz .tgz
64 | AddType text/html .shtml
65 | AddOutputFilter INCLUDES .shtml
66 | </IfModule>
67 |
68 |
69 | AddDefaultCharset UTF-8
70 |
71 | <IfModule mime_magic_module>
72 | MIMEMagicFile conf/magic
73 | </IfModule>
74 |
75 | EnableSendfile on
76 |
77 | IncludeOptional conf.d/*.conf
78 |
--------------------------------------------------------------------------------
/003-Docker-Cron-n-Apache/readme.md:
--------------------------------------------------------------------------------
1 | # Docker (initial intro)
2 |
3 | ### Goal
4 |
5 | Run `httpd` + `cron` simultaneously in a container.
6 |
7 | ### Starting Up and Test
8 |
9 | ```bash
10 | # Container Build
11 | > docker build -t devops/httpd .
12 | > docker run -itd -v $(pwd):/var/log/ -p "8080:8080" devops/httpd
13 |
14 | # Checking httpd
15 | curl localhost:8080 && tail httpd-access.log
16 |
17 | # Checking Cron
18 | > tail -f cron.log
19 | ```
20 |
21 | ### Thanks
22 | [muralindia](https://github.com/muralindia) for the tip about `supervisord`.
23 |
--------------------------------------------------------------------------------
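An extra check (not in the readme) that supervisord really keeps both programs alive; the container is looked up by the image name used above, assuming a single container from that image is running:

```bash
# lists the container's processes from the host; expect both crond and httpd
docker top $(docker ps -q --filter ancestor=devops/httpd)
```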
/003-Docker-Cron-n-Apache/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | nodaemon=true
3 |
4 | [program:crond]
5 | command=/usr/sbin/crond
6 |
7 | [program:httpd]
8 | command=/usr/sbin/httpd -k start
9 |
--------------------------------------------------------------------------------
/004-Docker-Compose/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 |
5 | ############################################################################
6 | # Nginx Load Balancer
7 | # proxy server that works with our application (php7-fpm/mysql/nginx/memcached)
8 | ############################################################################
9 | nginxlb:
10 | build: ./dockerfiles/nginx
11 | image: devops/nginx:1.15.2
12 | ports:
13 | - "80:80"
14 | mem_limit: 1024m
15 | volumes:
16 | - ./www-root/:/var/www/html/
17 | - ./nginx.conf:/etc/nginx/nginx.conf:ro
18 | links:
19 | - php
20 |
21 | ############################################################################
22 | # PHP 7.2 FPM
23 | ############################################################################
24 | php:
25 | build: ./dockerfiles/php
26 | image: devops/php:7.2.6
27 | mem_limit: 1024m
28 | volumes:
29 | - ./www-root:/var/www/html/
30 | links:
31 | - memcached
32 | - mysql
33 |
34 |
35 | ############################################################################
36 | # MySQL 5.7.17
37 | ############################################################################
38 | mysql:
39 | image: mysql:5.7
40 | mem_limit: 1024m
41 | volumes:
42 | - ./db-mysql:/var/lib/mysql
43 | environment:
44 | MYSQL_ROOT_PASSWORD: rootpassword
45 | MYSQL_USER: simpleuser
46 | MYSQL_PASSWORD: password
47 |
48 | ###########################################################################
49 | # Memcached
50 | ###########################################################################
51 | memcached:
52 | build: ./dockerfiles/memcached
53 | image: devops/memcached:1.5.7
54 | mem_limit: 512m
55 | command: memcached -m 512m
56 |
--------------------------------------------------------------------------------
/004-Docker-Compose/dockerfiles/memcached/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM scratch
2 | ADD rootfs.tar.xz /
3 |
4 | RUN adduser -D memcache
5 |
6 | ENV MEMCACHED_VERSION 1.5.7
7 | ENV MEMCACHED_SHA1 31d6f6b80668025e4616aa2ad5c7a45f24ed9665
8 |
9 | RUN set -x \
10 | \
11 | && apk add --no-cache --virtual .build-deps \
12 | ca-certificates \
13 | coreutils \
14 | cyrus-sasl-dev \
15 | dpkg-dev dpkg \
16 | gcc \
17 | libc-dev \
18 | libevent-dev \
19 | libressl \
20 | linux-headers \
21 | make \
22 | perl \
23 | perl-utils \
24 | tar \
25 | \
26 | && wget -O memcached.tar.gz "https://memcached.org/files/memcached-$MEMCACHED_VERSION.tar.gz" \
27 | && echo "$MEMCACHED_SHA1 memcached.tar.gz" | sha1sum -c - \
28 | && mkdir -p /usr/src/memcached \
29 | && tar -xzf memcached.tar.gz -C /usr/src/memcached --strip-components=1 \
30 | && rm memcached.tar.gz \
31 | \
32 | && cd /usr/src/memcached \
33 | \
34 | && ./configure \
35 | --build="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)" \
36 | --enable-sasl \
37 | && make -j "$(nproc)" \
38 | \
39 | && make test \
40 | && make install \
41 | \
42 | && cd / && rm -rf /usr/src/memcached \
43 | \
44 | && runDeps="$( \
45 | scanelf --needed --nobanner --format '%n#p' --recursive /usr/local \
46 | | tr ',' '\n' \
47 | | sort -u \
48 | | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
49 | )" \
50 | && apk add --virtual .memcached-rundeps $runDeps \
51 | && apk del .build-deps \
52 | \
53 | && memcached -V
54 |
55 | COPY memcached.sh /usr/local/bin/
56 | ENTRYPOINT ["memcached.sh"]
57 |
58 | USER memcache
59 | EXPOSE 11211
60 | CMD ["memcached"]
61 |
--------------------------------------------------------------------------------
/004-Docker-Compose/dockerfiles/memcached/memcached.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
4 | # first arg is `-f` or `--some-option`
5 | if [ "${1#-}" != "$1" ]; then
6 | set -- memcached "$@"
7 | fi
8 |
9 | exec "$@"
10 |
--------------------------------------------------------------------------------
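The entrypoint above prepends `memcached` whenever the first argument looks like a flag, so both invocations below end up running the daemon (a usage sketch; the image tag comes from docker-compose.yml):

```bash
docker run -d devops/memcached:1.5.7 -m 64              # becomes: memcached -m 64
docker run -d devops/memcached:1.5.7 memcached -m 64    # passed through unchanged
```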
/004-Docker-Compose/dockerfiles/memcached/rootfs.tar.xz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/butuzov/sandbox-devops/115e2291a5c60f768a7c3e0e6d9e10b334d2b314/004-Docker-Compose/dockerfiles/memcached/rootfs.tar.xz
--------------------------------------------------------------------------------
/004-Docker-Compose/dockerfiles/php/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM php:7.2.6-fpm-alpine3.6
2 |
3 | RUN apk add --update icu-libs icu icu-dev
4 |
5 | RUN set -xe \
6 | && apk add \
7 | --virtual .memcached-deps \
8 | icu-libs icu icu-dev \
9 | zlib-dev libmemcached-dev cyrus-sasl-dev git \
10 | && git clone --branch php7 https://github.com/php-memcached-dev/php-memcached /usr/src/php/ext/memcached/ \
11 | && docker-php-source extract \
12 | && docker-php-ext-configure memcached \
13 | && docker-php-ext-install memcached zip \
14 | && docker-php-source delete \
15 | && rm -rf /usr/src/php/ext/memcached \
16 | && apk del --no-cache libmemcached-dev \
17 | zlib-dev cyrus-sasl-dev \
18 | && rm -rf /usr/src/php/ext/memcached \
19 | && apk del libmemcached-dev zlib-dev cyrus-sasl-dev \
20 | && apk add --update --virtual .build-deps\
21 | freetype-dev libjpeg-turbo-dev libpng-dev \
22 | libmcrypt-dev curl-dev libxpm-dev libxml2-dev \
23 | autoconf g++ imagemagick-dev \
24 | libtool make php7-dev \
25 | && apk add php7-gd \
26 | && docker-php-source extract \
27 | && pecl update-channels \
28 | && pecl install xdebug redis mongodb igbinary msgpack \
29 | && docker-php-ext-enable xdebug redis mongodb igbinary msgpack \
30 | && docker-php-ext-configure gd \
31 | --with-gd \
32 | --with-freetype-dir=/usr/include/ \
33 | --with-jpeg-dir=/usr/include/ \
34 | --with-png-dir=/usr/include/ \
35 | && docker-php-ext-install intl pdo pdo_mysql mysqli gd \
36 | && docker-php-source delete \
37 | && apk del .build-deps \
38 | && rm -rf /tmp/pear/temp/
39 |
40 | COPY php.ini /usr/local/etc/php/conf.d/settings.ini
41 | COPY php-cacert.pem /usr/lib/ssl/cert.pem
42 | COPY php-fpm-www.conf /usr/local/etc/php-fpm.d/www.conf
43 |
44 |
--------------------------------------------------------------------------------
/004-Docker-Compose/dockerfiles/php/php.ini:
--------------------------------------------------------------------------------
1 | ; --------------------------------------------------------
2 | ; Uploads
3 | ; --------------------------------------------------------
4 | ; Maximum allowed size for uploaded files.
5 | ; http://php.net/upload-max-filesize
6 | upload_max_filesize = 1200M
7 |
8 | ; Maximum number of files that can be uploaded via a single request
9 | max_file_uploads = 20
10 |
11 | ; Maximum size of POST data that PHP will accept.
12 | ; Its value may be 0 to disable the limit. It is ignored if POST data reading
13 | ; is disabled through enable_post_data_reading.
14 | ; http://php.net/post-max-size
15 | post_max_size = 1200M
16 |
17 | ; --------------------------------------------------------
18 | ; Input vars and runtime
19 | ; --------------------------------------------------------
20 |
21 | ; How many GET/POST/COOKIE input variables may be accepted
22 | max_input_vars = 10000
23 |
24 | ; Maximum execution time of each script, in seconds
25 | ; http://php.net/max-execution-time
26 | ; Note: This directive is hardcoded to 0 for the CLI SAPI
27 | max_execution_time = 360000
28 |
29 | ; Maximum amount of time each script may spend parsing request data. It's a good
30 | ; idea to limit this time on productions servers in order to eliminate unexpectedly
31 | ; long running scripts.
32 | ; Note: This directive is hardcoded to -1 for the CLI SAPI
33 | ; Default Value: -1 (Unlimited)
34 | ; Development Value: 60 (60 seconds)
35 | ; Production Value: 60 (60 seconds)
36 | ; http://php.net/max-input-time
37 |
38 | max_input_time = 360000
39 |
40 | ; --------------------------------------------------------
41 | ; Xdebug
42 | ; --------------------------------------------------------
43 | xdebug.overload_var_dump = 1
44 |
45 | xdebug.var_display_max_children = 100000
46 | xdebug.var_display_max_data = 100000
47 | xdebug.var_display_max_depth = 32
48 |
49 | ; --------------------------------------------------------
50 | ; Memcached
51 | ; --------------------------------------------------------
52 | ; Set the default serializer for new memcached objects.
53 | ; valid values are: php, igbinary, json, json_array, msgpack
54 | ;
55 | ; json - standard php JSON encoding. This serializer
56 | ; is fast and compact but only works on UTF-8
57 | ; encoded data and does not fully implement
58 | ; serializing. See the JSON extension.
59 | ; json_array - as json, but decodes into arrays
60 | ; php - the standard php serializer
61 | ; igbinary - a binary serializer
62 | ; msgpack - a cross-language binary serializer
63 | ;
64 | ; The default is igbinary if available, then msgpack if available, then php otherwise.
65 | memcached.serializer = "igbinary"
66 |
67 | [Date]
68 | ; Defines the default timezone used by the date functions
69 | ; http://php.net/date.timezone
70 | date.timezone = Europe/Kiev
71 |
72 | ; Sessions
73 | ; ----- Configuration options
74 | ; http://php.net/manual/en/memcached.configuration.php
75 |
76 | ; Use memcache as a session handler
77 | ; session.save_handler=memcached
78 | ; Defines a comma separated of server urls to use for session storage
79 | ; session.save_path=memcached:11211
80 |
--------------------------------------------------------------------------------
/004-Docker-Compose/nginx.conf:
--------------------------------------------------------------------------------
1 |
2 | user nginx;
3 | worker_processes 1;
4 |
5 | pid /var/www/.nginx.pid;
6 |
7 | events {
8 | worker_connections 1024;
9 | }
10 |
11 | http {
12 | include /etc/nginx/mime.types;
13 | default_type application/octet-stream;
14 |
15 | sendfile on;
16 | #tcp_nopush on;
17 | keepalive_timeout 65;
18 |
19 |
20 | proxy_connect_timeout 60000;
21 | proxy_send_timeout 60000;
22 | proxy_read_timeout 60000;
23 | send_timeout 60000;
24 |
25 | fastcgi_buffers 16 16k;
26 | fastcgi_buffer_size 32k;
27 | proxy_buffer_size 128k;
28 | proxy_buffers 4 256k;
29 | proxy_busy_buffers_size 256k;
30 |
31 | autoindex on;
32 | client_max_body_size 1200M;
33 | server_tokens off;
34 |
35 | ############################################################################
36 | ## localhost
37 | ############################################################################
38 | server {
39 | listen 80;
40 | server_name localhost;
41 |
42 | location = /favicon.ico { access_log off; log_not_found off; }
43 | location = / {
44 | access_log off;
45 | log_not_found off;
46 | }
47 | root /var/www/html/;
48 | index index.php;
49 |
50 | location ~ \.php$ {
51 | try_files $uri =404;
52 | fastcgi_split_path_info ^(.+\.php)(/.+)$;
53 | fastcgi_pass php:9000;
54 | fastcgi_index index.php;
55 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
56 | fastcgi_read_timeout 30000;
57 | include fastcgi_params;
58 | }
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/004-Docker-Compose/readme.md:
--------------------------------------------------------------------------------
1 | # Using `docker-compose`
2 |
3 | ### Goal
4 | Run an NPMM (Nginx/PHP-FPM/MySQL/memcached) web-dev stack.
5 |
6 | ### Starting Up and Test
7 |
8 | ```bash
9 | > docker-compose up -d
10 | > curl localhost
11 | ```
12 |
--------------------------------------------------------------------------------
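A couple of optional follow-up checks once the stack is up (service names as defined in docker-compose.yml):

```bash
docker-compose ps                                             # all four services should be "Up"
docker-compose exec php php -m | grep -E "memcached|mysqli"   # extensions built into the php image
```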
/004-Docker-Compose/www-root/index.php:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/Vagrantfile:
--------------------------------------------------------------------------------
20 | :guest => 80,
21 | :host => 8080
22 | end
23 | end
24 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/hosts:
--------------------------------------------------------------------------------
1 | [all:vars]
2 | ansible_connection=ssh
3 | ansible_user=vagrant
4 | ansible_host=127.0.0.1
5 | ansible_ssh_common_args='-o StrictHostKeyChecking=no'
6 |
7 |
8 | [balancer]
9 | balancer ansible_port=2222 ansible_private_key_file=.vagrant/machines/balancer/virtualbox/private_key
10 |
11 | [database]
12 | database ansible_port=2200 ansible_private_key_file=.vagrant/machines/database/virtualbox/private_key
13 |
14 | [application]
15 | application-1 ansible_port=2201 ansible_private_key_file=.vagrant/machines/application-1/virtualbox/private_key
16 | application-2 ansible_port=2202 ansible_private_key_file=.vagrant/machines/application-2/virtualbox/private_key
17 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/readme.md:
--------------------------------------------------------------------------------
1 | # 3 Tier App using Ansible (v1, from May 2018)
2 |
3 | ### Goal
4 |
5 | Deploy "some" 3-tier app using Ansible.
6 |
7 | ### History
8 |
9 | This is the initial version and the first Ansible-related work here.
10 |
11 | ### Starting Up and Test
12 |
13 | ```bash
14 | # running application*, balancer and database hosts using vagrant
15 | vagrant up
16 |
17 | # generate ansible inventory file.
18 | chmod +x ./vagrant-ansible-hosts.sh
19 | ./vagrant-ansible-hosts.sh > hosts
20 |
21 | # Install ansible in virtual environment
22 | # python version last time used 3.7
23 | python3 -m venv .env
24 | source .env/bin/activate
25 | python3 -m pip install ansible
26 |
27 | # Running ping command
28 | sudo ansible all -i hosts -m ping -f 1
29 |
30 | # install and run everything
31 | sudo ansible-playbook -i hosts roles/all-in-one.yml -f 2 -b
32 | ```
33 |
34 | Visit [127.0.0.1:8080](http://127.0.0.1:8080) to see results
35 |
36 |
37 |
--------------------------------------------------------------------------------
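Optionally, hit the balancer a few times; `index.php` prints the backend's address, so responses should alternate between the two application hosts:

```bash
for i in 1 2 3 4; do curl -s http://127.0.0.1:8080/; echo; done
```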
/005-Ansible-3-Tier-App/roles/all-in-one.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Load Balancer
3 | hosts: balancer
4 | become: yes
5 | roles:
6 | - common
7 | - balancer
8 |
9 | # Installing MySQL
10 | - name: DataBase
11 | hosts: database
12 | become: yes
13 | roles:
14 | - database
15 |
16 | # Installing Applications
17 | - name: Applications
18 | hosts: application
19 | become: yes
20 | roles:
21 | - common
22 | - application
23 |
--------------------------------------------------------------------------------
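Since the playbook is split per host group, a single tier can be re-applied in isolation with `--limit` (same inventory and invocation style as in the readme):

```bash
sudo ansible-playbook -i hosts roles/all-in-one.yml -b --limit balancer
```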
/005-Ansible-3-Tier-App/roles/application/files/.htaccess:
--------------------------------------------------------------------------------
1 | DirectoryIndex index.php
2 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/application/files/httpd-root.conf:
--------------------------------------------------------------------------------
1 | # Further relax access to the default document root:
2 |
3 | ServerName localhost
4 | DocumentRoot /var/www/html
5 | DirectoryIndex index.php
6 | <Directory "/var/www/html">
7 | Options +Indexes +FollowSymLinks
8 | AllowOverride All
9 | Require all granted
10 | </Directory>
11 |
12 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/application/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Installing Apache
3 | - name: install php
4 | yum: name={{ item }} state=latest
5 | with_items:
6 | - httpd
7 | - httpd-tools
8 | - php
9 | - php-xdebug
10 | - php-mysql
11 | - php-fpm
12 |
13 | - name: enable apache
14 | service:
15 | name: httpd
16 | state: stopped
17 | enabled: yes
18 |
19 | - name: change web root permission
20 | file: path={{ item }} group=apache mode=0770 state=directory
21 | with_items:
22 | - /var/www/html
23 | - /var/www
24 |
25 | - name: copy files
26 | copy:
27 | src: "{{ item.src }}"
28 | dest: "{{ item.dest }}"
29 | owner: "apache"
30 | with_items:
31 | - { src: 'httpd-root.conf', dest: '/etc/httpd/conf.d/' }
32 | - { src: '.htaccess', dest: '/var/www/html/' }
33 |
34 | - name: compile and copy index.php
35 | template:
36 | src: "index.php.j2"
37 | dest: "/var/www/html/index.php"
38 | force: yes
39 |
40 | - name: restart apache
41 | service:
42 | name: httpd
43 | state: started
44 |
45 | - name: SELinux
46 | command: "{{item}}"
47 | with_items:
48 | - "setsebool -P httpd_can_network_connect=1"
49 | - "setsebool -P httpd_can_network_connect_db=1"
50 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/application/templates/index.php.j2:
--------------------------------------------------------------------------------
1 | <?php
2 |
3 | printf('%s', $_SERVER['SERVER_ADDR']);
4 |
5 |
6 | define( 'DB_USER', "{{hostvars["database"]["mysql_user"] }}");
7 | define( 'DB_PASS', "{{hostvars["database"]["mysql_pass"] }}");
8 | define( 'DB_HOST', "{{hostvars["database"]['ansible_all_ipv4_addresses'][0] }}");
9 | define( 'DB_PORT', "3306");
10 | define( 'DB_NAME', "{{hostvars["database"]["mysql_name"] }}" );
11 |
12 |
13 | # var_dump([
14 | # 'DB_USER' => DB_USER,
15 | # 'DB_PASS' => DB_PASS,
16 | # 'DB_NAME' => DB_NAME,
17 | # 'DB_HOST' => DB_HOST,
18 | # 'DB_PORT' => DB_PORT,
19 | # ]);
20 |
21 | $mysqli = new mysqli( DB_HOST, DB_USER, DB_PASS, DB_NAME, DB_PORT );
22 | // var_dump($mysqli);
23 |
24 | $query = "INSERT INTO visits (ip) VALUES ('{$_SERVER['SERVER_ADDR']}') ON DUPLICATE KEY UPDATE visits = visits+1";
25 |
26 | //var_dump($query);
27 |
28 | $mysqli->query($query);
29 |
30 | $res = $mysqli->query("SELECT * FROM `visits`");
31 |
32 | while ($row = $res->fetch_assoc()) {
33 | var_dump($row);
34 | }
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/application/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for app
3 | vars:
4 | files:
5 | - { src: 'httpd-root.conf', dest: '/etc/httpd/conf.d/' }
6 | - { src: '.htaccess', dest: '/var/www/html/' }
7 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/balancer/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Tasks file for Load Balancer
3 |
4 | - name: installing nginx
5 | yum:
6 | name: nginx
7 | state: latest
8 | enablerepo: epel
9 |
10 | - name: stop nginx ( before config change )
11 | service:
12 | name: nginx
13 | state: stopped
14 | enabled: yes
15 |
16 | - name: getting facts - application servers
17 | setup:
18 | delegate_to: "{{item}}"
19 | delegate_facts: True
20 | loop: "{{groups['application']}}"
21 |
22 | - name: create nginx configuration
23 | template: src=nginx.conf.j2 dest=/etc/nginx/nginx.conf force=yes
24 |
25 |
26 | - name: start nginx ( after config change )
27 | service:
28 | name: nginx
29 | state: started
30 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/balancer/templates/nginx.conf.j2:
--------------------------------------------------------------------------------
1 | # For more information on configuration, see:
2 | # * Official English Documentation: http://nginx.org/en/docs/
3 | # * Official Russian Documentation: http://nginx.org/ru/docs/
4 |
5 | user nginx;
6 | worker_processes auto;
7 | error_log /var/log/nginx/error.log;
8 | pid /run/nginx.pid;
9 |
10 | # Load dynamic modules. See /usr/share/nginx/README.dynamic.
11 | include /usr/share/nginx/modules/*.conf;
12 |
13 | events {
14 | worker_connections 1024;
15 | }
16 |
17 | http {
18 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
19 | '$status $body_bytes_sent "$http_referer" '
20 | '"$http_user_agent" "$http_x_forwarded_for"';
21 |
22 | access_log /var/log/nginx/access.log main;
23 |
24 | sendfile on;
25 | tcp_nopush on;
26 | tcp_nodelay on;
27 | keepalive_timeout 65;
28 | types_hash_max_size 2048;
29 |
30 | include /etc/nginx/mime.types;
31 | default_type application/octet-stream;
32 |
33 | include /etc/nginx/conf.d/*.conf;
34 |
35 | upstream application {
36 | {% for host in groups['application'] %}
37 | server {{ hostvars[ host ]['ansible_all_ipv4_addresses'][0] }}:80; # updated
38 | {% endfor %}
39 | }
40 |
41 | server {
42 | listen 80 default_server;
43 | listen [::]:80 default_server;
44 | server_name _;
45 |
46 | root html;
47 |
48 | # Load configuration files for the default server block.
49 | include /etc/nginx/default.d/*.conf;
50 |
51 | location / {
52 | proxy_buffers 16 4k;
53 | proxy_buffer_size 2k;
54 | proxy_pass http://application;
55 | }
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: update yum
3 | command: "yum -y upgrade"
4 |
5 | - name: epel-release
6 | yum:
7 | name: epel-release
8 | state: latest
9 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/database/files/table.sql:
--------------------------------------------------------------------------------
1 | DROP TABLE IF EXISTS `visits`;
2 | CREATE TABLE IF NOT EXISTS `visits` (
3 | `ip` varchar(15) NOT NULL,
4 | `visits` int(11) NOT NULL DEFAULT '1',
5 | UNIQUE KEY `ip` (`ip`)
6 | ) ENGINE=MEMORY DEFAULT CHARSET=utf8;
7 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/database/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart mysqld
3 | service:
4 | name: mysqld
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/database/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for db
3 | - name: add mysql repo
4 | yum:
5 | name: http://dev.mysql.com/get/mysql57-community-release-el7-8.noarch.rpm
6 | state: present
7 |
8 | - name: install mysql
9 | yum: pkg={{ item }}
10 | with_items:
11 | - mysql-community-server
12 | - mysql-community-client
13 | - MySQL-python
14 |
15 | - name: enable and start mysql
16 | service:
17 | name: mysqld
18 | state: started
19 | enabled: yes
20 |
21 | - name: configuration
22 | template: src=my.cnf.j2 dest=/root/.my.cnf force=no
23 |
24 |
25 | - name: configuration
26 | shell: |
27 | TMP_PASS=$(cat /var/log/mysqld.log | \
28 | grep "temporary password" | \
29 | grep -Eoi ": (.*?)" | \
30 | sed "s/: //"
31 | );
32 | mysql -uroot -p$TMP_PASS --connect-expired-password -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '{{root_pass}}'; flush privileges; "
33 | sed -i 's/__PASS__/{{root_pass}}/' /root/.my.cnf
34 | register: hello
35 |
36 | - name: restart mysql
37 | service:
38 | name: mysqld
39 | state: restarted
40 |
41 | - name: create db for app
42 | mysql_db:
43 | name: "{{dbname}}"
44 | state: present
45 |
46 | - name: restore database (part 1 - copy sql dump)
47 | copy:
48 | src: "table.sql"
49 | dest: "/tmp/table.sql"
50 |
51 | - name: restore database (part 2 - import sql dump)
52 | mysql_db:
53 | name: "{{dbname}}"
54 | state: import
55 | target: "/tmp/table.sql"
56 |
57 | - name: create application user
58 | mysql_user:
59 | name: "{{user}}"
60 | password: "{{pass}}"
61 | priv: '*.*:ALL'
62 | host: "%"
63 | state: present
64 |
65 | - set_fact:
66 | mysql_user: "{{user}}"
67 | mysql_pass: "{{pass}}"
68 | mysql_name: "{{dbname}}"
69 |
70 | - name: restart mysql
71 | service:
72 | name: mysqld
73 | state: restarted
74 |
75 |
--------------------------------------------------------------------------------
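A quick manual check on the database VM (a sketch; it reuses the `/root/.my.cnf` written by the role and the `application` database from `vars/main.yml`):

```bash
vagrant ssh database -c "sudo mysql --defaults-file=/root/.my.cnf -e 'SELECT * FROM application.visits'"
```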
/005-Ansible-3-Tier-App/roles/database/templates/my.cnf.j2:
--------------------------------------------------------------------------------
1 | [client]
2 | user={{root_user}}
3 | password=__PASS__
4 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/roles/database/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for db
3 | user: "application"
4 | pass: "TReshPandaRocket19375#"
5 | dbname: "application"
6 |
7 | root_user: "root"
8 | root_pass: "Yo_Soy_Groot123"
9 |
--------------------------------------------------------------------------------
/005-Ansible-3-Tier-App/vagrant-ansible-hosts.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | DIR=$(pwd | sed "s/\//\\\\\//g")
3 |
4 | echo "[all:vars]"
5 | echo "ansible_connection=ssh"
6 | echo "ansible_user=vagrant"
7 | echo "ansible_host=127.0.0.1"
8 | echo "ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
9 | echo ""
10 |
11 | vagrant ssh-config| \
12 | grep -iE "key|host|port" | \
13 | grep -ivE "strict|hosts" | \
14 | sed "s/^ //" | \
15 | sed "s/$DIR\///" | \
16 | awk '{
17 | row= ( NR/4 == int(NR/4)) ? NR/4 : int(NR/4)+1;
18 | if ( array[row] == "" ) {
19 | array[row] = $2
20 | } else {
21 | array[row]= array[row] " " $2
22 | }
23 | }
24 | END {
25 | for( i=1; i<= row; i++){
26 | print( array[i] )
27 | }
28 | }
29 | ' | \
30 | sed "s/\"//g" | \
31 | awk '{print $1, "ansible_port="$3, "ansible_private_key_file="$4}' | \
32 | awk '{
33 | split( $1, a, /-/)
34 | if ( groups[a[1]] == "" ){
35 | groups[a[1]] = a[1]
36 | printf("\n[%s]\n", a[1])
37 | }
38 | print $1, $2, $3
39 | }'
40 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/data.tf:
--------------------------------------------------------------------------------
1 | # getting availability zones list
2 | data "aws_availability_zones" "available" {}
3 |
4 | # data "external" "ip" {
5 | # program = ["bash", "dig +short myip.opendns.com @resolver1.opendns.com"]
6 | # }
7 |
8 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/readme.md:
--------------------------------------------------------------------------------
4 | VPC with public and private subnets, eip and LoadBalancer, NAT, Security groups etc... (common setup...)
5 |
6 | ```bash
7 | terraform plan --var-file=../terraform.tfvars
8 | terraform apply --var-file=../terraform.tfvars --auto-approve
9 | terraform destroy --var-file=../terraform.tfvars --auto-approve
10 | ```
11 |
12 | P.S.
13 | EC2 instances are provisioned via bash (remote-exec) using Terraform.
14 |
--------------------------------------------------------------------------------
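After `terraform apply`, the load balancer endpoint can be pulled from the outputs defined in `resource-elb.tf` (a sketch; `terraform init` is assumed to have been run before plan/apply):

```bash
terraform output aws_elb_dns_name
curl "http://$(terraform output aws_elb_dns_name)"
```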
/006-Terraform-AWS/resource-ec2.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "web" {
2 | count = "${var.subnet_count}"
3 | ami = "${lookup(var.AMIS, var.AWS_REGION)}"
4 | instance_type = "t2.micro"
5 |
6 | security_groups = ["${aws_security_group.sg-web.id}"]
7 | vpc_security_group_ids = ["${aws_security_group.sg-web.id}"]
8 | subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
9 |
10 | associate_public_ip_address = true
11 |
12 | key_name = "${var.PRIVATE_KEY_NAME}"
13 |
14 | connection {
15 | user = "ec2-user"
16 | private_key = "${file(var.PRIVATE_KEY_PATH)}"
17 | }
18 |
19 | provisioner "remote-exec" {
20 | inline = [
21 | "sudo yum install nginx -y",
22 | "sudo yum install php php-mysql php-fpm -y",
23 | ]
24 | }
25 |
26 | provisioner "file" {
27 | source = "data/nginx.conf"
28 | destination = "/home/ec2-user/nginx.conf"
29 | }
30 |
31 | provisioner "file" {
32 | source = "data/php-www.conf"
33 | destination = "/home/ec2-user/www.conf"
34 | }
35 |
36 | provisioner "file" {
37 | source = "data/index.php"
38 | destination = "/home/ec2-user/index.php"
39 | }
40 |
41 | provisioner "file" {
42 | source = "data/php.ini"
43 | destination = "/home/ec2-user/php.ini"
44 | }
45 |
46 | provisioner "remote-exec" {
47 | inline = [
48 | "sudo cp /home/ec2-user/php.ini /etc/php.ini",
49 | "sudo cp /home/ec2-user/nginx.conf /etc/nginx/nginx.conf",
50 | "sudo cp /home/ec2-user/www.conf /etc/php.d/www.conf",
51 | "sudo cp /home/ec2-user/index.php /usr/share/nginx/html/index.php",
52 | "sudo service nginx start",
53 | "sudo service php-fpm start",
54 | ]
55 | }
56 |
57 | tags {
58 | Name = "web-${count.index+1}"
59 | }
60 | }
61 |
62 | output "aws_instance_ips" {
63 | value = ["${aws_instance.web.*.public_ip}"]
64 | }
65 |
--------------------------------------------------------------------------------
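The provisioners above run over SSH as `ec2-user`, and the same access works by hand for debugging (a sketch: `$PRIVATE_KEY_PATH` stands in for the path set in `terraform.tfvars`, and the placeholder IP is one of the values printed by the output):

```bash
terraform output aws_instance_ips
ssh -i "$PRIVATE_KEY_PATH" ec2-user@<one-of-the-listed-ips> "sudo service nginx status"
```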
/006-Terraform-AWS/resource-eip.tf:
--------------------------------------------------------------------------------
1 | resource "aws_eip" "nat" {
2 | vpc = true
3 | count = "${var.subnet_count}"
4 | }
5 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-elb.tf:
--------------------------------------------------------------------------------
1 | resource "aws_elb" "lb" {
2 | name = "lbdemo"
3 | subnets = ["${aws_subnet.public.*.id}"]
4 | security_groups = ["${aws_security_group.sg-lb.id}"]
5 | instances = ["${aws_instance.web.*.id}"]
6 |
7 | listener {
8 | instance_port = 80
9 | instance_protocol = "http"
10 | lb_port = 80
11 | lb_protocol = "http"
12 | }
13 |
14 | # listener {
15 | # instance_port = 443
16 | # instance_protocol = "https"
17 | # lb_port = 443
18 | # lb_protocol = "https"
19 | # }
20 |
21 | tags {
22 | Name = "lb"
23 | }
24 | }
25 |
26 | output "aws_elb_arn" {
27 | value = "${aws_elb.lb.arn}"
28 | }
29 |
30 | output "aws_elb_dns_name" {
31 | value = "${aws_elb.lb.dns_name}"
32 | }
33 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-internet-gateway.tf:
--------------------------------------------------------------------------------
1 | resource "aws_internet_gateway" "igw" {
2 | vpc_id = "${aws_vpc.vpc.id}"
3 |
4 | tags {
5 | Name = "igw"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-nat.tf:
--------------------------------------------------------------------------------
1 | resource "aws_nat_gateway" "nat" {
2 | count = "${var.subnet_count}"
3 | allocation_id = "${element(aws_eip.nat.*.id, count.index)}"
4 | subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
5 | depends_on = ["aws_internet_gateway.igw"]
6 | }
7 |
8 | output "nat_ids" {
9 | value = ["${aws_nat_gateway.nat.*.id}"]
10 | }
11 |
12 | output "nat_ips" {
13 | value = ["${aws_nat_gateway.nat.*.public_ip}"]
14 | }
15 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-network-acl.tf:
--------------------------------------------------------------------------------
1 | resource "aws_network_acl" "acl-frontend" {
2 | vpc_id = "${aws_vpc.vpc.id}"
3 |
4 | subnet_ids = ["${aws_subnet.public.*.id}"]
5 |
6 | # allow inbound ssh (22), http (80), https (443) and ephemeral tcp ports
7 | ingress {
8 | rule_no = 10
9 | from_port = 22
10 | to_port = 22
11 | action = "allow"
12 | protocol = "tcp"
13 | cidr_block = "${var.cidr_blocks}"
14 | }
15 |
16 | ingress {
17 | rule_no = 20
18 | from_port = 80
19 | to_port = 80
20 | action = "allow"
21 | protocol = "tcp"
22 | cidr_block = "${var.cidr_blocks}"
23 | }
24 |
25 | ingress {
26 | rule_no = 30
27 | from_port = 443
28 | to_port = 443
29 | action = "allow"
30 | protocol = "tcp"
31 | cidr_block = "${var.cidr_blocks}"
32 | }
33 |
34 | ingress {
35 | rule_no = 40
36 | from_port = 1024
37 | to_port = 65535
38 | action = "allow"
39 | protocol = "tcp"
40 | cidr_block = "${var.cidr_blocks}"
41 | }
42 |
43 | # allow all outbound
44 | egress {
45 | rule_no = 10
46 | from_port = 0
47 | to_port = 0
48 | action = "allow"
49 | protocol = "-1"
50 | cidr_block = "${var.cidr_blocks}"
51 | }
52 |
53 | tags {
54 | Name = "acl-frontend"
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-route-table-association.tf:
--------------------------------------------------------------------------------
1 | resource "aws_route_table_association" "private" {
2 | count = "${var.subnet_count}"
3 | subnet_id = "${element(aws_subnet.private.*.id, count.index)}"
4 | route_table_id = "${element(aws_route_table.private.*.id, count.index)}"
5 | }
6 |
7 | resource "aws_route_table_association" "public" {
8 | count = "${var.subnet_count}"
9 | subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
10 | route_table_id = "${aws_route_table.public.id}"
11 | }
12 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-route-tables.tf:
--------------------------------------------------------------------------------
1 | resource "aws_route_table" "public" {
2 | vpc_id = "${aws_vpc.vpc.id}"
3 |
4 | route {
5 | cidr_block = "${var.cidr_blocks}"
6 | gateway_id = "${aws_internet_gateway.igw.id}"
7 | }
8 |
9 | tags {
10 | Name = "rtb-public"
11 | }
12 | }
13 |
14 | resource "aws_route_table" "private" {
15 | vpc_id = "${aws_vpc.vpc.id}"
16 |
17 | count = "${var.subnet_count}"
18 |
19 | route {
20 | cidr_block = "${var.cidr_blocks}"
21 | nat_gateway_id = "${element(aws_nat_gateway.nat.*.id, count.index)}"
22 | }
23 |
24 | tags {
25 | Name = "rtb-private-${count.index}"
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-security-group.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "sg-web" {
2 | vpc_id = "${aws_vpc.vpc.id}"
3 |
4 | # Allow SSH from anywhere
5 | ingress {
6 | from_port = 22
7 | to_port = 22
8 | protocol = "tcp"
9 | cidr_blocks = ["${var.cidr_blocks}"]
10 | }
11 |
12 | ingress {
13 | from_port = 80
14 | to_port = 80
15 | protocol = "tcp"
16 | cidr_blocks = ["${var.network_address_space}"]
17 | }
18 |
19 | ingress {
20 | from_port = 443
21 | to_port = 443
22 | protocol = "tcp"
23 | cidr_blocks = ["${var.cidr_blocks}"]
24 | }
25 |
26 | egress {
27 | from_port = 0
28 | to_port = 0
29 | protocol = "-1"
30 | cidr_blocks = ["${var.cidr_blocks}"]
31 | }
32 |
33 | tags {
34 | Name = "sg-web"
35 | }
36 | }
37 |
38 | resource "aws_security_group" "sg-lb" {
39 | vpc_id = "${aws_vpc.vpc.id}"
40 |
41 | # SSH access from anywhere
42 | ingress {
43 | from_port = 22
44 | to_port = 22
45 | protocol = "tcp"
46 | cidr_blocks = ["${var.cidr_blocks}"]
47 | }
48 |
49 | # HTTP access from anywhere
50 | ingress {
51 | from_port = 80
52 | to_port = 80
53 | protocol = "tcp"
54 | cidr_blocks = ["${var.cidr_blocks}"]
55 | }
56 |
57 | ingress {
58 | from_port = 443
59 | to_port = 443
60 | protocol = "tcp"
61 | cidr_blocks = ["${var.cidr_blocks}"]
62 | }
63 |
64 | # outbound internet access
65 | egress {
66 | from_port = 0
67 | to_port = 0
68 | protocol = "-1"
69 | cidr_blocks = ["${var.cidr_blocks}"]
70 | }
71 |
72 | tags {
73 | Name = "sg-lb"
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-subnets.tf:
--------------------------------------------------------------------------------
1 | # Public Subnets
2 |
3 | resource "aws_subnet" "public" {
4 | count = "${var.subnet_count}"
5 | cidr_block = "${cidrsubnet(var.network_address_space, 4, count.index + 1 )}"
6 | vpc_id = "${aws_vpc.vpc.id}"
7 | map_public_ip_on_launch = "true"
8 | availability_zone = "${data.aws_availability_zones.available.names[count.index % 3]}"
9 |
10 | tags {
11 | Name = "public-${count.index + 1}"
12 | }
13 | }
14 |
15 | # Private Subnets
16 |
17 | resource "aws_subnet" "private" {
18 | count = "${var.subnet_count}"
19 | cidr_block = "${cidrsubnet(var.network_address_space, 4, count.index + 1 + var.subnet_count)}"
20 | vpc_id = "${aws_vpc.vpc.id}"
21 | availability_zone = "${data.aws_availability_zones.available.names[count.index % 3]}"
22 |
23 | tags {
24 | Name = "private-${count.index + 1}"
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/resource-vpc.tf:
--------------------------------------------------------------------------------
1 | #
2 | resource "aws_vpc" "vpc" {
3 | cidr_block = "${var.network_address_space}"
4 |
5 | instance_tenancy = "default"
6 | enable_dns_support = "true"
7 | enable_dns_hostnames = "true"
8 |
9 | tags {
10 | Name = "vpc-demo"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/terraform.tfvars:
--------------------------------------------------------------------------------
1 | # AWS ACCESS KEY
2 | AWS_ACCESS_KEY = ""
3 |
4 | # AWS SECRET KEY
5 | AWS_SECRET_KEY = ""
6 |
7 | # Private SSH Key
8 | PRIVATE_KEY_PATH = ""
9 |
10 | # Private Key Name
11 | PRIVATE_KEY_NAME = ""
12 |
13 | # Default instance type (free tier)
14 | INSTANCE_TYPE = "t2.micro"
15 |
--------------------------------------------------------------------------------
/006-Terraform-AWS/variables.tf:
--------------------------------------------------------------------------------
1 | # Access keys
2 | variable "AWS_ACCESS_KEY" {}
3 |
4 | variable "AWS_SECRET_KEY" {}
5 |
6 | variable "PRIVATE_KEY_NAME" {}
7 |
8 | variable "PRIVATE_KEY_PATH" {}
9 |
10 | # Region alias
11 | variable "AWS_REGION" {
12 | default = "eu-west-1"
13 | }
14 |
15 | # AMI - Amazon Linux AMI 2018.03.0
16 | variable "AMIS" {
17 | type = "map"
18 |
19 | default = {
20 | eu-central-1 = "ami-9a91b371"
21 | eu-west-1 = "ami-ca0135b3"
22 | eu-west-2 = "ami-a36f8dc4"
23 | eu-west-3 = "ami-969c2deb"
24 | }
25 | }
26 |
27 | # Network Address Space
28 | variable "network_address_space" {
29 | default = "10.0.1.0/24"
30 | }
31 |
32 | variable "subnet_count" {
33 | default = 1
34 | }
35 |
36 | variable "cidr_blocks" {
37 | default = "0.0.0.0/0"
38 | }
39 |
--------------------------------------------------------------------------------
/007-K8S-Hello-World/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx:1.15.1-alpine
2 |
3 | RUN apk add --no-cache curl drill bash
4 |
5 | COPY index.html /usr/share/nginx/html/index.html
6 |
--------------------------------------------------------------------------------
/007-K8S-Hello-World/Jenkinsfile:
--------------------------------------------------------------------------------
1 | pipeline {
2 | agent {
3 | label "jx-base"
4 | }
5 | stages {
6 | stage('Pull repo') {
7 | steps {
8 | container('jenkinsxio-builder-base') {
9 | sh '''
10 | git clone $GITHUB_REPO_URL repo
11 | '''
12 | }
13 | }
14 | }
15 | stage('Create resources') {
16 | steps {
17 | container('jenkinsxio-builder-base') {
18 | dir('repo') {
19 | sh '''
20 | kubectl create -f k8s-services.yaml
21 | '''
22 | }
23 | }
24 | }
25 | }
26 | stage('Wait for pods to be ready') {
27 | steps {
28 | container('jenkinsxio-builder-base') {
29 | retry(60) {
30 | sh '''
31 | for i in 1 2; do
32 | test $(kubectl get pod nginx-${i} --output=jsonpath={.status.phase}) = 'Running' || exit 1
33 | done
34 | '''
35 | sleep 1
36 | }
37 | }
38 | }
39 | }
40 | stage('Check connections') {
41 | steps {
42 | container('jenkinsxio-builder-base') {
43 | sleep 3
44 | sh '''
45 | test $(kubectl get pod nginx-1 --output=jsonpath={.metadata.labels.app}) = "web-1"
46 | test $(kubectl get pod nginx-2 --output=jsonpath={.metadata.labels.app}) = "web-2"
47 | kubectl exec -ti nginx-1 -- curl --connect-timeout 2 -s nginx-1-web
48 | kubectl exec -ti nginx-1 -- curl --connect-timeout 2 -s nginx-2-web
49 | kubectl exec -ti nginx-1 -- curl --connect-timeout 2 -s nginx-1:8081
50 | kubectl exec -ti nginx-2 -- curl --connect-timeout 2 -s nginx-1-web
51 | kubectl exec -ti nginx-2 -- curl --connect-timeout 2 -s nginx-2-web
52 | kubectl exec -ti nginx-2 -- curl --connect-timeout 2 -s nginx-2:8082
53 | '''
54 | }
55 | }
56 | }
57 | }
58 | post {
59 | always {
60 | container('jenkinsxio-builder-base') {
61 | dir('repo') {
62 | sh '''
63 | kubectl delete -f k8s-services.yaml
64 | '''
65 | }
66 | }
67 | }
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/007-K8S-Hello-World/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <head><title>kubernetes lab</title></head>
3 | <body>
4 | It, works now. So what?
5 | </body>
6 | </html>
7 |
--------------------------------------------------------------------------------
/007-K8S-Hello-World/k8s-services.yaml:
--------------------------------------------------------------------------------
1 | kind: Service
2 | apiVersion: v1
3 | metadata:
4 | name: nginx-1-web
5 | spec:
6 | selector:
7 | app: web-1
8 | ports:
9 | - port: 80
10 | targetPort: nginx-1-port
11 | type: NodePort
12 |
13 | ---
14 |
15 | kind: Pod
16 | apiVersion: v1
17 | metadata:
18 | name: nginx-1
19 | labels:
20 | app: web-1
21 | spec:
22 | containers:
23 | - name: nginx-1
24 | image: docker.io/butuzov/nginx:latest
25 | ports:
26 | - containerPort: 8081
27 | name: nginx-1-port
28 | volumeMounts:
29 | - mountPath: /etc/nginx/conf.d/default.conf
30 | subPath: default.conf
31 | name: config
32 | volumes:
33 | - name: config
34 | configMap:
35 | name: nginx-1-conf
36 |
37 | ---
38 |
39 | kind: ConfigMap
40 | apiVersion: v1
41 | metadata:
42 | name: nginx-1-conf
43 | data:
44 | default.conf: |
45 | server {
46 | listen 8081;
47 | server_name _;
48 |
49 | location / {
50 | root /usr/share/nginx/html;
51 | index index.html index.htm;
52 | }
53 | }
54 |
55 | ---
56 |
57 | kind: Service
58 | apiVersion: v1
59 | metadata:
60 | name: nginx-2-web
61 | spec:
62 | selector:
63 | app: web-2
64 | ports:
65 | - port: 80
66 | targetPort: nginx-2-port
67 | type: NodePort
68 |
69 | ---
70 |
71 | kind: Pod
72 | apiVersion: v1
73 | metadata:
74 | name: nginx-2
75 | labels:
76 | app: web-2
77 | spec:
78 | containers:
79 | - name: nginx-2
80 | image: docker.io/butuzov/nginx:latest
81 | ports:
82 | - containerPort: 8082
83 | name: nginx-2-port
84 | volumeMounts:
85 | - mountPath: /etc/nginx/conf.d/default.conf
86 | subPath: default.conf
87 | name: config
88 | volumes:
89 | - name: config
90 | configMap:
91 | name: nginx-2-conf
92 | ---
93 |
94 | kind: ConfigMap
95 | apiVersion: v1
96 | metadata:
97 | name: nginx-2-conf
98 | data:
99 | default.conf: |
100 | server {
101 | listen 8082;
102 | server_name _;
103 |
104 | location / {
105 | root /usr/share/nginx/html;
106 | index index.html index.htm;
107 | }
108 | }
109 |
--------------------------------------------------------------------------------
/007-K8S-Hello-World/readme.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Lab 1
2 |
3 | ## [Original Code](https://github.com/andrewscat/kubelab)
4 |
5 | In this lab you will create 2 pods with nginx inside, as well as a service for each pod.
6 | There will also be a config map for each pod containing the nginx config file, which will be mounted inside the pod.
7 |
8 | Pods must be called:
9 | - nginx-1
10 | - nginx-2
11 |
12 | Services must be called:
13 | - nginx-1-web
14 | - nginx-2-web
15 |
16 | Config-maps must be called:
17 | - nginx-1-config
18 | - nginx-2-config
19 |
20 | ## Requirements
21 |
22 | The following requirements must be satisfied:
23 | - only one manifest file must be used; its name is `lab-1.yaml`
24 | - services must listen on port 80
25 | - nginx-1 must listen on port 8081
26 | - nginx-2 must listen on port 8082
27 | - nginx-1 must have a label `app: web-1`
28 | - nginx-2 must have a label `app: web-2`
29 | - pods must have installed `curl` inside
30 |
31 | ## Check
32 |
33 | You can check your configuration with the following commands:
34 |
35 | ```bash
36 | kubectl create -f 007-K8S-Hello-World/k8s-services.yaml
37 | # wait for resources to be created
38 | kubectl get pod nginx-1 --output=jsonpath={.metadata.labels.app}
39 | kubectl get pod nginx-2 --output=jsonpath={.metadata.labels.app}
40 | kubectl exec -ti nginx-1 -- curl --connect-timeout 2 -s nginx-1-web
41 | kubectl exec -ti nginx-1 -- curl --connect-timeout 2 -s nginx-2-web
42 | kubectl exec -ti nginx-1 -- curl --connect-timeout 2 -s nginx-1:8081
43 | kubectl exec -ti nginx-2 -- curl --connect-timeout 2 -s nginx-1-web
44 | kubectl exec -ti nginx-2 -- curl --connect-timeout 2 -s nginx-2-web
45 | kubectl exec -ti nginx-2 -- curl --connect-timeout 2 -s nginx-2:8082
46 | ```
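
Since both services are declared with `type: NodePort`, they are also reachable from outside the cluster. A small sketch (the jsonpath query is just one way to read the assigned port; the delete mirrors the pipeline's `post` step):

```bash
# node port assigned to the nginx-1-web service
kubectl get svc nginx-1-web -o jsonpath='{.spec.ports[0].nodePort}'

# clean up, as the Jenkinsfile post step does
kubectl delete -f 007-K8S-Hello-World/k8s-services.yaml
```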
47 |
48 | ### WARNING
49 |
50 | This code was moved into its own directory. `Jenkinsfile` and `k8s-services.yaml` are supposed to be in the root directory of the repository.
51 |
52 | ### Prerequisite
53 |
54 | Build an nginx container with curl inside.
55 |
56 | ```bash
57 | docker build -t butuzov/nginx .
58 | docker push butuzov/nginx
59 | ```
60 |
--------------------------------------------------------------------------------
/008-K8S-MySQL/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mysql:5.7 AS build-env
2 |
3 | FROM debian:stretch-slim
4 |
5 | # linux-vdso.so.1 (0x00007ffea9f56000)
6 | # libpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0 (0x00007f65def60000)
7 | # librt.so.1 => /lib/x86_64-linux-gnu/librt.so.1 (0x00007f65ded58000)
8 | # libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f65de94c000)
9 | # libncurses.so.5 => /lib/x86_64-linux-gnu/libncurses.so.5 (0x00007f65de729000)
10 | # libtinfo.so.5 => /lib/x86_64-linux-gnu/libtinfo.so.5 (0x00007f65de4ff000)
11 | # libstdc++.so.6 => /usr/lib/x86_64-linux-gnu/libstdc++.so.6 (0x00007f65de17d000)
12 | # libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007f65dde79000)
13 | # libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 (0x00007f65ddc62000)
14 | # libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f65dd8c3000)
15 | # /lib64/ld-linux-x86-64.so.2 (0x00007f65df7ff000)
16 |
17 | RUN apt-get update && apt-get install -y git
18 | COPY --from=build-env /usr/lib/x86_64-linux-gnu/libatomic.so.1 /usr/lib/x86_64-linux-gnu/
19 | COPY --from=build-env /lib/x86_64-linux-gnu/libncurses.so.5 /usr/lib/x86_64-linux-gnu/
20 | COPY --from=build-env /lib/x86_64-linux-gnu/libtinfo.so.5 /lib/x86_64-linux-gnu/
21 |
22 | COPY --from=build-env /usr/bin/mysql /usr/bin/
23 | WORKDIR /usr/bin/
24 |
25 |
26 | CMD ["sleep", "3600"]
27 |
--------------------------------------------------------------------------------
/008-K8S-MySQL/Jenkinsfile:
--------------------------------------------------------------------------------
1 | pipeline {
2 | agent {
3 | label "jx-base"
4 | }
5 | stages {
6 | stage('Pull repo') {
7 | steps {
8 | container('jenkinsxio-builder-base') {
9 | sh '''
10 | git clone $GITHUB_REPO_URL repo
11 | '''
12 | }
13 | }
14 | }
15 | stage('Create resources') {
16 | steps {
17 | container('jenkinsxio-builder-base') {
18 | dir('repo') {
19 | sh '''
20 | kubectl create -f k8s-services.yaml
21 | '''
22 | }
23 | }
24 | }
25 | }
26 | stage('Wait for pods to be ready') {
27 | steps {
28 | container('jenkinsxio-builder-base') {
29 | retry(60) {
30 | sh '''
31 | for i in client server; do
32 | test $(kubectl get pod mysql-${i} --output=jsonpath={.status.phase}) = 'Running' || exit 1
33 | done
34 | '''
35 | sleep 1
36 | }
37 | }
38 | }
39 | }
40 | stage('Check connections') {
41 | steps {
42 | container('jenkinsxio-builder-base') {
43 | sleep 30
44 | sh '''
45 | test $(kubectl get pods --no-headers mysql-client | wc -l) -eq 1
46 | test $(kubectl get pods --no-headers mysql-server | wc -l) -eq 1
47 |
48 | test $(kubectl get pod -l role=server --output=jsonpath={.items..spec.containers[].resources.requests.cpu}) = "100m"
49 | test $(kubectl get pod -l role=server --output=jsonpath={.items..spec.containers[].resources.requests.memory}) = "256Mi"
50 | test $(kubectl get pod -l role=client --output=jsonpath={.items..spec.containers[].resources.requests.cpu}) = "50m"
51 | test $(kubectl get pod -l role=client --output=jsonpath={.items..spec.containers[].resources.requests.memory}) = "64Mi"
52 |
53 | kubectl exec -ti mysql-client -- git clone https://github.com/butuzov/DevOps-Journey.git kubelab
54 | kubectl exec -ti mysql-client -- sh -c 'echo -n $(date +%s) > value.txt'
55 | kubectl exec -ti mysql-client -- sh -c 'value=$(cat value.txt); sed -i "s/RANDOM_VALUE/${value}/" kubelab/008-K8S-MySQL/script.sql'
56 | kubectl exec -ti mysql-client -- sh -c 'mysql -u root -p$PASSWORD -h mysql < kubelab/008-K8S-MySQL/script.sql'
57 | kubectl exec -ti mysql-client -- sh -c 'value=$(cat value.txt); mysql -u root -p$PASSWORD -h mysql LAB -sN -r -e "SELECT kubeval FROM Kubelab" | grep $value'
58 | '''
59 | }
60 | }
61 | }
62 | }
63 | post {
64 | always {
65 | container('jenkinsxio-builder-base') {
66 | dir('repo') {
67 | sh '''
68 | kubectl delete -f k8s-services.yaml
69 | '''
70 | }
71 | }
72 | }
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/008-K8S-MySQL/k8s-services.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | kind: Service
4 | apiVersion: v1
5 | metadata:
6 | name: mysql
7 | spec:
8 | clusterIP: None
9 | selector:
10 | apca: myapp
11 | role: server
12 | ports:
13 | - port: 3306
14 | name: mysql-port
15 |
16 | ---
17 |
18 | # Actual MySQL Server
19 |
20 | kind: Pod
21 | apiVersion: v1
22 | metadata:
23 | name: mysql-server
24 | labels:
25 | apca: myapp
26 | role: server
27 | spec:
28 | terminationGracePeriodSeconds: 10
29 | containers:
30 | - name: mysql-server
31 | image: docker.io/mysql:5.7
32 |
33 | # Ports
34 | ports:
35 | - containerPort: 3306
36 |
37 | # Secret
38 | env:
39 | - name: MYSQL_ROOT_PASSWORD
40 | valueFrom:
41 | secretKeyRef:
42 | name: mysqlsecret
43 | key: password
44 |
45 | # Resources we are requesting from the host/node
46 | resources:
47 | requests:
48 | cpu: 0.1
49 | memory: 256Mi
50 |
51 | ---
52 |
53 | kind: Pod
54 | apiVersion: v1
55 | metadata:
56 | name: mysql-client
57 | labels:
58 | name: mysql-client
59 | role: client
60 | spec:
61 |
62 | containers:
63 | - name: mysql-client
64 | image: docker.io/butuzov/mysql
65 | command: ["sleep"]
66 | args: ["3600"]
67 |
68 | # requested resources
69 | resources:
70 | requests:
71 | cpu: 0.05
72 | memory: 64Mi
73 |
74 | # Secret
75 | env:
76 | - name: PASSWORD
77 | valueFrom:
78 | secretKeyRef:
79 | name: mysqlsecret
80 | key: password
81 |
82 | restartPolicy: OnFailure
83 |
84 | ---
85 |
86 | # https://kubernetes.io/docs/concepts/configuration/secret/
87 |
88 | kind: Secret
89 | apiVersion: v1
90 | metadata:
91 | name: mysqlsecret
92 | type: Opaque
93 | data:
94 | # echo -n 'PaSsWoRd' | base64
95 | password: UGFTc1dvUmQ=
96 |
--------------------------------------------------------------------------------
/008-K8S-MySQL/readme.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Lab 2
2 |
3 | ## [Original Code](https://github.com/andrewscat/kubelab)
4 |
5 | In this lab you will create:
6 | - pod `mysql-client`, that includes mysql client package and git package
7 | - pod `mysql-server` with mysql server
8 | - shared secret for mysql server `mysql-password`
9 | - service `mysql`
10 |
11 | Consider also using a ClusterIP service for mysql-server.
12 |
13 | ## Scenario
14 |
15 | 1. User clones the git repository with the mysql script into the `mysql-client` pod
16 | 2. User runs the script from the cloned repo
17 | 3. User runs a `select` query to make sure that the script has executed correctly
18 |
19 | ## Requirements
20 |
21 | - The manifest file must be called `lab-2.yaml`
22 | - `mysql-client` pod must have environment variable `$PASSWORD` that stores a root password for the database
23 | - mysql-server must request 256 Mi of memory and 100m of CPU
24 | - mysql-client must request 64 Mi of memory and 50m of CPU
25 | - mysql-server must have label `role: server`
26 | - mysql-client must have label `role: client`
27 | - mysql-client must have git and mysql-client packages installed
28 |
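The root password comes from the `mysqlsecret` Secret and is stored base64-encoded; the value used in the manifest can be reproduced, and the injected `$PASSWORD` variable spot-checked once the pod is running, like this (a quick sketch, not part of the original check):

```bash
echo -n 'PaSsWoRd' | base64                                # -> UGFTc1dvUmQ=
kubectl exec -ti mysql-client -- sh -c 'echo $PASSWORD'    # should print the decoded password
```
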
29 | ## Check
30 |
31 | You can check your configuration with the following commands:
32 |
33 | ```bash
34 | set -e
35 | kubectl create -f 008-K8S-MySQL/k8s-services.yaml
36 | test $(kubectl get pods --no-headers mysql-client | wc -l) -eq 1
37 | test $(kubectl get pods --no-headers mysql-server | wc -l) -eq 1
38 |
39 | kubectl get pod -l role=server --output=jsonpath={.items..spec.containers[].resources.requests.cpu}
40 | kubectl get pod -l role=server --output=jsonpath={.items..spec.containers[].resources.requests.memory}
41 | kubectl get pod -l role=client --output=jsonpath={.items..spec.containers[].resources.requests.cpu}
42 | kubectl get pod -l role=client --output=jsonpath={.items..spec.containers[].resources.requests.memory}
43 |
44 | kubectl exec -ti mysql-client -- git clone https://github.com/andrewscat/kubelab.git kubelab
45 | kubectl exec -ti mysql-client -- sh -c 'echo -n $(date +%s) > value.txt'
46 | kubectl exec -ti mysql-client -- sh -c 'value=$(cat value.txt); sed -i "s/RANDOM_VALUE/${value}/" kubelab/lab-2/script.sql'
47 | kubectl exec -ti mysql-client -- sh -c 'mysql -u root -p$PASSWORD -h mysql < kubelab/lab-2/script.sql'
48 | kubectl exec -ti mysql-client -- sh -c 'value=$(cat value.txt); mysql -u root -p$PASSWORD -h mysql LAB -sN -r -e "SELECT kubeval FROM Kubelab" | grep $value'
49 | ```
50 |
51 | ### WARNING
52 |
53 | This code was moved into its own directory. `Jenkinsfile` and `k8s-services.yaml` are supposed to be in the root directory of the repository.
54 |
55 |
56 | ### Prerequisite
57 |
58 | Build a multi-stage mysql client Docker image for the mysql-client pod.
59 |
60 | ```bash
61 | docker build -t butuzov/mysql .
62 | docker push butuzov/mysql
63 | ```
64 |
--------------------------------------------------------------------------------
/008-K8S-MySQL/script.sql:
--------------------------------------------------------------------------------
1 | CREATE DATABASE LAB;
2 | USE LAB;
3 | CREATE TABLE Kubelab (kubekey VARCHAR(255), kubeval VARCHAR(255));
4 | INSERT INTO Kubelab (kubekey, kubeval) VALUES ("kubernetes", "RANDOM_VALUE");
5 |
--------------------------------------------------------------------------------
/009-K8S-Accessing-Pods-In-Cluster/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine:3.7
2 | RUN apk add --no-cache curl bash
3 | ENTRYPOINT ["/usr/bin/curl"]
4 |
--------------------------------------------------------------------------------
/009-K8S-Accessing-Pods-In-Cluster/Jenkinsfile:
--------------------------------------------------------------------------------
1 | pipeline {
2 | agent {
3 | label "jx-base"
4 | }
5 |
6 |
7 | stages {
8 | stage('Pull repo') {
9 | steps {
10 | container('jenkinsxio-builder-base') {
11 | sh '''
12 | git clone $GITHUB_REPO_URL repo
13 | '''
14 | }
15 | }
16 | }
17 | stage('Create resources') {
18 | steps {
19 | container('jenkinsxio-builder-base') {
20 | dir('repo') {
21 | sh '''
22 | export RANDOM_URL="https://bank.gov.ua/NBUStatService/v1/statdirectory/exchange?date=$(date -d @$(perl -e 'print 1267619929 + int(rand(1530292601 - 1267619929))') +%Y%m%d)"
23 | curl ${RANDOM_URL} > /tmp/url.html
24 | sed -i -e "s|MAIN_PAGE|${RANDOM_URL}|" 009-K8S-Accessing-Pods-In-Cluster/k8s-services.yaml
25 | kubectl create -f 009-K8S-Accessing-Pods-In-Cluster/k8s-services.yaml
26 | '''
27 | }
28 | }
29 | }
30 | }
31 | stage('Wait for pods to be ready') {
32 | steps {
33 | container('jenkinsxio-builder-base') {
34 | retry(60) {
35 | sh '''
36 | kubectl get pods -l application=www --output=jsonpath={.items..status.phase} | grep 'Running'
37 | '''
38 | sleep 1
39 | }
40 | }
41 | }
42 | }
43 | stage('Check web page') {
44 | steps {
45 | container('jenkinsxio-builder-base') {
46 | sleep 5
47 | sh '''
48 | test $(kubectl get pods -l application=www --no-headers | wc -l) -eq 1
49 | kubectl exec -ti $(kubectl get pods -l application=www --output=jsonpath={.items..metadata.name}) curl web > main.html
50 | diff /tmp/url.html main.html
51 | '''
52 | }
53 | }
54 | }
55 | stage('Check web page after restart') {
56 | steps {
57 | container('jenkinsxio-builder-base') {
58 | sh '''
59 | sleep 30
60 | kubectl delete pods -l application=www
61 |
62 | COUNTER=0
63 | while [ $COUNTER -lt 30 ]; do
64 | if kubectl get pods -l application=www --output=jsonpath={.items..status.phase} | grep 'Running'; then
65 | break
66 | else
67 | sleep 1
68 | COUNTER=$((COUNTER+1))
69 | fi
70 | done
71 |
72 | kubectl exec -ti $(kubectl get pods -l application=www --output=jsonpath={.items..metadata.name}) curl web > main.html
73 | diff /tmp/url.html main.html
74 | '''
75 | }
76 | }
77 | }
78 | }
79 | post {
80 | always {
81 | container('jenkinsxio-builder-base') {
82 | dir('repo') {
83 | sh '''
84 | kubectl delete -f 009-K8S-Accessing-Pods-In-Cluster/k8s-services.yaml
85 | '''
86 | }
87 | }
88 | }
89 | }
90 | }
91 |
--------------------------------------------------------------------------------
/009-K8S-Accessing-Pods-In-Cluster/k8s-services.yaml:
--------------------------------------------------------------------------------
1 |
2 | kind: Service
3 | apiVersion: v1
4 | metadata:
5 | name: web
6 | spec:
7 | selector:
8 | application : www
9 | ports:
10 | - port: 80
11 | targetPort: 80
12 | type: NodePort
13 |
14 | ---
15 |
16 | kind: ReplicationController
17 | apiVersion: v1
18 | metadata:
19 | name: nginx
20 | spec:
21 | replicas: 1
22 | selector:
23 | application : www
24 | template:
25 | metadata:
26 | labels:
27 | application : www
28 | spec:
29 |
30 | initContainers:
31 | - name: curly-curly
32 | image: docker.io/butuzov/curl:latest
33 | command: ['sh', '-c', 'curl MAIN_PAGE >> /usr/share/nginx/html/index.html']
34 |
35 | volumeMounts:
36 | - name: shared
37 | mountPath: /usr/share/nginx/html
38 |
39 | containers:
40 | - name: nginx
41 | image: docker.io/butuzov/nginx:latest
42 | ports:
43 | - containerPort: 80
44 | volumeMounts:
45 | - name: shared
46 | mountPath: /usr/share/nginx/html
47 |
48 | volumes:
49 | - name: shared
50 | emptyDir: {}
51 |
--------------------------------------------------------------------------------
/009-K8S-Accessing-Pods-In-Cluster/readme.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Lab 3
2 |
3 | ## [Original Code](https://github.com/andrewscat/kubelab)
4 |
5 | In this lab you will create:
6 | - a deployment running nginx
7 | - a service for nginx called `web`
8 |
9 | You can also use a PersistentVolumeClaim since a default StorageClass is already created for you. Consider also using init containers.
10 |
11 | ## Scenario
12 |
13 | 1. User replaces the special value in `lab-3.yaml` with a URL
14 | 2. User creates the kubernetes objects
15 | 3. User sees the content of the previously set URL as the nginx main page
16 |
17 | ## Requirements
18 |
19 | - the manifest file should be called `lab-3.yaml`
20 | - `lab-3.yaml` must contain the special text `MAIN_PAGE`. This text will be replaced by sed with a random URL (e.g. `http://www.linux.org.ru`)
21 | - nginx must serve the content of the previously set URL as its index page (even after the pod is restarted)
22 | - nginx must have label `application: www`
23 | - nginx container should contain `curl`
24 | - only one nginx pod is allowed
25 |
26 | ## Check
27 |
28 | ```bash
29 | set -e
30 | RANDOM_URL="http://www.linux.org.ru"
31 | curl ${RANDOM_URL} > url.html
32 | sed -i -e "s|MAIN_PAGE|${RANDOM_URL}|" lab-3.yaml
33 | kubectl create -f lab-3.yaml
34 | # wait for resources to be created
35 |
36 | test $(kubectl get pods -l application=www --no-headers | wc -l) -eq 1
37 | kubectl exec -ti $(kubectl get pods -l application=www --output=jsonpath={.items..metadata.name}) curl web > main.html
38 | diff url.html main.html
39 |
40 | kubectl delete pods -l application=www
41 | # wait for pods to be restarted
42 |
43 | test $(kubectl get pods -l application=www --no-headers | wc -l) -eq 1
44 | kubectl exec -ti $(kubectl get pods -l application=www --output=jsonpath={.items..metadata.name}) curl web > main.html
45 | diff url.html main.html
46 | ```
47 |
48 | ### WARNING
49 |
50 | This code was moved into its own directory. `Jenkinsfile` and `k8s-services.yaml` are supposed to be in the root directory of the repository.
51 |
52 | ### Prerequisite
53 |
54 | Build the Alpine-based curl image.
55 |
56 | ```bash
57 | docker build -t butuzov/curl .
58 | docker push butuzov/curl
59 | ```
60 |
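Because the image sets `curl` as its ENTRYPOINT, anything passed after the image name becomes a curl argument. A quick smoke test (a sketch using the example URL from above):

```bash
docker run --rm butuzov/curl -sI http://www.linux.org.ru
```
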
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | !.mvn/wrapper/maven-wrapper.jar
3 |
4 | ### STS ###
5 | .apt_generated
6 | .classpath
7 | .factorypath
8 | .project
9 | .settings
10 | .springBeans
11 |
12 | ### IntelliJ IDEA ###
13 | .idea
14 | *.iws
15 | *.iml
16 | *.ipr
17 |
18 | ### NetBeans ###
19 | nbproject/private/
20 | build/
21 | nbbuild/
22 | dist/
23 | nbdist/
24 | .nb-gradle/
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/openjdk:alpine
2 |
3 | EXPOSE 8080
4 |
5 | COPY target/JAR_APP /app.jar
6 |
7 | CMD [ "java", "-jar", "/app.jar" ]
8 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <modelVersion>4.0.0</modelVersion>
5 |
6 |     <groupId>com.example</groupId>
7 |     <artifactId>demo</artifactId>
8 |     <version>0.0.1-SNAPSHOT</version>
9 |     <packaging>jar</packaging>
10 |
11 |     <name>demo</name>
12 |     <description>Demo project for Spring Boot</description>
13 |
14 |     <parent>
15 |         <groupId>org.springframework.boot</groupId>
16 |         <artifactId>spring-boot-starter-parent</artifactId>
17 |         <version>1.5.7.RELEASE</version>
18 |         <relativePath/>
19 |     </parent>
20 |
21 |     <properties>
22 |         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
23 |         <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
24 |         <java.version>1.8</java.version>
25 |     </properties>
26 |
27 |     <dependencies>
28 |         <dependency>
29 |             <groupId>org.springframework.boot</groupId>
30 |             <artifactId>spring-boot-starter-web</artifactId>
31 |         </dependency>
32 |
33 |         <dependency>
34 |             <groupId>org.springframework.boot</groupId>
35 |             <artifactId>spring-boot-starter-test</artifactId>
36 |             <scope>test</scope>
37 |         </dependency>
38 |     </dependencies>
39 |
40 |     <build>
41 |         <plugins>
42 |             <plugin>
43 |                 <groupId>org.springframework.boot</groupId>
44 |                 <artifactId>spring-boot-maven-plugin</artifactId>
45 |             </plugin>
46 |         </plugins>
47 |     </build>
48 |
49 | </project>
50 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/readme.md:
--------------------------------------------------------------------------------
1 | # Build Jenkins Pipeline for Java App
2 |
3 | The Java app should be:
4 |
5 | - compiled / tested / packaged
6 | - deployed to Artifactory
7 | - built into a Docker image with the jar file and pushed to a self-hosted Docker registry
8 | - deployed to production/staging using Ansible
9 |
10 | ### Pipeline stages
11 | - setup
12 | - checkout
13 | - maven compile/test/build
14 | - artifactory to store app
15 | - build docker image
16 | - ansible deployment
17 |
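Roughly, those stages boil down to the following commands (a hand-run sketch only; the actual `Jenkinsfile` drives them, and the registry host, image name and Ansible paths are taken from the Terraform/Ansible configuration referenced below):

```bash
./mvnw clean package                                    # compile / test / package the jar
docker build -t registry.made.ua:5000/appka:latest .    # image with the jar (JAR_APP substituted in the Dockerfile first)
docker push registry.made.ua:5000/appka:latest          # push to the self-hosted registry
ansible-playbook -i terraform/shared/ansible/inventory \
                 terraform/shared/ansible/playbook.yml  # deploy via docker-compose on the host
```
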
18 | ### Deployment Specifications
19 |
20 | For the deployments, the personal domain made.ua and Let's Encrypt certificates (expired by now) were used.
21 |
22 | * [Deployment Environment - Terraform](010-Jenkins-Teraform-Ansible/terraform)
23 | * [Deployment Environment - Vagrant](010-Jenkins-Teraform-Ansible/vagrant)
24 |
25 | ### Warning
26 | This lab was moved from a separate repository into its own folder.
27 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/ec2-artifactory.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # installing java 1.8.0
4 | sudo yum install java-1.8.0-openjdk-headless.x86_64 -y
5 | sudo yum install java-1.8.0-openjdk-devel.x86_64 -y
6 |
7 |
8 | sudo /usr/sbin/alternatives --set java /usr/lib/jvm/jre-1.8.0-openjdk.x86_64/bin/java
9 | sudo /usr/sbin/alternatives --set javac /usr/lib/jvm/jre-1.8.0-openjdk.x86_64/bin/javac
10 |
11 | sudo /usr/sbin/alternatives --refresh javac
12 | sudo /usr/sbin/alternatives --refresh java
13 |
14 | # artifactory
15 | curl -L -o jfrog-artifactory.rpm 'https://api.bintray.com/content/jfrog/artifactory-rpms/jfrog-artifactory-oss-$latest.rpm;bt_package=jfrog-artifactory-oss-rpm'
16 | sudo rpm -i jfrog-artifactory.rpm
17 |
18 | sudo chkconfig artifactory on
19 | sudo service artifactory start
20 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/ec2-artifactory.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "artifactory" {
2 | ami = "${lookup(var.AMIS, var.AWS_REGION)}"
3 | instance_type = "t2.micro"
4 |
5 | associate_public_ip_address = true
6 | availability_zone = "${data.aws_availability_zones.available.names[0]}"
7 | security_groups = ["${aws_security_group.lab-ci-sg.name}"]
8 |
9 | key_name = "${aws_key_pair.default.key_name}"
10 |
11 | connection {
12 | user = "ec2-user"
13 | private_key = "${file(var.PRIVATE_KEY_PATH)}"
14 | }
15 |
16 | provisioner "file" {
17 | source = "ec2-artifactory.sh"
18 | destination = "~/artifactory.sh"
19 | }
20 |
21 | provisioner "remote-exec" {
22 | inline = [
23 | "chmod +x ~/artifactory.sh",
24 | "./artifactory.sh",
25 | ]
26 | }
27 |
28 | provisioner "file" {
29 | source = "www-data"
30 | destination = "~"
31 | }
32 |
33 | provisioner "file" {
34 | source = "shared/nginx/nginx.conf"
35 | destination = "nginx.conf"
36 | }
37 |
38 | provisioner "remote-exec" {
39 | inline = [
40 | "sudo yum install nginx -y",
41 | "sed -i -e s/__PORT__/8081/ nginx.conf",
42 | "sed -i -e s/__DOMAIN__/artifactory-/ nginx.conf",
43 | "sudo mv ~/nginx.conf /etc/nginx/nginx.conf",
44 | "sudo service nginx start",
45 | "sudo chkconfig nginx on",
46 | ]
47 | }
48 | }
49 |
50 | output "art.made.ua" {
51 | value = "${aws_instance.artifactory.public_ip}"
52 | }
53 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/ec2-ci.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | #########################################################
4 | # Installs Jenkins, Maven, Docker and Ansible
5 | ########################################################
6 | sudo wget http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo -O /etc/yum.repos.d/epel-apache-maven.repo
7 | sudo sed -i s/\$releasever/7/g /etc/yum.repos.d/epel-apache-maven.repo
8 | sudo yum install -y apache-maven
9 |
10 | # New "1.8.0" Java
11 | sudo yum install java-1.8.0-openjdk-headless.x86_64 -y
12 | sudo yum install java-1.8.0-openjdk-devel.x86_64 -y
13 |
14 | # updating java
15 | sudo /usr/sbin/alternatives --set java /usr/lib/jvm/jre-1.8.0-openjdk.x86_64/bin/java
16 | sudo /usr/sbin/alternatives --set javac /usr/lib/jvm/jre-1.8.0-openjdk.x86_64/bin/javac
17 |
18 | sudo /usr/sbin/alternatives --refresh javac
19 | sudo /usr/sbin/alternatives --refresh java
20 |
21 | # jenkins
22 | sudo wget -O /etc/yum.repos.d/jenkins.repo http://pkg.jenkins-ci.org/redhat/jenkins.repo
23 | sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key
24 | sudo yum install jenkins -y
25 |
26 | # git
27 | sudo yum install git -y
28 |
29 | # installing docker
30 | sudo yum install docker -y
31 | sudo usermod -aG docker ec2-user
32 | sudo usermod -aG docker jenkins
33 |
34 | # Docker start
35 | sudo service docker start
36 | sudo chkconfig docker on
37 |
38 | # Ansible
39 | sudo python -m pip install --upgrade pip
40 | sudo python -m pip install ansible
41 |
42 | # Jenkins user to ansible role and private key
43 | sudo chown jenkins:jenkins /home/ec2-user/ansible/private_key
44 | sudo chmod 0400 /home/ec2-user/ansible/private_key
45 | sudo chown jenkins:jenkins /home/ec2-user/ansible
46 | sudo mv /home/ec2-user/ansible /ansible
47 | chmod 0400 /ansible/private_key
48 |
49 | # jenkins plugins
50 | sudo service jenkins start
51 | sleep 60
52 | curl http://127.0.0.1:8080
53 | sleep 20
54 | sudo service jenkins stop
55 |
56 | . ~/install-jenkins-plugins.sh
57 | sudo service jenkins start
58 | sudo chkconfig jenkins on
59 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/ec2-ci.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "ci" {
2 | ami = "${lookup(var.AMIS, var.AWS_REGION)}"
3 | instance_type = "t2.small"
4 |
5 | associate_public_ip_address = true
6 | availability_zone = "${data.aws_availability_zones.available.names[0]}"
7 | security_groups = ["${aws_security_group.lab-ci-sg.name}"]
8 |
9 | key_name = "${aws_key_pair.default.key_name}"
10 |
11 | connection {
12 | user = "ec2-user"
13 | private_key = "${file(var.PRIVATE_KEY_PATH)}"
14 | }
15 |
16 | // ansible
17 | provisioner "file" {
18 | source = "shared/ansible"
19 | destination = "~"
20 | }
21 |
22 | provisioner "remote-exec" {
23 | inline = [
24 | "sed -i -e s/_host_/${aws_instance.host.public_ip}/ ~/ansible/inventory",
25 | ]
26 | }
27 |
28 | provisioner "file" {
29 | source = "${var.PRIVATE_KEY_PATH}"
30 | destination = "~/ansible/private_key"
31 | }
32 |
33 | // jenkins
34 | provisioner "file" {
35 | source = "shared/jenkins/install.sh"
36 | destination = "install-jenkins-plugins.sh"
37 | }
38 |
39 | provisioner "file" {
40 | source = "shared/jenkins/plugins.txt"
41 | destination = "plugins.txt"
42 | }
43 |
44 | provisioner "file" {
45 | source = "ec2-ci.sh"
46 | destination = "jenkins.sh"
47 | }
48 |
49 | provisioner "remote-exec" {
50 | inline = [
51 | "chmod +x ~/jenkins.sh",
52 | "./jenkins.sh",
53 | ]
54 | }
55 |
56 | // ~* Nginx *~
57 |
58 | provisioner "file" {
59 | source = "www-data"
60 | destination = "~"
61 | }
62 | provisioner "file" {
63 | source = "shared/nginx/nginx.conf"
64 | destination = "nginx.conf"
65 | }
66 | provisioner "remote-exec" {
67 | inline = [
68 | "sudo yum install nginx -y",
69 | "sed -i -e s/__PORT__/8080/ nginx.conf",
70 | "sed -i -e s/__DOMAIN__/ci-/ nginx.conf",
71 | "sudo mv ~/nginx.conf /etc/nginx/nginx.conf",
72 | "sudo service nginx start",
73 | "sudo chkconfig nginx on",
74 | ]
75 | }
76 | depends_on = ["aws_instance.host"]
77 | }
78 |
79 | output "ci.made.ua" {
80 | value = "${aws_instance.ci.public_ip}"
81 | }
82 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/ec2-host.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "host" {
2 | ami = "${lookup(var.AMIS, var.AWS_REGION)}"
3 | instance_type = "t2.micro"
4 | associate_public_ip_address = true
5 | availability_zone = "${data.aws_availability_zones.available.names[0]}"
6 | security_groups = ["${aws_security_group.lab-ci-sg.name}"]
7 | key_name = "${aws_key_pair.default.key_name}"
8 |
9 | connection {
10 | user = "ec2-user"
11 | private_key = "${file(var.PRIVATE_KEY_PATH)}"
12 | }
13 | }
14 |
15 | output "host.made.ua" {
16 | value = "${aws_instance.host.public_ip}"
17 | }
18 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/ec2-registry.sh:
--------------------------------------------------------------------------------
1 | sudo yum install docker -y
2 | sudo usermod -aG docker ec2-user
3 |
4 | sudo service docker start
5 | sudo chkconfig docker on
6 |
7 | sudo curl -L https://github.com/docker/compose/releases/download/1.21.1/docker-compose-`uname -s`-`uname -m` | sudo tee /usr/bin/docker-compose > /dev/null
8 |
9 | sudo chmod +x /usr/bin/docker-compose
10 | docker pull registry:2.6.2
11 |
12 | cd ~/registry && sudo docker-compose up -d
13 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/ec2-registry.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "registry" {
2 | ami = "${lookup(var.AMIS, var.AWS_REGION)}"
3 | instance_type = "t2.micro"
4 | associate_public_ip_address = true
5 | availability_zone = "${data.aws_availability_zones.available.names[0]}"
6 | security_groups = ["${aws_security_group.lab-ci-sg.name}"]
7 | key_name = "${aws_key_pair.default.key_name}"
8 |
9 | connection {
10 | user = "ec2-user"
11 | private_key = "${file(var.PRIVATE_KEY_PATH)}"
12 | }
13 |
14 | provisioner "file" {
15 | source = "shared/registry"
16 | destination = "~"
17 | }
18 |
19 | provisioner "file" {
20 | source = "ec2-registry.sh"
21 | destination = "~/registry.sh"
22 | }
23 |
24 | provisioner "remote-exec" {
25 | inline = [
26 | "chmod +x ~/registry.sh",
27 | "./registry.sh",
28 | ]
29 | }
30 | }
31 |
32 | output "registry.made.ua" {
33 | value = "${aws_instance.registry.public_ip}"
34 | }
35 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/main.tf:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------
2 | # AWS Provider for our credentials in Europe
3 | # ------------------------------------------------------------------------------
4 |
5 | provider "aws" {
6 | access_key = "${var.AWS_ACCESS_KEY}"
7 | secret_key = "${var.AWS_SECRET_KEY}"
8 | region = "${var.AWS_REGION}"
9 | }
10 |
11 | # ------------------------------------------------------------------------------
12 | # Auth && Verification
13 | # ------------------------------------------------------------------------------
14 |
15 | resource "aws_key_pair" "default" {
16 | key_name = "lab_key_pair"
17 | public_key = "${file(var.PUBLIC_KEY_PATH)}"
18 | }
19 |
20 | resource "aws_security_group" "lab-ci-sg" {
21 | name = "lab-ci-sg"
22 |
23 | ingress {
24 | from_port = 22
25 | to_port = 22
26 | protocol = "tcp"
27 | cidr_blocks = ["${var.cidr_blocks}"]
28 | }
29 |
30 | ingress {
31 | from_port = 80
32 | to_port = 80
33 | protocol = "tcp"
34 | cidr_blocks = ["${var.cidr_blocks}"]
35 | }
36 |
37 | ingress {
38 | from_port = 443
39 | to_port = 443
40 | protocol = "tcp"
41 | cidr_blocks = ["${var.cidr_blocks}"]
42 | }
43 |
44 | ingress {
45 | from_port = 5000
46 | to_port = 5000
47 | protocol = "tcp"
48 | cidr_blocks = ["${var.cidr_blocks}"]
49 | }
50 |
51 | ingress {
52 | from_port = 8079
53 | to_port = 8099
54 | protocol = "tcp"
55 | cidr_blocks = ["${var.cidr_blocks}"]
56 | }
57 |
58 | egress {
59 | from_port = 0
60 | to_port = 0
61 | protocol = "-1"
62 | cidr_blocks = ["${var.cidr_blocks}"]
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/readme.md:
--------------------------------------------------------------------------------
1 | # Terraform with AWS
2 |
3 | Exporting Access and Secret Keys for terraform
4 |
5 | ```bash
6 | export TF_VAR_AWS_ACCESS_KEY="APIAIPNFUCKUALOAWUDAQ"
7 | export TF_VAR_AWS_SECRET_KEY="9b+HwaV51t_d0sNt_VvORK_0_l0l_uULZ4s3teS9U9"
8 | ```
9 |
10 | Running Terraform.
11 |
12 | ```bash
13 | terraform init
14 | terraform plan
15 | terraform apply --auto-approve
16 |
17 | # Delete setup when ready
18 | terraform destroy --auto-approve
19 | ```
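
Each EC2 instance exposes its public IP as a Terraform output (`art.made.ua`, `ci.made.ua`, `host.made.ua`, `registry.made.ua`), so after `apply` the addresses to map onto the sub-domains can be read back with:

```bash
terraform output
```
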
20 | ## Technical
21 |
22 | 4 nodes: three services ( https://ci.made.ua , https://artifactory.made.ua/artifactory and https://registry.made.ua:5000/v2 ) and one slave (http://host.made.ua or 18.194.137.154).
23 |
24 | ## Deployments
25 | (These IPs have since expired and were mapped to the domain only while the work was being graded.)
26 |
27 | * QA [18.194.137.154:8092](http://18.194.137.154:8092)
28 | * Dev [18.194.137.154:8091](http://18.194.137.154:8091)
29 | * Prod [18.194.137.154:8090](http://18.194.137.154:8090)
30 |
31 | ##
32 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/.ssh/ssh-private-key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEpAIBAAKCAQEAsPRBFeCc5Iv0B9+i6MrmoUgHHlx2TIWmdKf7pTKLVCqHpGuN
3 | DZz1cOtDpUX86kJ08Zscfhh00Ohl44Q8xoV/pANAy0eBivPHnTcHOHYHLIcwpdPS
4 | zoGpLSR7RKcd7ixqQCCe76pWPUeVrZ75N24CgT17V37+cqMxSs34HMU8nzUOoSIM
5 | Q2K+xbY82dOufJpqopnC+Oy6z6T9L/EcUCucbK+C33lh+Lt7UzJIgv2qEcPkrK8F
6 | PqOwNALt6Jy5Q/0ndiC0v3JaMva23WHX9/gOWYeLth/y9uO83uzzVRG3kyiHx6Ni
7 | Ei/d6JtMVgUvxTa1kStx8n9fW64dLlrOxOwVIwIDAQABAoIBAHiZLxZy61qTy2/m
8 | SYfwaLg4d6mIiJNkbuqIZefTh8OH7MPxDMPfWiNoI8Uq6tDZFAxpk0SY9FxAVth1
9 | kloWN8SW9vzTsgT6Wevayg//2KnPRSdvMvfMxHjgqSZD4xEkshEcalWauIWxGOCo
10 | PN87GXftZ/dctJt6fyilMViiT8uMYlV3EsdW6HOYkcFpzhUKPq6mrFdIV7xllMGm
11 | oAqmAMGfARWOf24EjrtA4rfyf0KNT4hTd8amsTIYPLocMME36BJABW72u/KB7ndP
12 | REJFA4O9gbhaxfIx+uUXhnmhI1KyY6ySNFHCgYTk4aebIhMJlTwansngHcIbfnYI
13 | V8XK9ukCgYEA17AMdr/WO1YOqKHCMlqVrHI6M0arcPXYpDPn0jVQr9yzAm+rFde8
14 | B8YJzTj/Y/ppORPI77HC6gJ26xtLi0HmybkilCShnXJ+goLFuzjtpliUG86z7qWT
15 | KcI8odZR8DXENvGllLgvKtYMQNVslYjQKjeiqTBHU8i8zUDYXL319G8CgYEA0gbu
16 | QLCvyte9VAaStiMbC8cTqgEg+U6Di2Lxt6OdmBZRdlca8JlE2EL8/Z6v7wXaOzn3
17 | jDLWm3dMKBzKlhoztFABsD/GPNPsU3UtV0kJrII54BUz6ic77F/7BSw9Zz9fId8r
18 | F/Kvyo7hMqtDVgAywJ7hmd2vYlHgZSeJdkMUzI0CgYALS4TbkyA86zzJR+9Lg8Li
19 | zPiPRtwjhjxaHP2+3FvlWcFmF0L2Wd3W6mAJBzsYmTx5aLfVFZIHfymLFlrWaBUV
20 | 60QMTf7Ip/5IB2EjdM+9LlZTPnfdv339adaTAw43bhlgZzcsmpidvafWnVaaXhfI
21 | njE+evDBSnnYpdSuxqPisQKBgQCytr8JQM91FPq/gxBX96tN7EopQOij+1EnzDvO
22 | fWTeQy9yOVBtJp5UPzwzO1KNWzeFah/gpmnCxFKDV2xzKTyR0/p71OiEXfgcU/sO
23 | 6NFYfz9sE96nAleVHV7l2e0t69ixY8qxiApZnBa4HM8hYO3OxeNGIELasLz2lhv8
24 | C7ypCQKBgQCViBy4q/uQQSHTm9SL9PeOgBZ+aRpILajnaI1SVXCuoLPF5fK2ByFE
25 | TCdEU25OK337pdntu9a/M9cSP+g7w48qAfmNqK0EuEjeIHfGivv/Bd7gOAOp5cL/
26 | gdBavJgyIHl8YOkD7DUXPeuO50RqEYxY0SAEp7bRO013KJIsZLIjAQ==
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/.ssh/ssh-private-key.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw9EEV4Jzki/QH36LoyuahSAceXHZMhaZ0p/ulMotUKoeka40NnPVw60OlRfzqQnTxmxx+GHTQ6GXjhDzGhX+kA0DLR4GK88edNwc4dgcshzCl09LOgaktJHtEpx3uLGpAIJ7vqlY9R5Wtnvk3bgKBPXtXfv5yozFKzfgcxTyfNQ6hIgxDYr7FtjzZ0658mmqimcL47LrPpP0v8RxQK5xsr4LfeWH4u3tTMkiC/aoRw+SsrwU+o7A0Au3onLlD/Sd2ILS/cloy9rbdYdf3+A5Zh4u2H/L247ze7PNVEbeTKIfHo2ISL93om0xWBS/FNrWRK3Hyf19brh0uWs7E7BUj butuzov
2 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/ansible/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Where am I?
3 | debug: { msg : "Amazon" }
4 | when: ansible_distribution == "Amazon"
5 |
6 | - name: Updating `yum`
7 | yum: { name : '*', state : latest }
8 |
9 | - name: Installing Software (Docker)
10 | yum: { name : 'docker', state : installed }
11 |
12 | - name: Starting Service (Docker)
13 | service: { name: docker, state: started, enabled : yes}
14 |
15 | - name: User Mod
16 | shell: "usermod -aG docker ec2-user"
17 |
18 | - name: Installing Docker Compose
19 | shell: |
20 | curl -L https://github.com/docker/compose/releases/download/1.21.1/docker-compose-`uname -s`-`uname -m` -o /usr/bin/docker-compose
21 | chmod +x /usr/bin/docker-compose
22 |
23 | - name: Deployment Location - Check
24 | stat: { path : "{{ app_path }}" }
25 | register: deployment
26 |
27 | - name: Deployment Location - Create
28 | file:
29 | path : "{{ app_path }}"
30 | state : directory
31 | owner : "{{ ansible_user }}"
32 | group : "{{ ansible_user }}"
33 | mode : 0755
34 | when: deployment.stat.exists == False
35 |
36 | - name: docker-compose.yml
37 | template:
38 | src : 'docker-compose.yml.j2'
39 | dest: '{{ app_path }}/docker-compose.yml'
40 | owner : "{{ ansible_user }}"
41 | group : "{{ ansible_user }}"
42 | mode : 0755
43 |
44 | - name: Docker Compose Deployment
45 | shell: "{{item}}"
46 | with_items:
47 | - "cd {{ app_path }}"
48 | - "ls -la"
49 | - "docker login -u {{docker_registry_user}} -p {{docker_registry_pass}} {{docker_registry_host}}:{{docker_registry_port}}"
50 | - "cd '{{ app_path }}' && docker-compose down"
51 | - "cd '{{ app_path }}' && docker-compose up -d"
52 |
53 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/ansible/common/templates/docker-compose.yml.j2:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | application:
5 | image: {{docker_registry_host}}:{{docker_registry_port}}/{{app_name}}:{{ app_tag }}
6 | ports:
7 | - "{{ app_port }}:8080"
8 | restart: always
9 | mem_limit: 128m
10 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/ansible/common/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_registry_host : "registry.made.ua"
3 | docker_registry_port : "5000"
4 |
5 | docker_registry_user : "admin"
6 | docker_registry_pass : "password"
7 |
8 |
9 | app_name : appka
10 | app_tag : latest
11 | app_port : 8080
12 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/ansible/inventory:
--------------------------------------------------------------------------------
1 | [all:vars]
2 | ansible_port=22
3 | ansible_connection=ssh
4 | ansible_user=ec2-user
5 | ansible_ssh_common_args='-o StrictHostKeyChecking=no'
6 |
7 |
8 | [hosts]
9 | slave ansible_host="_host_" ansible_private_key_file=/ansible/private_key
10 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/ansible/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | name: Docker Images Deploy Procedure
4 | gather_facts: yes
5 | become: yes
6 | roles:
7 | - common
8 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/jenkins/install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | while IFS='' read -r line || [[ -n "$line" ]]; do
4 | # echo "Text read from file: $line"
5 | plugin=$(echo $line | awk '{print $1}')
6 | version=$(echo $line | awk '{print $2}')
7 |
8 |
9 | if [ -d "/var/lib/jenkins/plugins/$plugin" ] || [ -f /var/lib/jenkins/plugins/$plugin.hpi ] ; then
10 | # | \
11 | printf "%s installed\n" $plugin
12 | else
13 | url=$(printf "http://updates.jenkins-ci.org/download/plugins/%s/%s/%s.hpi\n" $plugin $version $plugin)
14 | sudo curl -L -o /var/lib/jenkins/plugins/$plugin.hpi $url
15 | sudo chown jenkins:jenkins /var/lib/jenkins/plugins/$plugin.hpi
16 | fi
17 | done < "plugins.txt"
18 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/jenkins/plugins.txt:
--------------------------------------------------------------------------------
1 | docker-commons 1.13
2 | jdk-tool 1.1
3 | script-security 1.44
4 | handlebars 1.1.1
5 | command-launcher 1.2
6 | bouncycastle-api 2.16.3
7 | structs 1.14
8 | credentials 2.1.18
9 | ivy 1.28
10 | ssh-credentials 1.14
11 | plain-credentials 1.4
12 | durable-task 1.22
13 | ansible 1.0
14 | docker-workflow 1.17
15 | scm-api 2.2.7
16 | ace-editor 1.1
17 | artifactory 2.16.2
18 | jquery-detached 1.2.1
19 | workflow-step-api 2.16
20 | workflow-scm-step 2.6
21 | workflow-api 2.28
22 | cloudbees-folder 6.5.1
23 | workflow-support 2.19
24 | workflow-cps 2.54
25 | git-server 1.7
26 | jackson2-api 2.8.11.3
27 | github-api 1.92
28 | apache-httpcomponents-client-4-api 4.5.5-3.0
29 | jsch 0.1.54.2
30 | git-client 2.7.2
31 | display-url-api 2.2.0
32 | mailer 1.21
33 | junit 1.24
34 | branch-api 2.0.20
35 | matrix-project 1.13
36 | git 3.9.1
37 | token-macro 2.5
38 | github 1.29.2
39 | github-branch-source 2.3.6
40 | workflow-job 2.23
41 | workflow-multibranch 2.20
42 | pipeline-github 2.0
43 | pipeline-input-step 2.8
44 | github-oauth 0.29
45 | pipeline-stage-step 2.3
46 | pipeline-rest-api 2.10
47 | pipeline-graph-analysis 1.7
48 | momentjs 1.1.1
49 | pipeline-stage-view 2.10
50 | gradle 1.29
51 | javadoc 1.4
52 | maven-plugin 3.1.2
53 | ant 1.8
54 | credentials-binding 1.16
55 | config-file-provider 2.18
56 | authentication-tokens 1.3
57 | pipeline-stage-tags-metadata 1.3.1
58 | pipeline-multibranch-defaults 1.1
59 | workflow-durable-task-step 2.19
60 | workflow-basic-steps 2.9
61 | workflow-cps-global-lib 2.9
62 | pipeline-github-lib 1.0
63 | simple-theme-plugin 0.4
64 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/registry/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | registry:
5 | image: docker.io/registry:2.6.2
6 | ports:
7 | - "5000:5000"
8 | volumes:
9 | - ./data:/var/lib/registry
10 | - ./secrets:/certs
11 | - ./secrets/htpasswd:/auth/htpasswd
12 | restart: always
13 | mem_limit: 768m
14 | environment:
15 | - REGISTRY_HTTP_TLS_CERTIFICATE=/certs/fullchain.pem
16 | - REGISTRY_HTTP_TLS_KEY=/certs/privkey.pem
17 | - REGISTRY_HTTP_ADDR=:5000
18 | - REGISTRY_HTTP_HOST=https://registry.made.ua:5000
19 |
20 | # admin : password
21 | - REGISTRY_AUTH=htpasswd
22 | - REGISTRY_AUTH_HTPASSWD_REALM=Registry_Realm
23 | - REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd
24 | networks:
25 | default:
26 | aliases:
27 | - registry.made.ua
28 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/registry/secrets/htpasswd:
--------------------------------------------------------------------------------
1 | admin:$2y$05$e9iSb5kOMcLdSDo4Ok1UDO7ZCzEVuZwwoJdBHGtxxI0O8WBMIC/iC
2 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/shared/registry/secrets/privkey.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDGFTO0WDvhb21h
3 | Slm267NzkHhXBPUim5VOW2AMI/0w9SXAxPL9m3x2Bm8v7AunfXEzT0SQlCHuUKBs
4 | LgjULbXwQJeu0pl0j/AQNCTNb30jsOQ9VfNyMLSP1TCDcWKuzoS7mknWZadUfCYD
5 | Vi/9pfPR+18yltxrosK4vaYVDcRzbcjnQY6YjNP1NIQUPm1sha6grkctN0BYtJpH
6 | ZUM9jr6y6R8Lep2wjETOvj2ylD7wuMO7man+sipq1OBjeSpZNDcMZpWxR2eN6b9H
7 | XkA7Zr6yCzGdTOSDocS1VMUwlmNfR+uJGU9jflfCFKW6RmSOUrcNpoP853f6lwxz
8 | YahxWg2NAgMBAAECggEBAJG8o6/6gwQYEWfVZjB5Niy+jGup0BnIUo+2FvKXv/83
9 | PrGXthf1QBsuHLwrdV+joZfYqF+fPV+znnQ5YnGJuCStwWv0ssbZ6Axj8PfdITsT
10 | OLoP9O98+jwg5HKxB6QJegr1N4IwhG5acTVfQivi34MM6Tu1VzMyKwX1Mq6m1HT/
11 | cqc3V0ZUDQqrpmvDAEmOTILvcWZjOHgEhbwtqnmUbuUvGCjXZ8QXnTSRfr8XFx5n
12 | mTlewMbwVGjHxDfnN+r7ttoG47pRhIRG3Dw3unhgYrnppD+h3PxDbQROVvpwn9XN
13 | OOJN9j9sjhDjJzUAXdHQFr8VIsbWlrgQtUN2iVUUbMECgYEA9H4zX2NO/M7IidB+
14 | rAQ9mfcet1ORC0++BkF8DDw9eqhGUU+dNKalGl9qtHOYKyw8nBYjCGqZA2A51cs6
15 | bLr0LJrhhkhQikqUtKc5kRDd9SA5OrLKCzK5dltzhe/dcA28tsAokWa+gk7LGiNl
16 | fmC0s2J6Dy2AKvxsmEEwTjATLpkCgYEAz2fS3f/48NJXXz0P/7Ex/naXruj35qHq
17 | pPdmFZJZk9dWm/R2pwVs4Hyw8z8v8ns8M7Qo0YW8emwSRUNq1wxaz44QQjZ8/fVS
18 | YllyJxnTtIsjpAWQ+o/MXLVqDFrAGF/nqEYK2ZaAB8C7UkUr/78XIsuy1Vpps+9m
19 | DFNYOPJs8xUCgYADDEO1r7Y/XpCPb0L1AASIk9UVMfx/6JmU2GIkgOe/TLO7tcNe
20 | 8ajjQEdDQqGmACItS/VZ2IJZ0WxHJwZubpI2sCOZmfMvFijua5tDHLO3p5QvECWG
21 | qDY5rIAvLkNTb0hn+tNGpMJBO5w6kjBkt+3owpSE9etAHe94om9ouh6zGQKBgQC6
22 | JfD4FvmTCvYoIGHvl1XLkRzqe2WjtccncHRLACMtsaOmLW8g0Jm7ka4vY5LhCwIp
23 | VZUdF+7l4YE0cSG/CPOoPVUSWF0ugbQDWUw/E8sSy56dY/GPMOi2fIxFsl2egtUk
24 | DllGmPEoBC8b08F9tEBLlo9XNeKRAFiSz2rrEYn/UQKBgQDAlkObSYnQO3C5RzKf
25 | VSfB0mAwbMPYLOlb0hX2qis/2+xVmMBD/PfEd3eENyFYgR4qwJPujmA0r6He3Mfb
26 | NjLQZxAsVmEEN6CJk6tHo3OJL1I9dfuj2WjOI2WeLQQNVJBBusb4UixOpy5z02iS
27 | Rmjo9jaB64RrMTjnPrDE9HkQ9A==
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/vars.tf:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------
2 | # Data Sources and rest of Vars
3 | # ------------------------------------------------------------------------------
4 |
5 | data "aws_availability_zones" "available" {}
6 |
7 | variable "cidr_blocks" {
8 | default = "0.0.0.0/0"
9 | }
10 |
11 | variable "network_address_space" {
12 | default = "10.0.0.0/16"
13 | }
14 |
15 | variable "AWS_REGION" {
16 | default = "eu-central-1"
17 | }
18 |
19 | # AMI - Amazon Linux AMI 2018.03.0
20 | variable "AMIS" {
21 | type = "map"
22 |
23 | default = {
24 | eu-central-1 = "ami-9a91b371"
25 | eu-west-1 = "ami-ca0135b3"
26 | eu-west-2 = "ami-a36f8dc4"
27 | eu-west-3 = "ami-969c2deb"
28 | }
29 | }
30 |
31 | variable "AWS_ACCESS_KEY" {
32 | default = "APIAIPNFUCKUALOAWUDAQ"
33 | } # AWS ACCESS KEY
34 |
35 | variable "AWS_SECRET_KEY" {
36 | default = "9b+HwaV51t_d0sNt_VvORK_0_l0l_uULZ4s3teS9U9sXVK"
37 | } # AWS SECRET KEY
38 |
39 | variable "PRIVATE_KEY_PATH" {
40 | default = "shared/.ssh/ssh-private-key"
41 | } # SSH PRIVATE KEY
42 |
43 | variable "PUBLIC_KEY_PATH" {
44 | default = "shared/.ssh/ssh-private-key.pub"
45 | } # SSH PUBLIC KEY
46 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/www-data/artifactory-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIGETCCBPmgAwIBAgISA2F2ZvZQgoGGu1D3CU8pa+JVMA0GCSqGSIb3DQEBCwUA
3 | MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD
4 | ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xODA3MjIwNzUyMjdaFw0x
5 | ODEwMjAwNzUyMjdaMB4xHDAaBgNVBAMTE2FydGlmYWN0b3J5Lm1hZGUudWEwggEi
6 | MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwnsruxAEYlabdo5rMpvHZ0QKY
7 | AqokzR+fpqBUdVDFaU2yXmxvO6b33bqqDhIX+79QNTUmPZtAH46EgRKngLZUOxqe
8 | kEKcL4jdJh+tStRfHOb4vMPYzj9ahJDmKbXRs9X1wGVqAXv4v4lfOTlJdYkXnOxA
9 | Ra82fPDLvOcezolidMp4kutXwiSAC1alKBDiuvXVhL/YN51IuyT3QtO839GJxgNq
10 | SiozOzbg+9f6BwrQgAeGISq7QufKxm49zGRpAMZ4FJcBDqp0vo5mF0EBlei3ocRX
11 | Ddl1oXpqlCXhjFX1Irbb9tpHh7w611iMXmHWsG6orzskJ6Gh/wSO5DMiuFCVAgMB
12 | AAGjggMbMIIDFzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
13 | CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFH1YcYtARn61Ifdb/8Yt
14 | KjHI+qBsMB8GA1UdIwQYMBaAFKhKamMEfd265tE5t6ZFZe/zqOyhMG8GCCsGAQUF
15 | BwEBBGMwYTAuBggrBgEFBQcwAYYiaHR0cDovL29jc3AuaW50LXgzLmxldHNlbmNy
16 | eXB0Lm9yZzAvBggrBgEFBQcwAoYjaHR0cDovL2NlcnQuaW50LXgzLmxldHNlbmNy
17 | eXB0Lm9yZy8wHgYDVR0RBBcwFYITYXJ0aWZhY3RvcnkubWFkZS51YTCB/gYDVR0g
18 | BIH2MIHzMAgGBmeBDAECATCB5gYLKwYBBAGC3xMBAQEwgdYwJgYIKwYBBQUHAgEW
19 | Gmh0dHA6Ly9jcHMubGV0c2VuY3J5cHQub3JnMIGrBggrBgEFBQcCAjCBngyBm1Ro
20 | aXMgQ2VydGlmaWNhdGUgbWF5IG9ubHkgYmUgcmVsaWVkIHVwb24gYnkgUmVseWlu
21 | ZyBQYXJ0aWVzIGFuZCBvbmx5IGluIGFjY29yZGFuY2Ugd2l0aCB0aGUgQ2VydGlm
22 | aWNhdGUgUG9saWN5IGZvdW5kIGF0IGh0dHBzOi8vbGV0c2VuY3J5cHQub3JnL3Jl
23 | cG9zaXRvcnkvMIIBBAYKKwYBBAHWeQIEAgSB9QSB8gDwAHYAVYHUwhaQNgFK6gub
24 | VzxT8MDkOHhwJQgXL6OqHQcT0wwAAAFkwTCjoQAABAMARzBFAiBvm2ew5oXw2CHQ
25 | ygRDVEbrQkDdGnmRj8lx4k75FNa/dgIhALM8NgPJab5jJMRvADCdy/LJj8+5Gi04
26 | 4tcl3J532lJfAHYAKTxRllTIOWW6qlD8WAfUt2+/WHopctykwwz05UVH9HgAAAFk
27 | wTClggAABAMARzBFAiAZ4HrCj+IKU+mGh2BJIWtvBT13qk0ai4Ec3ndirtD87gIh
28 | ANCF4uYKqjoFI21Fw2h5gtebtYoQI9QzVtuSUMhEWqi7MA0GCSqGSIb3DQEBCwUA
29 | A4IBAQCDYyY7utzCSUVgJZuWr6kV33cHchiBEsHC2mi3Q9kHzyHcSzOoDDJ57YN6
30 | qvTYbOi1jrTjKmZCshk+6wN4x87bdfaR005UWCt7Zsq+Q+tGsdjoWDXuVIBWzsSZ
31 | d5MdaMnnBe8bs1Ja52eEgjUOJJBKLnm9g2mTi0kHk6odsAwn3VTovgdXl0rELf7M
32 | GmgbfotAxjgNk1/SMVdQ5vYNWjof/9pTh0pYSlCqCo/rKFJ0DzCvcRwEhDicKZAi
33 | 7lN7kKfM2DcqGdOKxQ7EcHsU9rPImblJ2P5Wt2nHT9IlUu6sG5764Dv4up6JJmYm
34 | Uws5Geb9IwzxBU/7YjizaOf8lpta
35 | -----END CERTIFICATE-----
36 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/www-data/artifactory-privkey.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCwnsruxAEYlabd
3 | o5rMpvHZ0QKYAqokzR+fpqBUdVDFaU2yXmxvO6b33bqqDhIX+79QNTUmPZtAH46E
4 | gRKngLZUOxqekEKcL4jdJh+tStRfHOb4vMPYzj9ahJDmKbXRs9X1wGVqAXv4v4lf
5 | OTlJdYkXnOxARa82fPDLvOcezolidMp4kutXwiSAC1alKBDiuvXVhL/YN51IuyT3
6 | QtO839GJxgNqSiozOzbg+9f6BwrQgAeGISq7QufKxm49zGRpAMZ4FJcBDqp0vo5m
7 | F0EBlei3ocRXDdl1oXpqlCXhjFX1Irbb9tpHh7w611iMXmHWsG6orzskJ6Gh/wSO
8 | 5DMiuFCVAgMBAAECggEAA2KHsRvAiaAUmprQFKv+ZpZC3nbum9XyjTS8TeIRsjba
9 | bliw61DRWl69fRTX8+23NOpQInN7zjbMnG181gj5dZPMZs2M+UFP9i0M3l1YxJnh
10 | M1MqzbTmMe3oeQDDo0k3CIavauCsZ5AEKfcGygmenv066eBsAzdRxhXQuB5tM53R
11 | OmlXKAVMXqa1HfRN2NywMTgC7ITVZdMc/hsX9RXShbZHuNdKmIcrBwTYSyuuUZD6
12 | ZMt96R40kaF1gZp62vcDe5vLEu7DSrnSWK84WysjBYtN+7fRR8cqpu/1JdemJ27T
13 | ajyJW/7ESQkIGRI8iHZh135tmAau6I9rB1+ojGl/4QKBgQDfmbqoz5Q0Ex/V4LTi
14 | 5k+TiymbTD3h98ZRi7R+FmH+BjaYM/yp2OAukyFgA+gPtCyibwsZx6QjLGRPDIM/
15 | EMWtYAFVMFQrcNuFHLBvtX5VRjocAzatnXLAry+eCk+sHDwvDcDpX6Kd9vdkGqzh
16 | c9sNd0VDMF0MoQA4rxKyY9H9/wKBgQDKNl9HJW/l1keteSdmAMtfdLWcSSvjiewZ
17 | SUvgNm87JnQ41+46Rm3f6MisIveLKYOBQ4rqgkWO19r1zdLV96Vm070a8Pcwcbk8
18 | 9Erivyaet9xW/3+7atfPUN+fHgEl9Txe0xy/4VjU7gBV2ZMCSCZwTRQkSvh0i+9j
19 | w40I4lrZawKBgQCo43eunCOH9c39AR3/qBABFILXrw2qyTi9TTWGayeW9GB2NAYq
20 | 0An8g9sYp7q3adKw2gjbbwTWIGifoIbdsrBs1PAMxVU9/p+Aenv11HNYu5OWpwK8
21 | vgj36xqfV5B4Ild+/vdwDnFIuUqBDPj5zUVudWEWb8hIiq8dIXeQ0H7DRwKBgQCX
22 | +YtcJlfWDljqmL6ym/A9erKvlhLFPThTvNGo3mB0vS7mNetWou4bNZH0RYd0cKvk
23 | L8Ys/+GVPMfUq29E3o3SHVrn76hoIWCN3jCWAAHfkeGpBPeT/PXbWzXmGZ/VTl2k
24 | Cth1ocCor4NTAmXJdj5lcpiruijHNxChEyxhO/X5pwJ/dI1jLY7Ez3fJmNoRDMMY
25 | B4H8PZdMMB3XdL4XOuAMO/q9erlVGYJBE/ZI9V7EwK80HAG4gXiWATl6Gay+w/MD
26 | Lb61p7q/RdCAwldu0s5Np66Zlyg1itN02AxklPbATFE8SIRg6oIPod7MtaP2iZz9
27 | iYIOwNY4CPfP4tsmPIR3tA==
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/www-data/ci-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIF/zCCBOegAwIBAgISA1QAwTIf8peBeBOnxUVFlbxnMA0GCSqGSIb3DQEBCwUA
3 | MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD
4 | ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xODA3MjIwNzUyMDNaFw0x
5 | ODEwMjAwNzUyMDNaMBUxEzARBgNVBAMTCmNpLm1hZGUudWEwggEiMA0GCSqGSIb3
6 | DQEBAQUAA4IBDwAwggEKAoIBAQDSaIJz+1WJzwxhOIDVmpmOdV4UvrF2PbcsYM39
7 | MNyzSKkYuB0fniE+ESuuvazjD5cFBn6YWJuFla71Syq2N5VykxBXa5aNgUnpsV7f
8 | wjuR1p17rTLhyWyfXy9RmgJhymDd5ZVbfMURWethzskYgSmZG7HlhSb0MFkcfXYO
9 | vpgPWfyu3PLAo4sfv6gURoHbigdWF4Imz/f+R3vWrOsSuyrhG/27kZzDK+CJi6YH
10 | DBaf/m6JmZC/4lUTjFvmn++d9N7xBV+QlJpzjgMXnqtzQusjv1d7X+DjZ9SWjGOV
11 | klmQ+ld/ffBgTWU/uzWm+hk4+F57ZhKKEHICpqOudxbaRsOTAgMBAAGjggMSMIID
12 | DjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC
13 | MAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFJnGJBIYZpgJ4NbXhCs4wDnzNHpZMB8G
14 | A1UdIwQYMBaAFKhKamMEfd265tE5t6ZFZe/zqOyhMG8GCCsGAQUFBwEBBGMwYTAu
15 | BggrBgEFBQcwAYYiaHR0cDovL29jc3AuaW50LXgzLmxldHNlbmNyeXB0Lm9yZzAv
16 | BggrBgEFBQcwAoYjaHR0cDovL2NlcnQuaW50LXgzLmxldHNlbmNyeXB0Lm9yZy8w
17 | FQYDVR0RBA4wDIIKY2kubWFkZS51YTCB/gYDVR0gBIH2MIHzMAgGBmeBDAECATCB
18 | 5gYLKwYBBAGC3xMBAQEwgdYwJgYIKwYBBQUHAgEWGmh0dHA6Ly9jcHMubGV0c2Vu
19 | Y3J5cHQub3JnMIGrBggrBgEFBQcCAjCBngyBm1RoaXMgQ2VydGlmaWNhdGUgbWF5
20 | IG9ubHkgYmUgcmVsaWVkIHVwb24gYnkgUmVseWluZyBQYXJ0aWVzIGFuZCBvbmx5
21 | IGluIGFjY29yZGFuY2Ugd2l0aCB0aGUgQ2VydGlmaWNhdGUgUG9saWN5IGZvdW5k
22 | IGF0IGh0dHBzOi8vbGV0c2VuY3J5cHQub3JnL3JlcG9zaXRvcnkvMIIBBAYKKwYB
23 | BAHWeQIEAgSB9QSB8gDwAHUA23Sv7ssp7LH+yj5xbSzluaq7NveEcYPHXZ1PN7Yf
24 | v2QAAAFkwTBEfgAABAMARjBEAiAHuPvM9Q7xlTl7XV/M5zmxuBj8Aj8XaKa0/mp3
25 | Vb0aZAIgJE6eRfoCrGfKj42B02XwU8GebnTnFrTJ9gHfTiNpMv4AdwApPFGWVMg5
26 | ZbqqUPxYB9S3b79Yeily3KTDDPTlRUf0eAAAAWTBMESTAAAEAwBIMEYCIQDWYB0i
27 | blCxt9+R48VUH3d4oikbpZCubDLxGYxn8T6htAIhAJLT+eH8/YX0YG+1uz5yuI6a
28 | Vpb0xRuih/8pZ9PebIRIMA0GCSqGSIb3DQEBCwUAA4IBAQAWl0d3AHmYNjj15njF
29 | fWyDtuX3m9zEYC8nNBWFonG0fxjH+Ofhd0PvpdhYB4HCBzREfbmCcoAhbqYGUx+U
30 | qK6xIZuU1fgAtJmB0yzLYO5jVJoAd3LFlA4/ofMh17DjZ5gJzwO/hBSaj5upsTkt
31 | p0NqYDOTahO2WROHTLx3We6YW4p2U1oWRYMT99LDOVe4Ubei+P3Ll65274N1mjtn
32 | BkNefjWvE8qdpj6RNaCeRbVjEGHi9O51LcnhMYvV/+rjdQeafiXFSqpwlNH+iJOq
33 | ONTdk/uhT3gX+eVxDRQCywf3sY/IRmx113mmeW9XMdInyiviEpqpnqzkI3Vsvsap
34 | 4Ca5
35 | -----END CERTIFICATE-----
36 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/www-data/ci-privkey.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDSaIJz+1WJzwxh
3 | OIDVmpmOdV4UvrF2PbcsYM39MNyzSKkYuB0fniE+ESuuvazjD5cFBn6YWJuFla71
4 | Syq2N5VykxBXa5aNgUnpsV7fwjuR1p17rTLhyWyfXy9RmgJhymDd5ZVbfMURWeth
5 | zskYgSmZG7HlhSb0MFkcfXYOvpgPWfyu3PLAo4sfv6gURoHbigdWF4Imz/f+R3vW
6 | rOsSuyrhG/27kZzDK+CJi6YHDBaf/m6JmZC/4lUTjFvmn++d9N7xBV+QlJpzjgMX
7 | nqtzQusjv1d7X+DjZ9SWjGOVklmQ+ld/ffBgTWU/uzWm+hk4+F57ZhKKEHICpqOu
8 | dxbaRsOTAgMBAAECggEARkqPujtuuhknY5tCvxlkz0i6Y2fJVV0MwpCcgEdAk8xk
9 | YVOwtw1Wwatu0nOfPEPLCey/ghmn6xq0VlIENNeg36Ptw6a3pxJYJPASZ+I1kGUI
10 | O3cDFpb56MwdvL8cqJq5fafjG6s1ErTdg9FXMQ6FXRb5eVngR5eXTYNEW2yk1At6
11 | 1swDH+fY0VJQWGrk1Wa+/IsvJb2VSSnfn9lCxTNk5yt8nVI+Rw4ca1V8a8lmmWwq
12 | VVay9SKmLehFp/bNqeUMdlvZKus6FKu2pv+bixkBxfAUbYAi11J/kCN89QOAE+w8
13 | zHv2POFHVqt4h/tP/Zz9xqdUS09/4FlHsQIvd10qkQKBgQDt7EoaVXRFWWrF9dQp
14 | CN55AIRZjW0aFo4h6ERYmMj+rAWl5MO72bKPzIH05VGWogkedOJAIhnhOhDMoAlj
15 | +2H90NZN8XXtXtQkACAQ/14dqOqxO1JKhz38o6H/i4OWKkc54TFHPHZe1DA8OJmB
16 | N0v6KXdaJWSoWmbzIMhVprN/1wKBgQDiZQuhxJldGYYLFOQUNOrJHC9gbsT63vbe
17 | e6Vd8oTqNQkz+tTCMRlNEssRlOdZQZR0DbA5xU8XHrK7Ht/Q9vCglsSB0NUkBTNX
18 | 2H3BT53XVl/hv2FHmrQoaR3TawgV71w7AV/gBASfwj8ZGfU1VSvTeZrHbC4fw5if
19 | tmvj6SHSpQKBgQDIlbFTyj+gR+ZwxvP7NHL7aKgsJ8OXfvSNOZiFaSnrK3QHjHpI
20 | YRzdhvj5UmwaprkxaDFnCFjxPVb0bOf91u2iYvKJ+8wENzJ6t3BRZDogn0xd5zlV
21 | g/eKixoEq8ZgT3yFG4nEGQFiqVmRK9aezdPMMDFjWqsmFDE/QKAlNcmXOQKBgGku
22 | aOkPXfFTGmOroQuCSlKFNb2nuPyU+j0DVSEAu5/UZCIyNwbBqO+V+6J/e7yMK1x/
23 | v5AXi1dCe+dTwMhk8gaqBUIKs/u4LtJxUnutcbfJ+WH4eEm8Jldf/utqnqcSPdds
24 | HA8Hzt4qcQZIeRP+Lg5iQPbF2+gGB7KsHhQVDBHBAoGAftBE/mZqJfcjHL4P0RVR
25 | 0FVygx3E1dKDr92mkA27wXWik7v94x5RIhy0Moq+Nklhm8MPdUck78BvYv6+FPyc
26 | YPMcFiOiQOkWGkGB7NkSmd2fK36j8dlYakmQqbnsk7/UQN8f3Gex0J11r8asgnYU
27 | rev0RgMhK/n6Rw69k8QBUtY=
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/www-data/dh-params.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN DH PARAMETERS-----
2 | MIICCAKCAgEAzu56LoEx/VBt1ksIXsCOEn2q6vfvrwtBoHLJttWRGosS4+S/ZZXp
3 | shih9Gfv0q+lmiyRapBQzj2pB1UcIkjfDnLl8YVX51jyUROPFlriRyiWDtcEp8p9
4 | mH+vtT+mU0hQ9m+xSy3rRFY7It5nogrigGhYoNfhb7orYdjf1YHbmsNhyFVvauAu
5 | beSJiqGjhmBQ9uMBs2Rh5CQqady/HjSOItzKM1pN2V168nTeVPYhwLthQ1Rk4i7f
6 | KkGRdOTEwaNAthjmD0S1aYwEraoqS17EKQQVbpAQsDOInkNYGIZfXt3SLUMbVYfD
7 | jcpOY40fBxGuk2+fYyoAv+6BjBFlxtqIDd3V5EacvhTVM8Rzt7KYPMJ54R+G63M3
8 | fghJH9WxHIA+NGLhYa9siimchwn0HXtrDT+mrQc5HcNasWI+T5Sqz2WZX0PFqPxO
9 | WbRq+ZG01evoHzTF85uUq04xgKDPCKaG3Ypo7t01r5NeyIFR+yv4T630/b7yzFrX
10 | ZcWcKS4gFcYPeQiWryV5+ZvN7eRqHIrL3XjzFiF5Df56mtDGXYmsOhgZkCZqcf1E
11 | aw3B1lxjAk4SBVY2C4PFHzkrCnE11pYVkzootWh5MB8vjO6QlwQ3jICHO909xU1e
12 | BgxRHbeYE02o0gC5lHof4xwBHmlZmByCZd/p3e2VLA2DZW9KXmsrnfMCAQI=
13 | -----END DH PARAMETERS-----
14 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/terraform/www-data/html/index.html:
--------------------------------------------------------------------------------
1 | yo!
2 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 |
3 | config.vm.box = "centos/7"
4 | config.vm.box_check_update = false
5 |
6 | # ~* Hosts *~ #
7 |
8 | hosts = {
9 | :master => {
10 | :memory => '2048',
11 | :ip => '192.168.10.9',
12 | :ports => [ 8080, 8081, 5000 ]
13 | },
14 | :slave => {
15 | :memory => '512',
16 | :ip => '192.168.10.6',
17 | :ports => [ 8090, 8091, 8092 ]
18 | },
19 | }.each do |name, settings|
20 |
21 | config.vm.define "#{name}" do |box|
22 |
23 | box.vm.network "private_network",
24 | ip: settings.dig(:ip)
25 |
26 | box.vm.provider 'virtualbox' do |box|
27 | box.memory = settings.dig(:memory)
28 | end # ~* End of Custom Specs Settings *~
29 |
30 | # Provisioning Separate Key
31 | # later will be used by ansible
32 | box.vm.provision :shell,
33 | :run => "always",
34 | :path => "shared/ssh-keys.sh"
35 |
36 |
37 | if ( "#{name}" == "master" )
38 | box.vm.provision :shell,
39 | :path => "shared/master.sh"
40 |
41 | box.vm.synced_folder "./shared", "/shared",
42 | type: "sshfs",
43 | sshfs_opts_append: "-o nonempty"
44 |
45 | box.vm.provision "shell",
46 | run:"always",
47 | inline: "docker-compose -f /shared/registry/docker-compose.yml up -d"
48 |
49 | box.vm.provision "shell",
50 | inline: "echo 127.0.0.1 registry.made.ua >> /etc/hosts"
51 |
52 | end # ~* End of Master Machine Definition *~
53 |
54 | if ( "#{name}" == "slave" )
55 |
56 | box.vm.provision "shell",
57 | inline: "echo 192.168.10.9 registry.made.ua >> /etc/hosts"
58 |
59 | end # ~* End of Slave Machine Definition *~
60 |
61 |
62 | settings.dig(:ports).each do |port|
63 | box.vm.network "forwarded_port",
64 | guest: port,
65 | host: port
66 | end
67 |
68 | end # ~* End of Machine Definition *~
69 | end # ~* End of Hosts Iteration *~
70 | end # ~* End of Vagrant Definitions *~
71 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/readme.md:
--------------------------------------------------------------------------------
1 | # Vagrant-powered CentOS environment
2 |
3 | This `Vagrantfile` and its provisioners create and provision a `master` node with Jenkins, Maven, Artifactory, Docker and a Docker Registry. The `slave` node is only partially provisioned, with ssh keys; it is later provisioned with Ansible from the `master` node.
4 |
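A minimal usage sketch for bringing the environment up (assuming the `master`/`slave` machine names from the `Vagrantfile` and the `vagrant-sshfs` plugin for the sshfs synced folder):

```sh
# install the sshfs synced-folder plugin once, if it is not present yet
vagrant plugin install vagrant-sshfs

# bring up the CI/registry host first, then the deployment target
vagrant up master
vagrant up slave

# re-run only the provisioners, e.g. after editing shared/master.sh
vagrant provision master
```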
5 |
6 | # Technical
7 |
8 | * __master__ node (ip: `192.168.10.9`, host: `registry.made.ua`
9 | (certs valid for 3 months since July 22nd and used for the `docker registry`; a registry login smoke test is sketched below) )
10 | * __slave__ node (ip: `192.168.10.6`)
11 |
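A quick registry smoke test run from the `master` box (a sketch; the `admin`/`password` credentials and the `registry.made.ua:5000` endpoint come from the shared `registry` docker-compose setup):

```sh
# log in to the private registry, then list its repositories over the v2 API
vagrant ssh master -c "docker login -u admin -p password registry.made.ua:5000"
vagrant ssh master -c "curl -k -u admin:password https://registry.made.ua:5000/v2/_catalog"
```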
12 | # Deployments
13 |
14 | * QA [127.0.0.1:8092 within virtualbox: 192.168.10.6:8092](http://127.0.0.1:8092)
15 | * Dev [127.0.0.1:8091 within virtualbox: 192.168.10.6:8091](http://127.0.0.1:8091)
16 | * Prod [127.0.0.1:8090 within virtualbox: 192.168.10.6:8090](http://127.0.0.1:8090)
17 |
18 | ## Required Manual Tweaks
19 |
20 | * Configuring Jenkins:
21 | * Artifactory ( Manage Jenkins -> Configure System -> **Artifactory** ):
22 | - Server ID: `artifactory`
23 | - URL: http://127.0.0.1:8081/artifactory
24 | - Default user (`admin/password`)
25 | - Default repository (no need to provide one, but it's hardcoded in the Jenkinsfile)
26 | * Maven/Ansible/Docker ( Manage Jenkins -> Configure Tools):
27 | - Docker { location : `/bin` }
28 | - Ansible { location : `/bin` }
29 | - Git { location : `/bin` }
30 | - Maven (provide your version of `maven`)
31 | * Create a Multibranch job for this repository.
32 | * And we're ready to go!
33 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/.ssh/ssh-private-key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEpAIBAAKCAQEAsPRBFeCc5Iv0B9+i6MrmoUgHHlx2TIWmdKf7pTKLVCqHpGuN
3 | DZz1cOtDpUX86kJ08Zscfhh00Ohl44Q8xoV/pANAy0eBivPHnTcHOHYHLIcwpdPS
4 | zoGpLSR7RKcd7ixqQCCe76pWPUeVrZ75N24CgT17V37+cqMxSs34HMU8nzUOoSIM
5 | Q2K+xbY82dOufJpqopnC+Oy6z6T9L/EcUCucbK+C33lh+Lt7UzJIgv2qEcPkrK8F
6 | PqOwNALt6Jy5Q/0ndiC0v3JaMva23WHX9/gOWYeLth/y9uO83uzzVRG3kyiHx6Ni
7 | Ei/d6JtMVgUvxTa1kStx8n9fW64dLlrOxOwVIwIDAQABAoIBAHiZLxZy61qTy2/m
8 | SYfwaLg4d6mIiJNkbuqIZefTh8OH7MPxDMPfWiNoI8Uq6tDZFAxpk0SY9FxAVth1
9 | kloWN8SW9vzTsgT6Wevayg//2KnPRSdvMvfMxHjgqSZD4xEkshEcalWauIWxGOCo
10 | PN87GXftZ/dctJt6fyilMViiT8uMYlV3EsdW6HOYkcFpzhUKPq6mrFdIV7xllMGm
11 | oAqmAMGfARWOf24EjrtA4rfyf0KNT4hTd8amsTIYPLocMME36BJABW72u/KB7ndP
12 | REJFA4O9gbhaxfIx+uUXhnmhI1KyY6ySNFHCgYTk4aebIhMJlTwansngHcIbfnYI
13 | V8XK9ukCgYEA17AMdr/WO1YOqKHCMlqVrHI6M0arcPXYpDPn0jVQr9yzAm+rFde8
14 | B8YJzTj/Y/ppORPI77HC6gJ26xtLi0HmybkilCShnXJ+goLFuzjtpliUG86z7qWT
15 | KcI8odZR8DXENvGllLgvKtYMQNVslYjQKjeiqTBHU8i8zUDYXL319G8CgYEA0gbu
16 | QLCvyte9VAaStiMbC8cTqgEg+U6Di2Lxt6OdmBZRdlca8JlE2EL8/Z6v7wXaOzn3
17 | jDLWm3dMKBzKlhoztFABsD/GPNPsU3UtV0kJrII54BUz6ic77F/7BSw9Zz9fId8r
18 | F/Kvyo7hMqtDVgAywJ7hmd2vYlHgZSeJdkMUzI0CgYALS4TbkyA86zzJR+9Lg8Li
19 | zPiPRtwjhjxaHP2+3FvlWcFmF0L2Wd3W6mAJBzsYmTx5aLfVFZIHfymLFlrWaBUV
20 | 60QMTf7Ip/5IB2EjdM+9LlZTPnfdv339adaTAw43bhlgZzcsmpidvafWnVaaXhfI
21 | njE+evDBSnnYpdSuxqPisQKBgQCytr8JQM91FPq/gxBX96tN7EopQOij+1EnzDvO
22 | fWTeQy9yOVBtJp5UPzwzO1KNWzeFah/gpmnCxFKDV2xzKTyR0/p71OiEXfgcU/sO
23 | 6NFYfz9sE96nAleVHV7l2e0t69ixY8qxiApZnBa4HM8hYO3OxeNGIELasLz2lhv8
24 | C7ypCQKBgQCViBy4q/uQQSHTm9SL9PeOgBZ+aRpILajnaI1SVXCuoLPF5fK2ByFE
25 | TCdEU25OK337pdntu9a/M9cSP+g7w48qAfmNqK0EuEjeIHfGivv/Bd7gOAOp5cL/
26 | gdBavJgyIHl8YOkD7DUXPeuO50RqEYxY0SAEp7bRO013KJIsZLIjAQ==
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/.ssh/ssh-private-key.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw9EEV4Jzki/QH36LoyuahSAceXHZMhaZ0p/ulMotUKoeka40NnPVw60OlRfzqQnTxmxx+GHTQ6GXjhDzGhX+kA0DLR4GK88edNwc4dgcshzCl09LOgaktJHtEpx3uLGpAIJ7vqlY9R5Wtnvk3bgKBPXtXfv5yozFKzfgcxTyfNQ6hIgxDYr7FtjzZ0658mmqimcL47LrPpP0v8RxQK5xsr4LfeWH4u3tTMkiC/aoRw+SsrwU+o7A0Au3onLlD/Sd2ILS/cloy9rbdYdf3+A5Zh4u2H/L247ze7PNVEbeTKIfHo2ISL93om0xWBS/FNrWRK3Hyf19brh0uWs7E7BUj butuzov
2 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/ansible/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Is it running 1?
3 | debug: { msg : "c'mon everybody!" }
4 | when: ansible_distribution == "CentOS"
5 |
6 | - name: yum install epel
7 | yum: { name : 'epel-release.noarch', state : latest }
8 |
9 | - name: Updating `yum`
10 | yum: { name : '*', state : latest }
11 | when: ansible_distribution == "CentOS"
12 |
13 | - name: Installing Software (Docker)
14 | yum: { name : 'docker', state : installed }
15 | when: ansible_distribution == "CentOS"
16 |
17 | - name: Starting Service (Docker)
18 | service: { name: docker, state: started, enabled : yes}
19 | when: ansible_distribution == "CentOS"
20 |
21 | - name: Installing Software (python-pip)
22 | yum: { 'name' : 'python-pip', 'state' : 'installed' }
23 | when: ansible_distribution == "CentOS"
24 |
25 | - name: Installing pip package (docker-compose)
26 | pip: { name : 'docker-compose', state : present }
27 | when: ansible_distribution == "CentOS"
28 |
29 | - name: Deployment Location Check
30 | stat: { path : "{{ app_path }}" }
31 | register: deployment
32 |
33 | - name: create location folder
34 | file:
35 | path : "{{ app_path }}"
36 | state : directory
37 | owner : "{{ ansible_user }}"
38 | group : "{{ ansible_user }}"
39 | mode : 0755
40 | when: deployment.stat.exists == False
41 |
42 | - name: docker-compose.yml
43 | template:
44 | src : 'docker-compose.yml.j2'
45 | dest: '{{ app_path }}/docker-compose.yml'
46 | owner : "{{ ansible_user }}"
47 | group : "{{ ansible_user }}"
48 | mode : 0755
49 |
50 | - name: Docker Compose Deployment
51 | shell: "{{item}}"
52 | with_items:
53 | - "cd {{ app_path }}"
54 | - "ls -la"
55 | - "docker login -u {{docker_registry_user}} -p {{docker_registry_pass}} {{docker_registry_host}}:{{docker_registry_port}}"
56 | - "cd '{{ app_path }}' && docker-compose down"
57 | - "cd '{{ app_path }}' && docker-compose up -d"
58 |
59 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/ansible/common/templates/docker-compose.yml.j2:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | application:
5 | image: {{docker_registry_host}}:{{docker_registry_port}}/{{app_name}}:{{ app_tag }}
6 | ports:
7 | - "{{ app_port }}:8080"
8 | restart: always
9 | mem_limit: 128m
10 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/ansible/common/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_registry_host : "registry.made.ua"
3 | docker_registry_port : "5000"
4 |
5 | docker_registry_user : "admin"
6 | docker_registry_pass : "password"
7 |
8 |
9 | app_name : appka
10 | app_tag : latest
11 | app_port : 8080
12 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/ansible/inventory:
--------------------------------------------------------------------------------
1 | [all:vars]
2 | ansible_port=22
3 | ansible_connection=ssh
4 | ansible_user=vagrant
5 | ansible_ssh_common_args='-o StrictHostKeyChecking=no'
6 |
7 |
8 | [hosts]
9 | slave ansible_host="192.168.10.6" ansible_private_key_file=/vagrant/shared/.ssh/ssh-private-key
10 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/ansible/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | name: Docker Images Deploy Procedure
4 | gather_facts: yes
5 | become: yes
6 | roles:
7 | - common
8 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/jenkins/install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | while IFS='' read -r line || [[ -n "$line" ]]; do
4 | # echo "Text read from file: $line"
5 | plugin=$(echo $line | awk '{print $1}')
6 | version=$(echo $line | awk '{print $2}')
7 |
8 |
9 | if [ -d "/var/lib/jenkins/plugins/$plugin" ] || [ -f /var/lib/jenkins/plugins/$plugin.hpi ] ; then
10 | # | \
11 | printf "%s installed\n" $plugin
12 | else
13 | url=$(printf "http://updates.jenkins-ci.org/download/plugins/%s/%s/%s.hpi\n" $plugin $version $plugin)
14 | sudo curl -L -o /var/lib/jenkins/plugins/$plugin.hpi $url
15 | sudo chown jenkins:jenkins /var/lib/jenkins/plugins/$plugin.hpi
16 | fi
17 | done < "plugins.txt"
18 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/jenkins/plugins.txt:
--------------------------------------------------------------------------------
1 | docker-commons 1.13
2 | jdk-tool 1.1
3 | script-security 1.44
4 | handlebars 1.1.1
5 | command-launcher 1.2
6 | bouncycastle-api 2.16.3
7 | structs 1.14
8 | credentials 2.1.18
9 | ivy 1.28
10 | ssh-credentials 1.14
11 | plain-credentials 1.4
12 | durable-task 1.22
13 | ansible 1.0
14 | docker-workflow 1.17
15 | scm-api 2.2.7
16 | ace-editor 1.1
17 | artifactory 2.16.2
18 | jquery-detached 1.2.1
19 | workflow-step-api 2.16
20 | workflow-scm-step 2.6
21 | workflow-api 2.28
22 | cloudbees-folder 6.5.1
23 | workflow-support 2.19
24 | workflow-cps 2.54
25 | git-server 1.7
26 | jackson2-api 2.8.11.3
27 | github-api 1.92
28 | apache-httpcomponents-client-4-api 4.5.5-3.0
29 | jsch 0.1.54.2
30 | git-client 2.7.2
31 | display-url-api 2.2.0
32 | mailer 1.21
33 | junit 1.24
34 | branch-api 2.0.20
35 | matrix-project 1.13
36 | git 3.9.1
37 | token-macro 2.5
38 | github 1.29.2
39 | github-branch-source 2.3.6
40 | workflow-job 2.23
41 | workflow-multibranch 2.20
42 | pipeline-github 2.0
43 | pipeline-input-step 2.8
44 | github-oauth 0.29
45 | pipeline-stage-step 2.3
46 | pipeline-rest-api 2.10
47 | pipeline-graph-analysis 1.7
48 | momentjs 1.1.1
49 | pipeline-stage-view 2.10
50 | gradle 1.29
51 | javadoc 1.4
52 | maven-plugin 3.1.2
53 | ant 1.8
54 | credentials-binding 1.16
55 | config-file-provider 2.18
56 | authentication-tokens 1.3
57 | pipeline-stage-tags-metadata 1.3.1
58 | pipeline-multibranch-defaults 1.1
59 | workflow-durable-task-step 2.19
60 | workflow-basic-steps 2.9
61 | workflow-cps-global-lib 2.9
62 | pipeline-github-lib 1.0
63 | simple-theme-plugin 0.4
64 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/master.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # echo "Running system updates using yum"
4 | # yum update -y > /dev/null 2>&1
5 |
6 |
7 | # Java and Company
8 | echo "Running installation: Java, Jenkins, Maven and Artifactory"
9 | yum install java -y > /dev/null 2>&1
10 |
11 | JENKINS_REPO=https://pkg.jenkins.io/redhat-stable/jenkins.repo
12 | if [[ ! -f /etc/yum.repos.d/$(basename ${JENKINS_REPO}) ]]; then
13 | echo "Action: Jenkins repo install"
14 | curl -L ${JENKINS_REPO} -o /etc/yum.repos.d/$(basename ${JENKINS_REPO})
15 | rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key
16 | fi
17 |
18 | yum install maven git jenkins -y > /dev/null 2>&1
19 | chkconfig jenkins on > /dev/null 2>&1
20 | systemctl start jenkins > /dev/null 2>&1
21 |
22 | # Jenkins Plugins
23 | cd /vagrant/shared/jenkins
24 | chmod +x ./install.sh
25 | ./install.sh
26 | cd /vagrant/
27 | systemctl restart jenkins > /dev/null 2>&1
28 |
29 | # Artifactory
30 | if [[ ! -d /opt/jfrog/artifactory ]]; then
31 | curl -L -o jfrog-artifactory.rpm 'https://api.bintray.com/content/jfrog/artifactory-rpms/jfrog-artifactory-oss-$latest.rpm;bt_package=jfrog-artifactory-oss-rpm'
32 | yum install -y net-tools > /dev/null 2>&1
33 | rpm -i jfrog-artifactory.rpm
34 | unlink jfrog-artifactory.rpm
35 | systemctl enable artifactory
36 | fi
37 | systemctl start artifactory
38 |
39 |
40 | # ansible
41 | echo "Running installation: Ansible and python's pip"
42 | yum install epel-release -y > /dev/null 2>&1
43 | yum install python-pip -y > /dev/null 2>&1
44 | python -m pip install --upgrade pip > /dev/null 2>&1
45 | python -m pip install ansible > /dev/null 2>&1
46 |
47 |
48 |
49 |
50 | # Docker
51 | echo "Running installation: Docker, Compose and Registry"
52 | DOCKER_EXISTS=$(which docker)
53 | if [[ -z $DOCKER_EXISTS ]]; then
54 | curl -fsSL https://get.docker.com/ | sh > /dev/null 2>&1
55 | usermod -aG docker $(whoami)
56 | usermod -aG docker jenkins
57 | usermod -aG docker root
58 | systemctl enable docker
59 | fi
60 |
61 | systemctl start docker
62 |
63 | # Docker Registry
64 | docker pull docker.io/registry:2.6.2 > /dev/null 2>&1
65 |
66 | # Docker Compose
67 | python -m pip install docker-compose > /dev/null 2>&1
68 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/registry/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | registry:
5 | image: docker.io/registry:2.6.2
6 | ports:
7 | - "5000:5000"
8 | volumes:
9 | - ./data:/var/lib/registry
10 | - ./secrets:/certs
11 | - ./secrets/htpasswd:/auth/htpasswd
12 | restart: always
13 | mem_limit: 768m
14 | environment:
15 | - REGISTRY_HTTP_TLS_CERTIFICATE=/certs/fullchain.pem
16 | - REGISTRY_HTTP_TLS_KEY=/certs/privkey.pem
17 | - REGISTRY_HTTP_ADDR=:5000
18 | - REGISTRY_HTTP_HOST=https://registry.made.ua:5000
19 |
20 | # admin : password
21 | - REGISTRY_AUTH=htpasswd
22 | - REGISTRY_AUTH_HTPASSWD_REALM=Registry_Realm
23 | - REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd
24 | networks:
25 | default:
26 | aliases:
27 | - registry.made.ua
28 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/registry/secrets/htpasswd:
--------------------------------------------------------------------------------
1 | admin:$2y$05$e9iSb5kOMcLdSDo4Ok1UDO7ZCzEVuZwwoJdBHGtxxI0O8WBMIC/iC
2 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/registry/secrets/privkey.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDGFTO0WDvhb21h
3 | Slm267NzkHhXBPUim5VOW2AMI/0w9SXAxPL9m3x2Bm8v7AunfXEzT0SQlCHuUKBs
4 | LgjULbXwQJeu0pl0j/AQNCTNb30jsOQ9VfNyMLSP1TCDcWKuzoS7mknWZadUfCYD
5 | Vi/9pfPR+18yltxrosK4vaYVDcRzbcjnQY6YjNP1NIQUPm1sha6grkctN0BYtJpH
6 | ZUM9jr6y6R8Lep2wjETOvj2ylD7wuMO7man+sipq1OBjeSpZNDcMZpWxR2eN6b9H
7 | XkA7Zr6yCzGdTOSDocS1VMUwlmNfR+uJGU9jflfCFKW6RmSOUrcNpoP853f6lwxz
8 | YahxWg2NAgMBAAECggEBAJG8o6/6gwQYEWfVZjB5Niy+jGup0BnIUo+2FvKXv/83
9 | PrGXthf1QBsuHLwrdV+joZfYqF+fPV+znnQ5YnGJuCStwWv0ssbZ6Axj8PfdITsT
10 | OLoP9O98+jwg5HKxB6QJegr1N4IwhG5acTVfQivi34MM6Tu1VzMyKwX1Mq6m1HT/
11 | cqc3V0ZUDQqrpmvDAEmOTILvcWZjOHgEhbwtqnmUbuUvGCjXZ8QXnTSRfr8XFx5n
12 | mTlewMbwVGjHxDfnN+r7ttoG47pRhIRG3Dw3unhgYrnppD+h3PxDbQROVvpwn9XN
13 | OOJN9j9sjhDjJzUAXdHQFr8VIsbWlrgQtUN2iVUUbMECgYEA9H4zX2NO/M7IidB+
14 | rAQ9mfcet1ORC0++BkF8DDw9eqhGUU+dNKalGl9qtHOYKyw8nBYjCGqZA2A51cs6
15 | bLr0LJrhhkhQikqUtKc5kRDd9SA5OrLKCzK5dltzhe/dcA28tsAokWa+gk7LGiNl
16 | fmC0s2J6Dy2AKvxsmEEwTjATLpkCgYEAz2fS3f/48NJXXz0P/7Ex/naXruj35qHq
17 | pPdmFZJZk9dWm/R2pwVs4Hyw8z8v8ns8M7Qo0YW8emwSRUNq1wxaz44QQjZ8/fVS
18 | YllyJxnTtIsjpAWQ+o/MXLVqDFrAGF/nqEYK2ZaAB8C7UkUr/78XIsuy1Vpps+9m
19 | DFNYOPJs8xUCgYADDEO1r7Y/XpCPb0L1AASIk9UVMfx/6JmU2GIkgOe/TLO7tcNe
20 | 8ajjQEdDQqGmACItS/VZ2IJZ0WxHJwZubpI2sCOZmfMvFijua5tDHLO3p5QvECWG
21 | qDY5rIAvLkNTb0hn+tNGpMJBO5w6kjBkt+3owpSE9etAHe94om9ouh6zGQKBgQC6
22 | JfD4FvmTCvYoIGHvl1XLkRzqe2WjtccncHRLACMtsaOmLW8g0Jm7ka4vY5LhCwIp
23 | VZUdF+7l4YE0cSG/CPOoPVUSWF0ugbQDWUw/E8sSy56dY/GPMOi2fIxFsl2egtUk
24 | DllGmPEoBC8b08F9tEBLlo9XNeKRAFiSz2rrEYn/UQKBgQDAlkObSYnQO3C5RzKf
25 | VSfB0mAwbMPYLOlb0hX2qis/2+xVmMBD/PfEd3eENyFYgR4qwJPujmA0r6He3Mfb
26 | NjLQZxAsVmEEN6CJk6tHo3OJL1I9dfuj2WjOI2WeLQQNVJBBusb4UixOpy5z02iS
27 | Rmjo9jaB64RrMTjnPrDE9HkQ9A==
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/vagrant/shared/ssh-keys.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | PUBLIC_KEY=/vagrant/shared/.ssh/ssh-private-key.pub
4 | AUTH_KEYS=/home/vagrant/.ssh/authorized_keys
5 |
6 | if [[ -f $AUTH_KEYS ]]; then
7 | SPKE=$(cat $AUTH_KEYS | grep butuzov)
8 | if [[ -z $SPKE ]]; then
9 | cat $PUBLIC_KEY >> $AUTH_KEYS
10 | fi
11 | fi
12 |
13 |
14 | if [[ -d /shared ]]; then
15 | sudo chown jenkins:jenkins /vagrant/shared/.ssh
16 | sudo chown jenkins:jenkins /vagrant/shared/.ssh/ssh-private-key
17 | sudo chmod 0400 /vagrant/shared/.ssh/ssh-private-key
18 | sudo chmod 0400 /vagrant/shared/.ssh/ssh-private-key.pub
19 | fi
20 |
--------------------------------------------------------------------------------
/010-Jenkins-Teraform-Ansible/version.md:
--------------------------------------------------------------------------------
1 | version bumped
2 | added Japanese and Chinese versions
3 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/.ssh/id_rsa:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEpAIBAAKCAQEAtIF2dyzpNBzOqCGWdbkcnY0fVdIq5Q2WjqhjDUQsQ+FswXjF
3 | CJxeQW9Y2crF2fBBIxAk+THEAIKa8VEtnaKOf86WyGlj6fUfS5o1dqTjmQ0nnZxA
4 | 3zk2yO7pWezqTT9XyT322Fh04R8IfnGgJSEkHKoCTsMcZZgJEvSdWvlcv7/EBae/
5 | 0gDwFo5w7ulwDfwwkm6hkujNCgn9bCmAdZn5lxE5IFkppMs6mAKCEiO2cw8Rt9mh
6 | nH4CbEInKaShdY4fAxPwI1MnycTQ24C8sWRQB5kihgJ+OxbqUP6M+7LC/XgdZuxi
7 | /HASmMIWUjdmpt9NjlVLvHABKT0Mxhk5NRMopwIDAQABAoIBAQCZtPCrFRxUWv+D
8 | Xm6gOoxqgSSg0QHj4MlHG46VWmsTNp+EtJGb/oOo1udjNm96YUMmV3YUmVMdWBoW
9 | yhi89hLANKxjZpNFQ41ttJg4Orrwtqn4ZXk8XJ0RR5iAEeWG+ByJ6nEL029b0DBM
10 | c/d6BpmYqxCh/zVsMaXwdUqVkGhxgqvzm9+8nnV+zZDbs8peUAMcexkaFT3onM8/
11 | SB0f7KIK57bQexrgbjkZZhOhn1hOV+2u7XyH5827G/UWnRdS9wmAQVUGKCSO0N8j
12 | HEbH8j/1qn5kc0C9uYBP/mqL1IsT++OgRfnHAyRMP29ATbusyby6tiE4s+Hzn3hS
13 | WGrcl5rJAoGBAOwXszcdmGmrsDTUkpx/dQVT4pu6frK6W934fE69VBPfgaLbMIj0
14 | ++hhCI/WxWyO8KSmiWC2B/6ycVtiumend7mPjWBBQy4TtSjSzviojRKX6MKvNO/T
15 | 3YKRn2vuT4L81x3R4lQI3q+5w+4upaBmjCR6jpWwkuFSbuHE1+ZZgJx9AoGBAMO5
16 | 3QN+X9O/dzYoFrb/AopujSvrKl9QKu7eNXqs0d9aIQ8L8URqpckuklyVdQqqp0gW
17 | ApI1DXfuHugWOjtrYwYi5FVFCTAIj4fLgNo/360lJO5lnU6LXIQMbLf3eofZlkn5
18 | AZcSvRLapTQP9947ICg8WK1mcWGDIwOIVjaG4XbzAoGBAJWa+xTM6zzi79i6b2E+
19 | HzBOFMXr6sdk8jv5bwQSU0n4ktRZIytWWp7Jhjxns1j0Ryd+5ITlNLG/77ciG879
20 | Pw/WGJdLI7/5+w0wcLj/Tc3fdz6O+fOThMVtc+FX9QijIcmiq1HnAP3HsJGo+4lm
21 | Eh43j0zpxFmF+/+S1eEBDE+dAoGAeelqTcPIgocvBO+hfmiGt502l5WKNgYkR9bm
22 | 7rt4mit0jf+9TwUysIYuqjZ5yaQUQCVdgp5C/mG2pI87xEkWS/styCMRHvcmF8pd
23 | PxQsCZgqUce/XO4XOTTX/xrKGpYgM4FDSUtls7m2zx/nH2gqloVIIvG9S6S4CXpB
24 | YQNQqpsCgYBcixMTN/cNv753sY/NOOqOOtuv49xJY1I8Bkg091kYvW0cXJCnG6FB
25 | ZaXKe2Q+42FJPkMKJdFfrUKvrTfXal3xq330l6L55t6a+/YKNpGBRnRlFXEZ/jXP
26 | tqzqh6iPfERe9wnvWSpGkTJFQChS/5+ocHS189jnNxcbhOXzHiP7Xg==
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/.ssh/id_rsa.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0gXZ3LOk0HM6oIZZ1uRydjR9V0irlDZaOqGMNRCxD4WzBeMUInF5Bb1jZysXZ8EEjECT5McQAgprxUS2doo5/zpbIaWPp9R9LmjV2pOOZDSednEDfOTbI7ulZ7OpNP1fJPfbYWHThHwh+caAlISQcqgJOwxxlmAkS9J1a+Vy/v8QFp7/SAPAWjnDu6XAN/DCSbqGS6M0KCf1sKYB1mfmXETkgWSmkyzqYAoISI7ZzDxG32aGcfgJsQicppKF1jh8DE/AjUyfJxNDbgLyxZFAHmSKGAn47FupQ/oz7ssL9eB1m7GL8cBKYwhZSN2am302OVUu8cAEpPQzGGTk1Eyin butuzov@XPUAKYIW0005.kyiv.epam.com
2 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/.ssh/ssh.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | PUBLIC_KEY=/.ssh/id_rsa.pub
5 | AUTH_KEYS=/home/vagrant/.ssh/authorized_keys
6 |
7 | if [[ -f $AUTH_KEYS ]]; then
8 | SPKE=$(cat $AUTH_KEYS | grep butuzov)
9 | if [[ -z $SPKE ]]; then
10 | cat $PUBLIC_KEY >> $AUTH_KEYS
11 | fi
12 |
13 | if [[ ! -f /home/vagrant/.ssh/id_rsa ]]; then
14 | cp /.ssh/id_rsa /home/vagrant/.ssh/id_rsa
15 | fi
16 |
17 | fi
18 |
19 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 |
3 | # ~* Hosts *~ #
4 | config.vm.synced_folder '', '/vagrant', disabled: true
5 | config.vm.box_check_update = false
6 |
7 | # counting IPs in private network
8 | ip_counter = 10
9 |
10 | envs = {
11 | :centos => {
12 | :box => 'centos/7',
13 | :memory => '512',
14 | :port => 8080
15 | },
16 | :ubuntu => {
17 | :box => 'ubuntu/xenial64',
18 | :memory => '512',
19 | :port => 8081
20 | },
21 | }.each do |env, settings|
22 |
23 | servers = [
24 | 'lb',
25 | 'app-1',
26 | 'app-2',
27 | 'db-master',
28 | 'db-slave',
29 | ]
30 |
31 | servers.each do | server |
32 | config.vm.define "#{env}-#{server}" do |box|
33 |
34 | ip_counter += 1
35 | box.vm.network "private_network",
36 | # type: "dhcp"
37 | ip: "172.28.128.#{ip_counter}"
38 |
39 | if server == "lb"
40 | box.vm.network :forwarded_port,
41 | guest: 443,
42 | host: settings.dig(:port)
43 | end
44 |
45 |
46 | box.vm.box = settings.dig(:box)
47 | box.vm.provider 'virtualbox' do |box|
48 | box.memory = settings.dig(:memory)
49 | end # ~* End of Custom Specs Settings *~
50 |
51 | # ~* Shared Folder *~
52 | box.vm.synced_folder "./.ssh", "/.ssh",
53 | type: "sshfs",
54 | sshfs_opts_append: "-o nonempty"
55 |
56 | box.vm.provision "shell",
57 | path: ".ssh/ssh.sh"
58 |
59 | end # ~* End of Machine Iteration *~
60 | end # ~* End of Hosts Iteration *~
61 | end # ~* End of Environment Iteration *~
62 | end # ~* End of Vagrant Definitions *~
63 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/ansible.cfg:
--------------------------------------------------------------------------------
1 | # https://docs.ansible.com/ansible/latest/reference_appendices/config.html
2 |
3 | [defaults]
4 | # Do not create retry files
5 | retry_files_enabled = False
6 |
7 | # action warnings
8 | action_warnings = False
9 |
10 | # vagrant
11 | remote_user = vagrant
12 |
13 | # set default inventory file
14 | inventory = hosts
15 |
16 | # don't ever ask me again (just vagrant)
17 | host_key_checking = False
18 |
19 | # one key to rule them all (actually bad idea)
20 | private_key_file=.ssh/id_rsa
21 |
22 | local_tmp = .tmp
23 |
24 | [ssh_connection]
25 |
26 | # directory for ssh ControlPath sockets
27 | control_path_dir = .cp
28 |
29 | # # enable SSH connection sharing to speed up remote execution
30 | # pipelining = True
31 | # control_path = ~/.ssh/ansible-%%h-%%p-%%r
32 | ssh_args = -o ControlMaster=auto -o ControlPersist=960s -o PreferredAuthentications=publickey
33 |
34 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/hosts:
--------------------------------------------------------------------------------
1 | [all:vars]
2 | ansible_connection=ssh
3 | ansible_host=127.0.0.1
4 |
5 |
6 |
7 | [ubuntu:vars]
8 | ansible_python_interpreter=/usr/bin/python3
9 |
10 | [ubuntu]
11 | ubuntu-lb ansible_port=2222
12 | ubuntu-app-1 ansible_port=2200
13 | ubuntu-app-2 ansible_port=2201
14 | ubuntu-db-master ansible_port=2202
15 | ubuntu-db-slave ansible_port=2203
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/inventory.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DIR=$(pwd | sed "s/\//\\\\\//g")
4 |
5 |
6 | echo "[all:vars]"
7 | echo "ansible_connection=ssh"
8 | echo "ansible_host=127.0.0.1"
9 | echo ""
10 |
11 | vagrant ssh-config | \
12 | grep -iE "Host|Port" | \
13 | grep -ivE "strict|user|name" | \
14 | sed "s/^ //" | \
15 | awk '{
16 | row= ( NR/2 == int(NR/2)) ? NR/2 : int(NR/2)+1;
17 | if ( array[row] == "" ) {
18 | array[row] = $2
19 | } else {
20 | array[row]= array[row] " " $2
21 | }
22 | }
23 | END {
24 | for( i=1; i<= row; i++){
25 | print( array[i] )
26 | }
27 | }
28 | ' | \
29 | awk '{
30 | split( $1, a, /-/)
31 | if ( groups[a[1]] == "" ){
32 |
33 | if ( a[1] == "ubuntu"){
34 | printf("\n\n[%s:vars]\n", a[1])
35 | print("ansible_python_interpreter=/usr/bin/python3\n")
36 | }
37 |
38 |
39 | groups[a[1]] = a[1]
40 | printf("[%s]", a[1])
41 | }
42 | printf "\n%-16s ansible_port=%d", $1, $2
43 | }'
44 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/files/apache.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIEVDCCAzygAwIBAgIJALYqe+iFzCoPMA0GCSqGSIb3DQEBBQUAMHkxCzAJBgNV
3 | BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
4 | aWRnaXRzIFB0eSBMdGQxEjAQBgNVBAMTCWxvY2FsaG9zdDEeMBwGCSqGSIb3DQEJ
5 | ARYPYnV0dXpvdkBtYWRlLnVhMB4XDTE4MDcyNjIxMzAxNFoXDTE5MDcyNjIxMzAx
6 | NFoweTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoT
7 | GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAxMJbG9jYWxob3N0MR4w
8 | HAYJKoZIhvcNAQkBFg9idXR1em92QG1hZGUudWEwggEiMA0GCSqGSIb3DQEBAQUA
9 | A4IBDwAwggEKAoIBAQDeRWo7lsj9pwTsEOMzu+ayAyPU2F8OK51FTROvgHi4ycpJ
10 | Vrx1RNK80ObTnKpUaOGk5sQHY9/Re8Z19hsWTyUL7SsIAFu5Iha66BloK58EpCKD
11 | VDtG5y0nTcBXfNJpfPGbd47R1w0B+elKSa2CfwQO8j8dkU5ARJful92st7VElD4X
12 | cl5ymxt3WCFoYzWUZ7ft5NfROWRltb3xhHVdoCnxL18j0X0v+dw6YX74jroS3qvK
13 | ym9BhOvLQGst9IuvAtbJjO8u6rwcul0cq2FiNNStddHykESFLSX4jf/wA5QqNr83
14 | kFPVf6UNX5yDffEew7aQFYRNlUsv3fXRW/MIdX/9AgMBAAGjgd4wgdswHQYDVR0O
15 | BBYEFNjwREFzaPL0FWJeoPNBrV4sNq5qMIGrBgNVHSMEgaMwgaCAFNjwREFzaPL0
16 | FWJeoPNBrV4sNq5qoX2kezB5MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1T
17 | dGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRIwEAYDVQQD
18 | Ewlsb2NhbGhvc3QxHjAcBgkqhkiG9w0BCQEWD2J1dHV6b3ZAbWFkZS51YYIJALYq
19 | e+iFzCoPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADFw3HwlzDU2
20 | N192SIWUCkV9qWMp3UEGolkPzfdjxnFejXeZMjonnMBt88kYmFEAO94KhVztYDTj
21 | U6RDEdK4sq2cXvzIv6qxbm1STGHawSuYkpGbZe885KD5TKyc6gAviaLXXipi0lSb
22 | /Db+USRK8PrlUvBYSvgX57gRkk+W1iOhJjzOIli4OEy+dGQ8lFXC+ILuZW77CLBI
23 | lzHGOBvTEviIsMWkeSJoYsi+xObk9Y42SKX25yFAUciMN0h3GEhHSEB2c5hzp6Su
24 | AkVfaJMLcqNMx5g4wBAaqx+f8Fx6tro/wlgTj6pq6otMzkD78KsEY2JNClXt2Lyg
25 | jd5ghYUYpwc=
26 | -----END CERTIFICATE-----
27 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/files/apache.key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEpAIBAAKCAQEA3kVqO5bI/acE7BDjM7vmsgMj1NhfDiudRU0Tr4B4uMnKSVa8
3 | dUTSvNDm05yqVGjhpObEB2Pf0XvGdfYbFk8lC+0rCABbuSIWuugZaCufBKQig1Q7
4 | RuctJ03AV3zSaXzxm3eO0dcNAfnpSkmtgn8EDvI/HZFOQESX7pfdrLe1RJQ+F3Je
5 | cpsbd1ghaGM1lGe37eTX0TlkZbW98YR1XaAp8S9fI9F9L/ncOmF++I66Et6ryspv
6 | QYTry0BrLfSLrwLWyYzvLuq8HLpdHKthYjTUrXXR8pBEhS0l+I3/8AOUKja/N5BT
7 | 1X+lDV+cg33xHsO2kBWETZVLL9310VvzCHV//QIDAQABAoIBAQDHKGdxkT/yl46/
8 | fro1hq3E3y9h7aXuaoH2sINtA64n1Rdd80vVv58b9Ap/naa5D2jvep43CPRpHYDL
9 | /YJz0OKe9xlRiBCnp7W6L3Fkd4F6hR8vkW+zrjw1NWOcgImAX6WU6eGcgBZ1qNmx
10 | 6KaBgRG/2qtW8PWufEeuT4Ee21SeZpJU4DddK0yfrHTlU1PKMZdYURFyCADhxElx
11 | t5kguE45ObbAlB3lXX6L0R4uuh3fadf8u3sZQmcgD4WBslQ8kxk7MshHihS0p/7T
12 | fOSUr22vj/4dQ1FXVJsVecl2c+aSegHjDN21MSxBIrBtaW5kSz+uNV+fSqi/yGs5
13 | aoOxdKQBAoGBAPNKBGzJpuX0LhJC7bzzIQwfzLuH1sveTy90Nv9xHuQm1m9tQS3g
14 | wHr+LLe4gepkm81xSXgIaR4NJLm+nvqQtjLNUvVndAm4T765NG0cp38gTeOvVa8S
15 | u1s/8acwgOuHnPqNucOZZFGhYgpA5n9soJwhhRKEsZ36fSzgs8j40fp9AoGBAOni
16 | SHWx+asGAaBfK1ZFMma96yW7uqm3FHqZ7m/PdtujVrIYJvkQGdNy8iFB8xHr3wm9
17 | 5+vFkOGjMI0LhPAQmspN3yAfxv+LPUXvn+YGz00HT66pD6PLuBVCitN1fQoItPhW
18 | LPLNqtx749NtBOLwp35Pm2V2OQtstD1AvhRDexOBAoGAGCySo/yliNPqgAYGeW/v
19 | KLVAMqIcxcUhOSXjER68Yw8433mLsHNUrOP0rYy33AKDOLa5BiRPzZyguZQnV95p
20 | PEMKqzfMX+rggaMFgpIGUqtzEdyqNus1kquX53T6WmzvPDY47yaewT238Cziumag
21 | zuLFxkFJJNlu9radJqsSskECgYByZUqZvpZmj0aGYWY6CRhw5l24lAzl6/VpThcw
22 | uyHgphDGhUeytKxdHzDNEYvJbTwncKqaSITnyED8C/FW1lhPsSz8cm6MGVe7+EuW
23 | YChO/c98l5ZdwEvmoihy61lgagfSG5xamjwDIdm6VO157smN33QbeWrHasDx4NPX
24 | 53WwgQKBgQDrsi+Jb81jLjVXzNH+s724ZdkD9K70JAsV280Hr++raXjWbxnuVbj3
25 | RMKIedNmVLLuiQmASzWTUtr3l6Q5YKqz16LEsFbB63s2i3Sl7iM7Dk/gCnfkrX1E
26 | U8jRdhvZ9TjK9MB3Ll7BOcfglIGossY1O/59V7l1vFWtXdFyM+ml2g==
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/files/made.com.ua.dh.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN DH PARAMETERS-----
2 | MIICCAKCAgEAzu56LoEx/VBt1ksIXsCOEn2q6vfvrwtBoHLJttWRGosS4+S/ZZXp
3 | shih9Gfv0q+lmiyRapBQzj2pB1UcIkjfDnLl8YVX51jyUROPFlriRyiWDtcEp8p9
4 | mH+vtT+mU0hQ9m+xSy3rRFY7It5nogrigGhYoNfhb7orYdjf1YHbmsNhyFVvauAu
5 | beSJiqGjhmBQ9uMBs2Rh5CQqady/HjSOItzKM1pN2V168nTeVPYhwLthQ1Rk4i7f
6 | KkGRdOTEwaNAthjmD0S1aYwEraoqS17EKQQVbpAQsDOInkNYGIZfXt3SLUMbVYfD
7 | jcpOY40fBxGuk2+fYyoAv+6BjBFlxtqIDd3V5EacvhTVM8Rzt7KYPMJ54R+G63M3
8 | fghJH9WxHIA+NGLhYa9siimchwn0HXtrDT+mrQc5HcNasWI+T5Sqz2WZX0PFqPxO
9 | WbRq+ZG01evoHzTF85uUq04xgKDPCKaG3Ypo7t01r5NeyIFR+yv4T630/b7yzFrX
10 | ZcWcKS4gFcYPeQiWryV5+ZvN7eRqHIrL3XjzFiF5Df56mtDGXYmsOhgZkCZqcf1E
11 | aw3B1lxjAk4SBVY2C4PFHzkrCnE11pYVkzootWh5MB8vjO6QlwQ3jICHO909xU1e
12 | BgxRHbeYE02o0gC5lHof4xwBHmlZmByCZd/p3e2VLA2DZW9KXmsrnfMCAQI=
13 | -----END DH PARAMETERS-----
14 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/files/made.com.ua.key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEpAIBAAKCAQEAv/7dlSKl5nT8Vl37tWp8GSKUqjdJY18prMarbeaN6bPOTMjO
3 | h/09pcgSVNR7MXwLupq3vk7mfSQKsWLZdgVEeR3lrTyCAMnT80bcl+7RNzSxP2uv
4 | a5qbBH1spWEGGDR7ibo7iGlQ+8KTW0abT5gXgOlHXAi4RvmqxbP1QD7vNm6tOEvk
5 | jFQPFjTv2kYtUoEmXMKyCa5UmQ21wNSMBumkc8+CVN77mDQsZ1kqM14jHtZEgypX
6 | 8hjPHWM4Xa9VHQhULU14w5AxDMLMG1aDZZbITqTraaDfJFgup6etpNubwwOakO0U
7 | a7hxXX8kTbeKE3a7zPriIbFE9QaE5RWnKcZi3wIDAQABAoIBAFlUH4xv3nhnHbCh
8 | cadqr+qR6FF5/684a49zr/cmOgjuG6ImG9GxxakIeV72rYrMiDh+G9dzplG7Hy6B
9 | K2R59u3TNzVu3Z24SeD6q1AVUUo6Pe2gELIj8NQZ9x7LEabZZFEkUOaa+Y7pbN4e
10 | 78wMm4LjBsyPUg3Tw9zQad9GaM4NyRwox4/Zx3WVqA0XpX4A3JFEe/9MpPuIWLiC
11 | yKcEvcR08lIx0AKHZI7p6l2i+/GUem+j9zPnsXKG4DoJ2UIM02P0Utx0pNFptG9l
12 | NXTjBYdHRZuYifkwk0U8LeWx6AgN/nbG9/M/W2NPruo6JhY6laVZ+zu5oZ8OeV6m
13 | EUEjXWECgYEA9n7VUygxo7ggZUPHTW73d2r/+LvXtoKY57OSBdhMwvOON/1LwrlZ
14 | Pc3nJBWxiYjPJPj+Fz7vh5wDeVScj/A4xm3Qid7KdNeKBkNsxrcKGHU8uFIWv2Ar
15 | WP8FmwkemRF/afR3hAhR7YIu/oRZt3LSshiXdHJyDVVd2VCTHKCgQgMCgYEAx2YP
16 | zr3BTevBelgrO3Rkr/eyKDIPZz7yGW//LmppZGVVJeDtPRDDkPOpORB/np5i531K
17 | YtIiNzL9U38QqRE40q+9fSSVXZaoQGCDXsRw/p1ayu2k4FB2NWvJPL0e/XAlzTvd
18 | HBtWio0BnONRIDvR+tyXAaKUPLBZPm1kFSNBEvUCgYEAwmNlkg25j9Yt3g4kh2FC
19 | UO+V668fP3QgZ2EKgAeDCZ8PAgKEE40d3oqZhLCC8OZuDUc7AlQEi9oyNyXNzglw
20 | jLOfaIrE+H3eAHB9wPwqPv19pBJAlC2gIbAD2K7JbYcKByHeesYg7f/jVZ5ELlKj
21 | 7AVOs2tpnDA9MZrEWSgnGccCgYEArIxdUYJTLKK+oqh5gNGP79ZgjPgE7EtEJU8N
22 | e5UJ7p7huA2UPiurkFvKLF9ikFhvLfL1094TTcEKBp8GQ8drbKY3S3vE5V/h6ROb
23 | mv5S6YIxdzl+W75FDKVhVvYxQw8xJxCOnivqD6tFogXzxfj+FB+fbQLLVFkmYSc4
24 | XDJNtaECgYBtqH5FMY9gYdEWw7IXF4onnMetj79QGikoXWLkpNbz2T08erloBdrb
25 | 49ZS5aKeUuLVp+M5w7KUeRqbpiRcgEBMUIfV+Sf9DzYwHGTp+42sO+Y1lQSkXKF2
26 | P4yvZ+gX6jDEu6XDXe3WqgY7caBpVXtI1Z816tVn4lyOufBwWbvfPQ==
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/play.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Startup"
3 | hosts: all
4 | become: True
5 | roles: [ 'ip' ]
6 |
7 | vars:
8 | environments: [ staging, production ]
9 | env: staging
10 |
11 | vars_files:
12 | - vars/defaults.yml
13 |
14 | # Preloading Variables for Deployment.
15 | pre_tasks:
16 | - name: Checking incoming env var
17 | fail: { msg : "Allowed env value is: {{ environments | join( ' or ' ) }} " }
18 | when: env not in [ 'staging', 'production' ]
19 |
20 | - name: Retrieving Deployment Settings
21 | include_vars: { file: "{{ item }}", name: env_settings }
22 | with_first_found:
23 | - vars/deployment-{{env}}.yml
24 | ignore_errors: True
25 |
26 | - debug: { var : settings }
27 |
28 | - set_fact: { settings : "{{ settings | combine(env_settings) }}" }
29 |
30 | # - debug: { var : settings }
31 | # Extra Roles
32 | - name: "Database"
33 | hosts: "*-db-*"
34 | roles: [ 'db' ]
35 | become: True
36 |
37 | - name: "Backend"
38 | hosts: "*-app-*"
39 | roles: [ 'app' ]
40 | become: True
41 |
42 | - name: "Load Balancers"
43 | hosts: "*-lb"
44 | roles: [ 'lb' ]
45 | become: True
46 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for app
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # enable apache as a service
3 | - name: apache_service_cluster_start
4 | service: { name: "{{ apache.servicename }}", enabled: yes }
5 | notify: [ 'apache start' ]
6 |
7 | - name: apache start
8 | service: { name: "{{ apache.servicename }}", state: started }
9 |
10 | # restarts apache with the new configuration
11 | - name: apache restart
12 | service: { name : "{{ apache.servicename }}", state : restarted }
13 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/tasks/deployment.yml:
--------------------------------------------------------------------------------
1 | # Getting facts from the load balancer again
2 | - name: Group Fact
3 | set_fact:
4 | group : "{{ ansible_distribution | lower }}"
5 |
6 | - name: Host Facts
7 | set_fact:
8 | lb : "{{group}}-lb"
9 | db_master : "{{group}}-db-master"
10 | db_slave : "{{group}}-db-slave"
11 |
12 | - setup:
13 | delegate_to: "{{ item }}"
14 | delegate_facts: True
15 | with_list: [ "{{lb}}", "{{db_master}}", "{{db_slave}}" ]
16 | # "{{ groups[group] }}"
17 |
18 | # - debug : { msg : "{{hostvars[ db_master ].db_settings.users[0] }}" }
19 | # - debug : { msg : "{{hostvars[ db_slave ].db_settings.users[0] }}" }
20 | # with_items: "{{ groups[group] }}"
21 |
22 | # - debug : { var : lb }
23 | # - debug : { var : db_master }
24 | # - debug : { var : db_slave }
25 |
26 | # - debug : { msg : "{{ hostvars[ db_master ].db_settings }}" }
27 | # - debug : { msg : "{{ hostvars[ db_slave ].db_settings }}" }
28 |
29 | # Adding load balancer to hosts file
30 | - name: /etc/hosts
31 | lineinfile:
32 | dest: '/etc/hosts'
33 | line: "{{ hostvars[lb].ansible_local.ip }} {{ settings.domain }}"
34 |
35 | # Cleanup Procedure
36 | - name: Clean Up Local Directories
37 | file: { path: "{{ item.dir }}", state: "{{ item.state }}" }
38 | with_items:
39 | - { dir : "{{ apache.html_root }}", state: 'absent' }
40 | - { dir : "{{ apache.cert_root }}", state: 'absent' }
41 | - { dir : "{{ apache.html_root }}", state: 'directory' }
42 | - { dir : "{{ apache.cert_root }}", state: 'directory' }
43 |
44 |
45 | # Copying Certificates
46 | - name: Copying Certificates
47 | copy:
48 | src: "{{playbook_dir}}/{{ item.value }}"
49 | dest: "{{ apache.cert_root }}/{{ item.value | basename }}"
50 | with_dict: "{{ settings.localhost_certs }}"
51 |
52 |
53 | # - debug: { var : apache }
54 | # - debug: { var : settings.localhost_certs }
55 |
56 | # Creating Apache Configuration
57 | - name: application deployment
58 | template: { src: "{{ item.src }}", dest: "{{ item.dest }}"}
59 | notify: [ "apache restart" ]
60 | with_items:
61 | - { src: 'httpd.conf.j2', dest: "{{ apache.site_conf_root }}httpd.conf" }
62 | - { src: 'index.php.j2', dest: "{{ apache.html_root }}index.php" }
63 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/tasks/install-centos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install lamp
3 | yum: { name : "{{ item }}", state : latest }
4 | with_items:
5 | - httpd
6 | - mod_ssl
7 | - openssl
8 | - httpd-tools
9 | - php
10 | - php-xdebug
11 | - php-mysql
12 | notify:
13 | - apache_service_cluster_start
14 |
15 | - name: SElinux
16 | selinux: { policy: targeted, state: permissive }
17 | with_items:
18 | # - 'httpd_can_network_connect'
19 | - 'httpd_can_network_connect_db'
20 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/tasks/install-ubuntu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install lamp
3 | apt: { name : "{{ item }}", state : latest }
4 | with_items:
5 | - apache2
6 | - php
7 | - libapache2-mod-php
8 | - php-mysql
9 | - php-xdebug
10 | notify:
11 | - apache_service_cluster_start
12 |
13 | - name: enable ssl
14 | shell: "a2enmod ssl"
15 |
16 |
17 | - name: phpini debug
18 | lineinfile:
19 | dest: "/etc/php/7.0/apache2/php.ini"
20 | line: "{{item.line}}"
21 | regexp: "{{item.regexp}}"
22 | with_items:
23 | - { line: "display_errors = On", regexp : '^display_errors(.*)' }
24 | notify:
25 | - apache_service_cluster_start
26 |
27 | # used for debug purposes
28 | - name: Install MySQL
29 | apt: { name : "{{ item }}", state: present }
30 | with_items:
31 | - "mysql-client-core-5.7"
32 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Failing if OS isn't CentOS or Ubuntu
3 | - name: Fail Fast (OS Prerequisite Check)
4 | fail: { msg : "{{ ansible_distribution }} isn't supported atm" }
5 | when: ansible_distribution not in [ 'CentOS', 'Ubuntu' ]
6 |
7 | # Get Deployment Settings
8 | - name: Get Settings
9 | include_tasks: "settings.yml"
10 |
11 | # Actual Web App Environment
12 | - name: Actual Apache and PHP install
13 | include_tasks: "install-{{ ansible_distribution | lower }}.yml"
14 |
15 | # Gathering and Creating
16 | # web application configuration
17 |
18 | - name: Deploy Application Configuration
19 | include_tasks: "deployment.yml"
20 |
21 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/tasks/settings.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Load OS Specific Apache Configuration
4 | include_vars: { file: "{{ item }}", name: os_apache_settings }
5 | with_first_found:
6 | - "{{ ansible_distribution | lower }}.yml"
7 |
8 | - name: Merging OS Specific Apache Settings with Defaults
9 | set_fact: { apache : "{{ apache | combine(os_apache_settings) }}" }
10 |
11 | # - debug : { var : settings }
12 | # - debug : { var : apache }
13 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/templates/httpd.conf.j2:
--------------------------------------------------------------------------------
1 |
2 | ServerName localhost
3 |
4 |
5 | SSLEngine on
6 | SSLCertificateFile "{{ apache.cert_root }}/{{ settings.localhost_certs.ssl_certificate | basename }}"
7 | SSLCertificateKeyFile "{{ apache.cert_root }}/{{ settings.localhost_certs.ssl_certificate_key | basename }}"
8 |
9 |
10 | DocumentRoot {{ apache.html_root }}
11 | DirectoryIndex index.php
12 |
13 |
14 | Options +Indexes +FollowSymLinks
15 | AllowOverride All
16 | Require all granted
17 |
18 |
19 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/templates/index.php.j2:
--------------------------------------------------------------------------------
1 |
6 |
7 |
8 |
9 | array(
13 | 'DB_USER' => '{{ hostvars[db_master].db_settings.users[0].username }}',
14 | 'DB_PASS' => '{{ hostvars[db_master].db_settings.users[0].password }}',
15 | 'DB_NAME' => '{{ hostvars[db_master].db_settings.name }}',
16 | 'DB_HOST' => '{{ hostvars[db_master].ansible_local.ip }}',
17 | 'DB_PORT' => 3306,
18 | ),
19 | 'slave' => array(
20 | 'DB_USER' => '{{ hostvars[db_slave].db_settings.users[0].username }}',
21 | 'DB_PASS' => '{{ hostvars[db_slave].db_settings.users[0].password }}',
22 | 'DB_NAME' => '{{ hostvars[db_slave].db_settings.name }}',
23 | 'DB_HOST' => '{{ hostvars[db_slave].ansible_local.ip }}',
24 | 'DB_PORT' => 3306,
25 | ),
26 | );
27 |
28 | var_dump( $db );
29 |
30 | $links = array();
31 | foreach( array_keys( $db ) as $server ){
32 | $links[ $server ] = new mysqli(
33 | $db[ $server ]['DB_HOST'],
34 | $db[ $server ]['DB_USER'],
35 | $db[ $server ]['DB_PASS'],
36 | $db[ $server ]['DB_NAME'],
37 | $db[ $server ]['DB_PORT']
38 | );
39 | if ( $links[ $server ]->connect_errno ) {
40 | echo "Failed to connect to MySQL: (" . $links[ $server ]->connect_errno . ") " . $links[ $server ]->connect_error;
41 | }
42 | }
43 |
44 | $ip = $_SERVER['SERVER_ADDR'];
45 | # running updates
46 | $query = "INSERT INTO visits (ip) VALUES ('{$ip}') ON DUPLICATE KEY UPDATE visits = visits+1";
47 |
48 | $res = $links['master']->query($query);
49 |
50 | array_map( 'results', array_keys( $db ) );
51 |
52 | function results( $type ){
53 | global $links;
54 |
55 | $title = sprintf("<h2>Query to %s</h2>", $type);
56 | $header = '<tr><th>IP</th><th>Count</th></tr>';
57 | $rows = array();
58 |
59 | $res = $links[ $type ]->query("SELECT * FROM `visits`");
60 | while ($row = $res->fetch_assoc()) {
61 | $rows[] = sprintf('<tr><td>%s</td><td>%d</td></tr>', $row['ip'], $row['visits']);
62 | }
63 |
64 | echo sprintf( '%s<table>%s%s</table>', $title, $header, implode("", $rows));
65 | }
66 |
67 |
68 | ?>
69 |
70 |
71 |
72 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/vars/centos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # this is conf default for CentOS
3 | servicename: httpd
4 |
5 | site_conf_root: /etc/httpd/conf.d/
6 |
7 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apache:
3 | # this is conf default for CentOS
4 | servicename: httpd
5 |
6 | site_conf_root: /etc/httpd/conf.d/
7 |
8 |
9 | # this is conf default for CentOS and Ubuntu
10 | html_root: /var/www/html/
11 | cert_root: /var/www/certs/
12 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/app/vars/ubuntu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # this is conf default for Ubuntu
3 | servicename: apache2
4 |
5 | site_conf_root: /etc/apache2/sites-enabled/
6 |
7 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for sample
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/files/tables.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS `visits` (
2 | `ip` varchar(15) NOT NULL,
3 | `visits` int(11) NOT NULL DEFAULT '1',
4 | UNIQUE KEY `ip` (`ip`)
5 | ) DEFAULT CHARSET=utf8;
6 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # enable mysql as a service
3 | - name: mysql_service_cluster_start
4 | service: { name: "{{ db_settings.service}}", enabled: yes }
5 | notify: [ 'mysql start' ]
6 |
7 | - name: mysql start
8 | service: { name: "{{ db_settings.service}}", state: started }
9 |
10 | # restarts mysql with new configuration
11 | - name: mysql restart
12 | service: { name : "{{ db_settings.service}}", state : restarted }
13 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Including IP mapping for quicker IP Access on deployment servers
3 | dependencies: [ 'ip' ]
4 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tasks/create-table-and-users.yml:
--------------------------------------------------------------------------------
1 | - name: create db for app
2 | mysql_db: { name: "{{settings.db.name}}", state: present }
3 | register: dbname
4 | - debug : { var : dbname }
5 |
6 | - name: restore database (part 1 - copy sql dump)
7 | copy: { src: "tables.sql" , dest: "/tmp/tables.sql" }
8 | when: dbname.changed == True
9 |
10 | - name: restore database (part 2 - import sql dump)
11 | mysql_db: { name: "{{settings.db.name}}", state: import, target: "/tmp/tables.sql" }
12 | when: dbname.changed == True
13 |
14 | - mysql_user:
15 | name : "{{ item.username }}"
16 | host : "%"
17 | priv: '*.*:ALL'
18 | password : "{{ item.password }}"
19 | state: present
20 | with_items: "{{ db_settings.users | default([]) }}"
21 | when: db_settings.users is defined
22 | notify: [ 'mysql_service_cluster_start' ]
23 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tasks/install-centos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install MySQL Community Repo
3 | yum: { name : "{{ item }}", state: present }
4 | with_items:
5 | - "http://dev.mysql.com/get/mysql57-community-release-el7-8.noarch.rpm"
6 |
7 | - name: Install MySQL
8 | yum: { name : "{{ item }}", state: present }
9 | with_items:
10 | - mysql-community-server
11 | - mysql-community-client
12 | - MySQL-python
13 | register: mysql_install
14 | notify: [ 'mysql_service_cluster_start' ]
15 |
16 | - name: Initial MySQL Start
17 | service: { name: 'mysqld', state: 'started' }
18 | when: mysql_install.changed == True
19 |
20 | # SET GLOBAL validate_password_policy=LOW
21 | - name: Checker
22 | shell: |
23 | TMP_PASS=$(cat {{db_settings.logs_path}}mysqld.log | \
24 | grep "temporary password" | \
25 | grep -Eoi ": (.*?)" | \
26 | sed "s/: //");
27 | mysql -uroot -p$TMP_PASS --connect-expired-password -e "SET GLOBAL validate_password_policy=LOW; ALTER USER 'root'@'localhost' IDENTIFIED BY '{{ settings.db.root_password }}'; uninstall plugin validate_password;";
28 | when: mysql_install.changed == True
29 | notify: [ 'mysql restart' ]
30 |
31 | - name: /root/.my.cnf
32 | template: { src: 'my.cnf.j2', dest: '/root/.my.cnf' }
33 | when: mysql_install.changed == True
34 | notify: [ 'mysql restart' ]
35 |
36 | - name: SElinux
37 | selinux: { policy: targeted, state: permissive }
38 | with_items:
39 | - 'httpd_can_network_connect'
40 | - 'httpd_can_network_connect_db'
41 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tasks/install-ubuntu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # - name: UN Install MySQL
3 | # apt: { name : "{{ item }}", state: absent }
4 | # with_items:
5 | # - mysql-server-5.7
6 |
7 | - name: Install MySQL
8 | apt: { name : "{{ item }}", state: present }
9 | with_items:
10 | - mysql-server-5.7
11 | - python3-mysqldb
12 | register: mysql_install
13 | notify: [ 'mysql_service_cluster_start' ]
14 |
15 | - name: Update MySQL root password for all root accounts
16 | mysql_user:
17 | name : root
18 | host : "{{ item }}"
19 | password : "{{ db_settings.root_password }}"
20 | state: present
21 | with_items:
22 | - 127.0.0.1
23 | - ::1
24 | - localhost
25 | when: mysql_install.changed == True
26 | notify: [ 'mysql_service_cluster_start' ]
27 |
28 | # password update
29 | - name: /root/.my.cnf
30 | template: { src: 'my.cnf.j2', dest: '/root/.my.cnf' }
31 | when: mysql_install.changed == True
32 |
33 | - name: Ensure Anonymous user(s) are not in the database
34 | mysql_user: { name : "", host: "{{ item }}", state: absent }
35 | with_items:
36 | - localhost
37 | - 127.0.0.1
38 | - ::1
39 | when: mysql_install.changed == True
40 | notify: [ 'mysql_service_cluster_start' ]
41 |
42 |
43 | - name: Remove bind-address so MySQL listens on all interfaces
44 | lineinfile:
45 | dest: "{{db_settings.conf_path}}/mysql.conf.d/mysqld.cnf"
46 | line: "{{item.line}}"
47 | regexp: "{{item.regexp}}"
48 | state: absent
49 | with_items:
50 | - { line: "bind-address = 0.0.0.0", regexp : '^bind-address(.*)' }
51 | notify: [ 'mysql restart' ]
52 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Failing if OS isn't CentOS or Ubuntu
3 | - name: Fail Fast (OS Prerequisite Check)
4 | fail: { msg : " {{ansible_os_family}} {{ ansible_distribution }} isn't supported atm" }
5 | when: ansible_distribution not in [ 'CentOS', 'Ubuntu' ]
6 |
7 | # Get DB Settings
8 | - name: DB Settings
9 | include_tasks: settings.yml
10 |
11 | # Install MYSQL
12 | - name: MySQL Installation
13 | include_tasks: "install-{{ ansible_distribution | lower }}.yml"
14 |
15 | # Create tables and users
16 | - name: Tables and Users
17 | include_tasks: create-table-and-users.yml
18 |
19 | # Configure replication
20 | - name: Replication
21 | include_tasks: replication.yml
22 |
23 | # - debug: { var : db_settings }
24 |
25 | # - debug: { msg : "slave {{ settings.db.root_password }}" }
26 | # - debug: { msg : "slave {{ db_settings.root_password }}" }
27 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tasks/replication-master.yml:
--------------------------------------------------------------------------------
1 | - name: GENERATE_MASTER_CONFIG
2 | lineinfile:
3 | dest: "{{db_settings.my_config}}"
4 | line: "{{item.line}}"
5 | regexp: "{{item.regexp}}"
6 | with_items:
7 | - { line: "server-id=1", regexp : '^server-id(.*)' }
8 | - { line: "log-bin=mysql-bin", regexp : '^log-bin' }
9 | - { line: "binlog-format=row", regexp : '^binlog-format=' }
10 | - { line: "bind-address={{ ansible_facts.ansible_local.ip}}", regexp : '^bind-address(.*)' }
11 | - { line: "binlog_do_db='{{db_settings.name}}'", regexp : '^binlog\_do\_db(.*)' }
12 |
13 | - name: mysql restart
14 | service: { name : "{{ db_settings.service}}", state : restarted }
15 |
16 |
17 | - name: Master Data
18 | mysql_replication:
19 | mode: getmaster
20 | register: master_status
21 |
22 | - debug : { var : master_status }
23 |
24 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tasks/replication-slave.yml:
--------------------------------------------------------------------------------
1 | # This will only work once on Ubuntu.
2 | # Not sure yet what the CentOS equivalent should be.
3 |
4 | - name: GENERATE_SLAVE_CONFIG
5 | lineinfile:
6 | dest: "{{db_settings.my_config}}"
7 | line: "{{item.line}}"
8 | regexp: "{{item.regexp}}"
9 | with_items:
10 | - { line: "server-id=2", regexp : '^server-id(.*)' }
11 | - { line: "bind-address=0.0.0.0", regexp : '^bind-address(.*)' }
12 | - { line: "relay-log={{ db_settings.logs_path }}mysql-relay-bin.log", regexp : '^relay-log(.*)' }
13 | - { line: "log-bin=mysql-bin", regexp : '^log-bin' }
14 | - { line: "replicate_do_db='{{db_settings.name}}'", regexp : '^replicate\_do\_db(.*)' }
15 |
16 | - name: MYSQL_VARIABLES | Set read only
17 | mysql_variables: { variable: read_only, value: 'ON' }
18 |
19 | - name: mysql restart
20 | service: { name : "{{ db_settings.service}}", state : restarted }
21 |
22 | - mysql_replication:
23 | mode: stopslave
24 | register: slave_status
25 |
26 | - name: Slave Status
27 | debug : { var : slave_status }
28 |
29 | - mysql_replication:
30 | mode: getslave
31 | register: slave_status
32 | - debug : { var : slave_status }
33 |
34 | - name: Set MySQL Replication Master
35 | mysql_replication:
36 | mode: changemaster
37 | master_host: '{{ hostvars[master].ansible_local.ip}}'
38 | master_user: '{{ db_settings.users[0].username }}'
39 | master_password: '{{ db_settings.users[0].password }}'
40 | master_log_file: '{{ hostvars[master].master_status.File }}'
41 | master_log_pos: '{{ hostvars[master].master_status.Position }}'
42 | register: changedmaster
43 | when: slave_status.Is_Slave == False
44 |
45 | - mysql_replication:
46 | mode: startslave
47 | register: slave_status
48 | when: changedmaster.changed == True
49 |
50 | - name: Slave Status
51 | debug : { var : slave_status }
52 |
53 |
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tasks/replication.yml:
--------------------------------------------------------------------------------
1 | # where am i?
2 | - set_fact:
3 | slave : "{{ group_names[0] }}-db-slave"
4 | master : "{{ group_names[0] }}-db-master"
5 |
6 | - name: get facts
7 | setup:
8 | delegate_to: "{{ master if inventory_hostname.find('slave') > -1 else slave }}"
9 | delegate_facts: True
10 |
11 | - name: Running Master Configuration
12 | include_tasks: replication-master.yml
13 | when: inventory_hostname.find('master') > -1
14 |
15 | - name: Running Slave Configuration
16 | include_tasks: replication-slave.yml
17 | when: inventory_hostname.find('slave') > -1
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 | # - name: Master Data
35 | # mysql_replication:
36 | # mode: startslave
37 | # register: slave_status
38 | # when: inventory_hostname.find('slave') > -1
39 |
40 | # - debug : { var : slave_status }
41 | # when: inventory_hostname.find('slave') > -1
42 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tasks/settings.yml:
--------------------------------------------------------------------------------
1 | # Loading configuration of our environment
2 | - name: Load OS Specific MySQL Configuration
3 | include_vars: { file: "{{ item }}", name: os_db_settings }
4 | with_first_found:
5 | - "{{ ansible_distribution | lower }}.yml"
6 |
7 | - name: Merging OS Specific DB Settings with Defaults
8 | set_fact: { db_settings : "{{ db_settings | combine(os_db_settings) }}" }
9 |
10 | - name: Merging Custom DB Settings with Defaults
11 | set_fact: { db_settings : "{{ db_settings | combine(settings.db) }}" }
12 | when: settings.db is defined
13 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/templates/my.cnf.j2:
--------------------------------------------------------------------------------
1 | [client]
2 | user="root"
3 | password="{{ settings.db.root_password }}"
4 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - sample
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/vars/centos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | my_config: /etc/my.cnf
3 | logs_path: /var/log/
4 | service: mysqld
5 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | db_settings:
3 | root_password : "Yo_Soy_Groot"
4 |
5 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/db/vars/ubuntu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | my_config: /etc/mysql/mysql.conf.d/mysqld.cnf
3 | conf_path: /etc/mysql/
4 | logs_path: /var/log/mysql/
5 |
6 | service: mysql
7 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/epel/README.md:
--------------------------------------------------------------------------------
1 | epel
2 | =========
3 |
4 |
5 | Installs epel-latest for centos/7
6 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/epel/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install
3 | yum: { name: 'epel-release', state: latest }
4 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/ip/readme.md:
--------------------------------------------------------------------------------
1 | # Readme
2 |
3 | Stores a single IP address as a local Ansible fact (`ansible_local.ip`) so each server can be referenced by one consistent address.
4 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/ip/tasks/generate_ip_fact.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create custom fact directory
3 | file: { path: "/etc/ansible/facts.d", state: "directory" }
4 |
5 | - name: Store Fact Locally
6 | copy:
7 | content: "\"{{ansible_facts.all_ipv4_addresses|select('match', '^172')|list|join(',')}}\""
8 | dest: /etc/ansible/facts.d/ip.fact
9 | mode: 0644
10 |
11 | - name: Refreshing Facts for "{{ inventory_hostname }}"
12 | setup :
13 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/ip/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate IP Fact (IP Fact Not Exists)
3 | include_tasks: generate_ip_fact.yml
4 | when: ansible_facts.ansible_local.ip is not defined
5 |
6 | - name: Generate IP Fact (IP Changed)
7 | include_tasks: generate_ip_fact.yml
8 | when: ansible_facts.ansible_local.ip not in ansible_facts.all_ipv4_addresses
9 |
10 | # - name: Generate IP Fact
11 | # debug: { msg : "{{ inventory_hostname }} - {{ ansible_facts.ansible_local.ip }}" }
12 | # when: ansible_facts.ansible_local.ip is defined
13 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/README.md:
--------------------------------------------------------------------------------
1 | Nginx
2 | =========
3 |
4 | Sets up an nginx load balancer with a domain and upstream servers (works on Ubuntu 16 / CentOS 7).
5 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # enable and start nginx
3 | - name: nginx service cluster
4 | service: { name: nginx, enabled: yes }
5 | notify:
6 | - nginx start
7 |
8 | # start nginx
9 | - name: nginx start
10 | service: { name: nginx, state: started }
11 |
12 |
13 | # restarts nginx with new configuration
14 | - name: restart nginx
15 | service: { name : nginx, state : restarted }
16 |
17 |
18 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/meta/main.yml:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - { role: ip }
3 | - { role: epel, when: ansible_distribution == "CentOS" }
4 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/tasks/configuration.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Deleting and re-creating directories - this way we can
3 | # ensure that each directory is empty.
4 | - name: Clean Up Local Directories
5 | file: { path: "{{ item.dir }}", state: "{{ item.state }}" }
6 | with_items:
7 | - { dir : "{{ nginx.html_root }}", state: 'absent' }
8 | - { dir : "{{ nginx.cert_root }}", state: 'absent' }
9 | - { dir : "{{ nginx.html_root }}", state: 'directory' }
10 | - { dir : "{{ nginx.cert_root }}", state: 'directory' }
11 |
12 | # - debug : { var : settings.nginx_certs }
13 |
14 | # Copying
15 | - name: Copying Certificates
16 | copy:
17 | src: "{{playbook_dir}}/{{ item.value }}"
18 | dest: "{{ nginx.cert_root }}/{{ item.value | basename }}"
19 | with_dict: "{{ settings.nginx_certs }}"
20 |
21 | # gathering facts per OS group so each OS is configured separately
22 | - name: facts
23 | set_fact: { group : "{{ ansible_distribution | lower }}" }
24 |
25 | - setup:
26 | delegate_to: "{{ item }}"
27 | delegate_facts: True
28 | with_items: "{{ groups[group] }}"
29 |
30 | # - debug : { var : "{{ hostvars['centos-lb'] }}" }
31 |
32 | # Creating Nginx Configuration
33 | - name: configuring nginx
34 | template: { src : 'nginx.conf.j2', dest: "{{ nginx.configuration }}" }
35 | notify:
36 | - restart nginx
37 |
38 | - name: /etc/hosts
39 | lineinfile: { dest: /etc/hosts, line: "127.0.0.1 {{ settings.domain }}" }
40 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/tasks/install-centos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Installing nginx from the EPEL repository
3 | - name: Nginx Installation (yum)
4 | yum: { name: nginx, state: present, enablerepo: epel }
5 | notify:
6 | - nginx service cluster
7 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/tasks/install-ubuntu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Installing latest package of nginx
3 | - name: Nginx Installation (apt-get)
4 | apt: { name: nginx, state: present }
5 | notify:
6 | - nginx service cluster
7 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Failing if OS isn't CentOS or Ubuntu
3 | - name: Fail Fast (OS Prerequisite Check)
4 | fail: { msg : " {{ansible_os_family}} {{ ansible_distribution }} isn't supported atm" }
5 | when: ansible_distribution not in [ 'CentOS', 'Ubuntu' ]
6 |
7 | - name: Load OS Specific Nginx Configuration
8 | include_vars: { file: "{{ item }}", name: os_nginx_settings }
9 | with_first_found:
10 | - "{{ ansible_distribution | lower }}.yml"
11 |
12 | - name: Merging OS Specific nginx Settings with Defaults
13 | set_fact: { nginx : "{{ nginx | combine(os_nginx_settings) }}" }
14 |
15 | # - debug: { var : nginx }
16 | # - debug: { var : settings }
17 |
18 | # Install nginx
19 | - name: Nginx Installation
20 | include_tasks: "install-{{ ansible_distribution | lower }}.yml"
21 |
22 | # - debug : { var : settings }
23 | # - debug : { var : nginx }
24 |
25 | - name: Configuring nginx
26 | include_tasks: "configuration.yml"
27 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/templates/nginx-ssl.conf.j2:
--------------------------------------------------------------------------------
1 |
2 | {% for item in settings.nginx_certs %}
3 | {{item}} {{ nginx.cert_root }}/{{settings.nginx_certs[item] | basename }};
4 | {% endfor %}
5 |
6 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
7 | ssl_prefer_server_ciphers on;
8 | ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
9 |
10 | # Enable SSL on all domains - you may also want to enable this on a per-site
11 | # basis instead if you are supporting multiple virtual hosts.
12 | ssl on;
13 |
14 | # Cache SSL sessions for 10m (this is about 40,000 sessions), timing them out
15 | # after 24 hours.
16 | ssl_session_timeout 1d;
17 | ssl_session_cache shared:SSL:5m;
18 |
19 | # Set the buffer size to 1400 bytes (that way it fits into a single MTU).
20 | ssl_buffer_size 1400;
21 |
22 | # OCSP Stapling
23 | #
24 | # When connecting to a server, clients should verify the validity of the server certificate
25 | # using either a Certificate Revocation List (CRL), or an Online Certificate Status Protocol (OCSP) record.
26 | ssl_stapling on;
27 | ssl_stapling_verify on;
28 |
29 | resolver 8.8.4.4 8.8.8.8 valid=300s;
30 | resolver_timeout 10s;
31 |
32 | # Enable HSTS
33 | add_header 'Strict-Transport-Security' 'max-age=31536000; includeSubDomains; preload';
34 |
35 | # Do not allow this site to be displayed in iframes
36 | add_header 'X-Frame-Options' 'DENY';
37 |
38 | # Do not permit Content-Type sniffing.
39 | add_header 'X-Content-Type-Options' 'nosniff';
40 |
41 | add_header 'Access-Control-Allow-Origin' '*';
42 | add_header 'Access-Control-Allow-Credentials' 'true';
43 | add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
44 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/templates/nginx.conf.j2:
--------------------------------------------------------------------------------
1 | user {{ nginx.user | default('nginx') }};
2 | worker_processes {{ nginx.worker_processes | default('auto') }};
3 |
4 | pid /run/nginx.pid;
5 |
6 | events {
7 | worker_connections 1024;
8 | }
9 |
10 | http {
11 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
12 | '$status $body_bytes_sent "$http_referer" '
13 | '"$http_user_agent" "$http_x_forwarded_for"';
14 |
15 | access_log /var/log/nginx/access.log main;
16 | error_log /var/log/nginx/error.log;
17 |
18 | sendfile on;
19 | tcp_nopush on;
20 | tcp_nodelay on;
21 | keepalive_timeout 65;
22 | types_hash_max_size 2048;
23 |
24 | include /etc/nginx/mime.types;
25 | default_type application/octet-stream;
26 | # include /etc/nginx/conf.d/*.conf;
27 |
28 | upstream backend {
29 | {% for host in groups[ group ] if "-app-" in host %}
30 | server {{ hostvars[ host ].ansible_local.ip }}:443;
31 | {% endfor %}
32 | }
33 |
34 | server {
35 | {% if settings.ssl and settings.domain %}
36 |
37 | listen 443 ssl http2 default_server;
38 | listen 80 default_server;
39 |
40 | {% else %}
41 | listen 80 default_server;
42 | {% endif %}
43 |
44 | server_name {{ settings.domain | default('_') }};
45 |
46 | {% if settings.ssl and settings.nginx_certs %}
47 | {% include 'nginx-ssl.conf.j2' %}
48 | {% endif %}
49 |
50 | root {{ nginx.html_root }};
51 |
52 | location / {
53 | proxy_pass https://backend;
54 | }
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/vars/Ubuntu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Users
3 | user: www-data
4 | worker_processes: 4
5 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/vars/centos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Users
3 | user: nginx
4 | worker_processes: auto
5 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/roles/lb/vars/main.yml:
--------------------------------------------------------------------------------
1 |
2 | ---
3 | nginx:
4 | # this is conf default for CentOS and Ubuntu
5 | html_root: /usr/share/nginx/html
6 | cert_root: /usr/share/nginx/cert
7 |
8 | # Users
9 | user: nginx
10 | worker_processes: auto
11 |
12 | configuration: /etc/nginx/nginx.conf
13 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/vars/defaults.yml:
--------------------------------------------------------------------------------
1 | ---
2 | settings:
3 | domain: dev.made.com.ua
4 |
5 | env_vars:
6 | env: "{{env | upper}}"
7 |
8 | db:
9 | name: 'visits'
10 | root_password: '-f3G3nQeB3T7eB#D'
11 |
12 | ssl: on
13 |
14 | localhost_certs:
15 | ssl_certificate: files/apache.crt
16 | ssl_certificate_key: files/apache.key
17 |
18 | nginx_certs:
19 | ssl_certificate: files/made.com.ua.crt
20 | ssl_certificate_key: files/made.com.ua.key
21 | ssl_dhparam: files/made.com.ua.dh.pem
22 |
23 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/vars/deployment-production.yml:
--------------------------------------------------------------------------------
1 | ---
2 | env_vars:
3 | env: PRODUCTION
4 |
5 | db:
6 | name: 'visits_production'
7 | root_password: '-f3G3nQeB3T7eB#D'
8 |
9 |
10 | users:
11 | - { username: 'app_user', password : 'Iwi11NeV3Re@3' }
12 | # - { username: 'app_user', password : 'Iwi11NeV3RevEeU$3defaultPA$$WORDS' }
13 | # - { username: 'ecommerce-1', password : 'P1a$$wd1' }
14 | # - { username: 'ecommerce-2', password : 'P1a$$wd2' }
15 | # - { username: 'ecommerce-3', password : 'P1a$$wd3' }
16 | # - { username: 'ecommerce-4', password : 'P1a$$wd4' }
17 | # - { username: 'ecommerce-5', password : 'P1a$$wd5' }
18 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/play-books/vars/deployment-staging.yml:
--------------------------------------------------------------------------------
1 | ---
2 | env_vars:
3 | env: STAGING
4 |
5 | db:
6 | name: 'visits_stage'
7 | root_password: '-f3G3nQeB3T7eB#D'
8 |
9 | users:
10 | - { username: 'app_user', password : 'password' }
11 |
--------------------------------------------------------------------------------
/011-Ansible-3-Tier-App-v2/readme.md:
--------------------------------------------------------------------------------
1 | # TODO: 3-Tier Web App
2 |
3 | This is a 3-tier web app demo for Ansible-based deployment.
4 |
5 | ### What it does
6 |
7 | - Deploys a development/production environment for a web application
8 | - SSL-based (uses pre-generated wildcard certificates for made.com.ua)
9 | - Target Platforms: CentOS 7 / Ubuntu 16.
10 | - Virtualization: Vagrant
11 |
12 | ### Issues
13 | - MySQL Master/Slave replication for CentOS/7 isn't finished.
14 | - MySQL Master/Slave replication for Ubuntu/16 works just once.
15 |
16 | I haven't spent much time figuring out the proper way to implement master/slave replication.
17 |
18 | # Run it.
19 | ```bash
20 | vagrant up
21 | ./inventory > hosts
22 | ansible-playbook play-books/play.yml -e env=production -l "ubuntu*"
23 | ansible-playbook play-books/play.yml -e env=staging -l "centos*"
24 | ```
25 |
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/app/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import time, sys, json
3 | import sqlite3
4 | import os, re
5 |
6 | from flask import Flask, session, url_for, render_template, redirect, request
7 |
8 |
9 | app = Flask(__name__)
10 |
11 | def db_conn():
12 | dbname = "data/names.db"
13 | rv = sqlite3.connect(dbname)
14 | rv.row_factory = sqlite3.Row
15 | stat = os.stat(dbname)
16 | if stat.st_size == 0:
17 | with app.open_resource('../data/schema.sql', mode='r') as f:
18 | rv.cursor().executescript(f.read())
19 | rv.commit()
20 | return rv
21 |
22 | @app.after_request
23 | def add_header(r):
24 | r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
25 | r.headers["Pragma"] = "no-cache"
26 | r.headers["Expires"] = "0"
27 | r.headers['Cache-Control'] = 'public, max-age=0'
28 | return r
29 |
30 | @app.route('/')
31 | def index():
32 | return render_template('index.tpl.j2')
33 |
34 | @app.route('/list')
35 | def show(dateStart = None, dateEnd = None):
36 |
37 |
38 | dates = dict({
39 | 'start': request.args.get('start', None),
40 | 'end': request.args.get('end', None)
41 | })
42 |
43 | for dkey, dvalue in dates.items():
44 | if dvalue is not None and re.match( '^\d{4}-\d{2}-\d{2}$', dvalue) is None:
45 | dates[dkey] = None
46 |
47 |
48 |
49 | where = ""
50 | if dates.get('start') is not None and dates.get('end') is not None:
51 | where = "WHERE strftime('%Y-%m-%d', time) BETWEEN '{}' AND '{}'".format( dates.get('start'), dates.get('end'))
52 |
53 | db = db_conn()
54 | sql = f'SELECT name, time FROM names {where} ORDER BY time DESC'
55 | cur = db.execute(sql )
56 |
57 | return render_template('list.tpl.j2', dates=dates, data=cur.fetchall())
58 |
59 | @app.route('/add', methods=['GET', 'POST'])
60 | def add():
61 |
62 | if request.method == 'POST':
63 | if len(request.form["name"]) > 3:
64 | # insert
65 | db = db_conn()
66 | db.execute('insert into main.names (name, time) values (?, DATETIME("now"))',
67 | [request.form["name"]])
68 | db.commit()
69 | return redirect( url_for( 'add', ok="name-added" ) )
70 | # return json.dumps(request.form["name"])
71 |
72 | return redirect( url_for( 'add', error="empty-or-small-name" ) )
73 |
74 | return render_template('add.tpl.j2')
75 |
76 |
77 | if __name__ == '__main__':
78 | # db_conn()
79 | app.run(debug=True, host='0.0.0.0')
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/app/static/style.css:
--------------------------------------------------------------------------------
1 | html, body {
2 | margin:0;
3 | padding:0;
4 | }
5 |
6 | body {
7 | width:80%;
8 | margin:0 auto;
9 | }
10 |
11 | header { padding:50px; }
12 | header nav { font-size: 24px; text-align: center; }
13 | header nav a { text-decoration: none ; color:#000; display: inline-block; margin:0 30px;}
14 | header nav a.active { color:#404040; text-decoration: underline }
15 | header nav a:hover { text-decoration: underline }
16 |
17 | hr {
18 | border:0
19 | botder-bottom:1px solid #404040;
20 | }
21 |
22 | /* form */
23 | input[type=text] {
24 | width:100%;
25 | padding:5px;
26 | font-size:24px;
27 | border:1px solid #404040;
28 | border-radius:5px;
29 | text-align: center
30 | }
31 |
32 | input[type=text]:focus {
33 | outline: 0;
34 | }
35 |
36 | .notice {
37 | padding:10px 20px;
38 | text-align: center;
39 | font-size:18px;
40 | border:3px solid transparent;
41 | border-radius:4px;
42 | margin-bottom: 20px;
43 | }
44 |
45 | .notice.error {
46 | color:#f00;
47 | border-color:#f00;
48 | }
49 |
50 | .notice.ok {
51 | color:#0f0;
52 | border-color:#0f0;
53 | }
54 |
55 | /* list */
56 |
57 | dl {
58 | display: grid;
59 | grid-template-columns: auto 1fr;
60 | grid-column-gap: 1em;
61 | grid-row-gap: 0.5em;
62 | margin: 1em;
63 | line-height: 1.3;
64 | }
65 |
66 | dt {
67 | position: relative;
68 | grid-column: 1;
69 | font-weight: bold;
70 | overflow: hidden;
71 | }
72 |
73 | dd {
74 | grid-column: 2;
75 | margin-left: 0;
76 | }
77 |
78 | dt::after {
79 | position: absolute;
80 | top: 0.6em;
81 | width: 100%;
82 | margin-left: 0.75em;
83 | border-bottom: 1px dotted #888;
84 | content: '';
85 | }
86 |
87 |
88 | [type="date"] {
89 | background:#fff url(https://cdn1.iconfinder.com/data/icons/cc_mono_icon_set/blacks/16x16/calendar_2.png) 97% 50% no-repeat ;
90 | }
91 | [type="date"]::-webkit-inner-spin-button {
92 | display: none;
93 | }
94 | [type="date"]::-webkit-calendar-picker-indicator {
95 | opacity: 0;
96 | }
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/app/templates/add.tpl.j2:
--------------------------------------------------------------------------------
1 | {% extends "index.tpl.j2" %}
2 | {% set active_page = "add" %}
3 |
4 | {% block content %}
5 |
6 | {% if request.args.get('error') %}
7 | {{request.args.get('error')}}
8 | {% endif %}
9 |
10 | {% if request.args.get('ok') %}
11 | {{request.args.get('ok')}}
12 | {% endif %}
13 |
14 |
17 |
18 |
19 |
20 | {% endblock %}
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/app/templates/index.tpl.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | {% set menu = [
8 | ('/', 'index', 'Index'),
9 | ('/add', 'add', 'Add Entry'),
10 | ('/list', 'list', 'List Entries')
11 | ] -%}
12 | {% set active_page = active_page|default('index') -%}
13 |
14 |
15 |
22 |
23 |
24 |
25 |
26 | {% block content %}
27 | Hola!
28 | {% endblock %}
29 |
30 |
31 |
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/app/templates/list.tpl.j2:
--------------------------------------------------------------------------------
1 | {% extends "index.tpl.j2" %}
2 | {% set active_page = "list" %}
3 |
4 | {% block content %}
5 |
6 |
11 |
15 |
16 | {{ sql }}
17 |
18 | {% if data %}
19 |
20 | {% for item in data %}
21 | -
22 |
- {{ item.name }}
23 | - {{ item.time }}
24 |
25 | {% endfor%}
26 |
27 | {% else %}
28 | nothing here... shoo shoo go away...
29 | {% endif %}
30 |
31 | {% endblock %}
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/data/schema.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE names(
2 | name text,
3 | time text
4 | );
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/readme.md:
--------------------------------------------------------------------------------
1 | # Making a Simple Flask App
2 |
3 | ```bash
4 | # setup virtualenv
5 | python3 -m pip install --upgrade virtualenv
6 | virtualenv -p python3 venv
7 | source venv/bin/activate
8 | python3 -m pip install -r requirements.txt
9 |
10 | # application
11 | chmod +x app/app.py
12 | ./app/app.py
13 |
14 | # deactivate
15 | deactivate
16 | rm -rf venv
17 | ```
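
While the app started in the block above is still running, its three routes (`/`, `/add`, `/list`) can be exercised directly. A small smoke test using only the standard library might look like this (the default Flask port 5000 is assumed; the name and dates are illustrative):

```python
#!/usr/bin/env python3
# Quick smoke test against the running app (assumes the default Flask port 5000).
from urllib import parse, request

BASE = "http://127.0.0.1:5000"

# POST a name; app.py only accepts names longer than 3 characters.
data = parse.urlencode({"name": "Guido"}).encode()
with request.urlopen(f"{BASE}/add", data=data) as resp:
    print(resp.status, resp.url)

# List entries, optionally filtered by a YYYY-MM-DD date range.
with request.urlopen(f"{BASE}/list?start=2018-01-01&end=2030-01-01") as resp:
    print(resp.read().decode()[:300])
```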
18 |
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask
--------------------------------------------------------------------------------
/012-Python-Flask-MySQL/test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import re
4 |
5 | datestring = "2018-08-31"
6 |
7 | print( re.match( "^\d{4}-\d{2}-\d{2}$", datestring) )
--------------------------------------------------------------------------------
/013-Python-Network-Checker/.gitignore:
--------------------------------------------------------------------------------
1 | venv/*
2 |
--------------------------------------------------------------------------------
/013-Python-Network-Checker/async.py:
--------------------------------------------------------------------------------
1 | #!python3
2 |
3 | import asyncio
4 |
5 | async def speak_async():
6 | print('OMG asynchronicity!')
7 |
8 |
9 | loop = asyncio.get_event_loop()
10 | for i in range(1000):
11 | loop.run_until_complete(speak_async())
12 | loop.close()
13 |
--------------------------------------------------------------------------------
/013-Python-Network-Checker/readme.md:
--------------------------------------------------------------------------------
1 | # TODO: Check network resources using UDP/TCP protocols
2 |
3 | ```bash
4 | > ./app.py --help
5 | Usage: app.py [OPTIONS]
6 |
7 | Options:
8 | --cvsfile TEXT CSV file format (PROTOCOL,HOST,PORT)
9 | --protocol TEXT Protocol TCP or UDP
10 | --port TEXT Port
11 | --host TEXT Hostname or ip
12 | --help Show this message and exit.
13 | ```
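
Under the hood the idea is simple: a TCP target counts as reachable when the connection handshake succeeds, while a UDP target gets a small probe and counts as reachable only if something answers (the bundled `server.py` replies to `ping` with `pong`). The actual `app.py` is not shown here; a minimal sketch of that idea, with illustrative names, could look like:

```python
#!/usr/bin/env python3
# Minimal sketch of a TCP/UDP reachability check (illustrative, not the real app.py).
import socket

def check_tcp(host: str, port: int, timeout: float = 2.0) -> bool:
    """TCP is OK when the three-way handshake completes."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

def check_udp(host: str, port: int, timeout: float = 2.0) -> bool:
    """UDP is connectionless, so send a probe and wait for any reply."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    try:
        sock.sendto(b"ping", (host, port))
        sock.recvfrom(1500)          # server.py answers "ping" with "pong"
        return True
    except OSError:
        return False
    finally:
        sock.close()

if __name__ == "__main__":
    print("TCP:", "OK" if check_tcp("google.com", 443) else "FAIL", "google.com:443")
    print("UDP:", "OK" if check_udp("127.0.0.1", 9995) else "FAIL", "localhost:9995")
```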
14 |
15 |
16 | ### Start and Run
17 | ```bash
18 | # start virtual env
19 | python3 -m pip install --upgrade virtualenv
20 | virtualenv -p python3 venv
21 | source venv/bin/activate
22 | python3 -m pip install -r requirments.txt
23 |
24 | # do required task
25 | # run the UDP server: ./server.py
26 | # or check hosts...
27 | ./server.py &
28 |
29 |
30 | # deactivate
31 | deactivate && rm -rf venv
32 | ```
33 |
34 |
35 | ### Examples
36 | ```bash
37 | > ./app.py --cvsfile google.csv
38 | TCP: OK google.com:443
39 | TCP: OK google.ca:443
40 | TCP: OK google.com.ua:443
41 |
42 | > ./app.py --protocol UDP --port 9995 --host localhost
43 | UDP: OK localhost:9995
44 | ```
--------------------------------------------------------------------------------
/013-Python-Network-Checker/requirments.txt:
--------------------------------------------------------------------------------
1 | click
2 | pylint
3 | icecream
4 | scapy
--------------------------------------------------------------------------------
/013-Python-Network-Checker/server.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import socket
4 |
5 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
6 | sock.bind(('0.0.0.0', 9995))
7 |
8 | while True:
9 | buf, addr = sock.recvfrom(1500)
10 | print(buf, addr)
11 | if buf.decode() == "ping":
12 | sock.sendto( str.encode("pong"), addr)
13 | if buf.decode() == "hello":
14 | sock.sendto( str.encode("buenos tardes amigo!"), addr)
15 | if buf.decode() == "Hello There!":
16 | sock.sendto( str.encode("General Kenobi!"), addr)
17 |
--------------------------------------------------------------------------------
/013-Python-Network-Checker/test.csv:
--------------------------------------------------------------------------------
1 | UDP,127.0.0.1,9995
2 | UDP,127.0.0.1,9994
3 | TCP,127.0.0.1,9995
4 | TCP,127.0.0.1,9994
5 | TCP,127.0.0.1,80
6 | TCP,google.com,80
7 | TCP,google.com,443
8 | ICMP,127.0.0.1,9994
--------------------------------------------------------------------------------
/014-aws-cloudformation-lambda/.number:
--------------------------------------------------------------------------------
1 | 2
2 |
--------------------------------------------------------------------------------
/014-aws-cloudformation-lambda/deployment.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function lambda(){
4 | local counter=$1
5 | # cleanup
6 | while read -d '' -r file; do
7 | out=$(diff -d $file "lambda-${counter}.zip")
8 | if [[ ! -z $out ]]; then
9 | unlink $file
10 | fi
11 | done < <(find . -name "lambda-*" -print0)
12 |
13 | }
14 |
15 | CREATE_LAMBDA="Yes"
16 |
17 | counter=$(cat .number)
18 | counter=$((counter + 1))
19 | echo $counter > .number
20 |
21 | zip "lambda-${counter}" lambda.py >/dev/null 2>&1
22 |
23 | if [[ ! -z $CREATE_LAMBDA ]]; then
24 | # ship new version
25 | aws s3 mv "lambda-${counter}.zip" "s3://butuzov-lambdas/lambda-${counter}.zip"
26 | sed -e "s/lambda.zip/lambda-${counter}.zip/" ec2amis_backup.yaml > deployment.yml
27 | else
28 | # zip any new version
29 | aws s3 mv "lambda-${counter}.zip" "s3://butuzov-lambdas/lambda.zip"
30 | # date = time.strftime('%Y-%m-%d', time.strptime("29 Nov 00", "%d %b %y") )
31 | cp ec2amis_backup.yaml deployment.yml
32 | fi
33 |
34 | exists=$(aws cloudformation list-stacks --stack-status-filter "CREATE_IN_PROGRESS" "CREATE_COMPLETE" "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS" "UPDATE_COMPLETE" | grep "demo")
35 |
36 | if [[ ! -z $exists ]]; then
37 | action=update-stack
38 | else
39 | action=create-stack
40 | fi
41 |
42 | aws cloudformation $action --stack-name demo --template-body "file://$(pwd)/deployment.yml" --capabilities CAPABILITY_NAMED_IAM
43 |
44 | sleep 10
45 |
46 | unlink deployment.yml
47 |
--------------------------------------------------------------------------------
/014-aws-cloudformation-lambda/ec2amis_backup.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 |
3 | Description: "EC2 AMI's Backup Procedure"
4 |
5 | ####################################################################################################
6 |
7 | Parameters:
8 |
9 | StackName:
10 | Type: String
11 | Default: EC2_Instances_Backup_Procedure
12 |
13 | Environment:
14 | Type: String
15 | Default: dev
16 | AllowedValues:
17 | - dev
18 | - pro
19 |
20 | ####################################################################################################
21 |
22 | Resources:
23 |
24 | Schedule:
25 | Description: ScheduledRule to backup instances
26 | Type: AWS::Events::Rule
27 | Properties:
28 | Description: ScheduledRule to backup instances
29 | ScheduleExpression: rate(1 day)
30 | State: ENABLED
31 | Name: !Join [ "_", [ !Ref Environment, !Ref StackName ] ]
32 | Targets:
33 | - Arn: !GetAtt Lambda.Arn
34 | Id: !Join [ "_", [ !Ref Environment, !Ref StackName ] ]
35 |
36 | Lambda:
37 | Description: Backup instances that are decorated with a (possibly empty) "backup" tag
38 | Type: AWS::Lambda::Function
39 | Properties:
40 | FunctionName: !Join [ "_", [ !Ref Environment, !Ref StackName ] ]
41 | Code:
42 | S3Bucket: butuzov-lambdas
43 | S3Key: lambda.zip
44 | Handler: lambda.main
45 | Role: !GetAtt Role.Arn
46 | Runtime: python3.6
47 | Timeout: 10
48 | MemorySize: 128
49 |
50 | Role:
51 | Description: IAM Role for Running Lambda
52 | Type: AWS::IAM::Role
53 | Properties:
54 | ManagedPolicyArns: [ !Ref Policy ]
55 | RoleName: !Join [ "_", [ !Ref Environment, !Ref StackName ]]
56 | AssumeRolePolicyDocument:
57 | Version: '2012-10-17'
58 | Statement:
59 | - Effect: Allow
60 | Principal:
61 | Service: [ "lambda.amazonaws.com" ]
62 | Action: [ "sts:AssumeRole" ]
63 |
64 | Policy:
65 | Description: Policy allows to create/list/delete EC2 Instances, amis and their snapshots.
66 | Type: AWS::IAM::ManagedPolicy
67 | Properties:
68 | ManagedPolicyName: !Join [ "_", [ !Ref Environment, !Ref StackName ]]
69 | PolicyDocument:
70 | Version: '2012-10-17'
71 | Statement:
72 |
73 | # List describe instances, create images and delete snapshots.
74 | - Effect: Allow
75 | Action:
76 | - ec2:DescribeInstances
77 | - ec2:CreateImage
78 | - ec2:DescribeImages
79 | - ec2:DeregisterImage
80 | - ec2:DeleteSnapshot
81 | Resource: [ "*" ]
82 |
83 | # Logs
84 | - Effect: Allow
85 | Action:
86 | - logs:CreateLogGroup
87 | - logs:CreateLogStream
88 | - logs:PutLogEvents
89 | Resource: [ "arn:aws:logs:*:*:*" ]
90 |
--------------------------------------------------------------------------------
/014-aws-cloudformation-lambda/readme.md:
--------------------------------------------------------------------------------
1 | # Lambda
2 |
3 | This Lambda function creates an AMI for every EC2 instance tagged `backup`, naming it after the value of the instance's `Name` tag.
4 | It also deletes older AMI backups, including their snapshots (set the schedule rate yourself in the CloudFormation template).
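
The actual `lambda.py` (shipped as `lambda.zip` by `deployment.sh`) is not reproduced in this dump. Purely as an illustration of the flow described above, a boto3 handler along these lines could do it; the retention window and naming are assumptions, not the real implementation, and the handler name `main` only mirrors the template's `Handler: lambda.main`:

```python
#!/usr/bin/env python3
# Illustrative sketch of an EC2 AMI backup/cleanup handler (not the repo's lambda.py).
import datetime
import boto3

ec2 = boto3.client("ec2")
RETENTION_DAYS = 7  # assumption; the real schedule/retention lives in the template and lambda

def main(event, context):
    # 1) Create an AMI for every instance carrying a "backup" tag, named after its "Name" tag.
    reservations = ec2.describe_instances(
        Filters=[{"Name": "tag-key", "Values": ["backup"]}]
    )["Reservations"]
    stamp = datetime.datetime.utcnow().strftime("%Y-%m-%d")
    for reservation in reservations:
        for instance in reservation["Instances"]:
            name = next((t["Value"] for t in instance.get("Tags", []) if t["Key"] == "Name"),
                        instance["InstanceId"])
            ec2.create_image(InstanceId=instance["InstanceId"],
                             Name=f"{name}-{stamp}",
                             NoReboot=True)

    # 2) Drop AMIs older than the retention window, together with their snapshots.
    #    NOTE: a real implementation should scope this to its own backup AMIs
    #    (e.g. by tag or name prefix) instead of every self-owned image.
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=RETENTION_DAYS)
    for image in ec2.describe_images(Owners=["self"])["Images"]:
        created = datetime.datetime.strptime(image["CreationDate"], "%Y-%m-%dT%H:%M:%S.%fZ")
        if created < cutoff:
            ec2.deregister_image(ImageId=image["ImageId"])
            for mapping in image.get("BlockDeviceMappings", []):
                snapshot_id = mapping.get("Ebs", {}).get("SnapshotId")
                if snapshot_id:
                    ec2.delete_snapshot(SnapshotId=snapshot_id)
```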
5 |
6 | # AWS Lambda Usage Example
7 |
8 | This is an example of deploying a Lambda to AWS using CloudFormation.
9 |
10 | 1) Change the S3 bucket I used (butuzov-lambdas)
11 | 2) Upload the Lambda manually
12 | 3) Upload the CloudFormation template
13 | 4) Congrats!
14 |
15 | or just run `./deployment.sh` in a terminal.
16 |
--------------------------------------------------------------------------------
/015-serverless-apigateway-lambda/lambda.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import json
4 |
5 | def handler(event, context):
6 | return {
7 | 'statusCode': 200,
8 | 'headers': {
9 | 'Content-Type': 'text/html'
10 | },
11 | 'body': "<pre>QueryString is " + json.dumps(event.get('queryStringParameters'), indent=4) + "</pre>"
12 | }
13 |
--------------------------------------------------------------------------------
/015-serverless-apigateway-lambda/mappings-values.yml:
--------------------------------------------------------------------------------
1 | Domains:
2 | # Defined Domains For RequestsInsight-Core Lambda
3 | beta :
4 | domain: "test.made.ua"
5 | Certs:
6 | # Available WildCard Certificates per-account
7 | beta:
8 | arn: "arn:aws:acm:us-west-2:0000000000000:certificate/111111111-2222-3333-4444-5555555555"
9 |
--------------------------------------------------------------------------------
/015-serverless-apigateway-lambda/readme.md:
--------------------------------------------------------------------------------
1 | # Calling Lambda with Custom Domain (with custom basepathmapping)
2 |
3 | Case: using a few different AWS accounts to deploy a custom domain in API Gateway for use with a Lambda.
4 |
5 | ```
6 | serverless deploy --stage=beta --region=us-west-2
7 | ```
8 |
9 | This example solves an issue with the dynamically created `AWS::ApiGateway::BasePathMapping` that comes with no `Stage`. See more info at https://github.com/serverless/serverless/issues/4029
10 |
11 | The correct execution order for the resources is `Lambda` -> (`Domain & RestApi`) -> `Deployment` -> `BasePathMapping`. Serverless will create **2** deployments - its own real one and our fake one - but it has to wait until the fake one is ready before creating the `BasePathMapping`, so the `BasePathMapping` already has a stage at that step of execution.
12 |
13 |
14 | ### Route 53
15 | This is a solution for a setup that uses a few different AWS accounts, with all domains managed from just one of them. So... Route 53 is not included.
16 |
--------------------------------------------------------------------------------
/015-serverless-apigateway-lambda/serverless.yml:
--------------------------------------------------------------------------------
1 | service: lambda-APITest
2 |
3 |
4 | custom:
5 | stage: ${opt:stage, 'beta'}
6 |
7 |
8 | provider:
9 | name: aws
10 | runtime: python3.6
11 | region: ${opt:region, 'us-west-2'}
12 | stackName: "${self:custom.stage}-${self:service}"
13 | deploymentBucket: ${cf:${self:custom.stage}-Serverless.ServerlessS3Bucket}
14 | role: { Fn::GetAtt: ["Role","Arn"] }
15 | endpointType: regional
16 |
17 | functions:
18 | App:
19 | name: ${self:provider.stackName}
20 | description: "ApiTest [${self:provider.stackName}]"
21 | handler: lambda.handler
22 | package:
23 | include: [ "lambda.py" ]
24 | events:
25 | - http:
26 | path: ""
27 | method: get
28 | cors: true
29 |
30 | package:
31 | individually: true
32 | exclude: [ "./**" ]
33 |
34 | resources:
35 | Description: "Serverless: Test Api App"
36 |
37 | Mappings: ${file(./mappings-values.yml)}
38 |
39 | Resources:
40 |
41 | # -------------------------------------------------------
42 | # Execution Order Matters
43 | # https://github.com/serverless/serverless/issues/4029
44 | # Lambda -> (Domain & RestApi) -> Deployment -> BasePathMapping
45 | # -------------------------------------------------------
46 |
47 | Domain:
48 | Type: AWS::ApiGateway::DomainName
49 | Properties:
50 | DomainName: { Fn::FindInMap : [ Domains, "${self:custom.stage}", "domain" ] }
51 | EndpointConfiguration:
52 | Types: [ "REGIONAL" ]
53 | RegionalCertificateArn: { Fn::FindInMap : [ Certs, "${self:custom.stage}", "arn" ] }
54 | DependsOn: AppLambdaFunction
55 |
56 | ApiGatewayRestApi:
57 | Type: AWS::ApiGateway::RestApi
58 | Properties:
59 | Description: "ApiGatheway [${self:provider.stackName}]"
60 | Name: ${self:provider.stackName}
61 | DependsOn: AppLambdaFunction
62 |
63 | ApiGatewayDeployment:
64 | Type: "AWS::ApiGateway::Deployment"
65 | Properties:
66 | Description: "Deployment [${self:provider.stackName}]"
67 | RestApiId: { Ref: ApiGatewayRestApi }
68 | StageName: "${self:custom.stage}"
69 | DependsOn: [ Domain ]
70 |
71 | BasePathMapping:
72 | Type: AWS::ApiGateway::BasePathMapping
73 | Properties:
74 | DomainName: { Fn::FindInMap : [ Domains, "${self:custom.stage}", "domain" ] }
75 | RestApiId: { Ref: ApiGatewayRestApi }
76 | Stage: "${self:custom.stage}"
77 | DependsOn: ApiGatewayDeployment
78 |
79 | # -------------------------------------------------------
80 |
81 | # Role for Lambdas
82 | Role:
83 | Description: "Role [${self:provider.stackName}]"
84 | Type: "AWS::IAM::Role"
85 | Properties:
86 | ManagedPolicyArns: [ Ref: Policy ]
87 | RoleName: "${self:provider.stackName}"
88 | AssumeRolePolicyDocument:
89 | Version: '2012-10-17'
90 | Statement:
91 | - Effect: Allow
92 | Principal:
93 | Service: [ "lambda.amazonaws.com" ]
94 | Action: [ "sts:AssumeRole" ]
95 |
96 | Policy:
97 | Description: "Policy [${self:provider.stackName}]"
98 | Type: "AWS::IAM::ManagedPolicy"
99 | Properties:
100 | ManagedPolicyName: ${self:provider.stackName}
101 | PolicyDocument:
102 | Version: '2012-10-17'
103 | Statement:
104 |
105 | - Effect: Allow
106 | Action:
107 | - logs:CreateLogGroup
108 | - logs:CreateLogStream
109 | - logs:PutLogEvents
110 | Resource: [ "arn:aws:logs:*:*:*" ]
111 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # The DevOps Journey
2 |
3 | The Journey of a Thousand Miles Begins with One Step.
4 |
5 | ## List Of Steps ( Hands-On Tasks / Practice / Snippets )
6 | 1. [Networking - DHCP (Clients)](001-Networking-DHCP)
7 | 1. [Networking - DNS](002-Networking-DNS)
8 | 1. [Docker - `httpd`+`cron` in one container](003-Docker-Cron-n-Apache)
9 | 1. [Docker-Compose - WebDev Stack](004-Docker-Compose)
10 | 1. [Ansible Deployment for 3 Tier App (Ansible Hello World v1)](005-Ansible-3-Tier-App)
11 | 1. [Terraform and AWS (hello world)](006-Terraform-AWS)
12 | 1. [Kubernetes - hello world](007-K8S-Hello-World)
13 | 1. [Kubernetes - MySQL](008-K8S-MySQL)
14 | 1. [Kubernetes - Accessing pods in cluster](009-K8S-Accessing-Pods-In-Cluster)
15 | 1. [Jenkins - Ansible/Artifactory/Docker/Docker Registry on Vagrant and AWS (with Terraform)](010-Jenkins-Teraform-Ansible)
16 | 1. [Ansible Deployment for 3 Tier App (v2)](011-Ansible-3-Tier-App-v2)
17 | 1. [Python: Running Simple Web App (flask)](012-Python-Flask-MySQL)
18 | 1. [Python: Network resources using UDP and TCP](013-Python-Network-Checker)
19 | 1. [AWS: Python Lambda for EC2 instances backups and cleanups using CloudFormation](014-aws-cloudformation-lambda)
20 | 1. [AWS: Serverless GatewayApi Custom Domain example](015-serverless-apigateway-lambda)
21 |
--------------------------------------------------------------------------------