├── .gitignore ├── Linux_Basics_1 ├── README.md ├── Vagrantfile └── tests │ ├── assessment.bats │ └── test_helper.bash ├── README.md ├── backup_and_restore_10 ├── README.md ├── RECOVERY_PLAN.md ├── Vagrantfile ├── features │ ├── backup.feature │ └── step_definitions │ │ └── backup_steps.rb ├── local_inventory.ini ├── playbook.backup.yml ├── playbook.provision.yml ├── prod_inventory.ini ├── roles │ ├── backup │ │ └── tasks │ │ │ └── main.yml │ ├── provision │ │ └── tasks │ │ │ └── main.yml │ └── verify_backup_move_to_S3 │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ └── backup.sh.j2 ├── secret_vars.example.yml └── vars.yml ├── bash_scripting └── basics.sh ├── build_management_4 ├── Vagrantfile ├── hello.yml └── navi-pipeline.yml ├── config_management_3 ├── Gemfile ├── README.md ├── Vagrantfile ├── features │ ├── install.feature │ └── step_definitions │ │ └── install_steps.rb ├── inventory.ini ├── playbook.mean.yml ├── playbook.provision.yml ├── requirements.txt └── roles │ ├── mongodb │ └── tasks │ │ └── main.yml │ ├── nodejs │ └── tasks │ │ └── main.yml │ └── provision │ └── tasks │ └── main.yml ├── iaas_7 ├── README.md ├── files │ ├── error.html │ └── index.html ├── playbook.ami.yml ├── playbook.autoscale_group.yml ├── playbook.cleanup_autoscaling.yml ├── playbook.cleanup_cloudformation.yml ├── playbook.cloudformation.yml ├── playbook.delete_s3.yml ├── playbook.destroy_ami.yml ├── playbook.s3.yml ├── policy.json ├── roles │ ├── autoscale_group │ │ └── tasks │ │ │ └── main.yml │ ├── aws_ami │ │ └── tasks │ │ │ └── main.yml │ ├── cleanup_ami │ │ └── tasks │ │ │ └── main.yml │ ├── cleanup_autoscaling │ │ └── tasks │ │ │ └── main.yml │ ├── cleanup_cloudformation │ │ └── tasks │ │ │ └── main.yml │ ├── cleanup_s3 │ │ └── tasks │ │ │ └── main.yml │ ├── cloudformation │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── cloudformation.json │ │ │ └── cloudformation.json.j2 │ └── s3 │ │ └── tasks │ │ └── main.yml └── templates │ └── awscli_config.j2 ├── monitoring_deployments_9 ├── Gemfile ├── README.md ├── Vagrantfile ├── features │ ├── cloudformation_setup.feature │ ├── nagios_host_install.feature │ ├── nagios_server_install.feature │ ├── new_relic_setup.feature │ └── step_definitions │ │ ├── cloudformation_steps.rb │ │ ├── nagios_host_install_steps.rb │ │ ├── nagios_server_install_steps.rb │ │ └── new_relic_steps.rb ├── local_inventory.ini ├── playbook.cleanup_cloudformation.yml ├── playbook.ec2.yml ├── playbook.mean.yml ├── playbook.nagios_host.yml ├── playbook.nagios_server.yml ├── playbook.newrelic.yml ├── playbook.node_app.yml ├── playbook.provision.yml ├── prod_inventory.ini ├── roles │ ├── add_host_to_nagios_server │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── nagios_host.cfg.j2 │ ├── cleanup_cloudformation │ │ └── tasks │ │ │ └── main.yml │ ├── deploy_node_app │ │ └── tasks │ │ │ └── main.yml │ ├── ec2 │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── cloudformation.json │ │ │ └── cloudformation.json.j2 │ ├── lamp_stack │ │ └── tasks │ │ │ └── main.yml │ ├── mongo │ │ └── tasks │ │ │ └── main.yml │ ├── nagios_host │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── nrpe.cfg.j2 │ ├── nagios_server │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── contacts.cfg.j2 │ │ │ └── nrpe.j2 │ ├── new_relic │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── newrelic.js.j2 │ ├── node │ │ └── tasks │ │ │ └── main.yml │ ├── 
provision │ │ └── tasks │ │ │ └── main.yml │ └── setup │ │ └── tasks │ │ └── main.yml ├── secret_vars.example.yml └── vars.yml ├── networking_2 └── README.md └── securing_deployments_11 ├── README.md ├── Vagrantfile ├── features ├── git_hook_setup_steps.feature ├── secret_vault_setup.feature └── step_definitions │ ├── git_hook_setup_steps.rb │ └── secret_vault_setup.rb ├── local_inventory.ini ├── playbook.provision.yml ├── playbook.secure_credential_hook.yml ├── playbook.secure_vault.yml ├── roles ├── git_secret_setup │ ├── files │ │ └── run_git_secret_hook.sh │ └── tasks │ │ └── main.yml ├── provision │ ├── files │ │ └── credentials │ └── tasks │ │ └── main.yml └── secret_vault_setup │ └── tasks │ └── main.yml └── vars.yml /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | .DS_Store 3 | *.gem 4 | *.rbc 5 | 6 | # for a library or gem, you might want to ignore these files since the code is 7 | # intended to run in multiple environments; otherwise, check them in: 8 | Gemfile.lock 9 | .ruby-version 10 | .ruby-gemset 11 | 12 | # unless supporting rvm < 1.11.0 or doing something fancy, ignore this: 13 | .rvmrc 14 | 15 | # Used by dotenv library to load environment variables. 16 | .env 17 | 18 | # ansible 19 | secret_vars.yml 20 | *.retry 21 | 22 | # aws 23 | *.pem 24 | -------------------------------------------------------------------------------- /Linux_Basics_1/README.md: -------------------------------------------------------------------------------- 1 | # Assessment 01 - Linux/Unix Basics 2 | 3 | This contains a list of tests to assess a fellows knowledge of the **Linux/Unix Basics** Learning Outcome 4 | 5 | ## Instructions 6 | 7 | 1. Install `vagrant` on local machine if not present 8 | 2. Setup a ubuntu vagrant box 9 | 3. Mount the tests directory onto your vagrant box 10 | 4. Ssh into the vagrant box 11 | 5. Install `bats` **(Bash Automated Testing System)** 12 | 13 | ``` 14 | git clone https://github.com/sstephenson/bats.git 15 | cd bats 16 | ./install.sh /usr/local 17 | ``` 18 | 6. `cd` into the tests directory 19 | 7. Find all `` strings and replace with the proper commands 20 | 8. Run `bats assessment.bats` to run the tests to verify your commands 21 | 22 | -------------------------------------------------------------------------------- /Linux_Basics_1/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure(2) do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 
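# Two Ubuntu guests are defined below: "web" at 192.168.56.101 and "db" at 192.168.56.102, each capped at 512 MB of RAM.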
12 | config.vm.define "web" do |web| 13 | web.vm.box = "ubuntu/trusty64" 14 | web.vm.hostname = 'web' 15 | 16 | 17 | web.vm.network :private_network, ip: "192.168.56.101" 18 | 19 | web.vm.provider :virtualbox do |v| 20 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 21 | v.customize ["modifyvm", :id, "--memory", 512] 22 | v.customize ["modifyvm", :id, "--name", "web"] 23 | end 24 | end 25 | 26 | config.vm.define "db" do |db| 27 | db.vm.box = "ubuntu/trusty64" 28 | db.vm.hostname = 'db' 29 | 30 | 31 | db.vm.network :private_network, ip: "192.168.56.102" 32 | 33 | db.vm.provider :virtualbox do |v| 34 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 35 | v.customize ["modifyvm", :id, "--memory", 512] 36 | v.customize ["modifyvm", :id, "--name", "db"] 37 | end 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /Linux_Basics_1/tests/assessment.bats: -------------------------------------------------------------------------------- 1 | load test_helper 2 | 3 | setup() { 4 | create_tmp_dir 5 | } 6 | 7 | teardown() { 8 | delete_tmp_dir 9 | rm -rf /tmp/*-called 10 | cleanup_virtual_block_device 11 | } 12 | 13 | @test "create a directory named 'testing'" { 14 | run_command mkdir testing 15 | 16 | [ "$status" -eq 0 ] 17 | [ -d "${tmp_dir}/testing" ] 18 | } 19 | 20 | @test "create a nested directory named 'foo/bar/baz'" { 21 | run_command mkdir -p foo/bar/baz 22 | 23 | [ "$status" -eq 0 ] 24 | [ -d "${tmp_dir}/foo/bar/baz" ] 25 | } 26 | 27 | @test "find all files and directories with names starting with 'file-'" { 28 | create_files_and_directories 29 | 30 | # The directory and file structure is described below 31 | <<-EOF 32 | . 33 | ├── bar 34 | │   ├── file-1.txt 35 | │   ├── file-2.txt/ 36 | │   ├── file-3.txt 37 | │   ├── file-4.txt/ 38 | └── foo 39 | ├── file-1.txt/ 40 | ├── file-2.txt 41 | ├── file-3.txt/ 42 | ├── file-4.txt 43 | EOF 44 | 45 | # run the command below to find all files with names starting with 'file-' 46 | run_command find . -iname 'file-*' -type f 47 | 48 | [ "$status" -eq 0 ] 49 | 50 | echo -e "$output" | grep "bar/file-1.txt" 51 | echo -e "$output" | grep "bar/file-3.txt" 52 | echo -e "$output" | grep "foo/file-2.txt" 53 | echo -e "$output" | grep "foo/file-4.txt" 54 | 55 | # run the command below to find all directories with names starting with 'file-' 56 | run_command find . -iname 'file-*' -type d 57 | 58 | [ "$status" -eq 0 ] 59 | 60 | echo -e "$output" | grep "bar/file-2.txt" 61 | echo -e "$output" | grep "bar/file-4.txt" 62 | echo -e "$output" | grep "foo/file-1.txt" 63 | echo -e "$output" | grep "foo/file-3.txt" 64 | } 65 | 66 | @test "find text in files" { 67 | create_files_and_directories 68 | 69 | # The files listed below contain the text 70 | <<-EOF 71 | bar/file-1.txt - pattern 72 | bar/file-3.txt - pattern 73 | foo/file-2.txt - other pattern 74 | foo/file-4.txt - pattern 75 | EOF 76 | 77 | # run the command below to find files that contain lines that start with 'pattern' 78 | run_command grep -R '^pattern' . 
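# grep -R searches recursively from the current directory; the '^' anchor restricts matches to lines that begin with 'pattern'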
79 | 80 | [ "$status" -eq 0 ] 81 | 82 | echo -e "$output" | grep "bar/file-1.txt:pattern" 83 | echo -e "$output" | grep "bar/file-3.txt:pattern" 84 | echo -e "$output" | grep "foo/file-4.txt:pattern" 85 | } 86 | 87 | @test "format block device '/dev/loop0' as ext4 and mount to '/mnt/virtual'" { 88 | create_virtual_block_device 89 | 90 | # run the command below to format device '/dev/loop0' as 'ext4' 91 | run_command sudo mkfs.ext4 /dev/loop0 92 | 93 | # run the commands below to create the mount point and mount '/dev/loop0' to '/mnt/virtual' 94 | run_command sudo mkdir -p /mnt/virtual 95 | 96 | [ "$status" -eq 0 ] 97 | 98 | run_command sudo mount -t ext4 /dev/loop0 /mnt/virtual 99 | 100 | [ "$status" -eq 0 ] 101 | 102 | devices=$(sudo lsblk) 103 | echo -e "$devices" | grep "loop0" 104 | 105 | block_info=$(sudo file -sL /dev/loop0) 106 | echo -e "$block_info" | grep "ext4 filesystem" 107 | 108 | mount_info=$(mount) 109 | echo -e "$mount_info" | grep "/dev/loop0" | grep "/mnt/virtual" | grep "ext4" 110 | } 111 | 112 | @test "list processes running for user 'tester'" { 113 | case1="$(echo "When." | tr "hW" "sp" | tr "en" " -" | tr "." "U")" 114 | case2="$(echo "When." | tr "hW" "sp" | tr "en" " -" | tr "." "u")" 115 | # Write a command below to list all processes for user 'tester' 116 | command="ps -u tester" 117 | 118 | (echo "$command" | grep "$case1 tester") || (echo "$command" | grep "$case2 tester") 119 | } 120 | 121 | @test "send signals to a running process" { 122 | sigint="$(echo "What" | tr "hW" "ik" | tr "a" "t" | tr "t" "l") -$(trap -l | grep -o '.. SIGINT' | cut -d ')' -f 1)" 123 | sigterm="$(echo "What" | tr "hW" "ik" | tr "a" "t" | tr "t" "l") -$(trap -l | grep -o '... SIGTERM' | cut -d ')' -f 1)" 124 | 125 | # Given a process exists with PID 3214 126 | # Write a command below to send a SIGINT signal to the process 127 | command="kill -2 3214" 128 | 129 | echo "$command" | grep "$sigint 3214" 130 | 131 | # Given a process exists with PID 3214 132 | # Write a command below to send a SIGTERM signal to the process 133 | command="kill -15 3214" 134 | 135 | echo "$command" | grep "$sigterm 3214" 136 | } 137 | 138 | @test "redirect text 'SOME MESSAGE' to and overwrite file named 'output-file'" { 139 | cat > "${tmp_dir}/output-file" <<-EOF 140 | CONTENTS 141 | EOF 142 | 143 | 144 | 145 | 146 | # run the command below to redirect 'SOME MESSAGE' to 'output-file', overwriting it 147 | run_command bash -c "echo 'SOME MESSAGE' > output-file" 148 | [ "$status" -eq 0 ] 149 | 150 | actual=$(cat "${tmp_dir}/output-file") 151 | [ "$actual" = "SOME MESSAGE" ] 152 | } 153 | 154 | @test "redirect text 'SOME MESSAGE' to and append to file named 'output-file'" { 155 | cat > "${tmp_dir}/output-file" <<-EOF 156 | CONTENTS 157 | EOF 158 | 159 | 160 | 161 | 162 | # run the command below to redirect 'SOME MESSAGE' to 'output-file', appending to it 163 | run_command bash -c "echo 'SOME MESSAGE' >> output-file" 164 | [ "$status" -eq 0 ] 165 | 166 | actual=$(cat "${tmp_dir}/output-file") 167 | expected="CONTENTS 168 | SOME MESSAGE" 169 | 170 | [ "$actual" = "$expected" ] 171 | } 172 | 173 | @test "schedule process 'echo HI' to be run every hour" { 174 | command="echo '0 * * * * echo HI' | crontab -" 175 | 176 | echo -e "$command" | grep "0 $(printf '* %.0s' {0..3}) echo HI" 177 | } 178 | -------------------------------------------------------------------------------- /Linux_Basics_1/tests/test_helper.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | do_stub() { 4 | for F in $(compgen -A function | grep -e ^@stub:); do 5 | NAME=${F#@stub:} 6 | TMP=/tmp/stub-$RANDOM 7 | 8 | echo '#!/bin/bash' > $TMP 9 | echo 'set -e' >> $TMP 10 | 11 | type $F | sed "1,3d;$ d;s/^ *//g" >> $TMP 12 | chmod +x $TMP 13 | 14 | cp $TMP /usr/local/sbin/$NAME 15 | 16 | rm -f $TMP 17 | done 18 | } 19 | 20 | undo_stub() { 21 | for F in $(compgen -A function | grep -e ^@stub:); do 22 | 
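# ${F#@stub:} strips the '@stub:' prefix from the function name, leaving the name of the stub binary to delete from /usr/local/sbin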
NAME=${F#@stub:} 23 | 24 | rm /usr/local/sbin/$NAME 25 | done 26 | } 27 | 28 | run_command() { 29 | pushd $tmp_dir 30 | do_stub 31 | 32 | run "$@" 33 | 34 | undo_stub 35 | 36 | echo "Output:" 37 | echo $output 38 | popd 39 | } 40 | 41 | create_tmp_dir() { 42 | export tmp_dir=$(mktemp -d -t test.XXXX) 43 | } 44 | 45 | delete_tmp_dir() { 46 | rm -rf $tmp_dir 47 | } 48 | 49 | create_files_and_directories() { 50 | pushd $tmp_dir 51 | mkdir -p foo/file-{1,3}.txt 52 | touch foo/file-{2,4}.txt 53 | 54 | mkdir -p bar/file-{2,4}.txt 55 | touch bar/file-{1,3}.txt 56 | 57 | echo "other pattern" > foo/file-2.txt 58 | echo "pattern" > foo/file-4.txt 59 | echo "pattern" > bar/file-1.txt 60 | echo "pattern" > bar/file-3.txt 61 | popd 62 | } 63 | 64 | create_virtual_block_device() { 65 | if sudo losetup -a | grep 'loop0'; then 66 | cleanup_virtual_block_device 67 | fi 68 | 69 | sudo truncate -s 2G /mnt/blockdevice 70 | sudo losetup /dev/loop0 /mnt/blockdevice 71 | } 72 | 73 | cleanup_virtual_block_device() { 74 | if sudo losetup -a | grep 'loop0'; then 75 | if sudo mount | grep '/dev/loop0'; then 76 | sudo umount /dev/loop0 77 | fi 78 | 79 | sudo losetup -d /dev/loop0 80 | sudo rm -rf /mnt/blockdevice 81 | fi 82 | } 83 | 84 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DevOps exercises 2 | 3 | Exercises for learning purposes 4 | -------------------------------------------------------------------------------- /backup_and_restore_10/README.md: -------------------------------------------------------------------------------- 1 | # Outcome 10 - Backup & Restore 2 | 3 | An exercise that covers the following operations: 4 | - Setting up recurring automatic backups of critical data (database, etc.) on a system 5 | - Setting up scripts to verify the backups, ensuring they are valid and restorable 6 | - Setting up scripts to move the backup data to an external store (AWS S3) 7 | - Putting together a recovery plan to restore the data in the event of data loss 8 | 9 | ### System Requirements 10 | * Python 11 | * Pip 12 | * Ansible > 2.0 13 | * AWS CLI 14 | * Boto 15 | * Ruby 16 | * Cucumber 17 | * VirtualBox (to test locally) 18 | 19 | # Deploying to production 20 | 21 | ### Setup 22 | * Create a file called _secret_vars.yml_, copying the contents of _secret_vars.example.yml_. Replace the values in the file with yours. 23 | * Update the variables in the _vars.yml_ file with yours. 24 | * Update the _prod_inventory.ini_ file with your server name and its IP address. Replace `ansible_ssh_private_key_file` with the path to your AWS private key file. 25 | 26 | ### Provision the instances 27 | * Run `ansible-playbook playbook.provision.yml -i prod_inventory.ini` to provision your server. 28 | 29 | ### Run backup script 30 | * Run `ansible-playbook playbook.backup.yml -i prod_inventory.ini` to set up automatic backups of the MySQL database and their upload to S3. 31 | 32 | # Testing locally 33 | 34 | ### Setup 35 | * Run `vagrant up`. This will bring up a virtual machine and also provision it. 36 | * Create a file called _secret_vars.yml_, copying the contents of _secret_vars.example.yml_. Replace the values in the file with yours. 37 | * Update the variables in the _vars.yml_ file with yours. Note that _host_user_ should be changed to `vagrant` and not `ubuntu` for testing locally. 38 | 39 | ### Run tests 40 | * Run `cucumber features/backup.feature` to run the tests and set up the virtual machine. 
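Each cucumber scenario drives one tagged slice of the backup playbook, so the same slices can be rerun by hand when debugging; these commands mirror the ones in _features/step_definitions/backup_steps.rb_:

```
ansible-playbook -i local_inventory.ini playbook.backup.yml --tags 'install_automysqlbackup'
ansible-playbook -i local_inventory.ini playbook.backup.yml --tags 'backup_cmd'
```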
41 | -------------------------------------------------------------------------------- /backup_and_restore_10/RECOVERY_PLAN.md: -------------------------------------------------------------------------------- 1 | ### Database Recovery plan 2 | 3 | * Notify all affected stakeholders 4 | * Get latest backup from Amazon S3 bucket 5 | * Load the backup into a new database 6 | * Ensure database backup is loaded properly and works fine 7 | * Cut off from the old database and redirect connection to the new database 8 | * Notify all affected stakeholders of progress 9 | -------------------------------------------------------------------------------- /backup_and_restore_10/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure(2) do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://atlas.hashicorp.com/search. 15 | config.vm.box = "ubuntu/trusty64" 16 | # config.ssh.password = "vagrant" 17 | 18 | # Disable automatic box update checking. If you disable this, then 19 | # boxes will only be checked for updates when the user runs 20 | # `vagrant box outdated`. This is not recommended. 21 | # config.vm.box_check_update = false 22 | 23 | # Create a forwarded port mapping which allows access to a specific port 24 | # within the machine from a port on the host machine. In the example below, 25 | # accessing "localhost:8080" will access port 80 on the guest machine. 26 | # config.vm.network "forwarded_port", guest: 80, host: 8080 27 | 28 | # Create a private network, which allows host-only access to the machine 29 | # using a specific IP. 30 | config.vm.network "private_network", ip: "192.168.33.15" 31 | 32 | # Create a public network, which generally matched to bridged network. 33 | # Bridged networks make the machine appear as another physical device on 34 | # your network. 35 | # config.vm.network "public_network" 36 | 37 | # Share an additional folder to the guest VM. The first argument is 38 | # the path on the host to the actual folder. The second argument is 39 | # the path on the guest to mount the folder. And the optional third 40 | # argument is a set of non-required options. 41 | # config.vm.synced_folder "../data", "/vagrant_data" 42 | 43 | # Provider-specific configuration so you can fine-tune various 44 | # backing providers for Vagrant. These expose provider-specific options. 45 | # Example for VirtualBox: 46 | # 47 | config.vm.provider "virtualbox" do |vb| 48 | # # Display the VirtualBox GUI when booting the machine 49 | # vb.gui = true 50 | # 51 | # # Customize the amount of memory on the VM: 52 | vb.memory = "1024" 53 | end 54 | # 55 | # View the documentation for the provider you are using for more 56 | # information on available options. 57 | 58 | # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies 59 | # such as FTP and Heroku are also available. See the documentation at 60 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 
61 | # config.push.define "atlas" do |push| 62 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 63 | # end 64 | 65 | # Enable provisioning with a shell script. Additional provisioners such as 66 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 67 | # documentation for more information about their specific syntax and use. 68 | # config.vm.provision "shell", inline: <<-SHELL 69 | # sudo apt-get update 70 | # sudo apt-get install -y apache2 71 | # SHELL 72 | config.vm.provision "ansible" do |ansible| 73 | ansible.playbook = "playbook.provision.yml" 74 | ansible.inventory_path = "local_inventory.ini" 75 | ansible.sudo = true 76 | ansible.verbose = "v" 77 | end 78 | config.vm.define "dbserver" 79 | end 80 | -------------------------------------------------------------------------------- /backup_and_restore_10/features/backup.feature: -------------------------------------------------------------------------------- 1 | Feature: Backup mysql database 2 | 3 | Background: 4 | Given I have a running server 5 | And I provision it 6 | 7 | Scenario: Install automysqlbackup 8 | When I install automysqlbackup 9 | Then it should be successful 10 | 11 | Scenario: Run backup command 12 | When I run backup command 13 | Then it should be successful 14 | And backup folders should exist 15 | 16 | Scenario: Copy backup script to server 17 | When I copy backup script to server 18 | Then it should be successful 19 | And backup script should exist in server 20 | 21 | Scenario: Execute cron task 22 | When I execute cron task 23 | Then it should be successful 24 | -------------------------------------------------------------------------------- /backup_and_restore_10/features/step_definitions/backup_steps.rb: -------------------------------------------------------------------------------- 1 | require 'open3' 2 | 3 | Given(/^I have a running server$/) do 4 | _, _, status = Open3.capture3 "vagrant reload" 5 | 6 | expect(status.success?).to eq(true) 7 | end 8 | 9 | Given(/^I provision it$/) do 10 | _, _, status = Open3.capture3 "vagrant provision" 11 | 12 | expect(status.success?).to eq(true) 13 | end 14 | 15 | When(/^I install automysqlbackup$/) do 16 | cmd = "ansible-playbook -i local_inventory.ini playbook.backup.yml --tags 'install_automysqlbackup'" 17 | 18 | _, _, @status = Open3.capture3 "#{cmd}" 19 | end 20 | 21 | Then(/^it should be successful$/) do 22 | expect(@status.success?).to eq(true) 23 | end 24 | 25 | When(/^I run backup command$/) do 26 | cmd = "ansible-playbook -i local_inventory.ini playbook.backup.yml --tags 'backup_cmd'" 27 | 28 | _, _, @status = Open3.capture3 "#{cmd}" 29 | end 30 | 31 | And(/^backup folders should exist$/) do 32 | output, _, status = Open3.capture3 "vagrant ssh -c 'ls /var/lib/automysqlbackup/'" 33 | 34 | output.split.each do |folder| 35 | _, _, status = Open3.capture3 "vagrant ssh -c 'test -d /var/lib/automysqlbackup/#{folder}'" 36 | 37 | expect(status.success?).to eq(true) 38 | end 39 | 40 | expect(status.success?).to eq(true) 41 | end 42 | 43 | When(/^I copy backup script to server$/) do 44 | cmd = "ansible-playbook -i local_inventory.ini playbook.backup.yml --tags 'copy_backup_script'" 45 | 46 | _, _, @status = Open3.capture3 "#{cmd}" 47 | end 48 | 49 | Then(/^backup script should exist in server$/) do 50 | _, _, status = Open3.capture3 "vagrant ssh -c 'test -f /etc/automysqlbackup/backup.sh'" 51 | end 52 | 53 | When(/^I execute cron task$/) do 54 | cmd = "ansible-playbook -i local_inventory.ini playbook.backup.yml --tags 'run_cron'" 55 | 
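# Open3.capture3 returns stdout, stderr, and a Process::Status; only the status is kept for the shared "it should be successful" assertion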
56 | _, _, @status = Open3.capture3 "#{cmd}" 57 | end 58 | -------------------------------------------------------------------------------- /backup_and_restore_10/local_inventory.ini: -------------------------------------------------------------------------------- 1 | [dbserver] 2 | 192.168.33.15 ansible_ssh_private_key_file=.vagrant/machines/dbserver/virtualbox/private_key 3 | -------------------------------------------------------------------------------- /backup_and_restore_10/playbook.backup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | - secret_vars.yml 9 | roles: 10 | - backup 11 | - verify_backup_move_to_S3 12 | -------------------------------------------------------------------------------- /backup_and_restore_10/playbook.provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | roles: 9 | - provision 10 | -------------------------------------------------------------------------------- /backup_and_restore_10/prod_inventory.ini: -------------------------------------------------------------------------------- 1 | [nagioshost] 2 | 52.42.87.11 ansible_ssh_private_key_file=cosy-devops-uswest2.pem 3 | -------------------------------------------------------------------------------- /backup_and_restore_10/roles/backup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if automysqlbackup is installed 3 | command: bash -c "dpkg --get-selections | grep automysqlbackup" 4 | ignore_errors: True 5 | register: automysqlbackup_installed 6 | tags: 7 | - install_automysqlbackup 8 | 9 | - name: Install automysqlbackup 10 | apt: name=automysqlbackup state=present 11 | when: automysqlbackup_installed | failed 12 | tags: 13 | - install_automysqlbackup 14 | 15 | # Change the default CREATE_DATABASE option to NO so the backup file can be used in any database 16 | - name: Edit automysqlbackup configuration 17 | lineinfile: 18 | dest: /etc/default/automysqlbackup 19 | state: present 20 | regexp: "^CREATE_DATABASE=yes$" 21 | line: "CREATE_DATABASE=no" 22 | backrefs: yes 23 | tags: 24 | - install_automysqlbackup 25 | 26 | # Ubuntu installs a cron script with this program that will run it every day. 27 | # It will organize the files to the appropriate directory. 
28 | - name: Run backup command 29 | command: automysqlbackup 30 | tags: 31 | - backup_cmd 32 | -------------------------------------------------------------------------------- /backup_and_restore_10/roles/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update apt cache 3 | apt: update_cache=yes 4 | 5 | - name: Install python-pip 6 | apt: name=python-pip state=present 7 | 8 | - name: Install boto 9 | pip: name=boto state=present 10 | 11 | - name: Install python-mysqldb 12 | apt: name=python-mysqldb state=present 13 | 14 | - name: Install mysql-utilities 15 | apt: name=mysql-utilities state=present 16 | -------------------------------------------------------------------------------- /backup_and_restore_10/roles/verify_backup_move_to_S3/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy backup script to server 4 | template: src=backup.sh.j2 dest="{{ansible_env.PWD}}/backup.sh" mode=0755 5 | tags: 6 | - copy_backup_script 7 | 8 | - name: Cron job to verify backup and upload to S3 9 | cron: 10 | name: "verify automysqlbackup and copy to S3" 11 | special_time: daily 12 | job: "{{ansible_env.PWD}}/backup.sh" 13 | state: present 14 | tags: 15 | - run_cron 16 | -------------------------------------------------------------------------------- /backup_and_restore_10/roles/verify_backup_move_to_S3/templates/backup.sh.j2: -------------------------------------------------------------------------------- 1 | MYSQL_CMD="mysql -u root -p{{ mysql_password }}" 2 | 3 | DBEXISTS="sudo test -d /var/lib/mysql/{{ test_backup_db }}" 4 | 5 | # Create test database if it does not exist 6 | if ! $DBEXISTS; then 7 | echo "create database {{ test_backup_db }}" | $MYSQL_CMD 8 | fi 9 | 10 | DB_BACKUP_DIR="/var/lib/automysqlbackup/daily/{{ main_database_name }}" 11 | 12 | cd $DB_BACKUP_DIR 13 | 14 | # Get latest database backup file 15 | LATEST_DB_BACKUP=$(ls | sort -n -t _ -k 2 | tail -1) 16 | 17 | # Extract database backup file 18 | sudo gunzip $LATEST_DB_BACKUP 19 | 20 | # Get latest database backup file that was just uncompressed 21 | LATEST_DB_BACKUP=$(ls | sort -n -t _ -k 2 | tail -1) 22 | 23 | # Import backup file to test database 24 | $MYSQL_CMD {{ test_backup_db }} < $LATEST_DB_BACKUP 25 | 26 | # Compare backup 27 | BACKUP_COMPARE="mysqldbcompare --server1=root:{{ mysql_password }}@localhost {{ main_database_name }}:{{ test_backup_db }} --run-all-tests" 28 | 29 | # Move backup to S3 if the previous command was successful 30 | if $BACKUP_COMPARE; then 31 | echo "Moving backup to S3" 32 | aws s3 cp $DB_BACKUP_DIR/$LATEST_DB_BACKUP s3://{{ s3_bucket }}/ 33 | 34 | echo "Removing latest backup from server" 35 | # Delete backup from server 36 | sudo rm -f $LATEST_DB_BACKUP 37 | fi 38 | -------------------------------------------------------------------------------- /backup_and_restore_10/secret_vars.example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mysql_password: XXXXXXXX 3 | aws_access_key: XXXXXXXX 4 | aws_secret_key: XXXXXXXX 5 | -------------------------------------------------------------------------------- /backup_and_restore_10/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | host_user: ubuntu 4 | main_database_name: events 5 | test_backup_db: my_test_db 6 | s3_bucket: kosy-backup 7 | 
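The steps in _RECOVERY_PLAN.md_ can be exercised with plain CLI tools. Below is a minimal sketch of the restore path, assuming the `s3_bucket` value from _vars.yml_ and a compressed dump produced by automysqlbackup; the `restore_check` database name is purely illustrative:

```
# fetch the most recent backup object (aws s3 ls lines start with the
# last-modified timestamp, so a lexical sort is chronological)
LATEST=$(aws s3 ls s3://kosy-backup/ | sort | tail -1 | awk '{print $4}')
aws s3 cp "s3://kosy-backup/${LATEST}" .

# decompress and load the dump into a scratch database to prove it restores cleanly
gunzip "${LATEST}"
mysql -u root -p -e 'CREATE DATABASE IF NOT EXISTS restore_check'
mysql -u root -p restore_check < "${LATEST%.gz}"
```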
-------------------------------------------------------------------------------- /bash_scripting/basics.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # for loop 4 | for i in $( ls ); do 5 | echo item: $i 6 | done 7 | 8 | echo 9 | 10 | # while loop 11 | COUNTER=0 12 | while [ $COUNTER -lt 10 ]; do 13 | echo while loop counter is $COUNTER 14 | let COUNTER=COUNTER+1 15 | done 16 | 17 | echo 18 | 19 | # until loop 20 | COUNTER=20 21 | until [ $COUNTER -lt 10 ]; do 22 | echo until loop COUNTER is $COUNTER 23 | let COUNTER-=1 24 | done 25 | 26 | # Using select to make simple menus 27 | OPTIONS="Hello Quit" 28 | select opt in $OPTIONS; do 29 | if [ "$opt" = "Quit" ]; then 30 | echo done 31 | exit 32 | elif [ "$opt" = "Hello" ]; then 33 | echo Hello World 34 | else 35 | clear 36 | echo Bad option 37 | fi 38 | done 39 | 40 | # Using the command line 41 | if [ -z "$1" ]; then 42 | echo usage: $0 directory 43 | exit 44 | fi 45 | 46 | # Reading user input with read 47 | echo Please enter your name 48 | read NAME 49 | echo Hi $NAME 50 | -------------------------------------------------------------------------------- /build_management_4/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure(2) do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://atlas.hashicorp.com/search. 15 | config.vm.box = "concourse/lite" 16 | 17 | # Disable automatic box update checking. If you disable this, then 18 | # boxes will only be checked for updates when the user runs 19 | # `vagrant box outdated`. This is not recommended. 20 | # config.vm.box_check_update = false 21 | 22 | # Create a forwarded port mapping which allows access to a specific port 23 | # within the machine from a port on the host machine. In the example below, 24 | # accessing "localhost:8080" will access port 80 on the guest machine. 25 | # config.vm.network "forwarded_port", guest: 80, host: 8080 26 | 27 | # Create a private network, which allows host-only access to the machine 28 | # using a specific IP. 29 | # config.vm.network "private_network", ip: "192.168.33.10" 30 | 31 | # Create a public network, which generally matched to bridged network. 32 | # Bridged networks make the machine appear as another physical device on 33 | # your network. 34 | # config.vm.network "public_network" 35 | 36 | # Share an additional folder to the guest VM. The first argument is 37 | # the path on the host to the actual folder. The second argument is 38 | # the path on the guest to mount the folder. And the optional third 39 | # argument is a set of non-required options. 40 | # config.vm.synced_folder "../data", "/vagrant_data" 41 | 42 | # Provider-specific configuration so you can fine-tune various 43 | # backing providers for Vagrant. These expose provider-specific options. 
44 | # Example for VirtualBox: 45 | # 46 | config.vm.provider "virtualbox" do |vb| 47 | # # Display the VirtualBox GUI when booting the machine 48 | # vb.gui = true 49 | # 50 | # Customize the amount of memory on the VM: 51 | vb.memory = "1024" 52 | end 53 | # 54 | # View the documentation for the provider you are using for more 55 | # information on available options. 56 | 57 | # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies 58 | # such as FTP and Heroku are also available. See the documentation at 59 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 60 | # config.push.define "atlas" do |push| 61 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 62 | # end 63 | 64 | # Enable provisioning with a shell script. Additional provisioners such as 65 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 66 | # documentation for more information about their specific syntax and use. 67 | # config.vm.provision "shell", inline: <<-SHELL 68 | # sudo apt-get update 69 | # sudo apt-get install -y apache2 70 | # SHELL 71 | end 72 | -------------------------------------------------------------------------------- /build_management_4/hello.yml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - name: hello-world 3 | plan: 4 | - task: say-hello 5 | config: 6 | platform: linux 7 | image_resource: 8 | type: docker-image 9 | source: {repository: ubuntu} 10 | run: 11 | path: echo 12 | args: ["Hello, world!"] 13 | -------------------------------------------------------------------------------- /build_management_4/navi-pipeline.yml: -------------------------------------------------------------------------------- 1 | resources: 2 | - name: every-1m 3 | type: time 4 | source: {interval: 1m} 5 | 6 | jobs: 7 | - name: navi 8 | plan: 9 | - get: every-1m 10 | trigger: true 11 | - task: annoy 12 | config: 13 | platform: linux 14 | image_resource: 15 | type: docker-image 16 | source: {repository: ubuntu} 17 | run: 18 | path: echo 19 | args: ["Hey! Listen!"] 20 | -------------------------------------------------------------------------------- /config_management_3/Gemfile: -------------------------------------------------------------------------------- 1 | # A sample Gemfile 2 | source "https://rubygems.org" 3 | 4 | # gem "rails" 5 | gem 'rspec' 6 | gem 'cucumber' 7 | -------------------------------------------------------------------------------- /config_management_3/README.md: -------------------------------------------------------------------------------- 1 | # Assessment 03 - Configuration Management 2 | 3 | This project sets up a MEAN stack environment using ansible to test knowledge of the configuration management learning outcome. 4 | 5 | ### Testing Locally 6 | **Install the following on your mac:** 7 | 8 | - VirtualBox: _brew cask install virtualbox_ 9 | - Vagrant: _brew cask install vagrant_ 10 | - Python: _brew install python_ 11 | - Ansible: _pip install ansible_ 12 | - Ruby: _brew install rbenv ruby-build_ 13 | 14 | Note that Ruby and Python are available by default on macs. Be sure to verify that. 15 | 16 | If you are using rbenv, do this in the terminal. 
17 | 18 | ``` 19 | echo 'if which rbenv > /dev/null; then eval "$(rbenv init -)"; fi' >> ~/.bash_profile 20 | source ~/.bash_profile 21 | ``` 22 | - Install Ruby 23 | ``` 24 | rbenv install 2.3.0 25 | rbenv global 2.3.0 26 | ruby -v 27 | ``` 28 | 29 | **Clone the project** 30 | ``` 31 | $ git clone https://github.com/andela-kanyanwu/devops-exercises.git 32 | ``` 33 | 34 | **Set it up** 35 | ``` 36 | $ cd devops-exercises/config_management_assessment_3/ 37 | $ vagrant up 38 | $ vagrant ssh 39 | ``` 40 | 41 | Switch to another terminal in your local machine, not inside your VM, run 42 | ``` 43 | $ bundle install 44 | $ cucumber features/install.feature 45 | ``` 46 | This runs all the tests and installs everything using ansible in your virtual machine. 47 | -------------------------------------------------------------------------------- /config_management_3/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure(2) do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://atlas.hashicorp.com/search. 15 | config.vm.box = "ubuntu/trusty64" 16 | 17 | # Disable automatic box update checking. If you disable this, then 18 | # boxes will only be checked for updates when the user runs 19 | # `vagrant box outdated`. This is not recommended. 20 | # config.vm.box_check_update = false 21 | 22 | # Create a forwarded port mapping which allows access to a specific port 23 | # within the machine from a port on the host machine. In the example below, 24 | # accessing "localhost:8080" will access port 80 on the guest machine. 25 | # config.vm.network "forwarded_port", guest: 80, host: 8080 26 | 27 | # Create a private network, which allows host-only access to the machine 28 | # using a specific IP. 29 | config.vm.network "private_network", ip: "192.168.33.10" 30 | 31 | # Create a public network, which generally matched to bridged network. 32 | # Bridged networks make the machine appear as another physical device on 33 | # your network. 34 | # config.vm.network "public_network" 35 | 36 | # Share an additional folder to the guest VM. The first argument is 37 | # the path on the host to the actual folder. The second argument is 38 | # the path on the guest to mount the folder. And the optional third 39 | # argument is a set of non-required options. 40 | # config.vm.synced_folder "../data", "/vagrant_data" 41 | 42 | # Provider-specific configuration so you can fine-tune various 43 | # backing providers for Vagrant. These expose provider-specific options. 44 | # Example for VirtualBox: 45 | # 46 | # config.vm.provider "virtualbox" do |vb| 47 | # # Display the VirtualBox GUI when booting the machine 48 | # vb.gui = true 49 | # 50 | # # Customize the amount of memory on the VM: 51 | # vb.memory = "1024" 52 | # end 53 | # 54 | # View the documentation for the provider you are using for more 55 | # information on available options. 56 | 57 | # Define a Vagrant Push strategy for pushing to Atlas. 
Other push strategies 58 | # such as FTP and Heroku are also available. See the documentation at 59 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 60 | # config.push.define "atlas" do |push| 61 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 62 | # end 63 | 64 | # Enable provisioning with a shell script. Additional provisioners such as 65 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 66 | # documentation for more information about their specific syntax and use. 67 | # config.vm.provision "shell", inline: <<-SHELL 68 | # sudo apt-get update 69 | # sudo apt-get install -y apache2 70 | # SHELL 71 | config.vm.provision "ansible" do |ansible| 72 | ansible.playbook = "playbook.provision.yml" 73 | ansible.inventory_path = "inventory.ini" 74 | ansible.sudo = true 75 | ansible.verbose = "v" 76 | end 77 | config.vm.define "meanserver" 78 | config.vm.hostname = "meanserver" 79 | end 80 | -------------------------------------------------------------------------------- /config_management_3/features/install.feature: -------------------------------------------------------------------------------- 1 | Feature: Provision and Install 2 | 3 | Background: 4 | Given I have a running server 5 | And I provision it 6 | 7 | #Scenario: Install MongoDB 8 | # When I install MongoDB 9 | # Then it should be successful 10 | # And MongoDB should be running 11 | 12 | Scenario: Install NodeJS 13 | When I install NodeJs 14 | Then it should be successful 15 | -------------------------------------------------------------------------------- /config_management_3/features/step_definitions/install_steps.rb: -------------------------------------------------------------------------------- 1 | require 'open3' 2 | 3 | Given(/^I have a running server$/) do 4 | _, _, status = Open3.capture3 "unset RUBYLIB; vagrant reload" 5 | 6 | expect(status.success?).to eq(true) 7 | end 8 | 9 | Given(/^I provision it$/) do 10 | _, _, status = Open3.capture3 "unset RUBYLIB; vagrant provision" 11 | 12 | expect(status.success?).to eq(true) 13 | end 14 | 15 | When(/^I install MongoDB$/) do 16 | cmd = "ansible-playbook -i inventory.ini --private-key=.vagrant/machines/meanserver/virtualbox/private_key -u vagrant playbook.mean.yml --tags 'mongodb_setup'" 17 | 18 | output, error, @status = Open3.capture3 "#{cmd}" 19 | end 20 | 21 | Then(/^it should be successful$/) do 22 | expect(@status.success?).to eq(true) 23 | end 24 | 25 | Then(/^MongoDB should be running$/) do 26 | output, error, status = Open3.capture3 "unset RUBYLIB; vagrant ssh -c 'sudo service mongod status'" 27 | 28 | expect(status.success?).to eq(true) 29 | expect(output).to match("mongod start/running") 30 | end 31 | 32 | When(/^I install NodeJs$/) do 33 | pending # Write code here that turns the phrase above into concrete actions 34 | end 35 | -------------------------------------------------------------------------------- /config_management_3/inventory.ini: -------------------------------------------------------------------------------- 1 | [meanserver] 2 | 192.168.33.10 3 | -------------------------------------------------------------------------------- /config_management_3/playbook.mean.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | user: vagrant 4 | become: yes 5 | become_method: sudo 6 | roles: 7 | - { role: mongodb, tags: 'mongodb_setup' } 8 | - { role: nodejs, tags: 'nodejs_setup' } 9 | 
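Because every role in this play is tagged, a single component can be reinstalled in isolation against the running Vagrant box. The MongoDB variant of this command appears verbatim in _features/step_definitions/install_steps.rb_:

```
ansible-playbook -i inventory.ini -u vagrant \
  --private-key=.vagrant/machines/meanserver/virtualbox/private_key \
  playbook.mean.yml --tags 'mongodb_setup'
```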
-------------------------------------------------------------------------------- /config_management_3/playbook.provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | user: vagrant 4 | become: yes 5 | become_method: sudo 6 | roles: 7 | - provision 8 | -------------------------------------------------------------------------------- /config_management_3/requirements.txt: -------------------------------------------------------------------------------- 1 | ansible 2.0.2.0 2 | -------------------------------------------------------------------------------- /config_management_3/roles/mongodb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if MongoDB is installed 3 | command: bash -c "dpkg --get-selections | grep mongodb-org" 4 | register: mongodb_installed 5 | ignore_errors: True 6 | 7 | - name: Install necessary packages 8 | apt: name={{item}} state=present force=yes 9 | when: mongodb_installed | failed 10 | with_items: 11 | - mongodb-org 12 | - git 13 | - build-essential 14 | - openssl 15 | - libssl-dev 16 | - pkg-config 17 | 18 | - name: Start mongoDB 19 | service: name=mongod state=started enabled=yes 20 | -------------------------------------------------------------------------------- /config_management_3/roles/nodejs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if NodeJS is installed 3 | command: bash -c "dpkg --get-selections | grep nodejs" 4 | register: nodejs_installed 5 | ignore_errors: True 6 | 7 | - name: Install prerequisites 8 | apt: name=curl state=present force=yes 9 | when: nodejs_installed | failed 10 | 11 | - name: Install nodejs 12 | -------------------------------------------------------------------------------- /config_management_3/roles/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get MongoDB GPG key ID 3 | command: apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 4 | 5 | - name: Add MongoDB repository reference 6 | apt_repository: 7 | repo: deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen 8 | state: present 9 | 10 | - name: Update apt cache 11 | apt: update_cache=yes 12 | 13 | - name: Update OS 14 | apt: upgrade=dist force=yes 15 | -------------------------------------------------------------------------------- /iaas_7/README.md: -------------------------------------------------------------------------------- 1 | # Assessment 07 - Infrastructure as a Service 2 | 3 | An exercise that covers the following operations using Ansible: 4 | - Hosting a static website on Amazon AWS using Amazon S3 5 | - Creating a Server-Side encrypted S3 bucket on Amazon AWS and upload data to it 6 | - Creating a machine image on Amazon AWS 7 | - Creating a VPC with a private & public subnet on Amazon AWS using AWS Cloudformation 8 | - Launching a machine with the machine image created previously into the private subnet created above 9 | - Creating an autoscaling group 10 | 11 | Requirements to run the playbooks 12 | * python 13 | * pip 14 | * ansible 15 | * boto 16 | * aws cli 17 | -------------------------------------------------------------------------------- /iaas_7/files/error.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |
<html> <head> </head> <body> This is an error page. </body> </html> 5 | 6 | 7 | -------------------------------------------------------------------------------- /iaas_7/files/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | <html> <head> </head> <body> Hello, World! </body> </html>
5 | 6 | 7 | -------------------------------------------------------------------------------- /iaas_7/playbook.ami.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | vars_files: 5 | - "secret_vars.yml" 6 | roles: 7 | - aws_ami 8 | -------------------------------------------------------------------------------- /iaas_7/playbook.autoscale_group.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Deletes autoscale group and launch configuration 3 | - hosts: localhost 4 | connection: local 5 | vars_files: 6 | - "secret_vars.yml" 7 | roles: 8 | - autoscale_group 9 | -------------------------------------------------------------------------------- /iaas_7/playbook.cleanup_autoscaling.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | vars_files: 5 | - "secret_vars.yml" 6 | roles: 7 | - cleanup_autoscaling 8 | -------------------------------------------------------------------------------- /iaas_7/playbook.cleanup_cloudformation.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Master playbook to remove a cloudformation stack 3 | - hosts: localhost 4 | connection: local 5 | vars_files: 6 | - "secret_vars.yml" 7 | roles: 8 | - cleanup_cloudformation 9 | -------------------------------------------------------------------------------- /iaas_7/playbook.cloudformation.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | vars_files: 5 | - "secret_vars.yml" 6 | roles: 7 | - cloudformation 8 | -------------------------------------------------------------------------------- /iaas_7/playbook.delete_s3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | gather_facts: no 5 | vars_files: 6 | - "secret_vars.yml" 7 | roles: 8 | - cleanup_s3 9 | -------------------------------------------------------------------------------- /iaas_7/playbook.destroy_ami.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Master playbook to deregister an AMI and delete associated snapshot 3 | - hosts: localhost 4 | connection: local 5 | gather_facts: no 6 | vars_files: 7 | - "secret_vars.yml" 8 | roles: 9 | - cleanup_ami 10 | -------------------------------------------------------------------------------- /iaas_7/playbook.s3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | vars_files: 5 | - "secret_vars.yml" 6 | roles: 7 | - s3 8 | -------------------------------------------------------------------------------- /iaas_7/policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Id": "PutObjPolicy", 4 | "Statement": [ 5 | { 6 | "Sid": "DenyIncorrectEncryptionHeader", 7 | "Effect": "Deny", 8 | "Principal": "*", 9 | "Action": "s3:PutObject", 10 | "Resource": "arn:aws:s3:::kosybucket/*", 11 | "Condition": { 12 | "StringNotEquals": { 13 | "s3:x-amz-server-side-encryption": "AES256" 14 | } 15 | } 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /iaas_7/roles/autoscale_group/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | - name: Create autoscaling launch configuration 2 | ec2_lc: 3 | name: "{{ autoscale_launch_config_name }}" 4 | image_id: "{{ image }}" 5 | region: "{{ region }}" 6 | instance_type: "{{ instance_type }}" 7 | assign_public_ip: yes 8 | aws_access_key: "{{ aws_access_key }}" 9 | aws_secret_key: "{{ aws_secret_key }}" 10 | 11 | - name: Create autoscaling group 12 | ec2_asg: 13 | name: "{{ autoscaling_group_name }}" 14 | launch_config_name: "{{ autoscale_launch_config_name }}" 15 | region: "{{ region }}" 16 | min_size: 1 17 | max_size: 10 18 | desired_capacity: 1 19 | vpc_zone_identifier: [ "{{ vpc_subnet }}" ] 20 | aws_access_key: "{{ aws_access_key }}" 21 | aws_secret_key: "{{ aws_secret_key }}" 22 | -------------------------------------------------------------------------------- /iaas_7/roles/aws_ami/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Task to create an EBS backed AMI from an EC2 instance 3 | - name: Launch an EC2 instance 4 | ec2: 5 | aws_access_key: "{{ aws_access_key }}" 6 | aws_secret_key: "{{ aws_secret_key }}" 7 | instance_type: "{{ instance_type }}" 8 | image: "{{ image }}" 9 | wait: true 10 | region: "{{ region }}" 11 | register: ec2 12 | 13 | - name: Create an AMI from our EC2 instance 14 | ec2_ami: 15 | aws_access_key: "{{ aws_access_key }}" 16 | aws_secret_key: "{{ aws_secret_key }}" 17 | instance_id: "{{ item.id }}" 18 | wait: yes 19 | name: "{{ ec2_ami_name }}" 20 | region: "{{ region }}" 21 | tags: 22 | Name: "{{ ec2_ami_name }}" 23 | Service: KosyTestService 24 | with_items: 25 | - "{{ ec2.instances }}" 26 | -------------------------------------------------------------------------------- /iaas_7/roles/cleanup_ami/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Terminate an ec2 instance 3 | ec2: 4 | aws_access_key: "{{ aws_access_key }}" 5 | aws_secret_key: "{{ aws_secret_key }}" 6 | state: "absent" 7 | instance_ids: "{{ ec2_instance_id }}" 8 | region: "{{ region }}" 9 | 10 | - name: Deregister AMI (delete associated snapshots) 11 | ec2_ami: 12 | aws_access_key: "{{ aws_access_key }}" 13 | aws_secret_key: "{{ aws_secret_key }}" 14 | region: "{{ region }}" 15 | image_id: "{{ ami_id }}" 16 | delete_snapshot: True 17 | state: absent 18 | -------------------------------------------------------------------------------- /iaas_7/roles/cleanup_autoscaling/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Delete autoscaling group 2 | ec2_asg: 3 | name: "{{ autoscaling_group_name }}" 4 | region: "{{ region }}" 5 | vpc_zone_identifier: [ "{{ vpc_subnet }}" ] 6 | state: absent 7 | aws_access_key: "{{ aws_access_key }}" 8 | aws_secret_key: "{{ aws_secret_key }}" 9 | 10 | - name: Delete launch configuration 11 | ec2_lc: 12 | name: "{{ autoscale_launch_config_name }}" 13 | region: "{{ region }}" 14 | state: absent 15 | aws_access_key: "{{ aws_access_key }}" 16 | aws_secret_key: "{{ aws_secret_key }}" 17 | -------------------------------------------------------------------------------- /iaas_7/roles/cleanup_cloudformation/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Remove cloudformation stack 2 | cloudformation: 3 | stack_name: "{{ cloudformation_stack_name }}" 4 | state: "absent" 5 | 
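Note that, unlike the create task in the cloudformation role, the removal task above passes no `region` or credential parameters, so it falls back to the environment/boto configuration. After the cleanup playbooks run, the AWS CLI can confirm that nothing was left behind (a sketch; substitute the stack and group names from your own variable files):

```
# a deleted stack is no longer returned (the call exits with an error)
aws cloudformation describe-stacks --stack-name <cloudformation_stack_name>

# the autoscaling group listing should come back empty
aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names <autoscaling_group_name>
```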
-------------------------------------------------------------------------------- /iaas_7/roles/cleanup_s3/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete an S3 bucket and all its contents 3 | s3: 4 | bucket: "{{ bucket_name }}" 5 | mode: delete 6 | aws_access_key: "{{ aws_access_key }}" 7 | aws_secret_key: "{{ aws_secret_key }}" 8 | -------------------------------------------------------------------------------- /iaas_7/roles/cloudformation/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Convert cloudformation template from j2 to json 2 | local_action: template src={{ cloudformation_template }}.j2 dest={{ cloudformation_template }} 3 | 4 | # Launch cloudformation template to create a VPC with a private & public subnet 5 | - name: Launch ansible cloudformation template 6 | cloudformation: 7 | stack_name: "{{ cloudformation_stack_name }}" 8 | state: "present" 9 | region: "{{ region }}" 10 | disable_rollback: true 11 | aws_access_key: "{{ aws_access_key }}" 12 | aws_secret_key: "{{ aws_secret_key }}" 13 | template: "{{ cloudformation_template }}" 14 | tags: 15 | Stack: "VPC-ansible-cloudformation" 16 | -------------------------------------------------------------------------------- /iaas_7/roles/cloudformation/templates/cloudformation.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "CloudFormation template for a generic VPC with a public and private subnets", 4 | "Resources": { 5 | "VPC": { 6 | "Type": "AWS::EC2::VPC", 7 | "Properties": { 8 | "EnableDnsSupport": "true", 9 | "EnableDnsHostnames": "true", 10 | "CidrBlock": "10.0.0.0/16", 11 | "Tags": [{ 12 | "Key": "Application", 13 | "Value": { 14 | "Ref": "AWS::StackName" 15 | } 16 | }, { 17 | "Key": "Network", 18 | "Value": "Public" 19 | }] 20 | } 21 | }, 22 | "PrivateSubnet": { 23 | "Type": "AWS::EC2::Subnet", 24 | "Properties": { 25 | "VpcId": { 26 | "Ref": "VPC" 27 | }, 28 | "CidrBlock": "10.0.2.0/24", 29 | "Tags": [{ 30 | "Key": "Application", 31 | "Value": { 32 | "Ref": "AWS::StackName" 33 | } 34 | }, { 35 | "Key": "Network", 36 | "Value": "VPN Connected Subnet" 37 | }] 38 | } 39 | }, 40 | "PublicSubnet": { 41 | "Type": "AWS::EC2::Subnet", 42 | "Properties": { 43 | "VpcId": { 44 | "Ref": "VPC" 45 | }, 46 | "CidrBlock": "10.0.1.0/24", 47 | "Tags": [{ 48 | "Key": "Application", 49 | "Value": { 50 | "Ref": "AWS::StackName" 51 | } 52 | }, { 53 | "Key": "Network", 54 | "Value": "Public" 55 | }] 56 | } 57 | }, 58 | "InternetGateway": { 59 | "Type": "AWS::EC2::InternetGateway" 60 | }, 61 | "GatewayToInternet": { 62 | "Type": "AWS::EC2::VPCGatewayAttachment", 63 | "Properties": { 64 | "VpcId": { 65 | "Ref": "VPC" 66 | }, 67 | "InternetGatewayId": { 68 | "Ref": "InternetGateway" 69 | } 70 | } 71 | }, 72 | "PublicRouteTable": { 73 | "Type": "AWS::EC2::RouteTable", 74 | "Properties": { 75 | "VpcId": { 76 | "Ref": "VPC" 77 | } 78 | } 79 | }, 80 | "PublicRoute": { 81 | "Type": "AWS::EC2::Route", 82 | "DependsOn": "GatewayToInternet", 83 | "Properties": { 84 | "RouteTableId": { 85 | "Ref": "PublicRouteTable" 86 | }, 87 | "DestinationCidrBlock": "0.0.0.0/0", 88 | "GatewayId": { 89 | "Ref": "InternetGateway" 90 | } 91 | } 92 | }, 93 | "PublicSubnetRouteTableAssociation": { 94 | "Type": "AWS::EC2::SubnetRouteTableAssociation", 95 | "Properties": { 96 | "SubnetId": { 97 | "Ref": "PublicSubnet" 98 | }, 99 | 
"RouteTableId": { 100 | "Ref": "PublicRouteTable" 101 | } 102 | } 103 | }, 104 | "PrivateInstance": { 105 | "Type": "AWS::EC2::Instance", 106 | "Properties": { 107 | "InstanceType": "t1.micro", 108 | "ImageId": "ami-b578b8d5", 109 | "NetworkInterfaces": [{ 110 | "AssociatePublicIpAddress": "true", 111 | "DeviceIndex": "0", 112 | "DeleteOnTermination": "true", 113 | "SubnetId": { 114 | "Ref": "PrivateSubnet" 115 | } 116 | }] 117 | } 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /iaas_7/roles/cloudformation/templates/cloudformation.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "CloudFormation template for a generic VPC with a public and private subnets", 4 | "Resources": { 5 | "VPC": { 6 | "Type": "AWS::EC2::VPC", 7 | "Properties": { 8 | "EnableDnsSupport": "true", 9 | "EnableDnsHostnames": "true", 10 | "CidrBlock": "{{ VPC_cidrblock }}", 11 | "Tags": [{ 12 | "Key": "Application", 13 | "Value": { 14 | "Ref": "AWS::StackName" 15 | } 16 | }, { 17 | "Key": "Network", 18 | "Value": "Public" 19 | }] 20 | } 21 | }, 22 | "PrivateSubnet": { 23 | "Type": "AWS::EC2::Subnet", 24 | "Properties": { 25 | "VpcId": { 26 | "Ref": "VPC" 27 | }, 28 | "CidrBlock": "{{ private_subnet_cidrblock }}", 29 | "Tags": [{ 30 | "Key": "Application", 31 | "Value": { 32 | "Ref": "AWS::StackName" 33 | } 34 | }, { 35 | "Key": "Network", 36 | "Value": "VPN Connected Subnet" 37 | }] 38 | } 39 | }, 40 | "PublicSubnet": { 41 | "Type": "AWS::EC2::Subnet", 42 | "Properties": { 43 | "VpcId": { 44 | "Ref": "VPC" 45 | }, 46 | "CidrBlock": "{{ public_subnet_cidrblock }}", 47 | "Tags": [{ 48 | "Key": "Application", 49 | "Value": { 50 | "Ref": "AWS::StackName" 51 | } 52 | }, { 53 | "Key": "Network", 54 | "Value": "Public" 55 | }] 56 | } 57 | }, 58 | "InternetGateway": { 59 | "Type": "AWS::EC2::InternetGateway" 60 | }, 61 | "GatewayToInternet": { 62 | "Type": "AWS::EC2::VPCGatewayAttachment", 63 | "Properties": { 64 | "VpcId": { 65 | "Ref": "VPC" 66 | }, 67 | "InternetGatewayId": { 68 | "Ref": "InternetGateway" 69 | } 70 | } 71 | }, 72 | "PublicRouteTable": { 73 | "Type": "AWS::EC2::RouteTable", 74 | "Properties": { 75 | "VpcId": { 76 | "Ref": "VPC" 77 | } 78 | } 79 | }, 80 | "PublicRoute": { 81 | "Type": "AWS::EC2::Route", 82 | "DependsOn": "GatewayToInternet", 83 | "Properties": { 84 | "RouteTableId": { 85 | "Ref": "PublicRouteTable" 86 | }, 87 | "DestinationCidrBlock": "0.0.0.0/0", 88 | "GatewayId": { 89 | "Ref": "InternetGateway" 90 | } 91 | } 92 | }, 93 | "PublicSubnetRouteTableAssociation": { 94 | "Type": "AWS::EC2::SubnetRouteTableAssociation", 95 | "Properties": { 96 | "SubnetId": { 97 | "Ref": "PublicSubnet" 98 | }, 99 | "RouteTableId": { 100 | "Ref": "PublicRouteTable" 101 | } 102 | } 103 | }, 104 | "PrivateInstance": { 105 | "Type": "AWS::EC2::Instance", 106 | "Properties": { 107 | "InstanceType": "{{ instance_type }}", 108 | "ImageId": "{{ ami_id }}", 109 | "NetworkInterfaces": [{ 110 | "AssociatePublicIpAddress": "true", 111 | "DeviceIndex": "0", 112 | "DeleteOnTermination": "true", 113 | "SubnetId": { 114 | "Ref": "PrivateSubnet" 115 | } 116 | }] 117 | } 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /iaas_7/roles/s3/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create an empty S3 bucket 3 | s3: 4 | bucket: "{{ 
bucket_name }}" 5 | mode: create 6 | permission: public-read 7 | aws_access_key: "{{ aws_access_key }}" 8 | aws_secret_key: "{{ aws_secret_key }}" 9 | 10 | - name: Encrypt S3 bucket 11 | s3_bucket: 12 | name: "{{ bucket_name }}" 13 | policy: "{{ lookup('file','policy.json') }}" 14 | aws_access_key: "{{ aws_access_key }}" 15 | aws_secret_key: "{{ aws_secret_key }}" 16 | 17 | - name: Upload files to S3 bucket 18 | s3: 19 | bucket: "{{ bucket_name }}" 20 | aws_access_key: "{{ aws_access_key }}" 21 | aws_secret_key: "{{ aws_secret_key }}" 22 | object: "{{ item.object }}" 23 | src: "{{ item.src }}" 24 | mode: put 25 | permission: public-read 26 | with_items: 27 | - { object: index.html, src: files/index.html } 28 | - { object: error.html, src: files/error.html } 29 | 30 | - name: Configure bucket as static website 31 | command: "aws s3 website s3://{{ bucket_name }}/ --index-document index.html --error-document error.html" 32 | -------------------------------------------------------------------------------- /iaas_7/templates/awscli_config.j2: -------------------------------------------------------------------------------- 1 | [default] 2 | aws_access_key_id = {{ AWS_ACCESS_KEY }} 3 | aws_secret_access_key = {{ AWS_SECRET_KEY }} 4 | 5 | output=json 6 | -------------------------------------------------------------------------------- /monitoring_deployments_9/Gemfile: -------------------------------------------------------------------------------- 1 | # A sample Gemfile 2 | source "https://rubygems.org" 3 | 4 | # gem "rails" 5 | gem 'rspec' 6 | gem 'cucumber' 7 | -------------------------------------------------------------------------------- /monitoring_deployments_9/README.md: -------------------------------------------------------------------------------- 1 | # Outcome 09 - Monitoring Deployments 2 | 3 | Outputs 4 | ------- 5 | 6 | 1. An exercise that covers the following operations: 7 | - Setting up server monitoring for a _production_ environment using Nagios 8 | - Analyzing the collected metrics for 5 days and write a comprehensive report with recommendations 9 | 2. An exercise that covers the following operations: 10 | - Setting up application monitoring for a NodeJS application using New Relic 11 | - Deploying the NodeJS application to a VM on AWS 12 | - Setting up monitoring for the VM using AWS Cloudwatch 13 | 14 | 15 | ### System Requirements 16 | * Python 17 | * Pip 18 | * Ansible > 2.0 19 | * AWS CLI 20 | * Boto 21 | * Ruby 22 | * Cucumber 23 | * Virtual box (to test locally) 24 | 25 | # Deploying to production 26 | 27 | ### Setup 28 | * Create a file called _secret_vars.yml_, copying the contents of _secret_vars.example.yml_. Replace the values in the file with yours. 29 | * Update the variables in _vars.yml_ file with yours particularly the _key_pair_name_. Change _port_ to the port number your node app will run on, and _node_app_repo_ to the link to your application repository on github. 30 | 31 | ### Create EC2 instances 32 | * Run `ansible-playbook playbook.ec2.yml` to create two EC2 instances - One for the server and the other for the host. 33 | * Copy the ip address of the instances and replace the ip addresses in _prod_inventory.ini_ file with that of your instances where _nagiosserver_ is your server and _nagioshost_ is your host. Replace `ansible_ssh_private_key_file` with the path to your aws private key file. Update the ip addresses in your _secret_vars.yml_ also. 
34 | 35 | * [optional] You can run `cucumber features/cloudformation_setup.feature` to verify that the cloudformation stack successfully created your EC2 instances. 36 | 37 | ### Provision the instances 38 | * Run `ansible-playbook playbook.provision.yml -i prod_inventory.ini` to provision both instances. 39 | 40 | ### Set up Node on host server 41 | * Run `ansible-playbook playbook.mean.yml -i prod_inventory.ini` to install Mongo and Node on the host server. 42 | 43 | ### Deploy Node app to host server 44 | * Run `ansible-playbook playbook.node_app.yml -i prod_inventory.ini` to deploy your node app to your host server. 45 | * Visit your application at `host_ip_address:5000` (or the _port_ you set in _vars.yml_). 46 | 47 | ### Configure host server to be monitored by Nagios 48 | * Run `ansible-playbook playbook.nagios_host.yml -i prod_inventory.ini` to configure your host server to be monitored by Nagios. 49 | 50 | ### Configure Nagios server to monitor host server 51 | * Run `ansible-playbook playbook.nagios_server.yml -i prod_inventory.ini` to configure Nagios to monitor the host server. 52 | * Visit the Nagios server at `server_ip_address/nagios`, authenticating with `nagiosadmin` as username and your `nagiosadmin_user_password` from the _secret_vars.yml_ file. 53 | 54 | ### Set up New Relic on host 55 | * Create an account on New Relic. 56 | * Replace the `newrelic_license_key` value with your license key in the `secret_vars.yml` file. 57 | * Run `ansible-playbook playbook.newrelic.yml -i prod_inventory.ini` to install the New Relic agent on the host. 58 | * Add `require('newrelic');` as the first line of your app's main module (a one-liner sketch appears at the end of this README). 59 | * Within a few minutes after installation, you will begin to see data for your app in your New Relic account: from the New Relic menu bar, select APM > Applications > (selected app) > Overview. 60 | 61 | # Testing locally 62 | 63 | ### Setup 64 | * Run `vagrant up`. This will bring up two virtual machines named nagiosserver and nagioshost and provision them. 65 | * Create a file called _secret_vars.yml_, copying the contents of _secret_vars.example.yml_. Replace the values in the file with yours, taking note of _server_ip_ and _host_ip_, which should be those of the virtual machines created. 66 | * Update the variables in the _vars.yml_ file with yours. Change _port_ to the port number your node app will run on and _node_app_repo_ to the link to your application repository on GitHub. Note that _host_user_ should be changed to `vagrant` and not `ubuntu` for testing locally. 67 | 68 | ### Set up Nagios host 69 | * Run `cucumber features/nagios_host_install.feature` to set up the host for Nagios monitoring. 70 | 71 | ### Set up Nagios server 72 | * Run `cucumber features/nagios_server_install.feature` to set up the server to monitor the host. 73 | * Visit the Nagios server at `server_ip_address/nagios`, authenticating with `nagiosadmin` as username and your `nagiosadmin_user_password` from the _secret_vars.yml_ file. 74 | 75 | ### Set up New Relic on host 76 | * Run `cucumber features/new_relic_setup.feature` to install the New Relic agent on the host.
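For the `require('newrelic');` step above, a one-line sketch (an assumption, not part of the repo: it presumes GNU sed and the `server.js` entry point in `/home/ubuntu/app` that the deploy role starts with forever):

```
sed -i "1i require('newrelic');" /home/ubuntu/app/server.js
```

The `newrelic.js` configuration itself is rendered onto the host by the new_relic role from `newrelic.js.j2`.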
77 | -------------------------------------------------------------------------------- /monitoring_deployments_9/Vagrantfile: -------------------------------------------------------------------------------- 1 | Vagrant.configure(2) do |config| 2 | 3 | config.vm.define "nagiosserver" do |nagiosserver| 4 | 5 | nagiosserver.vm.box = "ubuntu/trusty64" 6 | nagiosserver.vm.hostname = "nagiosserver" 7 | nagiosserver.vm.network "private_network", ip: "192.168.33.10" 8 | 9 | nagiosserver.vm.provider "virtualbox" do |vb| 10 | # Customize the amount of memory on the VM: 11 | vb.memory = "1024" 12 | end 13 | 14 | nagiosserver.vm.provision "ansible" do |ansible| 15 | ansible.playbook = "playbook.provision.yml" 16 | ansible.inventory_path = "local_inventory.ini" 17 | ansible.sudo = true 18 | ansible.verbose = "v" 19 | end 20 | 21 | end 22 | 23 | config.vm.define "nagioshost" do |nagioshost| 24 | 25 | nagioshost.vm.box = "ubuntu/trusty64" 26 | nagioshost.vm.hostname = "nagioshost" 27 | nagioshost.vm.network "private_network", ip: "192.168.33.11" 28 | 29 | nagioshost.vm.provider "virtualbox" do |vb| 30 | # Customize the amount of memory on the VM: 31 | vb.memory = "1024" 32 | end 33 | 34 | nagioshost.vm.provision "ansible" do |ansible| 35 | ansible.playbook = "playbook.provision.yml" 36 | ansible.inventory_path = "local_inventory.ini" 37 | ansible.sudo = true 38 | ansible.verbose = "v" 39 | end 40 | 41 | end 42 | 43 | end 44 | -------------------------------------------------------------------------------- /monitoring_deployments_9/features/cloudformation_setup.feature: -------------------------------------------------------------------------------- 1 | Feature: Set up a cloudformation stack to provision EC2 instances 2 | 3 | Scenario: Launch cloudformation stack 4 | When I launch cloudformation stack 5 | Then it should be successful 6 | And two instances should be created 7 | -------------------------------------------------------------------------------- /monitoring_deployments_9/features/nagios_host_install.feature: -------------------------------------------------------------------------------- 1 | Feature: Provision and install Nagios host 2 | 3 | Background: 4 | Given I have a running server nagioshost 5 | And I provision it nagioshost 6 | 7 | Scenario: Install Nagios Plugins and NRPE 8 | When I install Nagios Plugins and NRPE 9 | Then It should be successful 10 | 11 | Scenario: Configure allowed hosts and allowed NRPE commands 12 | When I configure allowed hosts and allowed NRPE commands 13 | Then it should be successful 14 | And nagios-nrpe-server should be running on nagioshost 15 | -------------------------------------------------------------------------------- /monitoring_deployments_9/features/nagios_server_install.feature: -------------------------------------------------------------------------------- 1 | Feature: Provision and install Nagios server 2 | 3 | Background: 4 | Given I have a running server nagiosserver 5 | And I provision it nagiosserver 6 | 7 | Scenario: Install Apache 8 | When I install Apache 9 | Then it should be successful 10 | And apache2 should be running on nagiosserver 11 | And it should be accepting connections on port 80 12 | 13 | Scenario: Install MySQL 14 | When I install MySQL 15 | Then it should be successful 16 | And mysql should be running on nagiosserver 17 | 18 | Scenario: Install PHP 19 | When I install PHP 20 | Then it should be successful 21 | 22 | Scenario: Create Nagios user and group 23 | When I create user and group 24 | Then it should be successful 25 | 
And user should exist 26 | 27 | Scenario: Install build dependencies 28 | When I install build dependencies 29 | Then it should be successful 30 | 31 | Scenario: Install Nagios Core 32 | When I install Nagios core 33 | Then it should be successful 34 | 35 | Scenario: Add wwwdata user to nagios group 36 | When I add wwwdata user to nagios group 37 | Then it should be successful 38 | 39 | Scenario: Install Nagios plugins 40 | When I install Nagios plugins 41 | Then it should be successful 42 | 43 | Scenario: Install NRPE 44 | When I install NRPE 45 | Then it should be successful 46 | And xinetd startup script should be updated 47 | And xinetd should be running on nagiosserver 48 | 49 | Scenario: Edit Nagios configuration 50 | When I edit Nagios configuration 51 | Then it should be successful 52 | And a server configuration directory should exist 53 | 54 | Scenario: Configure Nagios Contacts 55 | When I configure nagios contacts 56 | Then it should be successful 57 | 58 | Scenario: Configure check_nrpe Command 59 | When I configure check_nrpe command 60 | Then it should be successful 61 | 62 | Scenario: Configure apache 63 | When I configure apache 64 | Then it should be successful 65 | And nagios should be running on nagiosserver 66 | And apache2 should be running on nagiosserver 67 | 68 | Scenario: Add Host to Nagios Configuration 69 | When I add host to nagios configuration 70 | Then it should be successful 71 | And nagios should be running on nagiosserver 72 | -------------------------------------------------------------------------------- /monitoring_deployments_9/features/new_relic_setup.feature: -------------------------------------------------------------------------------- 1 | Feature: Set up New Relic on host 2 | 3 | Background: 4 | Given I have a running server nagioshost 5 | And I provision it nagioshost 6 | 7 | Scenario: Install newrelic 8 | When I install newrelic 9 | Then It should be successful 10 | 11 | Scenario: Edit newrelic file 12 | When I edit newrelic file 13 | Then it should be successful 14 | -------------------------------------------------------------------------------- /monitoring_deployments_9/features/step_definitions/cloudformation_steps.rb: -------------------------------------------------------------------------------- 1 | require 'open3' 2 | require 'json' 3 | 4 | When(/^I launch cloudformation stack$/) do 5 | cmd = "ansible-playbook playbook.ec2.yml" 6 | 7 | _, _, @status = Open3.capture3 "#{cmd}" 8 | end 9 | 10 | Then(/^two instances should be created$/) do 11 | cmd = "aws cloudformation list-stack-resources --stack-name EC2 --region us-west-2" 12 | output, _, _ = Open3.capture3 "#{cmd}" 13 | output = JSON.parse(output) 14 | 15 | summary = output["StackResourceSummaries"] 16 | expect(summary.size).to eq(2) 17 | expect(summary.all? 
{ |r| r["ResourceType"] == "AWS::EC2::Instance" }).to be true 18 | end 19 | -------------------------------------------------------------------------------- /monitoring_deployments_9/features/step_definitions/nagios_host_install_steps.rb: -------------------------------------------------------------------------------- 1 | require 'open3' 2 | 3 | When(/^I install Nagios Plugins and NRPE$/) do 4 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_host.yml --tags 'nagios_plugins_install'" 5 | 6 | _, _, @status = Open3.capture3 "#{cmd}" 7 | end 8 | 9 | Then(/^It should be successful$/) do 10 | expect(@status.success?).to eq(true) 11 | end 12 | 13 | When(/^I configure allowed hosts and allowed NRPE commands$/) do 14 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_host.yml --tags 'allowed_host_configure'" 15 | 16 | _, _, @status = Open3.capture3 "#{cmd}" 17 | end 18 | -------------------------------------------------------------------------------- /monitoring_deployments_9/features/step_definitions/nagios_server_install_steps.rb: -------------------------------------------------------------------------------- 1 | require 'open3' 2 | 3 | Given(/^I have a running server ([^"]*)$/) do |server| 4 | _, _, status = Open3.capture3 "unset RUBYLIB; vagrant reload #{server}" 5 | 6 | expect(status.success?).to eq(true) 7 | end 8 | 9 | Given(/^I provision it ([^"]*)$/) do |server| 10 | _, _, status = Open3.capture3 "unset RUBYLIB; vagrant provision #{server}" 11 | 12 | expect(status.success?).to eq(true) 13 | end 14 | 15 | When(/^I install Apache$/) do 16 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'apache_setup'" 17 | 18 | _, _, @status = Open3.capture3 "#{cmd}" 19 | end 20 | 21 | Then(/^it should be successful$/) do 22 | expect(@status.success?).to eq(true) 23 | end 24 | 25 | And(/^([^"]*) should be running on ([^"]*)$/) do |pkg, server| 26 | case pkg 27 | when 'apache2', 'mysql', 'xinetd', 'nagios', 'nagios-nrpe-server' 28 | output, _, status = Open3.capture3 "unset RUBYLIB; vagrant ssh #{server} -c 'sudo service #{pkg} status'" 29 | expect(status.success?).to eq(true) 30 | 31 | if ['apache2', 'nagios'].include? 
pkg 32 | expect(output.chomp).to match(Regexp.new("#{pkg}\s(\\(\\w+\s\\d+\\)\s)?is\srunning(\.+)?")) 33 | elsif pkg == 'nagios-nrpe-server' 34 | expect(output).to match("nagios-nrpe is running") 35 | else 36 | expect(output).to match("#{pkg} start/running") 37 | end 38 | 39 | else 40 | raise 'Not Implemented' 41 | end 42 | end 43 | 44 | And(/^it should be accepting connections on port (\d+)$/) do |port| 45 | _, _, status = Open3.capture3 "unset RUBYLIB; vagrant ssh nagiosserver -c 'curl -f http://localhost:#{port}'" 46 | 47 | expect(status.success?).to eq(true) 48 | end 49 | 50 | When(/^I install MySQL$/) do 51 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'mysql_setup'" 52 | 53 | _, _, @status = Open3.capture3 "#{cmd}" 54 | end 55 | 56 | When(/^I install PHP$/) do 57 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'php_setup'" 58 | 59 | _, _, @status = Open3.capture3 "#{cmd}" 60 | end 61 | 62 | When(/^I create user and group$/) do 63 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'nagios_user_setup'" 64 | 65 | _, _, @status = Open3.capture3 "#{cmd}" 66 | end 67 | 68 | And(/^user should exist$/) do 69 | _, _, status = Open3.capture3 "unset RUBYLIB; vagrant ssh nagiosserver -c 'getent passwd nagios'" 70 | 71 | expect(status.success?).to eq(true) 72 | end 73 | 74 | When(/^I install build dependencies$/) do 75 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'build_dependencies'" 76 | 77 | _, _, @status = Open3.capture3 "#{cmd}" 78 | end 79 | 80 | When(/^I install Nagios core$/) do 81 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'nagios_core_setup'" 82 | 83 | _, _, @status = Open3.capture3 "#{cmd}" 84 | end 85 | 86 | When(/^I add wwwdata user to nagios group$/) do 87 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'add_wwwdata_nagios'" 88 | 89 | _, _, @status = Open3.capture3 "#{cmd}" 90 | end 91 | 92 | When(/^I install Nagios plugins$/) do 93 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'nagios_plugins_setup'" 94 | 95 | _, _, @status = Open3.capture3 "#{cmd}" 96 | end 97 | 98 | When(/^I install NRPE$/) do 99 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'nrpe_setup'" 100 | 101 | _, _, @status = Open3.capture3 "#{cmd}" 102 | end 103 | 104 | Then(/^xinetd startup script should be updated$/) do 105 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'xinetd_script_setup'" 106 | 107 | _, _, @status = Open3.capture3 "#{cmd}" 108 | end 109 | 110 | When(/^I edit Nagios configuration$/) do 111 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'nagios_configure'" 112 | 113 | _, _, @status = Open3.capture3 "#{cmd}" 114 | end 115 | 116 | And(/^a server configuration directory should exist$/) do 117 | _, _, status = Open3.capture3 "unset RUBYLIB; vagrant ssh nagiosserver -c 'test -d /usr/local/nagios/etc/servers'" 118 | 119 | expect(status.success?).to eq(true) 120 | end 121 | 122 | When(/^I configure nagios contacts$/) do 123 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'nagios_contacts_configure'" 124 | 125 | _, _, @status = Open3.capture3 "#{cmd}" 126 | end 127 | 128 | When(/^I configure check_nrpe command$/) do 129 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 
'check_nrpe_configure'" 130 | 131 | _, _, @status = Open3.capture3 "#{cmd}" 132 | end 133 | 134 | When(/^I configure apache$/) do 135 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'apache_configure'" 136 | 137 | _, _, @status = Open3.capture3 "#{cmd}" 138 | end 139 | 140 | When(/^I add host to nagios configuration$/) do 141 | cmd = "ansible-playbook -i local_inventory.ini playbook.nagios_server.yml --tags 'add_host_to_nagios'" 142 | 143 | _, _, @status = Open3.capture3 "#{cmd}" 144 | end 145 | -------------------------------------------------------------------------------- /monitoring_deployments_9/features/step_definitions/new_relic_steps.rb: -------------------------------------------------------------------------------- 1 | require 'open3' 2 | 3 | When(/^I install newrelic$/) do 4 | cmd = "ansible-playbook -i local_inventory.ini playbook.newrelic.yml --tags 'newrelic_install'" 5 | 6 | output, _, @status = Open3.capture3 "#{cmd}" 7 | end 8 | 9 | When(/^I edit newrelic file$/) do 10 | cmd = "ansible-playbook -i local_inventory.ini playbook.newrelic.yml --tags 'newrelic_file'" 11 | 12 | output, _, @status = Open3.capture3 "#{cmd}" 13 | end 14 | -------------------------------------------------------------------------------- /monitoring_deployments_9/local_inventory.ini: -------------------------------------------------------------------------------- 1 | [nagiosserver] 2 | 192.168.33.10 ansible_ssh_private_key_file=.vagrant/machines/nagiosserver/virtualbox/private_key 3 | 4 | [nagioshost] 5 | 192.168.33.11 ansible_ssh_private_key_file=.vagrant/machines/nagioshost/virtualbox/private_key 6 | -------------------------------------------------------------------------------- /monitoring_deployments_9/playbook.cleanup_cloudformation.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Master playbook to remove a cloudformation stack 3 | - hosts: localhost 4 | connection: local 5 | vars_files: 6 | - secret_vars.yml 7 | - vars.yml 8 | roles: 9 | - cleanup_cloudformation 10 | -------------------------------------------------------------------------------- /monitoring_deployments_9/playbook.ec2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | vars_files: 5 | - vars.yml 6 | - secret_vars.yml 7 | roles: 8 | - ec2 9 | -------------------------------------------------------------------------------- /monitoring_deployments_9/playbook.mean.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nagioshost 3 | remote_user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | roles: 9 | - { role: setup, tags: setup } 10 | - node 11 | - mongo 12 | -------------------------------------------------------------------------------- /monitoring_deployments_9/playbook.nagios_host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nagioshost 3 | user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | - secret_vars.yml 9 | roles: 10 | - nagios_host 11 | -------------------------------------------------------------------------------- /monitoring_deployments_9/playbook.nagios_server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nagiosserver 3 | user: "{{ host_user }}" 4 | become: yes 5 | 
become_method: sudo 6 | vars: 7 | nagcmd_group: nagcmd 8 | nagios_group: nagios 9 | nagios_user: nagios 10 | nagiosadmin_user: nagiosadmin 11 | vars_files: 12 | - vars.yml 13 | - secret_vars.yml 14 | roles: 15 | - lamp_stack 16 | - nagios_server 17 | - add_host_to_nagios_server 18 | -------------------------------------------------------------------------------- /monitoring_deployments_9/playbook.newrelic.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Playbook to set up new relic for monitoring a nodejs application on AWS 3 | # This assumes node js and npm are already installed on the host 4 | # Add require('newrelic'); as the first line of your app's main module after running this playbook. 5 | - hosts: nagioshost 6 | remote_user: "{{ host_user }}" 7 | become: yes 8 | become_method: sudo 9 | vars_files: 10 | - vars.yml 11 | - secret_vars.yml 12 | roles: 13 | - new_relic 14 | -------------------------------------------------------------------------------- /monitoring_deployments_9/playbook.node_app.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nagioshost 3 | remote_user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | - secret_vars.yml 9 | roles: 10 | - deploy_node_app 11 | -------------------------------------------------------------------------------- /monitoring_deployments_9/playbook.provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | roles: 9 | - provision 10 | -------------------------------------------------------------------------------- /monitoring_deployments_9/prod_inventory.ini: -------------------------------------------------------------------------------- 1 | [nagiosserver] 2 | 52.36.247.182 ansible_ssh_private_key_file=cosy-devops-uswest2.pem 3 | 4 | [nagioshost] 5 | 52.42.87.11 ansible_ssh_private_key_file=cosy-devops-uswest2.pem 6 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/add_host_to_nagios_server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: reload nagios 2 | service: name=nagios state=reloaded 3 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/add_host_to_nagios_server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add host to nagios configuration 3 | template: src=nagios_host.cfg.j2 dest="/usr/local/nagios/etc/servers/{{ nagios_host_name }}.cfg" 4 | notify: 5 | - reload nagios 6 | tags: 7 | - add_host_to_nagios 8 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/add_host_to_nagios_server/templates/nagios_host.cfg.j2: -------------------------------------------------------------------------------- 1 | define host { 2 | use linux-server 3 | host_name {{ nagios_host_name }} 4 | alias {{ nagios_host_description }} 5 | address {{ host_ip }} 6 | max_check_attempts 5 7 | check_period 24x7 8 | notification_interval 30 9 | notification_period 24x7 10 | } 11 | 12 | define service { 13 | use generic-service 14 | host_name {{ nagios_host_name }} 15 | service_description PING 16 | check_command 
check_ping!100.0,20%!500.0,60% 17 | } 18 | 19 | define service { 20 | use generic-service 21 | host_name {{ nagios_host_name }} 22 | service_description SSH 23 | check_command check_ssh 24 | notifications_enabled 0 25 | } 26 | 27 | define service{ 28 | use local-service 29 | host_name {{ nagios_host_name }} 30 | service_description HTTP 31 | check_command check_http! -p {{ port }} 32 | notifications_enabled 0 33 | } 34 | 35 | define service{ 36 | use local-service 37 | host_name {{ nagios_host_name }} 38 | service_description Current Users 39 | check_command check_local_users!20!50 40 | } 41 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/cleanup_cloudformation/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Remove cloudformation stack 2 | cloudformation: 3 | aws_access_key: "{{ aws_access_key }}" 4 | aws_secret_key: "{{ aws_secret_key }}" 5 | stack_name: "{{ cloudformation_stack_name }}" 6 | region: "{{ ec2_region }}" 7 | state: absent 8 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/deploy_node_app/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create mean app directory 3 | file: path="/home/{{ host_user }}/app" state=directory 4 | 5 | - name: Clone node app repository 6 | git: 7 | repo: "{{ node_app_repo }}" 8 | dest: "/home/{{ host_user }}/app" 9 | 10 | - name: Install node js packages 11 | npm: path="/home/{{ host_user }}/app" 12 | 13 | - name: Install Forever 14 | npm: name=forever global=yes 15 | 16 | - name: Launch application continuously 17 | command: forever start server.js 18 | args: 19 | chdir: "/home/{{ host_user }}/app" 20 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/ec2/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Convert cloudformation template from j2 to json 3 | local_action: template src="{{ cloudformation_template }}.j2" dest="{{ cloudformation_template }}" 4 | 5 | - name: Launch nagios server and host EC2 instances 6 | cloudformation: 7 | stack_name: "{{ cloudformation_stack_name }}" 8 | state: present 9 | region: "{{ ec2_region }}" 10 | disable_rollback: true 11 | aws_access_key: "{{ aws_access_key }}" 12 | aws_secret_key: "{{ aws_secret_key }}" 13 | template: "{{ cloudformation_template }}" 14 | tags: 15 | Stack: "EC2-cloudformation" 16 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/ec2/templates/cloudformation.json: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "NagiosServer": { 4 | "Type" : "AWS::EC2::Instance", 5 | "Properties" : { 6 | "AvailabilityZone" : "us-west-2a", 7 | "ImageId" : "ami-d732f0b7", 8 | "InstanceType" : "t2.micro", 9 | "KeyName" : "cosy-devops-uswest2" 10 | } 11 | }, 12 | "NagiosHost": { 13 | "Type" : "AWS::EC2::Instance", 14 | "Properties" : { 15 | "AvailabilityZone" : "us-west-2a", 16 | "ImageId" : "ami-d732f0b7", 17 | "InstanceType" : "t2.micro", 18 | "KeyName" : "cosy-devops-uswest2" 19 | } 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/ec2/templates/cloudformation.json.j2: 
-------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "NagiosServer": { 4 | "Type" : "AWS::EC2::Instance", 5 | "Properties" : { 6 | "AvailabilityZone" : "{{ availability_zone }}", 7 | "ImageId" : "{{ ubuntu_ec2_image }}", 8 | "InstanceType" : "{{ instance_type }}", 9 | "KeyName" : "{{ key_pair_name }}" 10 | } 11 | }, 12 | "NagiosHost": { 13 | "Type" : "AWS::EC2::Instance", 14 | "Properties" : { 15 | "AvailabilityZone" : "{{ availability_zone }}", 16 | "ImageId" : "{{ ubuntu_ec2_image }}", 17 | "InstanceType" : "{{ instance_type }}", 18 | "KeyName" : "{{ key_pair_name }}" 19 | } 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/lamp_stack/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if apache is installed 3 | command: bash -c "dpkg --get-selections | grep apache2" 4 | register: apache_installed 5 | ignore_errors: True 6 | tags: 7 | - apache_setup 8 | 9 | - name: Install apache 10 | apt: name=apache2 state=present 11 | when: apache_installed|failed 12 | tags: 13 | - apache_setup 14 | 15 | - name: Check if mysql is installed 16 | command: bash -c "dpkg --get-selections | grep {{ item }}" 17 | register: mysql_installed 18 | ignore_errors: True 19 | with_items: 20 | - mysql-server 21 | - php5-mysql 22 | tags: 23 | - mysql_setup 24 | 25 | - name: Install mysql 26 | apt: name={{ item }} state=present 27 | when: mysql_installed|failed 28 | with_items: 29 | - mysql-server 30 | - php5-mysql 31 | tags: 32 | - mysql_setup 33 | 34 | - name: Check if php is installed 35 | command: bash -c "dpkg --get-selections | grep {{ item }}" 36 | register: php_installed 37 | ignore_errors: True 38 | with_items: 39 | - php5 40 | - libapache2-mod-php5 41 | - php5-mcrypt 42 | tags: 43 | - php_setup 44 | 45 | - name: Install php 46 | apt: name={{ item }} state=present 47 | when: php_installed|failed 48 | with_items: 49 | - php5 50 | - libapache2-mod-php5 51 | - php5-mcrypt 52 | tags: 53 | - php_setup 54 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/mongo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if MongoDB is installed 3 | command: bash -c "dpkg --get-selections | grep mongodb" 4 | register: mongodb_installed 5 | ignore_errors: True 6 | tags: 7 | - mongodb 8 | 9 | - name: Install MongoDB and dependencies 10 | apt: name={{item}} state=present update_cache=yes force=yes 11 | when: mongodb_installed|failed 12 | with_items: 13 | - mongodb-org 14 | - build-essential 15 | - openssl 16 | - libssl-dev 17 | - pkg-config 18 | tags: 19 | - mongodb 20 | 21 | - name: MongoDB | Ensure daemon is running correctly 22 | service: name=mongod state=started 23 | tags: 24 | - mongodb 25 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/nagios_host/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nrpe 3 | service: name=nagios-nrpe-server state=restarted 4 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/nagios_host/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if Nagios plugins and NRPE are
installed 3 | command: bash -c "dpkg --get-selections | grep {{ item }}" 4 | register: nagios_plugins_installed 5 | ignore_errors: True 6 | with_items: 7 | - nagios-plugins 8 | - nagios-nrpe-server 9 | tags: 10 | - nagios_plugins_install 11 | 12 | - name: Install Nagios plugins and NRPE 13 | apt: name="{{ item }}" state=present 14 | when: nagios_plugins_installed|failed 15 | with_items: 16 | - nagios-plugins 17 | - nagios-nrpe-server 18 | tags: 19 | - nagios_plugins_install 20 | 21 | - name: Configure allowed hosts and allowed NRPE commands 22 | template: src=nrpe.cfg.j2 dest=/etc/nagios/nrpe.cfg 23 | notify: 24 | - restart nrpe 25 | tags: 26 | - allowed_host_configure 27 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/nagios_host/templates/nrpe.cfg.j2: -------------------------------------------------------------------------------- 1 | ############################################################################# 2 | # Sample NRPE Config File 3 | # Written by: Ethan Galstad (nagios@nagios.org) 4 | # 5 | # Last Modified: 11-23-2007 6 | # 7 | # NOTES: 8 | # This is a sample configuration file for the NRPE daemon. It needs to be 9 | # located on the remote host that is running the NRPE daemon, not the host 10 | # from which the check_nrpe client is being executed. 11 | ############################################################################# 12 | 13 | 14 | # LOG FACILITY 15 | # The syslog facility that should be used for logging purposes. 16 | 17 | log_facility=daemon 18 | 19 | 20 | 21 | # PID FILE 22 | # The name of the file in which the NRPE daemon should write it's process ID 23 | # number. The file is only written if the NRPE daemon is started by the root 24 | # user and is running in standalone mode. 25 | 26 | pid_file=/var/run/nagios/nrpe.pid 27 | 28 | 29 | 30 | # PORT NUMBER 31 | # Port number we should wait for connections on. 32 | # NOTE: This must be a non-priviledged port (i.e. > 1024). 33 | # NOTE: This option is ignored if NRPE is running under either inetd or xinetd 34 | 35 | server_port=5666 36 | 37 | 38 | 39 | # SERVER ADDRESS 40 | # Address that nrpe should bind to in case there are more than one interface 41 | # and you do not want nrpe to bind on all interfaces. 42 | # NOTE: This option is ignored if NRPE is running under either inetd or xinetd 43 | 44 | server_address={{ host_ip }} 45 | 46 | 47 | 48 | # NRPE USER 49 | # This determines the effective user that the NRPE daemon should run as. 50 | # You can either supply a username or a UID. 51 | # 52 | # NOTE: This option is ignored if NRPE is running under either inetd or xinetd 53 | 54 | nrpe_user=nagios 55 | 56 | 57 | 58 | # NRPE GROUP 59 | # This determines the effective group that the NRPE daemon should run as. 60 | # You can either supply a group name or a GID. 61 | # 62 | # NOTE: This option is ignored if NRPE is running under either inetd or xinetd 63 | 64 | nrpe_group=nagios 65 | 66 | 67 | 68 | # ALLOWED HOST ADDRESSES 69 | # This is an optional comma-delimited list of IP address or hostnames 70 | # that are allowed to talk to the NRPE daemon. Network addresses with a bit mask 71 | # (i.e. 192.168.1.0/24) are also supported. Hostname wildcards are not currently 72 | # supported. 73 | # 74 | # Note: The daemon only does rudimentary checking of the client's IP 75 | # address. I would highly recommend adding entries in your /etc/hosts.allow 76 | # file to allow only the specified host to connect to the port 77 | # you are running this daemon on. 
78 | # 79 | # NOTE: This option is ignored if NRPE is running under either inetd or xinetd 80 | 81 | allowed_hosts={{ server_ip }} 82 | 83 | 84 | 85 | # COMMAND ARGUMENT PROCESSING 86 | # This option determines whether or not the NRPE daemon will allow clients 87 | # to specify arguments to commands that are executed. This option only works 88 | # if the daemon was configured with the --enable-command-args configure script 89 | # option. 90 | # 91 | # *** ENABLING THIS OPTION IS A SECURITY RISK! *** 92 | # Read the SECURITY file for information on some of the security implications 93 | # of enabling this variable. 94 | # 95 | # Values: 0=do not allow arguments, 1=allow command arguments 96 | 97 | dont_blame_nrpe=0 98 | 99 | 100 | 101 | # BASH COMMAND SUBTITUTION 102 | # This option determines whether or not the NRPE daemon will allow clients 103 | # to specify arguments that contain bash command substitutions of the form 104 | # $(...). This option only works if the daemon was configured with both 105 | # the --enable-command-args and --enable-bash-command-substitution configure 106 | # script options. 107 | # 108 | # *** ENABLING THIS OPTION IS A HIGH SECURITY RISK! *** 109 | # Read the SECURITY file for information on some of the security implications 110 | # of enabling this variable. 111 | # 112 | # Values: 0=do not allow bash command substitutions, 113 | # 1=allow bash command substitutions 114 | 115 | allow_bash_command_substitution=0 116 | 117 | 118 | 119 | # COMMAND PREFIX 120 | # This option allows you to prefix all commands with a user-defined string. 121 | # A space is automatically added between the specified prefix string and the 122 | # command line from the command definition. 123 | # 124 | # *** THIS EXAMPLE MAY POSE A POTENTIAL SECURITY RISK, SO USE WITH CAUTION! *** 125 | # Usage scenario: 126 | # Execute restricted commmands using sudo. For this to work, you need to add 127 | # the nagios user to your /etc/sudoers. An example entry for alllowing 128 | # execution of the plugins from might be: 129 | # 130 | # nagios ALL=(ALL) NOPASSWD: /usr/lib/nagios/plugins/ 131 | # 132 | # This lets the nagios user run all commands in that directory (and only them) 133 | # without asking for a password. If you do this, make sure you don't give 134 | # random users write access to that directory or its contents! 135 | 136 | # command_prefix=/usr/bin/sudo 137 | 138 | 139 | 140 | # DEBUGGING OPTION 141 | # This option determines whether or not debugging messages are logged to the 142 | # syslog facility. 143 | # Values: 0=debugging off, 1=debugging on 144 | 145 | debug=0 146 | 147 | 148 | 149 | # COMMAND TIMEOUT 150 | # This specifies the maximum number of seconds that the NRPE daemon will 151 | # allow plugins to finish executing before killing them off. 152 | 153 | command_timeout=60 154 | 155 | 156 | 157 | # CONNECTION TIMEOUT 158 | # This specifies the maximum number of seconds that the NRPE daemon will 159 | # wait for a connection to be established before exiting. This is sometimes 160 | # seen where a network problem stops the SSL being established even though 161 | # all network sessions are connected. This causes the nrpe daemons to 162 | # accumulate, eating system resources. Do not set this too low. 163 | 164 | connection_timeout=300 165 | 166 | 167 | 168 | # WEEK RANDOM SEED OPTION 169 | # This directive allows you to use SSL even if your system does not have 170 | # a /dev/random or /dev/urandom (on purpose or because the necessary patches 171 | # were not applied). 
The random number generator will be seeded from a file 172 | # which is either a file pointed to by the environment valiable $RANDFILE 173 | # or $HOME/.rnd. If neither exists, the pseudo random number generator will 174 | # be initialized and a warning will be issued. 175 | # Values: 0=only seed from /dev/[u]random, 1=also seed from weak randomness 176 | 177 | #allow_weak_random_seed=1 178 | 179 | 180 | 181 | # INCLUDE CONFIG FILE 182 | # This directive allows you to include definitions from an external config file. 183 | 184 | #include=<somefile.cfg> 185 | 186 | 187 | 188 | # INCLUDE CONFIG DIRECTORY 189 | # This directive allows you to include definitions from config files (with a 190 | # .cfg extension) in one or more directories (with recursion). 191 | 192 | #include_dir=<somedirectory> 193 | #include_dir=<someotherdirectory> 194 | 195 | 196 | 197 | # COMMAND DEFINITIONS 198 | # Command definitions that this daemon will run. Definitions 199 | # are in the following format: 200 | # 201 | # command[<command_name>]=<command_line> 202 | # 203 | # When the daemon receives a request to return the results of <command_name> 204 | # it will execute the command specified by the <command_line> argument. 205 | # 206 | # Unlike Nagios, the command line cannot contain macros - it must be 207 | # typed exactly as it should be executed. 208 | # 209 | # Note: Any plugins that are used in the command lines must reside 210 | # on the machine that this daemon is running on! The examples below 211 | # assume that you have plugins installed in a /usr/local/nagios/libexec 212 | # directory. Also note that you will have to modify the definitions below 213 | # to match the argument format the plugins expect. Remember, these are 214 | # examples only! 215 | 216 | 217 | # The following examples use hardcoded command arguments... 218 | 219 | command[check_users]=/usr/lib/nagios/plugins/check_users -w 5 -c 10 220 | command[check_load]=/usr/lib/nagios/plugins/check_load -w 15,10,5 -c 30,25,20 221 | command[check_hda1]=/usr/lib/nagios/plugins/check_disk -w 20% -c 10% -p {{ root_file_system }} 222 | command[check_zombie_procs]=/usr/lib/nagios/plugins/check_procs -w 5 -c 10 -s Z 223 | command[check_total_procs]=/usr/lib/nagios/plugins/check_procs -w 150 -c 200 224 | 225 | 226 | # The following examples allow user-supplied arguments and can 227 | # only be used if the NRPE daemon was compiled with support for 228 | # command arguments *AND* the dont_blame_nrpe directive in this 229 | # config file is set to '1'. This poses a potential security risk, so 230 | # make sure you read the SECURITY file before doing this.
231 | 232 | #command[check_users]=/usr/lib/nagios/plugins/check_users -w $ARG1$ -c $ARG2$ 233 | #command[check_load]=/usr/lib/nagios/plugins/check_load -w $ARG1$ -c $ARG2$ 234 | #command[check_disk]=/usr/lib/nagios/plugins/check_disk -w $ARG1$ -c $ARG2$ -p $ARG3$ 235 | #command[check_procs]=/usr/lib/nagios/plugins/check_procs -w $ARG1$ -c $ARG2$ -s $ARG3$ 236 | 237 | # 238 | # local configuration: 239 | # if you'd prefer, you can instead place directives here 240 | include=/etc/nagios/nrpe_local.cfg 241 | 242 | # 243 | # you can place your config snipplets into nrpe.d/ 244 | # only snipplets ending in .cfg will get included 245 | include_dir=/etc/nagios/nrpe.d/ 246 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/nagios_server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart xinetd 3 | service: name=xinetd state=restarted 4 | 5 | - name: start nagios 6 | service: name=nagios state=started 7 | 8 | - name: restart apache 9 | service: name=apache2 state=restarted 10 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/nagios_server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create nagcmd group 3 | group: name="{{ item }}" state=present 4 | with_items: 5 | - "{{ nagcmd_group }}" 6 | - "{{ nagios_group }}" 7 | tags: 8 | - nagios_user_setup 9 | 10 | - name: Create nagios user and add to group 11 | user: name="{{ nagios_user }}" group="{{ nagcmd_group }}" 12 | tags: 13 | - nagios_user_setup 14 | 15 | - name: Check if build dependencies are installed 16 | command: bash -c "dpkg --get-selections | grep {{ item }}" 17 | register: build_installed 18 | ignore_errors: True 19 | with_items: 20 | - build-essential 21 | - libgd2-xpm-dev 22 | - openssl 23 | - libssl-dev 24 | - xinetd 25 | - apache2-utils 26 | - unzip 27 | tags: 28 | - build_dependencies 29 | 30 | - name: Install build dependencies 31 | apt: name="{{ item }}" state=present 32 | when: build_installed|failed 33 | with_items: 34 | - build-essential 35 | - libgd2-xpm-dev 36 | - openssl 37 | - libssl-dev 38 | - xinetd 39 | - apache2-utils 40 | - unzip 41 | tags: 42 | - build_dependencies 43 | 44 | - name: Download and extract Nagios Core 45 | unarchive: 46 | src: https://assets.nagios.com/downloads/nagioscore/releases/nagios-4.1.1.tar.gz 47 | dest: "/home/{{ host_user }}" 48 | copy: no 49 | tags: 50 | - nagios_core_setup 51 | 52 | - name: Build nagios 53 | command: ./configure --with-nagios-group="{{ nagios_group }}" --with-command-group="{{ nagcmd_group }}" 54 | args: 55 | chdir: "/home/{{ host_user }}/nagios-4.1.1" 56 | tags: 57 | - nagios_core_setup 58 | 59 | - name: Install nagios, init scripts and sample configuration files 60 | command: "{{ item }}" 61 | args: 62 | chdir: "/home/{{ host_user }}/nagios-4.1.1" 63 | with_items: 64 | - make clean 65 | - make all 66 | - make install 67 | - make install-commandmode 68 | - make install-init 69 | - make install-config 70 | - /usr/bin/install -c -m 644 sample-config/httpd.conf /etc/apache2/sites-available/nagios.conf 71 | tags: 72 | - nagios_core_setup 73 | 74 | - name: Add www-data to nagios group 75 | user: name=www-data group="{{ nagcmd_group }}" 76 | tags: 77 | - add_wwwdata_nagios 78 | 79 | - name: Download and extract Nagios plugins 80 | unarchive: 81 | src: 
http://nagios-plugins.org/download/nagios-plugins-2.1.1.tar.gz 82 | dest: "/home/{{ host_user }}" 83 | copy: no 84 | tags: 85 | - nagios_plugins_setup 86 | 87 | - name: Configure nagios plugins 88 | command: ./configure --with-nagios-user="{{ nagios_user }}" --with-nagios-group="{{ nagios_group }}" --with-openssl 89 | args: 90 | chdir: "/home/{{ host_user }}/nagios-plugins-2.1.1" 91 | tags: 92 | - nagios_plugins_setup 93 | 94 | - name: Compile and install nagios plugins 95 | command: "{{ item }}" 96 | args: 97 | chdir: "/home/{{ host_user }}/nagios-plugins-2.1.1" 98 | with_items: 99 | - make 100 | - make install 101 | tags: 102 | - nagios_plugins_setup 103 | 104 | - name: Download and extract NRPE 105 | unarchive: 106 | src: http://downloads.sourceforge.net/project/nagios/nrpe-2.x/nrpe-2.15/nrpe-2.15.tar.gz 107 | dest: "/home/{{ host_user }}" 108 | copy: no 109 | tags: 110 | - nrpe_setup 111 | 112 | - name: Configure NRPE 113 | command: ./configure --enable-command-args --with-nagios-user="{{ nagios_user }}" --with-nagios-group="{{ nagios_group }}" --with-ssl=/usr/bin/openssl --with-ssl-lib=/usr/lib/x86_64-linux-gnu 114 | args: 115 | chdir: "/home/{{ host_user }}/nrpe-2.15" 116 | tags: 117 | - nrpe_setup 118 | 119 | - name: Build and install NRPE and its xinetd startup script 120 | command: "{{ item }}" 121 | args: 122 | chdir: "/home/{{ host_user }}/nrpe-2.15" 123 | with_items: 124 | - make clean 125 | - make all 126 | - make install 127 | - make install-xinetd 128 | - make install-daemon-config 129 | tags: 130 | - nrpe_setup 131 | 132 | - name: Modify xinetd startup script 133 | template: src=nrpe.j2 dest=/etc/xinetd.d/nrpe 134 | notify: 135 | - restart xinetd 136 | tags: 137 | - xinetd_script_setup 138 | 139 | - name: Edit Nagios Configuration 140 | lineinfile: 141 | dest: /usr/local/nagios/etc/nagios.cfg 142 | state: present 143 | regexp: "^#cfg_dir=/usr/local/nagios/etc/servers$" 144 | line: "cfg_dir=/usr/local/nagios/etc/servers" 145 | backrefs: yes 146 | tags: 147 | - nagios_configure 148 | 149 | - name: Create directory to store configuration file for each server to be monitored 150 | file: path=/usr/local/nagios/etc/servers state=directory 151 | tags: 152 | - nagios_configure 153 | 154 | - name: Configure nagios contacts 155 | template: src=contacts.cfg.j2 dest=/usr/local/nagios/etc/objects/contacts.cfg 156 | tags: 157 | - nagios_contacts_configure 158 | 159 | - name: Configure check_nrpe Command 160 | blockinfile: 161 | dest: /usr/local/nagios/etc/objects/commands.cfg 162 | state: present 163 | insertafter: EOF 164 | block: | 165 | define command{ 166 | command_name check_nrpe 167 | command_line $USER1$/check_nrpe -H $HOSTADDRESS$ -c $ARG1$ 168 | } 169 | tags: 170 | - check_nrpe_configure 171 | 172 | - name: Enable the Apache rewrite and cgi modules 173 | command: "{{ item }}" 174 | with_items: 175 | - a2enmod rewrite 176 | - a2enmod cgi 177 | tags: 178 | - apache_configure 179 | 180 | - name: Create admin user that can access the Nagios web interface 181 | htpasswd: 182 | path: /usr/local/nagios/etc/htpasswd.users 183 | name: "{{ nagiosadmin_user }}" 184 | password: "{{ nagiosadmin_user_password }}" 185 | tags: 186 | - apache_configure 187 | 188 | - name: Symlink nagios.conf to sites-enabled directory 189 | file: 190 | src: /etc/apache2/sites-available/nagios.conf 191 | dest: /etc/apache2/sites-enabled/nagios.conf 192 | state: link 193 | notify: 194 | - start nagios 195 | - restart apache 196 | tags: 197 | - apache_configure 198 | 199 | - name: Enable nagios to start on server 
boot 200 | file: 201 | src: /etc/init.d/nagios 202 | dest: /etc/rcS.d/S99nagios 203 | state: link 204 | tags: 205 | - apache_configure 206 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/nagios_server/templates/contacts.cfg.j2: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS 3 | # 4 | # 5 | # NOTES: This config file provides you with some example contact and contact 6 | # group definitions that you can reference in host and service 7 | # definitions. 8 | # 9 | # You don't need to keep these definitions in a separate file from your 10 | # other object definitions. This has been done just to make things 11 | # easier to understand. 12 | # 13 | ############################################################################### 14 | 15 | 16 | 17 | ############################################################################### 18 | ############################################################################### 19 | # 20 | # CONTACTS 21 | # 22 | ############################################################################### 23 | ############################################################################### 24 | 25 | # Just one contact defined by default - the Nagios admin (that's you) 26 | # This contact definition inherits a lot of default values from the 'generic-contact' 27 | # template which is defined elsewhere. 28 | 29 | define contact{ 30 | contact_name nagiosadmin ; Short name of user 31 | use generic-contact ; Inherit default values from generic-contact template (defined above) 32 | alias Nagios Admin ; Full name of user 33 | 34 | email {{ nagios_email }} ; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ****** 35 | } 36 | 37 | 38 | 39 | ############################################################################### 40 | ############################################################################### 41 | # 42 | # CONTACT GROUPS 43 | # 44 | ############################################################################### 45 | ############################################################################### 46 | 47 | # We only have one contact in this simple configuration file, so there is 48 | # no need to create more than one contact group. 
49 | 50 | define contactgroup{ 51 | contactgroup_name admins 52 | alias Nagios Administrators 53 | members nagiosadmin 54 | } 55 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/nagios_server/templates/nrpe.j2: -------------------------------------------------------------------------------- 1 | # default: on 2 | # description: NRPE (Nagios Remote Plugin Executor) 3 | service nrpe 4 | { 5 | flags = REUSE 6 | socket_type = stream 7 | port = 5666 8 | wait = no 9 | user = nagios 10 | group = nagios 11 | server = /usr/local/nagios/bin/nrpe 12 | server_args = -c /usr/local/nagios/etc/nrpe.cfg --inetd 13 | log_on_failure += USERID 14 | disable = no 15 | only_from = 127.0.0.1 {{ server_ip }} 16 | } 17 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/new_relic/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install newrelic 2 | npm: name=newrelic path="/home/{{ host_user }}/{{ app_folder }}" 3 | tags: 4 | - newrelic_install 5 | 6 | - name: Copy newrelic file 7 | template: 8 | src: newrelic.js.j2 9 | dest: "/home/{{ host_user }}/{{ app_folder }}/newrelic.js" 10 | force: no 11 | tags: 12 | - newrelic_file 13 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/new_relic/templates/newrelic.js.j2: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | /** 4 | * New Relic agent configuration. 5 | * 6 | * See lib/config.defaults.js in the agent distribution for a more complete 7 | * description of configuration variables and their potential values. 8 | */ 9 | exports.config = { 10 | /** 11 | * Array of application names. 12 | */ 13 | app_name: ['{{ node_app_name }}'], 14 | /** 15 | * Your New Relic license key. 16 | */ 17 | license_key: '{{ newrelic_license_key }}', 18 | logging: { 19 | /** 20 | * Level at which to log. 'trace' is most useful to New Relic when diagnosing 21 | * issues with the agent, 'info' and higher will impose the least overhead on 22 | * production applications. 
23 | */ 24 | level: 'info' 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Nodejs and git 3 | apt: name={{item}} state=latest 4 | register: result 5 | with_items: 6 | - nodejs 7 | - git 8 | tags: 9 | - node_git 10 | 11 | - name: Install required packages 12 | npm: name={{item}} global=yes state=latest 13 | with_items: 14 | - grunt-cli 15 | - bower 16 | tags: 17 | - required_pkgs -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update apt cache 3 | apt: update_cache=yes 4 | 5 | - name: Install python-passlib 6 | apt: name=python-passlib state=present 7 | -------------------------------------------------------------------------------- /monitoring_deployments_9/roles/setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add MongoDB key to our system's list of trusted keys 3 | apt_key: keyserver=hkp://keyserver.ubuntu.com:80 id=7F0CEB10 state=present 4 | 5 | - name: Add MongoDB repo sources 6 | apt_repository: repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' state=present 7 | 8 | - name: Add apt key for nodesource 9 | apt_key: url=https://deb.nodesource.com/gpgkey/nodesource.gpg.key 10 | 11 | - name: Add repo for nodesource 12 | apt_repository: repo='deb https://deb.nodesource.com/node_0.10 {{ ansible_distribution_release }} main' state=present 13 | 14 | - name: Update apt cache 15 | apt: update_cache=yes 16 | 17 | - name: Update OS 18 | apt: upgrade=dist force=yes 19 | -------------------------------------------------------------------------------- /monitoring_deployments_9/secret_vars.example.yml: -------------------------------------------------------------------------------- 1 | nagiosadmin_user_password: YOUR PASSWORD 2 | aws_access_key: YOUR AWS ACCESS KEY 3 | aws_secret_key: YOUR AWS SECRET KEY 4 | nagios_email: YOUR EMAIL ADDRESS 5 | server_ip: YOUR SERVER IP 6 | host_ip: YOUR HOST IP 7 | newrelic_license_key: YOUR NEW RELIC LICENSE KEY 8 | -------------------------------------------------------------------------------- /monitoring_deployments_9/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | host_user: ubuntu 3 | root_file_system: /dev/sda1 4 | nagios_host_name: nagioshost 5 | nagios_host_description: Host to be monitored by nagios 6 | 7 | instance_type: t2.micro 8 | ubuntu_ec2_image: ami-d732f0b7 9 | ec2_region: us-west-2 10 | availability_zone: us-west-2a 11 | cloudformation_stack_name: EC2 12 | cloudformation_template: roles/ec2/templates/cloudformation.json 13 | key_pair_name: cosy-devops-uswest2 14 | 15 | node_app_repo: https://github.com/andela-oadelemoni/doc-cache.git 16 | port: 5000 17 | app_folder: app 18 | node_app_name: doc-cache 19 | -------------------------------------------------------------------------------- /networking_2/README.md: -------------------------------------------------------------------------------- 1 | # Assessment 02 - Networking 2 | 3 | This contains a list of questions to assess a fellow's knowledge of the **Networking** Learning Outcome 4 | 5 | ## Questions 6 | 7 | 1. Display network interfaces
8 | 9 | ``` 10 | ip addr 11 | ``` 12 | 13 | 2. Display network routing table 14 | 15 | ``` 16 | route -n 17 | ``` 18 | 19 | 3. Disable **ICMP** ping requests to your local machine 20 | 21 | ``` 22 | Add the following iptables rule to block pings with an error message (using REJECT as the jump target): 23 | 24 | iptables -A INPUT -p icmp --icmp-type echo-request -j REJECT 25 | ``` 26 | 27 | 4. Use a subnet mask to allocate IP addresses on a network 28 | 29 | Given the IP range **10.0.10.0/24** 30 | 31 | ``` 32 | minimum_ip_address="10.0.10.1" 33 | maximum_ip_address="10.0.10.254" 34 | ``` 35 | 36 | Given the IP range **10.0.10.0/30** 37 | 38 | ``` 39 | minimum_ip_address="10.0.10.1" 40 | maximum_ip_address="10.0.10.2" 41 | ``` 42 | 43 | 5. Use `ssh` to forward port **2444** on remote machine **10.0.0.1** to local port **6333** 44 | 45 | ``` 46 | ssh -L <local_port>:<target_host>:<target_port> <user>@<remote_host> 47 | ssh -L 6333:10.0.0.1:2444 user@10.0.0.1 48 | ``` 49 | 50 | 6. Using `ssh` with the authentication agent forwarded, connect to remote machine **10.0.0.1** 51 | 52 | ``` 53 | ssh -A user@10.0.0.1 54 | ``` 55 | 56 | 7. Using `ssh`, connect to remote machine **10.0.0.1** with the private key `private.key.pem` 57 | 58 | ``` 59 | ssh -i private.key.pem user@10.0.0.1 60 | ``` 61 | 62 | 8. Using `scp`, copy file `/home/user/file.txt` on remote machine **10.0.0.1** to your local machine 63 | 64 | ``` 65 | scp remoteuser@10.0.0.1:/home/user/file.txt /localdirectory/test 66 | ``` 67 | 68 | 9. Using `scp`, copy local file at `/home/user/file.txt` to remote machine **10.0.0.1** 69 | 70 | ``` 71 | scp /home/user/file.txt remoteuser@10.0.0.1:/home/user 72 | ``` 73 | 74 | 10. List all open ports on a system 75 | 76 | ``` 77 | sudo netstat -tulpen 78 | ``` 79 | 80 | 11. Block an IP address from accessing all ports on your local machine using iptables 81 | 82 | ``` 83 | iptables -A INPUT -s <ip_address> -j DROP 84 | ``` 85 | 86 | 12. List differences between IPv4 & IPv6 addresses 87 | 88 | ``` 89 | i. IPv4 addresses are 32 bits long while IPv6 addresses are 128 bits long. 90 | 91 | ii. IPv4 addresses are binary numbers represented in decimal while IPv6 addresses are binary numbers represented in hexadecimal. 92 | 93 | iii. Broadcast messages are available in IPv4 but not in IPv6; instead, the link-local scope "All nodes" multicast address (FF02::1) provides similar functionality. 94 | 95 | iv. IPv4 addresses must be configured manually (static) or via DHCP, while IPv6 supports auto-configuration of addresses. 96 | ``` 97 |
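As a quick sanity check for the firewall answers above, here is a minimal sketch (`10.0.0.99` is a hypothetical offending address) that adds, verifies, and removes the blocking rule from question 11:

```
# Append a rule dropping all traffic from the offending address
sudo iptables -A INPUT -s 10.0.0.99 -j DROP

# List the INPUT chain with line numbers to confirm the rule is active
sudo iptables -L INPUT -n --line-numbers

# Delete the rule by its specification once done testing
sudo iptables -D INPUT -s 10.0.0.99 -j DROP
```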
-------------------------------------------------------------------------------- /securing_deployments_11/README.md: -------------------------------------------------------------------------------- 1 | # Outcome 11 - Securing Deployments 2 | 3 | An exercise that covers the following operations: 4 | - Setting up hooks to scan for secure credentials/tokens before code is checked into a repo 5 | - Ensuring credentials for production systems are stored in a secure vault and restricting access to it 6 | - Isolating security credentials/tokens for all different environments 7 | - Putting together tools to monitor and analyze system logs for suspicious activities 8 | - Running processes with the least required set of permissions to prevent privilege escalation 9 | - Setting up and configuring the ssh daemon for private key authentication 10 | - Applying OS security patches to a system 11 | - Defining firewall rules to control access to the machine 12 | - Logging all connections to deployed machines 13 | - Securing data transfer using SSL 14 | 15 | ### System Requirements 16 | * Python 17 | * Pip 18 | * Ansible > 2.0 19 | * AWS CLI (installed in the virtual machine or remote server) 20 | * Boto 21 | * Ruby 22 | * Cucumber 23 | * VirtualBox (to test locally) 24 | 25 | 26 | # Testing locally 27 | 28 | ### Setup 29 | * Run `vagrant up`. This will bring up a virtual machine and also provision it. 30 | * Update the variables in the _vars.yml_ file with your own values. Note that _host_user_ should be set to `vagrant`, not `ubuntu`, when testing locally. 31 | 32 | ### Run tests 33 | * Run `cucumber features/git_hook_setup_steps.feature` to run the tests, which set up hooks in the virtual machine that scan for secure credentials/tokens before code is checked into a repo. 34 | -------------------------------------------------------------------------------- /securing_deployments_11/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure(2) do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://atlas.hashicorp.com/search. 15 | config.vm.box = "ubuntu/trusty64" 16 | 17 | # Disable automatic box update checking. If you disable this, then 18 | # boxes will only be checked for updates when the user runs 19 | # `vagrant box outdated`. This is not recommended. 20 | # config.vm.box_check_update = false 21 | 22 | # Create a forwarded port mapping which allows access to a specific port 23 | # within the machine from a port on the host machine. In the example below, 24 | # accessing "localhost:8080" will access port 80 on the guest machine. 25 | # config.vm.network "forwarded_port", guest: 80, host: 8080 26 | 27 | # Create a private network, which allows host-only access to the machine 28 | # using a specific IP. 29 | config.vm.network "private_network", ip: "192.168.33.21" 30 | 31 | # Create a public network, which is generally matched to a bridged network.
32 | # Bridged networks make the machine appear as another physical device on 33 | # your network. 34 | # config.vm.network "public_network" 35 | 36 | # Share an additional folder to the guest VM. The first argument is 37 | # the path on the host to the actual folder. The second argument is 38 | # the path on the guest to mount the folder. And the optional third 39 | # argument is a set of non-required options. 40 | # config.vm.synced_folder "../data", "/vagrant_data" 41 | 42 | # Provider-specific configuration so you can fine-tune various 43 | # backing providers for Vagrant. These expose provider-specific options. 44 | # Example for VirtualBox: 45 | # 46 | config.vm.provider "virtualbox" do |vb| 47 | # # Display the VirtualBox GUI when booting the machine 48 | # vb.gui = true 49 | # 50 | # # Customize the amount of memory on the VM: 51 | vb.memory = "1024" 52 | end 53 | # 54 | # View the documentation for the provider you are using for more 55 | # information on available options. 56 | 57 | # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies 58 | # such as FTP and Heroku are also available. See the documentation at 59 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 60 | # config.push.define "atlas" do |push| 61 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 62 | # end 63 | 64 | # Enable provisioning with a shell script. Additional provisioners such as 65 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 66 | # documentation for more information about their specific syntax and use. 67 | # config.vm.provision "shell", inline: <<-SHELL 68 | # sudo apt-get update 69 | # sudo apt-get install -y apache2 70 | # SHELL 71 | config.vm.provision "ansible" do |ansible| 72 | ansible.playbook = "playbook.provision.yml" 73 | ansible.inventory_path = "local_inventory.ini" 74 | ansible.sudo = true 75 | ansible.verbose = "v" 76 | end 77 | config.vm.define "secure_server" 78 | end 79 | -------------------------------------------------------------------------------- /securing_deployments_11/features/git_hook_setup_steps.feature: -------------------------------------------------------------------------------- 1 | Feature: Set up hooks to scan for secure credentials/tokens before code is checked into a repo 2 | 3 | Background: 4 | Given I have a running server 5 | And I provision it 6 | 7 | Scenario: Install Git-secrets 8 | When I install git-secrets 9 | Then it should be successful 10 | And git-secrets command should be available 11 | 12 | Scenario: Copy script to setup hook on server 13 | When I copy script to setup hook on server 14 | Then it should be successful 15 | And script should exist 16 | 17 | Scenario: Test that setup hook script exits with 0 18 | When I create a test repo 19 | And run script against test repo 20 | And I add aws secret to the repo 21 | Then aws secret should not be committed 22 | And test repo should be deleted 23 | -------------------------------------------------------------------------------- /securing_deployments_11/features/secret_vault_setup.feature: -------------------------------------------------------------------------------- 1 | Feature: Ensure credentials for production systems are stored in a secure vault and restrict access to it 2 | 3 | Background: 4 | Given I have a running server 5 | And I provision it 6 | 7 | Scenario: Install Vault and Consul 8 | When I install Vault and Consul 9 | Then it should be successful 10 | And vault command should be available 11 | And consul
command should be available 12 | -------------------------------------------------------------------------------- /securing_deployments_11/features/step_definitions/git_hook_setup_steps.rb: -------------------------------------------------------------------------------- 1 | require 'open3' 2 | 3 | Given(/^I have a running server$/) do 4 | _, _, status = Open3.capture3 "vagrant reload" 5 | 6 | expect(status.success?).to eq(true) 7 | end 8 | 9 | Given(/^I provision it$/) do 10 | _, _, status = Open3.capture3 "vagrant provision" 11 | 12 | expect(status.success?).to eq(true) 13 | end 14 | 15 | When(/^I install git\-secrets$/) do 16 | cmd = "ansible-playbook -i local_inventory.ini playbook.secure_credential_hook.yml --tags 'install_git_secret'" 17 | 18 | _, _, @status = Open3.capture3 "#{cmd}" 19 | end 20 | 21 | Then(/^it should be successful$/) do 22 | expect(@status.success?).to eq(true) 23 | end 24 | 25 | Then(/^([^"]*) command should be available$/) do |package| 26 | output, _, status = Open3.capture3 "vagrant ssh -c 'type #{package}'" 27 | 28 | expect(status.success?).to eq(true) 29 | expect(output).to match("#{package} is /usr/local/bin/#{package}") 30 | end 31 | 32 | When(/^I copy script to setup hook on server$/) do 33 | cmd = "ansible-playbook -i local_inventory.ini playbook.secure_credential_hook.yml --tags 'copy_hook_script'" 34 | 35 | _, _, @status = Open3.capture3 "#{cmd}" 36 | end 37 | 38 | Then(/^script should exist$/) do 39 | _, _, status = Open3.capture3 "vagrant ssh -c 'test -f run_git_secret_hook.sh'" 40 | 41 | expect(status.success?).to eq(true) 42 | end 43 | 44 | When(/^I create a test repo$/) do 45 | _, _, status = Open3.capture3 "vagrant ssh -c 'mkdir testrepo && touch testrepo/testfile.txt && git init testrepo'" 46 | 47 | expect(status.success?).to eq(true) 48 | end 49 | 50 | And(/^run script against test repo$/) do 51 | _, _, status = Open3.capture3 "vagrant ssh -c './run_git_secret_hook.sh testrepo/'" 52 | 53 | expect(status.success?).to eq(true) 54 | end 55 | 56 | And(/^I add aws secret to the repo$/) do 57 | _, _, status = Open3.capture3 "vagrant ssh -c 'echo AWS Secret Access Key: random_Secret >> testrepo/testfile.txt'" 58 | 59 | expect(status.success?).to eq(true) 60 | end 61 | 62 | Then(/^aws secret should not be committed$/) do 63 | _, _, status = Open3.capture3 "vagrant ssh -c 'cd testrepo && git add . 
&& git commit -m \"Test commit\"'" 64 | 65 | expect(status.success?).to eq(false) 66 | end 67 | 68 | And(/^test repo should be deleted$/) do 69 | _, _, status = Open3.capture3 "vagrant ssh -c 'rm -rf testrepo/'" 70 | 71 | expect(status.success?).to eq(true) 72 | end 73 | -------------------------------------------------------------------------------- /securing_deployments_11/features/step_definitions/secret_vault_setup.rb: -------------------------------------------------------------------------------- 1 | require 'open3' 2 | 3 | When(/^I install Vault and Consul$/) do 4 | cmd = "ansible-playbook -i local_inventory.ini playbook.secure_vault.yml --tags 'install_vault_and_consul'" 5 | 6 | _, _, @status = Open3.capture3 "#{cmd}" 7 | end 8 | -------------------------------------------------------------------------------- /securing_deployments_11/local_inventory.ini: -------------------------------------------------------------------------------- 1 | [secure_server] 2 | 192.168.33.21 ansible_ssh_private_key_file=.vagrant/machines/secure_server/virtualbox/private_key 3 | -------------------------------------------------------------------------------- /securing_deployments_11/playbook.provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | roles: 9 | - provision 10 | -------------------------------------------------------------------------------- /securing_deployments_11/playbook.secure_credential_hook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | roles: 9 | - git_secret_setup 10 | -------------------------------------------------------------------------------- /securing_deployments_11/playbook.secure_vault.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: "{{ host_user }}" 4 | become: yes 5 | become_method: sudo 6 | vars_files: 7 | - vars.yml 8 | roles: 9 | - secret_vault_setup 10 | -------------------------------------------------------------------------------- /securing_deployments_11/roles/git_secret_setup/files/run_git_secret_hook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # fail and fail fast \0/ 4 | set -e 5 | 6 | # Check if a target directory is supplied as an argument when running the script 7 | if [ "$#" -eq 0 ]; then 8 | echo "No target directory supplied as argument when running the script." 9 | echo "Exiting script." 10 | exit 1 11 | else 12 | REPOSITORY=$1 13 | # Check if the target directory exists 14 | if [ -d "$REPOSITORY" ]; then 15 | echo "Moving to $REPOSITORY" 16 | cd "$REPOSITORY" 17 | 18 | echo "Installing git-secrets hooks in $REPOSITORY" 19 | git secrets --install -f 20 | 21 | echo "Registering AWS secret patterns" 22 | git secrets --register-aws 23 | else 24 | echo "Target directory does not exist."
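# Exit non-zero so callers (such as the Cucumber steps) can detect the failure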
25 | exit 1 26 | fi 27 | fi 28 | -------------------------------------------------------------------------------- /securing_deployments_11/roles/git_secret_setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Clone git-secrets repository 3 | git: 4 | repo: https://github.com/awslabs/git-secrets.git 5 | clone: yes 6 | dest: "{{ ansible_env.PWD }}/git-secrets" 7 | tags: 8 | - install_git_secret 9 | 10 | - name: Install git-secrets 11 | command: make install 12 | args: 13 | chdir: "{{ ansible_env.PWD }}/git-secrets" 14 | tags: 15 | - install_git_secret 16 | 17 | - name: Copy script to setup hook on server 18 | copy: 19 | src: files/run_git_secret_hook.sh 20 | dest: "{{ ansible_env.PWD }}/run_git_secret_hook.sh" 21 | mode: 0755 22 | tags: 23 | - copy_hook_script 24 | -------------------------------------------------------------------------------- /securing_deployments_11/roles/provision/files/credentials: -------------------------------------------------------------------------------- 1 | [default] 2 | aws_access_key_id = random_key 3 | aws_secret_access_key = random_Secret 4 | -------------------------------------------------------------------------------- /securing_deployments_11/roles/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update apt cache 3 | apt: update_cache=yes 4 | 5 | - name: Install git 6 | apt: name=git state=present 7 | 8 | - name: Install unzip 9 | apt: name=unzip state=present 10 | 11 | - name: Copy fake AWS credentials 12 | copy: 13 | src: files/credentials 14 | dest: "{{ ansible_env.PWD }}/.aws/credentials" 15 | -------------------------------------------------------------------------------- /securing_deployments_11/roles/secret_vault_setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download the precompiled Vault and Consul binaries 3 | get_url: 4 | url: "{{ item }}" 5 | dest: /usr/local/bin 6 | with_items: 7 | - https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_amd64.zip 8 | - https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_amd64.zip 9 | register: zipped_file 10 | tags: 11 | - install_vault_and_consul 12 | 13 | ### TODO: Figure out how to make this unzip step idempotent 14 | - name: Unzip the Vault and Consul binaries 15 | unarchive: 16 | src: "{{ item.dest }}" 17 | dest: /usr/local/bin 18 | copy: no 19 | with_items: 20 | - "{{ zipped_file.results }}" 21 | when: zipped_file | changed 22 | tags: 23 | - install_vault_and_consul 24 | -------------------------------------------------------------------------------- /securing_deployments_11/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | host_user: vagrant 4 | vault_version: 0.6.1 5 | consul_version: 0.6.4 6 | --------------------------------------------------------------------------------