├── .gitignore ├── Jenkinsfile ├── LICENSE ├── README.md ├── ansible ├── roles │ └── install │ │ ├── defaults │ │ └── main.yml │ │ └── tasks │ │ └── main.yml └── setup.yml ├── code ├── Dockerfile ├── hello.py ├── hello_test.py ├── requirements.txt └── run-flask.sh ├── packer └── packer-box.json └── terraform ├── application └── app.tf └── network ├── security_groups.tf └── vpc.tf /.gitignore: -------------------------------------------------------------------------------- 1 | *pyc 2 | */.terraform 3 | *.pem 4 | *.retry 5 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | ansiColor('css') { 2 | node { 3 | stage('Preparation') { // for display purposes 4 | // Get some code from a GitHub repository 5 | git 'https://github.com/sathya-demo/hello-world.git' 6 | } 7 | stage('Build') { 8 | sh 'pip install -r code/requirements.txt' 9 | } 10 | stage('Test') { 11 | sh 'cd code && python hello_test.py' 12 | } 13 | stage('Validate Packer Box') { 14 | withAWS(credentials:'sathyabhat', region:'ap-south-1') { 15 | sh 'packer validate packer/packer-box.json' 16 | } 17 | } 18 | stage('Validate Ansible Playbook') { 19 | withAWS(credentials:'sathyabhat', region:'ap-south-1') { 20 | sh 'ansible-playbook --syntax-check ansible/setup.yml' 21 | } 22 | } 23 | stage('Bake AMI') { 24 | withAWS(credentials:'sathyabhat', region:'ap-south-1') { 25 | withCredentials([string(credentialsId: 'datadog', variable: 'dd_token')]) { 26 | sh 'DD_TOKEN=${dd_token} packer build packer/packer-box.json' 27 | } 28 | } 29 | } 30 | stage('Launch Instance') { 31 | withAWS(credentials: 'sathyabhat', region:'ap-south-1') { 32 | sh('cd terraform && terraform init && terraform plan && terraform apply -auto-approve') 33 | } 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Sathya 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # devops-workflow 2 | Repo for demonstrating a typical "devops" workflow. A typical "devops" workflow consists of build -> test -> validate -> release cycle. 3 | 4 | This repo aims to show a typical build -> test -> validate -> release cycle can be created and applied. This repo consists of a simple Flask Hello World application and a simple test for it. 
The supporting toolchain around this includes: 5 | 6 | - Packer, for building a custom AMI based on an existing base (in this case, a stock Ubuntu 18.04 image) 7 | - Ansible, for config management and provisioning the necessary tools 8 | - Terraform, for building the necessary resources and instances on AWS. 9 | - A Jenkinsfile which defines a Jenkins pipeline script for each of the different stages. 10 | - Datadog integration so that your instance reports metrics to datadog upon launch 11 | 12 | Things that are included in the repo but not yet integrated in the cycle: 13 | 14 | - A Dockerfile for building a Docker image 15 | - Necessary integrations and plugins required for auto deploy on a github commit/push 16 | - Setting up datadog account / datadog + AWS integration 17 | 18 | ### Jenkins plugins 19 | 20 | The following Jenkins plugins were used to achieve the desired result: 21 | 22 | - AnsiColor (for coloring console output) 23 | - Github Integration 24 | - Pipeline AWS steps (to inject AWS credentials, but can be used for many other things) 25 | -------------------------------------------------------------------------------- /ansible/roles/install/defaults/main.yml: -------------------------------------------------------------------------------- 1 | dd_token: "{{ DD_TOKEN }}" 2 | -------------------------------------------------------------------------------- /ansible/roles/install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: update apt 3 | become: yes 4 | apt: 5 | update_cache: yes 6 | 7 | - name: Install list of packages 8 | become: yes 9 | apt: 10 | name: "{{item}}" 11 | state: present 12 | update_cache: yes 13 | with_items: 14 | - htop 15 | - tmux 16 | - software-properties-common 17 | - apt-transport-https 18 | - ca-certificates 19 | - curl 20 | 21 | - name: get docker key 22 | become: yes 23 | apt_key: 24 | url: https://download.docker.com/linux/ubuntu/gpg 25 | state: present 26 | 27 | 
- name: get datadog key 28 | become: yes 29 | apt_key: 30 | keyserver: hkp://keyserver.ubuntu.com:80 31 | id: 382E94DE 32 | state: present 33 | 34 | - name: add datadog repo 35 | become: yes 36 | apt_repository: 37 | repo: deb https://apt.datadoghq.com/ stable 6 38 | state: present 39 | 40 | - name: add docker repo 41 | become: yes 42 | apt_repository: 43 | repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release | lower }} test 44 | state: present 45 | 46 | - name: update apt for docker repo 47 | become: yes 48 | apt: 49 | update_cache: yes 50 | name: "{{item}}" 51 | state: present 52 | with_items: 53 | - docker-ce 54 | - datadog-agent 55 | 56 | - name: update datadog API key 57 | become: yes 58 | shell: "sed 's/api_key:.*/api_key: {{dd_token}}/' /etc/datadog-agent/datadog.yaml.example > /etc/datadog-agent/datadog.yaml" 59 | 60 | - name: restart datadog 61 | become: yes 62 | service: 63 | name: datadog-agent 64 | state: restarted 65 | -------------------------------------------------------------------------------- /ansible/setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup the HelloWorld AMI 3 | hosts: 4 | - 127.0.0.1 5 | connection: local 6 | tasks: 7 | - import_role: 8 | name: install 9 | -------------------------------------------------------------------------------- /code/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2-alpine 2 | 3 | WORKDIR /app 4 | 5 | ADD *py /app/ 6 | ADD requirements.txt /app/ 7 | ADD run-flask.sh /app/ 8 | RUN pip install -r requirements.txt 9 | 10 | CMD ["sh", "run-flask.sh"] 11 | -------------------------------------------------------------------------------- /code/hello.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | app = Flask(__name__) 3 | 4 | @app.route("/") 5 | def hello(): 6 | return "Hello 
World!" 7 | -------------------------------------------------------------------------------- /code/hello_test.py: -------------------------------------------------------------------------------- 1 | import hello 2 | import unittest 3 | 4 | class HelloTestCase(unittest.TestCase): 5 | def test_hello(self): 6 | hello_response = hello.hello() 7 | self.assertEqual(hello.hello(), 'Hello World!') 8 | 9 | 10 | if __name__ == '__main__': 11 | unittest.main() 12 | -------------------------------------------------------------------------------- /code/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | -------------------------------------------------------------------------------- /code/run-flask.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | FLASK_APP=hello.py flask run -h 0.0.0.0 -p 80 3 | -------------------------------------------------------------------------------- /packer/packer-box.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "DD_TOKEN": "{{env `DD_TOKEN`}}" 4 | }, 5 | "builders": [{ 6 | "type": "amazon-ebs", 7 | "access_key": "{{user `aws_access_key`}}", 8 | "secret_key": "{{user `aws_secret_key`}}", 9 | "region": "ap-south-1", 10 | "source_ami_filter": { 11 | "filters": { 12 | "virtualization-type": "hvm", 13 | "name": "ubuntu/images/*ubuntu-bionic-18.04-amd64-server-*", 14 | "root-device-type": "ebs" 15 | }, 16 | "most_recent": true, 17 | "owners": ["099720109477"] 18 | }, 19 | "instance_type": "t2.micro", 20 | "ssh_username": "ubuntu", 21 | "ami_name": "greatlearning-{{timestamp}}" 22 | }], 23 | "provisioners": [ 24 | { 25 | "type": "shell", 26 | "inline": [ 27 | "sudo apt-get update", 28 | "sudo apt-get -y install software-properties-common", 29 | "sudo apt-add-repository ppa:ansible/ansible", 30 | "sudo apt-get update", 31 | "sudo apt-get -y install ansible" 32 | ] 33 | }, 34 | 
{ 35 | "type": "ansible-local", 36 | "playbook_file": "ansible/setup.yml", 37 | "playbook_dir": "ansible", 38 | "extra_arguments": [ "--extra-vars", "DD_TOKEN={{user `DD_TOKEN`}}"] 39 | }] 40 | } 41 | -------------------------------------------------------------------------------- /terraform/application/app.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "application_ami" { 2 | most_recent = true 3 | filter { 4 | name = "name" 5 | values = ["greatlearning-*"] 6 | } 7 | owners = ["self"] 8 | } 9 | 10 | data "aws_vpc" "GreatLearning" { 11 | filter { 12 | name = "tag:Name" 13 | values = ["GreatLearning"] 14 | } 15 | 16 | } 17 | 18 | data "aws_subnet" "ap-south-1a-public" { 19 | filter { 20 | name = "tag:Name" 21 | values = ["ap-south-1a-public"] 22 | } 23 | } 24 | 25 | data "aws_security_group" "sg-allow-all" { 26 | filter { 27 | name = "tag:Name" 28 | values = ["sg-allow-all"] 29 | } 30 | } 31 | 32 | resource "aws_key_pair" "ec2-key" { 33 | key_name ="greatlearning" 34 | public_key = "${file("~/.ssh/id_rsa_greatlearning.pub")}" 35 | 36 | } 37 | 38 | resource "aws_instance" "app" { 39 | ami = "${data.aws_ami.application_ami.id}" 40 | instance_type = "t2.micro" 41 | subnet_id = "${data.aws_subnet.ap-south-1a-public.id}" 42 | vpc_security_group_ids = ["${data.aws_security_group.sg-allow-all.id}"] 43 | key_name = "${aws_key_pair.ec2-key.id}" 44 | tags { 45 | Name = "HelloWorld" 46 | CreatedOn = "${timestamp()}" 47 | vpc = "${data.aws_vpc.GreatLearning.id}" 48 | } 49 | } 50 | 51 | output "address" { 52 | value = "${aws_instance.app.public_ip}" 53 | } 54 | -------------------------------------------------------------------------------- /terraform/network/security_groups.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_security_group" "sg-all" { 3 | name = "SG to allow all traffic" 4 | vpc_id = "${aws_vpc.GreatLearning.id}" 5 | 6 | ingress { 7 | from_port = 0 8 
| to_port = 0 9 | protocol = "-1" 10 | cidr_blocks = ["0.0.0.0/0"] 11 | } 12 | 13 | egress { 14 | from_port = 0 15 | to_port = 0 16 | protocol = "-1" 17 | cidr_blocks = ["0.0.0.0/0"] 18 | } 19 | 20 | tags { 21 | Name = "sg-allow-all" 22 | environment = "dev" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /terraform/network/vpc.tf: -------------------------------------------------------------------------------- 1 | // Create a VPC 2 | resource "aws_vpc" "GreatLearning" { 3 | cidr_block = "10.0.0.0/25" 4 | enable_dns_hostnames = true 5 | 6 | tags { 7 | Name = "GreatLearning" 8 | environment = "dev" 9 | } 10 | } 11 | 12 | // Public subnets 13 | resource "aws_subnet" "ap-south-1a-public" { 14 | vpc_id = "${aws_vpc.GreatLearning.id}" 15 | cidr_block = "10.0.0.0/27" 16 | availability_zone = "ap-south-1a" 17 | map_public_ip_on_launch = true 18 | tags { 19 | Name = "ap-south-1a-public" 20 | environment = "dev" 21 | } 22 | } 23 | 24 | resource "aws_subnet" "ap-south-1b-public" { 25 | vpc_id = "${aws_vpc.GreatLearning.id}" 26 | cidr_block = "10.0.0.32/27" 27 | availability_zone = "ap-south-1b" 28 | map_public_ip_on_launch = true 29 | tags { 30 | Name = "ap-south-1b-public" 31 | environment = "dev" 32 | } 33 | 34 | } 35 | 36 | // Private subnets 37 | resource "aws_subnet" "ap-south-1a-private" { 38 | vpc_id = "${aws_vpc.GreatLearning.id}" 39 | cidr_block = "10.0.0.64/27" 40 | availability_zone = "ap-south-1a" 41 | tags { 42 | Name = "ap-south-1a-private" 43 | environment = "dev" 44 | } 45 | } 46 | 47 | resource "aws_subnet" "ap-south-1b-private" { 48 | vpc_id = "${aws_vpc.GreatLearning.id}" 49 | cidr_block = "10.0.0.96/27" 50 | availability_zone = "ap-south-1b" 51 | tags { 52 | Name = "ap-south-1b-private" 53 | environment = "dev" 54 | } 55 | } 56 | 57 | // Internet gateway, for Public subnet 58 | resource "aws_internet_gateway" "igw" { 59 | vpc_id = "${aws_vpc.GreatLearning.id}" 60 | tags { 61 | Name = "igw" 62 | 
environment = "dev" 63 | } 64 | 65 | } 66 | 67 | // EIP for NAT Gateway 68 | resource "aws_eip" "natgw-eip" { 69 | vpc = true 70 | tags { 71 | Name = "natgw-eip" 72 | environment = "dev" 73 | } 74 | } 75 | 76 | // NAT Gateway, for private subnets 77 | 78 | resource "aws_nat_gateway" "natgw-ap-south-1a-public" { 79 | allocation_id = "${aws_eip.natgw-eip.id}" 80 | subnet_id = "${aws_subnet.ap-south-1a-public.id}" 81 | tags { 82 | Name = "natgw-ap-south-1a-public" 83 | environment = "dev" 84 | } 85 | } 86 | // Route table, for public subnet 87 | resource "aws_route_table" "rt-public" { 88 | vpc_id = "${aws_vpc.GreatLearning.id}" 89 | 90 | route { 91 | cidr_block = "0.0.0.0/0" 92 | gateway_id = "${aws_internet_gateway.igw.id}" 93 | } 94 | 95 | tags { 96 | Name = "rt-public" 97 | environment = "dev" 98 | } 99 | 100 | } 101 | 102 | // Route table for private subnet 103 | resource "aws_route_table" "rt-ap-south-1a-private" { 104 | vpc_id = "${aws_vpc.GreatLearning.id}" 105 | 106 | route { 107 | cidr_block = "0.0.0.0/0" 108 | nat_gateway_id = "${aws_nat_gateway.natgw-ap-south-1a-public.id}" 109 | } 110 | tags { 111 | Name = "rt-ap-south-1a-private" 112 | environment = "dev" 113 | } 114 | } 115 | 116 | // Route table association for public subnet 117 | resource "aws_route_table_association" "rta-ap-south-1a-public" { 118 | subnet_id = "${aws_subnet.ap-south-1a-public.id}" 119 | route_table_id = "${aws_route_table.rt-public.id}" 120 | 121 | } 122 | 123 | resource "aws_route_table_association" "rta-ap-south-1b-public" { 124 | subnet_id = "${aws_subnet.ap-south-1b-public.id}" 125 | route_table_id = "${aws_route_table.rt-public.id}" 126 | } 127 | 128 | 129 | // Route table for private subnet association 130 | 131 | resource "aws_route_table_association" "rta-ap-south-1a-private" { 132 | subnet_id = "${aws_subnet.ap-south-1a-private.id}" 133 | route_table_id = "${aws_route_table.rt-ap-south-1a-private.id}" 134 | } 135 | 136 | resource "aws_route_table_association" 
"rta-ap-south-1b-private" { 137 | subnet_id = "${aws_subnet.ap-south-1b-private.id}" 138 | route_table_id = "${aws_route_table.rt-ap-south-1a-private.id}" 139 | } 140 | --------------------------------------------------------------------------------