├── ansible
│   ├── playbooks
│   │   ├── group_vars
│   │   │   └── all
│   │   ├── roles
│   │   │   ├── kafka
│   │   │   │   ├── tasks
│   │   │   │   │   ├── main.yml
│   │   │   │   │   ├── kafka-route53.yml
│   │   │   │   │   └── kafka.yml
│   │   │   │   ├── defaults
│   │   │   │   │   └── development.yml
│   │   │   │   └── templates
│   │   │   │       └── kafka-task-def.j2
│   │   │   ├── zookeeper
│   │   │   │   ├── tasks
│   │   │   │   │   ├── main.yml
│   │   │   │   │   ├── zoo-route53.yml
│   │   │   │   │   ├── zoo1.yml
│   │   │   │   │   ├── zoo3.yml
│   │   │   │   │   └── zoo2.yml
│   │   │   │   ├── defaults
│   │   │   │   │   └── development
│   │   │   │   │       ├── zoo1.yml
│   │   │   │   │       ├── zoo2.yml
│   │   │   │   │       └── zoo3.yml
│   │   │   │   └── templates
│   │   │   │       └── zookeeper-task-def.j2
│   │   │   ├── cassandra-master
│   │   │   │   ├── defaults
│   │   │   │   │   └── master.yml
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── cassandra-task-def.j2
│   │   │   ├── cassandra-client
│   │   │   │   ├── defaults
│   │   │   │   │   └── client.yml
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── cassandra-task-def.j2
│   │   │   ├── kong-client
│   │   │   │   ├── defaults
│   │   │   │   │   └── development.yml
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── kong-task-def.j2
│   │   │   ├── kong-master
│   │   │   │   ├── defaults
│   │   │   │   │   └── development.yml
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── kong-task-def.j2
│   │   │   └── files
│   │   │       └── orchestrate.py
│   │   ├── kafka.yml
│   │   ├── zookeeper.yml
│   │   ├── kong-client.yml
│   │   ├── kong-master.yml
│   │   ├── cassandra-client.yml
│   │   └── cassandra-master.yml
│   ├── ssh-development.cfg
│   ├── ansible_call_destroy.sh
│   ├── ansible_call_deploy.sh
│   ├── site-ecs-delete.yml
│   ├── site-ecs-create.yml
│   └── hosts
│       └── ec2.ini
├── images
│   ├── kafka.png
│   └── kafka-on-ecs.png
├── terraform
│   ├── modules
│   │   ├── cloudwatch-log-groups
│   │   │   ├── variables.tf
│   │   │   └── main.tf
│   │   ├── ansible-ecs
│   │   │   ├── variable.tf
│   │   │   ├── templates
│   │   │   │   ├── ansible_ecs_destroy.sh
│   │   │   │   └── ansible_ecs_deploy.sh
│   │   │   └── main.tf
│   │   ├── route53-hosted-zone
│   │   │   ├── variables.tf
│   │   │   ├── output.tf
│   │   │   └── main.tf
│   │   ├── efs
│   │   │   ├── variables.tf
│   │   │   ├── output.tf
│   │   │   └── main.tf
│   │   ├── ecs-log-groups
│   │   │   └── main.tf
│   │   ├── bastion
│   │   │   ├── templates
│   │   │   │   ├── userdata_bastion
│   │   │   │   └── ssh.cfg
│   │   │   ├── outputs.tf
│   │   │   ├── bastion-sg.tf
│   │   │   ├── variables.tf
│   │   │   ├── templates.tf
│   │   │   ├── bastion.tf
│   │   │   └── iam.tf
│   │   ├── ecs-kafka-zk-cluster
│   │   │   ├── outputs.tf
│   │   │   ├── templates.tf
│   │   │   ├── templates
│   │   │   │   └── userdata-kafka-cluster
│   │   │   ├── variables.tf
│   │   │   ├── kafka-zk-sg.tf
│   │   │   ├── iam.tf
│   │   │   └── ecs-kafka-zk.tf
│   │   └── vpc
│   │       ├── output.tf
│   │       ├── variables.tf
│   │       ├── vpc-sg.tf
│   │       └── main.tf
│   └── environments
│       └── development
│           ├── secrets.tfvars
│           ├── output.tf
│           ├── terraform.tfvars
│           ├── variables.tf
│           └── main.tf
├── .gitignore
├── docker-images
│   ├── kafka-docker-wm
│   │   ├── start-kafka-shell.sh
│   │   ├── broker-list.sh
│   │   ├── download-kafka.sh
│   │   ├── README.md
│   │   ├── docker-compose.yml
│   │   ├── docker-compose-single-broker.yml
│   │   ├── Dockerfile
│   │   ├── create-topics.sh
│   │   ├── start-kafka.sh
│   │   └── LICENSE
│   └── docker-zk-exhibitor
│       ├── README.md
│       ├── include
│       │   ├── web.xml
│       │   └── wrapper.sh
│       ├── LICENSE
│       └── Dockerfile
├── LICENSE
└── README.md
/ansible/playbooks/group_vars/all:
--------------------------------------------------------------------------------
1 | # Variables/values for all nodes
2 |
--------------------------------------------------------------------------------
/images/kafka.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GloballogicPractices/ECS-Kafka/HEAD/images/kafka.png
--------------------------------------------------------------------------------
/images/kafka-on-ecs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GloballogicPractices/ECS-Kafka/HEAD/images/kafka-on-ecs.png
--------------------------------------------------------------------------------
/ansible/playbooks/roles/kafka/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include: kafka.yml
4 | - include: kafka-route53.yml
5 |
--------------------------------------------------------------------------------
/terraform/modules/cloudwatch-log-groups/variables.tf:
--------------------------------------------------------------------------------
1 | variable "log_group_name" {}
2 | variable "environment" {}
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/terraform.tfstate.backup
2 | **/.terraform/
3 | **/.terraform/*
4 | **/.DS_Store
5 | **/*.retry
6 | **/terraform.tfstate
7 |
--------------------------------------------------------------------------------
/ansible/playbooks/kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Kafka container stack setup
4 | hosts: localhost
5 | gather_facts: False
6 | become: False
7 | roles:
8 | - kafka
9 |
--------------------------------------------------------------------------------
/ansible/playbooks/zookeeper.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: ZK container stack setup
4 | hosts: localhost
5 | gather_facts: False
6 | become: False
7 | roles:
8 | - zookeeper
9 |
--------------------------------------------------------------------------------
/terraform/modules/ansible-ecs/variable.tf:
--------------------------------------------------------------------------------
1 | variable "env" {}
2 | variable "region" {}
3 | variable "log_group_name" {}
4 | variable "dependency_id" {
5 | default = []
6 | }
7 |
--------------------------------------------------------------------------------
/ansible/playbooks/kong-client.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Kong Service creation
4 | hosts: localhost
5 | gather_facts: False
6 | become: False
7 | roles:
8 | - kong-client
9 |
--------------------------------------------------------------------------------
/ansible/playbooks/kong-master.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Kong Service creation
4 | hosts: localhost
5 | gather_facts: False
6 | become: False
7 | roles:
8 | - kong-master
9 |
--------------------------------------------------------------------------------
/terraform/modules/route53-hosted-zone/variables.tf:
--------------------------------------------------------------------------------
1 |
2 | variable "hosted_zone_name" {}
3 | variable "vpc_id" {}
4 | #variable "route53-recordset-ip" {}
5 | #variable "instance-ip" {}
6 |
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/start-kafka-shell.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -e HOST_IP=$1 -e ZK=$2 -i -t wurstmeister/kafka /bin/bash
3 |
--------------------------------------------------------------------------------
/ansible/playbooks/cassandra-client.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Cassandra container stack setup
4 | hosts: localhost
5 | gather_facts: False
6 | become: False
7 | roles:
8 | - cassandra-client
9 |
--------------------------------------------------------------------------------
/ansible/playbooks/cassandra-master.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Cassandra seed-node container stack setup
4 | hosts: localhost
5 | gather_facts: False
6 | become: False
7 | roles:
8 | - cassandra-master
9 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/zookeeper/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Deploy zookeeper clusters
4 |
5 | - include: zoo1.yml
6 |
7 | - include: zoo2.yml
8 |
9 | - include: zoo3.yml
10 |
11 | - include: zoo-route53.yml
12 |
--------------------------------------------------------------------------------
/terraform/environments/development/secrets.tfvars:
--------------------------------------------------------------------------------
1 | aws_key_name = "test-cluster-key"
2 | aws_key_path = "~/.ssh/test-cluster-key.pem"
3 | // Can be generated using
4 | // ssh-keygen -y -f mykey.pem > mykey.pub
5 | keypair_public_key = "ssh-rsa publickey"
6 |
--------------------------------------------------------------------------------
/terraform/modules/efs/variables.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Variables for EFS
3 | */
4 |
5 | variable "efs_cluster_name" {}
6 | variable "subnet_ids" {}
7 | variable "environment" {}
8 | variable "count" {}
9 | variable "security_group_id" {
10 | default = []
11 | }
12 |
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/broker-list.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CONTAINERS=$(docker ps | grep 9092 | awk '{print $1}')
4 | BROKERS=$(for CONTAINER in $CONTAINERS; do docker port $CONTAINER 9092 | sed -e "s/0.0.0.0:/$HOST_IP:/g"; done)
5 | echo $BROKERS | sed -e 's/ /,/g'
6 |
--------------------------------------------------------------------------------
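broker-list.sh builds a comma-separated host:port list from every local container publishing port 9092, substituting HOST_IP for the 0.0.0.0 bind address. A usage sketch on a Docker host (the IP and mapped ports below are illustrative):

    export HOST_IP=10.2.1.15        # this host's address
    ./broker-list.sh
    # e.g. 10.2.1.15:32768,10.2.1.15:32769
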
/terraform/modules/ecs-log-groups/main.tf:
--------------------------------------------------------------------------------
1 | /* Create a log group and pass it to Ansible for cloudwatch logs */
2 |
3 | resource "aws_cloudwatch_log_group" "log-group" {
4 | name = "${var.log_group_name}"
5 |
6 | tags {
7 | Environment = "${var.environment}"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/terraform/modules/bastion/templates/userdata_bastion:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | repo_update: all
4 | repo_upgrade: all
5 |
6 | packages:
7 | - python
8 | - perl
9 | - perl-Switch
10 | - perl-DateTime
11 | - perl-Sys-Syslog
12 | - perl-LWP-Protocol-https
13 | - perl-Digest-SHA
14 |
--------------------------------------------------------------------------------
/terraform/modules/cloudwatch-log-groups/main.tf:
--------------------------------------------------------------------------------
1 | /* Create a log group and pass it to Ansible for cloudwatch logs */
2 |
3 | resource "aws_cloudwatch_log_group" "log-group" {
4 | name = "${var.log_group_name}"
5 |
6 | tags {
7 | Environment = "${var.environment}"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
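The module needs only a log group name and an environment tag. A minimal invocation sketch (the module path is relative to an environment directory; the group name mirrors the one passed to Ansible in ansible_call_deploy.sh):

    module "ecs_log_group" {
      source         = "../../modules/cloudwatch-log-groups"
      log_group_name = "/ecs/development-logs"
      environment    = "development"
    }
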
/terraform/modules/route53-hosted-zone/output.tf:
--------------------------------------------------------------------------------
1 | /* We use this to track dependencies between modules */
2 |
3 |
4 | output "dependency_id" {
5 | value = "${null_resource.module_dependency.id}"
6 | }
7 |
8 | output "zone-id" {
9 | value = "${aws_route53_zone.private-zone.id}"
10 | }
11 |
--------------------------------------------------------------------------------
/terraform/modules/efs/output.tf:
--------------------------------------------------------------------------------
1 | output "efs_fs_id" {
2 | value = "${aws_efs_file_system.efs.id}"
3 | }
4 |
5 | /*
6 | output "sub_id" {
7 | value = "${aws_efs_mount_target.efs.subnet_id}"
8 | }
9 | */
10 |
11 | output "dependency_id" {
12 | value = "${null_resource.module_dependency.id}"
13 | }
14 |
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/download-kafka.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | mirror=$(curl --stderr /dev/null https://www.apache.org/dyn/closer.cgi\?as_json\=1 | jq -r '.preferred')
4 | url="${mirror}kafka/${KAFKA_VERSION}/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz"
5 | wget -q "${url}" -O "/tmp/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz"
6 |
--------------------------------------------------------------------------------
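The script asks the Apache mirror service (via jq) for the preferred mirror and expects KAFKA_VERSION and SCALA_VERSION in the environment; in the image they come from the Dockerfile build args. A hedged standalone run with the default versions:

    # needs curl, jq and wget installed
    KAFKA_VERSION=0.10.2.1 SCALA_VERSION=2.12 ./download-kafka.sh
    ls -lh /tmp/kafka_2.12-0.10.2.1.tgz
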
/terraform/modules/route53-hosted-zone/main.tf:
--------------------------------------------------------------------------------
1 | /* private hosted zone */
2 |
3 | resource "aws_route53_zone" "private-zone" {
4 | name = "${var.hosted_zone_name}"
5 | vpc_id = "${var.vpc_id}"
6 | }
7 |
8 |
9 | resource "null_resource" "module_dependency" {
10 | depends_on = [
11 | "aws_route53_zone.private-zone",
12 | ]
13 | }
14 |
--------------------------------------------------------------------------------
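The zone created here is the one the Ansible route53 tasks later write the kafka/zookeeper records into ({{env}}-internal.com). A minimal invocation sketch, assuming env=development and the vpc module from this repo:

    module "private_zone" {
      source           = "../../modules/route53-hosted-zone"
      hosted_zone_name = "development-internal.com"
      vpc_id           = "${module.vpc.vpc_id}"
    }
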
/docker-images/kafka-docker-wm/README.md:
--------------------------------------------------------------------------------
1 | kafka-docker
2 | ============
3 |
4 | Dockerfile for [Apache Kafka](http://kafka.apache.org/)
5 |
6 | ### NOTE: Customized version of [Docker Hub](https://hub.docker.com/r/wurstmeister/kafka/)
7 |
8 | Customizations:
9 |
10 | - Made the partition count a variable so that more partitions can be created by default if desired
11 |
--------------------------------------------------------------------------------
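For local testing the bundled compose files can be used as-is; a sketch assuming a docker-machine style host at 192.168.99.100 (the address used in the compose files):

    docker-compose -f docker-compose-single-broker.yml up -d   # one ZooKeeper, one broker, topic test:1:1
    ./start-kafka-shell.sh 192.168.99.100 192.168.99.100:2181  # shell with HOST_IP and ZK pre-set
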
/terraform/modules/bastion/outputs.tf:
--------------------------------------------------------------------------------
1 | output "bastion-sg-id" {
2 | value = "${aws_security_group.jump-sg.id}"
3 | }
4 |
5 | output "ip_authorised_for_inbound_traffic" {
6 | value = "${var.control_cidr}"
7 | }
8 |
9 |
10 | output "bastion_eip" {
11 | value = "${aws_eip.jump-node.public_ip}"
12 | }
13 |
14 | output "bastion_dns" {
15 | value = "${aws_instance.jump_node.public_dns}"
16 | }
17 |
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | zookeeper:
4 | image: wurstmeister/zookeeper
5 | ports:
6 | - "2181:2181"
7 | kafka:
8 | build: .
9 | ports:
10 | - "9092"
11 | environment:
12 | KAFKA_ADVERTISED_HOST_NAME: 192.168.99.100
13 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
14 | volumes:
15 | - /var/run/docker.sock:/var/run/docker.sock
16 |
--------------------------------------------------------------------------------
/terraform/environments/development/output.tf:
--------------------------------------------------------------------------------
1 | output "private_subnet_ids_str" {
2 | value = "${module.vpc.aws_pri_subnet_id_str}"
3 | }
4 |
5 | output "private_subnet_ids" {
6 | value = "${module.vpc.aws_pri_subnet_id}"
7 | }
8 |
9 | output "public_subnet_ids_str" {
10 | value = "${module.vpc.aws_pub_subnet_id_str}"
11 | }
12 |
13 | output "public_subnet_ids" {
14 | value = "${module.vpc.aws_pub_subnet_id}"
15 | }
16 |
--------------------------------------------------------------------------------
/terraform/modules/ecs-kafka-zk-cluster/outputs.tf:
--------------------------------------------------------------------------------
1 | /* We use this to track dependencies between modules */
2 | output "dependency_id" {
3 | value = "${null_resource.module_dependency.id}"
4 | }
5 |
6 | output "kafka-cluster-sg-id" {
7 | value = "${aws_security_group.kafka-cluster-sg.id}"
8 | }
9 |
10 | // This is used in userdata
11 | output "kafka_cluster_name" {
12 | value = "${aws_ecs_cluster.kafka-cluster.name}"
13 | }
14 |
--------------------------------------------------------------------------------
/terraform/modules/ecs-kafka-zk-cluster/templates.tf:
--------------------------------------------------------------------------------
1 | /* Specify all templates to be used here */
2 |
3 | data "template_file" "userdata-kafka-cluster" {
4 | template = "${file("${path.module}/templates/userdata-kafka-cluster")}"
5 |
6 | vars {
7 | ecs_cluster_name = "${aws_ecs_cluster.kafka-cluster.name}"
8 | efs_data_dir = "${var.efs_data_dir}"
9 | efs_fs_id = "${var.efs_fs_id}"
10 | region = "${var.region}"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/docker-compose-single-broker.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | zookeeper:
4 | image: wurstmeister/zookeeper
5 | ports:
6 | - "2181:2181"
7 | kafka:
8 | build: .
9 | ports:
10 | - "9092:9092"
11 | environment:
12 | KAFKA_ADVERTISED_HOST_NAME: 192.168.99.100
13 | KAFKA_CREATE_TOPICS: "test:1:1"
14 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
15 | volumes:
16 | - /var/run/docker.sock:/var/run/docker.sock
17 |
--------------------------------------------------------------------------------
/terraform/modules/bastion/bastion-sg.tf:
--------------------------------------------------------------------------------
1 | // Bastion-node-sg
2 | resource "aws_security_group" "jump-sg" {
3 | name = "jump-sg"
4 | vpc_id = "${var.vpc-id}"
5 |
6 | ingress {
7 | from_port = 22
8 | to_port = 22
9 | protocol = "tcp"
10 | cidr_blocks = ["${split(",",var.control_cidr)}"]
11 | }
12 |
13 | egress {
14 | from_port = 0
15 | to_port = 0
16 | protocol = "-1"
17 | cidr_blocks = ["0.0.0.0/0"]
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/docker-images/docker-zk-exhibitor/README.md:
--------------------------------------------------------------------------------
1 | Runs an [Exhibitor](https://github.com/Netflix/exhibitor)-managed [ZooKeeper](http://zookeeper.apache.org/) instance using S3 for backups and automatic node discovery.
2 |
3 | ### NOTE: Customized version of the Docker Index image [mbabineau/zookeeper-exhibitor](https://index.docker.io/u/mbabineau/zookeeper-exhibitor/)
4 |
5 |
6 | ### Versions
7 | * Exhibitor 1.5.5
8 | * ZooKeeper 3.4.6
9 |
10 |
11 | Customizations:
12 |
13 | - Made the EFS directory a variable parameter provided via the ECS task definition
14 |
--------------------------------------------------------------------------------
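Once a container is up, the Exhibitor REST endpoint referenced in include/web.xml doubles as a quick health check; a sketch assuming the default port 8181 exposed by the Dockerfile:

    curl -s http://localhost:8181/exhibitor/v1/cluster/state
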
/ansible/playbooks/roles/kafka/tasks/kafka-route53.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This will register our kafka instance with our private domain
3 | - name: Update Route53 entries for kafka
4 | route53:
5 | command: create
6 | zone: "{{env}}-internal.com"
7 | record: "kafka.{{env}}-internal.com"
8 | type: A
9 | ttl: 300
10 | value: "{% for host in groups['tag_Role1_kafka_instances'] %}{{ hostvars[host]['ansible_ssh_host']}}{% if not loop.last %},{% endif %}{% endfor %}"
11 | overwrite: true
12 | private_zone: true
13 |
--------------------------------------------------------------------------------
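The task collapses the addresses of every host tagged Role1=kafka_instances into a single round-robin A record. From inside the VPC the result can be checked with dig (the record name shown assumes env=development):

    dig +short kafka.development-internal.com
    # one line per registered broker IP
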
/terraform/modules/bastion/variables.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Variables for bastion
3 | */
4 |
5 | variable "keypair_public_key" {}
6 | variable "vpc-id" {}
7 | variable "region" {}
8 | variable "pub_sub_id" {}
9 | variable "aws_key_name" {}
10 | variable "ansible_ssh_user" {}
11 | variable "proxy_cidr" {}
12 | variable "bastion_instance_type" {}
13 | variable "environment" {}
14 |
15 |
16 |
17 | variable "dependency_id" {
18 | default = ""
19 | }
20 |
21 | variable "public_sub_cidr" {
22 | default = []
23 | }
24 |
25 | variable "control_cidr" {
26 | }
27 |
--------------------------------------------------------------------------------
/ansible/ssh-development.cfg:
--------------------------------------------------------------------------------
1 | Host jump 13.56.132.182 ec2-54-67-121-158.us-west-1.compute.amazonaws.com
2 | Hostname 13.56.132.182
3 | User ec2-user
4 | ForwardAgent yes
5 | ControlMaster auto
6 | ControlPath ~/.ssh/ansible-%r@%h:%p
7 | ControlPersist 5m
8 | UserKnownHostsFile=/dev/null
9 | StrictHostKeyChecking=no
10 |
11 | Host 10.2.*
12 | ProxyCommand ssh ec2-user@13.56.132.182 -W %h:%p -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
13 | User ec2-user
14 | UserKnownHostsFile=/dev/null
15 | StrictHostKeyChecking=no
16 |
17 |
--------------------------------------------------------------------------------
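This is the rendered copy of the bastion module's ssh.cfg template: hosts in the 10.2.* range are reached by proxying through the jump host. A usage sketch (the private IP is illustrative):

    ssh -F ansible/ssh-development.cfg jump        # straight to the bastion
    ssh -F ansible/ssh-development.cfg 10.2.1.15   # private instance via ProxyCommand
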
/ansible/playbooks/roles/zookeeper/tasks/zoo-route53.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This will register our zookeeper instance with our private domain
3 | - name: Update Route53 entries for zookeeper
4 | route53:
5 | command: create
6 | zone: "{{env}}-internal.com"
7 | record: "zookeeper.{{env}}-internal.com"
8 | type: A
9 | ttl: 300
10 | value: "{% for host in groups['tag_Role2_zookeeper_instances'] %}{{ hostvars[host]['ansible_ssh_host']}}{% if not loop.last %},{% endif %}{% endfor %}"
11 | overwrite: true
12 | private_zone: true
13 |
--------------------------------------------------------------------------------
/ansible/ansible_call_destroy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ## Pre-reqs
4 | # - Ansible > 2.0
5 | # - Botocore
6 | # - Boto3
7 | # - Python 2.7
8 |
9 | ansible_code_dir="../../../ansible/"
10 |
11 | # For Dynamic inventory
12 | export AWS_REGION=eu-central-1
13 | echo $AWS_REGION
14 | # **** Only for localhost***
15 | # **** ALL VM level configuration is done via ansible pull *****#
16 |
17 | ansible-playbook -i $ansible_code_dir/hosts/ec2.py $ansible_code_dir/site-ecs-delete.yml --extra-vars \
18 | "env=development
19 | region=eu-central-1
20 | log_group_name=/ecs/development-logs
21 | "
22 |
23 |
--------------------------------------------------------------------------------
/terraform/modules/bastion/templates/ssh.cfg:
--------------------------------------------------------------------------------
1 | Host jump ${jump_public_ip} ${jump_public_dns}
2 | Hostname ${jump_public_ip}
3 | User ${ansible_ssh_user}
4 | ForwardAgent yes
5 | ControlMaster auto
6 | ControlPath ~/.ssh/ansible-%r@%h:%p
7 | ControlPersist 5m
8 | UserKnownHostsFile=/dev/null
9 | StrictHostKeyChecking=no
10 |
11 | Host ${proxy_cidr}
12 | ProxyCommand ssh ${ansible_ssh_user}@${jump_public_ip} -W %h:%p -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
13 | User ${ansible_ssh_user}
14 | UserKnownHostsFile=/dev/null
15 | StrictHostKeyChecking=no
16 |
--------------------------------------------------------------------------------
/terraform/modules/ansible-ecs/templates/ansible_ecs_destroy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ## Pre-reqs
4 | # - Ansible > 2.0
5 | # - Botocore
6 | # - Boto3
7 | # - Python 2.7
8 |
9 | ansible_code_dir="../../../ansible/"
10 |
11 | # For Dynamic inventory
12 | export AWS_REGION=${region}
13 | echo $AWS_REGION
14 | # **** Only for localhost***
15 | # **** ALL VM level configuration is done via ansible pull *****#
16 |
17 | ansible-playbook -i $ansible_code_dir/hosts/ec2.py $ansible_code_dir/site-ecs-delete.yml --extra-vars \
18 | "env=${env}
19 | region=${region}
20 | log_group_name=${log_group_name}
21 | "
22 |
--------------------------------------------------------------------------------
/ansible/ansible_call_deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ## Pre-reqs
3 | # - Ansible > 2.0
4 | # - Botocore
5 | # - Boto3
6 | # - Python 2.7
7 |
8 | # Get path of ansible-playbook
9 |
10 |
11 |
12 | ansible_code_dir="../../../ansible/"
13 |
14 | # For Dynamic inventory
15 | export AWS_REGION=eu-central-1
16 | echo $AWS_REGION
17 | # **** Only for localhost***
18 | # **** ALL VM level configuration is done via ansible pull *****#
19 |
20 | ansible-playbook -i $ansible_code_dir/hosts/ec2.py $ansible_code_dir/site-ecs-create.yml --extra-vars \
21 | "env=development
22 | region=eu-central-1
23 | log_group_name=/ecs/development-logs
24 | "
25 |
26 |
--------------------------------------------------------------------------------
/terraform/modules/bastion/templates.tf:
--------------------------------------------------------------------------------
1 | /* Specify all templates to be used here */
2 |
3 | data "template_file" "ssh_cfg" {
4 | template = "${file("${path.module}/templates/ssh.cfg")}"
5 | depends_on = [ ]
6 |
7 | vars {
8 | jump_public_ip = "${aws_eip.jump-node.public_ip}"
9 | jump_public_dns = "${aws_instance.jump_node.public_dns}"
10 | ansible_ssh_user = "${var.ansible_ssh_user}"
11 | proxy_cidr = "${var.proxy_cidr}"
12 | }
13 |
14 | }
15 |
16 |
17 |
18 | data "template_file" "userdata-bastion" {
19 | template = "${file("${path.module}/templates/userdata_bastion")}"
20 | }
21 |
--------------------------------------------------------------------------------
/terraform/modules/ansible-ecs/templates/ansible_ecs_deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ## Pre-reqs
3 | # - Ansible > 2.0
4 | # - Botocore
5 | # - Boto3
6 | # - Python 2.7
7 |
8 | # Get path of ansible-playbook
9 |
10 |
11 |
12 | ansible_code_dir="../../../ansible/"
13 |
14 | # For Dynamic inventory
15 | export AWS_REGION=${region}
16 | echo $AWS_REGION
17 | # **** Only for localhost***
18 | # **** ALL VM level configuration is done via ansible pull *****#
19 |
20 | ansible-playbook -i $ansible_code_dir/hosts/ec2.py $ansible_code_dir/site-ecs-create.yml --extra-vars \
21 | "env=${env}
22 | region=${region}
23 | log_group_name=${log_group_name}
24 | "
25 |
--------------------------------------------------------------------------------
/terraform/modules/vpc/output.tf:
--------------------------------------------------------------------------------
1 | output "vpc_id" {
2 | value = "${aws_vpc.vpc.id}"
3 | }
4 |
5 | output "aws_pub_subnet_id" {
6 | value = ["${aws_subnet.public-subnet.*.id}"]
7 | }
8 |
9 | output "aws_pri_subnet_id" {
10 | value = ["${aws_subnet.private-subnet.*.id}"]
11 | }
12 |
13 | // str output values can be used with the split() function
14 | // in other resources
15 | output "aws_pri_subnet_id_str" {
16 | value = "${join(",",aws_subnet.private-subnet.*.id)}"
17 | }
18 |
19 | output "aws_pub_subnet_id_str" {
20 | value = "${join(",",aws_subnet.public-subnet.*.id)}"
21 | }
22 |
23 | /*
24 | output "aws_default_sg_id" {
25 | value = "${aws_security_group.default-vpc-sg.id}"
26 | }
27 | */
28 |
--------------------------------------------------------------------------------
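The *_str outputs exist so a consumer can turn the joined subnet list back into elements with split(); the efs module below does exactly this with its subnet_ids input. The pattern, in the repo's Terraform 0.11-style syntax:

    subnet_id = "${element(split(",", var.subnet_ids), count.index)}"
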
/terraform/modules/efs/main.tf:
--------------------------------------------------------------------------------
1 | /* EFS Module for connecting EFS to EC2 instances */
2 |
3 | resource "aws_efs_file_system" "efs" {
4 | creation_token = "${var.efs_cluster_name}-efs"
5 | tags {
6 | Name = "${var.efs_cluster_name}-efs"
7 | Terraform = "true"
8 | Environment = "${var.environment}"
9 | Stack = "GLP"
10 | }
11 | }
12 |
13 | // One EFS mount target per subnet
14 | resource "aws_efs_mount_target" "efs" {
15 | file_system_id = "${aws_efs_file_system.efs.id}"
16 | count = "${var.count}"
17 | subnet_id = "${element(split(",",var.subnet_ids),count.index)}"
18 | security_groups = [ "${var.security_group_id}" ]
19 |
20 | depends_on = ["aws_efs_file_system.efs"]
21 | }
22 |
23 |
24 | /* We use this as a dependency handle for other modules */
25 | resource "null_resource" "module_dependency" {
26 | depends_on = ["aws_efs_mount_target.efs"]
27 | }
28 |
--------------------------------------------------------------------------------
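A sketch of wiring the module from an environment; the security group reference and count are illustrative, the subnet string comes from the vpc module's output:

    module "efs" {
      source            = "../../modules/efs"
      efs_cluster_name  = "kafka-development"
      environment       = "development"
      subnet_ids        = "${module.vpc.aws_pri_subnet_id_str}"
      count             = 2
      security_group_id = ["${aws_security_group.efs-sg.id}"]   # illustrative SG reference
    }
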
/terraform/modules/vpc/variables.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Variables for VPC
3 | */
4 |
5 | variable "environment" {}
6 |
7 | variable "dhcp_domain_name_servers" {
8 | default = []
9 | }
10 |
11 |
12 | variable "vpc_cidr" {
13 | description = "String - VPC CIDR. Will be mapped in individual env files"
14 |
15 | }
16 |
17 | variable "public_sub_cidr" {
18 | description = "Cidr for public subnet"
19 | default = []
20 |
21 | }
22 |
23 | variable "private_sub_cidr" {
24 | description = "Cidr for private subnet"
25 | default = []
26 | }
27 |
28 | variable "azs" {
29 | description = "Value for AZs for private subnet. Deploying two subnets for private vpc only"
30 | default = []
31 | }
32 |
33 | variable "enable_dns_hostnames" {
34 | description = "String - Boolean indicating whether to enable DNS hostnames in the VPC"
35 | default = false
36 | }
37 |
38 | variable "vpc_name" {
39 | description = "String - Name of vpc"
40 | }
41 |
--------------------------------------------------------------------------------
/terraform/modules/ecs-kafka-zk-cluster/templates/userdata-kafka-cluster:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | repo_update: all
4 | repo_upgrade: all
5 |
6 | packages:
7 | - python
8 | - perl
9 | - perl-Switch
10 | - perl-DateTime
11 | - perl-Sys-Syslog
12 | - perl-LWP-Protocol-https
13 | - perl-Digest-SHA
14 | - nfs-utils
15 |
16 | write_files:
17 | - content: |
18 | ECS_CLUSTER=${ecs_cluster_name}
19 | ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","syslog","fluentd","awslogs"]
20 | path: /etc/ecs/ecs.config
21 |
22 |
23 | runcmd:
24 | - easy_install pip
25 | - /usr/local/bin/pip install -q awscli
26 | - mkdir ${efs_data_dir}
27 | - chown -R ec2-user:ec2-user ${efs_data_dir}
28 | - echo "${efs_fs_id}.efs.${region}.amazonaws.com:/ ${efs_data_dir} nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
29 | - mount -a -t nfs4
30 | - service docker restart
31 | - start ecs
32 | - rm /etc/init/scope.conf
33 |
--------------------------------------------------------------------------------
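The cloud-config joins the instance to the rendered ECS cluster and mounts the shared EFS file system at efs_data_dir over NFSv4.1. On a running instance (reached through the bastion) the result can be sanity-checked with:

    cat /etc/ecs/ecs.config   # should show ECS_CLUSTER=<rendered cluster name>
    mount | grep nfs4         # EFS mounted at the efs_data_dir path
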
/terraform/modules/ecs-kafka-zk-cluster/variables.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Variables for ECS_CLUSTER
3 | */
4 |
5 |
6 | variable "keypair_public_key" {}
7 | variable "vpc-id" {}
8 | variable "region" {}
9 | variable "aws_key_name" {}
10 | variable "environment" {}
11 | variable "kafka_instance_type" {}
12 | variable "control_cidr" {}
13 | variable "efs_data_dir" {}
14 | variable "efs_fs_id" {}
15 | variable "bastion_sg_id" {}
16 | #variable "vpc_sg_id" {}
17 | variable "ami_owner_name" {}
18 | variable "ami_name_regex" {}
19 | variable "vpc_cidr" {}
20 |
21 | // ASG
22 | variable "kafka_asg_max_size" {}
23 | variable "kafka_asg_min_size" {}
24 | variable "kafka_asg_desired_size" {}
25 |
26 |
27 | variable "private_subnet_ids" {
28 | default = []
29 | }
30 |
31 | variable "dependency_id" {
32 | default = ""
33 | }
34 |
35 | variable "public_sub_cidr" {
36 | default = []
37 | }
38 |
39 | variable "private_sub_cidr" {
40 | default = []
41 | }
42 |
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM anapsix/alpine-java
2 |
3 | ARG kafka_version=0.10.2.1
4 | ARG scala_version=2.12
5 |
6 | MAINTAINER wurstmeister
7 |
8 | RUN apk add --update unzip wget curl docker jq coreutils
9 |
10 | ENV KAFKA_VERSION=$kafka_version SCALA_VERSION=$scala_version
11 | ADD download-kafka.sh /tmp/download-kafka.sh
12 | RUN chmod a+x /tmp/download-kafka.sh && sync && /tmp/download-kafka.sh && tar xfz /tmp/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz -C /opt && rm /tmp/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz && ln -s /opt/kafka_${SCALA_VERSION}-${KAFKA_VERSION} /opt/kafka
13 |
14 | VOLUME ["/kafka"]
15 |
16 | ENV KAFKA_HOME /opt/kafka
17 | ENV PATH ${PATH}:${KAFKA_HOME}/bin
18 | ADD start-kafka.sh /usr/bin/start-kafka.sh
19 | ADD broker-list.sh /usr/bin/broker-list.sh
20 | ADD create-topics.sh /usr/bin/create-topics.sh
21 | # The scripts need to have executable permission
22 | RUN chmod a+x /usr/bin/start-kafka.sh && \
23 | chmod a+x /usr/bin/broker-list.sh && \
24 | chmod a+x /usr/bin/create-topics.sh
25 | # Use "exec" form so that it runs as PID 1 (useful for graceful shutdown)
26 | CMD ["start-kafka.sh"]
27 |
--------------------------------------------------------------------------------
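Kafka and Scala versions are plain build args, so other releases can be built without editing the Dockerfile; a build sketch using the defaults:

    docker build -t kafka-docker-wm \
      --build-arg kafka_version=0.10.2.1 \
      --build-arg scala_version=2.12 \
      docker-images/kafka-docker-wm
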
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 GloballogicPractices
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/terraform/modules/ecs-kafka-zk-cluster/kafka-zk-sg.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Opening all traffic within SG only
3 | */
4 |
5 | resource "aws_security_group" "kafka-cluster-sg" {
6 | name = "kafka-cluster-sg"
7 | vpc_id = "${var.vpc-id}"
8 |
9 | // allows traffic from the SG itself for tcp
10 | ingress {
11 | from_port = 0
12 | to_port = 65535
13 | protocol = "tcp"
14 | self = true
15 | }
16 |
17 | // allows traffic from the SG itself for udp
18 | ingress {
19 | from_port = 0
20 | to_port = 65535
21 | protocol = "udp"
22 | self = true
23 | }
24 |
25 | ingress {
26 | from_port = 0
27 | to_port = 65535
28 | protocol = "udp"
29 | cidr_blocks = ["${var.vpc_cidr}"]
30 | }
31 |
32 | ingress {
33 | from_port = 0
34 | to_port = 65535
35 | protocol = "tcp"
36 | cidr_blocks = ["${var.vpc_cidr}"]
37 | }
38 |
39 | ingress {
40 | from_port = 22
41 | to_port = 22
42 | protocol = "tcp"
43 | security_groups = ["${var.bastion_sg_id}"]
44 | }
45 |
46 |
47 | egress {
48 | from_port = 0
49 | to_port = 0
50 | protocol = "-1"
51 | cidr_blocks = ["0.0.0.0/0"]
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/docker-images/docker-zk-exhibitor/include/web.xml:
--------------------------------------------------------------------------------
1 |
2 |
7 |
8 |
9 | A Protected Page
10 | /*
11 |
12 |
13 | zk
14 |
15 |
16 |
17 |
18 |
19 | A Protected Page
20 | /exhibitor/v1/cluster/state
21 |
22 |
23 |
24 |
25 | zk
26 |
27 |
28 |
29 | BASIC
30 | Zookeeper
31 |
32 |
33 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/kafka/defaults/development.yml:
--------------------------------------------------------------------------------
1 | task_family: "KAFKA"
2 | task_role_arn: ''
3 | network_mode: 'host'
4 |
5 | # First container
6 | c1_appname: kafka
7 | c1_cpu: 512
8 | c1_memory: 3500
9 | c1_soft_memory: 1024
10 | c1_ports:
11 | - c1hostPort: 9020
12 | c1containerPort: 9020
13 | c1protocol: tcp
14 | c1_image: fali2/ecs-zookeeper:latest
15 | c1_essential: "True"
16 | c1_privileged: False
17 | c1_environment_variables:
18 | - c1name : KAFKA_ADVERTISED_PORT
19 | c1value: >
20 | "9092"
21 | - c1name: KAFKA_ZOOKEEPER_CONNECT
22 | c1value: >
23 | "zookeeper.{{env}}-internal.com:2181"
24 |
25 | c1_awslogs_group: "/aws/ecs/{{env}}"
26 | # Our logs go to us-east-2
27 | c1_log_region: "us-east-2"
28 | c1_awslogs_stream_prefix: kafka/
29 | c1_docker_labels: kafka
30 | c1_docker_security_options: no-new-privileges
31 |
32 |
33 | # Volume attachment
34 |
35 | src_volume_name: "kafka-data"
36 | volume_src_path: "/kafka-data/kafka/"
37 | mount_container_path: "/kafka/"
38 | mount_readonly_bool: False
39 |
40 |
41 | # Service Setup
42 | cluster_name: "Kafka-cluster-{{env}}"
43 | service_name: "kafka-service"
44 | count: "3"
45 |
46 | # Stage to Prod it has to be rolling update
47 | maximumPercent: 200
48 | minimumHealthyPercent: 50
49 |
--------------------------------------------------------------------------------
/ansible/site-ecs-delete.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include: playbooks/cassandra-master.yml
4 | vars:
5 | ecs_action: delete
6 | env: "{{env}}"
7 | region: "{{region}}"
8 | log_group_name: "{{log_group_name}}"
9 |
10 | - include: playbooks/cassandra-client.yml
11 | vars:
12 | ecs_action: delete
13 | env: "{{env}}"
14 | region: "{{region}}"
15 | log_group_name: "{{log_group_name}}"
16 |
17 | - include: playbooks/zookeeper.yml
18 | vars:
19 | ecs_action: delete
20 | env: "{{env}}"
21 | region: "{{region}}"
22 | kafka_zk_cluster_name: "{{kafka_zk_cluster_name}}"
23 |
24 | - include: playbooks/kafka.yml
25 | vars:
26 | ecs_action: delete
27 | env: "{{env}}"
28 | region: "{{region}}"
29 | kafka_zk_cluster_name: "{{kafka_zk_cluster_name}}"
30 |
31 | ###############################################
32 | # Kong last, to stay in sync with the create order.
33 | # The ordering makes no difference for the delete.
34 | - include: playbooks/kong-master.yml
35 | vars:
36 | ecs_action: delete
37 | env: "{{env}}"
38 | region: "{{region}}"
39 | log_group_name: "{{log_group_name}}"
40 |
41 | - include: playbooks/kong-client.yml
42 | vars:
43 | ecs_action: delete
44 | env: "{{env}}"
45 | region: "{{region}}"
46 | log_group_name: "{{log_group_name}}"
47 |
--------------------------------------------------------------------------------
/docker-images/docker-zk-exhibitor/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Original work copyright (c) 2014 The Factory
4 | Modified work copyright (c) 2015 Michael Babineau
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
--------------------------------------------------------------------------------
/ansible/playbooks/roles/cassandra-master/defaults/master.yml:
--------------------------------------------------------------------------------
1 | # Seed nodes are master nodes
2 | task_family: "CASSANDRA-SEED"
3 | task_role_arn: ''
4 | network_mode: "bridge"
5 | c1_appname: cassandra-master
6 | c1_cpu: 100
7 | c1_memory: 2048
8 | c1_soft_memory: 1024
9 | c1_ports:
10 | - c1hostPort: 9042
11 | c1containerPort: 9042
12 | c1protocol: tcp
13 | - c1hostPort: 7000
14 | c1containerPort: 7000
15 | c1protocol: tcp
16 | - c1hostPort: 9160
17 | c1containerPort: 9160
18 | c1protocol: tcp
19 | c1_image: cassandra:2.2
20 | c1_essential: True
21 | c1_privileged: False
22 | c1_environment_variables:
23 | - c1name : CASSANDRA_CLUSTER_NAME
24 | c1value: cassandra-cluster-stage
25 | c1_awslogs_group: "{{log_group_name}}"
26 | c1_log_region: "{{region}}"
27 | c1_awslogs_stream_prefix: cassandra-master
28 | c1_docker_labels: cassandra-master
29 | c1_docker_security_options: no-new-privileges
30 |
31 | # Volume attachment
32 | src_volume_name: "cassandra-data-ebs"
33 | volume_src_path: "/cassandra-data"
34 | mount_container_path: "/var/lib/cassandra"
35 | mount_readonly_bool: False
36 |
37 | # Service Setup
38 | cluster_name: "Cassandra-Cluster-{{env}}"
39 | service_name: "cassandra-master"
40 | count: "1"
41 |
42 | # Stage to Prod it has to be rolling update
43 | maximumPercent: 200
44 | minimumHealthyPercent: 50
45 |
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/create-topics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | if [[ -z "$START_TIMEOUT" ]]; then
5 | START_TIMEOUT=600
6 | fi
7 |
8 | start_timeout_exceeded=false
9 | count=0
10 | step=10
11 | while netstat -lnt | awk '$4 ~ /:'$KAFKA_PORT'$/ {exit 1}'; do
12 | echo "waiting for kafka to be ready"
13 | sleep $step;
14 | count=$(expr $count + $step)
15 | if [ $count -gt $START_TIMEOUT ]; then
16 | start_timeout_exceeded=true
17 | break
18 | fi
19 | done
20 |
21 | if $start_timeout_exceeded; then
22 | echo "Not able to auto-create topic (waited for $START_TIMEOUT sec)"
23 | exit 1
24 | fi
25 |
26 | if [[ -n $KAFKA_CREATE_TOPICS ]]; then
27 | IFS=','; for topicToCreate in $KAFKA_CREATE_TOPICS; do
28 | echo "creating topics: $topicToCreate"
29 | IFS=':' read -a topicConfig <<< "$topicToCreate"
30 | if [ ${topicConfig[3]} ]; then
31 | JMX_PORT='' $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper $KAFKA_ZOOKEEPER_CONNECT --replication-factor ${topicConfig[2]} --partition ${topicConfig[1]} --topic "${topicConfig[0]}" --config cleanup.policy="${topicConfig[3]}"
32 | else
33 | JMX_PORT='' $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper $KAFKA_ZOOKEEPER_CONNECT --replication-factor ${topicConfig[2]} --partition ${topicConfig[1]} --topic "${topicConfig[0]}"
34 | fi
35 | done
36 | fi
37 |
--------------------------------------------------------------------------------
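Topics are declared in KAFKA_CREATE_TOPICS as name:partitions:replicas, comma-separated, with an optional fourth cleanup.policy field; e.g. (topic names are illustrative):

    KAFKA_CREATE_TOPICS="events:3:2,audit:1:1:compact"
    # "events": 3 partitions, replication factor 2; "audit": compacted, single partition
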
/ansible/playbooks/roles/zookeeper/defaults/development/zoo1.yml:
--------------------------------------------------------------------------------
1 | task_family: "ZOO1-EXBTOR"
2 | task_role_arn: ''
3 | network_mode: 'host'
4 |
5 | # First container
6 | c1_appname: zoo1
7 | c1_cpu: 1024
8 | c1_memory: 2048
9 | c1_soft_memory: 512
10 | c1_ports:
11 | - c1hostPort: 2181
12 | c1containerPort: 2181
13 | c1protocol: tcp
14 | - c1hostPort: 8181
15 | c1containerPort: 8181
16 | c1protocol: tcp
17 | - c1hostPort: 2888
18 | c1containerPort: 2888
19 | c1protocol: tcp
20 | - c1hostPort: 3888
21 | c1containerPort: 3888
22 | c1protocol: tcp
23 | c1_image: fali2/ecs-zookeeper
24 | c1_essential: "True"
25 | c1_privileged: False
26 | #c1_environment_variables:
27 | # - c1name : HOSTNAME
28 | # c1value: "zoo1.weave.local"
29 |
30 | c1_awslogs_group: "/aws/ecs/{{env}}"
31 | # Our logs go to us-east-2
32 | c1_log_region: "us-east-2"
33 | c1_awslogs_stream_prefix: zookeeper/
34 | c1_docker_labels: zookeeper
35 | c1_docker_security_options: no-new-privileges
36 |
37 | # Volume attachment
38 |
39 | src_volume_name: "zk-data"
40 | volume_src_path: "/kafka-data/zookeeper-exhibitor/"
41 | mount_container_path: "/opt/zookeeper/local_configs/"
42 | mount_readonly_bool: False
43 |
44 |
45 | # Service Setup
46 | cluster_name: "Kafka-cluster-{{env}}"
47 | service_name: "zookeeper1-service"
48 | count: "1"
49 |
50 | # Stage to Prod it has to be rolling update
51 | maximumPercent: 200
52 | minimumHealthyPercent: 100
53 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/zookeeper/defaults/development/zoo2.yml:
--------------------------------------------------------------------------------
1 | task_family: "ZOO2-EXBTOR"
2 | task_role_arn: ''
3 | network_mode: 'host'
4 |
5 | # First container
6 | c1_appname: zoo2
7 | c1_cpu: 1024
8 | c1_memory: 2048
9 | c1_soft_memory: 512
10 | c1_ports:
11 | - c1hostPort: 2181
12 | c1containerPort: 2181
13 | c1protocol: tcp
14 | - c1hostPort: 8181
15 | c1containerPort: 8181
16 | c1protocol: tcp
17 | - c1hostPort: 2888
18 | c1containerPort: 2888
19 | c1protocol: tcp
20 | - c1hostPort: 3888
21 | c1containerPort: 3888
22 | c1protocol: tcp
23 | c1_image: fali2/ecs-zookeeper
24 | c1_essential: "True"
25 | c1_privileged: False
26 | #c1_environment_variables:
27 | # - c1name : HOSTNAME
28 | # c1value: "zoo1.weave.local"
29 |
30 | c1_awslogs_group: "/aws/ecs/{{env}}"
31 | # Our logs go to us-east-2
32 | c1_log_region: "us-east-2"
33 | c1_awslogs_stream_prefix: zookeeper/
34 | c1_docker_labels: zookeeper
35 | c1_docker_security_options: no-new-privileges
36 |
37 | # Volume attachment
38 |
39 | src_volume_name: "zk-data"
40 | volume_src_path: "/kafka-data/zookeeper-exhibitor/"
41 | mount_container_path: "/opt/zookeeper/local_configs/"
42 | mount_readonly_bool: False
43 |
44 |
45 | # Service Setup
46 | cluster_name: "Kafka-cluster-{{env}}"
47 | service_name: "zookeeper2-service"
48 | count: "1"
49 |
50 | # Stage to Prod it has to be rolling update
51 | maximumPercent: 200
52 | minimumHealthyPercent: 100
53 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/zookeeper/defaults/development/zoo3.yml:
--------------------------------------------------------------------------------
1 | task_family: "ZOO3-EXBTOR"
2 | task_role_arn: ''
3 | network_mode: 'host'
4 |
5 | # First container
6 | c1_appname: zoo3
7 | c1_cpu: 1024
8 | c1_memory: 2048
9 | c1_soft_memory: 512
10 | c1_ports:
11 | - c1hostPort: 2181
12 | c1containerPort: 2181
13 | c1protocol: tcp
14 | - c1hostPort: 8181
15 | c1containerPort: 8181
16 | c1protocol: tcp
17 | - c1hostPort: 2888
18 | c1containerPort: 2888
19 | c1protocol: tcp
20 | - c1hostPort: 3888
21 | c1containerPort: 3888
22 | c1protocol: tcp
23 | c1_image: fali2/ecs-zookeeper
24 | c1_essential: "True"
25 | c1_privileged: False
26 | #c1_environment_variables:
27 | # - c1name : HOSTNAME
28 | # c1value: "zoo1.weave.local"
29 |
30 | c1_awslogs_group: "/aws/ecs/{{env}}"
31 | # Our logs go to us-east-2
32 | c1_log_region: "us-east-2"
33 | c1_awslogs_stream_prefix: zookeeper/
34 | c1_docker_labels: zookeeper
35 | c1_docker_security_options: no-new-privileges
36 |
37 | # Volume attachment
38 |
39 | src_volume_name: "zk-data"
40 | volume_src_path: "/kafka-data/zookeeper-exhibitor/"
41 | mount_container_path: "/opt/zookeeper/local_configs/"
42 | mount_readonly_bool: False
43 |
44 |
45 | # Service Setup
46 | cluster_name: "Kafka-cluster-{{env}}"
47 | service_name: "zookeeper3-service"
48 | count: "1"
49 |
50 | # Stage to Prod it has to be rolling update
51 | maximumPercent: 200
52 | minimumHealthyPercent: 100
53 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/cassandra-client/defaults/client.yml:
--------------------------------------------------------------------------------
1 | task_family: "CASSANDRA-CLIENT"
2 | task_role_arn: ''
3 | network_mode: "bridge"
4 | c1_appname: cassandra-client
5 | c1_cpu: 100
6 | c1_memory: 2048
7 | c1_soft_memory: 1024
8 | c1_ports:
9 | - c1hostPort: 9042
10 | c1containerPort: 9042
11 | c1protocol: tcp
12 | - c1hostPort: 7000
13 | c1containerPort: 7000
14 | c1protocol: tcp
15 | - c1hostPort: 9160
16 | c1containerPort: 9160
17 | c1protocol: tcp
18 | c1_image: cassandra:2.2
19 | c1_essential: True
20 | c1_privileged: False
21 | c1_environment_variables:
22 | - c1name : CASSANDRA_CLUSTER_NAME
23 | c1value: cassandra-cluster-stage
24 | - c1name : CASSANDRA_SEEDS
25 | c1value: cassandra-master.weave.local
26 | c1_awslogs_group: "{{log_group_name}}"
27 | c1_log_region: "{{region}}"
28 | c1_awslogs_stream_prefix: cassandra-client
29 | c1_docker_labels: cassandra-client
30 | c1_docker_security_options: no-new-privileges
31 |
32 | # Volume attachment
33 | src_volume_name: "cassandra-data-ebs"
34 | volume_src_path: "/cassandra-data"
35 | mount_container_path: "/var/lib/cassandra"
36 | mount_readonly_bool: False
37 |
38 | # Service Setup
39 | cluster_name: "Cassandra-Cluster-{{env}}"
40 | service_name: "cassandra-client"
41 |
42 | # Assuming a cluster of three ( 1 seed and 2 child nodes )
43 | count: "2"
44 |
45 | # Stage to Prod it has to be rolling update
46 | maximumPercent: 200
47 | minimumHealthyPercent: 50
48 |
--------------------------------------------------------------------------------
/ansible/site-ecs-create.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #####################################################################################
4 | #####################################################################################
5 |
6 |
7 | - include: playbooks/cassandra-master.yml
8 | vars:
9 | ecs_action: create
10 | env: "{{env}}"
11 | region: "{{region}}"
12 | log_group_name: "{{log_group_name}}"
13 |
14 | - include: playbooks/cassandra-client.yml
15 | vars:
16 | ecs_action: create
17 | env: "{{env}}"
18 | region: "{{region}}"
19 | log_group_name: "{{log_group_name}}"
20 |
21 | - include: playbooks/zookeeper.yml
22 | vars:
23 | ecs_action: create
24 | env: "{{env}}"
25 | region: "{{region}}"
26 | kafka_zk_cluster_name: "{{kafka_zk_cluster_name}}"
27 |
28 | - include: playbooks/kafka.yml
29 | vars:
30 | ecs_action: create
31 | env: "{{env}}"
32 | region: "{{region}}"
33 | kafka_zk_cluster_name: "{{kafka_zk_cluster_name}}"
34 |
35 | #############################
36 | # Keep Kong last as it has a
37 | # dependency on the ELB
38 | #############################
39 | - include: playbooks/kong-master.yml
40 | vars:
41 | ecs_action: create
42 | env: "{{env}}"
43 | region: "{{region}}"
44 | log_group_name: "{{log_group_name}}"
45 |
46 | - include: playbooks/kong-client.yml
47 | vars:
48 | ecs_action: create
49 | env: "{{env}}"
50 | region: "{{region}}"
51 | log_group_name: "{{log_group_name}}"
52 |
--------------------------------------------------------------------------------
/terraform/modules/vpc/vpc-sg.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Opening all traffic within SG for VPC only
3 | This did not work and hence, disabled
4 | In VPC traffic flow should be allowed with sg-ids rather than IPs as a
5 | recommended security best practice
6 | -------------------------------------
7 |
8 | resource "aws_security_group" "default-vpc-sg" {
9 | name = "default-vpc-sg"
10 | vpc_id = "${aws_vpc.vpc.id}"
11 |
12 | // allows traffic from the SG itself for tcp
13 | ingress {
14 | from_port = 0
15 | to_port = 65535
16 | protocol = "tcp"
17 | self = true
18 | }
19 |
20 | // allows traffic from the SG itself for udp
21 | ingress {
22 | from_port = 0
23 | to_port = 65535
24 | protocol = "udp"
25 | self = true
26 | }
27 |
28 |
29 | ingress {
30 | from_port = 6783
31 | to_port = 6783
32 | protocol = "tcp"
33 | self = true
34 | cidr_blocks = ["${var.vpc_cidr}"]
35 | }
36 |
37 | ingress {
38 | from_port = 6783
39 | to_port = 6783
40 | protocol = "udp"
41 | self = true
42 | cidr_blocks = ["${var.vpc_cidr}"]
43 | }
44 |
45 | ingress {
46 | from_port = 6784
47 | to_port = 6784
48 | protocol = "udp"
49 | self = true
50 | cidr_blocks = ["${var.vpc_cidr}"]
51 | }
52 |
53 | egress {
54 | from_port = 0
55 | to_port = 0
56 | protocol = "-1"
57 | cidr_blocks = ["${var.vpc_cidr}"]
58 | }
59 |
60 | }
61 | */
62 |
--------------------------------------------------------------------------------
/terraform/modules/bastion/bastion.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Bastion-node
3 | Nothing else apart from sshd to be on this node
4 | */
5 |
6 | /* Data source to look up the AMI to be used */
7 | data "aws_ami" "bastion" {
8 | most_recent = true
9 |
10 | filter {
11 | name = "owner-alias"
12 | values = ["amazon"]
13 | }
14 |
15 | filter {
16 | name = "name"
17 | values = ["amzn-ami-*-amazon-ecs*"]
18 | }
19 |
20 | }
21 |
22 |
23 | resource "aws_instance" "jump_node" {
24 | ami = "${data.aws_ami.bastion.id}"
25 | instance_type = "${var.bastion_instance_type}"
26 | key_name = "${var.aws_key_name}"
27 | vpc_security_group_ids = ["${aws_security_group.jump-sg.id}"]
28 | #count = "${length(var.public_sub_cidr)}"
29 | user_data = "${data.template_file.userdata-bastion.rendered}"
30 | subnet_id = "${var.pub_sub_id}"
31 | associate_public_ip_address = true
32 | source_dest_check = false
33 | // Implicit dependency
34 | iam_instance_profile = "${aws_iam_instance_profile.bastion_profile.name}"
35 |
36 | tags = {
37 | Name = "ECS-BASTION-NODE-${var.environment}"
38 | Role = "bastion"
39 | Environment = "${var.environment}"
40 | }
41 |
42 | }
43 |
44 |
45 | // assign EIP to jump-node
46 | resource "aws_eip" "jump-node" {
47 | instance = "${aws_instance.jump_node.id}"
48 | vpc = true
49 | }
50 |
--------------------------------------------------------------------------------
/terraform/modules/bastion/iam.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_role" "bastion_role" {
2 | name = "bastion_role-${var.environment}"
3 |
4 | assume_role_policy = <<EOF
--------------------------------------------------------------------------------
/docker-images/docker-zk-exhibitor/Dockerfile:
--------------------------------------------------------------------------------
18 | >> /etc/java-8-openjdk/security/java.security \
19 |
20 | # Install ZK
21 | && curl -Lo /tmp/zookeeper.tgz $ZK_RELEASE \
22 | && mkdir -p /opt/zookeeper/transactions /opt/zookeeper/snapshots \
23 | && tar -xzf /tmp/zookeeper.tgz -C /opt/zookeeper --strip=1 \
24 | && rm /tmp/zookeeper.tgz \
25 |
26 | # Install Exhibitor
27 | && mkdir -p /opt/exhibitor \
28 | && curl -Lo /opt/exhibitor/pom.xml $EXHIBITOR_POM \
29 | && mvn -f /opt/exhibitor/pom.xml package \
30 | && ln -s /opt/exhibitor/target/exhibitor*jar /opt/exhibitor/exhibitor.jar \
31 |
32 | # Remove build-time dependencies
33 | && apt-get purge -y --auto-remove $BUILD_DEPS \
34 | && rm -rf /var/lib/apt/lists/*
35 |
36 |
37 | # Add the wrapper script to setup configs and exec exhibitor
38 | ADD include/wrapper.sh /opt/exhibitor/wrapper.sh
39 |
40 | # Add the optional web.xml for authentication
41 | ADD include/web.xml /opt/exhibitor/web.xml
42 |
43 | USER root
44 | WORKDIR /opt/exhibitor
45 | EXPOSE 2181 2888 3888 8181
46 |
47 | ENTRYPOINT ["bash", "-ex", "/opt/exhibitor/wrapper.sh"]
48 |
--------------------------------------------------------------------------------
/terraform/environments/development/variables.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Variables for all modules
3 | */
4 |
5 | // VPC
6 | variable "region" {}
7 | variable "vpc_cidr" {}
8 | variable "aws_key_path" {}
9 | variable "aws_key_name" {}
10 | variable "keypair_public_key" {}
11 | variable "vpc_name" {}
12 | variable "environment" {}
13 | variable "private_sub_control_cidr" {}
14 | variable "ansible_ssh_user" {}
15 | variable "control_cidr" {}
16 | variable "proxy_cidr" {}
17 | variable "ami_owner_name" {}
18 | variable "ami_name_regex" {}
19 |
20 | // Route53
21 | variable "main_zone_id" {}
22 | variable "public_domain_name" {}
23 |
24 |
25 | // ELB Kong
26 | variable "kong_elb_name" {}
27 | variable "kong_elb_sg_name" {}
28 | variable "kong_elb_healthy_threshold" {}
29 | variable "kong_elb_unhealthy_threshold" {}
30 | variable "kong_elb_timeout" {}
31 | variable "kong_elb_elb_health_target" {}
32 | variable "kong_elb_interval" {}
33 | variable "kong_ssl_certificate_id" {}
34 |
35 | // ELB Main
36 | variable "main_elb_name" {}
37 | variable "main_elb_sg_name" {}
38 | variable "main_elb_healthy_threshold" {}
39 | variable "main_elb_unhealthy_threshold" {}
40 | variable "main_elb_timeout" {}
41 | variable "main_elb_elb_health_target" {}
42 | variable "main_elb_interval" {}
43 | variable "main_ssl_certificate_id" {}
44 |
45 |
46 | // Cassandra EBS
47 | variable "cass_ebs_dev_name" {}
48 | variable "cass_ebs_vol_type" {}
49 | variable "cass_ebs_vol_size" {}
50 | variable "cass_data_dir" {}
51 |
52 |
53 | // Declare classes of instances for each modules
54 | variable "bastion_instance_type" {}
55 | variable "ingress_instance_type" {}
56 | variable "cassandra_instance_type" {}
57 |
58 |
59 | // ASG size for each cluster
60 |
61 | variable "cassandra_asg_max_size" {}
62 | variable "cassandra_asg_min_size" {}
63 | variable "cassandra_asg_desired_size" {}
64 | variable "kong_asg_max_size" {}
65 | variable "kong_asg_min_size" {}
66 | variable "kong_asg_desired_size" {}
67 |
68 |
69 | // Generic
70 | variable "azs" {
71 | default = []
72 | }
73 |
74 |
75 | variable "public_sub_cidr" {
76 | default = []
77 | }
78 |
79 |
80 | variable "private_sub_cidr" {
81 | default = []
82 | }
83 |
--------------------------------------------------------------------------------
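The variables above are fed from terraform.tfvars and secrets.tfvars in the same environment directory. As a hedged sketch (values are environment-specific and not shown here), a plan/apply run from that directory might look like:

    # terraform.tfvars is loaded automatically; secrets.tfvars must be passed explicitly
    cd terraform/environments/development
    terraform init
    terraform plan -var-file=secrets.tfvars -out=development.plan
    terraform apply development.plan
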
/terraform/modules/ansible-ecs/main.tf:
--------------------------------------------------------------------------------
1 | /*
2 | This module generates the configuration for our Ansible playbooks.
3 | We need to pass env, region and cluster name for all applications to
4 | our Ansible roles.
5 | Two provisioners are used:
6 | - create time ( deploy all ECS services )
7 | - destroy time ( delete all ECS services, otherwise Terraform won't be able to destroy the clusters completely )
8 | */
9 |
10 |
11 | data "template_file" "ansible_ecs_deploy" {
12 | template = "${file("${path.module}/templates/ansible_ecs_deploy.sh")}"
13 |
14 | vars {
15 | env = "${lower(var.env)}"
16 | region = "${var.region}"
17 | log_group_name = "${var.log_group_name}"
18 | }
19 |
20 | }
21 |
22 | data "template_file" "ansible_ecs_destroy" {
23 | template = "${file("${path.module}/templates/ansible_ecs_destroy.sh")}"
24 |
25 | vars {
26 | env = "${lower(var.env)}"
27 | region = "${var.region}"
28 | log_group_name = "${var.log_group_name}"
29 | }
30 |
31 | }
32 |
33 |
34 | resource "null_resource" "ansible_ecs_generate" {
35 |
36 | triggers {
37 | # This will trigger create on every run
38 | filename = "test-${uuid()}"
39 | }
40 |
41 | provisioner "local-exec" {
42 | command = "echo '${ data.template_file.ansible_ecs_deploy.rendered }' > ../../../ansible/ansible_call_deploy.sh"
43 | }
44 |
45 | provisioner "local-exec" {
46 | command = "chmod 755 ../../../ansible/ansible_call_deploy.sh"
47 | }
48 |
49 | provisioner "local-exec" {
50 | command = "../../../ansible/ansible_call_deploy.sh"
51 | }
52 |
53 | }
54 |
55 |
56 |
57 | resource "null_resource" "ansible_ecs_destroy" {
58 |
59 | triggers {
60 | template_rendered = "${data.template_file.ansible_ecs_destroy.rendered}"
61 | }
62 |
63 | provisioner "local-exec" {
64 | command = "echo '${ data.template_file.ansible_ecs_destroy.rendered }' > ../../../ansible/ansible_call_destroy.sh"
65 | when = "destroy"
66 | }
67 |
68 | provisioner "local-exec" {
69 | command = "chmod 755 ../../../ansible/ansible_call_destroy.sh"
70 | when = "destroy"
71 | }
72 |
73 | provisioner "local-exec" {
74 | command = "../../../ansible/ansible_call_destroy.sh"
75 | when = "destroy"
76 | }
77 |
78 | }
79 |
--------------------------------------------------------------------------------
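The rendered ansible_call_deploy.sh is written into the ansible directory and executed by the provisioners above. The real template lives in templates/ansible_ecs_deploy.sh and is not reproduced here; the following is only a hypothetical sketch of its shape, assuming it drives the site-ecs-create.yml playbook with the three template variables:

    #!/bin/bash
    # Hypothetical sketch only -- see templates/ansible_ecs_deploy.sh for the real script.
    # env, region and log_group_name are substituted by the template_file data source.
    set -e
    cd "$(dirname "$0")"
    ansible-playbook -i hosts site-ecs-create.yml \
      -e "env=development region=us-west-2 log_group_name=my-ecs-logs ecs_action=create"
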
/ansible/playbooks/roles/kong-client/defaults/development.yml:
--------------------------------------------------------------------------------
1 | task_family: "KONG-API-GATEWAY"
2 | task_role_arn: ''
3 |
4 | # Kong api gw
5 | c1_appname: kong
6 | c1_cpu: 100
7 | c1_memory: 2048
8 | c1_soft_memory: 512
9 | c1_ports:
10 | - c1hostPort: 8000
11 | c1containerPort: 8000
12 | c1protocol: tcp
13 | - c1hostPort: 8443
14 | c1containerPort: 8443
15 | c1protocol: tcp
16 | - c1hostPort: 8001
17 | c1containerPort: 8001
18 | c1protocol: tcp
19 | - c1hostPort: 7946
20 | c1containerPort: 7946
21 | c1protocol: tcp
22 | - c1hostPort: 7946
23 | c1containerPort: 7946
24 | c1protocol: udp
25 | - c1hostPort: 7373
26 | c1containerPort: 7373
27 | c1protocol: tcp
28 | c1_image: fali2/kong-weave
29 | c1_essential: True
30 | c1_privileged: False
31 | c1_environment_variables:
32 | - c1name : KONG_CASSANDRA_CONTACT_POINTS
33 | c1value: cassandra-master.weave.local
34 | - c1name: KONG_CLUSTER_LISTEN_RPC
35 | c1value: 127.0.0.1:7373
36 | - c1name : KONG_CASSANDRA_PORT
37 | c1value: >
38 | "9042"
39 | - c1name: KONG_DATABASE
40 | c1value: cassandra
41 | - c1name: KONG_CLUSTER_ENCRYPT_KEY
42 | c1value: cg8StVXbQJ0gPvMd9o7yrg==
43 | - c1name: KONG_CASSANDRA_KEYSPACE
44 | c1value: kong
45 | - c1name: KONG_CASSANDRA_REPL_STRATEGY
46 | c1value: SimpleStrategy
47 | - c1name: KONG_CASSANDRA_REPL_FACTOR
48 | c1value: >
49 | "2"
50 | c1_awslogs_group: "{{log_group_name}}"
51 | c1_log_region: "{{region}}"
52 | c1_awslogs_stream_prefix: kong-gw-logs/
53 | c1_docker_labels: kong-containers
54 | c1_docker_security_options: no-new-privileges
55 |
56 |
57 | # Kong ui
58 | c2_appname: kong-apigw-ui
59 | c2_cpu: 256
60 | c2_memory: 256
61 | c2_soft_memory: 256
62 | c2_ports:
63 | - c2hostPort: 8080
64 | c2containerPort: 8080
65 | c2protocol: tcp
66 | c2_image: fali2/kongui-weave
67 | c2_essential: True
68 | c2_privileged: False
69 | c2_environment_variables:
70 | - c2name : PASSWD
71 | c2value: myencryptedpassword
72 | - c2name: servicename
73 | c2value: kong-apigw
74 | - c2name: UNAME
75 | c2value: kong-user
76 | c2_awslogs_group: "{{log_group_name}}"
77 | c2_log_region: "{{region}}"
78 | c2_awslogs_stream_prefix: kong-ui/
79 | c2_docker_labels: kong-containers
80 |
81 |
82 | # Service Setup
83 | cluster_name: "Ingress-Cluster-{{env}}"
84 | service_name: "kong-client-service"
85 | count: "2"
86 |
87 | maximumPercent: 200
88 | minimumHealthyPercent: 50
89 |
--------------------------------------------------------------------------------
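The KONG_CLUSTER_ENCRYPT_KEY above is committed to the repository; for a real deployment you would presumably generate a fresh 128-bit key per environment (and use the same value in both the kong-client and kong-master defaults so the cluster members can join each other), for example:

    # Generate a fresh base64-encoded 128-bit key for KONG_CLUSTER_ENCRYPT_KEY
    openssl rand -base64 16
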
/ansible/playbooks/roles/kong-master/defaults/development.yml:
--------------------------------------------------------------------------------
1 | task_family: "KONG-API-GATEWAY"
2 | task_role_arn: ''
3 |
4 | # Kong api gw
5 | c1_appname: kong
6 | c1_cpu: 100
7 | c1_memory: 2048
8 | c1_soft_memory: 512
9 | c1_ports:
10 | - c1hostPort: 8000
11 | c1containerPort: 8000
12 | c1protocol: tcp
13 | - c1hostPort: 8443
14 | c1containerPort: 8443
15 | c1protocol: tcp
16 | - c1hostPort: 8001
17 | c1containerPort: 8001
18 | c1protocol: tcp
19 | - c1hostPort: 7946
20 | c1containerPort: 7946
21 | c1protocol: tcp
22 | - c1hostPort: 7946
23 | c1containerPort: 7946
24 | c1protocol: udp
25 | - c1hostPort: 7373
26 | c1containerPort: 7373
27 | c1protocol: tcp
28 | c1_image: fali2/kong-weave
29 | c1_essential: True
30 | c1_privileged: False
31 | c1_environment_variables:
32 | - c1name : KONG_CASSANDRA_CONTACT_POINTS
33 | c1value: cassandra-master.weave.local
34 | - c1name : KONG_CASSANDRA_PORT
35 | c1value: >
36 | "9042"
37 | - c1name: KONG_CLUSTER_LISTEN_RPC
38 | c1value: 127.0.0.1:7373
39 | - c1name: KONG_DATABASE
40 | c1value: cassandra
41 | - c1name: KONG_CLUSTER_ENCRYPT_KEY
42 | c1value: cg8StVXbQJ0gPvMd9o7yrg==
43 | - c1name: KONG_CASSANDRA_KEYSPACE
44 | c1value: kong
45 | - c1name: KONG_CASSANDRA_REPL_STRATEGY
46 | c1value: SimpleStrategy
47 | - c1name: KONG_CASSANDRA_REPL_FACTOR
48 | c1value: >
49 | "2"
50 | c1_awslogs_group: "{{log_group_name}}"
51 | c1_log_region: "{{region}}"
52 | c1_awslogs_stream_prefix: kong-gw-logs/
53 | c1_docker_labels: kong-containers
54 | c1_docker_security_options: no-new-privileges
55 |
56 |
57 | # Kong ui
58 | c2_appname: kong-apigw-ui
59 | c2_cpu: 256
60 | c2_memory: 256
61 | c2_soft_memory: 256
62 | c2_ports:
63 | - c2hostPort: 8080
64 | c2containerPort: 8080
65 | c2protocol: tcp
66 | c2_image: fali2/kongui-weave
67 | c2_essential: True
68 | c2_privileged: False
69 | c2_environment_variables:
70 | - c2name : PASSWD
71 | c2value: myencryptedpassword
72 | - c2name: servicename
73 | c2value: kong-apigw
74 | - c2name: UNAME
75 | c2value: kong-user
76 | c2_awslogs_group: "{{log_group_name}}"
77 | c2_log_region: "{{region}}"
78 | c2_awslogs_stream_prefix: kong-ui/
79 | c2_docker_labels: kong-containers
80 |
81 |
82 | # Service Setup
83 | cluster_name: "Ingress-Cluster-{{env}}"
84 | service_name: "kong-master-service"
85 | count: "1"
86 |
87 | maximumPercent: 200
88 | minimumHealthyPercent: 100
89 |
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/start-kafka.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [[ -z "$KAFKA_PORT" ]]; then
4 | export KAFKA_PORT=9092
5 | fi
6 |
7 |
8 | if [[ -z "$KAFKA_DELETE_TOPIC_ENABLE" ]]; then
9 | export KAFKA_DELETE_TOPIC_ENABLE=true
10 | fi
11 |
12 | # Set default replication factor and number of partitions
13 | if [[ -z "$KAFKA_DEFAULT_REPLICATION_FACTOR" ]]; then
14 | export KAFKA_DEFAULT_REPLICATION_FACTOR=1
15 | fi
16 |
17 | if [[ -z "$KAFKA_NUM_PARTITIONS" ]]; then
18 | export KAFKA_NUM_PARTITIONS=1
19 | fi
20 |
21 | if [[ -z "$KAFKA_ADVERTISED_PORT" && \
22 | -z "$KAFKA_LISTENERS" && \
23 | -z "$KAFKA_ADVERTISED_LISTENERS" ]]; then
24 | export KAFKA_ADVERTISED_PORT=$(docker port `hostname` $KAFKA_PORT | sed -r "s/.*:(.*)/\1/g")
25 | fi
26 |
27 | if [[ -z "$KAFKA_BROKER_ID" ]]; then
28 | if [[ -n "$BROKER_ID_COMMAND" ]]; then
29 | export KAFKA_BROKER_ID=$(eval $BROKER_ID_COMMAND)
30 | else
31 | # By default, auto-allocate the broker ID
32 | export KAFKA_BROKER_ID=-1
33 | fi
34 | fi
35 |
36 | if [[ -z "$KAFKA_LOG_DIRS" ]]; then
37 | export KAFKA_LOG_DIRS="/kafka/kafka-logs-$HOSTNAME"
38 | fi
39 |
40 | if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then
41 | export KAFKA_ZOOKEEPER_CONNECT=$(env | grep ZK.*PORT_2181_TCP= | sed -e 's|.*tcp://||' | paste -sd ,)
42 | fi
43 |
44 | if [[ -n "$KAFKA_HEAP_OPTS" ]]; then
45 | sed -r -i "s/(export KAFKA_HEAP_OPTS)=\"(.*)\"/\1=\"$KAFKA_HEAP_OPTS\"/g" $KAFKA_HOME/bin/kafka-server-start.sh
46 | unset KAFKA_HEAP_OPTS
47 | fi
48 |
49 | # Set the advertised host name from the EC2 instance metadata (local IPv4)
50 | KAFKA_ADVERTISED_HOST_NAME=$(wget -t3 -T2 -qO- http://169.254.169.254/latest/meta-data/local-ipv4)
51 | export KAFKA_ADVERTISED_HOST_NAME
52 |
53 | if [[ -z "$KAFKA_ADVERTISED_HOST_NAME" && -n "$HOSTNAME_COMMAND" ]]; then
54 | export KAFKA_ADVERTISED_HOST_NAME=$(eval $HOSTNAME_COMMAND)
55 | fi
56 |
57 | for VAR in `env`
58 | do
59 | if [[ $VAR =~ ^KAFKA_ && ! $VAR =~ ^KAFKA_HOME ]]; then
60 | kafka_name=`echo "$VAR" | sed -r "s/KAFKA_(.*)=.*/\1/g" | tr '[:upper:]' '[:lower:]' | tr _ .`
61 | env_var=`echo "$VAR" | sed -r "s/(.*)=.*/\1/g"`
62 | if egrep -q "(^|^#)$kafka_name=" $KAFKA_HOME/config/server.properties; then
63 | sed -r -i "s@(^|^#)($kafka_name)=(.*)@\2=${!env_var}@g" $KAFKA_HOME/config/server.properties #note that no config values may contain an '@' char
64 | else
65 | echo "$kafka_name=${!env_var}" >> $KAFKA_HOME/config/server.properties
66 | fi
67 | fi
68 | done
69 |
70 | if [[ -n "$CUSTOM_INIT_SCRIPT" ]] ; then
71 | eval $CUSTOM_INIT_SCRIPT
72 | fi
73 |
74 | create-topics.sh &
75 | exec $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties
76 |
--------------------------------------------------------------------------------
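The loop above turns every KAFKA_* environment variable into a server.properties entry by stripping the prefix, lowercasing it, and replacing underscores with dots. A hedged illustration (the image name is a placeholder for whatever this directory is built and tagged as):

    docker run -d \
      -e KAFKA_ZOOKEEPER_CONNECT=zk1:2181,zk2:2181,zk3:2181 \
      -e KAFKA_LOG_RETENTION_HOURS=48 \
      -e KAFKA_DEFAULT_REPLICATION_FACTOR=3 \
      your-registry/kafka-docker-wm
    # Inside the container this becomes, in $KAFKA_HOME/config/server.properties:
    #   zookeeper.connect=zk1:2181,zk2:2181,zk3:2181
    #   log.retention.hours=48
    #   default.replication.factor=3
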
/ansible/playbooks/roles/kong-master/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include_vars: ../kong-master/defaults/{{env}}.yml
4 |
5 | - name: Generate ecs-task def template
6 | template: >
7 | src=kong-task-def.j2
8 | dest=/tmp/kong-task-def.yaml
9 |
10 |
11 | # ------------Service management------------------------#
12 | # Create task definition only but do not create service
13 | - name: Deploy kong service
14 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml --task
15 | when: ecs_action == "task"
16 | register: task_creation
17 |
18 | - debug: var=task_creation.stdout_lines
19 | when: ecs_action == "task"
20 |
21 | # Update task definition and create service. Ensures that service uses tasks created from ansible only
22 | - name: Create service
23 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml --create
24 | when: ecs_action == "create"
25 | register: service_creation
26 |
27 | - debug: var=service_creation.stdout_lines
28 | when: ecs_action == "create"
29 |
30 | # Update service with new code but without increasing or decreasing the count
31 | - name: Update service without count
32 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml --update
33 | when: ecs_action == "update" and count is not defined
34 | register: service_update_without_count
35 |
36 | - debug: var=service_update_without_count.stdout_lines
37 | when: ecs_action == "update" and count is not defined
38 |
39 | # Update service with the specified count. Ensure the count to use is passed
40 | # as the extra-var count
41 | - name: Update service with specified count
42 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -c "{{count}}" -f /tmp/kong-task-def.yaml --update
43 | when: ecs_action == "update" and count is defined
44 | register: service_update_with_count
45 |
46 | - debug: var=service_update_with_count.stdout_lines
47 | when: ecs_action == "update" and count is defined
48 |
49 |
50 | # CAUTION:
51 | # There are no checks to confirm whether the service should be deleted, as this is invoked by Terraform
52 | - name: Scale service down to zero before delete
53 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml -c 0 --update
54 | when: ecs_action == "delete"
55 | register: service_update_before_delete
56 |
57 | - debug: var=service_update_before_delete.stdout_lines
58 | when: ecs_action == "delete"
59 |
60 | - name: Delete service
61 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml --delete
62 | when: ecs_action == "delete"
63 | register: service_delete
64 |
65 | - debug: var=service_delete.stdout_lines
66 | when: ecs_action == "delete"
67 |
--------------------------------------------------------------------------------
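The ecs_action extra-var selects which of the branches above runs. A hedged example of driving this role by hand through the top-level playbook (playbook path taken from the repository layout; inventory and variable values are placeholders):

    VARS="env=development region=us-west-2 log_group_name=my-ecs-logs"
    ansible-playbook playbooks/kong-master.yml -e "$VARS ecs_action=task"     # register the task definition only
    ansible-playbook playbooks/kong-master.yml -e "$VARS ecs_action=create"   # create the ECS service
    ansible-playbook playbooks/kong-master.yml -e "$VARS ecs_action=update"   # roll out a new task definition revision
    ansible-playbook playbooks/kong-master.yml -e "$VARS ecs_action=delete"   # scale to zero, then delete the service
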
/ansible/playbooks/roles/cassandra-master/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Variables file will be selected based on extra-var argument env
4 | - include_vars: ../cassandra-master/defaults/master.yml
5 |
6 |
7 | # All scripts are generated and executed from localhost. Ensure required
8 | # dependencies are met
9 | - name: Generate ecs-task def template
10 | template: >
11 | src=cassandra-task-def.j2
12 | dest=/tmp/cassandra-master-task-def.yaml
13 |
14 |
15 | # ------------Service management------------------------#
16 | # Create task definition only but do not create service
17 | - name: Deploy cassandra-master service
18 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-master-task-def.yaml --task
19 | when: ecs_action == "task"
20 | register: task_creation
21 |
22 | - debug: var=task_creation.stdout_lines
23 | when: ecs_action == "task"
24 |
25 | # Update task definition and create service. Ensures that service uses tasks created from ansible only
26 | - name: Create service
27 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-master-task-def.yaml --create
28 | when: ecs_action == "create"
29 | register: service_creation
30 |
31 | - debug: var=service_creation.stdout_lines
32 | when: ecs_action == "create"
33 |
34 | # Update service with new code but without increasing or decreasing the count
35 | - name: Update service without count
36 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-master-task-def.yaml --update
37 | when: ecs_action == "update" and count is not defined
38 | register: service_update_without_count
39 |
40 | - debug: var=service_update_without_count.stdout_lines
41 | when: ecs_action == "update" and count is not defined
42 |
43 | # Update service with the specified count. Ensure the count to use is passed
44 | # as the extra-var count
45 | - name: Update service with specified count
46 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -c "{{count}}" -f /tmp/cassandra-master-task-def.yaml --update
47 | when: ecs_action == "update" and count is defined
48 | register: service_update_with_count
49 |
50 | - debug: var=service_update_with_count.stdout_lines
51 | when: ecs_action == "update" and count is defined
52 |
53 |
54 | # CAUTION:
55 | # There are no checks to confirm whether the service should be deleted, as this is invoked by Terraform
56 | - name: Scale service down to zero before delete
57 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-master-task-def.yaml -c 0 --update
58 | when: ecs_action == "delete"
59 | register: service_update_before_delete
60 |
61 | - debug: var=service_update_before_delete.stdout_lines
62 | when: ecs_action == "delete"
63 |
64 | - name: Delete service
65 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-master-task-def.yaml --delete
66 | when: ecs_action == "delete"
67 | register: service_delete
68 |
69 | - debug: var=service_delete.stdout_lines
70 | when: ecs_action == "delete"
71 |
--------------------------------------------------------------------------------
/terraform/modules/ecs-kafka-zk-cluster/iam.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_role" "ecs-role-kafka" {
2 | name = "ecs-role-kafka-${var.environment}"
3 |
4 | assume_role_policy = <
11 | src=zookeeper-task-def.j2
12 | dest=/tmp/zookeeper-task-def.yaml
13 |
14 |
15 | # ------------Service management------------------------#
16 | # Create task definition only but do not create service
17 | - name: Deploy zookeeper service
18 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --task
19 | when: ecs_action == "task"
20 | register: task_creation
21 |
22 | - debug: var=task_creation.stdout_lines
23 | when: ecs_action == "task"
24 |
25 | # Update task definition and create service. Ensures that service uses tasks created from ansible only
26 | - name: Create service
27 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --create
28 | when: ecs_action == "create"
29 | register: service_creation
30 |
31 | - debug: var=service_creation.stdout_lines
32 | when: ecs_action == "create"
33 |
34 | # Update service with new code but without increasing or decreasing the count
35 | - name: Update service without count
36 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --update
37 | when: ecs_action == "update" and update_consul_master_count is not defined
38 | register: service_update_without_count
39 |
40 | - debug: var=service_update_without_count.stdout_lines
41 | when: ecs_action == "update" and update_consul_master_count is not defined
42 |
43 | # Update service with the specified count. Ensure count to use is set from update_consul_master_count
44 | # by passing it as extra-var
45 | - name: Update service with specified count
46 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -c "{{update_consul_master_count}}" -f /tmp/zookeeper-task-def.yaml --update
47 | when: ecs_action == "update" and update_consul_master_count is defined
48 | register: service_update_with_count
49 |
50 | - debug: var=service_update_with_count.stdout_lines
51 | when: ecs_action == "update" and update_consul_master_count is defined
52 |
53 |
54 | # CAUTION:
55 | # There are no checks to confirm whether the service should be deleted, as this is invoked by Terraform
56 | - name: Scale service down to zero before delete
57 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml -c 0 --update
58 | when: ecs_action == "delete"
59 | register: service_update_before_delete
60 |
61 | - debug: var=service_update_before_delete.stdout_lines
62 | when: ecs_action == "delete"
63 |
64 | - name: Delete service
65 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --delete
66 | when: ecs_action == "delete"
67 | register: service_delete
68 |
69 | - debug: var=service_delete.stdout_lines
70 | when: ecs_action == "delete"
71 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/kafka/tasks/kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Variables file will be selected based on extra-var argument env
4 | - include_vars: ../kafka/defaults/{{env}}.yml
5 |
6 |
7 | - pause:
8 | minutes: 5
9 | when: ecs_action == "create"
10 |
11 |
12 |
13 | # All scripts are generated and executed from localhost. Ensure required
14 | # dependencies are met
15 | - name: Generate ecs-task def template
16 | template: >
17 | src=kafka-task-def.j2
18 | dest=/tmp/kafka-task-def.yaml
19 |
20 |
21 | # ------------Service management------------------------#
22 | # Create task definition only but do not create service
23 | - name: Deploy kafka service
24 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kafka-task-def.yaml --task
25 | when: ecs_action == "task"
26 | register: task_creation
27 |
28 | - debug: var=task_creation.stdout_lines
29 | when: ecs_action == "task"
30 |
31 | # Update task definition and create service. Ensures that service uses tasks created from ansible only
32 | - name: Create service
33 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kafka-task-def.yaml --create
34 | when: ecs_action == "create"
35 | register: service_creation
36 |
37 | - debug: var=service_creation.stdout_lines
38 | when: ecs_action == "create"
39 |
40 | # Update service with new code but without increasing or decreasing the count
41 | - name: Update service without count
42 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kafka-task-def.yaml --update
43 | when: ecs_action == "update" and update_consul_master_count is not defined
44 | register: service_update_without_count
45 |
46 | - debug: var=service_update_without_count.stdout_lines
47 | when: ecs_action == "update" and update_consul_master_count is not defined
48 |
49 | # Update service with the specified count. Ensure count to use is set from update_consul_master_count
50 | # by passing it as extra-var
51 | - name: Update service with specified count
52 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -c "{{update_consul_master_count}}" -f /tmp/kafka-task-def.yaml --update
53 | when: ecs_action == "update" and update_consul_master_count is defined
54 | register: service_update_with_count
55 |
56 | - debug: var=service_update_with_count.stdout_lines
57 | when: ecs_action == "update" and update_consul_master_count is defined
58 |
59 |
60 | # CAUTION:
61 | # There are no checks to confirm whether the service should be deleted, as this is invoked by Terraform
62 | - name: Scale service down to zero before delete
63 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kafka-task-def.yaml -c 0 --update
64 | when: ecs_action == "delete"
65 | register: service_update_before_delete
66 |
67 | - debug: var=service_update_before_delete.stdout_lines
68 | when: ecs_action == "delete"
69 |
70 | - name: Delete service
71 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kafka-task-def.yaml --delete
72 | when: ecs_action == "delete"
73 | register: service_delete
74 |
75 | - debug: var=service_delete.stdout_lines
76 | when: ecs_action == "delete"
77 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/zookeeper/tasks/zoo3.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Variables file will be selected based on extra-var argument env
4 | - include_vars: ../zookeeper/defaults/{{env}}/zoo3.yml
5 |
6 | - pause:
7 | minutes: 2
8 | when: ecs_action == "create"
9 |
10 | # All scripts are generated and executed from localhost. Ensure required
11 | # dependencies are met
12 | - name: Generate ecs-task def template
13 | template: >
14 | src=zookeeper-task-def.j2
15 | dest=/tmp/zookeeper-task-def.yaml
16 |
17 |
18 | # ------------Service management------------------------#
19 | # Create task definition only but do not create service
20 | - name: Deploy zookeeper service
21 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --task
22 | when: ecs_action == "task"
23 | register: task_creation
24 |
25 | - debug: var=task_creation.stdout_lines
26 | when: ecs_action == "task"
27 |
28 | # Update task definition and create service. Ensures that service uses tasks created from ansible only
29 | - name: Create service
30 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --create
31 | when: ecs_action == "create"
32 | register: service_creation
33 |
34 | - debug: var=service_creation.stdout_lines
35 | when: ecs_action == "create"
36 |
37 | # Update service with new code but without increasing or decreasing the count
38 | - name: Update service without count
39 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --update
40 | when: ecs_action == "update" and update_consul_master_count is not defined
41 | register: service_update_without_count
42 |
43 | - debug: var=service_update_without_count.stdout_lines
44 | when: ecs_action == "update" and update_consul_master_count is not defined
45 |
46 | # Update service with the specified count. Ensure count to use is set from update_consul_master_count
47 | # by passing it as extra-var
48 | - name: Update service with specified count
49 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -c "{{update_consul_master_count}}" -f /tmp/zookeeper-task-def.yaml --update
50 | when: ecs_action == "update" and update_consul_master_count is defined
51 | register: service_update_with_count
52 |
53 | - debug: var=service_update_with_count.stdout_lines
54 | when: ecs_action == "update" and update_consul_master_count is defined
55 |
56 |
57 | # CAUTION:
58 | # There are no checks to confirm whether the service should be deleted, as this is invoked by Terraform
59 | - name: Scale service down to zero before delete
60 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml -c 0 --update
61 | when: ecs_action == "delete"
62 | register: service_update_before_delete
63 |
64 | - debug: var=service_update_before_delete.stdout_lines
65 | when: ecs_action == "delete"
66 |
67 | - name: Delete service
68 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --delete
69 | when: ecs_action == "delete"
70 | register: service_delete
71 |
72 | - debug: var=service_delete.stdout_lines
73 | when: ecs_action == "delete"
74 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/zookeeper/tasks/zoo2.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Variables file will be selected based on extra-var argument env
4 | - include_vars: ../zookeeper/defaults/{{env}}/zoo2.yml
5 |
6 | - pause:
7 | minutes: 1
8 | when: ecs_action == "create"
9 |
10 |
11 | # All scripts are generated and executed from localhost. Ensure required
12 | # dependencies are met
13 | - name: Generate ecs-task def template
14 | template: >
15 | src=zookeeper-task-def.j2
16 | dest=/tmp/zookeeper-task-def.yaml
17 |
18 |
19 | # ------------Service management------------------------#
20 | # Create task definition only but do not create service
21 | - name: Deploy zookeeper service
22 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --task
23 | when: ecs_action == "task"
24 | register: task_creation
25 |
26 | - debug: var=task_creation.stdout_lines
27 | when: ecs_action == "task"
28 |
29 | # Update task definition and create service. Ensures that service uses tasks created from ansible only
30 | - name: Create service
31 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --create
32 | when: ecs_action == "create"
33 | register: service_creation
34 |
35 | - debug: var=service_creation.stdout_lines
36 | when: ecs_action == "create"
37 |
38 | # Update service with new code but without increasing or decreasing the count
39 | - name: Update service without count
40 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --update
41 | when: ecs_action == "update" and update_consul_master_count is not defined
42 | register: service_update_without_count
43 |
44 | - debug: var=service_update_without_count.stdout_lines
45 | when: ecs_action == "update" and update_consul_master_count is not defined
46 |
47 | # Update service with the specified count. Ensure count to use is set from update_consul_master_count
48 | # by passing it as extra-var
49 | - name: Update service with specified count
50 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -c "{{update_consul_master_count}}" -f /tmp/zookeeper-task-def.yaml --update
51 | when: ecs_action == "update" and update_consul_master_count is defined
52 | register: service_update_with_count
53 |
54 | - debug: var=service_update_with_count.stdout_lines
55 | when: ecs_action == "update" and update_consul_master_count is defined
56 |
57 |
58 | # CAUTION:
59 | # There are no checks to confirm whether the service should be deleted, as this is invoked by Terraform
60 | - name: Scale service down to zero before delete
61 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml -c 0 --update
62 | when: ecs_action == "delete"
63 | register: service_update_before_delete
64 |
65 | - debug: var=service_update_before_delete.stdout_lines
66 | when: ecs_action == "delete"
67 |
68 | - name: Delete service
69 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/zookeeper-task-def.yaml --delete
70 | when: ecs_action == "delete"
71 | register: service_delete
72 |
73 | - debug: var=service_delete.stdout_lines
74 | when: ecs_action == "delete"
75 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/cassandra-client/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 |
4 | # Pause so that master can launch
5 | - pause:
6 | minutes: 1
7 | when: ecs_action == "create"
8 |
9 | # Variables file will be selected based on extra-var argument env
10 | - include_vars: ../cassandra-client/defaults/client.yml
11 |
12 |
13 | # All scripts are generated and executed from localhost. Ensure required
14 | # dependencies are met
15 | - name: Generate ecs-task def template
16 | template: >
17 | src=cassandra-task-def.j2
18 | dest=/tmp/cassandra-client-task-def.yaml
19 |
20 |
21 | # ------------Service management------------------------#
22 | # Create task definition only but do not create service
23 | - name: Deploy cassandra-client service
24 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-client-task-def.yaml --task
25 | when: ecs_action == "task"
26 | register: task_creation
27 |
28 | - debug: var=task_creation.stdout_lines
29 | when: ecs_action == "task"
30 |
31 | # Update task definition and create service. Ensures that service uses tasks created from ansible only
32 | - name: Create service
33 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-client-task-def.yaml --create
34 | when: ecs_action == "create"
35 | register: service_creation
36 |
37 | - debug: var=service_creation.stdout_lines
38 | when: ecs_action == "create"
39 |
40 | # Update service with new code but without increasing or decreasing the count
41 | - name: Update service without count
42 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-client-task-def.yaml --update
43 | when: ecs_action == "update" and count is not defined
44 | register: service_update_without_count
45 |
46 | - debug: var=service_update_without_count.stdout_lines
47 | when: ecs_action == "update" and count is not defined
48 |
49 | # Update service with the specified count. Ensure the count to use is passed
50 | # as the extra-var count
51 | - name: Update service with specified count
52 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -c "{{count}}" -f /tmp/cassandra-client-task-def.yaml --update
53 | when: ecs_action == "update" and count is defined
54 | register: service_update_with_count
55 |
56 | - debug: var=service_update_with_count.stdout_lines
57 | when: ecs_action == "update" and count is defined
58 |
59 |
60 | # CAUTION:
61 | # There are no checks to confirm whether the service should be deleted, as this is invoked by Terraform
62 | - name: Scale service down to zero before delete
63 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-client-task-def.yaml -c 0 --update
64 | when: ecs_action == "delete"
65 | register: service_update_before_delete
66 |
67 | - debug: var=service_update_before_delete.stdout_lines
68 | when: ecs_action == "delete"
69 |
70 | - name: Delete service
71 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/cassandra-client-task-def.yaml --delete
72 | when: ecs_action == "delete"
73 | register: service_delete
74 |
75 | - debug: var=service_delete.stdout_lines
76 | when: ecs_action == "delete"
77 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/kong-client/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ######## KONG-CLIENT ##############
3 | # Kong cannot update cassandra when started in cluster mode
4 | # due to race conditions
5 | # We need to start one container, point it at Cassandra, and then scale out,
6 | # assuming this can be done within 5 minutes of kong-master being
7 | # launched.
8 | #
9 | # https://github.com/Mashape/kong/issues/2139
10 | # https://github.com/Mashape/kong/issues/2084
11 |
12 | # Pause for 3 minutes to build app cache.
13 | - pause:
14 | minutes: 3
15 | when: ecs_action == "create"
16 |
17 | - include_vars: ../kong-client/defaults/{{env}}.yml
18 |
19 | - name: Generate ecs-task def template
20 | template: >
21 | src=kong-task-def.j2
22 | dest=/tmp/kong-task-def.yaml
23 |
24 |
25 | # ------------Service management------------------------#
26 | # Create task definition only but do not create service
27 | - name: Deploy kong service
28 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml --task
29 | when: ecs_action == "task"
30 | register: task_creation
31 |
32 | - debug: var=task_creation.stdout_lines
33 | when: ecs_action == "task"
34 |
35 | # Update task definition and create service. Ensures that service uses tasks created from ansible only
36 | - name: Create service
37 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml --create
38 | when: ecs_action == "create"
39 | register: service_creation
40 |
41 | - debug: var=service_creation.stdout_lines
42 | when: ecs_action == "create"
43 |
44 | # Update service with new code but without increasing or decreasing the count
45 | - name: Update service without count
46 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml --update
47 | when: ecs_action == "update" and count is not defined
48 | register: service_update_without_count
49 |
50 | - debug: var=service_update_without_count.stdout_lines
51 | when: ecs_action == "update" and count is not defined
52 |
53 | # Update service with the specified count. Ensure the count to use is passed
54 | # as the extra-var count
55 | - name: Update service with specified count
56 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -c "{{count}}" -f /tmp/kong-task-def.yaml --update
57 | when: ecs_action == "update" and count is defined
58 | register: service_update_with_count
59 |
60 | - debug: var=service_update_with_count.stdout_lines
61 | when: ecs_action == "update" and count is defined
62 |
63 |
64 | # CAUTION:
65 | # There are no checks to confirm whether the service should be deleted, as this is invoked by Terraform
66 | - name: Scale service down to zero before delete
67 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml -c 0 --update
68 | when: ecs_action == "delete"
69 | register: service_update_before_delete
70 |
71 | - debug: var=service_update_before_delete.stdout_lines
72 | when: ecs_action == "delete"
73 |
74 | - name: Delete service
75 | script: ../files/orchestrate.py -v -e "{{env}}" -r "{{region}}" -f /tmp/kong-task-def.yaml --delete
76 | when: ecs_action == "delete"
77 | register: service_delete
78 |
79 | - debug: var=service_delete.stdout_lines
80 | when: ecs_action == "delete"
81 |
--------------------------------------------------------------------------------
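Following the race-condition note at the top of this file, the client service is kept small at create time and scaled out once kong-master has finished its Cassandra migrations. A hedged example of that later scale-out (values are placeholders):

    ansible-playbook playbooks/kong-client.yml \
      -e "env=development region=us-west-2 log_group_name=my-ecs-logs ecs_action=update count=4"
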
/terraform/modules/ecs-kafka-zk-cluster/ecs-kafka-zk.tf:
--------------------------------------------------------------------------------
1 | /*
2 | Kafka/ZooKeeper cluster on ECS
3 | */
4 |
5 |
6 | /* Look up the AMI to be used for the cluster instances */
7 | data "aws_ami" "kafka" {
8 | most_recent = true
9 |
10 | filter {
11 | name = "owner-id"
12 | values = ["${var.ami_owner_name}"]
13 | }
14 |
15 | filter {
16 | name = "name"
17 | values = ["${var.ami_name_regex}"]
18 | }
19 |
20 | }
21 |
22 |
23 | resource "aws_launch_configuration" "kafka-cluster-lc" {
24 | image_id = "${data.aws_ami.kafka.id}"
25 | name_prefix = "kafka-cluster-${var.environment}-"
26 | instance_type = "${var.kafka_instance_type}"
27 | associate_public_ip_address = true
28 | key_name = "${var.aws_key_name}"
29 | security_groups = ["${aws_security_group.kafka-cluster-sg.id}"]
30 | user_data = "${data.template_file.userdata-kafka-cluster.rendered}"
31 | iam_instance_profile = "${aws_iam_instance_profile.ecs-profile-kafka.name}"
32 | placement_tenancy = "default"
33 |
34 | root_block_device {
35 | volume_type = "standard"
36 | volume_size = 30
37 | delete_on_termination = true
38 | }
39 |
40 | connection {
41 | user = "ec2-user"
42 | agent = true
43 | }
44 |
45 | lifecycle {
46 | create_before_destroy = true
47 | }
48 |
49 | }
50 |
51 |
52 | resource "aws_autoscaling_group" "kafka-cluster-asg" {
53 | vpc_zone_identifier = ["${var.private_subnet_ids}"]
54 | name = "ECS-KAFKA-CLUSTER-${var.environment}"
55 | max_size = "${var.kafka_asg_max_size}"
56 | min_size = "${var.kafka_asg_min_size}"
57 | health_check_grace_period = 100
58 | health_check_type = "EC2"
59 | desired_capacity = "${var.kafka_asg_desired_size}"
60 | force_delete = false
61 | launch_configuration = "${aws_launch_configuration.kafka-cluster-lc.name}"
62 |
63 | // Setting this to true would not allow us to delete the ECS clusters
64 | lifecycle {
65 | create_before_destroy = true
66 | }
67 |
68 | tag {
69 | key = "Name"
70 | value = "ECS-KAFKA-INSTANCES-${upper(var.environment)}"
71 | propagate_at_launch = true
72 | }
73 |
74 | tag {
75 | key = "Environment"
76 | value = "${var.environment}"
77 | propagate_at_launch = true
78 | }
79 |
80 | // These tags decide which Ansible roles are applied via the dynamic inventory
81 | tag {
82 | key = "Role1"
83 | value = "kafka_instances"
84 | propagate_at_launch = true
85 | }
86 |
87 | tag {
88 | key = "Role2"
89 | value = "zookeeper_instances"
90 | propagate_at_launch = true
91 | }
92 |
93 | tag{
94 | key = "Stack"
95 | value = "GLP"
96 | propagate_at_launch = true
97 | }
98 |
99 | tag{
100 | key = "weave:peerGroupName"
101 | value = "GLP-${var.environment}"
102 | propagate_at_launch = true
103 | }
104 |
105 | depends_on = ["aws_launch_configuration.kafka-cluster-lc"]
106 | }
107 |
108 | resource "aws_ecs_cluster" "kafka-cluster" {
109 | name = "Kafka-cluster-${var.environment}"
110 |
111 | lifecycle {
112 | create_before_destroy = true
113 | }
114 | }
115 |
116 | /* Used as an explicit dependency handle for other modules */
117 | resource "null_resource" "module_dependency" {
118 | depends_on = ["aws_autoscaling_group.kafka-cluster-asg"]
119 | }
120 |
--------------------------------------------------------------------------------
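The Role1/Role2 tags above are what the EC2 dynamic inventory keys on. Assuming the stock ec2.py script sits next to the ec2.ini shown in the tree, each tag Key=Value surfaces as a tag_Key_Value group, so the instances can be targeted like this (a sketch, not verified against this exact ec2.ini):

    ansible -i hosts/ec2.py tag_Role1_kafka_instances --list-hosts
    ansible -i hosts/ec2.py tag_Role2_zookeeper_instances --list-hosts
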
/ansible/playbooks/roles/kafka/templates/kafka-task-def.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 | containers:
3 | - appname: {{ c1_appname }}
4 | family: {{ task_family }}
5 | taskRoleArn: {{ task_role_arn }}
6 | networkMode: {{ network_mode }}
7 | containerDefinitions:
8 | # There can be multiple containers in task def
9 | - name: {{ c1_appname }}
10 | cpu: {{ c1_cpu }}
11 | memory: {{ c1_memory }}
12 | memoryReservation: {{ c1_soft_memory }}
13 | portMappings:
14 | {% for hostport in c1_ports %}
15 | - hostPort: {{ hostport['c1hostPort'] }}
16 | containerPort: {{ hostport['c1containerPort'] }}
17 | protocol: {{ hostport['c1protocol'] }}
18 | {% endfor %}
19 | image: {{ c1_image }}
20 | essential: {{ c1_essential }}
21 | privileged: {{ c1_privileged }}
22 | environment:
23 | {% for env in c1_environment_variables %}
24 | - name: {{ env['c1name'] }}
25 | value: {{ env['c1value'] }}
26 | {% endfor %}
27 | mountPoints:
28 | - sourceVolume: {{ src_volume_name }}
29 | containerPath: {{ mount_container_path }}
30 | readOnly: {{ mount_readonly_bool }}
31 | logConfiguration:
32 | logDriver: 'awslogs'
33 | options:
34 | awslogs-group: {{ c1_awslogs_group }}
35 | awslogs-region: {{ c1_log_region }}
36 | awslogs-stream-prefix: {{ c1_awslogs_stream_prefix }}
37 | dockerLabels:
38 | name: {{ c1_docker_labels }}
39 | dockerSecurityOptions:
40 | - {{ c1_docker_security_options }}
41 | volumes:
42 | - name: {{ src_volume_name }}
43 | host:
44 | sourcePath: {{ volume_src_path }}
45 |
46 | # ***********************************************************************************************************
47 | # Service parameters will be used to create service
48 | # We can also add load balancers if required.
49 | # Please visit : http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.create_service
50 | # ***********************************************************************************************************
51 | serviceCreate:
52 | - cluster: {{ cluster_name }}
53 | serviceName: {{ service_name }}
54 | # Task definition is family:revision.
55 | # Create the service on the latest revision and use update to roll back or deploy a new version
56 | taskDefinition: {{ task_family }}
57 | desiredCount: {{ count }}
58 | clientToken: {{ service_name }}
59 | deploymentConfiguration:
60 | maximumPercent: {{ maximumPercent }}
61 | minimumHealthyPercent: {{ minimumHealthyPercent }}
62 |
63 | # ******************************************************************
64 | # Service Update parameters will be used to update running service
65 | # ******************************************************************
66 | serviceUpdate:
67 | - cluster: {{ cluster_name }}
68 | serviceName: {{ service_name }}
69 | # Desired count also can be updated via command line
70 | desiredCount: {{ count }}
71 | # Specify task def revision to roll back
72 | taskDefinition: {{ task_family }}
73 | deploymentConfiguration:
74 | maximumPercent: {{ maximumPercent }}
75 | minimumHealthyPercent: {{ minimumHealthyPercent }}
76 | # **********************************************************************
77 | # Service delete will be used to delete services where running count is 0
78 | # Cannot be used via automated tools as it requires user confirmation
79 | # **********************************************************************
80 | serviceDelete:
81 | - cluster: {{ cluster_name }}
82 | serviceName: {{ service_name }}
83 |
--------------------------------------------------------------------------------
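orchestrate.py consumes the serviceCreate block above through boto3's create_service call. For orientation only, a roughly equivalent AWS CLI invocation might look like this (cluster name from the Terraform module; family, service name and counts are placeholders for the rendered values):

    aws ecs create-service \
      --region us-west-2 \
      --cluster Kafka-cluster-development \
      --service-name kafka-service \
      --task-definition KAFKA-TASK-FAMILY \
      --desired-count 1 \
      --client-token kafka-service \
      --deployment-configuration maximumPercent=200,minimumHealthyPercent=50
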
/ansible/playbooks/roles/cassandra-client/templates/cassandra-task-def.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 | containers:
3 | - appname: {{ c1_appname }}
4 | family: {{ task_family }}
5 | taskRoleArn: {{ task_role_arn }}
6 | networkMode: {{ network_mode }}
7 | containerDefinitions:
8 | # There can be multiple containers in task def
9 | - name: {{ c1_appname }}
10 | cpu: {{ c1_cpu }}
11 | memory: {{ c1_memory }}
12 | memoryReservation: {{ c1_soft_memory }}
13 | portMappings:
14 | {% for hostport in c1_ports %}
15 | - hostPort: {{ hostport['c1hostPort'] }}
16 | containerPort: {{ hostport['c1containerPort'] }}
17 | protocol: {{ hostport['c1protocol'] }}
18 | {% endfor %}
19 | image: {{ c1_image }}
20 | essential: {{ c1_essential }}
21 | privileged: {{ c1_privileged }}
22 | environment:
23 | {% for env in c1_environment_variables %}
24 | - name: {{ env['c1name'] }}
25 | value: {{ env['c1value'] }}
26 | {% endfor %}
27 | mountPoints:
28 | - sourceVolume: {{ src_volume_name }}
29 | containerPath: {{ mount_container_path }}
30 | readOnly: {{ mount_readonly_bool }}
31 | logConfiguration:
32 | logDriver: 'awslogs'
33 | options:
34 | awslogs-group: {{ c1_awslogs_group }}
35 | awslogs-region: {{ c1_log_region }}
36 | awslogs-stream-prefix: {{ c1_awslogs_stream_prefix }}
37 | dockerLabels:
38 | name: {{ c1_docker_labels }}
39 | dockerSecurityOptions:
40 | - {{ c1_docker_security_options }}
41 | volumes:
42 | - name: {{ src_volume_name }}
43 | host:
44 | sourcePath: {{ volume_src_path }}
45 |
46 | # ***********************************************************************************************************
47 | # Service parameters will be used to create service
48 | # We can also add load balancers if required.
49 | # Please visit : http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.create_service
50 | # ***********************************************************************************************************
51 | serviceCreate:
52 | - cluster: {{ cluster_name }}
53 | serviceName: {{ service_name }}
54 | # Task definition is family:revision.
55 | # Create the service on the latest revision and use update to roll back or deploy a new version
56 | taskDefinition: {{ task_family }}
57 | desiredCount: {{ count }}
58 | clientToken: {{ service_name }}
59 | deploymentConfiguration:
60 | maximumPercent: {{ maximumPercent }}
61 | minimumHealthyPercent: {{ minimumHealthyPercent }}
62 |
63 | # ******************************************************************
64 | # Service Update parameters will be used to update running service
65 | # ******************************************************************
66 | serviceUpdate:
67 | - cluster: {{ cluster_name }}
68 | serviceName: {{ service_name }}
69 | # Desired count also can be updated via command line
70 | desiredCount: {{ count }}
71 | # Specify task def revision to roll back
72 | taskDefinition: {{ task_family }}
73 | deploymentConfiguration:
74 | maximumPercent: {{ maximumPercent }}
75 | minimumHealthyPercent: {{ minimumHealthyPercent }}
76 |
77 | # **********************************************************************
78 | # Service delete will be used to delete services where running count is 0
79 | # Cannot be used via automated tools as it requires user confirmation
80 | # **********************************************************************
81 | serviceDelete:
82 | - cluster: {{ cluster_name }}
83 | serviceName: {{ service_name }}
84 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/cassandra-master/templates/cassandra-task-def.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 | containers:
3 | - appname: {{ c1_appname }}
4 | family: {{ task_family }}
5 | taskRoleArn: {{ task_role_arn }}
6 | networkMode: {{ network_mode }}
7 | containerDefinitions:
8 | # There can be multiple containers in task def
9 | - name: {{ c1_appname }}
10 | cpu: {{ c1_cpu }}
11 | memory: {{ c1_memory }}
12 | memoryReservation: {{ c1_soft_memory }}
13 | portMappings:
14 | {% for hostport in c1_ports %}
15 | - hostPort: {{ hostport['c1hostPort'] }}
16 | containerPort: {{ hostport['c1containerPort'] }}
17 | protocol: {{ hostport['c1protocol'] }}
18 | {% endfor %}
19 | image: {{ c1_image }}
20 | essential: {{ c1_essential }}
21 | privileged: {{ c1_privileged }}
22 | environment:
23 | {% for env in c1_environment_variables %}
24 | - name: {{ env['c1name'] }}
25 | value: {{ env['c1value'] }}
26 | {% endfor %}
27 | mountPoints:
28 | - sourceVolume: {{ src_volume_name }}
29 | containerPath: {{ mount_container_path }}
30 | readOnly: {{ mount_readonly_bool }}
31 | logConfiguration:
32 | logDriver: 'awslogs'
33 | options:
34 | awslogs-group: {{ c1_awslogs_group }}
35 | awslogs-region: {{ c1_log_region }}
36 | awslogs-stream-prefix: {{ c1_awslogs_stream_prefix }}
37 | dockerLabels:
38 | name: {{ c1_docker_labels }}
39 | dockerSecurityOptions:
40 | - {{ c1_docker_security_options }}
41 | volumes:
42 | - name: {{ src_volume_name }}
43 | host:
44 | sourcePath: {{ volume_src_path }}
45 |
46 | # ***********************************************************************************************************
47 | # Service parameters will be used to create service
48 | # We can also add load balancers if required.
49 | # Please visit : http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.create_service
50 | # ***********************************************************************************************************
51 | serviceCreate:
52 | - cluster: {{ cluster_name }}
53 | serviceName: {{ service_name }}
54 | # Task definition is family:revision.
55 | # Create the service on the latest revision and use update to roll back or deploy a new version
56 | taskDefinition: {{ task_family }}
57 | desiredCount: {{ count }}
58 | clientToken: {{ service_name }}
59 | deploymentConfiguration:
60 | maximumPercent: {{ maximumPercent }}
61 | minimumHealthyPercent: {{ minimumHealthyPercent }}
62 |
63 | # ******************************************************************
64 | # Service Update parameters will be used to update running service
65 | # ******************************************************************
66 | serviceUpdate:
67 | - cluster: {{ cluster_name }}
68 | serviceName: {{ service_name }}
69 | # Desired count also can be updated via command line
70 | desiredCount: {{ count }}
71 | # Specify task def revision to roll back
72 | taskDefinition: {{ task_family }}
73 | deploymentConfiguration:
74 | maximumPercent: {{ maximumPercent }}
75 | minimumHealthyPercent: {{ minimumHealthyPercent }}
76 |
77 | # **********************************************************************
78 | # Service delete will be used to delete services where running count is 0
79 | # Cannot be used via automated tools as it requires user confirmation
80 | # **********************************************************************
81 | serviceDelete:
82 | - cluster: {{ cluster_name }}
83 | serviceName: {{ service_name }}
84 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/zookeeper/templates/zookeeper-task-def.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 | containers:
3 | - appname: {{ c1_appname }}
4 | family: {{ task_family }}
5 | taskRoleArn: {{ task_role_arn }}
6 | networkMode: {{ network_mode }}
7 | containerDefinitions:
8 | # There can be multiple containers in task def
9 | - name: {{ c1_appname }}
10 | cpu: {{ c1_cpu }}
11 | memory: {{ c1_memory }}
12 | memoryReservation: {{ c1_soft_memory }}
13 | portMappings:
14 | {% for hostport in c1_ports %}
15 | - hostPort: {{ hostport['c1hostPort'] }}
16 | containerPort: {{ hostport['c1containerPort'] }}
17 | protocol: {{ hostport['c1protocol'] }}
18 | {% endfor %}
19 | image: {{ c1_image }}
20 | essential: {{ c1_essential }}
21 | privileged: {{ c1_privileged }}
22 | {% if c1_environment_variables is defined %}
23 | environment:
24 | {% for env in c1_environment_variables %}
25 | - name: {{ env['c1name'] }}
26 | value: {{ env['c1value'] }}
27 | {% endfor %}
28 | {% endif %}
29 | mountPoints:
30 | - sourceVolume: {{ src_volume_name }}
31 | containerPath: {{ mount_container_path }}
32 | readOnly: {{ mount_readonly_bool }}
33 | logConfiguration:
34 | logDriver: 'awslogs'
35 | options:
36 | awslogs-group: {{ c1_awslogs_group }}
37 | awslogs-region: {{ c1_log_region }}
38 | awslogs-stream-prefix: {{ c1_awslogs_stream_prefix }}
39 | dockerLabels:
40 | name: {{ c1_docker_labels }}
41 | dockerSecurityOptions:
42 | - {{ c1_docker_security_options }}
43 | volumes:
44 | - name: {{ src_volume_name }}
45 | host:
46 | sourcePath: {{ volume_src_path }}
47 |
48 | # ***********************************************************************************************************
49 | # Service parameters will be used to create service
50 | # We can also add load balancers if required.
51 | # Please visit : http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.create_service
52 | # ***********************************************************************************************************
53 | serviceCreate:
54 | - cluster: {{ cluster_name }}
55 | serviceName: {{ service_name }}
56 | # Task definition is family:revision.
57 | # Create the service on the latest revision and use update to roll back or deploy a new version
58 | taskDefinition: {{ task_family }}
59 | desiredCount: {{ count }}
60 | clientToken: {{ service_name }}
61 | deploymentConfiguration:
62 | maximumPercent: {{ maximumPercent }}
63 | minimumHealthyPercent: {{ minimumHealthyPercent }}
64 |
65 | # ******************************************************************
66 | # Service Update parameters will be used to update running service
67 | # ******************************************************************
68 | serviceUpdate:
69 | - cluster: {{ cluster_name }}
70 | serviceName: {{ service_name }}
71 | # Desired count also can be updated via command line
72 | desiredCount: {{ count }}
73 | # Specify task def revision to roll back
74 | taskDefinition: {{ task_family }}
75 | deploymentConfiguration:
76 | maximumPercent: {{ maximumPercent }}
77 | minimumHealthyPercent: {{ minimumHealthyPercent }}
78 | # **********************************************************************
79 | # Service delete will be used to delete services where running count is 0
80 | # Cannot be used via automated tools as it requires user confirmation
81 | # **********************************************************************
82 | serviceDelete:
83 | - cluster: {{ cluster_name }}
84 | serviceName: {{ service_name }}
85 |
--------------------------------------------------------------------------------
/terraform/modules/vpc/main.tf:
--------------------------------------------------------------------------------
1 | /**********************************************
2 | This will deploy one vpc
3 | - public subnet/private subnets based on variables
4 | - igw
5 | - nat-gateway
6 | - associated route tables
7 | **********************************************/
8 |
9 | resource "aws_vpc" "vpc" {
10 | cidr_block = "${var.vpc_cidr}"
11 | enable_dns_hostnames = "${var.enable_dns_hostnames}"
12 | tags = {
13 | Name = "${var.vpc_name}"
14 | environment = "${var.environment}"
15 | }
16 | }
17 |
18 | /* Internet-Gateways */
19 | resource "aws_internet_gateway" "igw" {
20 | vpc_id = "${aws_vpc.vpc.id}"
21 | tags = {
22 | Name = "igw-pub-sub"
23 | environment = "${var.environment}"
24 | }
25 | }
26 |
27 | /***** Routing information public subnet ***************/
28 | resource "aws_route_table" "pub_rtb" {
29 | vpc_id = "${aws_vpc.vpc.id}"
30 | route {
31 | cidr_block = "0.0.0.0/0"
32 | gateway_id ="${aws_internet_gateway.igw.id}"
33 | }
34 | tags = {
35 | Name = "Public-RTB"
36 | environment = "${var.environment}"
37 | }
38 | }
39 |
40 | resource "aws_route_table_association" "a-pub-sub" {
41 | count = "${length(var.public_sub_cidr)}"
42 | subnet_id = "${element(aws_subnet.public-subnet.*.id,count.index)}"
43 | route_table_id = "${element(aws_route_table.pub_rtb.*.id,count.index)}"
44 | }
45 |
46 | /**************** Public-subnet **********/
47 | resource "aws_subnet" "public-subnet" {
48 | count = "${length(var.public_sub_cidr)}"
49 | availability_zone = "${element(var.azs,count.index)}"
50 | cidr_block = "${var.public_sub_cidr[count.index]}"
51 | vpc_id = "${aws_vpc.vpc.id}"
52 | tags = {
53 | Name = "Public_Subnet-${count.index}"
54 | environment = "${var.environment}"
55 | }
56 | }
57 |
58 | /********************Nat-Gateway **********************/
59 | resource "aws_nat_gateway" "ngw" {
60 | allocation_id = "${aws_eip.nat.id}"
61 | subnet_id = "${aws_subnet.public-subnet.*.id[0]}"
62 | depends_on = ["aws_internet_gateway.igw"]
63 | }
64 |
65 | resource "aws_eip" "nat"{
66 | vpc = true
67 | }
68 |
69 |
70 | /********* Private-subnet ***************/
71 |
72 | resource "aws_subnet" "private-subnet" {
73 | count = "${length(var.private_sub_cidr)}"
74 | availability_zone = "${element(var.azs,count.index)}"
75 | cidr_block = "${var.private_sub_cidr[count.index]}"
76 | vpc_id = "${aws_vpc.vpc.id}"
77 | tags = {
78 | Name = "Private_Subnet-${count.index}"
79 | environment = "${var.environment}"
80 | }
81 | depends_on = ["aws_nat_gateway.ngw"]
82 | }
83 |
84 | /***** Routing information private subnet ************/
85 |
86 | resource "aws_route_table" "pri_rtb" {
87 | vpc_id = "${aws_vpc.vpc.id}"
88 | route {
89 | cidr_block = "0.0.0.0/0"
90 | nat_gateway_id = "${aws_nat_gateway.ngw.id}"
91 | }
92 | tags = {
93 | Name = "Private-RTB"
94 | environment = "${var.environment}"
95 | }
96 | }
97 |
98 | resource "aws_route_table_association" "a-priv-sub" {
99 | count = "${length(var.private_sub_cidr)}"
100 | subnet_id = "${element(aws_subnet.private-subnet.*.id,count.index)}"
101 | route_table_id = "${element(aws_route_table.pri_rtb.*.id,count.index)}"
102 | }
103 |
104 |
105 |
106 | resource "null_resource" "module_dependency" {
107 | depends_on = [
108 | "aws_vpc.vpc",
109 | "aws_subnet.public-subnet",
110 | "aws_subnet.private-subnet",
111 | "aws_internet_gateway.igw",
112 | "aws_route_table.pub_rtb",
113 | "aws_route_table_association.a-pub-sub",
114 | "aws_route_table.pri_rtb",
115 | "aws_route_table_association.a-priv-sub",
116 | "aws_internet_gateway.igw",
117 | "aws_eip.nat"
118 | ]
119 | }
120 |
--------------------------------------------------------------------------------
/docker-images/docker-zk-exhibitor/include/wrapper.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | # Generates the default exhibitor config and launches exhibitor
4 |
5 | # fixing hostname for weave to be IP address
6 | # HOSTNAME=`awk '/^[[:space:]]*($|#)/{next} /'$hostname'/{print $1; exit}' /etc/hosts`
7 | #HOSTNAME=$(uuidgen)
8 |
9 | HOSTNAME="zk"-$(echo $(( RANDOM % (10 - 5 + 1 ) + 5 )))
10 |
11 | MISSING_VAR_MESSAGE="must be set"
12 | DEFAULT_AWS_REGION="us-west-2"
13 | DEFAULT_DATA_DIR="/opt/zookeeper/snapshots"
14 | DEFAULT_LOG_DIR="/opt/zookeeper/transactions"
15 | DEFAULT_ZK_ENSEMBLE_SIZE=3
16 | S3_SECURITY=""
17 | HTTP_PROXY=""
18 | : ${HOSTNAME:?$MISSING_VAR_MESSAGE}
19 | : ${AWS_REGION:=$DEFAULT_AWS_REGION}
20 | : ${ZK_DATA_DIR:=$DEFAULT_DATA_DIR}
21 | : ${ZK_LOG_DIR:=$DEFAULT_LOG_DIR}
22 | : ${ZK_ENSEMBLE_SIZE:=$DEFAULT_ZK_ENSEMBLE_SIZE}
23 | : ${HTTP_PROXY_HOST:=""}
24 | : ${HTTP_PROXY_PORT:=""}
25 | : ${HTTP_PROXY_USERNAME:=""}
26 | : ${HTTP_PROXY_PASSWORD:=""}
27 |
28 | cat <<- EOF > /opt/exhibitor/defaults.conf
29 | zookeeper-data-directory=$ZK_DATA_DIR
30 | zookeeper-install-directory=/opt/zookeeper
31 | zookeeper-log-directory=$ZK_LOG_DIR
32 | log-index-directory=$ZK_LOG_DIR
33 | cleanup-period-ms=300000
34 | check-ms=30000
35 | backup-period-ms=600000
36 | client-port=2181
37 | cleanup-max-files=20
38 | backup-max-store-ms=21600000
39 | connect-port=2888
40 | observer-threshold=0
41 | election-port=3888
42 | zoo-cfg-extra=tickTime\=2000&initLimit\=10&syncLimit\=5&quorumListenOnAllIPs\=false
43 | auto-manage-instances-settling-period-ms=0
44 | auto-manage-instances=1
45 | auto-manage-instances-fixed-ensemble-size=$ZK_ENSEMBLE_SIZE
46 | EOF
47 |
48 |
49 | if [[ -n ${AWS_ACCESS_KEY_ID} ]]; then
50 | cat <<- EOF > /opt/exhibitor/credentials.properties
51 | com.netflix.exhibitor.s3.access-key-id=${AWS_ACCESS_KEY_ID}
52 | com.netflix.exhibitor.s3.access-secret-key=${AWS_SECRET_ACCESS_KEY}
53 | EOF
54 | S3_SECURITY="--s3credentials /opt/exhibitor/credentials.properties"
55 | fi
56 |
57 | if [[ -n ${S3_BUCKET} ]]; then
58 | echo "backup-extra=throttle\=&bucket-name\=${S3_BUCKET}&key-prefix\=${S3_PREFIX}&max-retries\=4&retry-sleep-ms\=30000" >> /opt/exhibitor/defaults.conf
59 |
60 | BACKUP_CONFIG="--configtype s3 --s3config ${S3_BUCKET}:${S3_PREFIX} ${S3_SECURITY} --s3region ${AWS_REGION} --s3backup true"
61 | else
62 | BACKUP_CONFIG="--configtype file --fsconfigdir /opt/zookeeper/local_configs --filesystembackup true"
63 | fi
64 |
65 | if [[ -n ${ZK_PASSWORD} ]]; then
66 | SECURITY="--security web.xml --realm Zookeeper:realm --remoteauth basic:zk"
67 | echo "zk: ${ZK_PASSWORD},zk" > realm
68 | fi
69 |
70 |
71 | if [[ -n $HTTP_PROXY_HOST ]]; then
72 | cat <<- EOF > /opt/exhibitor/proxy.properties
73 | com.netflix.exhibitor.s3.proxy-host=${HTTP_PROXY_HOST}
74 | com.netflix.exhibitor.s3.proxy-port=${HTTP_PROXY_PORT}
75 | com.netflix.exhibitor.s3.proxy-username=${HTTP_PROXY_USERNAME}
76 | com.netflix.exhibitor.s3.proxy-password=${HTTP_PROXY_PASSWORD}
77 | EOF
78 |
79 | HTTP_PROXY="--s3proxy=/opt/exhibitor/proxy.properties"
80 | fi
81 |
82 |
83 | exec 2>&1
84 |
85 | # If we use exec and this is the docker entrypoint, Exhibitor fails to kill the ZK process on restart.
86 | # If we use /bin/bash as the entrypoint and run wrapper.sh by hand, we do not see this behavior. I suspect
87 | # some init or PID-related shenanigans, but I'm punting on further troubleshooting for now since dropping
88 | # the "exec" fixes it.
89 | #
90 | # exec java -jar /opt/exhibitor/exhibitor.jar \
91 | # --port 8181 --defaultconfig /opt/exhibitor/defaults.conf \
92 | # --configtype s3 --s3config thefactory-exhibitor:${CLUSTER_ID} \
93 | # --s3credentials /opt/exhibitor/credentials.properties \
94 | # --s3region us-west-2 --s3backup true
95 |
96 | java -jar /opt/exhibitor/exhibitor.jar \
97 | --port 8181 --defaultconfig /opt/exhibitor/defaults.conf \
98 | ${BACKUP_CONFIG} \
99 | ${HTTP_PROXY} \
100 | --hostname ${HOSTNAME} \
101 | ${SECURITY}
102 |
--------------------------------------------------------------------------------
/terraform/environments/development/main.tf:
--------------------------------------------------------------------------------
1 | /*
2 | -----------------------------------------------------------------
3 | - Setup creds and region via env variables
4 | - For more details: https://www.terraform.io/docs/providers/aws
5 | -----------------------------------------------------------------
6 | Notes:
7 | - control_cidr changes for different modules
8 | - Instance class also changes for different modules
9 | - Default security group is added where traffic is supposed to flow between VPC
10 | */
11 |
12 | /********************************************************************************/
13 |
14 |
15 | provider "aws" {
16 | region = "${var.region}"
17 | }
18 |
19 |
20 | /* Uncomment this if you want to use
21 | S3 as the backend for Terraform state file storage
22 | Terraform locking - you need a DynamoDB table (named by lock_table below)
23 | whose primary key is LockID
24 | terraform {
25 | required_version = ">= 0.9, <= 0.9.6"
26 | backend "s3" {
27 | bucket = "terraform-myapp-remote-state"
28 | key = "terraform.tfstate-development-myapp"
29 | region = "us-east-1"
30 | encrypt = "true"
31 | lock_table = "terraform-state"
32 | }
33 | }
34 | */
35 |
36 |
37 | module "vpc" {
38 | source = "../../modules/vpc"
39 | azs = "${var.azs}"
40 | vpc_cidr = "${var.vpc_cidr}"
41 | public_sub_cidr = "${var.public_sub_cidr}"
42 | private_sub_cidr = "${var.private_sub_cidr}"
43 | enable_dns_hostnames = true
44 | vpc_name = "${var.vpc_name}-${var.environment}"
45 | //-- In case we need to change Domain servers
46 | //dhcp_domain_name_servers = ["${var.domain_servers}"]
47 | environment = "${var.environment}"
48 | }
49 |
50 | module "glp-private-zone" {
51 | source = "../../modules/route53-hosted-zone"
52 | hosted_zone_name = "${var.environment}-internal.com"
53 | vpc_id = "${module.vpc.vpc_id}"
54 | }
55 |
56 | module "bastion" {
57 | source = "../../modules/bastion"
58 | public_sub_cidr = "${var.public_sub_cidr}"
59 | vpc-id = "${module.vpc.vpc_id}"
60 | pub_sub_id = "${module.vpc.aws_pub_subnet_id[0]}"
61 | region = "${var.region}"
62 | bastion_instance_type = "${var.bastion_instance_type}"
63 | keypair_public_key = "${var.keypair_public_key}"
64 | aws_key_name = "${var.aws_key_name}"
65 | control_cidr = "${var.control_cidr}"
66 | ansible_ssh_user = "${var.ansible_ssh_user}"
67 | proxy_cidr = "${var.proxy_cidr}"
68 | environment = "${var.environment}"
69 | }
70 |
71 |
72 | /* Kafka cluster */
73 | module "ecs-kafka-cluster" {
74 | source = "../../modules/ecs-kafka-zk-cluster"
75 | private_subnet_ids = "${module.vpc.aws_pri_subnet_id}"
76 | vpc-id = "${module.vpc.vpc_id}"
77 | region = "${var.region}"
78 | keypair_public_key = "${var.keypair_public_key}"
79 | aws_key_name = "${var.aws_key_name}"
80 | control_cidr = "${var.private_sub_control_cidr}"
81 | kafka_instance_type = "${var.kafka_instance_type}"
82 | efs_data_dir = "${var.efs_kafka_data_dir}"
83 | efs_fs_id = "${module.efs-private-subnet.efs_fs_id}"
84 | environment = "${var.environment}"
85 | bastion_sg_id = "${module.bastion.bastion-sg-id}"
86 | dependency_id = "${module.efs-private-subnet.dependency_id}"
87 | kafka_asg_max_size = "${var.kafka_asg_max_size}"
88 | kafka_asg_min_size = "${var.kafka_asg_min_size}"
89 | kafka_asg_desired_size = "${var.kafka_asg_desired_size}"
90 | ami_owner_name = "${var.ami_owner_name}"
91 | ami_name_regex = "${var.ami_name_regex}"
92 | vpc_cidr = "${var.vpc_cidr}"
93 | }
94 |
95 | /* EFS for Kafka */
96 | module "efs-private-subnet" {
97 | source = "../../modules/efs"
98 | efs_cluster_name = "efs_kafka"
99 | count = "${length(var.azs)}"
100 | subnet_ids = "${module.vpc.aws_pri_subnet_id_str}"
101 | environment = "${var.environment}"
102 | // We need SGs for all instances where EFS is to be launched
103 | security_group_id = [
104 | "${module.ecs-kafka-cluster.kafka-cluster-sg-id}"
105 | ]
106 | }
107 |
108 |
109 | module "aws-log-group" {
110 | source = "../../modules/cloudwatch-log-groups"
111 | log_group_name = "/ecs/${var.environment}-logs"
112 | environment = "${var.environment}"
113 | }
114 |
115 |
116 | module "ansible-ecs-setup" {
117 | source = "../../modules/ansible-ecs"
118 | env = "${var.environment}"
119 | region = "${var.region}"
120 | # This is only used for Couchbase server
121 | route53_private_domain = "${var.environment}-internal.com"
122 | # These add explicit dependencies
123 | dependency_id = [
124 | "${module.ecs-kafka-cluster.dependency_id}"
125 | ]
126 | }
127 |
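/*
Illustrative note: if the commented-out S3 backend block above is enabled, the
working directory has to be (re)initialised before plan/apply will work. On the
Terraform 0.9.x series this repository targets, that is roughly:

    terraform get     # fetch the ../../modules/* sources referenced above
    terraform init    # configure the s3 backend and offer to migrate local state

The S3 bucket and the DynamoDB lock table are assumed to already exist.
*/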
--------------------------------------------------------------------------------
/ansible/playbooks/roles/kong-client/templates/kong-task-def.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 | containers:
3 | - appname: {{ c1_appname }}
4 | family: {{ task_family }}
5 | taskRoleArn: {{ task_role_arn }}
6 | containerDefinitions:
7 | # There can be multiple containers in task def
8 | # Container name will also be used by weave for DNS entries
9 | - name: {{ c1_appname }}
10 | cpu: {{ c1_cpu }}
11 | memory: {{ c1_memory }}
12 | memoryReservation: {{ c1_soft_memory }}
13 | portMappings:
14 | {% for hostport in c1_ports %}
15 | - hostPort: {{ hostport['c1hostPort'] }}
16 | containerPort: {{ hostport['c1containerPort'] }}
17 | protocol: {{ hostport['c1protocol'] }}
18 | {% endfor %}
19 | image: {{ c1_image }}
20 | essential: {{ c1_essential }}
21 | privileged: {{ c1_privileged }}
22 | environment:
23 | {% for env in c1_environment_variables %}
24 | - name: {{ env['c1name'] }}
25 | value: {{ env['c1value'] }}
26 | {% endfor %}
27 | mountPoints:
28 | - sourceVolume: kong-logs
29 | containerPath: /usr/local/kong/logs
30 | readOnly: False
31 | ulimits:
32 | - name: nofile
33 | softLimit: 40960
34 | hardLimit: 40960
35 | - name: core
36 | softLimit: 100000000
37 | hardLimit: 100000000
38 | - name: memlock
39 | softLimit: 100000000
40 | hardLimit: 100000000
41 | logConfiguration:
42 | logDriver: 'awslogs'
43 | options:
44 | awslogs-group: {{ c1_awslogs_group }}
45 | awslogs-region: {{ c1_log_region }}
46 | awslogs-stream-prefix: {{ c1_awslogs_stream_prefix }}
47 | dockerLabels:
48 | name: {{ c1_docker_labels }}
49 | dockerSecurityOptions:
50 | - {{ c1_docker_security_options }}
51 |
52 |
53 | ################# Second container #############################
54 | - name: {{ c2_appname }}
55 | cpu: {{ c2_cpu }}
56 | memory: {{ c2_memory }}
57 | memoryReservation: {{ c2_soft_memory }}
58 | portMappings:
59 | {% for hostport in c2_ports %}
60 | - hostPort: {{ hostport['c2hostPort'] }}
61 | containerPort: {{ hostport['c2containerPort'] }}
62 | protocol: {{ hostport['c2protocol'] }}
63 | {% endfor %}
64 | image: {{ c2_image }}
65 | essential: {{ c2_essential }}
66 | privileged: {{ c2_privileged }}
67 | environment:
68 | {% for env in c2_environment_variables %}
69 | - name: {{ env['c2name'] }}
70 | value: {{ env['c2value'] }}
71 | {% endfor %}
72 | # For now we use AWS logging driver. We will change this in future
73 | logConfiguration:
74 | logDriver: 'awslogs'
75 | options:
76 | awslogs-group: {{ c2_awslogs_group }}
77 | awslogs-region: {{ c2_log_region }}
78 | awslogs-stream-prefix: {{ c2_awslogs_stream_prefix }}
79 | dockerLabels:
80 | name: {{ c2_docker_labels }}
81 | volumes:
82 | - name: kong-logs
83 | host:
84 | sourcePath: /var/log/kong-logs
85 |
86 | # ***********************************************************************************************************
87 | # Service parameters will be used to create service
88 | # We can also add load balancers if required.
89 | # Please visit : http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.create_service
90 | # ***********************************************************************************************************
91 | serviceCreate:
92 | - cluster: {{ cluster_name }}
93 | serviceName: {{ service_name }}
94 | # Task definition is family:revision.
95 | # Create the service on the latest revision and use update to roll back or deploy a new version
96 | taskDefinition: {{ task_family }}
97 | desiredCount: {{ count }}
98 | clientToken: {{ service_name }}
99 | deploymentConfiguration:
100 | maximumPercent: {{ maximumPercent }}
101 | minimumHealthyPercent: {{ minimumHealthyPercent }}
102 |
103 | # ******************************************************************
104 | # Service Update parameters will be used to update running service
105 | # ******************************************************************
106 | serviceUpdate:
107 | - cluster: {{ cluster_name }}
108 | serviceName: {{ service_name }}
109 | # Desired count can also be updated via the command line
110 | desiredCount: {{ count }}
111 | # Specify task def revision to roll back
112 | taskDefinition: {{ task_family }}
113 | deploymentConfiguration:
114 | maximumPercent: {{ maximumPercent }}
115 | minimumHealthyPercent: {{ minimumHealthyPercent }}
116 | # **********************************************************************
117 | # Service delete will be used to delete services where the running count is 0
118 | # Cannot be used via automated tools as it requires user confirmation
119 | # **********************************************************************
120 | serviceDelete:
121 | - cluster: {{ cluster_name }}
122 | serviceName: {{ service_name }}
123 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/kong-master/templates/kong-task-def.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 | containers:
3 | - appname: {{ c1_appname }}
4 | family: {{ task_family }}
5 | taskRoleArn: {{ task_role_arn }}
6 | containerDefinitions:
7 | # There can be multiple containers in task def
8 | # Container name will also be used by weave for DNS entries
9 | - name: {{ c1_appname }}
10 | cpu: {{ c1_cpu }}
11 | memory: {{ c1_memory }}
12 | memoryReservation: {{ c1_soft_memory }}
13 | portMappings:
14 | {% for hostport in c1_ports %}
15 | - hostPort: {{ hostport['c1hostPort'] }}
16 | containerPort: {{ hostport['c1containerPort'] }}
17 | protocol: {{ hostport['c1protocol'] }}
18 | {% endfor %}
19 | image: {{ c1_image }}
20 | essential: {{ c1_essential }}
21 | privileged: {{ c1_privileged }}
22 | environment:
23 | {% for env in c1_environment_variables %}
24 | - name: {{ env['c1name'] }}
25 | value: {{ env['c1value'] }}
26 | {% endfor %}
27 | mountPoints:
28 | - sourceVolume: kong-logs
29 | containerPath: /usr/local/kong/logs
30 | readOnly: False
31 | ulimits:
32 | - name: nofile
33 | softLimit: 40960
34 | hardLimit: 40960
35 | - name: core
36 | softLimit: 100000000
37 | hardLimit: 100000000
38 | - name: memlock
39 | softLimit: 100000000
40 | hardLimit: 100000000
41 | logConfiguration:
42 | logDriver: 'awslogs'
43 | options:
44 | awslogs-group: {{ c1_awslogs_group }}
45 | awslogs-region: {{ c1_log_region }}
46 | awslogs-stream-prefix: {{ c1_awslogs_stream_prefix }}
47 | dockerLabels:
48 | name: {{ c1_docker_labels }}
49 | dockerSecurityOptions:
50 | - {{ c1_docker_security_options }}
51 |
52 |
53 | ################# Second container #############################
54 | - name: {{ c2_appname }}
55 | cpu: {{ c2_cpu }}
56 | memory: {{ c2_memory }}
57 | memoryReservation: {{ c2_soft_memory }}
58 | portMappings:
59 | {% for hostport in c2_ports %}
60 | - hostPort: {{ hostport['c2hostPort'] }}
61 | containerPort: {{ hostport['c2containerPort'] }}
62 | protocol: {{ hostport['c2protocol'] }}
63 | {% endfor %}
64 | image: {{ c2_image }}
65 | essential: {{ c2_essential }}
66 | privileged: {{ c2_privileged }}
67 | environment:
68 | {% for env in c2_environment_variables %}
69 | - name: {{ env['c2name'] }}
70 | value: {{ env['c2value'] }}
71 | {% endfor %}
72 | # For now we use AWS logging driver. We will change this in future
73 | logConfiguration:
74 | logDriver: 'awslogs'
75 | options:
76 | awslogs-group: {{ c2_awslogs_group }}
77 | awslogs-region: {{ c2_log_region }}
78 | awslogs-stream-prefix: {{ c2_awslogs_stream_prefix }}
79 | dockerLabels:
80 | name: {{ c2_docker_labels }}
81 | volumes:
82 | - name: kong-logs
83 | host:
84 | sourcePath: /var/log/kong-logs
85 |
86 | # ***********************************************************************************************************
87 | # Service parameters will be used to create service
88 | # We can also add load balancers if required.
89 | # Please visit : http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.create_service
90 | # ***********************************************************************************************************
91 | serviceCreate:
92 | - cluster: {{ cluster_name }}
93 | serviceName: {{ service_name }}
94 | # Task definition is family:revision.
95 | # Create the service on the latest revision and use update to roll back or deploy a new version
96 | taskDefinition: {{ task_family }}
97 | desiredCount: {{ count }}
98 | clientToken: {{ service_name }}
99 | deploymentConfiguration:
100 | maximumPercent: {{ maximumPercent }}
101 | minimumHealthyPercent: {{ minimumHealthyPercent }}
102 |
103 | # ******************************************************************
104 | # Service Update parameters will be used to update running service
105 | # ******************************************************************
106 | serviceUpdate:
107 | - cluster: {{ cluster_name }}
108 | serviceName: {{ service_name }}
109 | # Desired count can also be updated via the command line
110 | desiredCount: {{ count }}
111 | # Specify task def revision to roll back
112 | taskDefinition: {{ task_family }}
113 | deploymentConfiguration:
114 | maximumPercent: {{ maximumPercent }}
115 | minimumHealthyPercent: {{ minimumHealthyPercent }}
116 | # **********************************************************************
117 | # Service delete will be used to delete services where the running count is 0
118 | # Cannot be used via automated tools as it requires user confirmation
119 | # **********************************************************************
120 | serviceDelete:
121 | - cluster: {{ cluster_name }}
122 | serviceName: {{ service_name }}
123 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ### HA Kafka cluster on ECS
2 | ---
3 |
4 |
5 | ###### -- Three-node Kafka cluster which includes HA ZooKeeper
6 | ###### -- EFS volumes mounted and used by both Kafka & ZooKeeper
7 | ###### -- Scalable - easy horizontal scaling for Kafka nodes
8 |
9 |
10 |
11 | 
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | ##### This repository
20 | - Terraform modules and code to deploy a highly available Kafka cluster on ECS
21 | - Ansible integration to demonstrate concepts for deploying Kafka and Cassandra services
22 | - A Python utility that manages deployments on ECS rather than relying on Ansible's ECS module.
23 | - The inability to deploy a service across multiple clusters led to this custom utility
24 | - Also demonstrates deploy-time and destroy-time provisioners in Terraform
25 | - Orchestration of ECS tasks using Ansible where stateful sets are not available.
26 | - Demonstrates use of CloudWatch Logs. A log group and stream are set up for log forwarding and the awslogs logging driver is used.
27 | - Demonstrates cloud-init with Terraform
28 | - Deployment of EFS for Kafka/ZooKeeper
29 |
30 |
31 |
32 |
33 | ##### Pre-requisites
34 | - AWS account.
35 | - Terraform > 0.9.5
36 | - Ansible >= 2.3
37 | - Python 2.7
38 | - Boto, Botocore
39 |
40 |
41 |
42 | #### Deployment architecture
43 | ---
44 | 
45 |
46 |
47 |
48 |
49 | NOTE: Kong can be deployed as a proxy server using code from https://github.com/GloballogicPractices/ECS-Kong
50 |
51 |
52 | ### Deployment
53 | ---
54 | #### What is deployed?
55 | 1. VPC - Three private subnets and three public subnets
56 | 2. One ECS cluster (Kafka, placed in the private subnets)
57 | 3. A bastion node.
58 | 4. AWS Log group and log stream
59 | 5. EBS Volumes of 50G attached to each Cassandra node using cloud-init
60 | 6. Route53 for private hosted zone
61 | 7. EFS as backing storage
62 |
63 | #### Deployment procedure
64 | 1. Ensure pre-requisites are met
65 | 2. Decide a region where this needs to be deployed
66 | 3. This guide assumes a region with 3 AZs. You can reduce the number in the terraform.tfvars file
67 | 4. Ensure a private key is available in AWS
68 |
69 |
70 | ```shell
71 | # Prepare your environment ( Terraform and Ansible )
72 | # Change directory to terraform/environments/development
73 | # We are considering a sample development environment
74 | # Update the secrets.tfvars file with your public key
75 |
76 | $ cat secrets.tfvars
77 | aws_key_name = "test-cluster-key"
78 | aws_key_path = "~/.ssh/test-cluster-key.pem"
79 | // Can be generated using
80 | // ssh-keygen -y -f mykey.pem > mykey.pub
81 | keypair_public_key = "ssh-rsa publickey" # Replace this with public key corresponding to your private key in AWS
82 |
83 | # You can use any authentication procedures mentioned here https://www.terraform.io/docs/providers/aws/
84 |
85 | $ export AWS_ACCESS_KEY_ID="anaccesskey"
86 | $ export AWS_SECRET_ACCESS_KEY="asecretkey"
87 | $ export AWS_DEFAULT_REGION="us-west-2"
88 |
89 | ```
90 |
91 | #### terraform.tfvars for your infra
92 |
93 | ```shell
94 | /*
95 | Variables for deploying stack
96 | --------------------------------
97 | - ACM certificates have to pre-exist
98 | */
99 |
100 | // General
101 | region = "eu-central-1" #Select a region based on your preference
102 | vpc_name = "Custom-VPC"
103 | vpc_cidr = "10.2.0.0/16"
104 | // This is for generated ssh.cfg if you want to get into the instance
105 | proxy_cidr = "10.2.*"
106 |
107 | /*
108 | Environment keyword is important
109 | - This will become suffix for your clusters
110 | - Ansible roles for deployments are called based on the environment
111 | - For simplicity we keep this as development.
112 | */
113 |
114 | environment = "development"
115 |
116 | # AZs are a combination of the AZ list length + subnet CIDRs
117 | public_sub_cidr = ["10.2.0.0/24","10.2.1.0/24","10.2.2.0/24"]
118 | private_sub_cidr = ["10.2.3.0/24","10.2.4.0/24","10.2.5.0/24"]
119 | azs = ["eu-central-1a","eu-central-1b","eu-central-1c"]
120 |
121 |
122 | // You can reduce the size if you do not want to incur cost
123 | bastion_instance_type = "t2.micro"
124 | kafka_instance_type = "t2.medium"
125 |
126 | # Ansible auto ssh - used when you want to do host level
127 | # configuration
128 | ansible_ssh_user = "ec2-user"
129 | ansible_user = "ansible"
130 |
131 |
132 | // For public facing sites and ELBs
133 | // Applications will be accessible from these IPs only
134 | control_cidr = "52.45.0.0/16,138.5.0.0/16"
135 |
136 | # ECS Kafka cluster
137 | kafka_asg_max_size = 3
138 | kafka_asg_min_size = 3
139 | kafka_asg_desired_size = 3
140 |
141 |
142 | // Same as vpc cidr. Can change upon vpc peering
143 | private_sub_control_cidr ="10.2.0.0/16"
144 |
145 |
146 |
147 | // Kafka EFS
148 | // Zookeeper config is stored in a subdirectory
149 | // under kafka directory
150 | efs_kafka_data_dir = "/kafka-data"
151 |
152 | ```
153 |
154 |
155 | ##### Plan and setup
156 | ```shell
157 | terraform plan -var-file="secrets.tfvars"
158 | # If successful then
159 | terraform apply -var-file="secrets.tfvars"
160 | ## There should be no manual intervention required.
161 | ```
162 |
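On a fresh checkout the local modules have to be fetched before `plan` will succeed, and the stack is torn down with `destroy`. The sequence below is an illustrative sketch for the Terraform 0.9.x series this repository targets:

```shell
# Pull in the modules referenced from terraform/environments/development
terraform get

# Only needed if the commented-out S3 backend block in main.tf has been enabled
terraform init

# Tear the whole stack down again when finished
terraform destroy -var-file="secrets.tfvars"
```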
163 |
164 | ##### Note
165 | - Cost: This is beyond the scope of Free-tier.
166 | - Environment: The environment keyword is used to pick up a defined Ansible role. If you change or add new environments, ensure that a corresponding YAML file exists in the Ansible role
167 | - Private hosted zone takes the form of
168 | ```shell
169 | kafka.{{environment}}-internal.com
170 | ```
171 | For more information on the ECS utility, see: https://github.com/faizan82/ecs-orchestrate
172 |
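##### Driving the ECS utility directly
The Python utility shipped in this repo (`ansible/playbooks/roles/files/orchestrate.py`) can also be run by hand. The commands below are an illustrative sketch only - the YAML file name is a placeholder for a rendered task-definition config, and the flags correspond to the script's own argparse options (`-r`, `-f`, `-c`, `-v`, `--create`/`--update`/`--delete`/`--task`):

```shell
# Register the task definition and create the service described in a rendered task-def YAML
python orchestrate.py -r eu-central-1 --create -f my-task-def.yaml -v

# Scale the same service down to zero tasks (-c overrides desiredCount from the file)
python orchestrate.py -r eu-central-1 --update -f my-task-def.yaml -c 0

# Delete the (now empty) service
python orchestrate.py -r eu-central-1 --delete -f my-task-def.yaml
```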
--------------------------------------------------------------------------------
/ansible/hosts/ec2.ini:
--------------------------------------------------------------------------------
1 | # Ansible EC2 external inventory script settings
2 | #
3 |
4 | [ec2]
5 |
6 | # to talk to a private eucalyptus instance uncomment these lines
7 | # and edit eucalyptus_host to be the host name of your cloud controller
8 | #eucalyptus = True
9 | #eucalyptus_host = clc.cloud.domain.org
10 |
11 | # AWS regions to make calls to. Set this to 'all' to make request to all regions
12 | # in AWS and merge the results together. Alternatively, set this to a comma
13 | # separated list of regions. E.g. 'us-east-1, us-west-1, us-west-2'
14 | # 'auto' is AWS_REGION or AWS_DEFAULT_REGION environment variable.
15 | regions = auto
16 | regions_exclude = us-gov-west-1, cn-north-1
17 |
18 | # When generating inventory, Ansible needs to know how to address a server.
19 | # Each EC2 instance has a lot of variables associated with it. Here is the list:
20 | # http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
21 | # Below are 2 variables that are used as the address of a server:
22 | # - destination_variable
23 | # - vpc_destination_variable
24 |
25 | # This is the normal destination variable to use. If you are running Ansible
26 | # from outside EC2, then 'public_dns_name' makes the most sense. If you are
27 | # running Ansible from within EC2, then perhaps you want to use the internal
28 | # address, and should set this to 'private_dns_name'. The key of an EC2 tag
29 | # may optionally be used; however the boto instance variables hold precedence
30 | # in the event of a collision.
31 | destination_variable = private_dns_name
32 |
33 | # This allows you to override the inventory_name with an ec2 variable, instead
34 | # of using the destination_variable above. Addressing (aka ansible_ssh_host)
35 | # will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
36 | #hostname_variable = tag_Name
37 |
38 | # For server inside a VPC, using DNS names may not make sense. When an instance
39 | # has 'subnet_id' set, this variable is used. If the subnet is public, setting
40 | # this to 'ip_address' will return the public IP address. For instances in a
41 | # private subnet, this should be set to 'private_ip_address', and Ansible must
42 | # be run from within EC2. The key of an EC2 tag may optionally be used; however
43 | # the boto instance variables hold precedence in the event of a collision.
44 | # WARNING: - instances that are in the private vpc, _without_ public ip address
45 | # will not be listed in the inventory until You set:
46 | # vpc_destination_variable = private_ip_address
47 | vpc_destination_variable = private_ip_address
48 |
49 | # The following two settings allow flexible ansible host naming based on a
50 | # python format string and a comma-separated list of ec2 tags. Note that:
51 | #
52 | # 1) If the tags referenced are not present for some instances, empty strings
53 | # will be substituted in the format string.
54 | # 2) This overrides both destination_variable and vpc_destination_variable.
55 | #
56 | #destination_format = {0}.{1}.example.com
57 | #destination_format_tags = Name,environment
58 |
59 | # To tag instances on EC2 with the resource records that point to them from
60 | # Route53, uncomment and set 'route53' to True.
61 | route53 = False
62 |
63 | # To exclude RDS instances from the inventory, uncomment and set to False.
64 | rds = False
65 |
66 | # To exclude ElastiCache instances from the inventory, uncomment and set to False.
67 | elasticache = False
68 |
69 | # Additionally, you can specify the list of zones to exclude looking up in
70 | # 'route53_excluded_zones' as a comma-separated list.
71 | # route53_excluded_zones = samplezone1.com, samplezone2.com
72 |
73 | # By default, only EC2 instances in the 'running' state are returned. Set
74 | # 'all_instances' to True to return all instances regardless of state.
75 | all_instances = False
76 |
77 | # By default, only EC2 instances in the 'running' state are returned. Specify
78 | # EC2 instance states to return as a comma-separated list. This
79 | # option is overridden when 'all_instances' is True.
80 | # instance_states = pending, running, shutting-down, terminated, stopping, stopped
81 |
82 | # By default, only RDS instances in the 'available' state are returned. Set
83 | # 'all_rds_instances' to True to return all RDS instances regardless of state.
84 | all_rds_instances = False
85 |
86 | # Include RDS cluster information (Aurora etc.)
87 | include_rds_clusters = False
88 |
89 | # By default, only ElastiCache clusters and nodes in the 'available' state
90 | # are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
91 | # to True to return all ElastiCache clusters and nodes, regardless of state.
92 | #
93 | # Note that all_elasticache_nodes only applies to listed clusters. That means
94 | # if you set all_elasticache_clusters to False, no nodes will be returned from
95 | # unavailable clusters, regardless of the state and to what you set for
96 | # all_elasticache_nodes.
97 | all_elasticache_replication_groups = False
98 | all_elasticache_clusters = False
99 | all_elasticache_nodes = False
100 |
101 | # API calls to EC2 are slow. For this reason, we cache the results of an API
102 | # call. Set this to the path you want cache files to be written to. Two files
103 | # will be written to this directory:
104 | # - ansible-ec2.cache
105 | # - ansible-ec2.index
106 | cache_path = ~/.ansible/tmp
107 |
108 | # The number of seconds a cache file is considered valid. After this many
109 | # seconds, a new API call will be made, and the cache file will be updated.
110 | # To disable the cache, set this value to 0
111 | cache_max_age = 300
112 |
113 | # Organize groups into a nested/hierarchy instead of a flat namespace.
114 | nested_groups = False
115 |
116 | # Replace - tags when creating groups to avoid issues with ansible
117 | replace_dash_in_groups = True
118 |
119 | # If set to true, any tag of the form "a,b,c" is expanded into a list
120 | # and the results are used to create additional tag_* inventory groups.
121 | expand_csv_tags = False
122 |
123 | # The EC2 inventory output can become very large. To manage its size,
124 | # configure which groups should be created.
125 | group_by_instance_id = True
126 | group_by_region = True
127 | group_by_availability_zone = True
128 | group_by_aws_account = False
129 | group_by_ami_id = True
130 | group_by_instance_type = True
131 | group_by_key_pair = True
132 | group_by_vpc_id = True
133 | group_by_security_group = True
134 | group_by_tag_keys = True
135 | group_by_tag_none = True
136 | group_by_route53_names = True
137 | group_by_rds_engine = True
138 | group_by_rds_parameter_group = True
139 | group_by_elasticache_engine = True
140 | group_by_elasticache_cluster = True
141 | group_by_elasticache_parameter_group = True
142 | group_by_elasticache_replication_group = True
143 |
144 | # If you only want to include hosts that match a certain regular expression
145 | # pattern_include = staging-*
146 |
147 | # If you want to exclude any hosts that match a certain regular expression
148 | # pattern_exclude = staging-*
149 |
150 | # Instance filters can be used to control which instances are retrieved for
151 | # inventory. For the full list of possible filters, please read the EC2 API
152 | # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
153 | # Filters are key/value pairs separated by '=', to list multiple filters use
154 | # a list separated by commas. See examples below.
155 |
156 | # If you want to apply multiple filters simultaneously, set stack_filters to
157 | # True. Default behaviour is to combine the results of all filters. Stacking
158 | # allows the use of multiple conditions to filter down, for example by
159 | # environment and type of host.
160 | stack_filters = False
161 |
162 | # Retrieve only instances with (key=value) env=staging tag
163 | # instance_filters = tag:env=staging
164 |
165 | # Retrieve only instances with role=webservers OR role=dbservers tag
166 | # instance_filters = tag:role=webservers,tag:role=dbservers
167 |
168 | # Retrieve only t1.micro instances OR instances with tag env=staging
169 | # instance_filters = instance-type=t1.micro,tag:env=staging
170 |
171 | # You can use wildcards in filter values also. Below will list instances which
172 | # tag Name value matches webservers1*
173 | # (ex. webservers15, webservers1a, webservers123 etc)
174 | # instance_filters = tag:Name=webservers1*
175 |
176 | # A boto configuration profile may be used to separate out credentials
177 | # see http://boto.readthedocs.org/en/latest/boto_config_tut.html
178 | # boto_profile = some-boto-profile-name
179 |
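# ---------------------------------------------------------------------------
# Illustrative note: this ini is read by Ansible's stock ec2.py dynamic
# inventory script, which is not bundled here and is assumed to be fetched
# from the Ansible project and placed next to this file. ec2.py locates the
# ini via the EC2_INI_PATH environment variable, e.g. (placeholder playbook):
#
#   export AWS_DEFAULT_REGION="eu-central-1"   # 'regions = auto' above reads this
#   export EC2_INI_PATH="ansible/hosts/ec2.ini"
#   ansible-playbook -i ansible/hosts/ec2.py ansible/playbooks/<playbook>.yml
# ---------------------------------------------------------------------------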
--------------------------------------------------------------------------------
/docker-images/kafka-docker-wm/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/ansible/playbooks/roles/files/orchestrate.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | '''
4 | @Author: Faizan ali
5 | TODO:
6 | Logging setup to make this lambda ready
7 | '''
8 |
9 |
10 | import boto3
11 | import json
12 | import sys
13 | import argparse
14 | import logging
15 | import yaml
16 | import os
17 | import pprint
18 |
19 | parser=argparse.ArgumentParser(description='Arguments for managing services with ecs')
20 | #parser.add_argument('-r',action='store',dest='region',default='us-east-2',help='OPTIONAL: Region. Default is us-east-2')
21 | parser.add_argument('-f',action='store',dest='confFile',default=None,help='OPTIONAL: Config to pass for parsing.')
22 | parser.add_argument('-c',action='store',dest='count',default=int(-1),help='OPTIONAL: Count for scaling up or down, If not specified, will be picked from update conf file')
23 | parser.add_argument('-v',action='store_true',help='OPTIONAL: Verbose flag')
24 | parser.add_argument('-r',action='store',dest='region',required=True,help='Required: Target region [us-east-1,us-east-2]. ')
25 | parser.add_argument('-e',action='store',dest='env',default='dev',help='OPTIONAL: Env (dev,qa,stage) to pick all files automatically. ')
26 | action=parser.add_mutually_exclusive_group(required=True)
27 | action.add_argument('--create',action='store_true',help='REQUIRED/EXCLUSIVE : Create a service from task definition')
28 | action.add_argument('--update',action='store_true',help='REQUIRED/EXCLUSIVE : Update a service. [ task defs, container count etc]')
29 | action.add_argument('--delete',action='store_true',help='REQUIRED/EXCLUSIVE : Delete a service from specified cluster')
30 | action.add_argument('--task',action='store_true',help='REQUIRED/EXCLUSIVE : Create a task def without creating service')
31 |
32 |
33 |
34 |
35 | def deleteService(mode,confFile,region,clusterName):
36 | """Function to parse config file for deleting service"""
37 | client = boto3.client('ecs',region_name=region)
38 | with open(confFile, 'r') as config:
39 | confParser = yaml.load(config)
40 | # keeping failure to 503 since no error code is returned
41 | failedResponseCode = 503
42 |
43 | for modules in confParser['containers']:
44 | for svc in modules['serviceDelete']:
45 | svcCluster = clusterName
46 | svcServiceName = svc['serviceName']
47 | try:
48 | if mode:
49 | print "Deleting service - %s " % (svcServiceName)
50 | response = client.delete_service(cluster=svcCluster, service=svcServiceName)
51 | deleteResponse = response['ResponseMetadata']['HTTPStatusCode']
52 | if mode:
53 | print "Delete response:: %s " %(deleteResponse)
54 | except Exception, e:
55 | # verbose
56 | if mode:
57 | print e
58 | print "Service delete failed"
59 | print "Response::: %s " % (failedResponseCode)
60 | return failedResponseCode
61 |
62 |
63 | def registerNUpdateTask(mode,confFile,region):
64 | """Function to parse config file for registering and creating/updating task def"""
65 | client = boto3.client('ecs',region_name=region)
66 | with open(confFile, 'r') as config:
67 | confParser = yaml.load(config)
68 | # keeping failure to 503 since no error code is returned
69 | failedResponseCode = 503
70 |
71 | # setup parameter for task-def registration
72 | for modules in confParser['containers']:
73 | family=modules['family']
74 | containerDefinitions=modules['containerDefinitions']
75 | try:
76 | volumes = modules['volumes']
77 | except:
78 | if mode:
79 | print "No volumes to be attached"
80 | volumes = ''
81 | try:
82 | networkMode = modules['networkMode']
83 | except:
84 | networkMode = 'bridge'
85 | # verbose
86 | #if mode:
87 | #pprint.pprint(containerDefinitions)
88 | try:
89 | if mode:
90 | print "Registering/updating task def: %s " % (family)
91 | if volumes:
92 | response = client.register_task_definition(family=family, networkMode=networkMode, containerDefinitions=containerDefinitions, volumes=volumes)
93 | else:
94 | response = client.register_task_definition(family=family, networkMode=networkMode, containerDefinitions=containerDefinitions)
95 | regResponse=response['ResponseMetadata']['HTTPStatusCode']
96 | if mode:
97 | print "Task registered successfully..."
98 | print regResponse
99 | return regResponse
100 | except Exception, e:
101 | # verbose
102 | if mode:
103 | print e
104 | print "Service task creation or update failed"
105 | print "Response::: %s " % (failedResponseCode)
106 | return failedResponseCode
107 |
108 |
109 | def updateService(mode,confFile,region,count,clusterName):
110 | """Function to parse config file for updating service"""
111 | # @TODO : check for task-def update
112 | client = boto3.client('ecs',region_name=region)
113 | with open(confFile, 'r') as config:
114 | confParser = yaml.load(config)
115 | # keeping failure to 503 since no error code is returned
116 | failedResponseCode = 503
117 |
118 |
119 | #register task def before creating service.
120 | # Do not update task def if service count going to 0
121 | if count == int(0):
122 | regResponse = 200
123 | else:
124 | regResponse = registerNUpdateTask(mode,confFile,region)
125 | if mode:
126 | print "Task register response: %s " % (regResponse)
127 |
128 | if regResponse == 200:
129 | for modules in confParser['containers']:
130 | for svc in modules['serviceUpdate']:
131 | svcCluster = clusterName
132 | svcServiceName = svc['serviceName']
133 | svcTaskDefinition = svc['taskDefinition']
134 | # Count of tasks can be specified at the commandline as well
135 | if count == int(-1): svcDesiredCount = svc['desiredCount']
136 | else: svcDesiredCount = count
137 | svcdeploymentConfiguration = svc['deploymentConfiguration']
138 | try:
139 | if mode:
140 | print "Updating service %s with count %s " % (svcServiceName,svcDesiredCount)
141 | response = client.update_service(cluster=svcCluster, service=svcServiceName, desiredCount=svcDesiredCount, taskDefinition=svcTaskDefinition, deploymentConfiguration=svcdeploymentConfiguration)
142 | updateResponse = response['ResponseMetadata']['HTTPStatusCode']
143 | if mode:
144 | print "Update response:: %s " %(updateResponse)
145 |
146 | except Exception, e:
147 | # verbose
148 | if mode:
149 | print e
150 | print "Service update failed"
151 | print "Response::: %s " % (failedResponseCode)
152 | return failedResponseCode
153 |
154 |
155 | def createService(mode,confFile,region,count,clusterName):
156 | """Function to parse config file for registering and creating services"""
157 | client = boto3.client('ecs',region_name=region)
158 | with open(confFile, 'r') as config:
159 | confParser = yaml.load(config)
160 | # keeping failure to 503 since no error code is returned
161 | failedResponseCode = 503
162 |
163 | #register task def before creating service
164 | if count == int(0):
165 | regResponse = 200
166 | else:
167 | regResponse = registerNUpdateTask(mode,confFile,region)
168 | if mode:
169 | print regResponse
170 |
171 | if regResponse == 200:
172 | for modules in confParser['containers']:
173 | for svc in modules['serviceCreate']:
174 | #svcCluster = svc['cluster']
175 | svcCluster = clusterName
176 | svcServiceName = svc['serviceName']
177 | svcTaskDefinition = svc['taskDefinition']
178 | # Count of tasks can be specified at the commandline as well
179 | if count == int(-1): svcDesiredCount = svc['desiredCount']
180 | else: svcDesiredCount = count
181 | svcClientToken = svc['clientToken']
182 | svcdeploymentConfiguration = svc['deploymentConfiguration']
183 | try:
184 | svcloadBalancers = svc['loadBalancers']
185 | svcloadBalancers = []
186 | except:
187 | svcloadBalancers = []
188 | svcLbrole = ''
189 | # verbose
190 | #if mode:
191 | #pprint.pprint(svc)
192 | try:
193 | if mode:
194 | print "Creating service %s " % (svcServiceName)
195 | # If the task def was registered then create the service
196 | svcCreateResponse = client.create_service(cluster=svcCluster,serviceName=svcServiceName,taskDefinition=svcTaskDefinition,loadBalancers=svcloadBalancers,desiredCount=svcDesiredCount,clientToken=svcClientToken,deploymentConfiguration=svcdeploymentConfiguration)
197 | createResponse=svcCreateResponse['ResponseMetadata']['HTTPStatusCode']
198 | if createResponse == 200:
199 | # verbose
200 | if mode:
201 | print createResponse
202 | print "Response::: %s " % (createResponse)
203 | return createResponse
204 | else: return failedResponseCode
205 | except Exception, e:
206 | # verbose
207 | if mode:
208 | print e
209 | print "Service creation failed"
210 | print "Response::: %s " % (failedResponseCode)
211 | return failedResponseCode
212 |
213 | def main(mode,confFile,region,count,action):
214 | """Function to manage multiple clusters for a single service situation as in consul client"""
215 | with open(confFile, 'r') as config:
216 | confParser = yaml.load(config)
217 | responseCode = 503
218 |
219 | # If a service has listed multiple clusters then loop over it and
220 | # deploy service to all clusters. Else deploy to only one cluster
221 | for modules in confParser['containers']:
222 | #print modules
223 | for svc in modules['serviceCreate']:
224 | svcCluster = svc['cluster']
225 | #print svc
226 | if isinstance(svc['cluster'],list):
227 | if mode:
228 | print 'Multiple clusters for service'
229 | if action == 'create':
230 | for cluster in svc['cluster']:
231 | print cluster
232 | response = createService(mode,confFile,region,count,cluster)
233 | responseCode = response
234 | if action == 'update':
235 | for cluster in svc['cluster']:
236 | response = updateService(mode,confFile,region,count,cluster)
237 | responseCode = response
238 | if action == 'delete':
239 | for cluster in svc['cluster']:
240 | response = deleteService(mode,confFile,region,cluster)
241 | responseCode = response
242 | else:
243 | if mode:
244 | print "Single cluster for single service"
245 | if action == 'create':
246 | response = createService(mode,confFile,region,count,svc['cluster'])
247 | responseCode = response
248 | if action == 'update':
249 | response = updateService(mode,confFile,region,count,svc['cluster'])
250 | responseCode = response
251 | if action == 'delete':
252 | response = deleteService(mode,confFile,region,svc['cluster'])
253 | responseCode = response
254 |
255 | return responseCode
256 |
257 |
258 | if __name__ == '__main__':
259 | # Call the parser to get the values
260 | args = parser.parse_args()
261 | # verbosity
262 | mode=args.v
263 | confFile = args.confFile
264 | region = args.region
265 | # Env is captured directly
266 | count = int(args.count)
267 |
268 |
269 | # *****************************************#
270 | #### SETUP TASK_DEF ONLY ###
271 | # *****************************************#
272 | if args.task:
273 | action = 'create-task'
274 | print "Create-task request received...\n"
275 | if confFile:
276 | #serviceConfFile = os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"+confFile
277 | serviceConfFile = confFile
278 | # verbose
279 | if mode:
280 | print "Service config file : %s " %(serviceConfFile)
281 | # Create service for a single file
282 | registerNUpdateTask(mode,confFile,region)
283 | else:
284 | # loop over dir and extract all files
285 | for confFile in os.listdir(os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"):
286 | if confFile.endswith(".yaml"):
287 | serviceConfFile = os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"+confFile
288 | if mode:
289 | print "Service config file : %s " %(serviceConfFile)
290 | # Create service for all files in config-env directory
291 | registerNUpdateTask(mode,confFile,region)
292 |
293 |
294 | # *****************************************#
295 | #### SETUP TASK_DEF AND CREATE_SERVICE ###
296 | # *****************************************#
297 | if args.create:
298 | action = 'create'
299 | print "Create request received...\n"
300 | if confFile:
301 | #serviceConfFile = os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"+confFile
302 | serviceConfFile = confFile
303 | # verbose
304 | if mode:
305 | print "Service config file : %s " %(serviceConfFile)
306 | # Create service for a single file
307 | main(mode,serviceConfFile,region,count,action)
308 | else:
309 | # loop over dir and extract all files
310 | for confFile in os.listdir(os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"):
311 | if confFile.endswith(".yaml"):
312 | serviceConfFile = os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"+confFile
313 | if mode:
314 | print "Service config file : %s " %(serviceConfFile)
315 | # Create service for all files in config-env directory
316 | main(mode,serviceConfFile,region,count,action)
317 |
318 |
319 | # ****************************************
320 | #### UPDATE EXISTING SERVICE
321 | #### COUNT,TASKDEF CAN BE UPDATED
322 | # ***************************************
323 | if args.update:
324 | action = 'update'
325 | print "Update request received\n"
326 | if confFile:
327 | # loop over dir and extract all files
328 | #serviceConfFile = os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"+confFile
329 | serviceConfFile = confFile
330 | if mode:
331 | print "Service config file : %s " %(serviceConfFile)
332 | # Update service for a single file
333 | main(mode,serviceConfFile,region,count,action)
334 | else:
335 | for confFile in os.listdir(os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"):
336 | if confFile.endswith(".yaml"):
337 | serviceConfFile = os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"+confFile
338 | if mode:
339 | print "Service config file : %s " %(serviceConfFile)
340 | # Update service for all files in config-env directory
341 | print "...."
342 | print serviceConfFile
343 | main(mode,serviceConfFile,region,count,action)
344 |
345 |
346 | # *****************************************************
347 | #### DELETE SERVICES
348 | #### NOTE: Count in delete does not have any significance
349 | # ******************************************************
350 | if args.delete:
351 | action = 'delete'
352 | print "Delete request received...\n"
353 | if confFile:
354 | # loop over dir and extract all files
355 | #serviceConfFile = os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"+confFile
356 | serviceConfFile = confFile
357 | # verbose
358 | if mode:
359 | print "Service config file : %s " %(serviceConfFile)
360 | # Delete service for a single file
361 | main(mode,serviceConfFile,region,count,action)
362 | else:
363 | for confFile in os.listdir(os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"):
364 | if confFile.endswith(".yaml"):
365 | serviceConfFile = os.path.dirname(os.path.abspath(__file__))+"/config/"+args.env+"/"+confFile
366 | if mode:
367 | print "Service config file : %s " %(serviceConfFile)
368 | # Delete service for all files in config-env directory
369 | main(mode,serviceConfFile,region,count,action)
370 |
--------------------------------------------------------------------------------