├── ansible
├── roles
│ ├── java
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── containers
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── registry.yml
│ │ │ ├── jenkins.yml
│ │ │ └── elk.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── docker
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── rsyslog
│ │ ├── templates
│ │ │ └── 10-logstash.conf
│ │ └── tasks
│ │ │ └── main.yml
│ ├── nfs
│ │ └── tasks
│ │ │ └── main.yml
│ ├── users
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── docker-compose
│ │ └── tasks
│ │ │ └── main.yml
│ ├── logstash
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── files
│ │ │ └── syslog-collectd.conf
│ └── jenkins-agent
│ │ └── tasks
│ │ └── main.yml
├── base.yml
├── jenkins-agent.yml
├── docker.yml
├── ansible.cfg
├── elk.yml
├── registry.yml
└── jenkins.yml
├── conf
├── Dockerfile.logstash
├── Dockerfile.prometheus
├── Dockerfile.mongo
├── logstash.conf
├── prometheus.yml
├── init-mongo-rs.sh
└── logstash-proxy.conf
├── terraform
├── azure
│ ├── scripts
│ │ ├── get-internal-ip.sh
│ │ ├── init-swarm-mode.sh
│ │ └── deploy-swarmmode.sh
│ ├── variables.tf
│ ├── README.md
│ └── swarm.tf
├── aws-full
│ ├── rexray.tpl
│ ├── docker.tf
│ ├── docker.service
│ ├── packer-ubuntu-docker-compose.json
│ ├── variables.tf
│ ├── packer-ubuntu-docker.json
│ ├── packer-ubuntu-docker-rexray.json
│ ├── test-swarm.tf
│ ├── swarm.tf
│ └── common.tf
├── azure-acs
│ ├── README.md
│ ├── variables.tf
│ └── swarm.tf
├── scw
│ ├── variables.tf
│ ├── init.sh
│ └── swarm.tf
├── variables.tf
├── aws
│ ├── variables.tf
│ ├── packer-ubuntu-docker.json
│ ├── common.tf
│ └── swarm.tf
├── do
│ ├── variables.tf
│ ├── packer-ubuntu-docker.json
│ ├── common.tf
│ └── swarm.tf
├── packer-ubuntu-docker.json
├── common.tf
└── swarm.tf
├── compose
├── prometheus2.yml
├── jenkins.yml
├── registry.yml
├── registry-rexray-stack.yml
├── jenkins-agent.yml
├── consul.yml
├── elk.yml
├── prometheus.yml
└── logstash.conf
├── .gitignore
├── scripts
├── dm-swarm-single.sh
├── go-demo-stages.groovy
├── go-demo-pull.groovy
├── bootstrap_ansible.sh
├── dm-test-swarm.sh
├── swarm-services-2.sh
├── dm-swarm.sh
├── dm-test-swarm-2.sh
├── rtime_down.sh
├── rtime_up.sh
├── dm-swarm-5.sh
├── swarm-services-3.sh
├── aws-swarm-services.sh
├── dm-swarm-services-4.sh
├── dm-swarm-services-3.sh
├── swarm-services-uber.sh
├── swarm-services-1.sh
├── go-demo.groovy
├── dm-swarm-services.sh
├── dm-test-swarm-services.sh
├── dm-swarm-services-2.sh
├── dm-test-swarm-services-2.sh
├── dm-swarm-services-elk.sh
└── aws-swarm-creator.sh
├── Vagrantfile
└── rexray.md
/ansible/roles/java/defaults/main.yml:
--------------------------------------------------------------------------------
1 | yum_packages:
2 |   - wget
--------------------------------------------------------------------------------
/ansible/roles/containers/defaults/main.yml:
--------------------------------------------------------------------------------
1 | container_volume_mode: "0777"
--------------------------------------------------------------------------------
/ansible/roles/docker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | debian_version: xenial
2 | release: main
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/templates/10-logstash.conf:
--------------------------------------------------------------------------------
1 | *.* @@{{ elk_ip }}:25826
2 |
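3 | # "@@host:port" forwards all messages over TCP; a single "@" would forward
4 | # over UDP (standard rsyslog forwarding syntax). {{ elk_ip }} is filled in by Ansible.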
--------------------------------------------------------------------------------
/ansible/roles/nfs/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: NFS is present
2 |   apt:
3 |     name: nfs-common
--------------------------------------------------------------------------------
/conf/Dockerfile.logstash:
--------------------------------------------------------------------------------
1 | FROM logstash:2.4
2 | 
3 | COPY logstash.conf /conf/logstash.conf
--------------------------------------------------------------------------------
/ansible/base.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 |   roles:
3 |     - docker
4 |     - docker-compose
5 |     - nfs
6 |
--------------------------------------------------------------------------------
/conf/Dockerfile.prometheus:
--------------------------------------------------------------------------------
1 | FROM prom/prometheus:v1.2.1
2 | 
3 | COPY prometheus.yml /etc/prometheus/prometheus.yml
--------------------------------------------------------------------------------
/ansible/jenkins-agent.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 |   roles:
3 |     - docker
4 |     - docker-compose
5 |     # - users
6 |     - java
--------------------------------------------------------------------------------
/terraform/azure/scripts/get-internal-ip.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'
--------------------------------------------------------------------------------
/ansible/docker.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 |   vars:
3 |     - project: proxy
4 |   roles:
5 |     - docker
6 |     - docker-compose
7 |     - users
8 |
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | host_key_checking=False
3 | 
4 | [privilege_escalation]
5 | become=True
6 | become_method=sudo
7 | become_user=root
8 |
--------------------------------------------------------------------------------
/conf/Dockerfile.mongo:
--------------------------------------------------------------------------------
1 | FROM mongo:3.2.10
2 | 
3 | COPY init-mongo-rs.sh /init-mongo-rs.sh
4 | RUN chmod +x /init-mongo-rs.sh
5 | ENTRYPOINT ["/init-mongo-rs.sh"]
--------------------------------------------------------------------------------
/ansible/roles/users/defaults/main.yml:
--------------------------------------------------------------------------------
1 | user: devops
2 | password: $6$rounds=656000$zkqHigPRNWxmm0Gk$ZSGPELWBQ7EPly6hoYUf/WbORz3Ell.IKrb6CXzrHNpKG/fQH9c2LmfE7KQ9CvSITrgNtoH1Qxl2rRGBaMjW31
--------------------------------------------------------------------------------
/terraform/azure/scripts/init-swarm-mode.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | IP=$(/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')
3 | docker swarm init --advertise-addr $IP
4 |
--------------------------------------------------------------------------------
/ansible/elk.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 |   vars:
3 |     - project: elk
4 |   roles:
5 |     - docker
6 |     - docker-compose
7 |     - logstash
8 |     - containers
9 |     - users
10 |
--------------------------------------------------------------------------------
/ansible/registry.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 |   vars:
3 |     - project: registry
4 |   roles:
5 |     - nfs
6 |     - docker
7 |     - docker-compose
8 |     - containers
9 |     # - users
10 |
--------------------------------------------------------------------------------
/compose/prometheus2.yml:
--------------------------------------------------------------------------------
1 | scrape_configs:
2 |   - job_name: 'go-demo'
3 | 
4 |     scrape_interval: 5s
5 | 
6 |     static_configs:
7 |       - targets: ['localhost:8080']
8 |         labels:
9 |           group: 'production'
10 |
--------------------------------------------------------------------------------
/compose/jenkins.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | 
3 | services:
4 | 
5 |   jenkins:
6 |     container_name: jenkins
7 |     image: jenkins/jenkins:2.161-alpine
8 |     ports:
9 |       - 80:8080
10 |       - 50000:50000
11 |     restart: always
12 |
--------------------------------------------------------------------------------
/ansible/roles/docker-compose/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Executable is present
2 |   get_url:
3 |     url: https://github.com/docker/compose/releases/download/1.7.0/docker-compose-Linux-x86_64
4 |     dest: /usr/local/bin/docker-compose
5 |     mode: 0755
6 |
--------------------------------------------------------------------------------
/conf/logstash.conf:
--------------------------------------------------------------------------------
1 | input {
2 |   syslog { port => 51415 }
3 | }
4 | 
5 | output {
6 |   elasticsearch {
7 |     hosts => ["elasticsearch:9200"]
8 |   }
9 |   # Remove in production
10 |   stdout {
11 |     codec => rubydebug
12 |   }
13 | }
14 |
--------------------------------------------------------------------------------
/compose/registry.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | 
3 | services:
4 | 
5 |   registry:
6 |     container_name: registry
7 |     image: registry
8 |     ports:
9 |       - 5000:5000
10 |     volumes:
11 |       - ../.:/var/lib/registry
12 |     restart: always
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
2 | /*.iml
3 | .vagrant
4 | /packer/*.env
5 | /packer/packer_cache
6 | secrets*
7 | *.tfstate.backup
8 | *.pem
9 | *.retry
10 | /go-demo
11 | /docker
12 | /docker*.tgz
13 | /terraform/do/devops21-do*
14 | /terraform/do/graph.png
15 | *.log
--------------------------------------------------------------------------------
/ansible/roles/logstash/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Directory is present
2 |   file:
3 |     path: /data/logstash/conf
4 |     state: directory
5 | 
6 | - name: Config is present
7 |   copy:
8 |     src: syslog-collectd.conf
9 |     dest: /data/logstash/conf/syslog-collectd.conf
--------------------------------------------------------------------------------
/ansible/roles/jenkins-agent/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Swarm client JAR is present
2 |   get_url:
3 |     url: http://repo.jenkins-ci.org/releases/org/jenkins-ci/plugins/swarm-client/2.2/swarm-client-2.2-jar-with-dependencies.jar
4 |     dest: /var/lib/swarm-client-2.2-jar-with-dependencies.jar
5 |     mode: 0644
--------------------------------------------------------------------------------
/ansible/roles/containers/files/registry.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | 
3 | services:
4 | 
5 |   registry:
6 |     container_name: registry
7 |     image: registry:2
8 |     ports:
9 |       - 5000:5000
10 |     volumes:
11 |       - /data/registry:/var/lib/registry/docker/registry
12 |     restart: always
13 |
--------------------------------------------------------------------------------
/scripts/dm-swarm-single.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | docker-machine create \
4 |     -d virtualbox \
5 |     swarm
6 | 
7 | eval $(docker-machine env swarm)
8 | 
9 | docker swarm init \
10 |     --advertise-addr $(docker-machine ip swarm)
11 | 
12 | echo ">> The swarm cluster is up and running"
13 |
--------------------------------------------------------------------------------
/scripts/go-demo-stages.groovy:
--------------------------------------------------------------------------------
1 | node("docker") {
2 | 
3 |     stage("Pull") {
4 |     }
5 | 
6 |     stage("Unit") {
7 |     }
8 | 
9 |     stage("Staging") {
10 |     }
11 | 
12 |     stage("Publish") {
13 |     }
14 | 
15 |     stage("Prod-like") {
16 |     }
17 | 
18 |     stage("Production") {
19 |     }
20 | 
21 | }
22 |
--------------------------------------------------------------------------------
/ansible/jenkins.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 |   vars:
3 |     - project: jenkins
4 |     - container_volume: /data/jenkins
5 |     - container_volume_mode: "0777"
6 |     - do_not_start_container: yes
7 |   roles:
8 |     - docker
9 |     - docker-compose
10 |     - containers
11 |     # - users
12 |     - nfs
13 |
--------------------------------------------------------------------------------
/ansible/roles/containers/files/jenkins.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | 
3 | services:
4 | 
5 |   jenkins:
6 |     container_name: jenkins
7 |     image: jenkins/jenkins:2.161-alpine
8 |     ports:
9 |       - 80:8080
10 |       - 50000:50000
11 |     volumes:
12 |       - /data/jenkins:/var/jenkins_home
13 |     restart: always
14 |
--------------------------------------------------------------------------------
/compose/registry-rexray-stack.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | 
3 | services:
4 | 
5 |   main:
6 |     image: registry
7 |     ports:
8 |       - "5000:5000"
9 |     volumes:
10 |       - main:/var/lib/registry
11 |     deploy:
12 |       resources:
13 |         reservations:
14 |           memory: 100M
15 | 
16 | volumes:
17 |   main:
18 |     driver: rexray/efs
--------------------------------------------------------------------------------
/scripts/go-demo-pull.groovy:
--------------------------------------------------------------------------------
1 | node("docker") {
2 | 
3 |     stage("Pull") {
4 |         git "https://github.com/vfarcic/go-demo.git"
5 |     }
6 | 
7 |     stage("Unit") {
8 |     }
9 | 
10 |     stage("Staging") {
11 |     }
12 | 
13 |     stage("Publish") {
14 |     }
15 | 
16 |     stage("Prod-like") {
17 |     }
18 | 
19 |     stage("Production") {
20 |     }
21 | 
22 | }
23 |
--------------------------------------------------------------------------------
/terraform/aws-full/rexray.tpl:
--------------------------------------------------------------------------------
1 | libstorage:
2 |   service: efs
3 | server:
4 |   services:
5 |     efs:
6 |       driver: efs
7 |       efs:
8 |         accessKey: ${aws_access_key}
9 |         secretKey: ${aws_secret_key}
10 |         region: ${aws_default_region}
11 |         securityGroups: ${aws_security_group}
12 |         tag: rexray
13 |
--------------------------------------------------------------------------------
/conf/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 |   scrape_interval: 5s
3 | 
4 | scrape_configs:
5 |   - job_name: 'node'
6 |     dns_sd_configs:
7 |     - names: ['tasks.node-exporter']
8 |       type: A
9 |       port: 9100
10 |   - job_name: 'cadvisor'
11 |     dns_sd_configs:
12 |     - names: ['tasks.cadvisor']
13 |       type: A
14 |       port: 8080
15 |   - job_name: 'prometheus'
16 |     static_configs:
17 |       - targets: ['localhost:9090']
18 |
--------------------------------------------------------------------------------
/terraform/azure-acs/README.md:
--------------------------------------------------------------------------------
1 | # Terraform - Azure ACS
2 | 
3 | Based on the example from the [Terraform documentation](https://www.terraform.io/docs/providers/azurerm/r/container_service.html).
4 | 
5 | Creates a setup based upon the "Docker for Azure" configuration.
6 | 
7 | ACS stands for Azure Container Service, and is just another name for Docker for Azure.
8 | 
9 | A better way to understand how the setup works is to look at [ACS Engine](https://github.com/Azure/acs-engine).
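10 | 
11 | A minimal workflow to try it out might look like the sketch below (hypothetical
12 | values, and it assumes `azurerm` provider credentials are already configured;
13 | note that `swarm.tf` reuses the `swarm_manager_token` variable as the SSH public
14 | key for the `linux_profile`):
15 | 
16 |     terraform init
17 |     terraform apply \
18 |         -var personal_prefix=myprefix \
19 |         -var swarm_manager_token="$(cat ~/.ssh/id_rsa.pub)"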
--------------------------------------------------------------------------------
/terraform/azure-acs/variables.tf:
--------------------------------------------------------------------------------
1 | variable "swarm_manager_token" {
2 |   default = ""
3 | }
4 | 
5 | variable "swarm_worker_token" {
6 |   default = ""
7 | }
8 | 
9 | variable "swarm_managers" {
10 |   default = 3
11 | }
12 | variable "swarm_workers" {
13 |   default = 2
14 | }
15 | 
16 | variable "swarm_worker_type" {
17 |   default = "Standard_A1"
18 | }
19 | 
20 | variable "personal_prefix" {
21 |   default = ""
22 | }
23 | 
24 | variable "location" {
25 |   default = "West Europe"
26 | }
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Packages are present
2 |   apt:
3 |     name: "{{ item }}"
4 |     state: latest
5 |     install_recommends: no
6 |   with_items:
7 |     - rsyslog
8 |     - logrotate
9 | 
10 | - name: Config file is present
11 |   template:
12 |     src: 10-logstash.conf
13 |     dest: /etc/rsyslog.d/10-logstash.conf
14 |   register: config_result
15 | 
16 | - name: Service is restarted
17 |   shell: service rsyslog restart
18 |   when: config_result.changed
19 |
--------------------------------------------------------------------------------
/terraform/scw/variables.tf:
--------------------------------------------------------------------------------
1 | variable "instance_type" {
2 |   default = "VC1S"
3 | }
4 | variable "region" {
5 |   default = "ams1"
6 | }
7 | variable "managers" {
8 |   default = "1"
9 | }
10 | variable "workers" {
11 |   default = "3"
12 | }
13 | variable "swarm_init" {
14 |   default = "false"
15 | }
16 | variable "swarm_manager_ip" {
17 |   default = ""
18 | }
19 | variable "swarm_manager_token" {
20 |   default = ""
21 | }
22 | variable "swarm_worker_token" {
23 |   default = ""
24 | }
25 |
--------------------------------------------------------------------------------
/terraform/variables.tf:
--------------------------------------------------------------------------------
1 | variable "swarm_manager_token" {
2 |   default = ""
3 | }
4 | variable "swarm_worker_token" {
5 |   default = ""
6 | }
7 | variable "swarm_ami_id" {
8 |   default = "unknown"
9 | }
10 | variable "swarm_manager_ip" {
11 |   default = ""
12 | }
13 | variable "swarm_managers" {
14 |   default = 3
15 | }
16 | variable "swarm_workers" {
17 |   default = 2
18 | }
19 | variable "swarm_instance_type" {
20 |   default = "t2.micro"
21 | }
22 | variable "swarm_init" {
23 |   default = false
24 | }
25 |
--------------------------------------------------------------------------------
/terraform/aws/variables.tf:
--------------------------------------------------------------------------------
1 | variable "swarm_manager_token" {
2 |   default = ""
3 | }
4 | variable "swarm_worker_token" {
5 |   default = ""
6 | }
7 | variable "swarm_ami_id" {
8 |   default = "unknown"
9 | }
10 | variable "swarm_manager_ip" {
11 |   default = ""
12 | }
13 | variable "swarm_managers" {
14 |   default = 3
15 | }
16 | variable "swarm_workers" {
17 |   default = 2
18 | }
19 | variable "swarm_instance_type" {
20 |   default = "t2.micro"
21 | }
22 | variable "swarm_init" {
23 |   default = false
24 | }
25 |
--------------------------------------------------------------------------------
/compose/jenkins-agent.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | 
3 | services:
4 | 
5 |   jenkins-agent:
6 |     container_name: jenkins-agent
7 |     image: vfarcic/jenkins-swarm-agent
8 |     privileged: true
9 |     environment:
10 |       - COMMAND_OPTIONS=-master ${JENKINS_ADDRESS} -username ${USER} -password ${PASSWORD}
11 |     volumes:
12 |       - /var/run/docker.sock:/var/run/docker.sock
13 |       - /workspace:/workspace
14 |       - $HOME/.docker/machine/machines:/machines
15 |     network_mode: host
16 |     restart: always
17 |
--------------------------------------------------------------------------------
/terraform/aws-full/docker.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "ci" {
2 |   count = "${var.ci_count}"
3 |   ami = "${var.ci_ami_id}"
4 |   instance_type = "${var.ci_instance_type}"
5 |   tags {
6 |     Name = "ci"
7 |   }
8 |   vpc_security_group_ids = [
9 |     "${aws_security_group.docker.id}"
10 |   ]
11 |   key_name = "devops21"
12 |   connection {
13 |     user = "ubuntu"
14 |     private_key = "${file("devops21.pem")}"
15 |   }
16 | }
17 | 
18 | output "ci_public_ip" {
19 |   value = "${aws_instance.ci.0.public_ip}"
20 | }
21 | 
22 |
--------------------------------------------------------------------------------
/terraform/aws-full/docker.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Docker Application Container Engine
3 | Documentation=https://docs.docker.com
4 | After=network.target docker.socket
5 | Requires=docker.socket
6 | 
7 | [Service]
8 | Type=notify
9 | ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2375
10 | ExecReload=/bin/kill -s HUP $MAINPID
11 | LimitNOFILE=infinity
12 | LimitNPROC=infinity
13 | LimitCORE=infinity
14 | TasksMax=infinity
15 | TimeoutStartSec=0
16 | Delegate=yes
17 | KillMode=process
18 | 
19 | [Install]
20 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/terraform/scw/init.sh:
--------------------------------------------------------------------------------
1 | export SCALEWAY_ORGANIZATION="..."
2 | export SCALEWAY_TOKEN="..."
3 | terraform apply -target=scaleway_server.manager -target=scaleway_ip.manager_ip -var managers=1 -var swarm_init=true
4 | terraform refresh
5 | export TF_VAR_swarm_worker_token=$(ssh root@$(terraform output manager_external) docker swarm join-token -q worker)
6 | export TF_VAR_swarm_manager_token=$(ssh root@$(terraform output manager_external) docker swarm join-token -q manager)
7 | export TF_VAR_swarm_manager_ip=$(terraform output manager_internal)
8 | terraform apply
9 |
--------------------------------------------------------------------------------
/ansible/roles/java/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Repository is added
2 |   apt_repository:
3 |     repo: ppa:webupd8team/java
4 |   register: repository_result
5 | 
6 | - name: Repositories are updated
7 |   apt:
8 |     update_cache: yes
9 |   when: repository_result|changed
10 | 
11 | - name: Accept licence
12 |   shell: echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true \
13 |     | sudo /usr/bin/debconf-set-selections
14 |   when: repository_result|changed
15 | 
16 | - name: Package is installed
17 |   apt:
18 |     name: oracle-java8-installer
19 |
--------------------------------------------------------------------------------
/terraform/do/variables.tf:
--------------------------------------------------------------------------------
1 | variable "swarm_manager_token" {
2 |   default = ""
3 | }
4 | variable "swarm_worker_token" {
5 |   default = ""
6 | }
7 | variable "swarm_snapshot_id" {
8 |   default = "unknown"
9 | }
10 | variable "swarm_manager_ip" {
11 |   default = ""
12 | }
13 | variable "swarm_managers" {
14 |   default = 3
15 | }
16 | variable "swarm_workers" {
17 |   default = 2
18 | }
19 | variable "swarm_region" {
20 |   default = "sfo2"
21 | }
22 | variable "swarm_instance_size" {
23 |   default = "1gb"
24 | }
25 | variable "swarm_init" {
26 |   default = false
27 | }
28 |
--------------------------------------------------------------------------------
/compose/consul.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | 
3 | services:
4 | 
5 |   consul-server:
6 |     container_name: consul
7 |     image: consul
8 |     network_mode: host
9 |     environment:
10 |       - 'CONSUL_LOCAL_CONFIG={"skip_leave_on_interrupt": true}'
11 |     command: agent -server -bind=$HOST_IP -bootstrap-expect=1 -client=$HOST_IP -ui
12 | 
13 |   consul-agent:
14 |     container_name: consul
15 |     image: consul
16 |     network_mode: host
17 |     environment:
18 |       - 'CONSUL_LOCAL_CONFIG={"leave_on_terminate": true}'
19 |     command: agent -bind=$DOCKER_IP -retry-join=$CONSUL_SERVER_IP -client=$HOST_IP
20 |
--------------------------------------------------------------------------------
/terraform/azure/variables.tf:
--------------------------------------------------------------------------------
1 | variable "resource-group" {
2 |   default = ""
3 | }
4 | 
5 | variable "prefix" {
6 |   default = ""
7 | }
8 | 
9 | variable "ssh" {
10 |   default = ""
11 | }
12 | 
13 | variable "admin" {
14 |   default = "swarmadmin"
15 | }
16 | 
17 | variable "vm_size_manager" {
18 |   default = "Standard_D1"
19 | }
20 | 
21 | variable "vm_size_worker" {
22 |   default = "Standard_A2"
23 | }
24 | 
25 | variable "manager-base-name" {
26 |   default = "manager"
27 | }
28 | 
29 | variable "worker-base-name" {
30 |   default = "worker"
31 | }
32 | 
33 | variable "resource-base-name" {
34 |   default = ""
35 | }
--------------------------------------------------------------------------------
/conf/init-mongo-rs.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | for rs in "$@"; do
4 |     mongo --host $rs --eval 'db'
5 |     while [ $? -ne 0 ]; do
6 |         echo "Waiting for $rs to become available"
7 |         sleep 3
8 |         mongo --host $rs --eval 'db'
9 |     done
10 | done
11 | 
12 | i=0
13 | for rs in "$@"; do
14 |     if [ "$rs" != "$1" ]; then
15 |         MEMBERS="$MEMBERS ,"
16 |     fi
17 |     MEMBERS="$MEMBERS {_id: $i, host: \"$rs\" }"
18 |     i=$((i+1))
19 | done
20 | 
21 | mongo --host $1 --eval "rs.initiate({_id: \"rs0\", version: 1, members: [$MEMBERS]})"
22 | sleep 3
23 | mongo --host $1 --eval 'rs.status()'
--------------------------------------------------------------------------------
/scripts/bootstrap_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | set -e
4 | 
5 | echo "Installing Ansible..."
6 | apt-get install -y software-properties-common
7 | apt-add-repository ppa:ansible/ansible
8 | apt-get update
9 | apt-get install -y --force-yes ansible
10 | cp /vagrant/ansible/ansible.cfg /etc/ansible/ansible.cfg
11 | 
12 | #apt-get update -y
13 | #apt-get install -y python-pip python-dev
14 | #pip install ansible==1.9.2
15 | #mkdir -p /etc/ansible
16 | #touch /etc/ansible/hosts
17 | #cp /vagrant/ansible/ansible.cfg /etc/ansible/ansible.cfg
18 | #mkdir -p /etc/ansible/callback_plugins/
19 | #cp /vagrant/ansible/plugins/human_log.py /etc/ansible/callback_plugins/human_log.py
20 |
--------------------------------------------------------------------------------
/compose/elk.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | 
3 | services:
4 | 
5 |   elasticsearch:
6 |     container_name: elasticsearch
7 |     image: elasticsearch
8 |     ports:
9 |       - 9200
10 |       - 9300
11 |     volumes:
12 |       - /data/elasticsearch:/usr/share/elasticsearch/data
13 |     restart: always
14 | 
15 |   logstash:
16 |     container_name: logstash
17 |     image: logstash
18 |     ports:
19 |       - 25826:25826
20 |       - 25827:25827
21 |     volumes:
22 |       - /data/logstash/conf:/conf
23 |     command: logstash -f /conf/syslog-collectd.conf
24 |     restart: always
25 | 
26 |   kibana:
27 |     container_name: kibana
28 |     image: kibana
29 |     ports:
30 |       - 80:5601
31 |     restart: always
32 |
--------------------------------------------------------------------------------
/ansible/roles/logstash/files/syslog-collectd.conf:
--------------------------------------------------------------------------------
1 | input {
2 |   syslog {
3 |     type => syslog
4 |     port => 25826
5 |   }
6 |   udp {
7 |     port => 25827
8 |     buffer_size => 1452
9 |     codec => collectd { }
10 |     type => collectd
11 |   }
12 | }
13 | 
14 | filter {
15 |   if "docker/" in [program] {
16 |     mutate {
17 |       add_field => {
18 |         "container_id" => "%{program}"
19 |       }
20 |     }
21 |     mutate {
22 |       gsub => [
23 |         "container_id", "docker/", ""
24 |       ]
25 |     }
26 |     mutate {
27 |       update => [
28 |         "program", "docker"
29 |       ]
30 |     }
31 |   }
32 | }
33 | 
34 | output {
35 |   elasticsearch {
36 |     hosts => ["elasticsearch:9200"]
37 |   }
38 | }
--------------------------------------------------------------------------------
/ansible/roles/containers/files/elk.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | 
3 | services:
4 | 
5 |   elasticsearch:
6 |     container_name: elasticsearch
7 |     image: elasticsearch
8 |     ports:
9 |       - 9200
10 |       - 9300
11 |     volumes:
12 |       - /data/elasticsearch:/usr/share/elasticsearch/data
13 |     restart: always
14 | 
15 |   logstash:
16 |     container_name: logstash
17 |     image: logstash
18 |     ports:
19 |       - 25826:25826
20 |       - 25827:25827
21 |     volumes:
22 |       - /data/logstash/conf:/conf
23 |     command: logstash -f /conf/syslog-collectd.conf
24 |     restart: always
25 | 
26 |   kibana:
27 |     container_name: kibana
28 |     image: kibana
29 |     ports:
30 |       - 80:5601
31 |     restart: always
32 |
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | 
4 | Vagrant.configure(2) do |config|
5 |   if (/cygwin|mswin|mingw|bccwin|wince|emx/ =~ RUBY_PLATFORM) != nil
6 |     config.vm.synced_folder ".", "/vagrant", mount_options: ["dmode=700,fmode=600"]
7 |   else
8 |     config.vm.synced_folder ".", "/vagrant"
9 |   end
10 |   (1..3).each do |i|
11 |     config.vm.define "swarm-node-#{i}" do |d|
12 |       d.vm.box = "ubuntu/xenial64"
13 |       d.vm.hostname = "swarm-node-#{i}"
14 |       d.vm.network "private_network", ip: "10.100.192.20#{i}"
15 |       d.vm.provider "virtualbox" do |v|
16 |         v.memory = 1024
17 |       end
18 |     end
19 |   end
20 |   if Vagrant.has_plugin?("vagrant-cachier")
21 |     config.cache.scope = :box
22 |   end
23 | end
--------------------------------------------------------------------------------
/ansible/roles/users/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create users
2 |   user:
3 |     name: "{{ user }}"
4 |     password: "{{ password }}"
5 |     shell: /bin/bash
6 |     group: docker
7 |     groups: sudo
8 |     append: yes
9 | 
10 | - name: User is password-less sudoer
11 |   lineinfile: "dest=/etc/sudoers.d/90-cloud-init-users state=present regexp='^%{{ user }}' line='{{ user }} ALL=(ALL) NOPASSWD:ALL'"
12 | 
13 | - name: Allow password authentication
14 |   lineinfile:
15 |     dest: /etc/ssh/sshd_config
16 |     regexp: "PasswordAuthentication no"
17 |     line: "PasswordAuthentication yes"
18 |   register: authentication_result
19 | 
20 | - name: SSH is restarted
21 |   service:
22 |     name: ssh
23 |     state: restarted
24 |   when: authentication_result|changed
--------------------------------------------------------------------------------
/compose/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 |   scrape_interval: 15s # By default, scrape targets every 15 seconds.
3 | 
4 |   # Attach these labels to any time series or alerts when communicating with
5 |   # external systems (federation, remote storage, Alertmanager).
6 |   external_labels:
7 |     monitor: 'codelab-monitor'
8 | 
9 | # A scrape configuration containing exactly one endpoint to scrape:
10 | # Here it's Prometheus itself.
11 | scrape_configs:
12 |   # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
13 |   - job_name: 'prometheus'
14 | 
15 |     # Override the global default and scrape targets from this job every 5 seconds.
16 |     scrape_interval: 5s
17 | 
18 |     static_configs:
19 |       - targets: ['localhost:9090']
--------------------------------------------------------------------------------
/ansible/roles/containers/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Directory is present
2 |   file:
3 |     path: "/composes/{{ project }}"
4 |     state: directory
5 | 
6 | - name: Volume is present
7 |   file:
8 |     path: "{{ container_volume }}"
9 |     mode: "{{ container_volume_mode }}"
10 |     state: directory
11 |   when: container_volume is defined
12 | 
13 | - name: Compose is present
14 |   copy:
15 |     src: "{{ project }}.yml"
16 |     dest: "/composes/{{ project }}/docker-compose.yml"
17 | 
18 | - name: Containers are pulled
19 |   command: docker-compose pull
20 |   args:
21 |     chdir: "/composes/{{ project }}"
22 | 
23 | - name: Containers are running
24 |   command: docker-compose up -d
25 |   args:
26 |     chdir: "/composes/{{ project }}"
27 |   when: not do_not_start_container is defined
28 |
--------------------------------------------------------------------------------
/scripts/dm-test-swarm.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | if [[ "$(uname -s )" == "Linux" ]]; then
4 |     export VIRTUALBOX_SHARE_FOLDER="$PWD:$PWD"
5 | fi
6 | 
7 | for i in 1 2 3; do
8 |     docker-machine create \
9 |         -d virtualbox \
10 |         swarm-test-$i
11 | done
12 | 
13 | eval $(docker-machine env swarm-test-1)
14 | 
15 | docker swarm init \
16 |     --advertise-addr $(docker-machine ip swarm-test-1)
17 | 
18 | TOKEN=$(docker swarm join-token -q manager)
19 | 
20 | for i in 2 3; do
21 |     eval $(docker-machine env swarm-test-$i)
22 | 
23 |     docker swarm join \
24 |         --token $TOKEN \
25 |         --advertise-addr $(docker-machine ip swarm-test-$i) \
26 |         $(docker-machine ip swarm-test-1):2377
27 | done
28 | 
29 | echo ">> The swarm test cluster is up and running"
30 |
--------------------------------------------------------------------------------
/terraform/do/packer-ubuntu-docker.json:
--------------------------------------------------------------------------------
1 | {
2 |   "builders": [{
3 |     "type": "digitalocean",
4 |     "region": "sfo2",
5 |     "image": "ubuntu-16-04-x64",
6 |     "size": "1gb",
7 |     "private_networking": true,
8 |     "snapshot_name": "devops21-{{timestamp}}",
9 |     "ssh_username": "root"
10 |   }],
11 |   "provisioners": [{
12 |     "type": "shell",
13 |     "inline": [
14 |       "sudo apt-get update",
15 |       "sudo apt-get install -y apt-transport-https ca-certificates nfs-common",
16 |       "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -",
17 |       "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"",
18 |       "sudo apt-get update",
19 |       "sudo apt-get install -y docker-ce"
20 |     ]
21 |   }]
22 | }
--------------------------------------------------------------------------------
/ansible/roles/docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Packages are present
2 |   apt:
3 |     name: "{{ item }}"
4 |     update_cache: yes
5 |   with_items:
6 |     - apt-transport-https
7 |     - ca-certificates
8 | 
9 | - name: Docker repository is added and cache is updated
10 |   apt_repository:
11 |     repo: "deb https://apt.dockerproject.org/repo ubuntu-{{ debian_version }} {{ release }}"
12 |     update_cache: yes
13 |     state: present
14 | 
15 | - name: Docker Engine is present
16 |   apt:
17 |     name: docker-engine
18 |     update_cache: yes
19 |     force: yes
20 | 
21 | - name: User is added to the docker group
22 |   user:
23 |     name: ubuntu
24 |     group: docker
25 |   register: user_result
26 | 
27 | - name: Debian Docker service is restarted
28 |   service:
29 |     name: docker
30 |     state: restarted
31 |   when: user_result|changed
32 |
--------------------------------------------------------------------------------
/scripts/swarm-services-2.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | docker network create --driver overlay proxy
4 | 
5 | docker service create --name proxy \
6 |     -p 80:80 \
7 |     -p 443:443 \
8 |     --reserve-memory 10m \
9 |     --network proxy \
10 |     --replicas 3 \
11 |     -e MODE=swarm \
12 |     -e LISTENER_ADDRESS=swarm-listener \
13 |     vfarcic/docker-flow-proxy
14 | 
15 | docker service create --name swarm-listener \
16 |     --network proxy \
17 |     --reserve-memory 10m \
18 |     --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \
19 |     -e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \
20 |     -e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \
21 |     --constraint 'node.role==manager' \
22 |     vfarcic/docker-flow-swarm-listener
23 | 
24 | echo ""
25 | echo ">> The scheduled services will be up and running soon"
26 |
--------------------------------------------------------------------------------
/scripts/dm-swarm.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | if [[ "$(uname -s )" == "Linux" ]]; then
4 |     export VIRTUALBOX_SHARE_FOLDER="$PWD:$PWD"
5 | fi
6 | 
7 | for i in 1 2 3; do
8 |     docker-machine create \
9 |         -d virtualbox \
10 |         swarm-$i
11 | done
12 | 
13 | eval $(docker-machine env swarm-1)
14 | 
15 | docker swarm init \
16 |     --advertise-addr $(docker-machine ip swarm-1)
17 | 
18 | TOKEN=$(docker swarm join-token -q manager)
19 | 
20 | for i in 2 3; do
21 |     eval $(docker-machine env swarm-$i)
22 | 
23 |     docker swarm join \
24 |         --token $TOKEN \
25 |         --advertise-addr $(docker-machine ip swarm-$i) \
26 |         $(docker-machine ip swarm-1):2377
27 | done
28 | 
29 | for i in 1 2 3; do
30 |     eval $(docker-machine env swarm-$i)
31 | 
32 |     docker node update \
33 |         --label-add env=prod \
34 |         swarm-$i
35 | done
36 | 
37 | echo ">> The swarm cluster is up and running"
38 |
--------------------------------------------------------------------------------
/terraform/do/common.tf:
--------------------------------------------------------------------------------
1 | resource "digitalocean_ssh_key" "docker" {
2 |   name = "devops21-do"
3 |   public_key = "${file("devops21-do.pub")}"
4 | }
5 | 
6 | resource "digitalocean_floating_ip" "docker_1" {
7 |   droplet_id = "${digitalocean_droplet.swarm-manager.0.id}"
8 |   region = "${var.swarm_region}"
9 | }
10 | 
11 | resource "digitalocean_floating_ip" "docker_2" {
12 |   droplet_id = "${digitalocean_droplet.swarm-manager.1.id}"
13 |   region = "${var.swarm_region}"
14 | }
15 | 
16 | resource "digitalocean_floating_ip" "docker_3" {
17 |   droplet_id = "${digitalocean_droplet.swarm-manager.2.id}"
18 |   region = "${var.swarm_region}"
19 | }
20 | 
21 | output "floating_ip_1" {
22 |   value = "${digitalocean_floating_ip.docker_1.ip_address}"
23 | }
24 | 
25 | output "floating_ip_2" {
26 |   value = "${digitalocean_floating_ip.docker_2.ip_address}"
27 | }
28 | 
29 | output "floating_ip_3" {
30 |   value = "${digitalocean_floating_ip.docker_3.ip_address}"
31 | }
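32 | 
33 | # Once `terraform apply` completes, the reserved addresses above can be read
34 | # back with `terraform output floating_ip_1` (likewise _2 and _3).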
--------------------------------------------------------------------------------
/scripts/dm-test-swarm-2.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | if [[ "$(uname -s )" == "Linux" ]]; then
4 |     export VIRTUALBOX_SHARE_FOLDER="$PWD:$PWD"
5 | fi
6 | 
7 | for i in 1 2 3; do
8 |     docker-machine create \
9 |         -d virtualbox \
10 |         swarm-test-$i
11 | done
12 | 
13 | eval $(docker-machine env swarm-test-1)
14 | 
15 | docker swarm init \
16 |     --advertise-addr $(docker-machine ip swarm-test-1)
17 | 
18 | docker node update \
19 |     --label-add env=jenkins-agent \
20 |     swarm-test-1
21 | 
22 | TOKEN=$(docker swarm join-token -q manager)
23 | 
24 | for i in 2 3; do
25 |     eval $(docker-machine env swarm-test-$i)
26 | 
27 |     docker swarm join \
28 |         --token $TOKEN \
29 |         --advertise-addr $(docker-machine ip swarm-test-$i) \
30 |         $(docker-machine ip swarm-test-1):2377
31 | 
32 |     docker node update \
33 |         --label-add env=prod-like \
34 |         swarm-test-$i
35 | done
36 | 
37 | echo ">> The swarm test cluster is up and running"
38 |
--------------------------------------------------------------------------------
/scripts/rtime_down.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | SERVICE=$1
4 | MAX_MILLIS_WARN=$2
5 | MAX_MILLIS_ERR=$3
6 | 
7 | RTIME=$(curl http://{{ elk_ip }}:9200/logstash-*/_search -d "
8 | {
9 |   \"size\" : 0,
10 |   \"query\": {
11 |     \"bool\": {
12 |       \"must\": { \"match\": { \"tags\" : \"haproxy_stats\" } },
13 |       \"must\": { \"match\": { \"haproxy_stats.svname\" : \"BACKEND\" } },
14 |       \"must\": { \"match\": { \"haproxy_stats.pxname.raw\" : \"${SERVICE}-be\" } },
15 |       \"must\": { \"range\": { \"@timestamp\": { \"gt\" : \"now-1h\" } } }
16 |     }
17 |   },
18 |   \"aggs\" : {
19 |     \"avg_rtime\" : {
20 |       \"avg\": { \"field\": \"haproxy_stats.rtime\" }
21 |     }
22 |   }
23 | }" | jq '.aggregations.avg_rtime.value')
24 | 
25 | RTIME=$(printf "%.0f" $RTIME)
26 | 
27 | if [ $RTIME -lt $MAX_MILLIS_ERR ]; then
28 |     exit 2
29 | elif [ $RTIME -lt $MAX_MILLIS_WARN ]; then
30 |     exit 1
31 | else
32 |     exit 0
33 | fi
34 |
--------------------------------------------------------------------------------
/scripts/rtime_up.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | SERVICE=$1
4 | MAX_MILLIS_WARN=$2
5 | MAX_MILLIS_ERR=$3
6 | 
7 | RTIME=$(curl http://{{ elk_ip }}:9200/logstash-*/_search -d "
8 | {
9 |   \"size\" : 0,
10 |   \"query\": {
11 |     \"bool\": {
12 |       \"must\": { \"match\": { \"tags\" : \"haproxy_stats\" } },
13 |       \"must\": { \"match\": { \"haproxy_stats.svname\" : \"BACKEND\" } },
14 |       \"must\": { \"match\": { \"haproxy_stats.pxname.raw\" : \"${SERVICE}-be\" } },
15 |       \"must\": { \"range\": { \"@timestamp\": { \"gt\" : \"now-1h\" } } }
16 |     }
17 |   },
18 |   \"aggs\" : {
19 |     \"avg_rtime\" : {
20 |       \"avg\": { \"field\": \"haproxy_stats.rtime\" }
21 |     }
22 |   }
23 | }" | jq '.aggregations.avg_rtime.value')
24 | 
25 | RTIME=$(printf "%.0f" $RTIME)
26 | 
27 | if [ $RTIME -gt $MAX_MILLIS_ERR ]; then
28 |     exit 2
29 | elif [ $RTIME -gt $MAX_MILLIS_WARN ]; then
30 |     exit 1
31 | else
32 |     exit 0
33 | fi
34 |
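35 | 
36 | # Example with hypothetical thresholds: `./rtime_up.sh go-demo 100 200; echo $?`
37 | # exits 2 when the one-hour average backend response time exceeds 200 ms, 1 when
38 | # it exceeds 100 ms, and 0 otherwise. Note that {{ elk_ip }} is an Ansible
39 | # placeholder, so the address must be rendered in before the script can run.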
"owners": ["099720109477"], 12 | "most_recent": true 13 | }, 14 | "instance_type": "t2.micro", 15 | "ssh_username": "ubuntu", 16 | "ami_name": "devops21", 17 | "force_deregister": true 18 | }], 19 | "provisioners": [{ 20 | "type": "shell", 21 | "inline": [ 22 | "sudo apt-get update", 23 | "sudo apt-get install -y apt-transport-https ca-certificates nfs-common", 24 | "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -", 25 | "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"", 26 | "sudo apt-get update", 27 | "sudo apt-get install -y docker-ce", 28 | "sudo usermod -aG docker ubuntu" 29 | ] 30 | }] 31 | } 32 | -------------------------------------------------------------------------------- /terraform/aws/packer-ubuntu-docker.json: -------------------------------------------------------------------------------- 1 | { 2 | "builders": [{ 3 | "type": "amazon-ebs", 4 | "region": "us-east-1", 5 | "source_ami_filter": { 6 | "filters": { 7 | "virtualization-type": "hvm", 8 | "name": "*ubuntu-xenial-16.04-amd64-server-*", 9 | "root-device-type": "ebs" 10 | }, 11 | "owners": ["099720109477"], 12 | "most_recent": true 13 | }, 14 | "instance_type": "t2.micro", 15 | "ssh_username": "ubuntu", 16 | "ami_name": "devops21", 17 | "force_deregister": true 18 | }], 19 | "provisioners": [{ 20 | "type": "shell", 21 | "inline": [ 22 | "sleep 15", 23 | "sudo apt-get clean", 24 | "sudo apt-get update", 25 | "sudo apt-get install -y apt-transport-https ca-certificates nfs-common", 26 | "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -", 27 | "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"", 28 | "sudo apt-get update", 29 | "sudo apt-get install -y docker-ce", 30 | "sudo usermod -aG docker ubuntu" 31 | ] 32 | }] 33 | } 34 | -------------------------------------------------------------------------------- /terraform/azure-acs/swarm.tf: -------------------------------------------------------------------------------- 1 | # Configure the Microsoft Azure Provider 2 | #provider "azurerm" { 3 | # subscription_id = "..." 4 | # client_id = "..." 5 | # client_secret = "..." 6 | # tenant_id = "..." 
7 | #}
8 | 
9 | resource "azurerm_resource_group" "swarm" {
10 |   name = "swarm-terraform-acs"
11 |   location = "${var.location}"
12 | }
13 | 
14 | resource "azurerm_container_service" "swarm" {
15 |   name = "acctestcontservice1"
16 |   location = "${azurerm_resource_group.swarm.location}"
17 |   resource_group_name = "${azurerm_resource_group.swarm.name}"
18 |   orchestration_platform = "Swarm"
19 | 
20 |   master_profile {
21 |     count = "${var.swarm_managers}"
22 |     dns_prefix = "${var.personal_prefix}-swarm-manager"
23 |   }
24 | 
25 |   linux_profile {
26 |     admin_username = "swarmadmin"
27 | 
28 |     ssh_key {
29 |       key_data = "${var.swarm_manager_token}"
30 |     }
31 |   }
32 | 
33 |   agent_pool_profile {
34 |     name = "swarmworkers"
35 |     count = "${var.swarm_workers}"
36 |     dns_prefix = "${var.personal_prefix}-swarm-agent"
37 |     vm_size = "${var.swarm_worker_type}"
38 |   }
39 | 
40 |   diagnostics_profile {
41 |     enabled = false
42 |   }
43 | 
44 | }
--------------------------------------------------------------------------------
/terraform/common.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "docker" {
2 |   name = "docker"
3 |   ingress {
4 |     from_port = 22
5 |     to_port = 22
6 |     protocol = "tcp"
7 |     cidr_blocks = ["0.0.0.0/0"]
8 |   }
9 |   ingress {
10 |     from_port = 80
11 |     to_port = 80
12 |     protocol = "tcp"
13 |     cidr_blocks = ["0.0.0.0/0"]
14 |   }
15 |   ingress {
16 |     from_port = 443
17 |     to_port = 443
18 |     protocol = "tcp"
19 |     cidr_blocks = ["0.0.0.0/0"]
20 |   }
21 |   ingress {
22 |     from_port = 2377
23 |     to_port = 2377
24 |     protocol = "tcp"
25 |     self = true
26 |   }
27 |   ingress {
28 |     from_port = 7946
29 |     to_port = 7946
30 |     protocol = "tcp"
31 |     self = true
32 |   }
33 |   ingress {
34 |     from_port = 7946
35 |     to_port = 7946
36 |     protocol = "udp"
37 |     self = true
38 |   }
39 |   ingress {
40 |     from_port = 4789
41 |     to_port = 4789
42 |     protocol = "tcp"
43 |     self = true
44 |   }
45 |   ingress {
46 |     from_port = 4789
47 |     to_port = 4789
48 |     protocol = "udp"
49 |     self = true
50 |   }
51 |   egress {
52 |     from_port = 0
53 |     to_port = 0
54 |     protocol = "-1"
55 |     cidr_blocks = ["0.0.0.0/0"]
56 |   }
57 | }
58 |
--------------------------------------------------------------------------------
/scripts/dm-swarm-5.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | if [[ "$(uname -s )" == "Linux" ]]; then
4 |     export VIRTUALBOX_SHARE_FOLDER="$PWD:$PWD"
5 | fi
6 | 
7 | for i in {1..5}; do
8 |     docker-machine create \
9 |         -d virtualbox \
10 |         swarm-$i
11 | done
12 | 
13 | eval $(docker-machine env swarm-1)
14 | 
15 | docker swarm init \
16 |     --advertise-addr $(docker-machine ip swarm-1)
17 | 
18 | TOKEN_MANAGER=$(docker swarm join-token -q manager)
19 | 
20 | TOKEN_WORKER=$(docker swarm join-token -q worker)
21 | 
22 | for i in 2 3; do
23 |     eval $(docker-machine env swarm-$i)
24 | 
25 |     docker swarm join \
26 |         --token $TOKEN_MANAGER \
27 |         --advertise-addr $(docker-machine ip swarm-$i) \
28 |         $(docker-machine ip swarm-1):2377
29 | done
30 | 
31 | for i in 4 5; do
32 |     eval $(docker-machine env swarm-$i)
33 | 
34 |     docker swarm join \
35 |         --token $TOKEN_WORKER \
36 |         --advertise-addr $(docker-machine ip swarm-$i) \
37 |         $(docker-machine ip swarm-1):2377
38 | done
39 | 
40 | for i in {1..5}; do
41 |     eval $(docker-machine env swarm-1)
42 | 
43 |     docker node update \
44 |         --label-add env=prod \
45 |         swarm-$i
46 | done
47 | 
48 | echo ""
49 | echo ">> The swarm cluster is up and running"
50 |
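51 | 
52 | # A quick sanity check once the script finishes:
53 | #   eval $(docker-machine env swarm-1) && docker node ls
54 | # should list five nodes (three managers, two workers), all labeled env=prod.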
--------------------------------------------------------------------------------
/terraform/aws/common.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "docker" {
2 |   name = "docker"
3 |   ingress {
4 |     from_port = 22
5 |     to_port = 22
6 |     protocol = "tcp"
7 |     cidr_blocks = ["0.0.0.0/0"]
8 |   }
9 |   ingress {
10 |     from_port = 80
11 |     to_port = 80
12 |     protocol = "tcp"
13 |     cidr_blocks = ["0.0.0.0/0"]
14 |   }
15 |   ingress {
16 |     from_port = 443
17 |     to_port = 443
18 |     protocol = "tcp"
19 |     cidr_blocks = ["0.0.0.0/0"]
20 |   }
21 |   ingress {
22 |     from_port = 2377
23 |     to_port = 2377
24 |     protocol = "tcp"
25 |     self = true
26 |   }
27 |   ingress {
28 |     from_port = 7946
29 |     to_port = 7946
30 |     protocol = "tcp"
31 |     self = true
32 |   }
33 |   ingress {
34 |     from_port = 7946
35 |     to_port = 7946
36 |     protocol = "udp"
37 |     self = true
38 |   }
39 |   ingress {
40 |     from_port = 4789
41 |     to_port = 4789
42 |     protocol = "tcp"
43 |     self = true
44 |   }
45 |   ingress {
46 |     from_port = 4789
47 |     to_port = 4789
48 |     protocol = "udp"
49 |     self = true
50 |   }
51 |   egress {
52 |     from_port = 0
53 |     to_port = 0
54 |     protocol = "-1"
55 |     cidr_blocks = ["0.0.0.0/0"]
56 |   }
57 | }
58 |
--------------------------------------------------------------------------------
/scripts/swarm-services-3.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | docker network create --driver overlay proxy
4 | 
5 | docker service create --name proxy \
6 |     -p 80:80 \
7 |     -p 443:443 \
8 |     --reserve-memory 10m \
9 |     --network proxy \
10 |     --replicas 3 \
11 |     -e MODE=swarm \
12 |     -e LISTENER_ADDRESS=swarm-listener \
13 |     vfarcic/docker-flow-proxy
14 | 
15 | docker service create --name swarm-listener \
16 |     --network proxy \
17 |     --reserve-memory 10m \
18 |     --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \
19 |     -e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \
20 |     -e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \
21 |     --constraint 'node.role==manager' \
22 |     vfarcic/docker-flow-swarm-listener
23 | 
24 | docker service create --name jenkins \
25 |     -e JENKINS_OPTS="--prefix=/jenkins" \
26 |     --mount "type=volume,source=jenkins,target=/var/jenkins_home,volume-driver=rexray/efs" \
27 |     --label com.df.notify=true \
28 |     --label com.df.distribute=true \
29 |     --label com.df.servicePath=/jenkins \
30 |     --label com.df.port=8080 \
31 |     --network proxy \
32 |     --reserve-memory 300m \
33 |     jenkins/jenkins:2.161-alpine
34 | 
35 | echo ""
36 | echo ">> The scheduled services will be up and running soon"
37 |
--------------------------------------------------------------------------------
/terraform/aws-full/packer-ubuntu-docker-compose.json:
--------------------------------------------------------------------------------
1 | {
2 |   "builders": [{
3 |     "type": "amazon-ebs",
4 |     "region": "us-east-1",
5 |     "source_ami_filter": {
6 |       "filters": {
7 |         "virtualization-type": "hvm",
8 |         "name": "*ubuntu-xenial-16.04-amd64-server-*",
9 |         "root-device-type": "ebs"
10 |       },
11 |       "owners": ["099720109477"],
12 |       "most_recent": true
13 |     },
14 |     "instance_type": "t2.micro",
15 |     "ssh_username": "ubuntu",
16 |     "ami_name": "devops21ci",
17 |     "force_deregister": true
18 |   }],
19 |   "provisioners": [{
20 |     "type": "shell",
21 |     "inline": [
22 |       "sleep 15",
23 |       "sudo apt-get clean",
24 |       "sudo apt-get update",
25 |       "sudo apt-get install -y apt-transport-https ca-certificates nfs-common",
26 |       "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -",
27 |       "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"",
28 |       "sudo apt-get update",
29 |       "sudo apt-get install -y docker-ce",
30 |       "sudo usermod -aG docker ubuntu",
31 |       "sudo curl -L \"https://github.com/docker/compose/releases/download/1.10.0/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose",
32 |       "sudo chmod +x /usr/local/bin/docker-compose"
33 |     ]
34 |   }]
35 | }
36 |
--------------------------------------------------------------------------------
/terraform/aws-full/variables.tf:
--------------------------------------------------------------------------------
1 | variable "aws_access_key" {}
2 | variable "aws_secret_key" {}
3 | variable "aws_default_region" {}
4 | 
5 | variable "swarm_manager_token" {
6 |   default = ""
7 | }
8 | 
9 | variable "swarm_worker_token" {
10 |   default = ""
11 | }
12 | 
13 | variable "swarm_ami_id" {
14 |   default = "unknown"
15 | }
16 | 
17 | variable "swarm_manager_ip" {
18 |   default = ""
19 | }
20 | 
21 | variable "swarm_managers" {
22 |   default = 3
23 | }
24 | 
25 | variable "swarm_workers" {
26 |   default = 2
27 | }
28 | 
29 | variable "swarm_instance_type" {
30 |   default = "t2.medium"
31 | }
32 | 
33 | variable "swarm_init" {
34 |   default = false
35 | }
36 | 
37 | variable "rexray" {
38 |   default = false
39 | }
40 | 
41 | variable "efs" {
42 |   default = false
43 | }
44 | 
45 | variable "ci_ami_id" {
46 |   default = "unknown"
47 | }
48 | 
49 | variable "ci_instance_type" {
50 |   default = "t2.medium"
51 | }
52 | 
53 | variable "ci_count" {
54 |   default = 0
55 | }
56 | 
57 | variable "test_swarm_managers" {
58 |   default = 3
59 | }
60 | 
61 | variable "test_swarm_workers" {
62 |   default = 2
63 | }
64 | 
65 | variable "test_swarm_instance_type" {
66 |   default = "t2.medium"
67 | }
68 | 
69 | variable "test_swarm_manager_token" {
70 |   default = ""
71 | }
72 | 
73 | variable "test_swarm_worker_token" {
74 |   default = ""
75 | }
76 | 
77 | variable "test_swarm_manager_ip" {
78 |   default = ""
79 | }
80 |
--------------------------------------------------------------------------------
/scripts/aws-swarm-services.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | eval $(docker-machine env swarm-1)
4 | 
5 | docker network create --driver overlay proxy
6 | 
7 | docker network create --driver overlay go-demo
8 | 
9 | docker service create --name proxy \
10 |     -p 80:80 \
11 |     -p 443:443 \
12 |     --network proxy \
13 |     --replicas 3 \
14 |     -e MODE=swarm \
15 |     vfarcic/docker-flow-proxy
16 | 
17 | docker service create --name swarm-listener \
18 |     --network proxy \
19 |     --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \
20 |     -e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \
21 |     -e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \
22 |     --constraint 'node.role==manager' \
23 |     vfarcic/docker-flow-swarm-listener
24 | 
25 | docker service create --name go-demo-db \
26 |     --network go-demo \
27 |     mongo:3.2.10
28 | 
29 | docker service create --name go-demo \
30 |     -e DB=go-demo-db \
31 |     --network go-demo \
32 |     --network proxy \
33 |     --replicas 3 \
34 |     --label com.df.notify=true \
35 |     --label com.df.distribute=true \
36 |     --label com.df.servicePath=/demo \
37 |     --label com.df.port=8080 \
38 |     vfarcic/go-demo:1.2
39 | 
40 | docker service create --name util \
41 |     --network proxy \
42 |     --mode global \
43 |     alpine sleep 1000000000
44 | 
45 | echo ""
46 | echo ">> The services are scheduled and will be up and running soon"
47 |
--------------------------------------------------------------------------------
/scripts/dm-swarm-services-4.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | eval $(docker-machine env swarm-1)
4 | 
5 | docker network create --driver overlay proxy
6 | 
7 | docker network create --driver overlay go-demo
8 | 
9 | docker service create --name proxy \
10 |     -p 80:80 \
11 |     -p 443:443 \
12 |     --reserve-memory 10m \
13 |     --network proxy \
14 |     --replicas 3 \
15 |     -e MODE=swarm \
16 |     -e LISTENER_ADDRESS=swarm-listener \
17 |     vfarcic/docker-flow-proxy
18 | 
19 | docker service create --name swarm-listener \
20 |     --network proxy \
21 |     --reserve-memory 10m \
22 |     --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \
23 |     -e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \
24 |     -e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \
25 |     --constraint 'node.role==manager' \
26 |     vfarcic/docker-flow-swarm-listener
27 | 
28 | docker service create --name go-demo-db \
29 |     --reserve-memory 100m \
30 |     --network go-demo \
31 |     mongo:3.2.10
32 | 
33 | docker service create --name go-demo \
34 |     -e DB=go-demo-db \
35 |     --reserve-memory 10m \
36 |     --network go-demo \
37 |     --network proxy \
38 |     --replicas 3 \
39 |     --label com.df.notify=true \
40 |     --label com.df.distribute=true \
41 |     --label com.df.servicePath=/demo \
42 |     --label com.df.port=8080 \
43 |     vfarcic/go-demo:1.2
44 | 
45 | echo ""
46 | echo ">> The services are scheduled and will be up and running soon"
47 |
--------------------------------------------------------------------------------
/terraform/aws-full/packer-ubuntu-docker.json:
--------------------------------------------------------------------------------
1 | {
2 |   "builders": [{
3 |     "type": "amazon-ebs",
4 |     "region": "us-east-1",
5 |     "source_ami_filter": {
6 |       "filters": {
7 |         "virtualization-type": "hvm",
8 |         "name": "*ubuntu-xenial-16.04-amd64-server-*",
9 |         "root-device-type": "ebs"
10 |       },
11 |       "owners": ["099720109477"],
12 |       "most_recent": true
13 |     },
14 |     "instance_type": "t2.micro",
15 |     "ssh_username": "ubuntu",
16 |     "ami_name": "devops21",
17 |     "force_deregister": true
18 |   }],
19 |   "provisioners": [{
20 |     "type": "shell",
21 |     "inline": [
22 |       "sudo apt-get clean",
23 |       "sudo apt-get update",
24 |       "sudo apt-get install -y apt-transport-https ca-certificates nfs-common",
25 |       "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -",
26 |       "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"",
27 |       "sudo apt-get update",
28 |       "sudo apt-get install -y docker-ce",
29 |       "sudo usermod -aG docker ubuntu"
30 |     ]
31 |   }, {
32 |     "type": "file",
33 |     "source": "docker.service",
34 |     "destination": "/tmp/docker.service"
35 |   }, {
36 |     "type": "shell",
37 |     "inline": [
38 |       "sudo mv /tmp/docker.service /lib/systemd/system/docker.service",
39 |       "sudo chmod 644 /lib/systemd/system/docker.service",
40 |       "sudo systemctl daemon-reload",
41 |       "sudo systemctl restart docker"
42 |     ]
43 |   }]
44 | }
45 |
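Unlike the base image built from /terraform/packer-ubuntu-docker.json, this AMI also installs the custom docker.service shown earlier, which adds `-H tcp://0.0.0.0:2375`, so the engine listens on the unauthenticated remote API as well. Assuming the security group admits the caller, a client could then point the Docker CLI at a node remotely, e.g.:

    export DOCKER_HOST=tcp://<node-private-ip>:2375
    docker node ls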
| 3 | eval $(docker-machine env swarm-1) 4 | 5 | docker network create --driver overlay proxy 6 | 7 | docker network create --driver overlay go-demo 8 | 9 | docker service create --name proxy \ 10 | -p 80:80 \ 11 | -p 443:443 \ 12 | --reserve-memory 10m \ 13 | --network proxy \ 14 | --replicas 3 \ 15 | -e MODE=swarm \ 16 | -e LISTENER_ADDRESS=swarm-listener \ 17 | vfarcic/docker-flow-proxy 18 | 19 | docker service create --name swarm-listener \ 20 | --network proxy \ 21 | --reserve-memory 10m \ 22 | --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \ 23 | -e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \ 24 | -e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \ 25 | --constraint 'node.role==manager' \ 26 | vfarcic/docker-flow-swarm-listener 27 | 28 | docker service create --name go-demo-db \ 29 | --reserve-memory 100m \ 30 | --network go-demo \ 31 | mongo:3.2.10 32 | 33 | docker service create --name go-demo \ 34 | -e DB=go-demo-db \ 35 | --reserve-memory 10m \ 36 | --network go-demo \ 37 | --network proxy \ 38 | --replicas 3 \ 39 | --label com.df.notify=true \ 40 | --label com.df.distribute=true \ 41 | --label com.df.servicePath=/demo \ 42 | --label com.df.port=8080 \ 43 | vfarcic/go-demo:1.2 44 | 45 | docker service create --name util \ 46 | --network proxy \ 47 | --mode global \ 48 | alpine sleep 1000000000 49 | 50 | echo "" 51 | echo ">> The services are scheduled and will be up-and-running soon" 52 | -------------------------------------------------------------------------------- /scripts/swarm-services-uber.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker network create --driver overlay proxy 4 | 5 | docker network create --driver overlay go-demo 6 | 7 | docker service create --name proxy \ 8 | -p 80:80 \ 9 | -p 443:443 \ 10 | --reserve-memory 10m \ 11 | --network proxy \ 12 | --replicas 3 \ 13 | -e MODE=swarm \ 14 | vfarcic/docker-flow-proxy 15 | 16 | docker service create --name swarm-listener \ 17 | --network proxy \ 18 | --reserve-memory 10m \ 19 | --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \ 20 | -e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \ 21 | -e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \ 22 | --constraint 'node.role==manager' \ 23 | vfarcic/docker-flow-swarm-listener 24 | 25 | docker service create --name go-demo-db \ 26 | --reserve-memory 100m \ 27 | --network go-demo \ 28 | mongo:3.2.10 29 | 30 | docker service create --name go-demo \ 31 | -e DB=go-demo-db \ 32 | --reserve-memory 10m \ 33 | --network go-demo \ 34 | --network proxy \ 35 | --replicas 3 \ 36 | --label com.df.notify=true \ 37 | --label com.df.distribute=true \ 38 | --label com.df.servicePath=/demo \ 39 | --label com.df.port=8080 \ 40 | vfarcic/go-demo:1.2 41 | 42 | # TODO: Add jenkins 43 | 44 | # TODO: Add jenkins-agent 45 | 46 | # TODO: Add basi/node-exporter 47 | 48 | # TODO: Add cadvisor 49 | 50 | # TODO: Add prometheus 51 | 52 | # TODO: Add grafana 53 | 54 | # TODO: Add elasticsearch 55 | 56 | # TODO: Add logstash 57 | 58 | # TODO: Add logspout 59 | 60 | echo "" 61 | echo ">> The services are scheduled and will be up-and-running soon" 62 | -------------------------------------------------------------------------------- /terraform/aws-full/packer-ubuntu-docker-rexray.json: -------------------------------------------------------------------------------- 1
| { 2 | "builders": [{ 3 | "type": "amazon-ebs", 4 | "region": "us-east-1", 5 | "source_ami_filter": { 6 | "filters": { 7 | "virtualization-type": "hvm", 8 | "name": "*ubuntu-xenial-16.04-amd64-server-*", 9 | "root-device-type": "ebs" 10 | }, 11 | "owners": ["099720109477"], 12 | "most_recent": true 13 | }, 14 | "instance_type": "t2.micro", 15 | "ssh_username": "ubuntu", 16 | "ami_name": "devops21", 17 | "force_deregister": true 18 | }], 19 | "provisioners": [{ 20 | "type": "shell", 21 | "inline": [ 22 | "sudo apt-get clean", 23 | "sudo apt-get update", 24 | "sudo apt-get install -y apt-transport-https ca-certificates nfs-common", 25 | "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -", 26 | "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"", 27 | "sudo apt-get update", 28 | "sudo apt-get install -y docker-ce", 29 | "sudo usermod -aG docker ubuntu", 30 | "curl -sSL https://dl.bintray.com/emccode/rexray/install | sh -s -- stable 0.6.3" 31 | ] 32 | }, { 33 | "type": "file", 34 | "source": "docker.service", 35 | "destination": "/tmp/docker.service" 36 | }, { 37 | "type": "shell", 38 | "inline": [ 39 | "sleep 15", 40 | "sudo mv /tmp/docker.service /lib/systemd/system/docker.service", 41 | "sudo chmod 644 /lib/systemd/system/docker.service", 42 | "sudo systemctl daemon-reload", 43 | "sudo systemctl restart docker", 44 | "sudo mkdir -p /workspace", 45 | "sudo chmod 777 /workspace", 46 | "sudo sysctl -w vm.max_map_count=262144" 47 | ] 48 | }] 49 | } 50 | -------------------------------------------------------------------------------- /terraform/scw/swarm.tf: -------------------------------------------------------------------------------- 1 | provider "scaleway" { 2 | region = "${var.region}" 3 | } 4 | 5 | data "scaleway_image" "ubuntu" { 6 | architecture = "x86_64" 7 | name = "Ubuntu Xenial" 8 | } 9 | 10 | # Choose Docker-ready kernel 11 | data "scaleway_bootscript" "docker" { 12 | architecture = "x86_64" 13 | name_filter = "docker" 14 | } 15 | 16 | # Floating/elastic IP for manager access 17 | resource "scaleway_ip" "manager_ip" { 18 | server = "${scaleway_server.manager.0.id}" 19 | } 20 | 21 | resource "scaleway_server" "manager" { 22 | count = "${var.managers}" 23 | name = "swarm-manager-${count.index}" 24 | image = "${data.scaleway_image.ubuntu.id}" 25 | bootscript = "${data.scaleway_bootscript.docker.id}" 26 | type = "${var.instance_type}" 27 | dynamic_ip_required = "true" 28 | provisioner "remote-exec" { 29 | inline = [ 30 | "curl -sSL https://get.docker.com/ | sh", 31 | "if ${var.swarm_init}; then docker swarm init --advertise-addr ${self.private_ip}; fi", 32 | "if ! 
${var.swarm_init}; then docker swarm join --token ${var.swarm_manager_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377; fi" 33 | ] 34 | } 35 | } 36 | 37 | resource "scaleway_server" "worker" { 38 | count = "${var.workers}" 39 | name = "swarm-worker-${count.index}" 40 | image = "${data.scaleway_image.ubuntu.id}" 41 | bootscript = "${data.scaleway_bootscript.docker.id}" 42 | type = "${var.instance_type}" 43 | dynamic_ip_required = "true" 44 | provisioner "remote-exec" { 45 | inline = [ 46 | "curl -sSL https://get.docker.com/ | sh", 47 | "docker swarm join --token ${var.swarm_worker_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377" 48 | ] 49 | } 50 | } 51 | 52 | output "manager_external" { 53 | value = "${scaleway_ip.manager_ip.ip}" 54 | } 55 | 56 | output "manager_internal" { 57 | value = "${scaleway_server.manager.0.private_ip}" 58 | } 59 | -------------------------------------------------------------------------------- /compose/logstash.conf: -------------------------------------------------------------------------------- 1 | input { 2 | syslog { port => 51415 } 3 | gelf { 4 | type => docker 5 | port => 12201 6 | } 7 | http_poller { 8 | urls => { 9 | ha_proxy_stats => "http://admin:admin@proxy/admin?stats;csv" 10 | } 11 | tags => haproxy_stats 12 | codec => plain 13 | metadata_target => http_poller_metadata 14 | interval => 60 15 | request_timeout => 2 16 | } 17 | # Remove in production 18 | heartbeat { } 19 | } 20 | 21 | filter { 22 | if "haproxy_stats" in [tags] { 23 | split {} 24 | # We can't read the haproxy csv header, so we define it statically 25 | # This is because we're working line by line, and so have no header context 26 | csv { 27 | target => "haproxy_stats" 28 | columns => [ pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime ] 29 | } 30 | # Drop the haproxy CSV header, which always has this special value 31 | if [haproxy_stats][pxname] == "# pxname" { 32 | drop{} 33 | } 34 | # We no longer need the message field as the CSV filter has created separate 35 | # fields for data. 
36 | mutate { 37 | remove_field => message 38 | } 39 | # We can make educated guesses that strings with mixes of numbers and dots 40 | # are numbers, cast them for better behavior in Elasticsearch/Kibana 41 | ruby { 42 | code => "h=event['haproxy_stats']; h.each {|k,v| h[k] = v.to_f if v =~ /\A-?[0-9\.]+\Z/}" 43 | } 44 | } 45 | } 46 | 47 | output { 48 | elasticsearch { 49 | hosts => ["elasticsearch:9200"] 50 | } 51 | # Remove in production 52 | stdout { 53 | codec => rubydebug 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /conf/logstash-proxy.conf: -------------------------------------------------------------------------------- 1 | input { 2 | syslog { port => 51415 } 3 | gelf { 4 | type => docker 5 | port => 12201 6 | } 7 | http_poller { 8 | urls => { 9 | ha_proxy_stats => "http://admin:admin@proxy/admin?stats;csv" 10 | } 11 | tags => haproxy_stats 12 | codec => plain 13 | metadata_target => http_poller_metadata 14 | interval => 60 15 | request_timeout => 2 16 | } 17 | # Remove in production 18 | heartbeat { } 19 | } 20 | 21 | filter { 22 | if "haproxy_stats" in [tags] { 23 | split {} 24 | # We can't read the haproxy csv header, so we define it statically 25 | # This is because we're working line by line, and so have no header context 26 | csv { 27 | target => "haproxy_stats" 28 | columns => [ pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime ] 29 | } 30 | # Drop the haproxy CSV header, which always has this special value 31 | if [haproxy_stats][pxname] == "# pxname" { 32 | drop{} 33 | } 34 | # We no longer need the message field as the CSV filter has created separate 35 | # fields for data. 36 | mutate { 37 | remove_field => message 38 | } 39 | # We can make educated guesses that strings with mixes of numbers and dots 40 | # are numbers, cast them for better behavior in Elasticsearch/Kibana 41 | ruby { 42 | code => "h=event['haproxy_stats']; h.each {|k,v| h[k] = v.to_f if v =~ /\A-?[0-9\.]+\Z/}" 43 | } 44 | } 45 | } 46 | 47 | output { 48 | elasticsearch { 49 | hosts => ["elasticsearch:9200"] 50 | } 51 | # Remove in production 52 | stdout { 53 | codec => rubydebug 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /terraform/aws/swarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_instance" "swarm-manager" { 2 | count = "${var.swarm_managers}" 3 | ami = "${var.swarm_ami_id}" 4 | instance_type = "${var.swarm_instance_type}" 5 | tags { 6 | Name = "swarm-manager" 7 | } 8 | vpc_security_group_ids = [ 9 | "${aws_security_group.docker.id}" 10 | ] 11 | key_name = "devops21" 12 | connection { 13 | user = "ubuntu" 14 | private_key = "${file("devops21.pem")}" 15 | } 16 | provisioner "remote-exec" { 17 | inline = [ 18 | "if ${var.swarm_init}; then docker swarm init --advertise-addr ${self.private_ip}; fi", 19 | "if ! 
${var.swarm_init}; then docker swarm join --token ${var.swarm_manager_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377; fi" 20 | ] 21 | } 22 | } 23 | 24 | resource "aws_instance" "swarm-worker" { 25 | count = "${var.swarm_workers}" 26 | ami = "${var.swarm_ami_id}" 27 | instance_type = "${var.swarm_instance_type}" 28 | tags { 29 | Name = "swarm-worker" 30 | } 31 | vpc_security_group_ids = [ 32 | "${aws_security_group.docker.id}" 33 | ] 34 | key_name = "devops21" 35 | connection { 36 | user = "ubuntu" 37 | private_key = "${file("devops21.pem")}" 38 | } 39 | provisioner "remote-exec" { 40 | inline = [ 41 | "docker swarm join --token ${var.swarm_worker_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377" 42 | ] 43 | } 44 | } 45 | 46 | output "swarm_manager_1_public_ip" { 47 | value = "${aws_instance.swarm-manager.0.public_ip}" 48 | } 49 | 50 | output "swarm_manager_1_private_ip" { 51 | value = "${aws_instance.swarm-manager.0.private_ip}" 52 | } 53 | 54 | output "swarm_manager_2_public_ip" { 55 | value = "${aws_instance.swarm-manager.1.public_ip}" 56 | } 57 | 58 | output "swarm_manager_2_private_ip" { 59 | value = "${aws_instance.swarm-manager.1.private_ip}" 60 | } 61 | 62 | output "swarm_manager_3_public_ip" { 63 | value = "${aws_instance.swarm-manager.2.public_ip}" 64 | } 65 | 66 | output "swarm_manager_3_private_ip" { 67 | value = "${aws_instance.swarm-manager.2.private_ip}" 68 | } 69 | 70 | -------------------------------------------------------------------------------- /scripts/swarm-services-1.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker network create --driver overlay proxy 4 | 5 | docker network create --driver overlay go-demo 6 | 7 | docker service create --name proxy \ 8 | -p 80:80 \ 9 | -p 443:443 \ 10 | --reserve-memory 10m \ 11 | --network proxy \ 12 | --replicas 3 \ 13 | -e MODE=swarm \ 14 | -e LISTENER_ADDRESS=swarm-listener \ 15 | vfarcic/docker-flow-proxy 16 | 17 | docker service create --name swarm-listener \ 18 | --network proxy \ 19 | --reserve-memory 10m \ 20 | --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \ 21 | -e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \ 22 | -e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \ 23 | --constraint 'node.role==manager' \ 24 | vfarcic/docker-flow-swarm-listener 25 | 26 | while true; do 27 | REPLICAS=$(docker service ls | grep proxy | awk '{print $3}') 28 | REPLICAS_NEW=$(docker service ls | grep proxy | awk '{print $4}') 29 | if [[ $REPLICAS == "3/3" || $REPLICAS_NEW == "3/3" ]]; then 30 | break 31 | else 32 | echo "Waiting for the proxy service..." 33 | sleep 10 34 | fi 35 | done 36 | 37 | docker service create --name go-demo-db \ 38 | --reserve-memory 100m \ 39 | --network go-demo \ 40 | mongo:3.2.10 41 | 42 | while true; do 43 | REPLICAS=$(docker service ls | grep go-demo-db | awk '{print $3}') 44 | REPLICAS_NEW=$(docker service ls | grep go-demo-db | awk '{print $4}') 45 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 46 | break 47 | else 48 | echo "Waiting for the go-demo-db service..." 
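    # Both the 3rd and 4th columns of "docker service ls" output are checked because the position of the REPLICAS column differs between Docker CLI versions.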
49 | sleep 10 50 | fi 51 | done 52 | 53 | docker service create --name go-demo \ 54 | -e DB=go-demo-db \ 55 | --reserve-memory 10m \ 56 | --network go-demo \ 57 | --network proxy \ 58 | --replicas 3 \ 59 | --label com.df.notify=true \ 60 | --label com.df.distribute=true \ 61 | --label com.df.servicePath=/demo \ 62 | --label com.df.port=8080 \ 63 | vfarcic/go-demo:1.2 64 | 65 | echo "" 66 | echo ">> The scheduled services will be up-and-running soon" 67 | -------------------------------------------------------------------------------- /terraform/swarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_instance" "swarm-manager" { 2 | count = "${var.swarm_managers}" 3 | ami = "${var.swarm_ami_id}" 4 | instance_type = "${var.swarm_instance_type}" 5 | tags { 6 | Name = "swarm-manager" 7 | } 8 | vpc_security_group_ids = [ 9 | "${aws_security_group.docker.id}" 10 | ] 11 | key_name = "devops21" 12 | connection { 13 | user = "ubuntu" 14 | private_key = "${file("devops21.pem")}" 15 | } 16 | provisioner "remote-exec" { 17 | inline = [ 18 | "if ${var.swarm_init}; then docker swarm init --advertise-addr ${self.private_ip}; fi", 19 | "if ! ${var.swarm_init}; then docker swarm join --token ${var.swarm_manager_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377; fi" 20 | ] 21 | } 22 | } 23 | 24 | resource "aws_instance" "swarm-worker" { 25 | count = "${var.swarm_workers}" 26 | ami = "${var.swarm_ami_id}" 27 | instance_type = "${var.swarm_instance_type}" 28 | tags { 29 | Name = "swarm-worker" 30 | } 31 | vpc_security_group_ids = [ 32 | "${aws_security_group.docker.id}" 33 | ] 34 | key_name = "devops21" 35 | connection { 36 | user = "ubuntu" 37 | private_key = "${file("devops21.pem")}" 38 | } 39 | provisioner "remote-exec" { 40 | inline = [ 41 | "echo \"docker swarm join --token ${var.swarm_worker_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377\"", 42 | "docker swarm join --token ${var.swarm_worker_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377" 43 | ] 44 | } 45 | } 46 | 47 | output "swarm_manager_1_public_ip" { 48 | value = "${aws_instance.swarm-manager.0.public_ip}" 49 | } 50 | 51 | output "swarm_manager_1_private_ip" { 52 | value = "${aws_instance.swarm-manager.0.private_ip}" 53 | } 54 | 55 | output "swarm_manager_2_public_ip" { 56 | value = "${aws_instance.swarm-manager.1.public_ip}" 57 | } 58 | 59 | output "swarm_manager_2_private_ip" { 60 | value = "${aws_instance.swarm-manager.1.private_ip}" 61 | } 62 | 63 | output "swarm_manager_3_public_ip" { 64 | value = "${aws_instance.swarm-manager.2.public_ip}" 65 | } 66 | 67 | output "swarm_manager_3_private_ip" { 68 | value = "${aws_instance.swarm-manager.2.private_ip}" 69 | } 70 | 71 | -------------------------------------------------------------------------------- /terraform/do/swarm.tf: -------------------------------------------------------------------------------- 1 | resource "digitalocean_droplet" "swarm-manager" { 2 | image = "${var.swarm_snapshot_id}" 3 | size = "${var.swarm_instance_size}" 4 | count = "${var.swarm_managers}" 5 | name = "${format("swarm-manager-%02d", (count.index + 1))}" 6 | region = "${var.swarm_region}" 7 | private_networking = true 8 | ssh_keys = [ 9 | "${digitalocean_ssh_key.docker.id}" 10 | ] 11 | connection { 12 | user = "root" 13 | private_key = "${file("devops21-do")}" 14 | agent = false 15 | } 16 | provisioner "remote-exec" { 17 | inline = [ 18 | "if ${var.swarm_init}; then docker swarm init 
--advertise-addr ${self.ipv4_address_private}; fi", 19 | "if ! ${var.swarm_init}; then docker swarm join --token ${var.swarm_manager_token} --advertise-addr ${self.ipv4_address_private} ${var.swarm_manager_ip}:2377; fi" 20 | ] 21 | } 22 | } 23 | 24 | resource "digitalocean_droplet" "swarm-worker" { 25 | image = "${var.swarm_snapshot_id}" 26 | size = "${var.swarm_instance_size}" 27 | count = "${var.swarm_workers}" 28 | name = "${format("swarm-worker-%02d", (count.index + 1))}" 29 | region = "${var.swarm_region}" 30 | private_networking = true 31 | ssh_keys = [ 32 | "${digitalocean_ssh_key.docker.id}" 33 | ] 34 | connection { 35 | user = "root" 36 | private_key = "${file("devops21-do")}" 37 | agent = false 38 | } 39 | provisioner "remote-exec" { 40 | inline = [ 41 | "docker swarm join --token ${var.swarm_worker_token} --advertise-addr ${self.ipv4_address_private} ${var.swarm_manager_ip}:2377" 42 | ] 43 | } 44 | } 45 | 46 | output "swarm_manager_1_public_ip" { 47 | value = "${digitalocean_droplet.swarm-manager.0.ipv4_address}" 48 | } 49 | 50 | output "swarm_manager_1_private_ip" { 51 | value = "${digitalocean_droplet.swarm-manager.0.ipv4_address_private}" 52 | } 53 | 54 | output "swarm_manager_2_public_ip" { 55 | value = "${digitalocean_droplet.swarm-manager.1.ipv4_address}" 56 | } 57 | 58 | output "swarm_manager_2_private_ip" { 59 | value = "${digitalocean_droplet.swarm-manager.1.ipv4_address_private}" 60 | } 61 | 62 | output "swarm_manager_3_public_ip" { 63 | value = "${digitalocean_droplet.swarm-manager.2.ipv4_address}" 64 | } 65 | 66 | output "swarm_manager_3_private_ip" { 67 | value = "${digitalocean_droplet.swarm-manager.2.ipv4_address_private}" 68 | } -------------------------------------------------------------------------------- /scripts/go-demo.groovy: -------------------------------------------------------------------------------- 1 | node("docker") { 2 | 3 | stage("Pull") { 4 | git "https://github.com/vfarcic/go-demo.git" 5 | } 6 | 7 | withEnv([ 8 | "COMPOSE_FILE=docker-compose-test-local.yml" 9 | ]) { 10 | 11 | stage("Unit") { 12 | sh "docker-compose run --rm unit" 13 | sh "docker-compose build app" 14 | } 15 | 16 | stage("Staging") { 17 | try { 18 | sh "docker-compose up -d staging-dep" 19 | sh "docker-compose run --rm staging" 20 | } catch(e) { 21 | error "Staging failed" 22 | } finally { 23 | sh "docker-compose down" 24 | } 25 | } 26 | 27 | stage("Publish") { 28 | sh "docker tag go-demo \ 29 | localhost:5000/go-demo:2.${env.BUILD_NUMBER}" 30 | sh "docker push \ 31 | localhost:5000/go-demo:2.${env.BUILD_NUMBER}" 32 | } 33 | 34 | stage("Prod-like") { 35 | withEnv([ 36 | "DOCKER_TLS_VERIFY=1", 37 | "DOCKER_HOST=tcp://${env.PROD_LIKE_IP}:2376", 38 | "DOCKER_CERT_PATH=/machines/${env.PROD_LIKE_NAME}" 39 | ]) { 40 | sh "docker service update \ 41 | --image localhost:5000/go-demo:2.${env.BUILD_NUMBER} \ 42 | go-demo" 43 | } 44 | withEnv(["HOST_IP=localhost"]) { 45 | try { 46 | for (i = 0; i <10; i++) { 47 | sh "docker-compose run --rm production" 48 | } 49 | } catch (e) { 50 | sh "docker service update --rollback go-demo" 51 | } 52 | } 53 | } 54 | 55 | stage("Production") { 56 | withEnv([ 57 | "DOCKER_TLS_VERIFY=1", 58 | "DOCKER_HOST=tcp://${env.PROD_IP}:2376", 59 | "DOCKER_CERT_PATH=/machines/${env.PROD_NAME}" 60 | ]) { 61 | sh "docker service update \ 62 | --image localhost:5000/go-demo:2.${env.BUILD_NUMBER} \ 63 | go-demo" 64 | } 65 | try { 66 | withEnv(["HOST_IP=${env.PROD_IP}"]) { 67 | for (i = 0; i <10; i++) { 68 | sh "docker-compose run --rm production" 69 | } 70 | } 71 | } 
catch (e) { 72 | withEnv([ 73 | "DOCKER_TLS_VERIFY=1", 74 | "DOCKER_HOST=tcp://${env.PROD_IP}:2376", 75 | "DOCKER_CERT_PATH=/machines/${env.PROD_NAME}" 76 | ]) { 77 | sh "docker service update --rollback go-demo" 78 | } 79 | } 80 | } 81 | 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /terraform/aws-full/test-swarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_instance" "test-swarm-manager" { 2 | count = "${var.test_swarm_managers}" 3 | ami = "${var.swarm_ami_id}" 4 | instance_type = "${var.test_swarm_instance_type}" 5 | tags { 6 | Name = "test-swarm-manager" 7 | } 8 | vpc_security_group_ids = [ 9 | "${aws_security_group.docker.id}" 10 | ] 11 | key_name = "devops21" 12 | connection { 13 | user = "ubuntu" 14 | private_key = "${file("devops21.pem")}" 15 | } 16 | provisioner "remote-exec" { 17 | inline = [ 18 | "if ${var.swarm_init}; then docker swarm init --advertise-addr ${self.private_ip}; fi", 19 | "if ! ${var.swarm_init}; then docker swarm join --token ${var.test_swarm_manager_token} --advertise-addr ${self.private_ip} ${var.test_swarm_manager_ip}:2377; fi", 20 | "if ${var.rexray}; then echo \"${data.template_file.rexray.rendered}\" | sudo tee /etc/rexray/config.yml; fi", 21 | "if ${var.rexray}; then sudo rexray service start >/dev/null 2>/dev/null; fi" 22 | ] 23 | } 24 | } 25 | 26 | resource "aws_instance" "test-swarm-worker" { 27 | count = "${var.test_swarm_workers}" 28 | ami = "${var.swarm_ami_id}" 29 | instance_type = "${var.test_swarm_instance_type}" 30 | tags { 31 | Name = "test-swarm-worker" 32 | } 33 | vpc_security_group_ids = [ 34 | "${aws_security_group.docker.id}" 35 | ] 36 | key_name = "devops21" 37 | connection { 38 | user = "ubuntu" 39 | private_key = "${file("devops21.pem")}" 40 | } 41 | provisioner "remote-exec" { 42 | inline = [ 43 | "docker swarm join --token ${var.test_swarm_worker_token} --advertise-addr ${self.private_ip} ${var.test_swarm_manager_ip}:2377", 44 | "if ${var.rexray}; then echo \"${data.template_file.rexray.rendered}\" | sudo tee /etc/rexray/config.yml; fi", 45 | "if ${var.rexray}; then sudo rexray service start >/dev/null 2>/dev/null; fi" 46 | ] 47 | } 48 | } 49 | 50 | output "test_swarm_manager_1_public_ip" { 51 | value = "${aws_instance.test-swarm-manager.0.public_ip}" 52 | } 53 | 54 | output "test_swarm_manager_1_private_ip" { 55 | value = "${aws_instance.test-swarm-manager.0.private_ip}" 56 | } 57 | 58 | output "test_swarm_manager_2_public_ip" { 59 | value = "${aws_instance.test-swarm-manager.1.public_ip}" 60 | } 61 | 62 | output "test_swarm_manager_2_private_ip" { 63 | value = "${aws_instance.test-swarm-manager.1.private_ip}" 64 | } 65 | 66 | output "test_swarm_manager_3_public_ip" { 67 | value = "${aws_instance.test-swarm-manager.2.public_ip}" 68 | } 69 | 70 | output "test_swarm_manager_3_private_ip" { 71 | value = "${aws_instance.test-swarm-manager.2.private_ip}" 72 | } 73 | 74 | -------------------------------------------------------------------------------- /terraform/aws-full/swarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_instance" "swarm-manager" { 2 | count = "${var.swarm_managers}" 3 | ami = "${var.swarm_ami_id}" 4 | instance_type = "${var.swarm_instance_type}" 5 | tags { 6 | Name = "swarm-manager" 7 | } 8 | vpc_security_group_ids = [ 9 | "${aws_security_group.docker.id}" 10 | ] 11 | key_name = "devops21" 12 | connection { 13 | user = "ubuntu" 14 | private_key = 
"${file("devops21.pem")}" 15 | } 16 | provisioner "remote-exec" { 17 | inline = [ 18 | "if ${var.swarm_init}; then docker swarm init --advertise-addr ${self.private_ip}; fi", 19 | "if ! ${var.swarm_init}; then docker swarm join --token ${var.swarm_manager_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377; fi", 20 | "if ${var.rexray}; then docker plugin install rexray/efs --grant-all-permissions EFS_ACCESSKEY=${var.aws_access_key} EFS_SECRETKEY=${var.aws_secret_key} EFS_REGION=${var.aws_default_region} EFS_SECURITYGROUPS=${aws_security_group.docker.id} EFS_TAG=\"rexray\"; fi" 21 | ] 22 | } 23 | } 24 | 25 | resource "aws_instance" "swarm-worker" { 26 | count = "${var.swarm_workers}" 27 | ami = "${var.swarm_ami_id}" 28 | instance_type = "${var.swarm_instance_type}" 29 | tags { 30 | Name = "swarm-worker" 31 | } 32 | vpc_security_group_ids = [ 33 | "${aws_security_group.docker.id}", 34 | ] 35 | key_name = "devops21" 36 | connection { 37 | user = "ubuntu" 38 | private_key = "${file("devops21.pem")}" 39 | } 40 | provisioner "remote-exec" { 41 | inline = [ 42 | "docker swarm join --token ${var.swarm_worker_token} --advertise-addr ${self.private_ip} ${var.swarm_manager_ip}:2377", 43 | "if ${var.rexray}; then docker plugin install rexray/efs --grant-all-permissions EFS_ACCESSKEY=${var.aws_access_key} EFS_SECRETKEY=${var.aws_secret_key} EFS_REGION=${var.aws_default_region} EFS_SECURITYGROUPS=${aws_security_group.docker.id} EFS_TAG=\"rexray\"; fi" 44 | ] 45 | } 46 | } 47 | 48 | output "swarm_manager_1_public_ip" { 49 | value = "${aws_instance.swarm-manager.0.public_ip}" 50 | } 51 | 52 | output "swarm_manager_1_private_ip" { 53 | value = "${aws_instance.swarm-manager.0.private_ip}" 54 | } 55 | 56 | output "swarm_manager_2_public_ip" { 57 | value = "${aws_instance.swarm-manager.1.public_ip}" 58 | } 59 | 60 | output "swarm_manager_2_private_ip" { 61 | value = "${aws_instance.swarm-manager.1.private_ip}" 62 | } 63 | 64 | output "swarm_manager_3_public_ip" { 65 | value = "${aws_instance.swarm-manager.2.public_ip}" 66 | } 67 | 68 | output "swarm_manager_3_private_ip" { 69 | value = "${aws_instance.swarm-manager.2.private_ip}" 70 | } 71 | -------------------------------------------------------------------------------- /terraform/azure/README.md: -------------------------------------------------------------------------------- 1 | # Terraform - Azure 2 | 3 | Based on the "Docker for Azure" configuration, as can be seen at [ACS Engine](https://github.com/Azure/acs-engine). 4 | 5 | ACS stands Azure Container Services, and just another name for Azure for Docker. 6 | 7 | The ACS setup is a bit of a straitjacket. 8 | You're stuck with the configuration of the ACS blueprint, and the manager vm size is fixed to a D2. 9 | 10 | Thats why there's this separate configuration, it requires more manual work but can be tuned in detail. 11 | 12 | ## Scripts 13 | 14 | I have not found the time yet to embed the docker swarm initialization into a extension script. 
15 | 16 | So for now the following steps still have to be performed: 17 | 18 | * adjust the variables to your taste 19 | * create the infrastructure with terraform 20 | * add your key location to the deploy-swarmmode.sh 21 | * add the load balancer IP to the deploy-swarmmode.sh 22 | * configure the load balancer: the Terraform azurerm provider doesn't offer the ability to create the load balancer rules or to properly configure the backend pool sets 23 | * run deploy-swarmmode.sh 24 | 25 | ## Configure the load balancer 26 | 27 | The things we need to configure: 28 | 29 | * Add VMs to the BackendPool configurations 30 | * Add Load Balancer Rules for port forwarding to these backend pools 31 | 32 | This requires a bit of clicking in the Azure portal, but can be done quickly. 33 | 34 | 35 | #### BackendPool AVSET 36 | 37 | Go to the Load Balancer and open the Backend Pools. 38 | 39 | This pool is for rules such as port 443, for any Docker (Swarm) service. 40 | 41 | * **Associate** with **Availability Set**, and select **drovetfavset** 42 | * Add each VM with its primary NIC (they should only have one) 43 | 44 | #### BackendPool Manager1 45 | 46 | Go to the Load Balancer and open the Backend Pools. 47 | 48 | This pool makes it possible to connect to the Manager1 VM over SSH. 49 | 50 | Assuming you've configured the AVSET backend pool above, the Load Balancer should already be associated with the AVSET. 51 | 52 | * Add the Manager1 VM 53 | 54 | #### Load Balance Rule 443 55 | 56 | Go to the Load Balancer and open the Load balancing rules. 57 | 58 | * Create new rule 59 | * Enter name LBRule443 60 | * Port: 443 61 | * Backend port: 443 62 | * Backend pool: LBBackendPoolAVSET 63 | * Health probe: LBProbe443 64 | * Session persistence: none 65 | 66 | #### Load Balance Rule 22 67 | 68 | Go to the Load Balancer and open the Load balancing rules.
69 | 70 | * Create new rule 71 | * Enter name LBRule22 72 | * Port: 2200 73 | * Backend port: 22 74 | * Backend pool: LBBackendPoolManager1 75 | * Health probe: LBProbe22 76 | * Session persistence: none -------------------------------------------------------------------------------- /scripts/dm-swarm-services.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | eval $(docker-machine env swarm-1) 4 | 5 | docker network create --driver overlay proxy 6 | 7 | docker network create --driver overlay go-demo 8 | 9 | curl -o docker-compose-proxy.yml \ 10 | https://raw.githubusercontent.com/\ 11 | vfarcic/docker-flow-proxy/master/docker-compose.yml 12 | 13 | export DOCKER_IP=$(docker-machine ip swarm-1) 14 | 15 | docker-compose -f docker-compose-proxy.yml \ 16 | up -d consul-server 17 | 18 | export CONSUL_SERVER_IP=$(docker-machine ip swarm-1) 19 | 20 | for i in 2 3; do 21 | eval $(docker-machine env swarm-$i) 22 | 23 | export DOCKER_IP=$(docker-machine ip swarm-$i) 24 | 25 | docker-compose -f docker-compose-proxy.yml \ 26 | up -d consul-agent 27 | done 28 | 29 | rm docker-compose-proxy.yml 30 | 31 | docker service create --name proxy \ 32 | -p 80:80 \ 33 | -p 443:443 \ 34 | -p 8090:8080 \ 35 | --network proxy \ 36 | -e MODE=swarm \ 37 | --replicas 3 \ 38 | -e CONSUL_ADDRESS="$(docker-machine ip swarm-1):8500,$(docker-machine ip swarm-2):8500,$(docker-machine ip swarm-3):8500" \ 39 | --reserve-memory 50m \ 40 | vfarcic/docker-flow-proxy 41 | 42 | docker service create --name go-demo-db \ 43 | --network go-demo \ 44 | --reserve-memory 150m \ 45 | mongo:3.2.10 46 | 47 | while true; do 48 | REPLICAS=$(docker service ls | grep proxy | awk '{print $3}') 49 | REPLICAS_NEW=$(docker service ls | grep proxy | awk '{print $4}') 50 | if [[ $REPLICAS == "3/3" || $REPLICAS_NEW == "3/3" ]]; then 51 | break 52 | else 53 | echo "Waiting for the proxy service..." 54 | sleep 10 55 | fi 56 | done 57 | 58 | while true; do 59 | REPLICAS=$(docker service ls | grep go-demo-db | awk '{print $3}') 60 | REPLICAS_NEW=$(docker service ls | grep go-demo-db | awk '{print $4}') 61 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 62 | break 63 | else 64 | echo "Waiting for the go-demo-db service..." 65 | sleep 10 66 | fi 67 | done 68 | 69 | docker service create --name go-demo \ 70 | -e DB=go-demo-db \ 71 | --network go-demo \ 72 | --network proxy \ 73 | --replicas 3 \ 74 | --reserve-memory 50m \ 75 | --update-delay 5s \ 76 | vfarcic/go-demo:1.0 77 | 78 | while true; do 79 | REPLICAS=$(docker service ls | grep vfarcic/go-demo | awk '{print $3}') 80 | REPLICAS_NEW=$(docker service ls | grep vfarcic/go-demo | awk '{print $4}') 81 | if [[ $REPLICAS == "3/3" || $REPLICAS_NEW == "3/3" ]]; then 82 | break 83 | else 84 | echo "Waiting for the go-demo service..."
85 | sleep 10 86 | fi 87 | done 88 | 89 | curl "$(docker-machine ip swarm-1):8090/v1/docker-flow-proxy/reconfigure?serviceName=go-demo&servicePath=/demo&port=8080&distribute=true" 90 | 91 | echo "" 92 | echo ">> The services are up and running inside the swarm cluster" 93 | -------------------------------------------------------------------------------- /scripts/dm-test-swarm-services.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | eval $(docker-machine env swarm-test-1) 4 | 5 | docker network create --driver overlay proxy 6 | 7 | docker network create --driver overlay go-demo 8 | 9 | curl -o docker-compose-proxy.yml \ 10 | https://raw.githubusercontent.com/\ 11 | vfarcic/docker-flow-proxy/master/docker-compose.yml 12 | 13 | export DOCKER_IP=$(docker-machine ip swarm-test-1) 14 | 15 | docker-compose -f docker-compose-proxy.yml \ 16 | up -d consul-server 17 | 18 | export CONSUL_SERVER_IP=$(docker-machine ip swarm-test-1) 19 | 20 | for i in 2 3; do 21 | eval $(docker-machine env swarm-test-$i) 22 | 23 | export DOCKER_IP=$(docker-machine ip swarm-test-$i) 24 | 25 | docker-compose -f docker-compose-proxy.yml \ 26 | up -d consul-agent 27 | done 28 | 29 | rm docker-compose-proxy.yml 30 | 31 | docker service create --name proxy \ 32 | -p 80:80 \ 33 | -p 443:443 \ 34 | -p 8090:8080 \ 35 | --network proxy \ 36 | -e MODE=swarm \ 37 | --replicas 2 \ 38 | -e CONSUL_ADDRESS="$(docker-machine ip swarm-test-1):8500,$(docker-machine ip swarm-test-2):8500,$(docker-machine ip swarm-test-3):8500" \ 39 | --constraint 'node.labels.env == prod-like' \ 40 | vfarcic/docker-flow-proxy 41 | 42 | docker service create --name go-demo-db \ 43 | --network go-demo \ 44 | --constraint 'node.labels.env == prod-like' \ 45 | mongo:3.2.10 46 | 47 | while true; do 48 | REPLICAS=$(docker service ls | grep proxy | awk '{print $3}') 49 | REPLICAS_NEW=$(docker service ls | grep proxy | awk '{print $4}') 50 | if [[ $REPLICAS == "2/2" || $REPLICAS_NEW == "2/2" ]]; then 51 | break 52 | else 53 | echo "Waiting for the proxy service..." 54 | sleep 10 55 | fi 56 | done 57 | 58 | while true; do 59 | REPLICAS=$(docker service ls | grep go-demo-db | awk '{print $3}') 60 | REPLICAS_NEW=$(docker service ls | grep go-demo-db | awk '{print $4}') 61 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 62 | break 63 | else 64 | echo "Waiting for the go-demo-db service..." 65 | sleep 10 66 | fi 67 | done 68 | 69 | docker service create --name go-demo \ 70 | -e DB=go-demo-db \ 71 | --network go-demo \ 72 | --network proxy \ 73 | --replicas 2 \ 74 | --constraint 'node.labels.env == prod-like' \ 75 | --update-delay 5s \ 76 | vfarcic/go-demo:1.0 77 | 78 | while true; do 79 | REPLICAS=$(docker service ls | grep vfarcic/go-demo | awk '{print $3}') 80 | REPLICAS_NEW=$(docker service ls | grep vfarcic/go-demo | awk '{print $4}') 81 | if [[ $REPLICAS == "2/2" || $REPLICAS_NEW == "2/2" ]]; then 82 | break 83 | else 84 | echo "Waiting for the go-demo service..."
85 | sleep 10 86 | fi 87 | done 88 | 89 | curl "$(docker-machine ip swarm-test-1):8090/v1/docker-flow-proxy/reconfigure?serviceName=go-demo&servicePath=/demo&port=8080&distribute=true" 90 | 91 | echo "" 92 | echo ">> The services are up and running inside the swarm test cluster" 93 | -------------------------------------------------------------------------------- /terraform/aws-full/common.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | access_key = "${var.aws_access_key}" 3 | secret_key = "${var.aws_secret_key}" 4 | region = "${var.aws_default_region}" 5 | } 6 | 7 | resource "aws_security_group" "docker" { 8 | name = "docker" 9 | // SSH 10 | ingress { 11 | from_port = 22 12 | to_port = 22 13 | protocol = "tcp" 14 | cidr_blocks = ["0.0.0.0/0"] 15 | } 16 | // HTTP 17 | ingress { 18 | from_port = 80 19 | to_port = 80 20 | protocol = "tcp" 21 | cidr_blocks = ["0.0.0.0/0"] 22 | } 23 | // HTTPS 24 | ingress { 25 | from_port = 443 26 | to_port = 443 27 | protocol = "tcp" 28 | cidr_blocks = ["0.0.0.0/0"] 29 | } 30 | // NFS 31 | ingress { 32 | from_port = 2049 33 | to_port = 2049 34 | protocol = "tcp" 35 | self = true 36 | } 37 | // Docker 38 | ingress { 39 | from_port = 2375 40 | to_port = 2375 41 | protocol = "tcp" 42 | self = true 43 | } 44 | // Swarm 45 | ingress { 46 | from_port = 2377 47 | to_port = 2377 48 | protocol = "tcp" 49 | self = true 50 | } 51 | ingress { 52 | from_port = 7946 53 | to_port = 7946 54 | protocol = "tcp" 55 | self = true 56 | } 57 | ingress { 58 | from_port = 7946 59 | to_port = 7946 60 | protocol = "udp" 61 | self = true 62 | } 63 | ingress { 64 | from_port = 4789 65 | to_port = 4789 66 | protocol = "tcp" 67 | self = true 68 | } 69 | ingress { 70 | from_port = 4789 71 | to_port = 4789 72 | protocol = "udp" 73 | self = true 74 | } 75 | // Visualizer (demo purposes only) 76 | ingress { 77 | from_port = 9090 78 | to_port = 9090 79 | protocol = "tcp" 80 | cidr_blocks = ["0.0.0.0/0"] 81 | } 82 | // Jenkins agents (internal only) 83 | ingress { 84 | from_port = 50000 85 | to_port = 50000 86 | protocol = "tcp" 87 | self = true 88 | } 89 | // Prometheus 90 | ingress { 91 | from_port = 9091 92 | to_port = 9091 93 | protocol = "tcp" 94 | cidr_blocks = ["0.0.0.0/0"] 95 | } 96 | // Grafana 97 | ingress { 98 | from_port = 3000 99 | to_port = 3000 100 | protocol = "tcp" 101 | cidr_blocks = ["0.0.0.0/0"] 102 | } 103 | egress { 104 | from_port = 0 105 | to_port = 0 106 | protocol = "-1" 107 | cidr_blocks = ["0.0.0.0/0"] 108 | } 109 | } 110 | 111 | data "template_file" "rexray" { 112 | template = "${file("rexray.tpl")}" 113 | 114 | vars { 115 | aws_access_key = "${var.aws_access_key}" 116 | aws_secret_key = "${var.aws_secret_key}" 117 | aws_default_region = "${var.aws_default_region}" 118 | aws_security_group = "${aws_security_group.docker.id}" 119 | } 120 | } 121 | 122 | resource "aws_eip" "swarm" { 123 | instance = "${aws_instance.swarm-manager.0.id}" 124 | } 125 | 126 | output "security_group_id" { 127 | value = "${aws_security_group.docker.id}" 128 | } 129 | 130 | output "eip" { 131 | value = "${aws_eip.swarm.public_ip}" 132 | } 133 | -------------------------------------------------------------------------------- /scripts/dm-swarm-services-2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export MSYS_NO_PATHCONV=1 4 | 5 | eval $(docker-machine env swarm-1) 6 | 7 | docker service create --name registry \ 8 | -p 5000:5000 \ 9 | --reserve-memory 100m \ 
10 | --mount "type=bind,source=$PWD,target=/var/lib/registry" \ 11 | registry:2.5.0 12 | 13 | docker network create --driver overlay proxy 14 | 15 | docker network create --driver overlay go-demo 16 | 17 | curl -o docker-compose-proxy.yml \ 18 | https://raw.githubusercontent.com/\ 19 | vfarcic/docker-flow-proxy/master/docker-compose.yml 20 | 21 | export DOCKER_IP=$(docker-machine ip swarm-1) 22 | 23 | docker-compose -f docker-compose-proxy.yml \ 24 | up -d consul-server 25 | 26 | export CONSUL_SERVER_IP=$(docker-machine ip swarm-1) 27 | 28 | for i in 2 3; do 29 | eval $(docker-machine env swarm-$i) 30 | 31 | export DOCKER_IP=$(docker-machine ip swarm-$i) 32 | 33 | docker-compose -f docker-compose-proxy.yml \ 34 | up -d consul-agent 35 | done 36 | 37 | rm docker-compose-proxy.yml 38 | 39 | docker service create --name proxy \ 40 | -p 80:80 \ 41 | -p 443:443 \ 42 | -p 8090:8080 \ 43 | --network proxy \ 44 | -e MODE=swarm \ 45 | --replicas 3 \ 46 | -e CONSUL_ADDRESS="$(docker-machine ip swarm-1):8500,$(docker-machine ip swarm-2):8500,$(docker-machine ip swarm-3):8500" \ 47 | --reserve-memory 50m \ 48 | vfarcic/docker-flow-proxy 49 | 50 | docker service create --name go-demo-db \ 51 | --network go-demo \ 52 | --reserve-memory 150m \ 53 | mongo:3.2.10 54 | 55 | while true; do 56 | REPLICAS=$(docker service ls | grep proxy | awk '{print $3}') 57 | REPLICAS_NEW=$(docker service ls | grep proxy | awk '{print $4}') 58 | if [[ $REPLICAS == "3/3" || $REPLICAS_NEW == "3/3" ]]; then 59 | break 60 | else 61 | echo "Waiting for the proxy service..." 62 | sleep 10 63 | fi 64 | done 65 | 66 | while true; do 67 | REPLICAS=$(docker service ls | grep go-demo-db | awk '{print $3}') 68 | REPLICAS_NEW=$(docker service ls | grep go-demo-db | awk '{print $4}') 69 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 70 | break 71 | else 72 | echo "Waiting for the go-demo-db service..." 73 | sleep 10 74 | fi 75 | done 76 | 77 | docker service create --name go-demo \ 78 | -e DB=go-demo-db \ 79 | --network go-demo \ 80 | --network proxy \ 81 | --replicas 3 \ 82 | --reserve-memory 50m \ 83 | --update-delay 5s \ 84 | vfarcic/go-demo:1.0 85 | 86 | while true; do 87 | REPLICAS=$(docker service ls | grep vfarcic/go-demo | awk '{print $3}') 88 | REPLICAS_NEW=$(docker service ls | grep vfarcic/go-demo | awk '{print $4}') 89 | if [[ $REPLICAS == "3/3" || $REPLICAS_NEW == "3/3" ]]; then 90 | break 91 | else 92 | echo "Waiting for the go-demo service..." 
93 | sleep 10 94 | fi 95 | done 96 | 97 | curl "$(docker-machine ip swarm-1):8090/v1/docker-flow-proxy/reconfigure?serviceName=go-demo&servicePath=/demo&port=8080&distribute=true" 98 | 99 | echo "" 100 | echo ">> The services are up and running inside the swarm cluster" 101 | -------------------------------------------------------------------------------- /scripts/dm-test-swarm-services-2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export MSYS_NO_PATHCONV=1 4 | 5 | eval $(docker-machine env swarm-test-1) 6 | 7 | docker service create --name registry \ 8 | -p 5000:5000 \ 9 | --reserve-memory 100m \ 10 | --mount "type=bind,source=$PWD,target=/var/lib/registry" \ 11 | registry:2.5.0 12 | 13 | docker network create --driver overlay proxy 14 | 15 | docker network create --driver overlay go-demo 16 | 17 | curl -o docker-compose-proxy.yml \ 18 | https://raw.githubusercontent.com/\ 19 | vfarcic/docker-flow-proxy/master/docker-compose.yml 20 | 21 | export DOCKER_IP=$(docker-machine ip swarm-test-1) 22 | 23 | docker-compose -f docker-compose-proxy.yml \ 24 | up -d consul-server 25 | 26 | export CONSUL_SERVER_IP=$(docker-machine ip swarm-test-1) 27 | 28 | for i in 2 3; do 29 | eval $(docker-machine env swarm-test-$i) 30 | 31 | export DOCKER_IP=$(docker-machine ip swarm-test-$i) 32 | 33 | docker-compose -f docker-compose-proxy.yml \ 34 | up -d consul-agent 35 | done 36 | 37 | rm docker-compose-proxy.yml 38 | 39 | docker service create --name proxy \ 40 | -p 80:80 \ 41 | -p 443:443 \ 42 | -p 8090:8080 \ 43 | --network proxy \ 44 | -e MODE=swarm \ 45 | --replicas 2 \ 46 | -e CONSUL_ADDRESS="$(docker-machine ip swarm-test-1):8500,$(docker-machine ip swarm-test-2):8500,$(docker-machine ip swarm-test-3):8500" \ 47 | --constraint 'node.labels.env == prod-like' \ 48 | vfarcic/docker-flow-proxy 49 | 50 | docker service create --name go-demo-db \ 51 | --network go-demo \ 52 | --constraint 'node.labels.env == prod-like' \ 53 | mongo:3.2.10 54 | 55 | while true; do 56 | REPLICAS=$(docker service ls | grep proxy | awk '{print $3}') 57 | REPLICAS_NEW=$(docker service ls | grep proxy | awk '{print $4}') 58 | if [[ $REPLICAS == "2/2" || $REPLICAS_NEW == "2/2" ]]; then 59 | break 60 | else 61 | echo "Waiting for the proxy service..." 62 | sleep 10 63 | fi 64 | done 65 | 66 | while true; do 67 | REPLICAS=$(docker service ls | grep go-demo-db | awk '{print $3}') 68 | REPLICAS_NEW=$(docker service ls | grep go-demo-db | awk '{print $4}') 69 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 70 | break 71 | else 72 | echo "Waiting for the go-demo-db service..." 73 | sleep 10 74 | fi 75 | done 76 | 77 | docker service create --name go-demo \ 78 | -e DB=go-demo-db \ 79 | --network go-demo \ 80 | --network proxy \ 81 | --replicas 2 \ 82 | --constraint 'node.labels.env == prod-like' \ 83 | --update-delay 1s \ 84 | vfarcic/go-demo:1.0 85 | 86 | while true; do 87 | REPLICAS=$(docker service ls | grep vfarcic/go-demo | awk '{print $3}') 88 | REPLICAS_NEW=$(docker service ls | grep vfarcic/go-demo | awk '{print $4}') 89 | if [[ $REPLICAS == "2/2" || $REPLICAS_NEW == "2/2" ]]; then 90 | break 91 | else 92 | echo "Waiting for the go-demo service..." 
93 | sleep 10 94 | fi 95 | done 96 | 97 | curl "$(docker-machine ip swarm-test-1):8090/v1/docker-flow-proxy/reconfigure?serviceName=go-demo&servicePath=/demo&port=8080&distribute=true" 98 | 99 | echo "" 100 | echo ">> The services are up and running inside the swarm test cluster" 101 | -------------------------------------------------------------------------------- /scripts/dm-swarm-services-elk.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export MSYS_NO_PATHCONV=1 4 | 5 | eval $(docker-machine env swarm-1) 6 | 7 | docker network create --driver overlay elk 8 | 9 | docker service create --name elasticsearch \ 10 | --network elk \ 11 | -p 9200:9200 \ 12 | --reserve-memory 500m \ 13 | elasticsearch:2.4 14 | 15 | while true; do 16 | REPLICAS=$(docker service ls | grep elasticsearch | awk '{print $3}') 17 | REPLICAS_NEW=$(docker service ls | grep elasticsearch | awk '{print $4}') 18 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 19 | break 20 | else 21 | echo "Waiting for the elasticsearch service..." 22 | sleep 5 23 | fi 24 | done 25 | 26 | mkdir -p docker/logstash 27 | 28 | cp conf/logstash.conf docker/logstash/logstash.conf 29 | 30 | docker service create --name logstash \ 31 | --mount "type=bind,source=$PWD/docker/logstash,target=/conf" \ 32 | --network elk \ 33 | -e LOGSPOUT=ignore \ 34 | --reserve-memory 100m \ 35 | logstash:2.4 logstash -f /conf/logstash.conf 36 | 37 | while true; do 38 | REPLICAS=$(docker service ls | grep logstash | awk '{print $3}') 39 | REPLICAS_NEW=$(docker service ls | grep logstash | awk '{print $4}') 40 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 41 | break 42 | else 43 | echo "Waiting for the logstash service..." 44 | sleep 5 45 | fi 46 | done 47 | 48 | docker network create --driver overlay proxy 49 | 50 | docker service create --name swarm-listener \ 51 | --network proxy \ 52 | --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \ 53 | -e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \ 54 | -e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \ 55 | --constraint 'node.role==manager' \ 56 | vfarcic/docker-flow-swarm-listener 57 | 58 | docker service create --name proxy \ 59 | -p 80:80 \ 60 | -p 443:443 \ 61 | --network proxy \ 62 | -e MODE=swarm \ 63 | -e LISTENER_ADDRESS=swarm-listener \ 64 | vfarcic/docker-flow-proxy 65 | 66 | while true; do 67 | REPLICAS=$(docker service ls | grep swarm-listener | awk '{print $3}') 68 | REPLICAS_NEW=$(docker service ls | grep swarm-listener | awk '{print $4}') 69 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 70 | break 71 | else 72 | echo "Waiting for the swarm-listener service..." 73 | sleep 5 74 | fi 75 | done 76 | 77 | while true; do 78 | REPLICAS=$(docker service ls | grep proxy | awk '{print $3}') 79 | REPLICAS_NEW=$(docker service ls | grep proxy | awk '{print $4}') 80 | if [[ $REPLICAS == "1/1" || $REPLICAS_NEW == "1/1" ]]; then 81 | break 82 | else 83 | echo "Waiting for the proxy service..." 
84 | sleep 5 85 | fi 86 | done 87 | 88 | docker service create --name kibana \ 89 | --network elk \ 90 | --network proxy \ 91 | -e ELASTICSEARCH_URL=http://elasticsearch:9200 \ 92 | --reserve-memory 50m \ 93 | --label com.df.notify=true \ 94 | --label com.df.distribute=true \ 95 | --label com.df.servicePath=/app/kibana,/bundles,/elasticsearch \ 96 | --label com.df.port=5601 \ 97 | kibana:4.6 98 | 99 | echo "" 100 | echo ">> The services are up and running inside the swarm cluster" 101 | echo "" 102 | -------------------------------------------------------------------------------- /terraform/azure/scripts/deploy-swarmmode.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | LOADBALANCER_IP=changeit 3 | MANAGER_INTERNAL_IP= 4 | TOKEN= 5 | WORKERS=(worker1 worker2) 6 | KEY=/home/user/.ssh/id_rsa_azure 7 | REMOTE_USER=swarmadmin 8 | 9 | echo "########################################" 10 | echo "########################################" 11 | echo "## SETUP DOCKER SWARM MODE #############" 12 | echo "########################################" 13 | 14 | echo "########################################" 15 | echo "## PARAMETERS ##########################" 16 | echo "# LOADBALANCER_IP=$LOADBALANCER_IP" 17 | echo "# WORKERS=$WORKERS" 18 | echo "# KEY=$KEY" 19 | echo "# REMOTE_USER=$REMOTE_USER" 20 | echo "########################################" 21 | 22 | echo "########################################" 23 | echo "## COPY FILES & SCRIPTS ################" 24 | echo "########################################" 25 | echo "## @Manager" 26 | scp -P 2200 -o StrictHostKeyChecking=no $KEY $REMOTE_USER@$LOADBALANCER_IP:/home/$REMOTE_USER/.ssh/id_rsa 27 | scp -P 2200 -o StrictHostKeyChecking=no get-internal-ip.sh $REMOTE_USER@$LOADBALANCER_IP:/home/$REMOTE_USER 28 | scp -P 2200 -o StrictHostKeyChecking=no init-swarm-mode.sh $REMOTE_USER@$LOADBALANCER_IP:/home/$REMOTE_USER 29 | scp -P 2200 -o StrictHostKeyChecking=no -r ../../resources/azure-storage-driver/ $REMOTE_USER@$LOADBALANCER_IP:/home/$REMOTE_USER 30 | echo "## @WORKERs" 31 | for WORKER in ${WORKERS[@]}; do 32 | echo "# $WORKER" 33 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP scp -o StrictHostKeyChecking=no -r /home/$REMOTE_USER/azure-storage-driver/ $WORKER:/home/$REMOTE_USER 34 | done 35 | echo "########################################" 36 | 37 | echo "########################################" 38 | echo "## INSTALL DOCKER CLOUDSTOR PLUGIN #####" 39 | echo "## @Manager" 40 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP 'docker plugin install docker4x/cloudstor:azure-v17.03.0-ce --grant-all-permissions' 41 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP 'docker plugin ls' 42 | echo "## @WORKERs" 43 | for WORKER in ${WORKERS[@]}; do 44 | echo "# $WORKER" 45 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP ssh -o StrictHostKeyChecking=no $WORKER docker plugin install docker4x/cloudstor:azure-v17.03.0-ce --grant-all-permissions 46 | done 47 | echo "########################################" 48 | 49 | echo "########################################" 50 | echo "## INSTALL DOCKER AZURE FILE PLUGIN ####" 51 | echo "## @Manager" 52 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP 'cd azure-storage-driver && sudo ./install.sh' 53 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP 'docker plugin ls' 54 | echo "## @WORKER" 55 | for WORKER in ${WORKERS[@]}; do 56 | echo 
"# $WORKER" 57 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP ssh -o StrictHostKeyChecking=no $WORKER << EOF 58 | cd azure-storage-driver 59 | ls -lath; 60 | sudo ./install.sh 61 | echo $HOSTNAME 62 | EOF 63 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP ssh -o StrictHostKeyChecking=no $WORKER docker plugin ls 64 | done 65 | echo "########################################" 66 | 67 | echo "########################################" 68 | echo "## INIT SWARM MODE ON MANAGER VM #######" 69 | ssh -p 2200 $REMOTE_USER@$LOADBALANCER_IP './init-swarm-mode.sh' 70 | echo "########################################" 71 | 72 | echo "########################################" 73 | echo "## RETRIEVE WORKER TOKEN ###############" 74 | TOKEN=$(ssh -p 2200 -o StrictHostKeyChecking=no ${REMOTE_USER}@${LOADBALANCER_IP} docker swarm join-token -q worker) 75 | echo "## Worker Token=$TOKEN" 76 | MANAGER_INTERNAL_IP=$(ssh -p 2200 -o StrictHostKeyChecking=no ${REMOTE_USER}@${LOADBALANCER_IP} './get-internal-ip.sh') 77 | echo "## MANAGER_INTERNAL_IP=$MANAGER_INTERNAL_IP" 78 | echo "########################################" 79 | 80 | echo "########################################" 81 | echo "## WORKER JOIN SWARM CLUSTER ###########" 82 | for WORKER in ${WORKERS[@]}; do 83 | echo "# $WORKER" 84 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP ssh $WORKER docker swarm join --token ${TOKEN} ${MANAGER_INTERNAL_IP}:2377 85 | done 86 | echo "########################################" 87 | 88 | echo "########################################" 89 | echo "## CHECK NODES ON MANAGER ##############" 90 | ssh -p 2200 -o StrictHostKeyChecking=no $REMOTE_USER@$LOADBALANCER_IP 'docker node ls' 91 | echo "########################################" 92 | 93 | -------------------------------------------------------------------------------- /rexray.md: -------------------------------------------------------------------------------- 1 | # Binary 2 | 3 | ```bash 4 | ~ $ curl -sSL https://dl.bintray.com/emccode/rexray/install | sh -s stable 0.6.3 5 | sudo: unable to execute /usr/bin/rexray: No such file or directory 6 | 7 | rexray has been installed to /usr/bin/rexray 8 | 9 | sh: /usr/bin/rexray: not found 10 | ``` 11 | 12 | # Container 13 | 14 | ```bash 15 | ~ $ sudo docker run \ 16 | > -d --name rexray \ 17 | > -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY \ 18 | > -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_KEY \ 19 | > -v /run/docker/plugins:/run/docker/plugins \ 20 | > -v /var/lib/rexray:/var/lib/rexray:shared \ 21 | > -v /var/run/rexray:/var/run/rexray \ 22 | > -v /var/log/rexray:/var/log/rexray \ 23 | > -v /dev:/dev \ 24 | > basi/rexray 25 | Unable to find image 'basi/rexray:latest' locally 26 | latest: Pulling from basi/rexray 27 | 8ad8b3f87b37: Pull complete 28 | d66207d05cbb: Pull complete 29 | fc1f8906394f: Pull complete 30 | Digest: sha256:d5961337a42c3d26181142dbdf8b02423c81d1b64602f35e76fcfd0a240a30da 31 | Status: Downloaded newer image for basi/rexray:latest 32 | 5640ec82025dd1785d6202f9e4724c60c5bad30edf419f4b1f58c50196746b56 33 | docker: Error response from daemon: linux mounts: Path /var/lib/rexray is mounted on /var but it is not a shared mount.. 
34 | ``` 35 | 36 | # Container without shared 37 | 38 | ```bash 39 | ~ $ sudo docker run \ 40 | > -d --name rexray \ 41 | > -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY \ 42 | > -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_KEY \ 43 | > -v /run/docker/plugins:/run/docker/plugins \ 44 | > -v /var/lib/rexray:/var/lib/rexray \ 45 | > -v /var/run/rexray:/var/run/rexray \ 46 | > -v /var/log/rexray:/var/log/rexray \ 47 | > -v /dev:/dev \ 48 | > basi/rexray 49 | cd7299ea508df020bf415d5953046317e12ff997bd6a9e33ef5bb95bc79bff43 50 | ~ $ docker ps -a 51 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 52 | cd7299ea508d basi/rexray "/bin/sh -c /entry..." 3 seconds ago Up 3 seconds rexray 53 | b9e26dc77e80 docker4x/l4controller-aws:aws-v1.13.0-rc1-beta11 "loadbalancer run ..." 13 minutes ago Up 13 minutes editions_controller 54 | 502e3e0704e9 docker4x/shell-aws:aws-v1.13.0-rc1-beta11 "/entry.sh /usr/sb..." 13 minutes ago Up 13 minutes 0.0.0.0:22->22/tcp shell-aws 55 | a876af096363 docker4x/guide-aws:aws-v1.13.0-rc1-beta11 "/entry.sh" 13 minutes ago Up 13 minutes guide-aws 56 | e2a74b1daaa0 docker4x/init-aws:aws-v1.13.0-rc1-beta11 "/entry.sh" 13 minutes ago Exited (0) 13 minutes ago fervent_mccarthy 57 | 8a30db519d3d docker4x/meta-aws:aws-v1.13.0-rc1-beta11 "metaserver -flavo..." 14 minutes ago Up 14 minutes 172.31.5.147:9024->8080/tcp meta-aws 58 | ~ $ sudo docker exec -it rexray rexray volume get 59 | - name: "" 60 | volumeid: vol-63fe28f2 61 | availabilityzone: us-east-1d 62 | status: in-use 63 | volumetype: standard 64 | iops: 0 65 | size: "20" 66 | networkname: "" 67 | attachments: 68 | - volumeid: vol-63fe28f2 69 | instanceid: i-b0725f23 70 | devicename: /dev/xvdb 71 | status: attached 72 | - name: "" 73 | volumeid: vol-80fd2b11 74 | availabilityzone: us-east-1d 75 | status: in-use 76 | volumetype: standard 77 | iops: 0 78 | size: "20" 79 | networkname: "" 80 | attachments: 81 | - volumeid: vol-80fd2b11 82 | instanceid: i-0d4c619e 83 | devicename: /dev/xvdb 84 | status: attached 85 | - name: "" 86 | volumeid: vol-b2c62321 87 | availabilityzone: us-east-1b 88 | status: in-use 89 | volumetype: standard 90 | iops: 0 91 | size: "20" 92 | networkname: "" 93 | attachments: 94 | - volumeid: vol-b2c62321 95 | instanceid: i-d5a6eadb 96 | devicename: /dev/xvdb 97 | status: attached 98 | - name: "" 99 | volumeid: vol-ecc6237f 100 | availabilityzone: us-east-1b 101 | status: in-use 102 | volumetype: standard 103 | iops: 0 104 | size: "20" 105 | networkname: "" 106 | attachments: 107 | - volumeid: vol-ecc6237f 108 | instanceid: i-d6a6ead8 109 | devicename: /dev/xvdb 110 | status: attached 111 | ~ $ sudo docker run -ti --rm -v test1:/test busybox touch /test/viktor1 112 | Unable to find image 'busybox:latest' locally 113 | latest: Pulling from library/busybox 114 | 56bec22e3559: Pull complete 115 | Digest: sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 116 | Status: Downloaded newer image for busybox:latest 117 | ``` 118 | -------------------------------------------------------------------------------- /scripts/aws-swarm-creator.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Creating Docker Swarm Environment on AWS" 4 | 5 | echo "Checking if AWS ENV variables are set" 6 | [ -z "$AWS_ACCESS_KEY_ID" ] && echo "Need to set AWS_ACCESS_KEY_ID" && exit 1; 7 | [ -z "$AWS_SECRET_ACCESS_KEY" ] && echo "Need to set AWS_SECRET_ACCESS_KEY" && exit 1; 8 | [ -z "$AWS_DEFAULT_REGION" ] && echo "Need to set AWS_DEFAULT_REGION" && exit 
1; 9 | 10 | echo "AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID" 11 | echo "AWS_SECRET_ACCESS_KEY: Not showing you that!" 12 | echo "AWS_DEFAULT_REGION: $AWS_DEFAULT_REGION" 13 | echo "Checking if AWS CLI is installed" 14 | command -v aws >/dev/null 2>&1 || { echo >&2 "I require aws but it's not installed. Aborting."; exit 1; } 15 | echo "IAM Get User" 16 | aws iam get-user 17 | echo "Checking the availability zones in $AWS_DEFAULT_REGION" 18 | AWS_ZONES=($(aws ec2 describe-availability-zones | jq -r ".AvailabilityZones[] | .ZoneName")) 19 | echo ${AWS_ZONES[@]} 20 | NUMBER_OF_ZONES=${#AWS_ZONES[@]} 21 | echo "Enter a swarm name:" 22 | read SWARM_NAME 23 | echo "How many manager nodes? (an odd number): " 24 | read NUMBER_OF_MANAGERS 25 | echo "How many worker nodes?" 26 | read NUMBER_OF_WORKERS 27 | TOTAL_NUMBER_OF_NODES=$(($NUMBER_OF_MANAGERS + $NUMBER_OF_WORKERS)) 28 | 29 | NODE_NUMBER=1 30 | while [ $NODE_NUMBER -le $NUMBER_OF_MANAGERS ]; do 31 | ZONE_INDEX=$(($(($NODE_NUMBER-1))%$NUMBER_OF_ZONES)) 32 | ZONE=${AWS_ZONES[$ZONE_INDEX]: -1: 1} 33 | NODE_NAME="$SWARM_NAME-$NODE_NUMBER" 34 | echo "Creating manager: $NODE_NAME on zone:$ZONE of $NUMBER_OF_ZONES zones in $AWS_DEFAULT_REGION" 35 | docker-machine create \ 36 | --driver amazonec2 \ 37 | --amazonec2-zone $ZONE \ 38 | --amazonec2-tags "Type,manager" \ 39 | --amazonec2-security-group $SWARM_NAME \ 40 | $NODE_NAME 41 | let NODE_NUMBER=NODE_NUMBER+1 42 | done 43 | 44 | while [ $NODE_NUMBER -le $TOTAL_NUMBER_OF_NODES ]; do 45 | ZONE_INDEX=$(($(($NODE_NUMBER-1))%$NUMBER_OF_ZONES)) 46 | ZONE=${AWS_ZONES[$ZONE_INDEX]: -1: 1} 47 | NODE_NAME="$SWARM_NAME-$NODE_NUMBER" 48 | echo "Creating worker: $NODE_NAME on zone:$ZONE of $NUMBER_OF_ZONES zones in $AWS_DEFAULT_REGION" 49 | docker-machine create \ 50 | --driver amazonec2 \ 51 | --amazonec2-zone $ZONE \ 52 | --amazonec2-tags "Type,worker" \ 53 | --amazonec2-security-group $SWARM_NAME \ 54 | $NODE_NAME 55 | let NODE_NUMBER=NODE_NUMBER+1 56 | done 57 | 58 | LEADER_IP=$(aws ec2 describe-instances \ 59 | --filter "Name=tag:Name,Values=$SWARM_NAME-1" \ 60 | "Name=instance-state-name,Values=running" \ 61 | | jq -r ".Reservations[0].Instances[0].PrivateIpAddress") 62 | echo "LEADER_IP: $LEADER_IP" 63 | eval $(docker-machine env $SWARM_NAME-1) 64 | docker swarm init --advertise-addr $LEADER_IP 65 | SECURITY_GROUP_ID=$(aws ec2 describe-security-groups \ 66 | --filter "Name=group-name,Values=$SWARM_NAME" | \ 67 | jq -r '.SecurityGroups[0].GroupId') 68 | echo "SECURITY_GROUP_ID: $SECURITY_GROUP_ID" 69 | echo "Open TCP ingress ports on 2377 7946 and 4789" 70 | for p in 2377 7946 4789; do 71 | aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID \ 72 | --protocol tcp \ 73 | --port $p \ 74 | --source-group $SECURITY_GROUP_ID 75 | done 76 | echo "Open UDP ingress ports on 7946 and 4789" 77 | for p in 7946 4789; do 78 | aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID \ 79 | --protocol udp \ 80 | --port $p \ 81 | --source-group $SECURITY_GROUP_ID 82 | done 83 | MANAGER_TOKEN=$(docker swarm join-token -q manager) 84 | echo "MANAGER_TOKEN: $MANAGER_TOKEN" 85 | WORKER_TOKEN=$(docker swarm join-token -q worker) 86 | echo "WORKER_TOKEN: $WORKER_TOKEN" 87 | 88 | NODE_NUMBER=2 89 | while [ $NODE_NUMBER -le $NUMBER_OF_MANAGERS ]; do 90 | NODE_NAME="$SWARM_NAME-$NODE_NUMBER" 91 | IP=$(aws ec2 describe-instances \ 92 | --filter "Name=tag:Name,Values=$NODE_NAME" \ 93 | "Name=instance-state-name,Values=running" \ 94 | | jq -r ".Reservations[0].Instances[0].PrivateIpAddress") 95 | echo "IP for $NODE_NAME: $IP" 96 | eval $(docker-machine env $NODE_NAME) 97 | 98 | docker swarm join \ 99 | --token $MANAGER_TOKEN \ 100 | --advertise-addr $IP \ 101 | $LEADER_IP:2377 102 | 103 | let NODE_NUMBER=NODE_NUMBER+1 104 | done 105 | 106 | while [ $NODE_NUMBER -le $TOTAL_NUMBER_OF_NODES ]; do 107 | NODE_NAME="$SWARM_NAME-$NODE_NUMBER" 108 | IP=$(aws ec2 describe-instances \ 109 | --filter "Name=tag:Name,Values=$NODE_NAME" \ 110 | "Name=instance-state-name,Values=running" \ 111 | | jq -r ".Reservations[0].Instances[0].PrivateIpAddress") 112 | echo "IP for $NODE_NAME: $IP" 113 | eval $(docker-machine env $NODE_NAME) 114 | 115 | docker swarm join \ 116 | --token $WORKER_TOKEN \ 117 | --advertise-addr $IP \ 118 | $LEADER_IP:2377 119 | 120 | let NODE_NUMBER=NODE_NUMBER+1 121 | done 122 | 123 | eval $(docker-machine env $SWARM_NAME-1) 124 | docker node ls
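The creator script leaves no matching teardown. A minimal cleanup sketch (hypothetical, not part of this repository) that removes every docker-machine VM whose name starts with a given swarm name:

```bash
#!/usr/bin/env bash
# Hypothetical companion to aws-swarm-creator.sh: destroys all machines
# belonging to the named swarm. Removing a machine with docker-machine rm
# also terminates the underlying EC2 instance.
[ -z "$1" ] && echo "Usage: $0 <swarm-name>" && exit 1
SWARM_NAME=$1
for NODE in $(docker-machine ls -q | grep "^${SWARM_NAME}-"); do
  echo "Removing $NODE"
  docker-machine rm -y $NODE
done
```

The security group created by the creator script is not touched here and may need to be deleted separately (for example with `aws ec2 delete-security-group`).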
"IP for $NODE_NAME: $IP" 96 | eval $(docker-machine env $NODE_NAME) 97 | 98 | docker swarm join \ 99 | --token $MANAGER_TOKEN \ 100 | --advertise-addr $IP \ 101 | $LEADER_IP:2377 102 | 103 | let NODE_NUMBER=NODE_NUMBER+1 104 | done 105 | 106 | while [ $NODE_NUMBER -le $TOTAL_NUMBER_OF_NODES ]; do 107 | NODE_NAME="$SWARM_NAME-$NODE_NUMBER" 108 | IP=$(aws ec2 describe-instances \ 109 | --filter "Name=tag:Name,Values=$SWARM_NAME" \ 110 | "Name=instance-state-name,Values=running" \ 111 | | jq -r ".Reservations[0].Instances[0].PrivateIpAddress") 112 | echo "IP for $NODE_NAME: $IP" 113 | eval $(docker-machine env $NODE_NAME) 114 | 115 | docker swarm join \ 116 | --token $WORKER_TOKEN \ 117 | --advertise-addr $IP \ 118 | $LEADER_IP:2377 119 | 120 | let NODE_NUMBER=NODE_NUMBER+1 121 | done 122 | 123 | eval $(docker-machine env $SWARM_NAME-1) 124 | docker node ls -------------------------------------------------------------------------------- /terraform/azure/swarm.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_resource_group" "swarm" { 2 | name = "${var.resource-group}" 3 | location = "West Europe" 4 | } 5 | 6 | resource "azurerm_virtual_network" "swarm" { 7 | name = "drove" 8 | address_space = ["10.0.0.0/16"] 9 | location = "${azurerm_resource_group.swarm.location}" 10 | resource_group_name = "${azurerm_resource_group.swarm.name}" 11 | } 12 | 13 | resource "azurerm_storage_account" "swarm" { 14 | name = "${var.prefix}dockervolumes" 15 | resource_group_name = "${azurerm_resource_group.swarm.name}" 16 | location = "${azurerm_resource_group.swarm.location}" 17 | account_type = "Standard_GRS" 18 | } 19 | 20 | resource "azurerm_subnet" "swarm" { 21 | name = "drovesub" 22 | resource_group_name = "${azurerm_resource_group.swarm.name}" 23 | virtual_network_name = "${azurerm_virtual_network.swarm.name}" 24 | address_prefix = "10.0.3.0/24" 25 | } 26 | 27 | 28 | ############################################## 29 | ############################################## 30 | ######## LOAD BALANCER 31 | ############################################## 32 | ############################################## 33 | resource "azurerm_public_ip" "swarmmanagerlbip" { 34 | name = "${var.prefix}swarmmanagerlbip" 35 | location = "${azurerm_resource_group.swarm.location}" 36 | resource_group_name = "${azurerm_resource_group.swarm.name}" 37 | public_ip_address_allocation = "static" 38 | } 39 | 40 | resource "azurerm_lb" "swarmmanagerlb" { 41 | name = "${var.resource-base-name}loadbalancer" 42 | location = "${azurerm_resource_group.swarm.location}" 43 | resource_group_name = "${azurerm_resource_group.swarm.name}" 44 | 45 | frontend_ip_configuration { 46 | name = "PublicIPAddress" 47 | public_ip_address_id = "${azurerm_public_ip.swarmmanagerlbip.id}" 48 | } 49 | } 50 | 51 | resource "azurerm_lb_probe" "probessh" { 52 | resource_group_name = "${azurerm_resource_group.swarm.name}" 53 | loadbalancer_id = "${azurerm_lb.swarmmanagerlb.id}" 54 | name = "LBProbe22" 55 | port = 22 56 | } 57 | 58 | 59 | resource "azurerm_lb_probe" "probehttps" { 60 | resource_group_name = "${azurerm_resource_group.swarm.name}" 61 | loadbalancer_id = "${azurerm_lb.swarmmanagerlb.id}" 62 | name = "LBProbe443" 63 | port = 443 64 | } 65 | 66 | ## BACKEND POOL VM1 67 | resource "azurerm_lb_backend_address_pool" "manager1" { 68 | resource_group_name = "${azurerm_resource_group.swarm.name}" 69 | loadbalancer_id = "${azurerm_lb.swarmmanagerlb.id}" 70 | name = "LBBackendPoolManager1" 71 | } 72 | 73 | ## BACKEND 
POOL VM2 74 | resource "azurerm_lb_backend_address_pool" "manager2" { 75 | resource_group_name = "${azurerm_resource_group.swarm.name}" 76 | loadbalancer_id = "${azurerm_lb.swarmmanagerlb.id}" 77 | name = "LBBackendPoolWorker1" 78 | } 79 | 80 | ## BACKEND POOL AVSET 81 | resource "azurerm_lb_backend_address_pool" "avset" { 82 | resource_group_name = "${azurerm_resource_group.swarm.name}" 83 | loadbalancer_id = "${azurerm_lb.swarmmanagerlb.id}" 84 | name = "LBBackendPoolAVSET" 85 | } 86 | 87 | ############################################## 88 | ############################################## 89 | 90 | ############################################## 91 | ############################################## 92 | ######## VM AVAILABILITY SET 93 | ############################################## 94 | ############################################## 95 | 96 | resource "azurerm_availability_set" "avset" { 97 | name = "${var.resource-base-name}avset" 98 | location = "${azurerm_resource_group.swarm.location}" 99 | resource_group_name = "${azurerm_resource_group.swarm.name}" 100 | } 101 | 102 | ############################################## 103 | ############################################## 104 | ############################################## 105 | ############################################## 106 | ########### V M 's ########################### 107 | ## Each VM has: 108 | # Public IP (via NIC) 109 | # NIC 110 | # Storage Account (via S.A. Container) 111 | # Storage Account Container (For VHD) 112 | # Part of a Availability Set 113 | # Docker Extension 114 | 115 | ############################################## 116 | ######## SWARM - MANAGERS 117 | ######## MANAGER1 118 | ############################################## 119 | 120 | # NIC - Network Interface 121 | resource "azurerm_network_interface" "nic-manager1" { 122 | name = "${var.resource-base-name}nicmanager1" 123 | location = "${azurerm_resource_group.swarm.location}" 124 | resource_group_name = "${azurerm_resource_group.swarm.name}" 125 | 126 | ip_configuration { 127 | name = "ipconfig" 128 | subnet_id = "${azurerm_subnet.swarm.id}" 129 | private_ip_address_allocation = "dynamic" 130 | } 131 | } 132 | 133 | # Storage account 134 | resource "azurerm_storage_account" "sa-manager1" { 135 | name = "${var.prefix}samanager1" 136 | resource_group_name = "${azurerm_resource_group.swarm.name}" 137 | location = "${azurerm_resource_group.swarm.location}" 138 | account_type = "Standard_GRS" 139 | } 140 | 141 | # Storage Account Container 142 | resource "azurerm_storage_container" "sc-manager1" { 143 | name = "vhds" 144 | resource_group_name = "${azurerm_resource_group.swarm.name}" 145 | storage_account_name = "${azurerm_storage_account.sa-manager1.name}" 146 | container_access_type = "private" 147 | } 148 | 149 | # VM 150 | resource "azurerm_virtual_machine" "vm-manager1" { 151 | name = "vm-manager1" 152 | location = "${azurerm_resource_group.swarm.location}" 153 | resource_group_name = "${azurerm_resource_group.swarm.name}" 154 | network_interface_ids = ["${azurerm_network_interface.nic-manager1.id}"] 155 | vm_size = "${var.vm_size_manager}" 156 | availability_set_id = "${azurerm_availability_set.avset.id}" 157 | 158 | storage_image_reference { 159 | publisher = "Canonical" 160 | offer = "UbuntuServer" 161 | sku = "16.04-LTS" 162 | version = "latest" 163 | } 164 | 165 | storage_os_disk { 166 | name = "myosdisk1" 167 | vhd_uri = "${azurerm_storage_account.sa-manager1.primary_blob_endpoint}${azurerm_storage_container.sc-manager1.name}/myosdisk1.vhd" 168 | caching = 
"ReadWrite" 169 | create_option = "FromImage" 170 | } 171 | 172 | os_profile { 173 | computer_name = "manager1" 174 | admin_username = "${var.admin}" 175 | admin_password = "fRLCuek67V1g" 176 | } 177 | 178 | os_profile_linux_config { 179 | disable_password_authentication = true 180 | 181 | ssh_keys { 182 | path = "/home/${var.admin}/.ssh/authorized_keys" 183 | key_data = "${var.ssh}" 184 | } 185 | } 186 | } 187 | 188 | ## VM Extension 189 | resource "azurerm_virtual_machine_extension" "vm-manager1-ext" { 190 | name = "hostname" 191 | location = "${azurerm_resource_group.swarm.location}" 192 | resource_group_name = "${azurerm_resource_group.swarm.name}" 193 | virtual_machine_name = "${azurerm_virtual_machine.vm-manager1.name}" 194 | publisher = "Microsoft.Azure.Extensions" 195 | type = "DockerExtension" 196 | type_handler_version = "1.0" 197 | auto_upgrade_minor_version = true 198 | } 199 | 200 | ############################################## 201 | ############################################## 202 | ######## SWARM - WORKERS 203 | ######## WORKER1 204 | ############################################## 205 | 206 | # NIC - Network Interface 207 | resource "azurerm_network_interface" "nic-worker1" { 208 | name = "${var.resource-base-name}nic${var.worker-base-name}1" 209 | location = "${azurerm_resource_group.swarm.location}" 210 | resource_group_name = "${azurerm_resource_group.swarm.name}" 211 | 212 | ip_configuration { 213 | name = "ipconfig" 214 | subnet_id = "${azurerm_subnet.swarm.id}" 215 | private_ip_address_allocation = "dynamic" 216 | } 217 | } 218 | 219 | # Storage account 220 | resource "azurerm_storage_account" "sa-worker1" { 221 | name = "${var.prefix}${var.resource-base-name}sa${var.worker-base-name}1" 222 | resource_group_name = "${azurerm_resource_group.swarm.name}" 223 | location = "${azurerm_resource_group.swarm.location}" 224 | account_type = "Standard_GRS" 225 | } 226 | 227 | # Storage Account Container 228 | resource "azurerm_storage_container" "sc-worker1" { 229 | name = "vhds" 230 | resource_group_name = "${azurerm_resource_group.swarm.name}" 231 | storage_account_name = "${azurerm_storage_account.sa-worker1.name}" 232 | container_access_type = "private" 233 | } 234 | 235 | # VM 236 | resource "azurerm_virtual_machine" "vm-worker1" { 237 | name = "${var.resource-base-name}vm${var.worker-base-name}1" 238 | location = "${azurerm_resource_group.swarm.location}" 239 | resource_group_name = "${azurerm_resource_group.swarm.name}" 240 | network_interface_ids = ["${azurerm_network_interface.nic-worker1.id}"] 241 | vm_size = "${var.vm_size_worker}" 242 | availability_set_id = "${azurerm_availability_set.avset.id}" 243 | 244 | storage_image_reference { 245 | publisher = "Canonical" 246 | offer = "UbuntuServer" 247 | sku = "16.04-LTS" 248 | version = "latest" 249 | } 250 | 251 | storage_os_disk { 252 | name = "myosdisk1" 253 | vhd_uri = "${azurerm_storage_account.sa-worker1.primary_blob_endpoint}${azurerm_storage_container.sc-worker1.name}/myosdisk1.vhd" 254 | caching = "ReadWrite" 255 | create_option = "FromImage" 256 | } 257 | 258 | os_profile { 259 | computer_name = "${var.worker-base-name}1" 260 | admin_username = "${var.admin}" 261 | admin_password = "fRLCuek67V1g" 262 | } 263 | 264 | os_profile_linux_config { 265 | disable_password_authentication = true 266 | 267 | ssh_keys { 268 | path = "/home/${var.admin}/.ssh/authorized_keys" 269 | key_data = "${var.ssh}" 270 | } 271 | } 272 | } 273 | 274 | ## VM Extension 275 | resource "azurerm_virtual_machine_extension" "vm-worker1-ext" 
{ 276 | name = "hostname" 277 | location = "${azurerm_resource_group.swarm.location}" 278 | resource_group_name = "${azurerm_resource_group.swarm.name}" 279 | virtual_machine_name = "${azurerm_virtual_machine.vm-worker1.name}" 280 | publisher = "Microsoft.Azure.Extensions" 281 | type = "DockerExtension" 282 | type_handler_version = "1.0" 283 | auto_upgrade_minor_version = true 284 | } 285 | 286 | ############################################## 287 | ######## WORKER2 288 | ############################################## 289 | 290 | # NIC - Network Interface 291 | resource "azurerm_network_interface" "nic-worker2" { 292 | name = "${var.resource-base-name}nic${var.worker-base-name}2" 293 | location = "${azurerm_resource_group.swarm.location}" 294 | resource_group_name = "${azurerm_resource_group.swarm.name}" 295 | 296 | ip_configuration { 297 | name = "ipconfig" 298 | subnet_id = "${azurerm_subnet.swarm.id}" 299 | private_ip_address_allocation = "dynamic" 300 | } 301 | } 302 | 303 | # Storage account 304 | resource "azurerm_storage_account" "sa-worker2" { 305 | name = "${var.prefix}${var.resource-base-name}sa${var.worker-base-name}2" 306 | resource_group_name = "${azurerm_resource_group.swarm.name}" 307 | location = "${azurerm_resource_group.swarm.location}" 308 | account_type = "Standard_GRS" 309 | } 310 | 311 | # Storage Account Container 312 | resource "azurerm_storage_container" "sc-worker2" { 313 | name = "vhds" 314 | resource_group_name = "${azurerm_resource_group.swarm.name}" 315 | storage_account_name = "${azurerm_storage_account.sa-worker2.name}" 316 | container_access_type = "private" 317 | } 318 | 319 | # VM 320 | resource "azurerm_virtual_machine" "vm-worker2" { 321 | name = "${var.resource-base-name}vm${var.worker-base-name}2" 322 | location = "${azurerm_resource_group.swarm.location}" 323 | resource_group_name = "${azurerm_resource_group.swarm.name}" 324 | network_interface_ids = ["${azurerm_network_interface.nic-worker2.id}"] 325 | vm_size = "${var.vm_size_worker}" 326 | availability_set_id = "${azurerm_availability_set.avset.id}" 327 | 328 | storage_image_reference { 329 | publisher = "Canonical" 330 | offer = "UbuntuServer" 331 | sku = "16.04-LTS" 332 | version = "latest" 333 | } 334 | 335 | storage_os_disk { 336 | name = "myosdisk1" 337 | vhd_uri = "${azurerm_storage_account.sa-worker2.primary_blob_endpoint}${azurerm_storage_container.sc-worker2.name}/myosdisk1.vhd" 338 | caching = "ReadWrite" 339 | create_option = "FromImage" 340 | } 341 | 342 | os_profile { 343 | computer_name = "${var.worker-base-name}2" 344 | admin_username = "${var.admin}" 345 | admin_password = "fRLCuek67V1g" 346 | } 347 | 348 | os_profile_linux_config { 349 | disable_password_authentication = true 350 | 351 | ssh_keys { 352 | path = "/home/${var.admin}/.ssh/authorized_keys" 353 | key_data = "${var.ssh}" 354 | } 355 | } 356 | } 357 | 358 | ## VM Extension 359 | resource "azurerm_virtual_machine_extension" "vm-worker2-ext" { 360 | name = "hostname" 361 | location = "${azurerm_resource_group.swarm.location}" 362 | resource_group_name = "${azurerm_resource_group.swarm.name}" 363 | virtual_machine_name = "${azurerm_virtual_machine.vm-worker2.name}" 364 | publisher = "Microsoft.Azure.Extensions" 365 | type = "DockerExtension" 366 | type_handler_version = "1.0" 367 | auto_upgrade_minor_version = true 368 | } 369 | 370 | ############################################## 371 | ############################################## 372 | 373 | --------------------------------------------------------------------------------
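The three VM definitions in swarm.tf above (manager1, worker1, worker2) are near-identical copy-pasted blocks. As a hypothetical refactoring sketch (not part of the original configuration, and assuming a new `worker_count` variable), the per-worker resources could be collapsed with `count`, keeping the 0.11-era interpolation syntax the file already uses:

```hcl
# Hypothetical variable; not defined in the repository's variables.tf.
variable "worker_count" {
  default = 2
}

# One NIC resource covering every worker; the storage account, container,
# VM, and extension blocks could be parameterized the same way, with
# count.index + 1 standing in for the hardcoded node number.
resource "azurerm_network_interface" "nic-worker" {
  count               = "${var.worker_count}"
  name                = "${var.resource-base-name}nic${var.worker-base-name}${count.index + 1}"
  location            = "${azurerm_resource_group.swarm.location}"
  resource_group_name = "${azurerm_resource_group.swarm.name}"

  ip_configuration {
    name                          = "ipconfig"
    subnet_id                     = "${azurerm_subnet.swarm.id}"
    private_ip_address_allocation = "dynamic"
  }
}
```

Downstream references would then use splat syntax, e.g. `"${azurerm_network_interface.nic-worker.*.id}"`.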