├── .gitignore
├── .travis.yml
├── LICENSE.md
├── README.md
├── Vagrantfile
├── ansible
│   ├── ansible.cfg
│   ├── group_vars
│   │   └── all
│   ├── host_vars
│   │   ├── 10.100.192.200
│   │   ├── 10.100.192.201
│   │   ├── 10.100.192.202
│   │   └── 10.100.198.200
│   ├── hosts
│   │   └── prod
│   ├── proxy.yml
│   ├── roles
│   │   ├── common
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── consul
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── docker-compose
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── docker-flow
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── docker
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── docker.cfg
│   │   ├── java
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jenkins
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── files
│   │   │   │   ├── credentials.xml
│   │   │   │   └── docker-node-config.xml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       ├── deploy-to-swarm-with-proxy-config.xml
│   │   │       ├── deploy-to-swarm-with-proxy.groovy
│   │   │       ├── deploy-to-swarm-without-proxy-config.xml
│   │   │       ├── deploy-to-swarm-without-proxy.groovy
│   │   │       ├── scale-to-swarm-with-proxy-config.xml
│   │   │       └── scale-to-swarm-with-proxy.groovy
│   │   ├── registrator
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   └── swarm
│   │       └── tasks
│   │           └── main.yml
│   └── swarm.yml
├── articles
│   ├── article.md
│   ├── deployment-scaling.md
│   ├── img
│   │   ├── base-architecture.png
│   │   ├── deployment-without-proxy-flow.png
│   │   ├── deployment-without-proxy-user.png
│   │   ├── first-deployment-flow.png
│   │   ├── first-deployment-user.png
│   │   ├── proxy-flow.png
│   │   ├── proxy-user.png
│   │   ├── scaling-flow.png
│   │   ├── scaling-user.png
│   │   ├── second-deployment-flow.png
│   │   ├── second-deployment-user-after.png
│   │   ├── second-deployment-user-before.png
│   │   └── vagrant-sample.png
│   ├── proxy.md
│   └── templates.md
├── compose
│   ├── docker_compose.go
│   └── docker_compose_test.go
├── consul.go
├── consul_test.go
├── docker-compose-setup.yml
├── docker-compose.yml
├── docker-flow.yml
├── flow.go
├── flow_test.go
├── ha_proxy.go
├── ha_proxy_test.go
├── integration_test.go
├── main.go
├── main_test.go
├── mocks_test.go
├── opts.go
├── opts_test.go
├── proxy.go
├── proxy_test.go
├── scripts
│   └── bootstrap_ansible.sh
├── service_discovery.go
├── service_discovery_test.go
├── setup.sh
├── something.md
├── test_configs
├── tmpl
│   ├── go-demo-app-be.tmpl
│   └── go-demo-app-fe.tmpl
└── util
    └── util.go
/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
2 | /*.iml
3 | /docker-flow
4 | /*.out
5 | /docker-compose-flow.yml.tmp
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | env:
2 | - VERSION=1.0.2
3 |
4 | language: go
5 |
6 | before_install:
7 | - go get github.com/mitchellh/gox
8 | - gox -build-toolchain
9 | - go get github.com/tcnksm/ghr
10 | script:
11 | - go test -v ./...
12 | after_success:
13 | - '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && gox -output "dist/{{.Dir}}_{{.OS}}_{{.Arch}}" || false'
14 | - '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && ghr --username vfarcic --token $GITHUB_TOKEN --replace --debug v${VERSION} dist/ || false'
15 |
16 | branches:
17 | only:
18 | - master
19 |
20 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Viktor Farcic
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deprecated
2 |
3 | Since the emergence of Docker Swarm Mode, this project is deprecated. If you're interested in tools that can help you orchestrate a Swarm cluster, please visit the [Docker Flow Proxy](https://github.com/vfarcic/docker-flow-proxy) project.
4 |
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | Vagrant.configure(2) do |config|
5 | if (/cygwin|mswin|mingw|bccwin|wince|emx/ =~ RUBY_PLATFORM) != nil
6 | config.vm.synced_folder ".", "/vagrant", mount_options: ["dmode=700,fmode=600"]
7 | else
8 | config.vm.synced_folder ".", "/vagrant"
9 | end
10 | config.vm.define "proxy" do |d|
11 | d.vm.box = "ubuntu/trusty64"
12 | d.vm.hostname = "proxy"
13 | d.vm.network "private_network", ip: "10.100.198.200"
14 | d.vm.provision :shell, path: "scripts/bootstrap_ansible.sh"
15 | d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /vagrant/ansible/proxy.yml -i /vagrant/ansible/hosts/prod"
16 | d.vm.provision :shell, inline: "PYTHONUNBUFFERED=1 ansible-playbook /vagrant/ansible/swarm.yml -i /vagrant/ansible/hosts/prod"
17 | d.vm.provider "virtualbox" do |v|
18 | v.memory = 1024
19 | v.linked_clone = true if Vagrant::VERSION =~ /^1.8/
20 | end
21 | end
22 | config.vm.define "master" do |d|
23 | d.vm.box = "ubuntu/trusty64"
24 | d.vm.hostname = "master"
25 | d.vm.network "private_network", ip: "10.100.192.200"
26 | d.vm.provider "virtualbox" do |v|
27 | v.memory = 1024
28 | v.linked_clone = true if Vagrant::VERSION =~ /^1.8/
29 | end
30 | end
31 | (1..2).each do |i|
32 | config.vm.define "node-#{i}" do |d|
33 | d.vm.box = "ubuntu/trusty64"
34 | d.vm.hostname = "node-#{i}"
35 | d.vm.network "private_network", ip: "10.100.192.20#{i}"
36 | d.vm.provider "virtualbox" do |v|
37 | v.memory = 1024
38 | v.linked_clone = true if Vagrant::VERSION =~ /^1.8/
39 | end
40 | end
41 | end
42 | if Vagrant.has_plugin?("vagrant-cachier")
43 | config.cache.scope = :box
44 | end
45 | end
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | host_key_checking=False
3 |
4 | [privilege_escalation]
5 | become=True
6 | become_method=sudo
7 | become_user=root
8 |
--------------------------------------------------------------------------------
/ansible/group_vars/all:
--------------------------------------------------------------------------------
1 | consul_ip: 10.100.198.200
2 | proxy_ip: 10.100.198.200
3 |
--------------------------------------------------------------------------------
/ansible/host_vars/10.100.192.200:
--------------------------------------------------------------------------------
1 | ansible_ssh_private_key_file: /vagrant/.vagrant/machines/master/virtualbox/private_key
2 | swarm_master: true
3 | docker_extra: "--cluster-store=consul://{{ consul_ip }}:8500/swarm --cluster-advertise={{ facter_ipaddress_eth1 }}:2375"
4 |
--------------------------------------------------------------------------------
/ansible/host_vars/10.100.192.201:
--------------------------------------------------------------------------------
1 | ansible_ssh_private_key_file: /vagrant/.vagrant/machines/node-1/virtualbox/private_key
2 | swarm_master_ip: 10.100.192.200
3 | docker_extra: "-H tcp://0.0.0.0:2375 --cluster-store=consul://{{ consul_ip }}:8500/swarm --cluster-advertise={{ facter_ipaddress_eth1 }}:2375"
4 |
5 |
--------------------------------------------------------------------------------
/ansible/host_vars/10.100.192.202:
--------------------------------------------------------------------------------
1 | ansible_ssh_private_key_file: /vagrant/.vagrant/machines/node-2/virtualbox/private_key
2 | swarm_master_ip: 10.100.192.200
3 | docker_extra: "-H tcp://0.0.0.0:2375 --cluster-store=consul://{{ consul_ip }}:8500/swarm --cluster-advertise={{ facter_ipaddress_eth1 }}:2375"
--------------------------------------------------------------------------------
/ansible/host_vars/10.100.198.200:
--------------------------------------------------------------------------------
1 | ansible_ssh_private_key_file: /vagrant/.vagrant/machines/proxy/virtualbox/private_key
2 |
--------------------------------------------------------------------------------
/ansible/hosts/prod:
--------------------------------------------------------------------------------
1 | [proxy]
2 | 10.100.198.200
3 |
4 | [swarm]
5 | 10.100.192.20[0:2]
6 |
--------------------------------------------------------------------------------
/ansible/proxy.yml:
--------------------------------------------------------------------------------
1 | - hosts: proxy
2 | remote_user: vagrant
3 | serial: 1
4 | roles:
5 | - common
6 | - docker
7 | - docker-compose
8 | - consul
9 | - docker-flow
10 |
--------------------------------------------------------------------------------
/ansible/roles/common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | hosts: [
2 | { host_ip: "10.100.198.200", host_name: "proxy"},
3 | { host_ip: "10.100.198.200", host_name: "consul"},
4 | { host_ip: "10.100.192.200", host_name: "master"},
5 | { host_ip: "10.100.192.201", host_name: "node-1"},
6 | { host_ip: "10.100.192.202", host_name: "node-2"},
7 | ]
8 |
--------------------------------------------------------------------------------
/ansible/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: JQ is present
2 | apt:
3 | name: jq
4 | force: yes
5 |
6 | - name: Host is present
7 | lineinfile:
8 | dest: /etc/hosts
9 | regexp: "^{{ item.host_ip }} {{ item.host_name }}"
10 | line: "{{ item.host_ip }} {{ item.host_name }}"
11 | with_items: "{{ hosts }}"
12 |
--------------------------------------------------------------------------------
/ansible/roles/consul/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Container is running
2 | docker:
3 | name: consul
4 | image: progrium/consul
5 | state: running
6 | ports:
7 | - 8500:8500
8 | - 8301:8301
9 | - 8300:8300
10 | hostname: "{{ ansible_hostname }}"
11 | command: -advertise {{ facter_ipaddress_eth1 }} -server -bootstrap
12 |
--------------------------------------------------------------------------------
/ansible/roles/docker-compose/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Executable is present
2 | get_url:
3 | url: https://github.com/docker/compose/releases/download/1.6.2/docker-compose-Linux-x86_64
4 | dest: /usr/local/bin/docker-compose
5 | mode: 0755
6 |
--------------------------------------------------------------------------------
/ansible/roles/docker-flow/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Executable is present
2 | get_url:
3 | url: https://github.com/vfarcic/docker-flow/releases/download/v1.0.2/docker-flow_linux_amd64
4 | dest: /usr/local/bin/docker-flow
5 | mode: 0755
6 |
7 | - name: Sample source code directory is present
8 | file:
9 | path: /books-ms
10 | state: directory
11 | owner: vagrant
12 |
13 | - name: Sample Compose and Flow definitions are present
14 | get_url:
15 | url: "https://raw.githubusercontent.com/vfarcic/docker-flow/master/{{ item }}"
16 | dest: "/books-ms/{{ item }}"
17 | mode: 0644
18 | with_items:
19 | - docker-compose.yml
20 | - docker-flow.yml
21 |
--------------------------------------------------------------------------------
/ansible/roles/docker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_extra: "-H tcp://0.0.0.0:2375"
--------------------------------------------------------------------------------
/ansible/roles/docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Debian add Docker repository and update apt cache
2 | apt_repository:
3 | repo: deb https://apt.dockerproject.org/repo ubuntu-trusty main
4 | update_cache: yes
5 | state: present
6 |
7 | - name: Debian Docker is present
8 | apt:
9 | name: docker-engine
10 | state: latest
11 | force: yes
12 |
13 | - name: Debian python-pip is present
14 | apt: name=python-pip state=present
15 |
16 | - name: Debian docker-py is present
17 | pip:
18 | name: docker-py
19 | version: 1.6.0
20 | state: present
21 |
22 | - name: Debian files are present
23 | template:
24 | src: docker.cfg
25 | dest: /etc/default/docker
26 | register: copy_result
27 |
28 | - name: vagrant user is added to the docker group
29 | user:
30 | name: vagrant
31 | group: docker
32 | register: user_result
33 |
34 | - name: Debian Docker service is restarted
35 | service:
36 | name: docker
37 | state: restarted
38 | when: copy_result|changed or user_result|changed
39 |
--------------------------------------------------------------------------------
/ansible/roles/docker/templates/docker.cfg:
--------------------------------------------------------------------------------
1 | DOCKER_OPTS="$DOCKER_OPTS -H unix:///var/run/docker.sock {{ docker_extra }}"
--------------------------------------------------------------------------------
/ansible/roles/java/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Package is present
2 | apt:
3 | name: openjdk-7-jdk
4 | state: present
5 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/defaults/main.yml:
--------------------------------------------------------------------------------
1 | home: /data/jenkins
2 | main_job_src: service-workflow-config.xml
3 | jenkins_port: 8080
4 |
5 | configs: [
6 | { src: "credentials.xml", dir: "/data/jenkins", file: "credentials.xml", mode: "0777" },
7 | { src: "docker-node-config.xml", dir: "/data/jenkins/nodes/docker", file: "config.xml", mode: "0777" },
8 | ]
9 |
10 | jobs:
11 | - "deploy-to-swarm-without-proxy"
12 | - "deploy-to-swarm-with-proxy"
13 | - "scale-to-swarm-with-proxy"
14 |
15 | plugins:
16 | - git
17 | - workflow-aggregator
18 | - workflow-multibranch
19 | - docker-workflow
20 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/files/credentials.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 | GLOBAL
11 | 0f0d9da8-c1e9-40af-b0b5-13d75e58401e
12 | docker
13 | vagrant
14 | QhhhgZqgyTx7+8aHwNr84Q==
15 |
16 | /machines/jenkins/virtualbox/private_key
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/files/docker-node-config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | cd
4 |
5 | /data/jenkins_slaves/cd
6 | 2
7 | NORMAL
8 |
9 |
10 | 10.100.199.200
11 | 22
12 | 0f0d9da8-c1e9-40af-b0b5-13d75e58401e
13 | 0
14 | 0
15 |
16 |
17 |
18 | anonymous
19 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Directories are created
2 | file:
3 | path: "{{ item.dir }}"
4 | mode: 0777
5 | recurse: yes
6 | state: directory
7 | with_items: "{{ configs }}"
8 |
9 | - name: Container is running
10 | docker:
11 | name: jenkins
12 | image: jenkins
13 | ports: "{{ jenkins_port }}:8080"
14 | volumes:
15 | - /data/jenkins:/var/jenkins_home
16 | - /vagrant/.vagrant/machines:/machines
17 | register: container_result
18 |
19 | - pause: seconds=60
20 | when: container_result|changed
21 |
22 | - name: Configurations are present
23 | copy:
24 | src: "{{ item.src }}"
25 | dest: "{{ item.dir }}/{{ item.file }}"
26 | mode: "{{ item.mode }}"
27 | with_items: "{{ configs }}"
28 | register: configs_result
29 |
30 | - name: Plugins are installed
31 | shell: "curl -X POST \
32 | -d '' \
33 | --header 'Content-Type: text/xml' \
34 | http://{{ facter_ipaddress_eth1 }}:{{ jenkins_port }}/pluginManager/installNecessaryPlugins"
35 | args:
36 | creates: /data/jenkins/plugins/{{ item }}
37 | with_items: "{{ plugins }}"
38 | register: plugins_result
39 |
40 | - wait_for:
41 | path: /data/jenkins/plugins/{{ item }}
42 | with_items: "{{ plugins }}"
43 |
44 | # Workaround to fix the bug in restarting containers from Ansible 2
45 |
46 | - name: Container is stopped
47 | docker:
48 | name: jenkins
49 | image: jenkins
50 | state: stopped
51 | when: configs_result|changed or plugins_result|changed
52 |
53 | - name: Container is running
54 | docker:
55 | name: jenkins
56 | image: jenkins
57 | ports: "{{ jenkins_port }}:8080"
58 | volumes:
59 | - /data/jenkins:/var/jenkins_home
60 | - /vagrant/.vagrant/machines:/machines
61 | when: configs_result|changed or plugins_result|changed
62 |
63 | - pause: seconds=30
64 | when: configs_result|changed or plugins_result|changed
65 |
66 | - name: Job directories are present
67 | file:
68 | path: "{{ home }}/jobs/{{ item }}"
69 | state: directory
70 | mode: 0777
71 | with_items: "{{ jobs }}"
72 |
73 | - name: Jobs are present
74 | template:
75 | src: "{{ item }}-config.xml"
76 | dest: "{{ home }}/jobs/{{ item }}/config.xml"
77 | mode: 0777
78 | with_items: "{{ jobs }}"
79 | register: jobs_result
80 |
81 | - name: Jenkins is reloaded
82 | uri:
83 | url: http://{{ facter_ipaddress_eth1 }}:8080/reload
84 | method: POST
85 | status_code: 200,302
86 | when: jobs_result|changed
87 | ignore_errors: yes
88 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/templates/deploy-to-swarm-with-proxy-config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | false
6 |
7 |
8 |
9 | -1
10 | 25
11 | -1
12 | -1
13 |
14 |
15 |
16 |
17 |
18 | false
19 |
20 |
21 | false
22 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/templates/deploy-to-swarm-with-proxy.groovy:
--------------------------------------------------------------------------------
1 | node("docker") {
2 | git "https://github.com/vfarcic/docker-flow-proxy.git"
3 | withEnv(["DOCKER_HOST=tcp://10.100.192.200:2375"]) {
4 | sh "docker-compose -p books-ms -f docker-compose-demo.yml up -d"
5 | }
6 | sh "curl \"proxy:8081/v1/docker-flow-proxy/reconfigure?serviceName=books-ms&servicePath=/api/v1/books\""
7 | }
--------------------------------------------------------------------------------
/ansible/roles/jenkins/templates/deploy-to-swarm-without-proxy-config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | false
6 |
7 |
8 |
9 | -1
10 | 25
11 | -1
12 | -1
13 |
14 |
15 |
16 |
17 |
18 | false
19 |
20 |
21 | false
22 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/templates/deploy-to-swarm-without-proxy.groovy:
--------------------------------------------------------------------------------
1 | node("docker") {
2 | git "https://github.com/vfarcic/docker-flow-proxy.git"
3 | withEnv(["DOCKER_HOST=tcp://10.100.192.200:2375"]) {
4 | sh "docker-compose -p books-ms -f docker-compose-demo.yml up -d"
5 | }
6 | }
--------------------------------------------------------------------------------
/ansible/roles/jenkins/templates/scale-to-swarm-with-proxy-config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | false
6 |
7 |
8 |
9 | -1
10 | 25
11 | -1
12 | -1
13 |
14 |
15 |
16 |
17 |
18 | SCALE
19 |
20 | 1
21 |
22 |
23 |
24 |
25 |
26 |
27 | false
28 |
29 |
30 | false
31 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/templates/scale-to-swarm-with-proxy.groovy:
--------------------------------------------------------------------------------
1 | node("docker") {
2 | git "https://github.com/vfarcic/docker-flow-proxy.git"
3 | withEnv(["DOCKER_HOST=tcp://10.100.192.200:2375"]) {
4 | sh "docker-compose -p books-ms -f docker-compose-demo.yml scale app=${SCALE}"
5 | }
6 | sh "curl \"proxy:8081/v1/docker-flow-proxy/reconfigure?serviceName=books-ms&servicePath=/api/v1/books\""
7 | }
--------------------------------------------------------------------------------
/ansible/roles/registrator/defaults/main.yml:
--------------------------------------------------------------------------------
1 | registrator_name: registrator
2 | registrator_protocol: consul
3 | registrator_port: 8500
4 |
--------------------------------------------------------------------------------
/ansible/roles/registrator/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Container is running
2 | docker:
3 | name: registrator
4 | image: gliderlabs/registrator
5 | volumes:
6 | - /var/run/docker.sock:/tmp/docker.sock
7 | hostname: "{{ ansible_hostname }}"
8 | command: -ip {{ facter_ipaddress_eth1 }} consul://{{ consul_ip }}:8500
9 |
--------------------------------------------------------------------------------
/ansible/roles/swarm/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Swarm node is running
2 | docker:
3 | name: swarm-node
4 | image: swarm
5 | command: join --advertise={{ facter_ipaddress_eth1 }}:2375 consul://{{ consul_ip }}:8500/swarm
6 | env:
7 | SERVICE_NAME: swarm-node
8 | when: not swarm_master is defined
9 | tags: [swarm]
10 |
11 | - name: Swarm master is running
12 | docker:
13 | name: swarm-master
14 | image: swarm
15 | ports: 2375:2375
16 | command: manage consul://{{ consul_ip }}:8500/swarm
17 | env:
18 | SERVICE_NAME: swarm-master
19 | when: swarm_master is defined
20 | tags: [swarm]
21 |
--------------------------------------------------------------------------------
/ansible/swarm.yml:
--------------------------------------------------------------------------------
1 | - hosts: swarm
2 | remote_user: vagrant
3 | serial: 1
4 | roles:
5 | - common
6 | - docker
7 | - registrator
8 | - swarm
9 |
--------------------------------------------------------------------------------
/articles/deployment-scaling.md:
--------------------------------------------------------------------------------
1 | Since the first time I laid my hands on Docker, I started writing scripts that I've been running as my continuous deployment flow. I ended up with Shell scripts, Ansible playbooks, Chef cookbooks, Jenkins Pipelines, and so on. Each of those had a similar (not to say the same) objective inside a different context. I realized that was a huge waste of time and decided to create a single executable that I'll be able to run no matter which tool executes the continuous deployment pipeline. The result was the birth of the [Docker Flow](https://github.com/vfarcic/docker-flow) project.
2 |
3 | Features
4 | ========
5 |
6 | The goal of the project is to add features and processes that are currently missing inside the Docker ecosystem. The project is in its infancy and, at the moment, solves only the problems of *blue-green deployments* and *relative scaling*. Many additional features will be added very soon.
7 |
8 | I'll restrain myself from explaining *blue-green deployment* since I already wrote quite a few articles on this subject. If you are not familiar with it, please read the [Blue-Green Deployment](http://technologyconversations.com/2016/02/08/blue-green-deployment/) post. For a more hands-on example with [Jenkins Pipeline](https://wiki.jenkins-ci.org/display/JENKINS/Pipeline+Plugin), please read the [Blue-Green Deployment To Docker Swarm with Jenkins Workflow Plugin](http://technologyconversations.com/2015/12/08/blue-green-deployment-to-docker-swarm-with-jenkins-workflow-plugin/) post. Finally, for another practical example in a much broader context (and without Jenkins), please consult the [Scaling To Infinity with Docker Swarm, Docker Compose and Consul](http://technologyconversations.com/2015/07/02/scaling-to-infinity-with-docker-swarm-docker-compose-and-consul-part-14-a-taste-of-what-is-to-come/) series.
9 |
10 | The second feature I wanted to present is *relative scaling*. Docker Compose makes it very easy to scale services to a fixed number. We can specify how many instances of a container we want to run and watch the magic unfold. When combined with Docker Swarm, the result is an easy way to manage containers inside a cluster. Depending on how many instances are already running, Docker Compose will increase (or decrease) the number of running containers so that the desired number of instances is achieved. However, Docker Compose always expects a fixed number as the parameter. I found this very limiting when dealing with production deployments. In many cases, I do not want to have to know how many instances are already running but, simply, to send a signal to increase (or decrease) the capacity by some factor. For example, we might have an increase in traffic and want to increase the capacity by three instances. Similarly, if the demand for some service decreases, we might want the number of running instances to decrease as well and, in that way, free resources for other services and processes. This necessity is even more evident when we move towards autonomous and automated [Self-Healing Systems](http://technologyconversations.com/2016/01/26/self-healing-systems/) where human interactions are reduced to a minimum.
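
As a quick preview of the difference (the exact commands are demonstrated later in this article), compare the absolute number Docker Compose expects with the relative values *Docker Flow* accepts.

```bash
# Docker Compose: always an absolute target number of instances.
docker-compose scale app=4

# Docker Flow: a relative change, regardless of how many instances are currently running.
./docker-flow --flow=scale --scale=+2 # add two instances
./docker-flow --flow=scale --scale=-1 # remove one instance
```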
11 |
12 | Running Docker Flow
13 | ===================
14 |
15 | **Docker Flow** requirements are [Docker Engine](https://www.docker.com/products/docker-engine), [Docker Compose](https://www.docker.com/products/docker-compose), and [Consul](https://www.consul.io/). The idea behind the project is not to substitute any Docker functionality but to provide additional features. It assumes that containers are defined in a docker-compose.yml file (path can be changed) and that Consul is used as the service registry (soon to be extended to etcd and Zookeeper as well).
16 |
17 | The examples that follow will set up an environment with Docker Engine and Consul, and they assume that you already have the [Docker Toolbox](https://www.docker.com/products/docker-toolbox) installed. Even though the examples will be run through the *Docker Toolbox Terminal*, feel free to apply them to your existing setup. You can execute them on any of your Docker servers or, even better, inside a Docker Swarm cluster.
18 |
19 | Let's start setting up the environment we'll use throughout this article. Please launch the *Docker Toolbox Terminal* and clone the project repository.
20 |
21 | ```bash
22 | git clone https://github.com/vfarcic/docker-flow
23 |
24 | cd docker-flow
25 | ```
26 |
27 | The next step is to download the latest *Docker Flow* release. Please open the [Docker Flow Releases](https://github.com/vfarcic/docker-flow/releases/latest) page, download the release that matches your OS, rename it to docker-flow, and make sure that it has execute permissions (i.e. `chmod +x docker-flow`). The rest of the article will assume that the binary is in the *docker-flow* directory created when we cloned the repository. If, later on, you choose to keep using it, please place it in one of the directories included in your *PATH*.
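
For illustration, assuming a 64-bit Linux host and the *v1.0.2* release (the same binary the Ansible role in this repository downloads), the steps could look like this:

```bash
# Download the binary that matches your OS, rename it, and make it executable.
curl -L https://github.com/vfarcic/docker-flow/releases/download/v1.0.2/docker-flow_linux_amd64 \
    -o docker-flow
chmod +x docker-flow
```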
28 |
29 | Before we proceed, let's take a look at the *docker-compose.yml* file we'll use.
30 |
31 | ```yml
32 | version: '2'
33 |
34 | services:
35 | app:
36 | image: vfarcic/books-ms${BOOKS_MS_VERSION}
37 | ports:
38 | - 8080
39 | environment:
40 | - SERVICE_NAME=books-ms
41 | - DB_HOST=books-ms-db
42 |
43 | db:
44 | container_name: books-ms-db
45 | image: mongo
46 | environment:
47 | - SERVICE_NAME=books-ms-db
48 | ```
49 |
50 | As you can see, it does not contain anything special. It has two targets. The *app* target defines the main container of the service. The *db* is a "side" target required by the *app*. Since the version is *2*, Docker Compose will utilize one of its new features and create a network around those targets, allowing the containers to communicate with each other (handy if deployed to a cluster). Finally, the *app* image uses the *BOOKS_MS_VERSION* environment variable, which will enable us to simulate multiple releases. I assume that you have already used Docker Compose, so there is no reason to go into more detail.
51 |
52 | We'll use *docker-machine* to create a VM that will simulate our production environment.
53 |
54 | ```bash
55 | docker-machine create \
56 | -d virtualbox \
57 | docker-flow
58 |
59 | eval "$(docker-machine env docker-flow)"
60 | ```
61 |
62 | *Docker Flow* needs to store the state of the containers it is deploying, so I chose to use *Consul* in this first iteration of the project. The other registries (*etcd*, *Zookeeper*, and whichever others get requested) will follow soon.
63 |
64 | Let's bring up an instance.
65 |
66 | ```bash
67 | docker run -d \
68 | -p "8500:8500" \
69 | -h "consul" \
70 | --name "consul" \
71 | progrium/consul -server -bootstrap
72 |
73 | export CONSUL_IP=$(docker-machine ip docker-flow)
74 | ```
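
If you'd like to confirm that Consul is up before proceeding, you can query its HTTP API (an optional sanity check, not required by the rest of the article):

```bash
# Should return a JSON object that contains at least the "consul" service.
curl http://$CONSUL_IP:8500/v1/catalog/services
```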
75 |
76 | Now we are ready to start deploying the *books-ms* service.
77 |
78 | Deployment With Downtime
79 | ------------------------
80 |
81 | Before we dive into *Docker Flow* features, let's see how deployment works with Docker Compose.
82 |
83 | We'll start by deploying the release *1.0*.
84 |
85 | ```bash
86 | BOOKS_MS_VERSION=:1.0 docker-compose up -d app db
87 |
88 | docker-compose ps
89 | ```
90 |
91 | The output of the `docker-compose ps` command is as follows.
92 |
93 | ```
94 | Name Command State Ports
95 | --------------------------------------------------------------------------
96 | books-ms-db /entrypoint.sh mongod Up 27017/tcp
97 | dockerflow_app_1 /run.sh Up 0.0.0.0:32771->8080/tcp
98 | ```
99 |
100 | We haven't done anything special (yet). The two containers (*mongo* and *books-ms* version 1.0) are running.
101 |
102 | Let's take a look at what happens if we deploy a new release.
103 |
104 | ```
105 | export BOOKS_MS_VERSION=:latest
106 |
107 | docker-compose up -d app db
108 | ```
109 |
110 | The output of the `docker-compose up` command is as follows.
111 |
112 | ```
113 | Recreating dockerflow_app_1
114 | books-ms-db is up-to-date
115 | ```
116 |
117 | The problem lies in the first line, which states that *dockerflow_app_1* is being recreated. Quite a few problems might arise from that, the two primary ones being downtime and the inability to test the new release before making it available in production. Not only will the service be unavailable during a (hopefully) short period, but it will be untested as well. In this particular case, I do not want to say that the service is not tested at all, but that the deployment has not been tested. Even if you run a set of integration tests in a staging environment, there is no guarantee that the same tests would pass in production. For that reason, a preferable way to handle this scenario is to apply the *blue-green deployment* process. We should run the new release in parallel with the old one, execute a set of tests that confirm that the deployment was done correctly and that the service is integrated with the rest of the system, and, if everything went as expected, switch the proxy from the old to the new release. With this process, we avoid downtime and, at the same time, guarantee that the new release is indeed working as expected before it becomes available to our users.
118 |
119 | Before we see how we can accomplish *blue-green deployment* with *Docker Flow*, let's destroy the containers we just ran and start over.
120 |
121 | ```bash
122 | docker-compose down
123 | ```
124 |
125 | Blue-Green Deployment
126 | ---------------------
127 |
128 | *Docker Flow* is a single binary sitting on top of Docker Compose and utilizing service discovery to decide which actions should be performed. It allows a combination of three kinds of inputs: command line arguments, environment variables, and the *docker-flow.yml* definition. We'll start with the command line arguments.
129 |
130 | ```bash
131 | ./docker-flow \
132 | --consul-address=http://$CONSUL_IP:8500 \
133 | --target=app \
134 | --side-target=db \
135 | --blue-green
136 | ```
137 |
138 | Let's see which containers are running.
139 |
140 | ```bash
141 | docker ps \
142 | --format "table{{.Image}}\t{{.Status}}\t{{.Names}}"
143 | ```
144 |
145 | The output of the `docker ps` command is as follows.
146 |
147 | ```
148 | IMAGE STATUS NAMES
149 | vfarcic/books-ms:latest Up About a minute dockerflow_app-blue_1
150 | mongo Up About a minute books-ms-db
151 | progrium/consul Up 32 minutes consul
152 | ```
153 |
154 | The significant difference, when compared with the earlier examples without *Docker Flow*, is the name of the deployed target. This time, the container's name is *dockerflow_app-blue_1*. Since this was the first deployment, only the *blue* release is running.
155 |
156 | Let's see what happens when we deploy the second release. This time, we'll use a combination of environment variables and the *docker-flow.yml* file. The latter is as follows.
157 |
158 | ```yml
159 | target: app
160 | side_targets:
161 | - db
162 | blue_green: true
163 | ```
164 |
165 | As you can see, the arguments in the *docker-flow.yml* file are (almost) the same as those we used through the command line. The difference is that the keys use underscores (*_*) instead of dashes (*-*) so that they follow YML conventions. The second difference is in the way lists (in this case *side_targets*) are defined in YML.
166 |
167 | Environment variables follow the same naming but, just like the YML keys, are formatted to match the appropriate conventions. They are all in capital letters. Another difference is the *FLOW* prefix. It is added so that environment variables used with *Docker Flow* do not override other variables that might be in use by your system.
168 |
169 | It is up to you to choose whether you prefer command line arguments, YML specification, environment variables, or a combination of those three.
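
To illustrate the equivalence with the one argument that appears in all three forms throughout this article, here is how the Consul address could be specified (the `consul_address` YML key is my assumption, derived from the naming rules above):

```bash
# 1. Command line argument
./docker-flow --consul-address=http://$CONSUL_IP:8500 --flow=deploy

# 2. Environment variable (capital letters plus the FLOW_ prefix)
export FLOW_CONSUL_ADDRESS=http://$CONSUL_IP:8500

# 3. docker-flow.yml entry (assumed key name, following the underscore convention)
# consul_address: http://<consul-ip>:8500
```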
170 |
171 | Let's repeat the deployment. This time, we'll specify the Consul address as an environment variable and use the *docker-flow.yml* file for the rest of the arguments.
172 |
173 | ```bash
174 | export FLOW_CONSUL_ADDRESS=http://$CONSUL_IP:8500
175 |
176 | ./docker-flow --flow deploy
177 |
178 | docker ps \
179 | --format "table{{.Image}}\t{{.Status}}\t{{.Names}}"
180 | ```
181 |
182 | The output of the `docker ps` command is as follows.
183 |
184 | ```
185 | IMAGE STATUS NAMES
186 | vfarcic/books-ms:latest Up 24 seconds dockerflow_app-green_1
187 | vfarcic/books-ms:latest Up 4 minutes dockerflow_app-blue_1
188 | mongo Up About an hour books-ms-db
189 | progrium/consul Up 2 hours consul
190 | ```
191 |
192 | As you can see, this time, both releases are running in parallel. The *green* release has joined the *blue* release we ran earlier. At this moment, you should run your *integration tests* (I tend to call them *post-deployment tests*) and, if everything seems to be working correctly, change your proxy to point to the new release (*green*). The choice of how to do that is yours (I tend to use *Consul Template* to reconfigure my *nginx* or *HAProxy*). The plan is to incorporate proxy reconfiguration and reloading into *Docker Flow*, but, until then, you should do that yourself.
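
As a rough sketch of that manual step (the template file, the nginx paths, and the reload command are illustrative assumptions, not something Docker Flow provides), a Consul Template run could look along these lines:

```bash
# Render an nginx include from the service data stored in Consul, then reload nginx.
consul-template \
    -consul $CONSUL_IP:8500 \
    -template "books-ms.ctmpl:/etc/nginx/includes/books-ms.conf:nginx -s reload" \
    -once
```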
193 |
194 | Once the deployment is tested, and the proxy is reconfigured, your users will be redirected to the new release, and you can stop the old one. *Docker Flow* can help you with that as well.
195 |
196 | ```bash
197 | ./docker-flow --flow=stop-old
198 |
199 | docker ps -a \
200 | --format "table{{.Image}}\t{{.Status}}\t{{.Names}}"
201 | ```
202 |
203 | The output of the `docker ps` command is as follows.
204 |
205 | ```
206 | IMAGE STATUS NAMES
207 | vfarcic/books-ms:latest Up 5 minutes dockerflow_app-green_1
208 | vfarcic/books-ms:latest Exited (137) 38 seconds ago dockerflow_app-blue_1
209 | mongo Up About an hour books-ms-db
210 | progrium/consul Up 2 hours consul
211 | ```
212 |
213 | As you can see, it stopped the old release (*blue*). Containers were intentionally only stopped, and not removed, so that you can easily rollback in case you discover that something went wrong after the proxy has been reconfigured.
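
If you do discover a problem after the switch, a minimal rollback sketch (plain Docker commands, under the assumption that you reconfigure the proxy yourself) is to start the stopped container again and point the proxy back at it:

```bash
# Bring the old (blue) release back...
docker start dockerflow_app-blue_1

# ...and reconfigure the proxy to route requests to it again (with whatever mechanism you use).
```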
214 |
215 | Let's take a look at the second *Docker Flow* feature.
216 |
217 | Relative Scaling
218 | ----------------
219 |
220 | Just like with Docker Compose, *Docker Flow* allows you to scale a service to a fixed number of instances.
221 |
222 | Let's scale our service to two instances.
223 |
224 | ```bash
225 | ./docker-flow --flow=deploy --scale=2
226 |
227 | docker ps \
228 | --format "table{{.Image}}\t{{.Status}}\t{{.Names}}"
229 | ```
230 |
231 | The output of the `docker ps` command is as follows.
232 |
233 | ```
234 | IMAGE STATUS NAMES
235 | vfarcic/books-ms:latest Up 4 seconds dockerflow_app-blue_2
236 | vfarcic/books-ms:latest Up 4 seconds dockerflow_app-blue_1
237 | vfarcic/books-ms:latest Up About a minute dockerflow_app-green_1
238 | mongo Up 4 hours books-ms-db
239 | progrium/consul Up 5 hours consul
240 | ```
241 |
242 |
243 | As expected, two instances of the new release (*blue*) were deployed. This behavior is the same as what Docker Compose offers (with the addition of blue-green deployment). What *Docker Flow* adds is the ability to prepend the *scale* value with a plus (*+*) or minus (*-*) sign. Let's see it in action before discussing the benefits.
244 |
245 | ```bash
246 | ./docker-flow --flow=deploy --scale=+2
247 |
248 | docker ps \
249 | --format "table{{.Image}}\t{{.Status}}\t{{.Names}}"
250 | ```
251 |
252 | The output of the `docker ps` command is as follows.
253 |
254 | ```
255 | IMAGE STATUS NAMES
256 | vfarcic/books-ms:latest Up 6 seconds dockerflow_app-green_4
257 | vfarcic/books-ms:latest Up 7 seconds dockerflow_app-green_3
258 | vfarcic/books-ms:latest Up 7 seconds dockerflow_app-green_2
259 | vfarcic/books-ms:latest Up 22 minutes dockerflow_app-blue_2
260 | vfarcic/books-ms:latest Up 22 minutes dockerflow_app-blue_1
261 | vfarcic/books-ms:latest Up 24 minutes dockerflow_app-green_1
262 | mongo Up 5 hours books-ms-db
263 | progrium/consul Up 5 hours consul
264 | ```
265 |
266 | Since there were two instances of the previous release, the new release was deployed, and the number of instances was increased by two (four in total). While this is useful when we want to deploy a new release and know in advance that the number of instances should be scaled, a much more commonly used option will be to run *Docker Flow* with the *--flow=scale* argument. It will follow the same rules of scaling (and de-scaling) but without deploying a new release. Before we try it out, let's stop the old release.
267 |
268 | ```
269 | ./docker-flow --flow=stop-old
270 | ```
271 |
272 | Let's try `--flow scale` to descale the number of instances by one.
273 |
274 | ```bash
275 | ./docker-flow --scale=-1 --flow=scale
276 |
277 | docker ps \
278 | --format "table{{.Image}}\t{{.Status}}\t{{.Names}}"
279 | ```
280 |
281 | The output of the `docker ps` command is as follows.
282 |
283 | ```
284 | IMAGE STATUS NAMES
285 | vfarcic/books-ms:latest Up 19 minutes dockerflow_app-green_3
286 | vfarcic/books-ms:latest Up 19 minutes dockerflow_app-green_2
287 | vfarcic/books-ms:latest Up 43 minutes dockerflow_app-green_1
288 | mongo Up 5 hours books-ms-db
289 | progrium/consul Up 5 hours consul
290 | ```
291 |
292 | The number of running instances was reduced by one (from four to three). Scaling and descaling with relative values has many uses. You might, for example, schedule one of your services to scale up every Monday morning because you know, from experience, that's when it receives increased traffic. Following the same scenario, you might want to descale it every Monday afternoon because, at that time, traffic tends to go back to normal and you'd like to free resources for other services. When scaling and descaling are automated, using absolute numbers is perilous. You might have a script that scales from four to six instances during peak hours. After some time, regular hours might require eight instances, and scaling to six during peak hours would have the opposite effect of actually descaling the service. The need for relative scaling and descaling is even more apparent in the case of [Self-Healing Systems](http://technologyconversations.com/2016/01/26/self-healing-systems/).
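
As an illustration of such scheduling (hypothetical crontab entries; they assume the *docker-flow* binary is on the *PATH*, that the commands run from the directory containing *docker-compose.yml* and *docker-flow.yml*, and that the Consul address is provided through the environment or the YML file):

```bash
# Monday 08:00: add three instances for the expected traffic increase.
0 8 * * 1  cd /books-ms && docker-flow --flow=scale --scale=+3

# Monday 17:00: release the extra capacity again.
0 17 * * 1 cd /books-ms && docker-flow --flow=scale --scale=-3
```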
293 |
294 | The Roadmap
295 | ===========
296 |
297 | Even though the examples from this article used a single server, the primary use case for those features is inside a Docker Swarm cluster. For information regarding all arguments that can be used, please refer to the [Docker Flow README](https://github.com/vfarcic/docker-flow).
298 |
299 | This is the very beginning of the *Docker Flow* project. The main idea behind it is to provide an easy way to execute processes (like blue-green deployment) and easy integration with other tools (like Consul). I have a huge list of features I'm planning to add. However, before I announce them, I would like to get some feedback. Do you think this project would be useful? Which features would you like to see next? With which tools would you like it to be integrated?
--------------------------------------------------------------------------------
/articles/img/base-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/base-architecture.png
--------------------------------------------------------------------------------
/articles/img/deployment-without-proxy-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/deployment-without-proxy-flow.png
--------------------------------------------------------------------------------
/articles/img/deployment-without-proxy-user.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/deployment-without-proxy-user.png
--------------------------------------------------------------------------------
/articles/img/first-deployment-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/first-deployment-flow.png
--------------------------------------------------------------------------------
/articles/img/first-deployment-user.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/first-deployment-user.png
--------------------------------------------------------------------------------
/articles/img/proxy-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/proxy-flow.png
--------------------------------------------------------------------------------
/articles/img/proxy-user.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/proxy-user.png
--------------------------------------------------------------------------------
/articles/img/scaling-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/scaling-flow.png
--------------------------------------------------------------------------------
/articles/img/scaling-user.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/scaling-user.png
--------------------------------------------------------------------------------
/articles/img/second-deployment-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/second-deployment-flow.png
--------------------------------------------------------------------------------
/articles/img/second-deployment-user-after.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/second-deployment-user-after.png
--------------------------------------------------------------------------------
/articles/img/second-deployment-user-before.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/second-deployment-user-before.png
--------------------------------------------------------------------------------
/articles/img/vagrant-sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vfarcic/docker-flow/ae53702aebc997bf1249c9e8ca84d67979cb51a8/articles/img/vagrant-sample.png
--------------------------------------------------------------------------------
/articles/proxy.md:
--------------------------------------------------------------------------------
1 | [Docker Flow](https://github.com/vfarcic/docker-flow) is a project aimed at creating an easy-to-use continuous deployment flow. It depends on [Docker Engine](https://www.docker.com/products/docker-engine), [Docker Compose](https://www.docker.com/products/docker-compose), [Consul](https://www.consul.io/), and [Registrator](https://github.com/gliderlabs/registrator). Each of those tools has proven to bring value and is recommended for any Docker deployment.
2 |
3 | The goal of the project is to add features and processes that are currently missing inside the Docker ecosystem. The project, at the moment, solves the problems of blue-green deployments, relative scaling, and proxy service discovery and reconfiguration. Many additional features will be added soon.
4 |
5 | The current list of features is as follows.
6 |
7 | * **Blue-green deployment**
8 | * **Relative scaling**
9 | * **Proxy reconfiguration**
10 |
11 | The latest release can be found [here](https://github.com/vfarcic/docker-flow/releases/latest).
12 |
13 | The Standard Setup
14 | ==================
15 |
16 | We'll start by exploring a typical Swarm cluster setup and discuss some of the problems we might face when using it as the cluster orchestrator. If you are already familiar with Docker Swarm, feel free to skip this section and jump straight into [The Problems](#the-problems).
17 |
18 | As a minimum, each node inside a Swarm cluster has to have [Docker Engine](https://www.docker.com/products/docker-engine) and the [Swarm container](https://hub.docker.com/_/swarm/) running. The latter container acts as a Swarm node. On top of the cluster, we need at least one Swarm container running as master, and all Swarm nodes should announce their existence to it.
19 |
20 | A combination of Swarm master(s) and nodes is a minimal setup that, in most cases, is far from sufficient. Optimum utilization of a cluster means that we are not in control anymore. Swarm is. It will decide which node is the most appropriate place for a container to run. That choice can be as simple as the node with the least number of containers running, or can be based on a more complex calculation that involves the amount of available CPU and memory, the type of hard disk, affinity, and so on. No matter the strategy we choose, the fact is that we will not know where a container will run. On top of that, we should not specify the ports our services expose. "Hard-coded" ports reduce our ability to scale services and can result in conflicts. After all, two separate processes cannot listen on the same port. Long story short, once we adopt Swarm, both the IPs and the ports of our services become unknown. So, the next step in setting up a Swarm cluster is to create a mechanism that detects deployed services and stores their information in a distributed registry so that the information is easily available.
21 |
22 | [Registrator](https://github.com/gliderlabs/registrator) is one of the tools that we can use to monitor Docker Engine events and send the information about deployed or stopped containers to a service registry. While there are many different service registries we can use, [Consul](https://www.consul.io/) proved to be, currently, the best one. Please read the [Service Discovery: Zookeeper vs etcd vs Consul](https://technologyconversations.com/2015/09/08/service-discovery-zookeeper-vs-etcd-vs-consul/) article for more information.
23 |
24 | With *Registrator* and *Consul*, we can obtain information about any of the services running inside the Swarm cluster. A diagram of the setup we discussed is as follows.
25 |
26 | 
27 |
28 | Please note that anything but a small cluster would have multiple Swarm masters and Consul instances thus preventing any loss of information or downtime in case one of them fails.
29 |
30 | The process of deploying containers, in such a setup, is as follows.
31 |
32 | * The operator sends a request to the *Swarm master* to deploy a service consisting of one or more containers. This request can be sent through the *Docker CLI* by defining the *DOCKER_HOST* environment variable with the IP and the port of the *Swarm master* (see the example after this list).
33 | * Depending on criteria sent in the request (CPU, memory, affinity, and so on), *Swarm master* makes the decision where to run the containers and sends requests to chosen *Swarm nodes*.
34 | * *Swarm node*, upon receiving the request to run (or stop) a container, invokes local *Docker Engine*, which, in turn, runs (or stops) the desired container and publishes the result as an event.
35 | * *Registrator* monitors *Docker Engine* and, upon detecting a new event, sends the information to *Consul*.
36 | * Anyone interested in data about containers running inside the cluster can consult *Consul*.
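
To make the first step above concrete, pointing the Docker CLI at the Swarm master is just a matter of exporting *DOCKER_HOST* (the address below is the *master* node used later in this article; adjust it to your own setup):

```bash
# Point the Docker client at the Swarm master instead of the local engine.
export DOCKER_HOST=tcp://10.100.192.200:2375

# From now on, docker (and docker-compose) commands are executed against the whole cluster.
docker info
```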
37 |
38 | While this process is a vast improvement when compared to the ways we were operating clusters in the past, it is far from complete and creates quite a few problems that should be solved.
39 |
40 | The Problems
41 | ============
42 |
43 | In this article, I will focus on three major problems or, to be more precise, features missing in the previously described setup.
44 |
45 | Deploying Without Downtime
46 | --------------------------
47 |
48 | When a new release is pulled, running `docker-compose up` will stop the containers running the old release and run the new one in their place. The problem with that approach is downtime. Between stopping the old release and running the new one in its place, the service is unavailable. No matter whether it is one millisecond or a full minute, a new container needs to start, and the service inside it needs to initialize.
49 |
50 | We can solve this by setting up a proxy with health checks. However, that would still require running multiple instances of the service (as you definitely should). The process would be to stop one instance and bring the new release up in its place. During the downtime of that instance, the proxy would redirect requests to one of the other instances. Then, when the first instance is running the new release and the service inside it is initialized, we would repeat the process with the other instances. This process can become very complicated and would prevent you from using the Docker Compose *scale* command.
51 |
52 | The better solution is to deploy the new release using the *blue-green* deployment process. If you are unfamiliar with it, please read the [Blue-Green Deployment](https://technologyconversations.com/2016/02/08/blue-green-deployment/) article. In a nutshell, the process deploys the new release in parallel with the old one. Throughout the process, the proxy should continue sending all requests to the old release. Once the deployment is finished and the service inside the container is initialized, the proxy should be reconfigured to send all the requests to the new release and the old one can be stopped. With a process like this, we can avoid downtime. The problem is that Swarm does not support *blue-green* deployment.
53 |
54 | Scaling Containers Using Relative Numbers
55 | -----------------------------------------
56 |
57 | *Docker Compose* makes it very easy to scale services to a fixed number. We can specify how many instances of a container we want to run and watch the magic unfold. When combined with Docker Swarm, the result is an easy way to manage containers inside a cluster. Depending on how many instances are already running, Docker Compose will increase (or decrease) the number of running containers so that the desired result is achieved.
58 |
59 | The problem is that Docker Compose always expects a fixed number as the parameter. That can be very limiting when dealing with production deployments. In many cases, we do not want to know how many instances are already running but send a signal to increase (or decrease) the capacity by some factor. For example, we might have an increase in traffic and want to increase the capacity by three instances. Similarly, if the demand for some service decreases, we might want the number of running instances to decrease by some factor and, in that way, free resources for other services and processes. This necessity is even more evident when we move towards autonomous and automated [Self-Healing Systems](http://technologyconversations.com/2016/01/26/self-healing-systems/) where human interactions are reduced to a minimum.
60 |
61 | On top of the lack of relative scaling, *Docker Compose* does not know how to maintain the same number of running instances when a new container is deployed.
62 |
63 | Proxy Reconfiguration After The New Release Is Tested
64 | -----------------------------------------------------
65 |
66 | The need for dynamic reconfiguration of the proxy becomes evident soon after we adopt a microservices architecture. Containers allow us to package services as immutable entities, and Swarm lets us deploy them inside a cluster. The adoption of immutability through containers and cluster orchestrators like Swarm resulted in a huge increase in interest in, and adoption of, microservices and, with them, an increase in deployment frequency. Unlike monolithic applications that forced us to deploy infrequently, now we can deploy often. Even if you do not adopt continuous deployment (each commit goes to production), you are likely to start deploying your microservices more often. That might be once a week, once a day, or multiple times a day. No matter the frequency, the proxy needs to be reconfigured every time a new release is deployed. Swarm will run the containers somewhere inside the cluster, and the proxy needs to be reconfigured to redirect requests to all the instances of the new release. That reconfiguration needs to be dynamic, meaning that there must be a process that retrieves information from the service registry, changes the configuration of the proxy and, finally, reloads it.
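
For reference, the Jenkins pipeline templates included in this repository reduce that whole step to a single HTTP request to the proxy service once a deployment or scaling finishes (the *proxy:8081* address and the *books-ms* parameters below are taken from those templates):

```bash
# Ask the proxy service to look up the service in the registry and reconfigure itself.
curl "proxy:8081/v1/docker-flow-proxy/reconfigure?serviceName=books-ms&servicePath=/api/v1/books"
```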
67 |
68 | There are several commonly used approaches to this problem.
69 |
70 | Manual proxy reconfiguration should be discarded for obvious reasons. Frequent deployments mean that there is no time for an operator to change the configuration manually. Even if time is not of the essence, manual reconfiguration adds a "human factor" to the process, and we are known to make mistakes.
71 |
72 | There are quite a few tools that monitor Docker events or entries in the registry and reconfigure the proxy whenever a new container is run or an old one is stopped. The problem with those tools is that they do not give us enough time to test the new release. If there is a bug or a feature is not entirely complete, our users will suffer. Proxy reconfiguration should be performed only after a set of tests is run and the new release is validated.
73 |
74 | We can incorporate tools like [Consul Template](https://github.com/hashicorp/consul-template) or [ConfD](https://github.com/kelseyhightower/confd) into our deployment scripts. Both are great and work well, but they require quite a lot of plumbing before they are truly integrated into the deployment process.
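
As an illustration of that plumbing, a deployment script would typically end with something along these lines (a sketch only; the exact flags, template, and reload command depend on the Consul Template version and on how HAProxy is set up):

```bash
# Regenerate the HAProxy configuration from a template filled with Consul data,
# then tell HAProxy to reload it. Runs once and exits.
consul-template \
    -consul 10.100.198.200:8500 \
    -template "haproxy.ctmpl:/etc/haproxy/haproxy.cfg:service haproxy reload" \
    -once
```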
75 |
76 | Solving The Problems
77 | --------------------
78 |
79 | [Docker Flow](https://github.com/vfarcic/docker-flow) is the project that solves the problems we just discussed. Its goal is to provide features that are not currently available in Docker's ecosystem. It does not replace any of the ecosystem's features but builds on top of them.
80 |
81 | Docker Flow Walkthrough
82 | =======================
83 |
84 | The examples that follow will use [Vagrant](https://www.vagrantup.com/) to simulate a [Docker Swarm](https://www.docker.com/products/docker-swarm) cluster. That does not mean that the usage of **Docker Flow** is limited to Vagrant. You can use it with a single [Docker Engine](https://www.docker.com/products/docker-engine) or a Swarm cluster set up in any other way.
85 |
86 | For similar examples based on [Docker Machine](https://www.docker.com/products/docker-machine) (tested on Linux and OS X), please read the [project README](https://github.com/vfarcic/docker-flow).
87 |
88 | Setting it up
89 | -------------
90 |
91 | Before jumping into examples, please make sure that [Vagrant](https://www.vagrantup.com/) is installed. You will not need anything else since the [Ansible](https://www.ansible.com/) playbooks we are about to run will make sure that all the tools are correctly provisioned.
92 |
93 | Please clone the code from the [vfarcic/docker-flow](https://github.com/vfarcic/docker-flow) repository.
94 |
95 | ```sh
96 | git clone https://github.com/vfarcic/docker-flow.git
97 |
98 | cd docker-flow
99 | ```
100 |
101 | With the code downloaded, we can run Vagrant and create the cluster we'll use throughout this article.
102 |
103 | ```bash
104 | vagrant plugin install vagrant-cachier
105 |
106 | vagrant up master node-1 node-2 proxy
107 | ```
108 |
109 | Once the VMs are created and provisioned, the setup will be the same as the one explained in *The Standard Setup* section of this article. The *master* server will host the *Swarm master* while nodes *1* and *2* will form the cluster. Each of those nodes will have *Registrator* pointing to the *Consul* instance running on the *proxy* server.
110 |
111 | 
112 |
113 | > Please note that this setup is for demo purposes only. While the same principle should be applied in production, you should aim at having multiple Swarm masters and Consul instances to avoid potential downtime in case one of them fails.
114 |
115 | Once the `vagrant up` command is finished, we can enter the *proxy* VM and see *Docker Flow* in action.
116 |
117 | ```bash
118 | vagrant ssh proxy
119 | ```
120 |
121 | > We'll run all the examples from the *proxy* machine. However, in production, you should run deployment commands from a separate machine (even your laptop).
122 |
123 | The latest release of the *docker-flow* binary has already been downloaded and is ready to use, and the */books-ms* directory contains the *docker-compose.yml* file we'll use in the examples that follow.
124 |
125 | Let's enter the directory.
126 |
127 | ```bash
128 | cd /books-ms
129 | ```
130 |
131 | Reconfiguring Proxy After Deployment
132 | ------------------------------------
133 |
134 | *Docker Flow* requires the address of the Consul instance as well as information about the node the proxy is (or will be) running on. There are three ways to provide the necessary information: arguments inside the *docker-flow.yml* file, environment variables, or command line arguments. In this example, we'll use all three input methods so that you can get familiar with them and choose the combination that suits your needs.
135 |
136 | Let's start by defining proxy and Consul data through environment variables.
137 |
138 | ```bash
139 | export FLOW_PROXY_HOST=proxy
140 |
141 | export FLOW_CONSUL_ADDRESS=http://10.100.198.200:8500
142 |
143 | export FLOW_PROXY_DOCKER_HOST=tcp://proxy:2375
144 |
145 | export DOCKER_HOST=tcp://master:2375
146 |
147 | export BOOKS_MS_VERSION=":latest"
148 | ```
149 |
150 | The *FLOW_PROXY_HOST* variable is the IP of the host where the proxy is running, while *FLOW_CONSUL_ADDRESS* represents the full address of the Consul API. The *FLOW_PROXY_DOCKER_HOST* is the host of the Docker Engine running on the server where the proxy container is (or will be) running. The last variable (*DOCKER_HOST*) is the address of the *Swarm master*. *Docker Flow* is designed to run operations on multiple servers at the same time, so we need to provide all the information it needs to do its tasks. In the examples we are exploring, it will deploy containers on the Swarm cluster, use the Consul instance to store and retrieve information, and reconfigure the proxy every time a new service is deployed. Finally, we set the environment variable *BOOKS_MS_VERSION* to *:latest*. The [docker-compose.yml](https://github.com/vfarcic/docker-flow/blob/master/docker-compose.yml) uses it to determine which version we want to run.
151 |
152 | Now we are ready to deploy the first release of our sample service.
153 |
154 | ```bash
155 | docker-flow \
156 | --blue-green \
157 | --target=app \
158 | --service-path="/api/v1/books" \
159 | --side-target=db \
160 | --flow=deploy --flow=proxy
161 | ```
162 |
163 | We instructed `docker-flow` to use the *blue-green deployment* process and that the target (defined in [docker-compose.yml](https://github.com/vfarcic/docker-flow/blob/master/docker-compose.yml)) is *app*. We also told it that the service exposes an API on the address */api/v1/books* and that it requires a side (or secondary) target *db*. Finally, through the `--flow` arguments we specified that we want it to *deploy* the targets and reconfigure the *proxy*. A lot happened in that single command, so we'll explore the result in more detail.
164 |
165 | Let's take a look at our servers and see what happened. We'll start with the Swarm cluster.
166 |
167 | ```bash
168 | docker ps --format "table {{.Names}}\t{{.Image}}"
169 | ```
170 |
171 | The output of the `ps` command is as follows.
172 |
173 | ```
174 | NAMES IMAGE
175 | node-2/dockerflow_app-blue_1 vfarcic/books-ms
176 | node-1/books-ms-db mongo
177 | ...
178 | ```
179 |
180 | *Docker Flow* ran our main target *app* together with the side target named *books-ms-db*. Both targets are defined in [docker-compose.yml](https://github.com/vfarcic/docker-flow/blob/master/docker-compose.yml). Container names depend on several factors: by default they are prefixed with the Docker Compose project name (which defaults to the current directory, as in the case of the *app* target), or they can be set explicitly inside the *docker-compose.yml* through the `container_name` argument (as in the case of the *db* target). The first difference you'll notice is that *Docker Flow* added *blue* to the container name. The reason behind that is the `--blue-green` argument. If present, *Docker Flow* will use the *blue-green* process to run the primary target. Since this was the first deployment, *Docker Flow* decided that it will be called *blue*. If you are unfamiliar with the process, please read the [Blue-Green Deployment](http://technologyconversations.com/2016/02/08/blue-green-deployment/) article for general information and [Docker Flow: Blue-Green Deployment and Relative Scaling](http://technologyconversations.com/2016/03/07/docker-flow-blue-green-deployment-and-relative-scaling/) for a more detailed explanation within the *Docker Flow* context.
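
To illustrate the naming, here is roughly what Compose itself would do with the same target (illustrative only; the project name is simply whatever the current directory resolves to):

```bash
# A plain Compose run names containers <project>_<service>_<index>;
# with the project derived from the directory, app would become e.g. booksms_app_1.
docker-compose up -d app

# With --blue-green, Docker Flow rewrites the target to app-blue (or app-green),
# which is why the deployed container ended up named <project>_app-blue_1.
```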
181 |
182 | Let's take a look at the *proxy* node as well.
183 |
184 | ```bash
185 | export DOCKER_HOST=tcp://proxy:2375
186 |
187 | docker ps --format "table {{.Names}}\t{{.Image}}"
188 | ```
189 |
190 | The output of the `ps` command is as follows.
191 |
192 | ```
193 | NAMES IMAGE
194 | docker-flow-proxy vfarcic/docker-flow-proxy
195 | consul progrium/consul
196 | ```
197 |
198 | *Docker Flow* detected that there was no *proxy* on that node and ran it for us. The *docker-flow-proxy* container contains *HAProxy* together with custom code that reconfigures it every time a new service is run. For more information about *Docker Flow: Proxy*, please read the [project README](https://github.com/vfarcic/docker-flow-proxy).
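
*Docker Flow* calls that code for us, but the same reconfiguration can also be triggered manually through the proxy's HTTP API. A sketch based on the *Docker Flow: Proxy* README follows; adjust the service name and path to your own setup:

```bash
# Ask the proxy to (re)create the HAProxy frontend/backend for the sample service.
curl "proxy:8080/v1/docker-flow-proxy/reconfigure?serviceName=books-ms&servicePath=/api/v1/books"
```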
199 |
200 | Since we instructed Swarm to deploy the service somewhere inside the cluster, we could not know in advance which server would be chosen. In this particular case, our service ended up running on *node-2*. Moreover, to avoid potential conflicts and allow easier scaling, we did not specify which port the service should expose. In other words, neither the IP nor the port of the service was defined in advance. Among other things, *Docker Flow* solves this by running *Docker Flow: Proxy* and instructing it to reconfigure itself with the information gathered after the container is run. We can confirm that the proxy reconfiguration was indeed successful by sending an HTTP request to the newly deployed service.
201 |
202 | ```bash
203 | curl -I proxy/api/v1/books
204 | ```
205 |
206 | The output of the `curl` command is as follows.
207 |
208 | ```
209 | HTTP/1.1 200 OK
210 | Server: spray-can/1.3.1
211 | Date: Thu, 07 Apr 2016 19:23:34 GMT
212 | Access-Control-Allow-Origin: *
213 | Content-Type: application/json; charset=UTF-8
214 | Content-Length: 2
215 | ```
216 |
217 | The flow of the events was as follows.
218 |
219 | 1. **Docker Flow** inspected *Consul* to find out which release (*blue* or *green*) should be deployed next. Since this is the first deployment and no release was running, it decided to deploy it as *blue*.
220 | 2. **Docker Flow** sent the request to deploy the *blue* release to *Swarm Master*, which, in turn, decided to run the container in the *node-2*. *Registrator* detected the new event created by *Docker Engine* and registered the service information in *Consul*. Similarly, the request was sent to deploy the side target *db*.
221 | 3. **Docker Flow** retrieved the service information from *Consul*.
222 | 4. **Docker Flow** inspected the server that should host the proxy, realized that it is not running, and deployed it.
223 | 5. **Docker Flow** updated *HAProxy* with service information.
224 |
225 | 
226 |
227 | Even though our service is running on one of the servers chosen by Swarm and is exposing a random port, the proxy was reconfigured, and our users can access it through a fixed IP and without a port (to be more precise, through the standard HTTP port 80 or HTTPS port 443).
228 |
229 | 
230 |
231 | Let's see what happens when the second release is deployed.
232 |
233 | Deploying a New Release Without Downtime
234 | ----------------------------------------
235 |
236 | After some time, a developer will push a new commit, and we'll want to deploy a new release of the service. We do not want any downtime, so we'll continue using the *blue-green* process. Since the current release is *blue*, the new one will be named *green*. Downtime will be avoided by running the new release (*green*) in parallel with the old one (*blue*) and, once it is fully up and running, reconfiguring the proxy so that all requests are sent to the new release. Only after the proxy is reconfigured do we want the old release to stop running and free the resources it was using. We can accomplish all that by running the same `docker-flow` command. However, this time, we'll leverage the [docker-flow.yml](https://github.com/vfarcic/docker-flow/blob/master/docker-flow.yml) file that already contains some of the arguments we used before.
237 |
238 | The content of the [docker-flow.yml](https://github.com/vfarcic/docker-flow/blob/master/docker-flow.yml) is as follows.
239 |
240 | ```yml
241 | target: app
242 | side_targets:
243 | - db
244 | blue_green: true
245 | service_path:
246 | - /api/v1/books
247 | ```
248 |
249 | Let's run the new release.
250 |
251 | ```bash
252 | export DOCKER_HOST=tcp://master:2375
253 |
254 | docker-flow \
255 | --flow=deploy --flow=proxy --flow=stop-old
256 | ```
257 |
258 | Just like before, let's explore Docker processes and see the result.
259 |
260 | ```bash
261 | docker ps -a --format "table {{.Names}}\t{{.Image}}\t{{.Status}}"
262 | ```
263 |
264 | The output of the `ps` command is as follows.
265 |
266 | ```
267 | NAMES IMAGE STATUS
268 | node-1/booksms_app-green_1 vfarcic/books-ms Up 33 seconds
269 | node-2/booksms_app-blue_1 vfarcic/books-ms Exited (137) 22 seconds ago
270 | node-1/books-ms-db mongo Up 41 minutes
271 | ...
272 | ```
273 |
274 | From the output, we can observe that the new release (*green*) is running and that the old one (*blue*) was stopped. The reason the old release was only stopped, and not entirely removed, lies in the potential need to roll back quickly in case a problem is discovered at some later moment.
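
If a problem were discovered later, one possible way to roll back (a sketch, not a built-in *Docker Flow* feature) is simply to redeploy the previous image version, which flips the colors back and points the proxy at it again. The tag below is hypothetical:

```bash
# Point the deployment at the previous, known-good image tag (hypothetical value).
export BOOKS_MS_VERSION=":1.0"

# Run the same flow: deploy it as the other color, reconfigure the proxy,
# and stop the problematic release.
docker-flow \
    --flow=deploy --flow=proxy --flow=stop-old
```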
275 |
276 | Let's confirm that the proxy was reconfigured as well.
277 |
278 | ```bash
279 | curl -I proxy/api/v1/books
280 | ```
281 |
282 | The output of the `curl` command is as follows.
283 |
284 | ```
285 | HTTP/1.1 200 OK
286 | Server: spray-can/1.3.1
287 | Date: Thu, 07 Apr 2016 19:45:07 GMT
288 | Access-Control-Allow-Origin: *
289 | Content-Type: application/json; charset=UTF-8
290 | Content-Length: 2
291 | ```
292 |
293 | The flow of the events was as follows.
294 |
295 | 1. **Docker Flow** inspected *Consul* to find out which release (*blue* or *green*) should be deployed next. Since the previous release was *blue*, it decided to deploy it as *green*.
296 | 2. **Docker Flow** sent the request to *Swarm Master* to deploy the *green* release, which, in turn, decided to run the container in the *node-1*. *Registrator* detected the new event created by *Docker Engine* and registered the service information in *Consul*.
297 | 3. **Docker Flow** retrieved the service information from *Consul*.
298 | 4. **Docker Flow** updated *HAProxy* with service information.
299 | 5. **Docker Flow** stopped the old release.
300 |
301 | 
302 |
303 | Throughout the first three steps of the flow, HAProxy continued sending all requests to the old release. As a result, users were oblivious to the fact that a deployment was in progress.
304 |
305 | 
306 |
307 | Only after the deployment was finished was HAProxy reconfigured and users redirected to the new release. As a result, there was no downtime caused by the deployment.
308 |
309 | 
310 |
311 | Now that we have a safe way to deploy new releases, let us turn our attention to relative scaling.
312 |
313 | ### Scaling the service
314 |
315 | One of the great benefits *Docker Compose* provides is scaling. We can use it to scale to any number of instances. However, it allows only absolute scaling. We cannot instruct *Docker Compose* to apply relative scaling. That makes the automation of some processes difficult. For example, we might have an increase in traffic that requires us to increase the number of instances by two. In such a scenario, the automation script would need to obtain the number of instances that are currently running, do some simple math to get the desired number, and pass the result to Docker Compose. On top of all that, the proxy still needs to be reconfigured as well. *Docker Flow* makes this process much easier.
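
Just to illustrate the plumbing *Docker Flow* saves us from, such an automation script would look roughly like this (a sketch; the `name` filter and the project name are illustrative):

```bash
# Find out how many instances of the target are currently running.
CURRENT=$(docker ps -q --filter name=booksms_app | wc -l)

# Do the math ourselves to get an absolute number...
DESIRED=$((CURRENT + 2))

# ...pass it to Docker Compose, and still remember to reconfigure the proxy afterwards.
docker-compose -p booksms scale app=$DESIRED
```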
316 |
317 | Let's see it in action.
318 |
319 | ```bash
320 | docker-flow \
321 | --scale="+2" \
322 | --flow=scale --flow=proxy
323 | ```
324 |
325 | The scaling result can be observed by listing the currently running Docker processes.
326 |
327 | ```bash
328 | docker ps --format "table {{.Names}}\t{{.Image}}\t{{.Status}}"
329 | ```
330 |
331 | The output of the `ps` command is as follows.
332 |
333 | ```
334 | NAMES IMAGE STATUS
335 | node-2/booksms_app-green_2 vfarcic/books-ms:latest Up 5 seconds
336 | node-1/booksms_app-green_3 vfarcic/books-ms:latest Up 6 seconds
337 | node-1/booksms_app-green_1 vfarcic/books-ms:latest Up 40 minutes
338 | node-1/books-ms-db mongo Up 53 minutes
339 | ```
340 |
341 | The number of instances was increased by two. While only one instance was running before, now we have three.
342 |
343 | Similarly, the proxy was reconfigured as well and, from now on, it will load balance all requests among those three instances.
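
A quick way to exercise the setup is to send a handful of requests through the proxy; HAProxy will spread them across the three instances (typically round-robin), even though that distribution is not visible in the responses themselves:

```bash
# Each of these requests goes through the proxy and lands on one of the three instances.
for i in 1 2 3 4 5 6; do
    curl -I proxy/api/v1/books
done
```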
344 |
345 | The flow of the events was as follows.
346 |
347 | 1. **Docker Flow** inspected *Consul* to find out how many instances are currently running.
348 | 2. Since only one instance was running and we specified that we want to increase that number by two, **Docker Flow** sent the request to *Swarm Master* to scale the *green* release to three, which, in turn, decided to run one container on *node-1* and the other on *node-2*. *Registrator* detected the new events created by *Docker Engine* and registered two new instances in *Consul*.
349 | 3. **Docker Flow** retrieved the service information from *Consul*.
350 | 4. **Docker Flow** updated *HAProxy* with the service information and set it up to perform load balancing among all three instances.
351 |
352 | 
353 |
354 | From the users' perspective, they continue receiving responses from the current release but, this time, their requests are load balanced among all the instances of the service. As a result, the service's performance is improved.
355 |
356 | 
357 |
358 | We can use the same method to de-scale the number of instances by prefixing the value of the `--scale` argument with the minus sign (*-*). Following the same example, when the traffic decreases, we can de-scale the number of instances by running the following command.
359 |
360 | ```bash
361 | docker-flow \
362 | --scale="-1" \
363 | --flow=scale --flow=proxy
364 | ```
365 |
366 | Testing Deployments to Production
367 | ---------------------------------
368 |
369 | The major downside of the proxy examples we have run so far is the inability to verify the release before reconfiguring the proxy. Ideally, we should use the *blue-green* process to deploy the new release in parallel with the old one, run a set of tests that validate that everything is working as expected, and, finally, reconfigure the proxy only if all the tests were successful. We can accomplish that easily by running `docker-flow` twice.
370 |
371 | > Many tools aim at providing zero-downtime deployments, but only a few of them (if any) take into account that a set of tests should be run before the proxy is reconfigured.
372 |
373 | First, we should deploy the new version.
374 |
375 | ```bash
376 | docker-flow \
377 | --flow=deploy
378 | ```
379 |
380 | Let's list the Docker processes.
381 |
382 | ```bash
383 | docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
384 | ```
385 |
386 | The output of the `ps` command is as follows.
387 |
388 | ```
389 | node-1/booksms_app-blue_2 Up 8 minutes 10.100.192.201:32773->8080/tcp
390 | node-2/booksms_app-blue_1 Up 8 minutes 10.100.192.202:32771->8080/tcp
391 | node-2/booksms_app-green_2 Up About an hour 10.100.192.202:32770->8080/tcp
392 | node-1/booksms_app-green_1 Up 2 hours 10.100.192.201:32771->8080/tcp
393 | node-1/books-ms-db Up 2 hours 27017/tcp
394 | ```
395 |
396 | At this moment, the new release (*blue*) is running in parallel with the old release (*green*). Since we did not specify the *--flow=proxy* argument, the proxy is left unchanged and still redirects all requests to the instances of the old release. What this means is that the users of our service still see the old release, while we have the opportunity to test the new one. We can run integration, functional, or any other type of tests and validate that the new release indeed meets our expectations. While testing in production does not exclude testing in other environments (e.g. staging), this approach gives us a greater level of trust, since we validate the software under the same circumstances our users will use it in, while, at the same time, not affecting them during the process (they are still oblivious to the existence of the new release).
397 |
398 | > Please note that even though we did not specify the number of instances that should be deployed, *Docker Flow* deployed the new release and scaled it to the same number of instances as we had before.
399 |
400 | The flow of the events was as follows.
401 |
402 | 1. **Docker Flow** inspected *Consul* to find out the color of the current release and how many instances are currently running.
403 | 2. Since two instances of the old release (*green*) were running and we didn't specify that we want to change that number, **Docker Flow** sent the request to *Swarm Master* to deploy the new release (*blue*) and scale it to two instances.
404 |
405 | 
406 |
407 | From the users' perspective, they continue receiving responses from the old release since we did not specify that we want to reconfigure the proxy.
408 |
409 | 
410 |
411 | From this moment on, you can run tests in production against the new release. Assuming that you do not overload the servers (e.g. with stress tests), the tests can run for any period without affecting users.
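
For example, using the address and port of one of the new (*blue*) instances from the `docker ps` output above, a smoke test could be as simple as:

```bash
# Hit one blue instance directly, bypassing the proxy, so users are not involved.
curl -I 10.100.192.201:32773/api/v1/books
```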
412 |
413 | After the test execution is finished, there are two paths we can take. If one of the tests failed, we can simply stop the new release and fix the problem. Since the proxy is still redirecting all requests to the old release, our users would not be affected by the failure, and we can dedicate our time to fixing the problem. On the other hand, if all tests were successful, we can run the rest of the *flow*, which will reconfigure the proxy and stop the old release.
414 |
415 | ```bash
416 | docker-flow \
417 | --flow=proxy --flow=stop-old
418 | ```
419 |
420 | The command reconfigured the proxy and stopped the old release.
421 |
422 | The flow of the events was as follows.
423 |
424 | 1. **Docker Flow** inspected *Consul* to find out the color of the current release and how many instances are running.
425 | 2. **Docker Flow** updated the proxy with service information.
426 | 3. **Docker Flow** stopped the old release.
427 |
428 | 
429 |
430 | From the user's perspective, all new requests are redirected to the new release.
431 |
432 | 
433 |
434 | That concludes the quick tour through some of the features *Docker Flow* provides. Please explore the [Usage](https://github.com/vfarcic/docker-flow#usage) section for more details.
435 |
436 | Even if you choose not to use [Docker Flow](https://github.com/vfarcic/docker-flow), the process explained in this article is useful and represents some of the best practices of a container deployment flow.
--------------------------------------------------------------------------------
/articles/templates.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | ./docker-flow \
3 | -f docker-compose-demo.yml \
4 | --flow deploy --flow proxy
5 | ```
--------------------------------------------------------------------------------
/compose/docker_compose.go:
--------------------------------------------------------------------------------
1 | package compose
2 |
3 | import (
4 | "fmt"
5 | "../util"
6 | "strings"
7 | "os"
8 | )
9 |
10 | const dockerComposeFlowPath = "docker-compose-flow.yml.tmp"
11 |
12 | var dockerCompose DockerComposer = DockerCompose{}
13 |
14 | type DockerComposer interface {
15 | CreateFlowFile(dcPath, serviceName, target string, sideTargets []string, color string, blueGreen bool) error
16 | RemoveFlow() error
17 | PullTargets(host, certPath, project string, targets []string) error
18 | UpTargets(host, certPath, project string, targets []string) error
19 | ScaleTargets(host, certPath, project, target string, scale int) error
20 | RmTargets(host, certPath, project string, targets []string) error
21 | StopTargets(host, certPath, project string, targets []string) error
22 | }
23 |
24 | type DockerCompose struct{}
25 |
26 | var GetDockerCompose = func() DockerComposer {
27 | return dockerCompose
28 | }
29 |
30 | func (dc DockerCompose) CreateFlowFile(dcPath, serviceName, target string, sideTargets []string, color string, blueGreen bool) error {
31 | // TODO: Start remove
32 | data, err := util.ReadFile(dcPath)
33 | if err != nil {
34 | return fmt.Errorf("Could not read the Docker Compose file %s\n%s", dcPath, err.Error())
35 | }
36 | s := string(data)
37 | // TODO: End remove
38 | extendedTarget := target
39 | if blueGreen {
40 | // TODO: Start remove
41 | old := fmt.Sprintf("%s:", target)
42 | new := fmt.Sprintf("%s-%s:", target, color)
43 | s = strings.Replace(string(data), old, new, 1)
44 | // TODO: End remove
45 | extendedTarget = fmt.Sprintf("%s-%s", target, color)
46 | }
47 | s = ""
48 | dcData := strings.Trim(string(data), " ")
49 | firstLine := strings.Split(dcData, "\n")[0]
50 | indent := ""
51 | dcTemplate := `
52 | %s%s:
53 | %s extends:
54 | %s file: %s
55 | %s service: %s`
56 | dcTemplateTarget := dcTemplate + `
57 | %s environment:
58 | %s - SERVICE_NAME=%s-%s`
59 | if strings.Contains(strings.ToLower(firstLine), "version") && strings.Contains(firstLine, "2") {
60 | indent = " "
61 | s = `version: '2'
62 |
63 | services:`
64 | }
65 | s += fmt.Sprintf(
66 | dcTemplateTarget,
67 | indent,
68 | extendedTarget,
69 | indent,
70 | indent,
71 | dcPath,
72 | indent,
73 | target,
74 | indent,
75 | indent,
76 | serviceName,
77 | color,
78 | )
79 | for _, sideTarget := range sideTargets {
80 | s += fmt.Sprintf(
81 | dcTemplate,
82 | indent,
83 | sideTarget,
84 | indent,
85 | indent,
86 | dcPath,
87 | indent,
88 | sideTarget,
89 | )
90 | }
91 | err = util.WriteFile(dockerComposeFlowPath, []byte(strings.Trim(s, "\n")), 0644)
92 | if err != nil {
93 | return fmt.Errorf("Could not write the Docker Flow file %s\n%s", dockerComposeFlowPath, err.Error())
94 | }
95 | return nil
96 | }
97 |
98 | func (dc DockerCompose) RemoveFlow() error {
99 | if err := util.RemoveFile(dockerComposeFlowPath); err != nil {
100 | return fmt.Errorf("Could not remove the temp file %s\n%s", dockerComposeFlowPath, err.Error())
101 | }
102 | return nil
103 | }
104 |
105 | func (dc DockerCompose) PullTargets(host, certPath, project string, targets []string) error {
106 | if len(targets) == 0 {
107 | return nil
108 | }
109 | args := append([]string{"pull"}, targets...)
110 | return dc.runCmd(host, certPath, project, args)
111 | }
112 |
113 | func (dc DockerCompose) UpTargets(host, certPath, project string, targets []string) error {
114 | if len(targets) == 0 {
115 | return nil
116 | }
117 | args := append([]string{"up", "-d"}, targets...)
118 | return dc.runCmd(host, certPath, project, args)
119 | }
120 |
121 | func (dc DockerCompose) ScaleTargets(host, certPath, project, target string, scale int) error {
122 | if len(target) == 0 {
123 | return nil
124 | }
125 | args := []string{"scale", fmt.Sprintf("%s=%d", target, scale)}
126 | return dc.runCmd(host, certPath, project, args)
127 | }
128 |
129 | func (dc DockerCompose) RmTargets(host, certPath, project string, targets []string) error {
130 | if len(targets) == 0 {
131 | return nil
132 | }
133 | args := append([]string{"rm", "-f"}, targets...)
134 | return dc.runCmd(host, certPath, project, args)
135 | }
136 |
137 | func (dc DockerCompose) StopTargets(host, certPath, project string, targets []string) error {
138 | if len(targets) == 0 {
139 | return nil
140 | }
141 | args := append([]string{"stop"}, targets...)
142 | return dc.runCmd(host, certPath, project, args)
143 | }
144 |
145 | func (dc DockerCompose) getArgs(host, certPath, project string) []string {
146 | args := []string{"-f", dockerComposeFlowPath}
147 | util.SetDockerHost(host, certPath)
148 | if len(project) > 0 {
149 | args = append(args, "-p", project)
150 | }
151 | return args
152 | }
153 |
154 | func (dc DockerCompose) runCmd(host, certPath, project string, args []string) error {
155 | args = append(dc.getArgs(host, certPath, project), args...)
156 | cmd := util.ExecCmd("docker-compose", args...)
157 | cmd.Stdout = os.Stdout
158 | cmd.Stderr = os.Stderr
159 | if err := util.RunCmd(cmd); err != nil {
160 | return fmt.Errorf("Docker Compose command: docker-compose %s\n%s", strings.Join(cmd.Args, ","), err.Error())
161 | }
162 | return nil
163 | }
164 |
--------------------------------------------------------------------------------
/compose/docker_compose_test.go:
--------------------------------------------------------------------------------
1 | package compose
2 |
3 | import (
4 | "fmt"
5 | "github.com/stretchr/testify/suite"
6 | "os"
7 | "os/exec"
8 | "../util"
9 | "testing"
10 | )
11 |
12 | type DockerComposeTestSuite struct {
13 | suite.Suite
14 | dockerComposePath string
15 | serviceName string
16 | target string
17 | sideTargets []string
18 | color string
19 | blueGreen bool
20 | host string
21 | certPath string
22 | project string
23 | }
24 |
25 | func (s *DockerComposeTestSuite) SetupTest() {
26 | s.dockerComposePath = "test-docker-compose.yml"
27 | s.serviceName = "myService"
28 | s.target = "my-target"
29 | s.sideTargets = []string{"my-side-target-1", "my-side-target-2"}
30 | s.color = "red"
31 | s.blueGreen = false
32 | s.host = "tcp://1.2.3.4:1234"
33 | s.certPath = "/path/to/docker/cert"
34 | s.project = "my-project"
35 | util.ReadFile = func(fileName string) ([]byte, error) {
36 | return []byte(""), nil
37 | }
38 | util.WriteFile = func(fileName string, data []byte, perm os.FileMode) error {
39 | return nil
40 | }
41 | util.RemoveFile = func(name string) error {
42 | return nil
43 | }
44 | util.ExecCmd = func(name string, arg ...string) *exec.Cmd {
45 | return &exec.Cmd{}
46 | }
47 | }
48 |
49 | // GetDockerCompose
50 |
51 | func (s DockerComposeTestSuite) Test_GetDockerCompose_ReturnsDockerCompose() {
52 | actual := GetDockerCompose()
53 |
54 | s.Equal(dockerCompose, actual)
55 | }
56 |
57 | // CreateFlow
58 |
59 | func (s DockerComposeTestSuite) Test_CreateFlowFile_ReturnsNil() {
60 | actual := DockerCompose{}.CreateFlowFile(s.dockerComposePath, s.serviceName, s.target, s.sideTargets, s.color, s.blueGreen)
61 |
62 | s.Nil(actual)
63 | }
64 |
65 | func (s DockerComposeTestSuite) Test_CreateFlowFile_ReturnsError_WhenReadFile() {
66 | util.ReadFile = func(fileName string) ([]byte, error) {
67 | return []byte(""), fmt.Errorf("Some error")
68 | }
69 |
70 | err := DockerCompose{}.CreateFlowFile(s.dockerComposePath, s.serviceName, s.target, s.sideTargets, s.color, s.blueGreen)
71 |
72 | s.Error(err)
73 | }
74 |
75 | func (s DockerComposeTestSuite) Test_CreateFlowFile_CreatesTheFile() {
76 | var actual string
77 | util.WriteFile = func(filename string, data []byte, perm os.FileMode) error {
78 | actual = filename
79 | return nil
80 | }
81 |
82 | DockerCompose{}.CreateFlowFile(s.dockerComposePath, s.serviceName, s.target, s.sideTargets, s.color, s.blueGreen)
83 |
84 | s.Equal(dockerComposeFlowPath, actual)
85 | }
86 |
87 | func (s DockerComposeTestSuite) Test_CreateFlowFile_CreatesDockerComposeReplica() {
88 | var actual string
89 | util.ReadFile = func(filename string) ([]byte, error) {
90 | actual = filename
91 | return []byte(""), nil
92 | }
93 |
94 | DockerCompose{}.CreateFlowFile(s.dockerComposePath, s.serviceName, s.target, s.sideTargets, s.color, s.blueGreen)
95 |
96 | s.Equal(s.dockerComposePath, actual)
97 | }
98 |
99 | func (s DockerComposeTestSuite) Test_CreateFlowFile_CreatesNewTarget_WhenBlueGreen() {
100 | color := "orange"
101 | var actual string
102 | var dcContent = fmt.Sprintf(`
103 | %s:
104 | image: vfarcic/books-ms`,
105 | s.target,
106 | )
107 | newTarget := fmt.Sprintf("%s-%s", s.target, color)
108 | expected := fmt.Sprintf(`%s:
109 | extends:
110 | file: %s
111 | service: %s
112 | environment:
113 | - SERVICE_NAME=%s-%s
114 | %s:
115 | extends:
116 | file: %s
117 | service: %s
118 | %s:
119 | extends:
120 | file: %s
121 | service: %s`,
122 | newTarget,
123 | s.dockerComposePath,
124 | s.target,
125 | s.serviceName,
126 | color,
127 | s.sideTargets[0],
128 | s.dockerComposePath,
129 | s.sideTargets[0],
130 | s.sideTargets[1],
131 | s.dockerComposePath,
132 | s.sideTargets[1],
133 | )
134 | util.ReadFile = func(filename string) ([]byte, error) {
135 | return []byte(dcContent), nil
136 | }
137 | util.WriteFile = func(filename string, data []byte, perm os.FileMode) error {
138 | actual = string(data)
139 | return nil
140 | }
141 |
142 | DockerCompose{}.CreateFlowFile(s.dockerComposePath, s.serviceName, s.target, s.sideTargets, color, true)
143 |
144 | s.Equal(expected, actual)
145 | }
146 |
147 | func (s DockerComposeTestSuite) Test_CreateFlowFile_UsesV2_WhenBlueGreen() {
148 | color := "orange"
149 | var actual string
150 | newTarget := fmt.Sprintf("%s-%s", s.target, color)
151 | var dcContent = fmt.Sprintf(`version: '2'
152 |
153 | services:
154 | %s:
155 | image: vfarcic/books-ms`,
156 | s.target,
157 | )
158 | expected := fmt.Sprintf(`version: '2'
159 |
160 | services:
161 | %s:
162 | extends:
163 | file: %s
164 | service: %s
165 | environment:
166 | - SERVICE_NAME=%s-%s
167 | %s:
168 | extends:
169 | file: %s
170 | service: %s
171 | %s:
172 | extends:
173 | file: %s
174 | service: %s`,
175 | newTarget,
176 | s.dockerComposePath,
177 | s.target,
178 | s.serviceName,
179 | color,
180 | s.sideTargets[0],
181 | s.dockerComposePath,
182 | s.sideTargets[0],
183 | s.sideTargets[1],
184 | s.dockerComposePath,
185 | s.sideTargets[1],
186 | )
187 | util.ReadFile = func(filename string) ([]byte, error) {
188 | return []byte(dcContent), nil
189 | }
190 | util.WriteFile = func(filename string, data []byte, perm os.FileMode) error {
191 | actual = string(data)
192 | return nil
193 | }
194 |
195 | DockerCompose{}.CreateFlowFile(s.dockerComposePath, s.serviceName, s.target, s.sideTargets, color, true)
196 |
197 | s.Equal(expected, actual)
198 | }
199 |
200 | func (s DockerComposeTestSuite) Test_CreateFlowFile_ReturnsError_WhenWriteFile() {
201 | util.WriteFile = func(filename string, data []byte, perm os.FileMode) error {
202 | return fmt.Errorf("Some error")
203 | }
204 |
205 | err := DockerCompose{}.CreateFlowFile(s.dockerComposePath, s.serviceName, s.target, s.sideTargets, s.color, s.blueGreen)
206 |
207 | s.Error(err)
208 | }
209 |
210 | // RemoveFlow
211 |
212 | func (s DockerComposeTestSuite) Test_RemoveFlow_RemovesTheFile() {
213 | var actual string
214 | util.RemoveFile = func(name string) error {
215 | actual = name
216 | return nil
217 | }
218 |
219 | DockerCompose{}.RemoveFlow()
220 |
221 | s.Equal(dockerComposeFlowPath, actual)
222 | }
223 |
224 | func (s DockerComposeTestSuite) Test_RemoveFlow_ReturnsError() {
225 | util.RemoveFile = func(name string) error {
226 | return fmt.Errorf("Some error")
227 | }
228 |
229 | err := DockerCompose{}.RemoveFlow()
230 |
231 | s.Error(err)
232 | }
233 |
234 | // PullTargets
235 |
236 | func (s DockerComposeTestSuite) Test_PullTargets_ReturnsNil_WhenTargetsAreEmpty() {
237 | actual := DockerCompose{}.PullTargets(s.host, s.certPath, s.project, []string{})
238 |
239 | s.Nil(actual)
240 | }
241 |
242 | func (s DockerComposeTestSuite) Test_PullTargets() {
243 | s.testCmd(DockerCompose{}.PullTargets, "pull", s.target)
244 | }
245 |
246 | func (s DockerComposeTestSuite) Test_PullTargets_ReturnsError_WhenCommandFails() {
247 | runCmdOrig := util.RunCmd
248 | defer func() { util.RunCmd = runCmdOrig }()
249 | util.RunCmd = func(cmd *exec.Cmd) error { return fmt.Errorf("This is an error") }
250 |
251 | actual := DockerCompose{}.PullTargets(s.host, s.certPath, s.project, []string{s.target})
252 | s.Error(actual)
253 | }
254 |
255 | // UpTargets
256 |
257 | func (s DockerComposeTestSuite) Test_UpTargets() {
258 | s.testCmd(DockerCompose{}.UpTargets, "up", "-d", s.target)
259 | }
260 |
261 | // ScaleTargets
262 |
263 | func (s DockerComposeTestSuite) Test_ScaleTargets_ReturnsNil_WhenTargetIsEmpty() {
264 | actual := DockerCompose{}.ScaleTargets(s.host, s.certPath, s.project, "", 8)
265 |
266 | s.Nil(actual)
267 | }
268 |
269 | func (s DockerComposeTestSuite) Test_ScaleTargets_CreatesTheCommand() {
270 | var scale = 7
271 | expected := []string{"docker-compose", "-f", dockerComposeFlowPath, "-p", s.project, "scale", fmt.Sprintf("%s=%d", s.target, scale)}
272 | actual := s.mockExecCmd()
273 |
274 | DockerCompose{}.ScaleTargets(s.host, s.certPath, s.project, s.target, scale)
275 |
276 | s.Equal(expected, *actual)
277 | }
278 |
279 | // RmTargets
280 |
281 | func (s DockerComposeTestSuite) Test_RmTargets() {
282 | s.testCmd(DockerCompose{}.RmTargets, "rm", "-f", s.target)
283 | }
284 |
285 | // StopTargets
286 |
287 | func (s DockerComposeTestSuite) Test_StopTargets() {
288 | s.testCmd(DockerCompose{}.StopTargets, "stop", s.target)
289 | }
290 |
291 | // Suite
292 |
293 | func TestDockerComposeTestSuite(t *testing.T) {
294 | dockerHost := os.Getenv("DOCKER_HOST")
295 | dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
296 | defer func() {
297 | os.Setenv("DOCKER_HOST", dockerHost)
298 | os.Setenv("DOCKER_CERT_PATH", dockerCertPath)
299 | }()
300 | runCmdOrig := util.RunCmd
301 | defer func() { util.RunCmd = runCmdOrig }()
302 | util.RunCmd = func(cmd *exec.Cmd) error { return nil }
303 | suite.Run(t, new(DockerComposeTestSuite))
304 | }
305 |
306 | // Helper
307 |
308 | func (s DockerComposeTestSuite) mockExecCmd() *[]string {
309 | var actualCommand []string
310 | util.ExecCmd = func(name string, arg ...string) *exec.Cmd {
311 | actualCommand = append([]string{name}, arg...)
312 | cmd := &exec.Cmd{}
313 | return cmd
314 | }
315 | return &actualCommand
316 | }
317 |
318 | type testCmdType func(host, certPath, project string, targets []string) error
319 |
320 | func (s DockerComposeTestSuite) testCmd(f testCmdType, args ...string) {
321 | var expected []string
322 | var actual *[]string
323 |
324 | // Returns nil when targets are empty
325 | s.Nil(f(s.host, s.certPath, s.project, []string{}))
326 |
327 | // Creates command
328 | expected = append([]string{"docker-compose", "-f", dockerComposeFlowPath, "-p", s.project}, args...)
329 | actual = s.mockExecCmd()
330 | f(s.host, s.certPath, s.project, []string{s.target})
331 | s.Equal(expected, *actual)
332 |
333 | // Does not add project when empty
334 | expected = append([]string{"docker-compose", "-f", dockerComposeFlowPath}, args...)
335 | actual = s.mockExecCmd()
336 | f(s.host, s.certPath, "", []string{s.target})
337 | s.Equal(expected, *actual)
338 |
339 | // Adds DOCKER_HOST variable
340 | f(s.host, s.certPath, s.project, []string{s.target})
341 | host := s.host
342 | s.Equal(host, s.host)
343 |
344 | // Does not add DOCKER_HOST variable when empty
345 | f("", s.certPath, s.project, []string{s.target})
346 | s.NotEqual(os.Getenv("DOCKER_HOST"), s.host)
347 |
348 | // Adds DOCKER_CERT_PATH variable
349 | f(s.host, s.certPath, s.project, []string{s.target})
350 | s.Equal(os.Getenv("DOCKER_CERT_PATH"), s.certPath)
351 |
352 | }
353 |
--------------------------------------------------------------------------------
/consul.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "net/http"
7 | "strconv"
8 | "strings"
9 | )
10 |
11 | const ConsulScaleKey = "scale"
12 | const ConsulColorKey = "color"
13 |
14 | type Consul struct{}
15 |
16 | func (c Consul) GetScaleCalc(address, serviceName, scale string) (int, error) {
17 | s := 1
18 | inc := 0
19 | resp, err := http.Get(fmt.Sprintf("%s/v1/kv/docker-flow/%s/scale?raw", address, serviceName))
20 | if err != nil {
21 | return 0, fmt.Errorf("Please make sure that Consul address is correct\n%s", err.Error())
22 | }
23 | defer resp.Body.Close()
24 | data, _ := ioutil.ReadAll(resp.Body)
25 | if len(data) > 0 {
26 | s, _ = strconv.Atoi(string(data))
27 | }
28 | if len(scale) > 0 {
29 | if scale[:1] == "+" || scale[:1] == "-" {
30 | inc, _ = strconv.Atoi(scale)
31 | } else {
32 | s, _ = strconv.Atoi(scale)
33 | }
34 | }
35 | total := s + inc
36 | if total <= 0 {
37 | return 1, nil
38 | }
39 | return total, nil
40 | }
41 |
42 | func (c Consul) GetColor(address, serviceName string) (string, error) {
43 | resp, err := http.Get(fmt.Sprintf("%s/v1/kv/docker-flow/%s/color?raw", address, serviceName))
44 | if err != nil {
45 | return "", fmt.Errorf("Could not retrieve the color from Consul. Please make sure that Consul address is correct\n%s", err.Error())
46 | }
47 | defer resp.Body.Close()
48 | data, _ := ioutil.ReadAll(resp.Body)
49 | currColor := GreenColor
50 | if len(data) > 0 {
51 | currColor = string(data)
52 | }
53 | return currColor, nil
54 | }
55 |
56 | func (c Consul) GetNextColor(currentColor string) string {
57 | if currentColor == BlueColor {
58 | return GreenColor
59 | }
60 | return BlueColor
61 | }
62 |
63 | func (c Consul) PutScale(address, serviceName string, value int) (string, error) {
64 | return c.putValue(address, serviceName, ConsulScaleKey, strconv.Itoa(value))
65 | }
66 |
67 | func (c Consul) PutColor(address, serviceName string, value string) (string, error) {
68 | return c.putValue(address, serviceName, ConsulColorKey, value)
69 | }
70 |
71 | func (c Consul) putValue(address, serviceName, key, value string) (string, error) {
72 | url := fmt.Sprintf("%s/v1/kv/docker-flow/%s/%s", address, serviceName, key)
73 | client := &http.Client{}
74 | request, _ := http.NewRequest("PUT", url, strings.NewReader(value))
75 | resp, err := client.Do(request)
76 | if err != nil {
77 | 		return "", fmt.Errorf("Could not store the information in Consul\n%s", err.Error())
78 | }
79 | defer resp.Body.Close()
80 | data, _ := ioutil.ReadAll(resp.Body)
81 | return string(data), nil
82 | }
83 |
--------------------------------------------------------------------------------
/consul_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/stretchr/testify/suite"
6 | "net/http"
7 | "net/http/httptest"
8 | "os"
9 | "strconv"
10 | "testing"
11 | )
12 |
13 | type ConsulTestSuite struct {
14 | suite.Suite
15 | Server *httptest.Server
16 | ConsulScale int
17 | ServiceName string
18 | ServiceColor string
19 | PutScaleResponse string
20 | PutColorResponse string
21 | }
22 |
23 | func (s *ConsulTestSuite) SetupTest() {
24 | s.ConsulScale = 4
25 | s.ServiceName = "myService"
26 | s.ServiceColor = BlueColor
27 | s.PutScaleResponse = "PUT_SCALE"
28 | s.PutColorResponse = "PUT_COLOR"
29 | s.Server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
30 | scaleGetUrl := fmt.Sprintf("/v1/kv/docker-flow/%s/scale?raw", s.ServiceName)
31 | colorGetUrl := fmt.Sprintf("/v1/kv/docker-flow/%s/color?raw", s.ServiceName)
32 | scalePutUrl := fmt.Sprintf("/v1/kv/docker-flow/%s/scale?", s.ServiceName)
33 | colorPutUrl := fmt.Sprintf("/v1/kv/docker-flow/%s/color?", s.ServiceName)
34 | actualUrl := fmt.Sprintf("%s?%s", r.URL.Path, r.URL.RawQuery)
35 | if r.Method == "GET" {
36 | if actualUrl == scaleGetUrl {
37 | fmt.Fprint(w, s.ConsulScale)
38 | } else if actualUrl == colorGetUrl {
39 | fmt.Fprint(w, s.ServiceColor)
40 | } else {
41 | fmt.Fprint(w, "")
42 | }
43 | } else if r.Method == "PUT" {
44 | if actualUrl == scalePutUrl {
45 | fmt.Fprint(w, s.PutScaleResponse)
46 | }
47 | if actualUrl == colorPutUrl {
48 | fmt.Fprint(w, s.PutColorResponse)
49 | }
50 | }
51 | }))
52 | }
53 |
54 | func (s ConsulTestSuite) Test_GetScaleCalc_Returns1() {
55 | actual, _ := Consul{}.GetScaleCalc(s.Server.URL, "SERVICE_NEVER_DEPLOYED_BEFORE", "")
56 |
57 | s.Equal(1, actual)
58 | }
59 |
60 | func (s ConsulTestSuite) Test_GetScaleCalc_ReturnsNumberFromConsul() {
61 | actual, _ := Consul{}.GetScaleCalc(s.Server.URL, s.ServiceName, "")
62 |
63 | s.Equal(s.ConsulScale, actual)
64 | }
65 |
66 | func (s ConsulTestSuite) Test_GetScaleCalc_ReturnsErrorFromHttpGet() {
67 | _, err := Consul{}.GetScaleCalc("WRONG_URL", s.ServiceName, "")
68 |
69 | s.Error(err)
70 | }
71 |
72 | func (s ConsulTestSuite) Test_GetScaleCalc_ReturnScaleFuncArg() {
73 | expected := 7
74 |
75 | actual, _ := Consul{}.GetScaleCalc(s.Server.URL, s.ServiceName, strconv.Itoa(expected))
76 |
77 | s.Equal(expected, actual)
78 | }
79 |
80 | func (suite ConsulTestSuite) Test_GetScaleCalc_IncrementsScale() {
81 | actual, _ := Consul{}.GetScaleCalc(suite.Server.URL, suite.ServiceName, "+2")
82 |
83 | suite.Equal(suite.ConsulScale+2, actual)
84 | }
85 |
86 | func (suite ConsulTestSuite) Test_GetScaleCalc_DecrementsScale() {
87 | actual, _ := Consul{}.GetScaleCalc(suite.Server.URL, suite.ServiceName, "-2")
88 |
89 | suite.Equal(suite.ConsulScale-2, actual)
90 | }
91 |
92 | func (suite ConsulTestSuite) Test_GetScaleCalc_Returns1_WhenScaleIsNegativeOrZero() {
93 | actual, _ := Consul{}.GetScaleCalc(suite.Server.URL, suite.ServiceName, "-100")
94 |
95 | suite.Equal(1, actual)
96 | }
97 |
98 | func (suite ConsulTestSuite) Test_GetColor_ReturnsGreen() {
99 | actual, _ := Consul{}.GetColor(suite.Server.URL, "SERVICE_NEVER_DEPLOYED_BEFORE")
100 |
101 | suite.Equal(GreenColor, actual)
102 | }
103 |
104 | func (suite ConsulTestSuite) Test_GetColor_ReturnServiceColor() {
105 | actual, _ := Consul{}.GetColor(suite.Server.URL, suite.ServiceName)
106 |
107 | suite.Equal(suite.ServiceColor, actual)
108 | }
109 |
110 | func (suite ConsulTestSuite) Test_GetColor_ReturnsErrorFromHttpGet() {
111 | _, err := Consul{}.GetColor("WRONG_URL", suite.ServiceName)
112 |
113 | suite.Error(err)
114 | }
115 |
116 | func (suite ConsulTestSuite) Test_GetNextColor_ReturnsBlueWhenGreen() {
117 | actual := Consul{}.GetNextColor(GreenColor)
118 |
119 | suite.Equal(BlueColor, actual)
120 | }
121 |
122 | func (suite ConsulTestSuite) Test_GetNextColor_ReturnsGreenWhenBlue() {
123 | actual := Consul{}.GetNextColor(BlueColor)
124 |
125 | suite.Equal(GreenColor, actual)
126 | }
127 |
128 | func (suite ConsulTestSuite) Test_PutScale_PutsToConsul() {
129 | actual, _ := Consul{}.PutScale(suite.Server.URL, suite.ServiceName, 34)
130 |
131 | suite.Equal(suite.PutScaleResponse, actual)
132 | }
133 |
134 | func (suite ConsulTestSuite) Test_GetScaleCalc_ReturnsErrorFromHttpPut() {
135 | _, err := Consul{}.PutScale("WRONG_URL", suite.ServiceName, 45)
136 |
137 | suite.Error(err)
138 | }
139 |
140 | func (suite ConsulTestSuite) Test_PutColor_PutsToConsul() {
141 | actual, _ := Consul{}.PutColor(suite.Server.URL, suite.ServiceName, "orange")
142 |
143 | suite.Equal(suite.PutColorResponse, actual)
144 | }
145 |
146 | func (suite ConsulTestSuite) Test_GetColorCalc_ReturnsErrorFromHttpPut() {
147 | _, err := Consul{}.PutColor("WRONG_URL", suite.ServiceName, "purple")
148 |
149 | suite.Error(err)
150 | }
151 |
152 | func TestConsulTestSuite(t *testing.T) {
153 | dockerHost := os.Getenv("DOCKER_HOST")
154 | dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
155 | defer func() {
156 | os.Setenv("DOCKER_HOST", dockerHost)
157 | os.Setenv("DOCKER_CERT_PATH", dockerCertPath)
158 | }()
159 | suite.Run(t, new(ConsulTestSuite))
160 | }
161 |
--------------------------------------------------------------------------------
/docker-compose-setup.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | consul:
5 | container_name: consul
6 | image: progrium/consul
7 | ports:
8 | - 8500:8500
9 | - 8301:8301
10 | - 8300:8300
11 | command: -server -bootstrap
12 |
13 | consul-server:
14 | container_name: consul
15 | image: consul
16 | network_mode: host
17 | environment:
18 | - 'CONSUL_LOCAL_CONFIG={"skip_leave_on_interrupt": true}'
19 | command: agent -server -bind=$HOST_IP -bootstrap-expect=1 -client=$HOST_IP
20 |
21 | registrator:
22 | container_name: registrator
23 | image: gliderlabs/registrator
24 | volumes:
25 | - /var/run/docker.sock:/tmp/docker.sock
26 | command: -ip $HOST_IP consul://$CONSUL_IP:8500
27 |
28 | proxy:
29 | container_name: docker-flow-proxy
30 | image: vfarcic/docker-flow-proxy
31 | environment:
32 | CONSUL_ADDRESS: $CONSUL_IP:8500
33 | volumes:
34 | - ./test_configs/:/consul_templates/
35 | ports:
36 | - 80:80
37 | - 443:443
38 | - 8080:8080
39 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 |
5 | app:
6 | image: vfarcic/go-demo
7 | ports:
8 | - 8080
9 |
10 | db:
11 | image: mongo
12 |
--------------------------------------------------------------------------------
/docker-flow.yml:
--------------------------------------------------------------------------------
1 | target: app
2 | side_targets:
3 | - db
4 | blue_green: true
5 | service_path:
6 | - /demo
--------------------------------------------------------------------------------
/flow.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "./compose"
6 | )
7 |
8 | type Flowable interface {
9 | Deploy(opts Opts, dc compose.DockerComposer) error
10 | GetPullTargets(opts Opts) []string
11 | Scale(opts Opts, dc compose.DockerComposer, target string, createFlowFile bool) error
12 | Proxy(opts Opts, proxy Proxy) error
13 | }
14 |
15 | const FLOW_DEPLOY = "deploy"
16 | const FLOW_SCALE = "scale"
17 | const FLOW_STOP_OLD = "stop-old"
18 | const FLOW_PROXY = "proxy"
19 |
20 | type Flow struct{}
21 |
22 | var flow Flowable = Flow{}
23 |
24 | func getFlow() Flowable {
25 | return flow
26 | }
27 |
28 | func (m Flow) Deploy(opts Opts, dc compose.DockerComposer) error {
29 | if err := dc.CreateFlowFile(
30 | opts.ComposePath,
31 | opts.ServiceName,
32 | opts.Target,
33 | opts.SideTargets,
34 | opts.NextColor,
35 | opts.BlueGreen,
36 | ); err != nil {
37 | return fmt.Errorf("Failed to create the Docker Flow file\n%s\n", err.Error())
38 | }
39 | logPrintln(fmt.Sprintf("Deploying (%s)...", opts.NextTarget))
40 |
41 | if err := dc.PullTargets(opts.Host, opts.CertPath, opts.Project, m.GetPullTargets(opts)); err != nil {
42 | return fmt.Errorf("The deployment phase failed (pull)\n%s", err.Error())
43 | }
44 | if opts.BlueGreen {
45 | if err := dc.RmTargets(opts.Host, opts.CertPath, opts.Project, []string{opts.NextTarget}); err != nil {
46 | return fmt.Errorf("The deployment phase failed (rm)\n%s", err.Error())
47 | }
48 | }
49 | targets := append(opts.SideTargets, opts.NextTarget)
50 | if err := dc.UpTargets(opts.Host, opts.CertPath, opts.Project, targets); err != nil {
51 | return fmt.Errorf("The deployment phase failed (up)\n%s", err.Error())
52 | }
53 | if err := m.Scale(opts, dc, opts.NextTarget, false); err != nil {
54 | return err
55 | }
56 | if err := dc.RemoveFlow(); err != nil {
57 | return err
58 | }
59 | return nil
60 | }
61 |
62 | func (m Flow) Scale(opts Opts, dc compose.DockerComposer, target string, createFlowFile bool) error {
63 | if createFlowFile {
64 | if err := dc.CreateFlowFile(
65 | opts.ComposePath,
66 | opts.ServiceName,
67 | opts.Target,
68 | opts.SideTargets,
69 | opts.CurrentColor,
70 | opts.BlueGreen,
71 | ); err != nil {
72 | return fmt.Errorf("Failed to create the Docker Flow file\n%s\n", err.Error())
73 | }
74 | }
75 | sc := getServiceDiscovery()
76 | scale, err := sc.GetScaleCalc(opts.ServiceDiscoveryAddress, opts.ServiceName, opts.Scale)
77 | if err != nil {
78 | return err
79 | }
80 | if err := dc.ScaleTargets(opts.Host, opts.CertPath, opts.Project, target, scale); err != nil {
81 | return fmt.Errorf("The scale phase failed\n%s", err.Error())
82 | }
83 | sc.PutScale(opts.ServiceDiscoveryAddress, opts.ServiceName, scale)
84 | if createFlowFile {
85 | if err := dc.RemoveFlow(); err != nil {
86 | return err
87 | }
88 | }
89 | return nil
90 | }
91 |
92 | func (m Flow) Test(opts Opts, dc compose.DockerComposer) {
93 | }
94 |
95 |
96 |
97 | func (m Flow) Proxy(opts Opts, proxy Proxy) error {
98 | if err := proxy.Provision(
99 | opts.ProxyDockerHost,
100 | opts.ProxyReconfPort,
101 | opts.ProxyDockerCertPath,
102 | opts.ServiceDiscoveryAddress,
103 | ); err != nil {
104 | return err
105 | }
106 | color := opts.CurrentColor
107 | if m.contains(opts.Flow, FLOW_DEPLOY) {
108 | color = opts.NextColor
109 | }
110 | if err := proxy.Reconfigure(
111 | opts.ProxyDockerHost,
112 | opts.ProxyDockerCertPath,
113 | opts.ProxyHost,
114 | opts.ProxyReconfPort,
115 | opts.ServiceName,
116 | color,
117 | opts.ServicePath,
118 | opts.ConsulTemplateFePath,
119 | opts.ConsulTemplateBePath,
120 | ); err != nil {
121 | return err
122 | }
123 | return nil
124 | }
125 |
126 | func (m Flow) GetPullTargets(opts Opts) []string {
127 | targets := make([]string, 0)
128 | targets = append(targets, opts.NextTarget)
129 | if opts.PullSideTargets {
130 | targets = append(targets, opts.SideTargets...)
131 | }
132 | return targets
133 | }
134 |
135 | func (m Flow) contains(s []string, v string) bool {
136 | for _, a := range s {
137 | if a == v {
138 | return true
139 | }
140 | }
141 | return false
142 | }
143 |
--------------------------------------------------------------------------------
/flow_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/stretchr/testify/mock"
6 | "github.com/stretchr/testify/suite"
7 | "os"
8 | "testing"
9 | "./compose"
10 | )
11 |
12 | type FlowTestSuite struct {
13 | suite.Suite
14 | opts Opts
15 | dc compose.DockerComposer
16 | }
17 |
18 | func (s *FlowTestSuite) SetupTest() {
19 | s.opts = Opts{
20 | ComposePath: "myComposePath",
21 | Target: "myTarget",
22 | NextColor: "orange",
23 | CurrentColor: "pink",
24 | NextTarget: "myNextTarget",
25 | CurrentTarget: "myCurrentTarget",
26 | BlueGreen: true,
27 | Flow: []string{"deploy", "scale"},
28 | ServiceDiscoveryAddress: "myServiceDiscoveryAddress",
29 | ServiceName: "myServiceName",
30 | ProxyHost: "myProxyHost",
31 | ProxyDockerHost: "myProxyDockerHost",
32 | ProxyDockerCertPath: "myProxyCertPath",
33 | }
34 | GetOptsOrig := GetOpts
35 | defer func() {
36 | GetOpts = GetOptsOrig
37 | }()
38 | GetOpts = func() (Opts, error) {
39 | return s.opts, nil
40 | }
41 | s.dc = getDockerComposeMock(s.opts, "")
42 | compose.GetDockerCompose = func() compose.DockerComposer{ return s.dc }
43 | flow = getFlowMock("")
44 | serviceDiscovery = getServiceDiscoveryMock(s.opts, "")
45 | logFatal = func(v ...interface{}) {}
46 | logPrintln = func(v ...interface{}) {}
47 | }
48 |
49 | // Deploy
50 |
51 | func (s FlowTestSuite) Test_DeployReturnsNil() {
52 | opts := Opts{}
53 | mockObj := getDockerComposeMock(opts, "")
54 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
55 |
56 | actual := Flow{}.Deploy(opts, mockObj)
57 |
58 | s.Nil(actual)
59 | }
60 |
61 | // Deploy > CreateFlowFile
62 |
63 | func (s FlowTestSuite) Test_Deploy_InvokesDockerComposeCreateFlowFile_WhenDeploy() {
64 | mockObj := getDockerComposeMock(s.opts, "")
65 | s.dc = mockObj
66 |
67 | Flow{}.Deploy(s.opts, s.dc)
68 |
69 | mockObj.AssertCalled(
70 | s.T(),
71 | "CreateFlowFile",
72 | s.opts.ComposePath,
73 | s.opts.ServiceName,
74 | s.opts.Target,
75 | s.opts.SideTargets,
76 | s.opts.NextColor,
77 | s.opts.BlueGreen,
78 | )
79 | }
80 |
81 | func (s MainTestSuite) Test_Deploy_ReturnsError_WhenDeployAndDockerComposeCreateFlowFileFails() {
82 | mockObj := getDockerComposeMock(s.opts, "CreateFlowFile")
83 | mockObj.On(
84 | "CreateFlowFile",
85 | mock.Anything,
86 | mock.Anything,
87 | mock.Anything,
88 | mock.Anything,
89 | mock.Anything,
90 | mock.Anything,
91 | ).Return(fmt.Errorf("This is an error"))
92 | s.dc = mockObj
93 |
94 | err := Flow{}.Deploy(s.opts, s.dc)
95 |
96 | s.Error(err)
97 | }
98 |
99 | // Deploy > PullTargets
100 |
101 | func (s FlowTestSuite) Test_DeployInvokesPullTargets() {
102 | opts := Opts{
103 | Host: "myHost",
104 | Project: "myProject",
105 | NextTarget: "myNextTarget",
106 | SideTargets: []string{"target1", "target2"},
107 | }
108 | mockObj := getDockerComposeMock(opts, "")
109 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
110 | flow := Flow{}
111 |
112 | flow.Deploy(opts, mockObj)
113 |
114 | mockObj.AssertCalled(s.T(), "PullTargets", opts.Host, opts.CertPath, opts.Project, flow.GetPullTargets(opts))
115 | }
116 |
117 | func (s FlowTestSuite) Test_DeployReturnsError_WhenPullTargetsFails() {
118 | opts := Opts{}
119 | mockObj := getDockerComposeMock(opts, "PullTargets")
120 | mockObj.On("PullTargets", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("This is an error"))
121 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
122 |
123 | actual := Flow{}.Deploy(opts, mockObj)
124 |
125 | s.Error(actual)
126 | }
127 |
128 | // Deploy > UpTargets
129 |
130 | func (s FlowTestSuite) Test_DeployInvokesUpTargets() {
131 | opts := Opts{
132 | Host: "myHost",
133 | Project: "myProject",
134 | SideTargets: []string{"target1", "target2"},
135 | }
136 | mockObj := getDockerComposeMock(opts, "")
137 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
138 |
139 | Flow{}.Deploy(opts, mockObj)
140 |
141 | mockObj.AssertCalled(s.T(), "UpTargets", opts.Host, opts.CertPath, opts.Project, append(opts.SideTargets, opts.NextTarget))
142 | }
143 |
144 | func (s FlowTestSuite) Test_DeployReturnsError_WhenUpTargetsFails() {
145 | opts := Opts{}
146 | mockObj := getDockerComposeMock(opts, "UpTargets")
147 | mockObj.On(
148 | "UpTargets",
149 | mock.Anything,
150 | mock.Anything,
151 | mock.Anything,
152 | mock.Anything,
153 | ).Return(fmt.Errorf("This is an error"))
154 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
155 |
156 | actual := Flow{}.Deploy(opts, mockObj)
157 |
158 | s.Error(actual)
159 | }
160 |
161 | // Deploy > RmTargets
162 |
163 | func (s FlowTestSuite) Test_DeployInvokesRmTargets() {
164 | opts := Opts{
165 | BlueGreen: true,
166 | Host: "myHost",
167 | Project: "myProject",
168 | NextTarget: "myNextTarget",
169 | }
170 | mockObj := getDockerComposeMock(opts, "")
171 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
172 |
173 | Flow{}.Deploy(opts, mockObj)
174 |
175 | mockObj.AssertCalled(s.T(), "RmTargets", opts.Host, opts.CertPath, opts.Project, []string{opts.NextTarget})
176 | }
177 |
178 | func (s FlowTestSuite) Test_DeployDoesNotInvokeRmTargets_WhenBlueGreenIsFalse() {
179 | opts := Opts{
180 | BlueGreen: false,
181 | }
182 | mockObj := getDockerComposeMock(opts, "")
183 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
184 |
185 | Flow{}.Deploy(opts, mockObj)
186 |
187 | mockObj.AssertNotCalled(s.T(), "RmTargets", opts.Host, opts.CertPath, opts.Project, []string{opts.NextTarget})
188 | }
189 |
190 | func (s FlowTestSuite) Test_DeployReturnsError_WhenRmTargetsFails() {
191 | opts := Opts{
192 | BlueGreen: true,
193 | }
194 | mockObj := getDockerComposeMock(opts, "RmTargets")
195 | mockObj.On("RmTargets", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("This is an error"))
196 |
197 | actual := Flow{}.Deploy(opts, mockObj)
198 | s.Error(actual)
199 | }
200 |
201 | // Deploy > GetScaleCalc
202 |
203 | func (s FlowTestSuite) Test_DeployReturnsError_WhenGetScaleCalcFails() {
204 | opts := Opts{}
205 | mockObj := getDockerComposeMock(opts, "")
206 | scMockObj := getServiceDiscoveryMock(opts, "GetScaleCalc")
207 | scMockObj.On("GetScaleCalc", mock.Anything, mock.Anything, mock.Anything).Return(0, fmt.Errorf("This is an error"))
208 | serviceDiscovery = scMockObj
209 |
210 | actual := Flow{}.Deploy(opts, mockObj)
211 |
212 | s.Error(actual)
213 | }
214 |
215 | // Deploy > Scale
216 |
217 | func (s FlowTestSuite) Test_DeployInvokesScaleTargets() {
218 | opts := Opts{
219 | Host: "myHost",
220 | Project: "myProject",
221 | NextTarget: "myNextTarget",
222 | ServiceDiscoveryAddress: "mySeviceDiscoveryAddress",
223 | ServiceName: "myService",
224 | Scale: "34",
225 | }
226 | mockObj := getDockerComposeMock(opts, "")
227 | flow := Flow{}
228 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
229 | scale, _ := serviceDiscovery.GetScaleCalc(opts.ServiceDiscoveryAddress, opts.ServiceName, opts.Scale)
230 |
231 | flow.Deploy(opts, mockObj)
232 |
233 | mockObj.AssertCalled(s.T(), "ScaleTargets", opts.Host, opts.CertPath, opts.Project, opts.NextTarget, scale)
234 | }
235 |
236 | func (s FlowTestSuite) Test_DeployReturnsError_WhenScaleTargetsFails() {
237 | opts := Opts{}
238 | mockObj := getDockerComposeMock(opts, "ScaleTargets")
239 | mockObj.On(
240 | "ScaleTargets",
241 | mock.Anything,
242 | mock.Anything,
243 | mock.Anything,
244 | mock.Anything,
245 | mock.Anything,
246 | ).Return(fmt.Errorf("This is an error"))
247 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
248 |
249 | actual := Flow{}.Deploy(opts, mockObj)
250 |
251 | s.Error(actual)
252 | }
253 |
254 | func (s FlowTestSuite) Test_DeployInvokesCreateFlowFileOnlyOnce() {
255 | opts := Opts{}
256 | mockObj := getDockerComposeMock(opts, "")
257 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
258 |
259 | Flow{}.Deploy(opts, mockObj)
260 |
261 | mockObj.AssertNumberOfCalls(s.T(), "CreateFlowFile", 1)
262 | }
263 |
264 | func (s FlowTestSuite) Test_DeployInvokesRemoveFlowOnlyOnce() {
265 | opts := Opts{}
266 | mockObj := getDockerComposeMock(opts, "")
267 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
268 |
269 | Flow{}.Deploy(opts, mockObj)
270 |
271 | mockObj.AssertNumberOfCalls(s.T(), "CreateFlowFile", 1)
272 | }
273 |
274 | // Deploy > PutScale
275 |
276 | func (s FlowTestSuite) Test_DeployInvokesPutScale() {
277 | opts := Opts{
278 | ServiceDiscoveryAddress: "mySeviceDiscoveryAddress",
279 | ServiceName: "myService",
280 | Scale: "34",
281 | }
282 | mockObj := getDockerComposeMock(opts, "")
283 | scMockObj := getServiceDiscoveryMock(opts, "")
284 | serviceDiscovery = scMockObj
285 | scale, _ := serviceDiscovery.GetScaleCalc(opts.ServiceDiscoveryAddress, opts.ServiceName, opts.Scale)
286 |
287 | Flow{}.Deploy(opts, mockObj)
288 |
289 | scMockObj.AssertCalled(s.T(), "PutScale", opts.ServiceDiscoveryAddress, opts.ServiceName, scale)
290 | }
291 |
292 | // Deploy > RemoveFlow
293 |
294 | func (s FlowTestSuite) Test_Deploy_InvokesDockerComposeRemoveFlow() {
295 | mockObj := getDockerComposeMock(s.opts, "")
296 | s.dc = mockObj
297 |
298 | Flow{}.Deploy(s.opts, s.dc)
299 |
300 | mockObj.AssertCalled(s.T(), "RemoveFlow")
301 | }
302 |
303 | func (s FlowTestSuite) Test_Deploy_ReturnsError_WhenDockerComposeRemoveFlowFails() {
304 | mockObj := getDockerComposeMock(s.opts, "RemoveFlow")
305 | mockObj.On("RemoveFlow").Return(fmt.Errorf("This is an error"))
306 | s.dc = mockObj
307 |
308 | err := Flow{}.Deploy(s.opts, s.dc)
309 |
310 | s.Error(err)
311 | }
312 |
313 | // Scale > CreateFlowFile
314 |
315 | func (s FlowTestSuite) Test_Scale_InvokesDockerComposeCreateFlowFile() {
316 | mockObj := getDockerComposeMock(s.opts, "")
317 | s.dc = mockObj
318 |
319 | Flow{}.Scale(s.opts, s.dc, s.opts.CurrentTarget, true)
320 |
321 | mockObj.AssertCalled(
322 | s.T(),
323 | "CreateFlowFile",
324 | s.opts.ComposePath,
325 | s.opts.ServiceName,
326 | s.opts.Target,
327 | s.opts.SideTargets,
328 | s.opts.CurrentColor,
329 | s.opts.BlueGreen,
330 | )
331 | }
332 |
333 | func (s FlowTestSuite) Test_Scale_ReturnsError_WhenScaleAndDockerComposeCreateFlowFileFails() {
334 | mockObj := getDockerComposeMock(s.opts, "CreateFlowFile")
335 | mockObj.On(
336 | "CreateFlowFile",
337 | mock.Anything,
338 | mock.Anything,
339 | mock.Anything,
340 | mock.Anything,
341 | mock.Anything,
342 | mock.Anything,
343 | ).Return(fmt.Errorf("This is an error"))
344 | s.dc = mockObj
345 |
346 | err := Flow{}.Scale(s.opts, s.dc, s.opts.CurrentTarget, true)
347 |
348 | s.Error(err)
349 | }
350 |
351 | // Scale > GetScaleCalc
352 |
353 | func (s FlowTestSuite) Test_ScaleReturnsError_WhenGetScaleCalcFails() {
354 | opts := Opts{}
355 | mockObj := getDockerComposeMock(opts, "")
356 | scMockObj := getServiceDiscoveryMock(opts, "GetScaleCalc")
357 | serviceDiscovery = scMockObj
358 | scMockObj.On("GetScaleCalc", mock.Anything, mock.Anything, mock.Anything).Return(0, fmt.Errorf("This is an error"))
359 |
360 | actual := Flow{}.Scale(opts, mockObj, "myTarget", true)
361 |
362 | s.Error(actual)
363 | }
364 |
365 | // Scale > ScaleTargets
366 |
367 | func (s FlowTestSuite) Test_ScaleInvokesScaleTargets() {
368 | opts := Opts{
369 | Host: "myHost",
370 | Project: "myProject",
371 | ServiceDiscoveryAddress: "mySeviceDiscoveryAddress",
372 | ServiceName: "myService",
373 | Scale: "34",
374 | }
375 | mockObj := getDockerComposeMock(opts, "")
376 | flow := Flow{}
377 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
378 | target := "myTarget"
379 | scale, _ := serviceDiscovery.GetScaleCalc(opts.ServiceDiscoveryAddress, opts.ServiceName, opts.Scale)
380 |
381 | flow.Scale(opts, mockObj, target, true)
382 |
383 | mockObj.AssertCalled(s.T(), "ScaleTargets", opts.Host, opts.CertPath, opts.Project, target, scale)
384 | }
385 |
386 | func (s FlowTestSuite) Test_ScaleReturnsError_WhenScaleTargetsFails() {
387 | opts := Opts{}
388 | mockObj := getDockerComposeMock(opts, "ScaleTargets")
389 | mockObj.On(
390 | "ScaleTargets",
391 | mock.Anything,
392 | mock.Anything,
393 | mock.Anything,
394 | mock.Anything,
395 | mock.Anything,
396 | ).Return(fmt.Errorf("This is an error"))
397 | serviceDiscovery = getServiceDiscoveryMock(opts, "")
398 |
399 | actual := Flow{}.Scale(opts, mockObj, "myTarget", true)
400 |
401 | s.Error(actual)
402 | }
403 |
404 | // Scale > PutScale
405 |
406 | func (s FlowTestSuite) Test_ScaleInvokesPutScale() {
407 | opts := Opts{
408 | ServiceDiscoveryAddress: "mySeviceDiscoveryAddress",
409 | ServiceName: "myService",
410 | Scale: "34",
411 | }
412 | mockObj := getDockerComposeMock(opts, "")
413 | scMockObj := getServiceDiscoveryMock(opts, "")
414 | serviceDiscovery = scMockObj
415 | scale, _ := scMockObj.GetScaleCalc(opts.ServiceDiscoveryAddress, opts.ServiceName, opts.Scale)
416 |
417 | Flow{}.Scale(opts, mockObj, "myTarget", true)
418 |
419 | scMockObj.AssertCalled(s.T(), "PutScale", opts.ServiceDiscoveryAddress, opts.ServiceName, scale)
420 | }
421 |
422 | // Scale > RemoveFlow
423 |
424 | func (s FlowTestSuite) Test_Scale_InvokesDockerComposeRemoveFlow() {
425 | mockObj := getDockerComposeMock(s.opts, "")
426 | s.dc = mockObj
427 |
428 | Flow{}.Scale(s.opts, s.dc, s.opts.CurrentTarget, true)
429 |
430 | mockObj.AssertCalled(s.T(), "RemoveFlow")
431 | }
432 |
433 | func (s FlowTestSuite) Test_Scale_ReturnsError_WhenDockerComposeRemoveFlowFails() {
434 | mockObj := getDockerComposeMock(s.opts, "RemoveFlow")
435 | mockObj.On("RemoveFlow").Return(fmt.Errorf("This is an error"))
436 | s.dc = mockObj
437 |
438 | err := Flow{}.Scale(s.opts, s.dc, s.opts.CurrentTarget, true)
439 |
440 | s.Error(err)
441 | }
442 |
443 | // GetPullTargets
444 |
445 | func (s FlowTestSuite) Test_GetTargetsReturnsAllTargets() {
446 | opts := Opts{
447 | NextTarget: "myNextTarget",
448 | SideTargets: []string{"sideTarget1", "sideTarget2"},
449 | PullSideTargets: true,
450 | }
451 | expected := append([]string{opts.NextTarget}, opts.SideTargets...)
452 |
453 | actual := Flow{}.GetPullTargets(opts)
454 |
455 | s.Equal(expected, actual)
456 | }
457 |
458 | func (s FlowTestSuite) Test_GetTargetsExcludesSideTargets_WhenNotPullSideTargets() {
459 | opts := Opts{
460 | NextTarget: "myNextTarget",
461 | SideTargets: []string{"sideTarget1", "sideTarget2"},
462 | PullSideTargets: false,
463 | }
464 | expected := []string{opts.NextTarget}
465 |
466 | actual := Flow{}.GetPullTargets(opts)
467 |
468 | s.Equal(expected, actual)
469 | }
470 |
471 | // Proxy
472 |
473 | func (s FlowTestSuite) Test_Proxy_InvokesProvision() {
474 | mockObj := getProxyMock("")
475 |
476 | Flow{}.Proxy(s.opts, mockObj)
477 |
478 | mockObj.AssertCalled(
479 | s.T(),
480 | "Provision",
481 | s.opts.ProxyDockerHost, s.opts.ProxyReconfPort, // Provision(dockerHost, reconfPort, certPath, scAddress)
482 | s.opts.ProxyDockerCertPath,
483 | s.opts.ServiceDiscoveryAddress,
484 | )
485 | }
486 |
487 | func (s FlowTestSuite) Test_Proxy_ReturnsError_WhenProvisionFails() {
488 | opts := Opts{}
489 | mockObj := getProxyMock("Provision")
490 | mockObj.On("Provision", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("This is an error"))
491 |
492 | actual := Flow{}.Proxy(opts, mockObj)
493 |
494 | s.Error(actual)
495 | }
496 |
497 | func (s FlowTestSuite) Test_Proxy_InvokesReconfigure_WhenDeploy() {
498 | mockObj := getProxyMock("")
499 | s.opts.Flow = []string{FLOW_DEPLOY}
500 |
501 | Flow{}.Proxy(s.opts, mockObj)
502 |
503 | mockObj.AssertCalled(
504 | s.T(),
505 | "Reconfigure",
506 | s.opts.ProxyDockerHost,
507 | s.opts.ProxyDockerCertPath,
508 | s.opts.ProxyHost,
509 | s.opts.ProxyReconfPort,
510 | s.opts.ServiceName,
511 | s.opts.NextColor,
512 | s.opts.ServicePath,
513 | "",
514 | "",
515 | )
516 | }
517 |
518 | func (s FlowTestSuite) Test_Proxy_InvokesReconfigure_WhenScale() {
519 | mockObj := getProxyMock("")
520 | s.opts.Flow = []string{FLOW_SCALE}
521 |
522 | Flow{}.Proxy(s.opts, mockObj)
523 |
524 | mockObj.AssertCalled(
525 | s.T(),
526 | "Reconfigure",
527 | s.opts.ProxyDockerHost,
528 | s.opts.ProxyDockerCertPath,
529 | s.opts.ProxyHost,
530 | s.opts.ProxyReconfPort,
531 | s.opts.ServiceName,
532 | s.opts.CurrentColor,
533 | s.opts.ServicePath,
534 | "",
535 | "",
536 | )
537 | }
538 |
539 | func (s FlowTestSuite) Test_Proxy_ReturnsError_WhenReconfigureFails() {
540 | s.opts.Flow = []string{FLOW_DEPLOY}
541 | mockObj := getProxyMock("Reconfigure")
542 | mockObj.On(
543 | "Reconfigure",
544 | mock.Anything,
545 | mock.Anything,
546 | mock.Anything,
547 | mock.Anything,
548 | mock.Anything,
549 | mock.Anything,
550 | mock.Anything,
551 | mock.Anything,
552 | mock.Anything,
553 | ).Return(fmt.Errorf("This is an error"))
554 |
555 | actual := Flow{}.Proxy(s.opts, mockObj)
556 |
557 | s.Error(actual)
558 | }
559 |
560 | // Suite
561 |
562 | func TestFlowTestSuite(t *testing.T) {
563 | dockerHost := os.Getenv("DOCKER_HOST")
564 | dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
565 | defer func() {
566 | os.Setenv("DOCKER_HOST", dockerHost)
567 | os.Setenv("DOCKER_CERT_PATH", dockerCertPath)
568 | }()
569 | suite.Run(t, new(FlowTestSuite))
570 | }
571 |
572 | // Mock
573 |
574 | type FlowMock struct {
575 | mock.Mock
576 | }
577 |
578 | func (m *FlowMock) Deploy(opts Opts, dc compose.DockerComposer) error {
579 | args := m.Called(opts, dc)
580 | return args.Error(0)
581 | }
582 |
583 | func (m *FlowMock) GetPullTargets(opts Opts) []string {
584 | // args := m.Called(opts)
585 | return []string{}
586 | }
587 |
588 | func (m *FlowMock) Scale(opts Opts, dc compose.DockerComposer, target string, createFlowFile bool) error {
589 | args := m.Called(opts, dc, target, createFlowFile)
590 | return args.Error(0)
591 | }
592 |
593 | func (m *FlowMock) Proxy(opts Opts, proxy Proxy) error {
594 | args := m.Called(opts, proxy)
595 | return args.Error(0)
596 | }
597 |
598 | func getFlowMock(skipMethod string) *FlowMock {
599 | mockObj := new(FlowMock)
600 | if skipMethod != "Deploy" {
601 | mockObj.On("Deploy", mock.Anything, mock.Anything).Return(nil)
602 | }
603 | if skipMethod != "GetPullTargets" {
604 | mockObj.On("GetPullTargets", mock.Anything).Return(nil)
605 | }
606 | if skipMethod != "Scale" {
607 | mockObj.On("Scale", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
608 | }
609 | if skipMethod != "Proxy" {
610 | mockObj.On("Proxy", mock.Anything, mock.Anything).Return(nil)
611 | }
612 | return mockObj
613 | }
614 |
--------------------------------------------------------------------------------
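
Note: the suites above stub collaborators by reassigning package-level function variables (GetOpts, logFatal, logPrintln, compose.GetDockerCompose) and restoring the originals afterwards. The following minimal sketch, independent of this repository, illustrates the pattern; the names clock and Greeting are illustrative only.

package example

import (
	"fmt"
	"testing"
	"time"
)

// clock is a package-level function variable so tests can replace it.
var clock = time.Now

// Greeting formats a message using whichever clock is currently installed.
func Greeting(name string) string {
	return fmt.Sprintf("Hello %s, it is %s", name, clock().Format("15:04"))
}

func TestGreeting(t *testing.T) {
	clockOrig := clock
	defer func() { clock = clockOrig }() // restore when the test ends, not when the setup ends
	clock = func() time.Time { return time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }

	if actual := Greeting("docker-flow"); actual != "Hello docker-flow, it is 12:00" {
		t.Errorf("unexpected greeting: %s", actual)
	}
}
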
/ha_proxy.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "net/http"
7 | "os"
8 | "os/exec"
9 | "strings"
10 | "time"
11 | "./util"
12 | )
13 |
14 | const containerStatusRunning = 1
15 | const containerStatusExited = 2
16 | const containerStatusRemoved = 3
17 | const ProxyReconfigureDefaultPort = 8080
18 | const ConsulTemplatesDir = "/consul_templates"
19 |
20 | var haProxy Proxy = HaProxy{}
21 |
22 | type HaProxy struct{}
23 |
24 | var runHaProxyRunCmd = func(cmd *exec.Cmd) error { return cmd.Run() }
25 | var runHaProxyExecCmd = func(cmd *exec.Cmd) error { return cmd.Run() }
26 | var runHaProxyCpCmd = func(cmd *exec.Cmd) error { return cmd.Run() }
27 | var runHaProxyPsCmd = func(cmd *exec.Cmd) error { return cmd.Run() }
28 | var runHaProxyStartCmd = func(cmd *exec.Cmd) error { return cmd.Run() }
29 | var httpGet = http.Get
30 |
31 | func (m HaProxy) Provision(dockerHost, reconfPort, certPath, scAddress string) error {
32 | if len(dockerHost) == 0 {
33 | return fmt.Errorf("Proxy docker host is mandatory for the proxy step. Please set the proxy-docker-host argument.")
34 | }
35 | if len(scAddress) == 0 {
36 | return fmt.Errorf("Service Discovery Address is mandatory.")
37 | }
38 | util.SetDockerHost(dockerHost, certPath)
39 | status, err := m.ps()
40 | if err != nil {
41 | return err
42 | }
43 | switch status {
44 | case containerStatusRunning:
45 | return nil
46 | case containerStatusExited:
47 | if err := m.start(); err != nil {
48 | return err
49 | }
50 | util.Sleep(time.Second * 5)
51 | default:
52 | if err := m.run(reconfPort, scAddress); err != nil {
53 | return err
54 | }
55 | util.Sleep(time.Second * 5)
56 | }
57 | return nil
58 | }
59 |
60 | // TODO: Change args to struct
61 | func (m HaProxy) Reconfigure(
62 | dockerHost, dockerCertPath, host, reconfPort, serviceName, serviceColor string,
63 | servicePath []string,
64 | consulTemplateFePath string, consulTemplateBePath string,
65 | ) error {
66 | if len(consulTemplateFePath) > 0 {
67 | if err := m.sendConsulTemplatesToTheProxy(dockerHost, dockerCertPath, consulTemplateFePath, consulTemplateBePath, serviceName, serviceColor); err != nil {
68 | return err
69 | }
70 | } else if len(servicePath) == 0 {
71 | return fmt.Errorf("It is mandatory to specify servicePath or consulTemplatePath. Please set one of the two.")
72 | }
73 | if len(host) == 0 {
74 | return fmt.Errorf("Proxy host is mandatory for the proxy step. Please set the proxy-host argument.")
75 | }
76 | if len(serviceName) == 0 {
77 | return fmt.Errorf("Service name is mandatory for the proxy step.")
78 | }
79 | if len(reconfPort) == 0 && !strings.Contains(host, ":") {
80 | return fmt.Errorf("Reconfigure port is mandatory.")
81 | }
82 | if err := m.sendReconfigureRequest(host, reconfPort, serviceName, serviceColor, servicePath, consulTemplateFePath, consulTemplateBePath); err != nil {
83 | return err
84 | }
85 | return nil
86 | }
87 |
88 | func (m HaProxy) sendReconfigureRequest(
89 | host, reconfPort, serviceName, serviceColor string,
90 | servicePath []string,
91 | consulTemplateFePath, consulTemplateBePath string,
92 | ) error {
93 | address := host
94 | if len(reconfPort) > 0 {
95 | address = fmt.Sprintf("%s:%s", host, reconfPort)
96 | }
97 | if !strings.HasPrefix(strings.ToLower(address), "http") {
98 | address = fmt.Sprintf("http://%s", address)
99 | }
100 | proxyUrl := fmt.Sprintf(
101 | "%s/v1/docker-flow-proxy/reconfigure?serviceName=%s",
102 | address,
103 | serviceName,
104 | )
105 | if len(consulTemplateFePath) > 0 {
106 | proxyUrl = fmt.Sprintf("%s&consulTemplateFePath=%s/%s-fe.tmpl&consulTemplateBePath=%s/%s-be.tmpl", proxyUrl, ConsulTemplatesDir, serviceName, ConsulTemplatesDir, serviceName)
107 | } else {
108 | if len(serviceColor) > 0 {
109 | proxyUrl = fmt.Sprintf("%s&serviceColor=%s", proxyUrl, serviceColor)
110 | }
111 | proxyUrl = fmt.Sprintf("%s&servicePath=%s", proxyUrl, strings.Join(servicePath, ","))
112 | }
113 | logPrintf("Sending request to %s to reconfigure the proxy", proxyUrl)
114 | resp, err := httpGet(proxyUrl)
115 | if err != nil {
116 | return fmt.Errorf("The request to reconfigure the proxy failed\n%s\n", err.Error())
117 | }
118 | defer resp.Body.Close()
119 | if resp.StatusCode != 200 {
120 | return fmt.Errorf("The request to the proxy (%s) failed with status code %d\n", proxyUrl, resp.StatusCode)
121 | }
122 | return nil
123 | }
124 |
125 | func (m HaProxy) sendConsulTemplatesToTheProxy(dockerHost, dockerCertPath, consulTemplateFePath, consulTemplateBePath, serviceName, color string) error {
126 | if err := m.sendConsulTemplateToTheProxy(dockerHost, dockerCertPath, consulTemplateFePath, serviceName, color, "fe"); err != nil {
127 | return err
128 | }
129 | if err := m.sendConsulTemplateToTheProxy(dockerHost, dockerCertPath, consulTemplateBePath, serviceName, color, "be"); err != nil {
130 | return err
131 | }
132 | return nil
133 | }
134 |
135 | func (m HaProxy) sendConsulTemplateToTheProxy(dockerHost, dockerCertPath, consulTemplatePath, serviceName, color, templateType string) error {
136 | if err := m.createTempConsulTemplate(consulTemplatePath, serviceName, color); err != nil {
137 | return err
138 | }
139 | file := fmt.Sprintf("%s-%s.tmpl", serviceName, templateType)
140 | if err := m.copyConsulTemplateToTheProxy(dockerHost, dockerCertPath, consulTemplatePath, file); err != nil {
141 | return err
142 | }
143 | util.RemoveFile(fmt.Sprintf("%s.tmp", consulTemplatePath))
144 | return nil
145 | }
146 |
147 | func (m HaProxy) copyConsulTemplateToTheProxy(dockerHost, dockerCertPath, consulTemplatePath, templateName string) error {
148 | util.SetDockerHost(dockerHost, dockerCertPath)
149 | args := []string{"exec", "-i", "docker-flow-proxy", "mkdir", "-p", ConsulTemplatesDir}
150 | execCmd := exec.Command("docker", args...)
151 | execCmd.Stdout = os.Stdout
152 | execCmd.Stderr = os.Stderr
153 | // TODO: Remove. Deprecated since Docker Flow: Proxy has that directory by default.
154 | if err := runHaProxyExecCmd(execCmd); err != nil {
155 | return err
156 | }
157 | args = []string{
158 | "cp",
159 | fmt.Sprintf("%s.tmp", consulTemplatePath),
160 | fmt.Sprintf("docker-flow-proxy:%s/%s", ConsulTemplatesDir, templateName),
161 | }
162 | cpCmd := exec.Command("docker", args...)
163 | cpCmd.Stdout = os.Stdout
164 | cpCmd.Stderr = os.Stderr
165 | if err := runHaProxyCpCmd(cpCmd); err != nil {
166 | return err
167 | }
168 | return nil
169 | }
170 |
171 | func (m HaProxy) createTempConsulTemplate(consulTemplatePath, serviceName, color string) error {
172 | fullServiceName := fmt.Sprintf("%s-%s", serviceName, color)
173 | tmpPath := fmt.Sprintf("%s.tmp", consulTemplatePath)
174 | data, err := util.ReadFile(consulTemplatePath)
175 | if err != nil {
176 | return fmt.Errorf("Could not read the Consul template %s\n%s", consulTemplatePath, err.Error())
177 | }
178 | if err := util.WriteFile(
179 | tmpPath,
180 | []byte(strings.Replace(string(data), "SERVICE_NAME", fullServiceName, -1)),
181 | 0644,
182 | ); err != nil {
183 | return fmt.Errorf("Could not write temporary Consul template to %s\n%s", tmpPath, err.Error())
184 | }
185 | return nil
186 | }
187 |
188 | func (m HaProxy) run(reconfPort, scAddress string) error {
189 | logPrintln("Running the docker-flow-proxy container...")
190 | args := []string{
191 | "run", "-d",
192 | "--name", "docker-flow-proxy",
193 | "-e", fmt.Sprintf("%s=%s", "CONSUL_ADDRESS", scAddress),
194 | "-p", "80:80", "-p", fmt.Sprintf("%s:8080", reconfPort),
195 | "vfarcic/docker-flow-proxy",
196 | }
197 | cmd := exec.Command("docker", args...)
198 | cmd.Stdout = os.Stdout
199 | cmd.Stderr = os.Stderr
200 | if err := runHaProxyRunCmd(cmd); err != nil {
201 | return fmt.Errorf("Docker run command failed\n%s\n%s\n", strings.Join(cmd.Args, " "), err.Error())
202 | }
203 | return nil
204 | }
205 |
206 | func (m HaProxy) ps() (int, error) {
207 | logPrintln("Checking status of the docker-flow-proxy container...")
208 | args := []string{
209 | "ps", "-a",
210 | "--filter", "name=docker-flow-proxy",
211 | "--format", "{{.Status}}",
212 | }
213 | cmd := exec.Command("docker", args...)
214 | var out bytes.Buffer
215 | cmd.Stdout = &out
216 | cmd.Stderr = os.Stderr
217 | if err := runHaProxyPsCmd(cmd); err != nil {
218 | return 0, fmt.Errorf("Docker ps command failed\n%s\n%s\n", strings.Join(cmd.Args, " "), err.Error())
219 | }
220 | status := string(out.Bytes())
221 | if strings.HasPrefix(status, "Exited") {
222 | return containerStatusExited, nil
223 | }
224 | if len(status) == 0 {
225 | return containerStatusRemoved, nil
226 | }
227 | return containerStatusRunning, nil
228 | }
229 |
230 | func (m HaProxy) start() error {
231 | logPrintln("Starting the docker-flow-proxy container...")
232 | args := []string{"start", "docker-flow-proxy"}
233 | cmd := exec.Command("docker", args...)
234 | cmd.Stdout = os.Stdout
235 | cmd.Stderr = os.Stderr
236 | if err := runHaProxyStartCmd(cmd); err != nil {
237 | return fmt.Errorf("Docker start command failed\n%s\n%s\n", strings.Join(cmd.Args, " "), err.Error())
238 | }
239 | return nil
240 | }
241 |
--------------------------------------------------------------------------------
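
For reference, the reconfigure URL built by sendReconfigureRequest above has the shape shown in this small, self-contained sketch; the host, port, and service values are illustrative only.

package main

import (
	"fmt"
	"strings"
)

func main() {
	host, port := "proxy.example.com", "8080" // illustrative values
	serviceName, serviceColor := "go-demo", "blue"
	servicePath := []string{"/demo/hello"}

	// Mirror the address handling in sendReconfigureRequest.
	address := fmt.Sprintf("%s:%s", host, port)
	if !strings.HasPrefix(strings.ToLower(address), "http") {
		address = fmt.Sprintf("http://%s", address)
	}
	url := fmt.Sprintf("%s/v1/docker-flow-proxy/reconfigure?serviceName=%s", address, serviceName)
	if len(serviceColor) > 0 {
		url = fmt.Sprintf("%s&serviceColor=%s", url, serviceColor)
	}
	url = fmt.Sprintf("%s&servicePath=%s", url, strings.Join(servicePath, ","))

	// Prints:
	// http://proxy.example.com:8080/v1/docker-flow-proxy/reconfigure?serviceName=go-demo&serviceColor=blue&servicePath=/demo/hello
	fmt.Println(url)
}
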
/ha_proxy_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/stretchr/testify/suite"
6 | "net/http"
7 | "net/http/httptest"
8 | "os"
9 | "os/exec"
10 | "strings"
11 | "testing"
12 | "time"
13 | "./util"
14 | )
15 |
16 | type HaProxyTestSuite struct {
17 | suite.Suite
18 | ScAddress string
19 | CertPath string
20 | ExitedMessage string
21 | Host string
22 | ServiceName string
23 | Color string
24 | ServicePath []string
25 | ReconfPort string
26 | DockerHost string
27 | DockerCertPath string
28 | Server *httptest.Server
29 | }
30 |
31 | func (s *HaProxyTestSuite) SetupTest() {
32 | s.ScAddress = "1.2.3.4:1234"
33 | s.ServiceName = "my-service"
34 | s.Color = "purpurina"
35 | s.ServicePath = []string{"/path/to/my/service", "/path/to/my/other/service"}
36 | s.ExitedMessage = "Exited (2) 15 seconds ago"
37 | s.ReconfPort = "5362"
38 | s.Server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
39 | reconfigureUrl := fmt.Sprintf(
40 | "/v1/docker-flow-proxy/reconfigure",
41 | s.ServiceName,
42 | strings.Join(s.ServicePath, ","),
43 | )
44 | actualUrl := fmt.Sprintf("%s?%s", r.URL.Path, r.URL.RawQuery)
45 | if r.Method == "GET" {
46 | if strings.HasPrefix(actualUrl, reconfigureUrl) {
47 | w.WriteHeader(http.StatusOK)
48 | }
49 | }
50 | }))
51 | s.DockerHost = "tcp://my-docker-proxy-host"
52 | s.DockerCertPath = "/path/to/pem"
53 | s.Host = "http://my-docker-proxy-host.com"
54 | runHaProxyRunCmd = func(cmd *exec.Cmd) error {
55 | return nil
56 | }
57 | runHaProxyPsCmd = func(cmd *exec.Cmd) error {
58 | return nil
59 | }
60 | runHaProxyStartCmd = func(cmd *exec.Cmd) error {
61 | return nil
62 | }
63 | httpGetOrig := httpGet
64 | defer func() { httpGet = httpGetOrig }()
65 | httpGet = func(url string) (resp *http.Response, err error) {
66 | return nil, nil
67 | }
68 | }
69 |
70 | // Provision
71 |
72 | func (s HaProxyTestSuite) Test_Provision_SetsDockerHost() {
73 | actual := ""
74 | SetDockerHostOrig := util.SetDockerHost
75 | defer func() { util.SetDockerHost = SetDockerHostOrig }()
76 | util.SetDockerHost = func(host, certPath string) {
77 | actual = host
78 | }
79 |
80 | HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
81 |
82 | s.Equal(s.Host, actual)
83 | }
84 |
85 | func (s HaProxyTestSuite) Test_Provision_ReturnsError_WhenProxyHostIsEmpty() {
86 | err := HaProxy{}.Provision("", s.ReconfPort, s.CertPath, s.ScAddress)
87 |
88 | s.Error(err)
89 | }
90 |
91 | func (s HaProxyTestSuite) Test_Provision_ReturnsError_WhenScAddressIsEmpty() {
92 | err := HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, "")
93 |
94 | s.Error(err)
95 | }
96 |
97 | func (s HaProxyTestSuite) Test_Provision_RunsDockerFlowProxyContainer() {
98 | var actual []string
99 | expected := []string{
100 | "docker", "run", "-d",
101 | "--name", "docker-flow-proxy",
102 | "-e", fmt.Sprintf("%s=%s", "CONSUL_ADDRESS", s.ScAddress),
103 | "-p", "80:80", "-p", fmt.Sprintf("%s:8080", s.ReconfPort),
104 | "vfarcic/docker-flow-proxy",
105 | }
106 | runHaProxyRunCmd = func(cmd *exec.Cmd) error {
107 | actual = cmd.Args
108 | return nil
109 | }
110 |
111 | HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
112 |
113 | s.Equal(expected, actual)
114 | }
115 |
116 | func (s HaProxyTestSuite) Test_Provision_ReturnsError_WhenFailure() {
117 | runHaProxyRunCmd = func(cmd *exec.Cmd) error {
118 | return fmt.Errorf("This is an error")
119 | }
120 |
121 | err := HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
122 |
123 | s.Error(err)
124 | }
125 |
126 | func (s HaProxyTestSuite) Test_Provision_RunsDockerPs() {
127 | var actual []string
128 | expected := []string{
129 | "docker", "ps", "-a",
130 | "--filter", "name=docker-flow-proxy",
131 | "--format", "{{.Status}}",
132 | }
133 | runHaProxyPsCmd = func(cmd *exec.Cmd) error {
134 | actual = cmd.Args
135 | return nil
136 | }
137 |
138 | HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
139 |
140 | s.Equal(expected, actual)
141 | }
142 |
143 | func (s HaProxyTestSuite) Test_Provision_ReturnsError_WhenPsFailure() {
144 | runHaProxyPsCmd = func(cmd *exec.Cmd) error {
145 | return fmt.Errorf("This is an docker ps error")
146 | }
147 |
148 | err := HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
149 |
150 | s.Error(err)
151 | }
152 |
153 | func (s HaProxyTestSuite) Test_Provision_ReturnsError_WhenProxyFails() {
154 | runHaProxyPsCmd = func(cmd *exec.Cmd) error {
155 | return fmt.Errorf("This is an docker ps error")
156 | }
157 |
158 | err := HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
159 |
160 | s.Error(err)
161 | }
162 |
163 | func (s HaProxyTestSuite) Test_Provision_DoesNotRun_WhenProxyExists() {
164 | actual := false
165 | runHaProxyRunCmd = func(cmd *exec.Cmd) error {
166 | actual = true
167 | return nil
168 | }
169 | runHaProxyPsCmd = func(cmd *exec.Cmd) error {
170 | cmd.Stdout.Write([]byte("Up 3 hours"))
171 | return nil
172 | }
173 | HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
174 |
175 | s.False(actual)
176 | }
177 |
178 | func (s HaProxyTestSuite) Test_Provision_StartsAndDoesNotRun_WhenProxyIsExited() {
179 | start := false
180 | run := false
181 | runHaProxyStartCmd = func(cmd *exec.Cmd) error {
182 | start = true
183 | return nil
184 | }
185 | runHaProxyRunCmd = func(cmd *exec.Cmd) error {
186 | run = true
187 | return nil
188 | }
189 | runHaProxyPsCmd = func(cmd *exec.Cmd) error {
190 | cmd.Stdout.Write([]byte(s.ExitedMessage))
191 | return nil
192 | }
193 |
194 | HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
195 |
196 | s.True(start)
197 | s.False(run)
198 | }
199 |
200 | func (s HaProxyTestSuite) Test_Provision_StartsDockerFlowProxyContainer_WhenProxyIsExited() {
201 | var actual []string
202 | expected := []string{"docker", "start", "docker-flow-proxy"}
203 | runHaProxyPsCmd = func(cmd *exec.Cmd) error {
204 | cmd.Stdout.Write([]byte(s.ExitedMessage))
205 | return nil
206 | }
207 | runHaProxyStartCmd = func(cmd *exec.Cmd) error {
208 | actual = cmd.Args
209 | return nil
210 | }
211 |
212 | HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
213 |
214 | s.Equal(expected, actual)
215 | }
216 |
217 | func (s HaProxyTestSuite) Test_Provision_ReturnsError_WhenStartFailure() {
218 | runHaProxyPsCmd = func(cmd *exec.Cmd) error {
219 | cmd.Stdout.Write([]byte(s.ExitedMessage))
220 | return nil
221 | }
222 | runHaProxyStartCmd = func(cmd *exec.Cmd) error {
223 | return fmt.Errorf("This is an docker start error")
224 | }
225 |
226 | err := HaProxy{}.Provision(s.Host, s.ReconfPort, s.CertPath, s.ScAddress)
227 |
228 | s.Error(err)
229 | }
230 |
231 | // Reconfigure
232 |
233 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenProxyHostIsEmpty() {
234 | err := HaProxy{}.Reconfigure("", "", "", s.ReconfPort, s.ServiceName, s.Color, s.ServicePath, "", "")
235 |
236 | s.Error(err)
237 | }
238 |
239 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenServiceNameIsEmpty() {
240 | err := HaProxy{}.Reconfigure("", "", s.Host, s.ReconfPort, "", s.Color, s.ServicePath, "", "")
241 |
242 | s.Error(err)
243 | }
244 |
245 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenServicePathAndConsulTemplatePathAreEmpty() {
246 | err := HaProxy{}.Reconfigure("", "", s.Host, s.ReconfPort, s.ServiceName, s.Color, []string{""}, "", "")
247 |
248 | s.Error(err)
249 | }
250 |
251 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenReconfPortIsEmpty() {
252 | err := HaProxy{}.Reconfigure("", "", s.Host, "", s.ServiceName, s.Color, s.ServicePath, "", "")
253 |
254 | s.Error(err)
255 | }
256 |
257 | func (s HaProxyTestSuite) Test_Reconfigure_SendsHttpRequest() {
258 | actual := ""
259 | expected := fmt.Sprintf(
260 | "%s:%s/v1/docker-flow-proxy/reconfigure?serviceName=%s&serviceColor=%s&servicePath=%s",
261 | s.Host,
262 | s.ReconfPort,
263 | s.ServiceName,
264 | s.Color,
265 | strings.Join(s.ServicePath, ","),
266 | )
267 | httpGetOrig := httpGet
268 | defer func() { httpGet = httpGetOrig }()
269 | httpGet = func(url string) (resp *http.Response, err error) {
270 | actual = url
271 | return nil, fmt.Errorf("This is an HTTP error")
272 | }
273 |
274 | HaProxy{}.Reconfigure("", "", s.Host, s.ReconfPort, s.ServiceName, s.Color, s.ServicePath, "", "")
275 |
276 | s.Equal(expected, actual)
277 | }
278 |
279 | func (s HaProxyTestSuite) Test_Reconfigure_SendsHttpRequestWithOutColor_WhenNotBlueGreen() {
280 | actual := ""
281 | expected := fmt.Sprintf(
282 | "%s:%s/v1/docker-flow-proxy/reconfigure?serviceName=%s&servicePath=%s",
283 | s.Host,
284 | s.ReconfPort,
285 | s.ServiceName,
286 | strings.Join(s.ServicePath, ","),
287 | )
288 | httpGetOrig := httpGet
289 | defer func() { httpGet = httpGetOrig }()
290 | httpGet = func(url string) (resp *http.Response, err error) {
291 | actual = url
292 | return nil, fmt.Errorf("This is an HTTP error")
293 | }
294 |
295 | HaProxy{}.Reconfigure("", "", s.Host, s.ReconfPort, s.ServiceName, "", s.ServicePath, "", "")
296 |
297 | s.Equal(expected, actual)
298 | }
299 |
300 | func (s HaProxyTestSuite) Test_Reconfigure_SendsHttpRequestWithPrependedHttp() {
301 | actual := ""
302 | expected := fmt.Sprintf(
303 | "%s:%s/v1/docker-flow-proxy/reconfigure?serviceName=%s&servicePath=%s",
304 | s.Host,
305 | s.ReconfPort,
306 | s.ServiceName,
307 | strings.Join(s.ServicePath, ","),
308 | )
309 | httpGetOrig := httpGet
310 | defer func() { httpGet = httpGetOrig }()
311 | httpGet = func(url string) (resp *http.Response, err error) {
312 | actual = url
313 | return nil, fmt.Errorf("This is an HTTP error")
314 | }
315 |
316 | HaProxy{}.Reconfigure("", "", "my-docker-proxy-host.com", s.ReconfPort, s.ServiceName, "", s.ServicePath, "", "")
317 |
318 | s.Equal(expected, actual)
319 | }
320 |
321 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenRequestFails() {
322 | httpGetOrig := httpGet
323 | defer func() { httpGet = httpGetOrig }()
324 | httpGet = func(url string) (resp *http.Response, err error) {
325 | return nil, fmt.Errorf("This is an HTTP error")
326 | }
327 |
328 | err := HaProxy{}.Reconfigure("", "", s.Host, s.ReconfPort, s.ServiceName, s.Color, s.ServicePath, "", "")
329 |
330 | s.Error(err)
331 | }
332 |
333 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenResponseCodeIsNot2xx() {
334 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
335 | w.WriteHeader(http.StatusBadRequest)
336 | }))
337 |
338 | err := HaProxy{}.Reconfigure("", "", server.URL, "", s.ServiceName, s.Color, s.ServicePath, "", "")
339 |
340 | s.Error(err)
341 | }
342 |
343 | func (s HaProxyTestSuite) Test_Reconfigure_SetsDockerHost_WhenConsulTemplatePathIsPresent() {
344 | os.Unsetenv("DOCKER_HOST")
345 |
346 | err := HaProxy{}.Reconfigure(s.DockerHost, s.DockerCertPath, s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, "/path/to/consul/fe/template", "/path/to/consul/be/template")
347 |
348 | s.NoError(err)
349 | s.Equal(s.DockerHost, os.Getenv("DOCKER_HOST"))
350 | s.Equal(s.DockerCertPath, os.Getenv("DOCKER_CERT_PATH"))
351 | }
352 |
353 | func (s HaProxyTestSuite) Test_Reconfigure_CreatesConsulTemplatesDirectory_WhenConsulTemplatePathIsPresent() {
354 | var actual []string
355 | expected := []string{"docker", "exec", "-i", "docker-flow-proxy", "mkdir", "-p", "/consul_templates"}
356 | runHaProxyExecCmd = func(cmd *exec.Cmd) error {
357 | actual = cmd.Args
358 | return nil
359 | }
360 |
361 | HaProxy{}.Reconfigure("", "", s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, "/path/to/consul/fe/template", "/path/to/consul/be/template")
362 |
363 | s.Equal(expected, actual)
364 | }
365 |
366 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenDirectoryCreationFails() {
367 | runHaProxyExecCmdOrig := runHaProxyExecCmd
368 | defer func() { runHaProxyExecCmd = runHaProxyExecCmdOrig }()
369 | runHaProxyExecCmd = func(cmd *exec.Cmd) error {
370 | return fmt.Errorf("This is an docker exec error")
371 | }
372 |
373 | actual := HaProxy{}.Reconfigure("", "", s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, "/path/to/consul/fe/template", "/path/to/consul/be/template")
374 |
375 | s.Error(actual)
376 | }
377 |
378 | func (s HaProxyTestSuite) Test_Reconfigure_CopiesTemplates_WhenConsulTemplatePathIsPresent() {
379 | fePath := "/path/to/consul/fe/template"
380 | bePath := "/path/to/consul/be/template"
381 | var actual [][]string
382 | feExpected := []string{
383 | "docker",
384 | "cp",
385 | fmt.Sprintf("%s.tmp", fePath),
386 | fmt.Sprintf("docker-flow-proxy:/consul_templates/%s-fe.tmpl", s.ServiceName),
387 | }
388 | beExpected := []string{
389 | "docker",
390 | "cp",
391 | fmt.Sprintf("%s.tmp", bePath),
392 | fmt.Sprintf("docker-flow-proxy:/consul_templates/%s-be.tmpl", s.ServiceName),
393 | }
394 | runHaProxyCpCmdOrig := runHaProxyCpCmd
395 | defer func() { runHaProxyCpCmd = runHaProxyCpCmdOrig }()
396 | runHaProxyCpCmd = func(cmd *exec.Cmd) error {
397 | actual = append(actual, cmd.Args)
398 | return nil
399 | }
400 |
401 | HaProxy{}.Reconfigure("", "", s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, fePath, bePath)
402 |
403 | s.Equal(feExpected, actual[0])
404 | s.Equal(beExpected, actual[1])
405 | }
406 |
407 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenTemplateCopyFails() {
408 | runHaProxyCpCmdOrig := runHaProxyCpCmd
409 | defer func() { runHaProxyCpCmd = runHaProxyCpCmdOrig }()
410 | runHaProxyCpCmd = func(cmd *exec.Cmd) error {
411 | return fmt.Errorf("This is an docker cp error")
412 | }
413 |
414 | actual := HaProxy{}.Reconfigure("", "", s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, "/path/to/consul/fe/template", "/path/to/consul/be/template")
415 |
416 | s.Error(actual)
417 | }
418 |
419 | func (s HaProxyTestSuite) Test_Reconfigure_SendsHttpRequestWithConsulTemplatePath_WhenSpecified() {
420 | actual := ""
421 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
422 | actual = fmt.Sprintf("%s?%s", r.URL.Path, r.URL.RawQuery)
423 | }))
424 | expected := fmt.Sprintf(
425 | "/v1/docker-flow-proxy/reconfigure?serviceName=%s&consulTemplateFePath=/consul_templates/%s-fe.tmpl&consulTemplateBePath=/consul_templates/%s-be.tmpl",
426 | s.ServiceName,
427 | s.ServiceName,
428 | s.ServiceName,
429 | )
430 |
431 | HaProxy{}.Reconfigure("", "", server.URL, "", s.ServiceName, s.Color, s.ServicePath, "/path/to/consul/fe/template", "/path/to/consul/be/template")
432 |
433 | s.Equal(expected, actual)
434 | }
435 |
436 | func (s HaProxyTestSuite) Test_Reconfigure_CreatesTempTemplateFile() {
437 | fePath := "/path/to/consul/fe/template"
438 | bePath := "/path/to/consul/be/template"
439 | var actualFilenames []string
440 | actualData := ""
441 | data := "This is a %s template"
442 | expectedData := fmt.Sprintf(data, s.ServiceName+"-"+s.Color)
443 | writeFileOrig := util.WriteFile
444 | defer func() { util.WriteFile = writeFileOrig }()
445 | util.WriteFile = func(filename string, data []byte, perm os.FileMode) error {
446 | actualFilenames = append(actualFilenames, filename)
447 | actualData = string(data)
448 | return nil
449 | }
450 | readFileOrig := util.ReadFile
451 | defer func() { util.ReadFile = readFileOrig }()
452 | util.ReadFile = func(fileName string) ([]byte, error) {
453 | return []byte(fmt.Sprintf(data, "SERVICE_NAME")), nil
454 | }
455 |
456 | HaProxy{}.Reconfigure("", "", s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, fePath, bePath)
457 |
458 | s.Equal(fePath+".tmp", actualFilenames[0])
459 | s.Equal(bePath+".tmp", actualFilenames[1])
460 | s.Equal(expectedData, actualData)
461 | }
462 |
463 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenTemplateFileReadFails() {
464 | readFileOrig := util.ReadFile
465 | defer func() { util.ReadFile = readFileOrig }()
466 | util.ReadFile = func(fileName string) ([]byte, error) {
467 | return []byte(""), fmt.Errorf("This is an read file error")
468 | }
469 |
470 | err := HaProxy{}.Reconfigure("", "", s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, "/path/to/consul/fe/template", "/path/to/consul/be/template")
471 |
472 | s.Error(err)
473 | }
474 |
475 | func (s HaProxyTestSuite) Test_Reconfigure_ReturnsError_WhenTempTemplateFileCreationFails() {
476 | writeFileOrig := util.WriteFile
477 | defer func() { util.WriteFile = writeFileOrig }()
478 | util.WriteFile = func(filename string, data []byte, perm os.FileMode) error {
479 | return fmt.Errorf("This is an write file error")
480 | }
481 |
482 | err := HaProxy{}.Reconfigure("", "", s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, "/path/to/consul/fe/template", "/path/to/consul/be/template")
483 |
484 | s.Error(err)
485 | }
486 |
487 | func (s HaProxyTestSuite) Test_Reconfigure_RemovesTempTemplateFile() {
488 | fePath := "/path/to/consul/fe/template"
489 | bePath := "/path/to/consul/be/template"
490 | expected := []string{
491 | fmt.Sprintf("%s.tmp", fePath),
492 | fmt.Sprintf("%s.tmp", bePath),
493 | }
494 | var actual []string
495 | removeFileOrig := util.RemoveFile
496 | defer func() { util.RemoveFile = removeFileOrig }()
497 | util.RemoveFile = func(name string) error {
498 | actual = append(actual, name)
499 | return nil
500 | }
501 |
502 | HaProxy{}.Reconfigure("", "", s.Server.URL, "", s.ServiceName, s.Color, s.ServicePath, fePath, bePath)
503 |
504 | s.Equal(expected, actual)
505 | }
506 |
507 | // Suite
508 |
509 | func TestHaProxyTestSuite(t *testing.T) {
510 | logPrintln = func(v ...interface{}) {}
511 | logPrintf = func(format string, v ...interface{}) {}
512 | util.Sleep = func(d time.Duration) {}
513 | dockerHost := os.Getenv("DOCKER_HOST")
514 | dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
515 | runHaProxyExecCmd = func(cmd *exec.Cmd) error {
516 | return nil
517 | }
518 | runHaProxyCpCmd = func(cmd *exec.Cmd) error {
519 | return nil
520 | }
521 | util.WriteFile = func(fileName string, data []byte, perm os.FileMode) error {
522 | return nil
523 | }
524 | util.ReadFile = func(fileName string) ([]byte, error) {
525 | return []byte(""), nil
526 | }
527 | util.RemoveFile = func(name string) error {
528 | return nil
529 | }
530 | defer func() {
531 | os.Setenv("DOCKER_HOST", dockerHost)
532 | os.Setenv("DOCKER_CERT_PATH", dockerCertPath)
533 | }()
534 | suite.Run(t, new(HaProxyTestSuite))
535 | }
536 |
--------------------------------------------------------------------------------
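
The Reconfigure tests above capture the request the client sends by pointing it at an httptest server. A stripped-down sketch of that technique, outside this codebase:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	requested := ""
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Record the path and query exactly as the client sent them.
		requested = fmt.Sprintf("%s?%s", r.URL.Path, r.URL.RawQuery)
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	if _, err := http.Get(server.URL + "/v1/docker-flow-proxy/reconfigure?serviceName=go-demo"); err != nil {
		panic(err)
	}
	fmt.Println(requested) // /v1/docker-flow-proxy/reconfigure?serviceName=go-demo
}
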
/integration_test.go:
--------------------------------------------------------------------------------
1 | // +build integration
2 |
3 | package main
4 |
5 | // Without Docker Machine
6 | // $ export HOST_IP=
7 | // $ go build && go test --cover --tags integration
8 |
9 | // With Docker Machine
10 | // $ docker-machine create -d virtualbox docker-flow-test
11 | // $ eval "$(docker-machine env docker-flow-test)"
12 | // $ go build && go test integration_test.go --cover --tags integration -v | tee tests.log
13 | // $ docker-machine rm -f docker-flow-test
14 |
15 | import (
16 | "bytes"
17 | "fmt"
18 | "github.com/stretchr/testify/suite"
19 | "log"
20 | "net/http"
21 | "os"
22 | "os/exec"
23 | "strings"
24 | "testing"
25 | "time"
26 | )
27 |
28 | type IntegrationTestSuite struct {
29 | suite.Suite
30 | ConsulIp string
31 | ProxyIp string
32 | ProxyHost string
33 | ProxyDockerHost string
34 | ProxyDockerCertPath string
35 | ServicePath string
36 | ServiceName string
37 | }
38 |
39 | func (s *IntegrationTestSuite) SetupTest() {
40 | s.removeAll()
41 | time.Sleep(time.Second)
42 | }
43 |
44 | // Integration
45 |
46 | func (s IntegrationTestSuite) XTest_BlueGreenDeployment() {
47 | origConsulAddress := os.Getenv("FLOW_CONSUL_ADDRESS")
48 | defer func() {
49 | os.Setenv("FLOW_CONSUL_ADDRESS", origConsulAddress)
50 | }()
51 |
52 | log.Println(">> Integration tests: deployment")
53 |
54 | log.Println("First deployment (blue)")
55 | s.runCmdWithStdOut(
56 | true,
57 | "./docker-flow",
58 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
59 | "--target", "app",
60 | "--side-target", "db",
61 | "--blue-green",
62 | )
63 | s.verifyContainer([]ContainerStatus{
64 | {"godemo_app-blue_1", "Up"},
65 | {"godemo_db", "Up"},
66 | })
67 |
68 | log.Println("Second deployment (green)")
69 | os.Setenv("FLOW_CONSUL_ADDRESS", fmt.Sprintf("http://%s:8500", s.ConsulIp))
70 | s.runCmdWithStdOut(true, "./docker-flow", "--flow", "deploy")
71 | s.verifyContainer([]ContainerStatus{
72 | {"godemo_app-blue_1", "Up"},
73 | {"godemo_app-green_1", "Up"},
74 | })
75 |
76 | log.Println("Third deployment (blue) with stop old release (green)")
77 | s.runCmdWithStdOut(
78 | true,
79 | "./docker-flow",
80 | "--flow", "deploy", "--flow", "stop-old")
81 | s.verifyContainer([]ContainerStatus{
82 | {"godemo_app-blue_1", "Up"},
83 | {"godemo_app-green_1", "Exited"},
84 | })
85 | }
86 |
87 | func (s IntegrationTestSuite) XTest_Scaling() {
88 | log.Println(">> Integration tests: scaling")
89 |
90 | log.Println("First deployment (blue, 2 instances)")
91 | s.runCmdWithStdOut(
92 | true,
93 | "./docker-flow",
94 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
95 | "--flow", "deploy",
96 | "--scale", "2",
97 | )
98 | s.verifyContainer([]ContainerStatus{
99 | {"godemo_app-blue_1", "Up"},
100 | {"godemo_app-blue_2", "Up"},
101 | {"godemo_db", "Up"},
102 | })
103 |
104 | log.Println("Second deployment (green, 4 (+2) instances)")
105 | s.runCmdWithStdOut(
106 | true,
107 | "./docker-flow",
108 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
109 | "--flow", "deploy",
110 | "--scale", "+2",
111 | )
112 | s.verifyContainer([]ContainerStatus{
113 | {"godemo_app-green_1", "Up"},
114 | {"godemo_app-green_2", "Up"},
115 | {"godemo_app-green_3", "Up"},
116 | {"godemo_app-green_4", "Up"},
117 | })
118 |
119 | log.Println("Scaling (green, 3 (-1) instances)")
120 | s.runCmdWithStdOut(
121 | true,
122 | "./docker-flow",
123 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
124 | "--flow", "scale",
125 | "--scale", "\"-1\"",
126 | )
127 | s.verifyContainer([]ContainerStatus{
128 | {"godemo_app-green_1", "Up"},
129 | {"godemo_app-green_2", "Up"},
130 | {"godemo_app-green_3", "Up"},
131 | {"godemo_app-green_4", "N/A"},
132 | })
133 | }
134 |
135 | func (s IntegrationTestSuite) XTest_Proxy() {
136 | log.Println(">> Integration tests: proxy")
137 |
138 | s.runCmdWithStdOut(
139 | true,
140 | "docker", "run", "-d", "--name", "registrator",
141 | "-v", "/var/run/docker.sock:/tmp/docker.sock",
142 | "gliderlabs/registrator",
143 | "-ip", s.ConsulIp, fmt.Sprintf("consul://%s:8500", s.ConsulIp),
144 | )
145 |
146 | log.Println("Runs proxy when not present and reconfigures it when deploy")
147 | s.runCmdWithStdOut(
148 | true,
149 | "./docker-flow",
150 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
151 | "--proxy-host", s.ProxyHost,
152 | "--proxy-docker-host", s.ProxyDockerHost,
153 | "--proxy-docker-cert-path", s.ProxyDockerCertPath,
154 | "--service-path", s.ServicePath,
155 | "--flow", "deploy", "--flow", "proxy",
156 | )
157 | s.verifyContainer([]ContainerStatus{
158 | {"docker-flow-proxy", "Up"},
159 | })
160 | url := fmt.Sprintf("http://%s%s", s.ConsulIp, s.ServicePath)
161 | resp, err := http.Get(url)
162 | s.NoError(err)
163 | s.Equal(200, resp.StatusCode, "Failed to send the request %s", url)
164 |
165 | log.Println("Runs proxy when stopped and reconfigures it when scale")
166 | s.runCmdWithStdOut(false, "docker", "stop", "docker-flow-proxy")
167 | s.verifyContainer([]ContainerStatus{
168 | {"docker-flow-proxy", "Exited"},
169 | })
170 | s.runCmdWithStdOut(
171 | true,
172 | "./docker-flow",
173 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
174 | "--proxy-host", s.ProxyHost,
175 | "--proxy-docker-host", s.ProxyDockerHost,
176 | "--proxy-docker-cert-path", s.ProxyDockerCertPath,
177 | "--service-path", s.ServicePath,
178 | "--scale", "+1",
179 | "--flow", "scale", "--flow", "proxy",
180 | )
181 | s.verifyContainer([]ContainerStatus{
182 | {"docker-flow-proxy", "Up"},
183 | })
184 | resp, err = http.Get(fmt.Sprintf("http://%s%s", s.ConsulIp, s.ServicePath))
185 | s.NoError(err)
186 | s.Equal(200, resp.StatusCode)
187 | s.runCmdWithStdOut(true, "docker", "rm", "-f", "godemo_app-blue_1")
188 | resp, err = http.Get(fmt.Sprintf("http://%s%s", s.ConsulIp, s.ServicePath))
189 | s.NoError(err)
190 | s.Equal(200, resp.StatusCode)
191 |
192 | log.Println("Works as a standalone")
193 | s.runCmdWithStdOut(
194 | true,
195 | "./docker-flow",
196 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
197 | "--flow", "deploy", "--flow", "stop-old",
198 | )
199 | s.runCmdWithStdOut(
200 | true,
201 | "./docker-flow",
202 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
203 | "--proxy-host", s.ProxyHost,
204 | "--proxy-docker-host", s.ProxyDockerHost,
205 | "--proxy-docker-cert-path", s.ProxyDockerCertPath,
206 | "--service-path", s.ServicePath,
207 | "--flow", "proxy",
208 | )
209 | resp, err = http.Get(fmt.Sprintf("http://%s%s", s.ConsulIp, s.ServicePath))
210 | s.NoError(err)
211 | s.Equal(200, resp.StatusCode)
212 | }
213 |
214 | func (s IntegrationTestSuite) Test_Proxy_Templates() {
215 | log.Println(">> Integration tests: proxy with templates")
216 |
217 | s.runCmdWithStdOut(
218 | true,
219 | "docker", "run", "-d", "--name", "registrator",
220 | "-v", "/var/run/docker.sock:/tmp/docker.sock",
221 | "gliderlabs/registrator",
222 | "-ip", s.ConsulIp, fmt.Sprintf("consul://%s:8500", s.ConsulIp),
223 | )
224 | s.runCmdWithStdOut(
225 | true,
226 | "./docker-flow",
227 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
228 | "--proxy-host", s.ProxyHost,
229 | "--proxy-docker-host", s.ProxyDockerHost,
230 | "--proxy-docker-cert-path", s.ProxyDockerCertPath,
231 | "--service-path", "INCORRECT",
232 | "--consul-template-fe-path", "test_configs/tmpl/go-demo-app-fe.tmpl",
233 | "--consul-template-be-path", "test_configs/tmpl/go-demo-app-be.tmpl",
234 | "--flow", "deploy", "--flow", "proxy",
235 | )
236 | s.verifyContainer([]ContainerStatus{
237 | {"docker-flow-proxy", "Up"},
238 | })
239 | url := fmt.Sprintf("http://%s%s", s.ConsulIp, s.ServicePath)
240 | resp, err := http.Get(url)
241 | s.NoError(err)
242 | s.Equal(200, resp.StatusCode, "Failed to send the request %s", url)
243 | }
244 |
245 | func (s IntegrationTestSuite) Test_TestFlow() {
246 | log.Println(">> Integration tests: test flow")
247 |
248 | ok, _ := s.runCmdWithStdOut(
249 | true,
250 | "./docker-flow",
251 | "--consul-address", fmt.Sprintf("http://%s:8500", s.ConsulIp),
252 | "--test-compose-path", "docker-compose-test.yml",
253 | "--flow", "test:unit",
254 | )
255 |
256 | s.True(ok)
257 | }
258 |
259 | // Util
260 |
261 | type ContainerStatus struct {
262 | Name string
263 | Status string
264 | }
265 |
266 | func (s IntegrationTestSuite) verifyContainer(csList []ContainerStatus) {
267 | s.runCmdWithStdOut(false, "docker", "ps", "-a")
268 | for _, cs := range csList {
269 | _, msg := s.runCmdWithoutStdOut(
270 | true,
271 | "docker", "ps", "-a",
272 | "--filter", fmt.Sprintf("name=%s", cs.Name),
273 | "--format", "\"{{.Names}} {{.Status}}\"",
274 | )
275 |
276 | if cs.Status == "N/A" {
277 | s.NotContains(msg, cs.Name)
278 | } else {
279 | s.Contains(msg, cs.Name)
280 | s.Contains(msg, cs.Status)
281 | }
282 | }
283 | }
284 |
285 | func (s IntegrationTestSuite) runCmd(failOnError, stdOut bool, command string, args ...string) (bool, string) {
286 | cmd := exec.Command(command, args...)
287 | var out bytes.Buffer
288 | msg := ""
289 | if stdOut {
290 | cmd.Stdout = os.Stdout
291 | cmd.Stderr = os.Stderr
292 | } else {
293 | cmd.Stdout = &out
294 | cmd.Stderr = &out
295 | }
296 | err := cmd.Run()
297 | if !stdOut {
298 | msg = string(out.Bytes())
299 | }
300 | fmt.Printf("$ %s %s\n", command, strings.Join(args, " "))
301 | fmt.Println(msg)
302 | if err != nil {
303 | msgWithError := fmt.Sprintf("%s %s\n%s\n", command, strings.Join(args, " "), err.Error())
304 | if failOnError {
305 | log.Fatal(msgWithError)
306 | }
307 | return false, msgWithError
308 | }
309 | return true, msg
310 | }
311 |
312 | func (s IntegrationTestSuite) runCmdWithoutStdOut(failOnError bool, command string, args ...string) (bool, string) {
313 | return s.runCmd(failOnError, false, command, args...)
314 | }
315 |
316 | func (s IntegrationTestSuite) runCmdWithStdOut(failOnError bool, command string, args ...string) (bool, string) {
317 | return s.runCmd(failOnError, true, command, args...)
318 | }
319 |
320 | func (s *IntegrationTestSuite) removeAll() {
321 | _, ids := s.runCmdWithoutStdOut(true, "docker", "ps", "-a", "--filter", "name=dockerflow", "--format", "{{.ID}}")
322 | for _, id := range strings.Split(ids, "\n") {
323 | s.runCmdWithStdOut(false, "docker", "rm", "-f", string(id))
324 | }
325 | s.runCmdWithStdOut(false, "docker", "rm", "-f", "consul", "docker-flow-proxy", "registrator")
326 | s.runCmdWithStdOut(false, "docker-compose", "-f", "docker-compose-setup.yml", "-p", "tests-setup", "down")
327 | s.runCmdWithStdOut(
328 | true,
329 | "docker-compose", "-f", "docker-compose-setup.yml", "-p", "tests-setup", "up", "-d", "consul",
330 | )
331 | }
332 |
333 | // Suite
334 |
335 | func TestIntegrationTestSuite(t *testing.T) {
336 | s := new(IntegrationTestSuite)
337 | ip := os.Getenv("HOST_IP")
338 | if len(ip) == 0 {
339 | _, msg := s.runCmdWithoutStdOut(true, "docker-machine", "ip", "docker-flow-test")
340 | ip = strings.Trim(msg, "\n")
341 | }
342 | s.ConsulIp = ip
343 | s.ProxyIp = ip
344 | s.ProxyHost = ip
345 | s.ProxyDockerHost = os.Getenv("DOCKER_HOST")
346 | s.ProxyDockerCertPath = os.Getenv("DOCKER_CERT_PATH")
347 | s.ServicePath = "/demo/hello"
348 | s.ServiceName = "go-demo"
349 | os.Setenv("FLOW_CONSUL_IP", s.ConsulIp)
350 | os.Setenv("FLOW_PROJECT", s.ServiceName)
351 | suite.Run(t, s)
352 | }
353 |
--------------------------------------------------------------------------------
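
Both runCmd above and HaProxy.ps capture command output in a bytes.Buffer instead of streaming it to stdout, and then branch on the captured status string. A minimal sketch of that approach (the docker invocation mirrors HaProxy.ps):

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	cmd := exec.Command("docker", "ps", "-a", "--filter", "name=docker-flow-proxy", "--format", "{{.Status}}")
	var out bytes.Buffer
	cmd.Stdout = &out // capture the output instead of printing it
	if err := cmd.Run(); err != nil {
		fmt.Println("docker ps failed:", err)
		return
	}
	status := strings.TrimSpace(out.String())
	switch {
	case status == "":
		fmt.Println("container not found")
	case strings.HasPrefix(status, "Exited"):
		fmt.Println("container exited")
	default:
		fmt.Println("container running:", status)
	}
}
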
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | // TODO: Test
4 |
5 | import (
6 | "fmt"
7 | "log"
8 | "strings"
9 | "./compose"
10 | )
11 |
12 | func init() {
13 | log.SetPrefix(">> Docker Flow: ")
14 | log.SetFlags(0)
15 | }
16 |
17 | var logFatal = log.Fatal
18 | var logPrintln = log.Println
19 | var logPrintf = log.Printf
20 | var deployed = false
21 |
22 | func main() {
23 | // createdFlow := false
24 | flow := getFlow()
25 | sc := getServiceDiscovery()
26 |
27 | opts, err := GetOpts()
28 | if err != nil {
29 | logFatal(err)
30 | }
31 | dc := compose.GetDockerCompose()
32 |
33 | for _, step := range opts.Flow {
34 | switch strings.ToLower(step) {
35 | case FLOW_DEPLOY:
36 | if err := flow.Deploy(opts, dc); err != nil {
37 | logFatal(err)
38 | }
39 | deployed = true
40 | // TODO: Move to flow
41 | logPrintln("Cleaning...")
42 | if _, err := sc.PutColor(
43 | opts.ServiceDiscoveryAddress,
44 | opts.ServiceName,
45 | opts.NextColor,
46 | ); err != nil {
47 | logFatal(err)
48 | }
49 | // TODO: End Move to flow
50 | case FLOW_SCALE:
51 | if !deployed {
52 | logPrintln(fmt.Sprintf("Scaling (%s)...", opts.CurrentTarget))
53 | if err := flow.Scale(opts, dc, opts.CurrentTarget, true); err != nil {
54 | logFatal(err)
55 | }
56 | }
57 | case FLOW_STOP_OLD:
58 | // TODO: Move to flow
59 | if opts.BlueGreen {
60 | target := opts.CurrentTarget
61 | color := opts.CurrentColor
62 | if !deployed {
63 | target = opts.NextTarget
64 | color = opts.NextColor
65 | }
66 | logPrintln(fmt.Sprintf("Stopping old (%s)...", target))
67 | if err := dc.CreateFlowFile(
68 | opts.ComposePath,
69 | opts.ServiceName,
70 | opts.Target,
71 | opts.SideTargets,
72 | color,
73 | opts.BlueGreen,
74 | ); err != nil {
75 | logFatal(err)
76 | }
77 | if err := dc.StopTargets(opts.Host, opts.CertPath, opts.Project, []string{target}); err != nil {
78 | logFatal(err)
79 | }
80 | if err := dc.RemoveFlow(); err != nil {
81 | logFatal(err)
82 | }
83 | }
84 | // TODO: End Move to flow
85 | case FLOW_PROXY:
86 | if err := flow.Proxy(opts, haProxy); err != nil {
87 | logFatal(err)
88 | }
89 | }
90 |
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/main_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/stretchr/testify/mock"
6 | "github.com/stretchr/testify/suite"
7 | "os"
8 | "testing"
9 | "./compose"
10 | )
11 |
12 | type MainTestSuite struct {
13 | suite.Suite
14 | opts Opts
15 | dc compose.DockerComposer
16 | }
17 |
18 | func (s *MainTestSuite) SetupTest() {
19 | s.opts = Opts{
20 | ComposePath: "myComposePath",
21 | Target: "myTarget",
22 | NextColor: "orange",
23 | CurrentColor: "pink",
24 | NextTarget: "myNextTarget",
25 | CurrentTarget: "myCurrentTarget",
26 | BlueGreen: true,
27 | Flow: []string{"deploy", "scale"},
28 | ServiceDiscoveryAddress: "myServiceDiscoveryAddress",
29 | ServiceName: "myServiceName",
30 | }
31 | GetOpts = func() (Opts, error) {
32 | return s.opts, nil
33 | }
34 | s.dc = getDockerComposeMock(s.opts, "")
35 | compose.GetDockerCompose = func() compose.DockerComposer { return s.dc }
36 | flow = getFlowMock("")
37 | haProxy = getProxyMock("")
38 | serviceDiscovery = getServiceDiscoveryMock(s.opts, "")
39 | logFatal = func(v ...interface{}) {}
40 | logPrintln = func(v ...interface{}) {}
41 | deployed = false
42 | }
43 |
44 | // main
45 |
46 | func (s MainTestSuite) Test_Main_Exits_WhenGetOptsFails() {
47 | GetOpts = func() (Opts, error) {
48 | return s.opts, fmt.Errorf("This is an error")
49 | }
50 | actual := false
51 | logFatal = func(v ...interface{}) {
52 | actual = true
53 | }
54 |
55 | main()
56 |
57 | s.True(actual)
58 | }
59 |
60 | // main > deploy
61 |
62 | func (s MainTestSuite) Test_Main_InvokesFlowDeploy_WhenDeploy() {
63 | mockObj := getFlowMock("")
64 | flow = mockObj
65 |
66 | main()
67 |
68 | mockObj.AssertCalled(
69 | s.T(),
70 | "Deploy",
71 | s.opts,
72 | s.dc,
73 | )
74 | }
75 |
76 | func (s MainTestSuite) Test_Main_LogsError_WhenDeployAndFlowDeployFails() {
77 | mockObj := getFlowMock("Deploy")
78 | mockObj.On(
79 | "Deploy",
80 | mock.Anything,
81 | mock.Anything,
82 | mock.Anything,
83 | ).Return(fmt.Errorf("This is an error"))
84 | flow = mockObj
85 | actual := false
86 | logFatal = func(v ...interface{}) {
87 | actual = true
88 | }
89 |
90 | main()
91 |
92 | s.True(actual)
93 | }
94 |
95 | func (s MainTestSuite) Test_Main_InvokesServiceDiscoveryPutColor_WhenDeploy() {
96 | mockObj := getServiceDiscoveryMock(s.opts, "")
97 | serviceDiscovery = mockObj
98 |
99 | main()
100 |
101 | mockObj.AssertCalled(
102 | s.T(),
103 | "PutColor",
104 | s.opts.ServiceDiscoveryAddress,
105 | s.opts.ServiceName,
106 | s.opts.NextColor,
107 | )
108 | }
109 |
110 | func (s MainTestSuite) Test_Main_LogsFatal_WhenDeployAndServiceDiscoveryPutColorFails() {
111 | mockObj := getServiceDiscoveryMock(s.opts, "PutColor")
112 | mockObj.On(
113 | "PutColor",
114 | mock.Anything,
115 | mock.Anything,
116 | mock.Anything,
117 | ).Return("", fmt.Errorf("This is an error"))
118 | serviceDiscovery = mockObj
119 | actual := false
120 | logFatal = func(v ...interface{}) {
121 | actual = true
122 | }
123 |
124 | main()
125 |
126 | s.True(actual)
127 | }
128 |
129 | // main > scale
130 |
131 | func (s MainTestSuite) Test_Main_InvokesFlowScale_WhenScaleAndNotDeploy() {
132 | mockObj := getFlowMock("")
133 | GetOpts = func() (Opts, error) {
134 | s.opts.Flow = []string{"scale"}
135 | return s.opts, nil
136 | }
137 | flow = mockObj
138 |
139 | main()
140 |
141 | mockObj.AssertCalled(
142 | s.T(),
143 | "Scale",
144 | s.opts,
145 | s.dc,
146 | s.opts.CurrentTarget,
147 | true,
148 | )
149 | }
150 |
151 | func (s MainTestSuite) Test_Main_LogsFatal_WhenScaleAndNotDeployAndScaleFails() {
152 | mockObj := getFlowMock("Scale")
153 | mockObj.On(
154 | "Scale",
155 | mock.Anything,
156 | mock.Anything,
157 | mock.Anything,
158 | mock.Anything,
159 | ).Return(fmt.Errorf("This is an error"))
160 | GetOpts = func() (Opts, error) {
161 | s.opts.Flow = []string{"scale"}
162 | return s.opts, nil
163 | }
164 | flow = mockObj
165 | actual := false
166 | logFatal = func(v ...interface{}) {
167 | actual = true
168 | }
169 |
170 | main()
171 |
172 | s.True(actual)
173 | }
174 |
175 | // main > stop-old
176 |
177 | func (s MainTestSuite) Test_Main_InvokesDockerComposeCreateFlowFileWithCurrentColor_WhenStopOldAndDeployed() {
178 | mockObj := getDockerComposeMock(s.opts, "")
179 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
180 | GetOpts = func() (Opts, error) {
181 | s.opts.Flow = []string{"stop-old"}
182 | return s.opts, nil
183 | }
184 | deployed = true
185 |
186 | main()
187 |
188 | mockObj.AssertCalled(
189 | s.T(),
190 | "CreateFlowFile",
191 | s.opts.ComposePath,
192 | s.opts.ServiceName,
193 | s.opts.Target,
194 | s.opts.SideTargets,
195 | s.opts.CurrentColor,
196 | s.opts.BlueGreen,
197 | )
198 | }
199 |
200 | func (s MainTestSuite) Test_Main_InvokesDockerComposeCreateFlowFileWithNextColor_WhenStopOldAndNotDeployed() {
201 | mockObj := getDockerComposeMock(s.opts, "")
202 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
203 | GetOpts = func() (Opts, error) {
204 | s.opts.Flow = []string{"stop-old"}
205 | return s.opts, nil
206 | }
207 |
208 | main()
209 |
210 | mockObj.AssertCalled(
211 | s.T(),
212 | "CreateFlowFile",
213 | s.opts.ComposePath,
214 | s.opts.ServiceName,
215 | s.opts.Target,
216 | s.opts.SideTargets,
217 | s.opts.NextColor,
218 | s.opts.BlueGreen,
219 | )
220 | }
221 |
222 | func (s MainTestSuite) Test_Main_InvokesLogFatal_WhenStopOldAndDockerComposeCreateFlowFileFails() {
223 | mockObj := getDockerComposeMock(s.opts, "CreateFlowFile")
224 | mockObj.On(
225 | "CreateFlowFile",
226 | mock.Anything,
227 | mock.Anything,
228 | mock.Anything,
229 | mock.Anything,
230 | mock.Anything,
231 | mock.Anything,
232 | ).Return(fmt.Errorf("This is an error"))
233 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
234 | GetOpts = func() (Opts, error) {
235 | s.opts.Flow = []string{"stop-old"}
236 | return s.opts, nil
237 | }
238 | actual := false
239 | logFatal = func(v ...interface{}) {
240 | actual = true
241 | }
242 |
243 | main()
244 |
245 | s.True(actual)
246 | }
247 |
248 | func (s MainTestSuite) Test_Main_InvokesDockerComposeStopTargetWithCurrentTarget_WhenStopOldAndDeployed() {
249 | mockObj := getDockerComposeMock(s.opts, "")
250 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
251 | GetOpts = func() (Opts, error) {
252 | s.opts.Flow = []string{"stop-old"}
253 | return s.opts, nil
254 | }
255 | deployed = true
256 |
257 | main()
258 |
259 | mockObj.AssertCalled(
260 | s.T(),
261 | "StopTargets",
262 | s.opts.Host,
263 | s.opts.CertPath,
264 | s.opts.Project,
265 | []string{s.opts.CurrentTarget},
266 | )
267 | }
268 |
269 | func (s MainTestSuite) Test_Main_InvokesDockerComposeStopTargetWithNextColor_WhenStopOldAndNotDeployed() {
270 | mockObj := getDockerComposeMock(s.opts, "")
271 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
272 | GetOpts = func() (Opts, error) {
273 | s.opts.Flow = []string{"stop-old"}
274 | return s.opts, nil
275 | }
276 |
277 | main()
278 |
279 | mockObj.AssertCalled(
280 | s.T(),
281 | "StopTargets",
282 | s.opts.Host,
283 | s.opts.CertPath,
284 | s.opts.Project,
285 | []string{s.opts.NextTarget},
286 | )
287 | }
288 |
289 | func (s MainTestSuite) Test_Main_InvokesLogFatal_WhenStopOldAndDockerComposeStopTargetsFails() {
290 | mockObj := getDockerComposeMock(s.opts, "StopTargets")
291 | mockObj.On(
292 | "StopTargets",
293 | mock.Anything,
294 | mock.Anything,
295 | mock.Anything,
296 | mock.Anything,
297 | ).Return(fmt.Errorf("This is an error"))
298 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
299 | GetOpts = func() (Opts, error) {
300 | s.opts.Flow = []string{"stop-old"}
301 | return s.opts, nil
302 | }
303 | actual := false
304 | logFatal = func(v ...interface{}) {
305 | actual = true
306 | }
307 |
308 | main()
309 |
310 | s.True(actual)
311 | }
312 |
313 | func (s MainTestSuite) Test_Main_DoesNotRunStopOld_WhenStopOldAndNotBlueGreen() {
314 | mockObj := getDockerComposeMock(s.opts, "")
315 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
316 | GetOpts = func() (Opts, error) {
317 | s.opts.Flow = []string{"stop-old"}
318 | s.opts.BlueGreen = false
319 | return s.opts, nil
320 | }
321 |
322 | main()
323 |
324 | mockObj.AssertNotCalled(
325 | s.T(),
326 | "StopTargets",
327 | mock.Anything,
328 | mock.Anything,
329 | mock.Anything,
330 | )
331 | }
332 |
333 | func (s MainTestSuite) Test_Main_InvokesDockerComposeRemoveFlow_WhenStopOld() {
334 | mockObj := getDockerComposeMock(s.opts, "")
335 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
336 | GetOpts = func() (Opts, error) {
337 | s.opts.Flow = []string{"stop-old"}
338 | return s.opts, nil
339 | }
340 |
341 | main()
342 |
343 | mockObj.AssertCalled(s.T(), "RemoveFlow")
344 | }
345 |
346 | func (s MainTestSuite) Test_Main_InvokesLogFatal_WhenStopOldAndDockerComposeRemoveFlowFails() {
347 | mockObj := getDockerComposeMock(s.opts, "RemoveFlow")
348 | mockObj.On("RemoveFlow").Return(fmt.Errorf("This is an error"))
349 | compose.GetDockerCompose = func() compose.DockerComposer { return mockObj }
350 | actual := false
351 | logFatal = func(v ...interface{}) {
352 | actual = true
353 | }
354 | GetOpts = func() (Opts, error) {
355 | s.opts.Flow = []string{"stop-old"}
356 | return s.opts, nil
357 | }
358 |
359 | main()
360 |
361 | s.True(actual)
362 | }
363 |
364 | // main > proxy
365 |
366 | func (s MainTestSuite) Test_Main_InvokesFlowProxy() {
367 | mockObj := getFlowMock("")
368 | flow = mockObj
369 | GetOpts = func() (Opts, error) {
370 | s.opts.Flow = []string{"proxy"}
371 | return s.opts, nil
372 | }
373 |
374 | main()
375 |
376 | mockObj.AssertCalled(
377 | s.T(),
378 | "Proxy",
379 | s.opts,
380 | haProxy,
381 | )
382 | }
383 |
384 | func (s MainTestSuite) Test_Main_InvokesLogFatal_WhenFlowProxyFails() {
385 | mockObj := getFlowMock("Proxy")
386 | mockObj.On("Proxy", mock.Anything, mock.Anything).Return(fmt.Errorf("This is an error"))
387 | flow = mockObj
388 | GetOpts = func() (Opts, error) {
389 | s.opts.Flow = []string{"proxy"}
390 | return s.opts, nil
391 | }
392 | actual := false
393 | logFatal = func(v ...interface{}) {
394 | actual = true
395 | }
396 |
397 | main()
398 |
399 | s.True(actual)
400 | }
401 |
402 | // Suite
403 |
404 | func TestMainTestSuite(t *testing.T) {
405 | dockerHost := os.Getenv("DOCKER_HOST")
406 | dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
407 | defer func() {
408 | os.Setenv("DOCKER_HOST", dockerHost)
409 | os.Setenv("DOCKER_CERT_PATH", dockerCertPath)
410 | }()
411 | getOptsOrig := GetOpts
412 | defer func() {
413 | GetOpts = getOptsOrig
414 | }()
415 | suite.Run(t, new(MainTestSuite))
416 | }
417 |
--------------------------------------------------------------------------------
/mocks_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import "github.com/stretchr/testify/mock"
4 |
5 | type DockerComposeMock struct {
6 | mock.Mock
7 | }
8 |
9 | func (m *DockerComposeMock) CreateFlowFile(
10 | dcPath,
11 | serviceName,
12 | target string,
13 | sideTargets []string,
14 | color string,
15 | blueGreen bool,
16 | ) error {
17 | args := m.Called(dcPath, serviceName, target, sideTargets, color, blueGreen)
18 | return args.Error(0)
19 | }
20 |
21 | func (m *DockerComposeMock) RemoveFlow() error {
22 | args := m.Called()
23 | return args.Error(0)
24 | }
25 |
26 | func (m *DockerComposeMock) PullTargets(host, certPath, project string, targets []string) error {
27 | args := m.Called(host, certPath, project, targets)
28 | return args.Error(0)
29 | }
30 |
31 | func (m *DockerComposeMock) UpTargets(host, certPath, project string, targets []string) error {
32 | args := m.Called(host, certPath, project, targets)
33 | return args.Error(0)
34 | }
35 |
36 | func (m *DockerComposeMock) ScaleTargets(host, certPath, project, target string, scale int) error {
37 | args := m.Called(host, certPath, project, target, scale)
38 | return args.Error(0)
39 | }
40 |
41 | func (m *DockerComposeMock) RmTargets(host, certPath, project string, targets []string) error {
42 | args := m.Called(host, certPath, project, targets)
43 | return args.Error(0)
44 | }
45 |
46 | func (m *DockerComposeMock) StopTargets(host, certPath, project string, targets []string) error {
47 | args := m.Called(host, certPath, project, targets)
48 | return args.Error(0)
49 | }
50 |
51 | func getDockerComposeMock(opts Opts, skipMethod string) *DockerComposeMock {
52 | mockObj := new(DockerComposeMock)
53 | if skipMethod != "PullTargets" {
54 | mockObj.On("PullTargets", opts.Host, opts.CertPath, opts.Project, Flow{}.GetPullTargets(opts)).Return(nil)
55 | }
56 | if skipMethod != "UpTargets" {
57 | mockObj.On("UpTargets", opts.Host, opts.CertPath, opts.Project, append(opts.SideTargets, opts.NextTarget)).Return(nil)
58 | }
59 | if skipMethod != "RmTargets" {
60 | mockObj.On("RmTargets", opts.Host, opts.CertPath, opts.Project, []string{opts.NextTarget}).Return(nil)
61 | }
62 | if skipMethod != "ScaleTargets" {
63 | mockObj.On("ScaleTargets", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
64 | }
65 | if skipMethod != "CreateFlowFile" {
66 | mockObj.On(
67 | "CreateFlowFile",
68 | mock.Anything,
69 | mock.Anything,
70 | mock.Anything,
71 | mock.Anything,
72 | mock.Anything,
73 | mock.Anything,
76 | ).Return(nil)
77 | }
78 | if skipMethod != "StopTargets" {
79 | mockObj.On("StopTargets", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
80 | }
81 | if skipMethod != "RemoveFlow" {
82 | mockObj.On("RemoveFlow").Return(nil)
83 | }
84 | return mockObj
85 | }
86 |
--------------------------------------------------------------------------------
/opts.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/jessevdk/go-flags"
6 | "github.com/kelseyhightower/envconfig"
7 | "gopkg.in/yaml.v2"
8 | "os"
9 | "strconv"
10 | "strings"
11 | "./util"
12 | )
13 |
14 | const dockerFlowPath = "docker-flow.yml"
15 | const dockerComposePath = "docker-compose.yml"
16 |
17 | var getWd = os.Getwd
18 | var parseYml = ParseYml
19 | var parseEnvVars = ParseEnvVars
20 | var parseArgs = ParseArgs
21 | var processOpts = ProcessOpts
22 |
23 | type Opts struct {
24 | BlueGreen bool `short:"b" long:"blue-green" description:"Perform blue-green deployment." yaml:"blue_green" envconfig:"blue_green"`
25 | CertPath string `long:"cert-path" description:"Docker certification path. If not specified, DOCKER_CERT_PATH environment variable will be used instead." yaml:"cert_path" envconfig:"cert_path"`
26 | ComposePath string `short:"f" long:"compose-path" value-name:"docker-compose.yml" description:"Path to the Docker Compose configuration file. If not specified, the default docker-compose.yml files will be used." yaml:"compose_path" envconfig:"compose_path"`
27 | ServiceDiscoveryAddress string `short:"c" long:"consul-address" description:"The address of the Consul server." yaml:"consul_address" envconfig:"consul_address"`
28 | ConsulTemplateBePath string `long:"consul-template-be-path" description:"The path to the Consul Template representing a snippet of the backend configuration. If specified, the proxy backend template will be loaded from the specified file." yaml:"consul_template_be_path" envconfig:"consul_template_be_path"`
29 | ConsulTemplateFePath string `long:"consul-template-fe-path" description:"The path to the Consul Template representing a snippet of the frontend configuration. If specified, the proxy frontend template will be loaded from the specified file." yaml:"consul_template_fe_path" envconfig:"consul_template_fe_path"`
30 | Flow []string `short:"F" long:"flow" description:"The actions that should be performed as the flow. Multiple values are allowed.\ndeploy: Deploys a new release\nscale: Scales the currently running release\nstop-old: Stops the old release\nproxy: Reconfigures the proxy\ntest:[TARGET]: Runs a test target specified through the test-compose-path argument.\n" yaml:"flow" envconfig:"flow"`
31 | Host string `short:"H" long:"host" description:"Docker daemon socket to connect to. If not specified, DOCKER_HOST environment variable will be used instead."`
32 | Project string `short:"p" long:"project" description:"Docker Compose project. If not specified, the current directory will be used instead."`
33 | ProxyDockerCertPath string `long:"proxy-docker-cert-path" description:"Docker certification path for the proxy host." yaml:"proxy_docker_cert_path" envconfig:"proxy_docker_cert_path"`
34 | ProxyDockerHost string `long:"proxy-docker-host" description:"Docker daemon socket of the proxy host. This argument is required only if the proxy flow step is used." yaml:"proxy_docker_host" envconfig:"proxy_docker_host"`
35 | ProxyHost string `long:"proxy-host" description:"The host of the proxy. Visitors should request services from this domain. Docker Flow uses it to request reconfiguration when a new service is deployed or an existing one is scaled. This argument is required only if the proxy flow step is used." yaml:"proxy_host" envconfig:"proxy_host"`
36 | ProxyReconfPort string `long:"proxy-reconf-port" description:"The port used to send reconfigure requests to the proxy." yaml:"proxy_reconf_port" envconfig:"proxy_reconf_port"`
37 | PullSideTargets bool `short:"S" long:"pull-side-targets" description:"Pull side or auxiliary targets." yaml:"pull_side_targets" envconfig:"pull_side_targets"`
38 | Scale string `short:"s" long:"scale" description:"Number of instances to deploy. If the value starts with the plus sign (+), the number of instances will be increased by the given number. If the value begins with the minus sign (-), the number of instances will be decreased by the given number." yaml:"scale" envconfig:"scale"`
39 | ServicePath []string `long:"service-path" description:"Path that should be configured in the proxy (e.g. /api/v1/my-service). This argument is required only if the proxy flow step is used." yaml:"service_path"`
40 | SideTargets []string `short:"T" long:"side-target" description:"Side or auxiliary Docker Compose targets. Multiple values are allowed." yaml:"side_targets"`
41 | Target string `short:"t" long:"target" description:"Docker Compose target."`
42 | TestComposePath string `long:"test-compose-path" description:"Path to the Docker Compose configuration file used for tests. If not specified, the default docker-compose.yml files will be used." yaml:"test_compose_path" envconfig:"test_compose_path"`
43 | ServiceName string
44 | CurrentColor string
45 | NextColor string
46 | CurrentTarget string
47 | NextTarget string
48 | ConsulTemplateFe string
49 | ConsulTemplateBe string
50 | }
51 |
52 | var GetOpts = func() (Opts, error) {
53 | opts := Opts{
54 | ComposePath: dockerComposePath,
55 | Flow: []string{"deploy"},
56 | }
57 | if err := parseYml(&opts); err != nil {
58 | return opts, err
59 | }
60 | if err := parseEnvVars(&opts); err != nil {
61 | return opts, err
62 | }
63 | if err := parseArgs(&opts); err != nil {
64 | return opts, err
65 | }
66 | if err := processOpts(&opts); err != nil {
67 | return opts, err
68 | }
69 | return opts, nil
70 | }
71 |
72 | func ParseYml(opts *Opts) error {
73 | data, err := util.ReadFile(dockerFlowPath)
74 | if err != nil {
75 | return nil
76 | }
77 | if err = yaml.Unmarshal([]byte(data), opts); err != nil {
78 | return fmt.Errorf("Could not parse the Docker Flow file %s\n%s", dockerFlowPath, err.Error())
79 | }
80 | return nil
81 | }
82 |
83 | func ParseArgs(opts *Opts) error {
84 | if _, err := flags.ParseArgs(opts, os.Args[1:]); err != nil {
85 | return fmt.Errorf("Could not parse command line arguments\n%s", err.Error())
86 | }
87 | return nil
88 | }
89 |
90 | func ParseEnvVars(opts *Opts) error {
91 | if err := envconfig.Process("flow", opts); err != nil {
92 | return fmt.Errorf("Could not retrieve environment variables\n%s", err.Error())
93 | }
94 | data := []struct {
95 | key string
96 | value *[]string
97 | }{
98 | {"FLOW_SIDE_TARGETS", &opts.SideTargets},
99 | {"FLOW", &opts.Flow},
100 | {"FLOW_SERVICE_PATH", &opts.ServicePath},
101 | }
102 | for _, d := range data {
103 | value := strings.Trim(os.Getenv(d.key), " ")
104 | if len(value) > 0 {
105 | *d.value = strings.Split(value, ",")
106 | }
107 | }
108 | return nil
109 | }
110 |
111 | func ProcessOpts(opts *Opts) (err error) {
112 | sc := getServiceDiscovery()
113 | if len(opts.Project) == 0 {
114 | dir, _ := getWd()
115 | opts.Project = dir[strings.LastIndex(dir, string(os.PathSeparator))+1:]
116 | }
117 | if len(opts.Target) == 0 {
118 | return fmt.Errorf("target argument is required")
119 | }
120 | if len(opts.ServiceDiscoveryAddress) == 0 {
121 | return fmt.Errorf("consul-address argument is required")
122 | }
123 | if len(opts.Scale) > 0 {
124 | if _, err := strconv.Atoi(opts.Scale); err != nil {
125 | return fmt.Errorf("scale must be a number or empty")
126 | }
127 | }
128 | if len(opts.ConsulTemplateFePath) > 0 {
129 | data, err := util.ReadFile(opts.ConsulTemplateFePath)
130 | if err != nil {
131 | return fmt.Errorf("Consul Template %s could not be loaded", opts.ConsulTemplateFePath)
132 | }
133 | opts.ConsulTemplateFe = string(data)
134 | }
135 | if len(opts.ConsulTemplateBePath) > 0 {
136 | data, err := util.ReadFile(opts.ConsulTemplateBePath)
137 | if err != nil {
138 | return fmt.Errorf("Consul Template %s could not be loaded", opts.ConsulTemplateBePath)
139 | }
140 | opts.ConsulTemplateBe = string(data)
141 | }
142 | if len(opts.Flow) == 0 {
143 | opts.Flow = []string{"deploy"}
144 | }
145 | if len(opts.ServiceName) == 0 {
146 | opts.ServiceName = fmt.Sprintf("%s-%s", opts.Project, opts.Target)
147 | }
148 | if len(opts.ProxyReconfPort) == 0 {
149 | opts.ProxyReconfPort = strconv.Itoa(ProxyReconfigureDefaultPort)
150 | }
151 | if opts.CurrentColor, err = sc.GetColor(opts.ServiceDiscoveryAddress, opts.ServiceName); err != nil {
152 | return err
153 | }
154 | if len(opts.Host) == 0 {
155 | opts.Host = os.Getenv("DOCKER_HOST")
156 | }
157 | if len(opts.CertPath) == 0 {
158 | opts.CertPath = os.Getenv("DOCKER_CERT_PATH")
159 | }
160 | opts.NextColor = sc.GetNextColor(opts.CurrentColor)
161 | if opts.BlueGreen {
162 | opts.NextTarget = fmt.Sprintf("%s-%s", opts.Target, opts.NextColor)
163 | opts.CurrentTarget = fmt.Sprintf("%s-%s", opts.Target, opts.CurrentColor)
164 | } else {
165 | opts.NextTarget = opts.Target
166 | opts.CurrentTarget = opts.Target
167 | }
168 | return nil
169 | }
170 |
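GetOpts merges four sources in order: docker-flow.yml, FLOW_* environment variables, command-line arguments, and finally the derived values computed by ProcessOpts, so a value supplied by a later source overwrites an earlier one. A minimal docker-flow.yml sketch using the keys defined by the struct tags above (all values are placeholders) might look as follows:

```bash
# Placeholder configuration; keys match the yaml tags (or lowercased field
# names) of the Opts struct.
cat > docker-flow.yml <<'EOF'
target: app
blue_green: true
consul_address: http://1.2.3.4:8500
proxy_host: proxy.example.com
proxy_docker_host: tcp://5.6.7.8:2376
service_path:
  - /demo/hello
flow:
  - deploy
  - proxy
  - stop-old
EOF
```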
--------------------------------------------------------------------------------
/opts_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/stretchr/testify/mock"
6 | "github.com/stretchr/testify/suite"
7 | "os"
8 | "path/filepath"
9 | "strconv"
10 | "strings"
11 | "testing"
12 | "./util"
13 | )
14 |
15 | // Setup
16 |
17 | type OptsTestSuite struct {
18 | suite.Suite
19 | dir string
20 | opts Opts
21 | }
22 |
23 | func (s *OptsTestSuite) SetupTest() {
24 | s.dir = "myProjectDir"
25 | s.opts = Opts{
26 | Project: "myProject",
27 | Target: "myTarget",
28 | ServiceDiscoveryAddress: "http://1.2.3.4:1234",
29 | ServiceName: "myFancyService",
30 | CurrentColor: "orange",
31 | }
32 | serviceDiscovery = getServiceDiscoveryMock(s.opts, "")
33 | path := fmt.Sprintf("/some/path/%s", s.dir)
34 | path = filepath.FromSlash(path)
35 | getWd = func() (string, error) {
36 | return path, nil
37 | }
38 | util.ReadFile = func(fileName string) ([]byte, error) {
39 | return []byte(""), nil
40 | }
41 | }
42 |
43 | // ProcessOpts
44 |
45 | func (s OptsTestSuite) Test_ProcessOpts_ReturnsNil() {
46 | actual := ProcessOpts(&s.opts)
47 |
48 | s.Nil(actual)
49 | }
50 |
51 | func (s OptsTestSuite) Test_ProcessOpts_SetsProjectToCurrentDir() {
52 | s.opts.Project = ""
53 |
54 | ProcessOpts(&s.opts)
55 |
56 | s.Equal(s.dir, s.opts.Project)
57 | }
58 |
59 | func (s OptsTestSuite) Test_ProcessOpts_DoesNotSetProjectToCurrentDir_WhenProjectIsNotEmpty() {
60 | expected := s.opts.Project
61 |
62 | ProcessOpts(&s.opts)
63 |
64 | s.Equal(expected, s.opts.Project)
65 | }
66 |
67 | func (s OptsTestSuite) Test_ProcessOpts_ReturnsError_WhenTargetIsEmpty() {
68 | s.opts.Target = ""
69 |
70 | actual := ProcessOpts(&s.opts)
71 |
72 | s.Error(actual)
73 | }
74 |
75 | func (s OptsTestSuite) Test_ProcessOpts_ReturnsError_WhenServiceDiscoveryAddressIsEmpty() {
76 | s.opts.ServiceDiscoveryAddress = ""
77 |
78 | actual := ProcessOpts(&s.opts)
79 |
80 | s.Error(actual)
81 | }
82 |
83 | func (s OptsTestSuite) Test_ProcessOpts_ReturnsError_WhenScaleIsNotNumber() {
84 | s.opts.Scale = "This Is Not A Number"
85 |
86 | actual := ProcessOpts(&s.opts)
87 |
88 | s.Error(actual)
89 | }
90 |
91 | func (s OptsTestSuite) Test_ProcessOpts_SetsServiceNameToProjectAndTarget() {
92 | expected := fmt.Sprintf("%s-%s", s.opts.Project, s.opts.Target)
93 | mockObj := getServiceDiscoveryMock(s.opts, "GetColor")
94 | mockObj.On("GetColor", mock.Anything, mock.Anything).Return("orange", fmt.Errorf("This is an error"))
95 | serviceDiscovery = mockObj
96 | s.opts.ServiceName = ""
97 |
98 | ProcessOpts(&s.opts)
99 |
100 | s.Equal(expected, s.opts.ServiceName)
101 | }
102 |
103 | func (s OptsTestSuite) Test_ProcessOpts_SetsPortTo8080_WhenEmpty() {
104 | s.opts.ProxyReconfPort = ""
105 | ProcessOpts(&s.opts)
106 |
107 | s.Equal(strconv.Itoa(ProxyReconfigureDefaultPort), s.opts.ProxyReconfPort)
108 | }
109 |
110 | func (s OptsTestSuite) Test_ProcessOpts_DoesNotSetServiceNameWhenNotEmpty() {
111 | expected := s.opts.ServiceName
112 |
113 | ProcessOpts(&s.opts)
114 |
115 | s.Equal(expected, s.opts.ServiceName)
116 | }
117 |
118 | func (s OptsTestSuite) Test_ProcessOpts_SetsCurrentColorFromServiceDiscovery() {
119 | expected, _ := serviceDiscovery.GetColor(s.opts.ServiceDiscoveryAddress, s.opts.ServiceName)
120 |
121 | ProcessOpts(&s.opts)
122 |
123 | s.Equal(expected, s.opts.CurrentColor)
124 | }
125 |
126 | func (s OptsTestSuite) Test_ProcessOpts_ReturnsError_WhenGetColorFails() {
127 | mockObj := getServiceDiscoveryMock(s.opts, "GetColor")
128 | mockObj.On("GetColor", mock.Anything, mock.Anything).Return("orange", fmt.Errorf("This is an error"))
129 | serviceDiscovery = mockObj
130 |
131 | actual := ProcessOpts(&s.opts)
132 |
133 | s.Error(actual)
134 | }
135 |
136 | func (s OptsTestSuite) Test_ProcessOpts_SetsNextColorFromServiceDiscovery() {
137 | expected := serviceDiscovery.GetNextColor(s.opts.CurrentColor)
138 |
139 | ProcessOpts(&s.opts)
140 |
141 | s.Equal(expected, s.opts.NextColor)
142 | }
143 |
144 | func (s OptsTestSuite) Test_ProcessOpts_SetsNextTargetToTarget() {
145 | expected := s.opts.Target
146 |
147 | ProcessOpts(&s.opts)
148 |
149 | s.Equal(expected, s.opts.NextTarget)
150 | }
151 |
152 | func (s OptsTestSuite) Test_ProcessOpts_SetsCurrentTargetToTarget() {
153 | expected := s.opts.Target
154 |
155 | ProcessOpts(&s.opts)
156 |
157 | s.Equal(expected, s.opts.CurrentTarget)
158 | }
159 |
160 | func (s OptsTestSuite) Test_ProcessOpts_SetsNextTargetToTargetAndNextColor_WhenBlueGreen() {
161 | s.opts.BlueGreen = true
162 | expected := fmt.Sprintf("%s-%s", s.opts.Target, serviceDiscovery.GetNextColor(s.opts.CurrentColor))
163 |
164 | ProcessOpts(&s.opts)
165 |
166 | s.Equal(expected, s.opts.NextTarget)
167 | }
168 |
169 | func (s OptsTestSuite) Test_ProcessOpts_SetsCurrentTargetToTargetAndCurrentColor_WhenBlueGreen() {
170 | s.opts.BlueGreen = true
171 | expected := fmt.Sprintf("%s-%s", s.opts.Target, s.opts.CurrentColor)
172 |
173 | ProcessOpts(&s.opts)
174 |
175 | s.Equal(expected, s.opts.CurrentTarget)
176 | }
177 |
178 | func (s OptsTestSuite) Test_ProcessOpts_SetsHostFromDockerHostEnv_WhenEmpty() {
179 | expected := "tcp://5.5.5.5:4444"
180 | s.opts.Host = ""
181 | os.Setenv("DOCKER_HOST", expected)
182 |
183 | ProcessOpts(&s.opts)
184 |
185 | s.Equal(expected, s.opts.Host)
186 | }
187 |
188 | func (s OptsTestSuite) Test_ProcessOpts_DoesNotSetHostFromDockerHostEnv_WhenNotEmpty() {
189 | expected := "tcp://5.5.5.5:4444"
190 | s.opts.Host = expected
191 | os.Setenv("DOCKER_HOST", "myHost")
192 |
193 | ProcessOpts(&s.opts)
194 |
195 | s.Equal(expected, s.opts.Host)
196 | }
197 |
198 | func (s OptsTestSuite) Test_ProcessOpts_SetsCertPathFromDockerCertPathEnv_WhenEmpty() {
199 | expected := "/path/to/docker/cert"
200 | s.opts.CertPath = ""
201 | os.Setenv("DOCKER_CERT_PATH", expected)
202 |
203 | ProcessOpts(&s.opts)
204 |
205 | s.Equal(expected, s.opts.CertPath)
206 | }
207 |
208 | func (s OptsTestSuite) Test_ProcessOpts_DoesNotSetCertPathFromDockerCertPathEnv_WhenNotEmpty() {
209 | expected := "/path/to/docker/cert"
210 | s.opts.CertPath = expected
211 | os.Setenv("DOCKER_CERT_PATH", "/my/cert/path")
212 |
213 | ProcessOpts(&s.opts)
214 |
215 | s.Equal(expected, s.opts.CertPath)
216 | }
217 |
218 | func (s OptsTestSuite) Test_ProcessOpts_SetsFlowToDeploy_WhenEmpty() {
219 | expected := []string{"deploy"}
220 | s.opts.Flow = []string{}
221 | ProcessOpts(&s.opts)
222 | s.Equal(expected, s.opts.Flow)
223 | }
224 |
225 | func (s OptsTestSuite) Test_ProcessOpts_ReturnsError_WhenConsulTemplateFeFileDoesNotExist() {
226 | s.opts.ConsulTemplateFePath = "/this/path/does/not/exist"
227 | util.ReadFile = func(fileName string) ([]byte, error) {
228 | return []byte(""), fmt.Errorf("This is an error")
229 | }
230 |
231 | actual := ProcessOpts(&s.opts)
232 |
233 | s.Error(actual)
234 | }
235 |
236 | func (s OptsTestSuite) Test_ProcessOpts_ReturnsError_WhenConsulTemplateBeFileDoesNotExist() {
237 | s.opts.ConsulTemplateBePath = "/this/path/does/not/exist"
238 | util.ReadFile = func(fileName string) ([]byte, error) {
239 | return []byte(""), fmt.Errorf("This is an error")
240 | }
241 |
242 | actual := ProcessOpts(&s.opts)
243 |
244 | s.Error(actual)
245 | }
246 |
247 | func (s OptsTestSuite) Test_ProcessOpts_SetsConsulTemplateFe_WhenConsulTemplateFileIsSpecified() {
248 | expected := "This is content of a Consul Template"
249 | s.opts.ConsulTemplateFePath = "/this/path/does/not/exist"
250 | util.ReadFile = func(fileName string) ([]byte, error) {
251 | return []byte(expected), nil
252 | }
253 |
254 | ProcessOpts(&s.opts)
255 |
256 | s.Equal(expected, s.opts.ConsulTemplateFe)
257 | }
258 |
259 | func (s OptsTestSuite) Test_ProcessOpts_SetsConsulTemplateBe_WhenConsulTemplateFileIsSpecified() {
260 | expected := "This is content of a Consul Template"
261 | s.opts.ConsulTemplateBePath = "/this/path/does/not/exist"
262 | util.ReadFile = func(fileName string) ([]byte, error) {
263 | return []byte(expected), nil
264 | }
265 |
266 | ProcessOpts(&s.opts)
267 |
268 | s.Equal(expected, s.opts.ConsulTemplateBe)
269 | }
270 |
271 | // ParseEnvVars
272 |
273 | func (s OptsTestSuite) Test_ParseEnvVars_Strings() {
274 | data := []struct {
275 | expected string
276 | key string
277 | value *string
278 | }{
279 | {"myHost", "FLOW_HOST", &s.opts.Host},
280 | {"myCertPath", "FLOW_CERT_PATH", &s.opts.CertPath},
281 | {"myComposePath", "FLOW_COMPOSE_PATH", &s.opts.ComposePath},
282 | {"myTarget", "FLOW_TARGET", &s.opts.Target},
283 | {"myProject", "FLOW_PROJECT", &s.opts.Project},
284 | {"mySDAddress", "FLOW_CONSUL_ADDRESS", &s.opts.ServiceDiscoveryAddress},
285 | {"myScale", "FLOW_SCALE", &s.opts.Scale},
286 | {"myProxyHost", "FLOW_PROXY_HOST", &s.opts.ProxyHost},
287 | {"myProxyDockerHost", "FLOW_PROXY_DOCKER_HOST", &s.opts.ProxyDockerHost},
288 | {"myProxyCertPath", "FLOW_PROXY_DOCKER_CERT_PATH", &s.opts.ProxyDockerCertPath},
289 | {"4357", "FLOW_PROXY_RECONF_PORT", &s.opts.ProxyReconfPort},
290 | {"myConsulTemplateFePath", "FLOW_CONSUL_TEMPLATE_FE_PATH", &s.opts.ConsulTemplateFePath},
291 | {"myConsulTemplateBePath", "FLOW_CONSUL_TEMPLATE_BE_PATH", &s.opts.ConsulTemplateBePath},
292 | {"myTestComposePath", "FLOW_TEST_COMPOSE_PATH", &s.opts.TestComposePath},
293 | }
294 | for _, d := range data {
295 | os.Setenv(d.key, d.expected)
296 | }
297 | ParseEnvVars(&s.opts)
298 | for _, d := range data {
299 | s.Equal(d.expected, *d.value)
300 | }
301 | }
302 |
303 | func (s OptsTestSuite) Test_ParseEnvVars_Bools() {
304 | data := []struct {
305 | key string
306 | value *bool
307 | }{
308 | {"FLOW_BLUE_GREEN", &s.opts.BlueGreen},
309 | {"FLOW_PULL_SIDE_TARGETS", &s.opts.PullSideTargets},
310 | }
311 | for _, d := range data {
312 | os.Setenv(d.key, "true")
313 | }
314 | ParseEnvVars(&s.opts)
315 | for _, d := range data {
316 | s.True(*d.value)
317 | }
318 | }
319 |
320 | func (s OptsTestSuite) Test_ParseEnvVars_Slices() {
321 | data := []struct {
322 | expected string
323 | key string
324 | value *[]string
325 | }{
326 | {"myTarget1,myTarget2", "FLOW_SIDE_TARGETS", &s.opts.SideTargets},
327 | {"deploy,stop-old", "FLOW", &s.opts.Flow},
328 | {"path1,path2", "FLOW_SERVICE_PATH", &s.opts.ServicePath},
329 | }
330 | for _, d := range data {
331 | os.Setenv(d.key, d.expected)
332 | }
333 | ParseEnvVars(&s.opts)
334 | for _, d := range data {
335 | s.Equal(strings.Split(d.expected, ","), *d.value)
336 | }
337 | }
338 |
339 | func (s OptsTestSuite) Test_ParseEnvVars_DoesNotParseSlices_WhenEmpty() {
340 | data := []struct {
341 | key string
342 | value *[]string
343 | }{
344 | {"FLOW_SIDE_TARGETS", &s.opts.SideTargets},
345 | }
346 | for _, d := range data {
347 | s.opts.SideTargets = []string{}
348 | os.Unsetenv(d.key)
349 | }
350 | ParseEnvVars(&s.opts)
351 | for _, d := range data {
352 | s.Len(*d.value, 0)
353 | }
354 | }
355 |
356 | func (s OptsTestSuite) Test_ParseEnvVars_ReturnsError_WhenFailure() {
357 | os.Setenv("FLOW_BLUE_GREEN", "This is not a bool")
358 |
359 | actual := ParseEnvVars(&s.opts)
360 |
361 | s.Error(actual)
362 | os.Unsetenv("FLOW_BLUE_GREEN")
363 | }
364 |
365 | // ParseArgs
366 |
367 | func (s OptsTestSuite) Test_ParseArgs_LongStrings() {
368 | data := []struct {
369 | expected string
370 | key string
371 | value *string
372 | }{
373 | {"hostFromArgs", "host", &s.opts.Host},
374 | {"certPathFromArgs", "cert-path", &s.opts.CertPath},
375 | {"composePathFromArgs", "compose-path", &s.opts.ComposePath},
376 | {"targetFromArgs", "target", &s.opts.Target},
377 | {"projectFromArgs", "project", &s.opts.Project},
378 | {"addressFromArgs", "consul-address", &s.opts.ServiceDiscoveryAddress},
379 | {"scaleFromArgs", "scale", &s.opts.Scale},
380 | {"proxyDomainFromArgs", "proxy-host", &s.opts.ProxyHost},
381 | {"proxyHostFromArgs", "proxy-docker-host", &s.opts.ProxyDockerHost},
382 | {"proxyCertPathFromArgs", "proxy-docker-cert-path", &s.opts.ProxyDockerCertPath},
383 | {"1234", "proxy-reconf-port", &s.opts.ProxyReconfPort},
384 | {"consulTemplateFePathFromArgs", "consul-template-fe-path", &s.opts.ConsulTemplateFePath},
385 | {"consulTemplateBePathFromArgs", "consul-template-be-path", &s.opts.ConsulTemplateBePath},
386 | {"testComposePathFromArgs", "test-compose-path", &s.opts.TestComposePath},
387 | }
388 |
389 | for _, d := range data {
390 | os.Args = []string{"myProgram", fmt.Sprintf("--%s=%s", d.key, d.expected)}
391 | ParseArgs(&s.opts)
392 | s.Equal(d.expected, *d.value)
393 | }
394 | }
395 |
396 | func (s OptsTestSuite) Test_ParseArgs_ParsesLongSlices() {
397 | os.Args = []string{"myProgram"}
398 | data := []struct {
399 | expected []string
400 | key string
401 | value *[]string
402 | }{
403 | {[]string{"path1", "path2"}, "service-path", &s.opts.ServicePath},
404 | }
405 |
406 | for _, d := range data {
407 | for _, v := range d.expected {
408 | os.Args = append(os.Args, fmt.Sprintf("--%s", d.key), v)
409 | }
410 |
411 | }
412 |
413 | ParseArgs(&s.opts)
414 |
415 | for _, d := range data {
416 | s.Equal(d.expected, *d.value)
417 | }
418 | }
419 |
420 | func (s OptsTestSuite) TestParseArgs_ShortStrings() {
421 | data := []struct {
422 | expected string
423 | key string
424 | value *string
425 | }{
426 | {"hostFromArgs", "H", &s.opts.Host},
427 | {"composePathFromArgs", "f", &s.opts.ComposePath},
428 | {"targetFromArgs", "t", &s.opts.Target},
429 | {"projectFromArgs", "p", &s.opts.Project},
430 | {"addressFromArgs", "c", &s.opts.ServiceDiscoveryAddress},
431 | {"scaleFromArgs", "s", &s.opts.Scale},
432 | }
433 |
434 | for _, d := range data {
435 | os.Args = []string{"myProgram", fmt.Sprintf("-%s=%s", d.key, d.expected)}
436 | ParseArgs(&s.opts)
437 | s.Equal(d.expected, *d.value)
438 | }
439 | }
440 |
441 | func (s OptsTestSuite) TestParseArgs_LongBools() {
442 | data := []struct {
443 | key string
444 | value *bool
445 | }{
446 | {"blue-green", &s.opts.BlueGreen},
447 | {"pull-side-targets", &s.opts.PullSideTargets},
448 | }
449 |
450 | for _, d := range data {
451 | os.Args = []string{"myProgram", fmt.Sprintf("--%s", d.key)}
452 | ParseArgs(&s.opts)
453 | s.True(*d.value)
454 | }
455 | }
456 |
457 | func (s OptsTestSuite) TestParseArgs_ShortBools() {
458 | data := []struct {
459 | key string
460 | value *bool
461 | }{
462 | {"b", &s.opts.BlueGreen},
463 | {"S", &s.opts.PullSideTargets},
464 | }
465 |
466 | for _, d := range data {
467 | os.Args = []string{"myProgram", fmt.Sprintf("-%s", d.key)}
468 | ParseArgs(&s.opts)
469 | s.True(*d.value)
470 | }
471 | }
472 |
473 | func (s OptsTestSuite) TestParseArgs_LongSlices() {
474 | data := []struct {
475 | expected []string
476 | key string
477 | value *[]string
478 | }{
479 | {[]string{"target1", "target2"}, "side-target", &s.opts.SideTargets},
480 | {[]string{"deploy", "stop-old"}, "flow", &s.opts.Flow},
481 | }
482 |
483 | for _, d := range data {
484 | os.Args = []string{"myProgram"}
485 | for _, v := range d.expected {
486 | os.Args = append(os.Args, fmt.Sprintf("--%s=%s", d.key, v))
487 | }
488 | ParseArgs(&s.opts)
489 | s.Equal(d.expected, *d.value)
490 | }
491 | }
492 |
493 | func (s OptsTestSuite) TestParseArgs_ShortSlices() {
494 | data := []struct {
495 | expected []string
496 | key string
497 | value *[]string
498 | }{
499 | {[]string{"target1", "target2"}, "T", &s.opts.SideTargets},
500 | {[]string{"flow", "stop-old"}, "F", &s.opts.Flow},
501 | }
502 |
503 | for _, d := range data {
504 | os.Args = []string{"myProgram"}
505 | for _, v := range d.expected {
506 | os.Args = append(os.Args, fmt.Sprintf("-%s=%s", d.key, v))
507 | }
508 | ParseArgs(&s.opts)
509 | s.Equal(d.expected, *d.value)
510 | }
511 | }
512 |
513 | func (s OptsTestSuite) TestParseArgs_ReturnsError_WhenFailure() {
514 | os.Args = []string{"myProgram", "--this-flag-does-not-exist=something"}
515 |
516 | actual := ParseArgs(&s.opts)
517 |
518 | s.Error(actual)
519 | }
520 |
521 | // ParseYml
522 |
523 | func (s OptsTestSuite) Test_ParseYml_ReturnsNil() {
524 | actual := ParseYml(&s.opts)
525 |
526 | s.Nil(actual)
527 | }
528 |
529 | func (s OptsTestSuite) Test_ParseYml_ReturnsNil_WhenReadFileFails() {
530 | util.ReadFile = func(fileName string) ([]byte, error) {
531 | return []byte(""), fmt.Errorf("This is an error")
532 | }
533 |
534 | actual := ParseYml(&s.opts)
535 |
536 | s.Nil(actual)
537 | }
538 |
539 | func (s OptsTestSuite) Test_ParseYml_ReturnsError_WhenUnmarshalFails() {
540 | util.ReadFile = func(fileName string) ([]byte, error) {
541 | return []byte("This is not a proper YML"), nil
542 | }
543 |
544 | actual := ParseYml(&s.opts)
545 |
546 | s.Error(actual)
547 | }
548 |
549 | func (s OptsTestSuite) Test_ParseYml_SetsOpts() {
550 | host := "hostFromYml"
551 | certPath := "certPathFromYml"
552 | composePath := "composePathFromYml"
553 | testComposePath := "testComposePathFromYml"
554 | target := "targetFromYml"
555 | sideTarget1 := "sideTarget1FromYml"
556 | sideTarget2 := "sideTarget2FromYml"
557 | project := "projectFromYml"
558 | consulAddress := "consulAddressFromYml"
559 | scale := "scaleFromYml"
560 | flow1 := "deploy"
561 | flow2 := "stop-old"
562 | path1 := "path1"
563 | path2 := "path2"
564 | proxyHost := "proxyHostFromYml"
565 | proxyDockerHost := "proxyDomainFromYml"
566 | proxyDockerCertPath := "proxyCertPathFromYml"
567 | proxyReconfPort := "1245"
568 | consulTemplateFePath := "/path/to/consul/fe/template"
569 | consulTemplateBePath := "/path/to/consul/be/template"
570 | yml := fmt.Sprintf(`
571 | host: %s
572 | cert_path: %s
573 | compose_path: %s
574 | test_compose_path: %s
575 | blue_green: true
576 | target: %s
577 | side_targets:
578 | - %s
579 | - %s
580 | skip_pull_target: true
581 | pull_side_targets: true
582 | project: %s
583 | consul_address: %s
584 | scale: %s
585 | proxy_host: %s
586 | proxy_docker_host: %s
587 | proxy_docker_cert_path: %s
588 | proxy_reconf_port: %s
589 | flow:
590 | - %s
591 | - %s
592 | service_path:
593 | - %s
594 | - %s
595 | consul_template_fe_path: %s
596 | consul_template_be_path: %s`,
597 | host, certPath, composePath, testComposePath, target, sideTarget1, sideTarget2,
598 | project, consulAddress, scale, proxyHost, proxyDockerHost,
599 | proxyDockerCertPath, proxyReconfPort, flow1, flow2, path1,
600 | path2, consulTemplateFePath, consulTemplateBePath,
601 | )
602 | util.ReadFile = func(fileName string) ([]byte, error) {
603 | return []byte(yml), nil
604 | }
605 |
606 | ParseYml(&s.opts)
607 |
608 | s.Equal(host, s.opts.Host)
609 | s.Equal(composePath, s.opts.ComposePath)
610 | s.Equal(testComposePath, s.opts.TestComposePath)
611 | s.True(s.opts.BlueGreen)
612 | s.Equal(target, s.opts.Target)
613 | s.Equal([]string{sideTarget1, sideTarget2}, s.opts.SideTargets)
614 | s.True(s.opts.PullSideTargets)
615 | s.Equal(project, s.opts.Project)
616 | s.Equal(consulAddress, s.opts.ServiceDiscoveryAddress)
617 | s.Equal(scale, s.opts.Scale)
618 | s.Equal(proxyHost, s.opts.ProxyHost)
619 | s.Equal(proxyDockerHost, s.opts.ProxyDockerHost)
620 | s.Equal(proxyDockerCertPath, s.opts.ProxyDockerCertPath)
621 | s.Equal(proxyReconfPort, s.opts.ProxyReconfPort)
622 | s.Equal([]string{flow1, flow2}, s.opts.Flow)
623 | s.Equal([]string{path1, path2}, s.opts.ServicePath)
624 | s.Equal(consulTemplateFePath, s.opts.ConsulTemplateFePath)
625 | s.Equal(consulTemplateBePath, s.opts.ConsulTemplateBePath)
626 | }
627 |
628 | // GetOpts
629 |
630 | func (s OptsTestSuite) TestGetOpts_SetsComposePath() {
631 | opts, _ := GetOpts()
632 |
633 | s.Equal(dockerComposePath, opts.ComposePath)
634 | }
635 |
636 | func (s OptsTestSuite) TestGetOpts_InvokesParseYml() {
637 | called := false
638 | processOpts = func(*Opts) error {
639 | return nil
640 | }
641 | parseYml = func(*Opts) error {
642 | called = true
643 | return nil
644 | }
645 |
646 | _, err := GetOpts()
647 |
648 | s.Nil(err)
649 | s.True(called)
650 | }
651 |
652 | func (s OptsTestSuite) Test_GetOpts_ReturnsError_WhenParseYmlFails() {
653 | restore := parseYml
654 | processOpts = func(*Opts) error {
655 | return nil
656 | }
657 | parseYml = func(*Opts) error {
658 | return fmt.Errorf("This is an error from ParseYml")
659 | }
660 |
661 | _, actual := GetOpts()
662 |
663 | s.Error(actual)
664 | parseYml = restore
665 | }
666 |
667 | func (s OptsTestSuite) Test_GetOpts_InvokesParseEnvVars() {
668 | called := false
669 | processOpts = func(*Opts) error {
670 | return nil
671 | }
672 | parseEnvVars = func(*Opts) error {
673 | called = true
674 | return nil
675 | }
676 |
677 | _, err := GetOpts()
678 |
679 | s.Nil(err)
680 | s.True(called)
681 | }
682 |
683 | func (s OptsTestSuite) Test_GetOpts_ReturnsError_WhenParseEnvVarsFails() {
684 | restore := parseEnvVars
685 | processOpts = func(*Opts) error {
686 | return nil
687 | }
688 | parseEnvVars = func(*Opts) error {
689 | return fmt.Errorf("This is an error from ParseEnvVars")
690 | }
691 |
692 | _, actual := GetOpts()
693 |
694 | s.Error(actual)
695 | parseEnvVars = restore
696 | }
697 |
698 | func (s OptsTestSuite) Test_GetOpts_InvokesParseArgs() {
699 | called := false
700 | processOpts = func(*Opts) error {
701 | return nil
702 | }
703 | parseArgs = func(*Opts) error {
704 | called = true
705 | return nil
706 | }
707 |
708 | _, err := GetOpts()
709 |
710 | s.Nil(err)
711 | s.True(called)
712 | }
713 |
714 | func (s OptsTestSuite) Test_GetOpts_ReturnsError_WhenParseArgsFails() {
715 | restore := parseArgs
716 | processOpts = func(*Opts) error {
717 | return nil
718 | }
719 | parseArgs = func(*Opts) error {
720 | return fmt.Errorf("This is an error from ParseArgs")
721 | }
722 |
723 | _, actual := GetOpts()
724 |
725 | s.Error(actual)
726 | parseArgs = restore
727 | }
728 |
729 | func (s OptsTestSuite) Test_GetOpts_InvokesProcessOpts() {
730 | called := false
731 | processOpts = func(*Opts) error {
732 | called = true
733 | return nil
734 | }
735 |
736 | _, err := GetOpts()
737 |
738 | s.Nil(err)
739 | s.True(called)
740 | }
741 |
742 | func (s OptsTestSuite) Test_GetOpts_ReturnsError_WhenProcessOptsFails() {
743 | restore := processOpts
744 | processOpts = func(*Opts) error {
745 | return fmt.Errorf("This is an error from ProcessOpts")
746 | }
747 |
748 | _, actual := GetOpts()
749 |
750 | s.Error(actual)
751 | processOpts = restore
752 | }
753 |
754 | // Suite
755 |
756 | func TestOptsTestSuite(t *testing.T) {
757 | dockerHost := os.Getenv("DOCKER_HOST")
758 | dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
759 | defer func() {
760 | os.Setenv("DOCKER_HOST", dockerHost)
761 | os.Setenv("DOCKER_CERT_PATH", dockerCertPath)
762 | }()
763 | suite.Run(t, new(OptsTestSuite))
764 | }
765 |
--------------------------------------------------------------------------------
/proxy.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | type Proxy interface {
4 | Provision(dockerHost, reconfPort, certPath, scAddress string) error
5 | Reconfigure(dockerHost, proxyCertPath, host, reconfPort, serviceName, serviceColor string, servicePath []string, consulTemplateFePath, consulTemplateBePath string) error
6 | }
7 |
--------------------------------------------------------------------------------
/proxy_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/stretchr/testify/mock"
5 | )
6 |
7 | // Mock
8 |
9 | type ProxyMock struct {
10 | mock.Mock
11 | }
12 |
13 | func (m *ProxyMock) Provision(host, reconfPort, certPath, scAddress string) error {
14 | args := m.Called(host, reconfPort, certPath, scAddress)
15 | return args.Error(0)
16 | }
17 |
18 | func (m *ProxyMock) Reconfigure(dockerHost, proxyCertPath, host, reconfPort, serviceName, serviceColor string, servicePath []string, consulTemplateFePath, consulTemplateBePath string) error {
19 | args := m.Called(dockerHost, proxyCertPath, host, reconfPort, serviceName, serviceColor, servicePath, consulTemplateFePath, consulTemplateBePath)
20 | return args.Error(0)
21 | }
22 |
23 | func getProxyMock(skipMethod string) *ProxyMock {
24 | mockObj := new(ProxyMock)
25 | if skipMethod != "Provision" {
26 | mockObj.On("Provision", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
27 | }
28 | if skipMethod != "Reconfigure" {
29 | mockObj.On("Reconfigure", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
30 | }
31 | return mockObj
32 | }
33 |
--------------------------------------------------------------------------------
/scripts/bootstrap_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | echo "Installing Ansible..."
6 | apt-get install -y --force-yes software-properties-common
7 | apt-add-repository -y ppa:ansible/ansible
8 | apt-get update
9 | apt-get install -y ansible
10 | cp /vagrant/ansible/ansible.cfg /etc/ansible/ansible.cfg
11 |
--------------------------------------------------------------------------------
/service_discovery.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | const BlueColor = "blue"
4 | const GreenColor = "green"
5 |
6 | var serviceDiscovery ServiceDiscovery = Consul{}
7 |
8 | func getServiceDiscovery() ServiceDiscovery {
9 | return serviceDiscovery
10 | }
11 |
12 | type ServiceDiscovery interface {
13 | GetScaleCalc(address, serviceName, scale string) (int, error)
14 | GetNextColor(currentColor string) string
15 | GetColor(address, serviceName string) (string, error)
16 | PutScale(address, serviceName string, value int) (string, error)
17 | PutColor(address, serviceName, value string) (string, error)
18 | }
19 |
--------------------------------------------------------------------------------
/service_discovery_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/stretchr/testify/mock"
5 | )
6 |
7 | // Mock
8 |
9 | type ServiceDiscoveryMock struct {
10 | mock.Mock
11 | }
12 |
13 | func (m *ServiceDiscoveryMock) GetScaleCalc(address, serviceName, scale string) (int, error) {
14 | args := m.Called(address, serviceName, scale)
15 | return args.Int(0), args.Error(1)
16 | }
17 |
18 | func (m *ServiceDiscoveryMock) GetNextColor(currentColor string) string {
19 | args := m.Called(currentColor)
20 | return args.String(0)
21 | }
22 |
23 | func (m *ServiceDiscoveryMock) GetColor(address, serviceName string) (string, error) {
24 | args := m.Called(address, serviceName)
25 | return args.String(0), args.Error(1)
26 | }
27 |
28 | func (m *ServiceDiscoveryMock) PutScale(address, serviceName string, value int) (string, error) {
29 | args := m.Called(address, serviceName, value)
30 | return args.String(0), args.Error(1)
31 | }
32 |
33 | func (m *ServiceDiscoveryMock) PutColor(address, serviceName, value string) (string, error) {
34 | args := m.Called(address, serviceName, value)
35 | return args.String(0), args.Error(1)
36 | }
37 |
38 | func getServiceDiscoveryMock(opts Opts, skipMethod string) *ServiceDiscoveryMock {
39 | mockObj := new(ServiceDiscoveryMock)
40 | scaleCalc := 5
41 | if skipMethod != "GetScaleCalc" {
42 | mockObj.On("GetScaleCalc", opts.ServiceDiscoveryAddress, opts.ServiceName, opts.Scale).Return(scaleCalc, nil)
43 | }
44 | if skipMethod != "PutScale" {
45 | mockObj.On("PutScale", opts.ServiceDiscoveryAddress, opts.ServiceName, scaleCalc).Return("", nil)
46 | }
47 | if skipMethod != "GetColor" {
48 | mockObj.On("GetColor", opts.ServiceDiscoveryAddress, opts.ServiceName).Return("orange", nil)
49 | }
50 | if skipMethod != "GetNextColor" {
51 | mockObj.On("GetNextColor", opts.CurrentColor).Return("pink")
52 | }
53 | if skipMethod != "PutColor" {
54 | mockObj.On("PutColor", mock.Anything, mock.Anything, mock.Anything).Return("", nil)
55 | }
56 | return mockObj
57 | }
58 |
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | docker-machine create -d virtualbox proxy
4 |
5 | export CONSUL_IP=$(docker-machine ip proxy)
6 |
7 | export HOST_IP=$(docker-machine ip proxy)
8 |
9 | eval "$(docker-machine env proxy)"
10 |
11 | docker-compose \
12 | -p setup \
13 | -f docker-compose-setup.yml \
14 | up -d consul-server
15 |
16 | docker-machine create -d virtualbox \
17 | --swarm --swarm-master \
18 | --swarm-discovery="consul://$CONSUL_IP:8500" \
19 | --engine-opt="cluster-store=consul://$CONSUL_IP:8500" \
20 | --engine-opt="cluster-advertise=eth1:2376" \
21 | swarm-master
22 |
23 | docker-machine create -d virtualbox \
24 | --swarm \
25 | --swarm-discovery="consul://$CONSUL_IP:8500" \
26 | --engine-opt="cluster-store=consul://$CONSUL_IP:8500" \
27 | --engine-opt="cluster-advertise=eth1:2376" \
28 | swarm-node-1
29 |
30 | docker-machine create -d virtualbox \
31 | --swarm \
32 | --swarm-discovery="consul://$CONSUL_IP:8500" \
33 | --engine-opt="cluster-store=consul://$CONSUL_IP:8500" \
34 | --engine-opt="cluster-advertise=eth1:2376" \
35 | swarm-node-2
36 |
37 | eval "$(docker-machine env swarm-master)"
38 |
39 | export HOST_IP=$(docker-machine ip swarm-master)
40 |
41 | docker-compose \
42 | -p setup \
43 | -f docker-compose-setup.yml \
44 | up -d registrator
45 |
46 | eval "$(docker-machine env swarm-node-1)"
47 |
48 | export HOST_IP=$(docker-machine ip swarm-node-1)
49 |
50 | docker-compose \
51 | -p setup \
52 | -f docker-compose-setup.yml \
53 | up -d registrator
54 |
55 | eval "$(docker-machine env swarm-node-2)"
56 |
57 | export HOST_IP=$(docker-machine ip swarm-node-2)
58 |
59 | docker-compose \
60 | -p setup \
61 | -f docker-compose-setup.yml \
62 | up -d registrator
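The script leaves a standalone Swarm (swarm-master plus two nodes) and a proxy machine running Consul. A possible follow-up, not part of the script itself and assuming Docker Machine's --swarm flag for targeting the Swarm manager, is to point the shell at the cluster and hand the Consul address to docker-flow through the environment:

```bash
# Not part of setup.sh: target the Swarm manager and expose Consul to
# docker-flow (FLOW_CONSUL_ADDRESS maps to the consul-address option).
eval "$(docker-machine env --swarm swarm-master)"
export FLOW_CONSUL_ADDRESS=http://$(docker-machine ip proxy):8500
```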
--------------------------------------------------------------------------------
/something.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | export GOPATH=~/go/
3 |
4 | go build
5 |
6 | go test --cover -v
7 |
8 | go test -coverprofile=coverage.out dockerflow
9 |
10 | go tool cover --html=coverage.out
11 |
12 | docker run -d \
13 | -p "8500:8500" \
14 | -h "consul" \
15 | progrium/consul -server -bootstrap
16 |
17 | go build && ./docker-flow
18 |
19 | go build && FLOW_HOST=$DOCKER_HOST ./docker-flow
20 | ```
21 |
22 | https://github.com/vfarcic/books-ms/blob/master/Jenkinsfile
23 | https://github.com/vfarcic/ms-lifecycle/blob/master/ansible/roles/jenkins/files/scripts/workflow-util.groovy
24 |
--------------------------------------------------------------------------------
/test_configs/tmpl/go-demo-app-be.tmpl:
--------------------------------------------------------------------------------
1 | backend go-demo-app-be
2 | {{ range $i, $e := service "SERVICE_NAME" "any" }}
3 | server {{$e.Node}}_{{$i}}_{{$e.Port}} {{$e.Address}}:{{$e.Port}} check
4 | {{end}}
5 |
--------------------------------------------------------------------------------
/test_configs/tmpl/go-demo-app-fe.tmpl:
--------------------------------------------------------------------------------
1 | acl url_test-service path_beg /demo
2 | use_backend go-demo-app-be if url_test-service
3 |
--------------------------------------------------------------------------------
/util/util.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "io/ioutil"
5 | "os"
6 | "os/exec"
7 | "time"
8 | )
9 |
10 | var ReadFile = ioutil.ReadFile
11 | var WriteFile = ioutil.WriteFile
12 | var RemoveFile = os.Remove
13 | var ExecCmd = exec.Command
14 | var SetDockerHost = func(host, certPath string) {
15 | if len(host) > 0 {
16 | os.Setenv("DOCKER_HOST", host)
17 | } else {
18 | os.Unsetenv("DOCKER_HOST")
19 | }
20 | if len(certPath) > 0 {
21 | os.Setenv("DOCKER_CERT_PATH", certPath)
22 | } else {
23 | os.Unsetenv("DOCKER_CERT_PATH")
24 | }
25 | }
26 | var RunCmd = func(cmd *exec.Cmd) error {
27 | return cmd.Run()
28 | }
29 | var Sleep = func(d time.Duration) {
30 | time.Sleep(d)
31 | }
32 |
--------------------------------------------------------------------------------