├── demo
│   ├── ansible.cfg
│   ├── vars
│   │   ├── demo1_var.yml
│   │   ├── demo3_var.yml
│   │   ├── demo2_var.yml
│   │   └── common_var.yml
│   ├── Readme.md
│   ├── demo1.yml
│   ├── Vagrantfile
│   ├── templates
│   │   ├── superapp.py.j2
│   │   └── superapp.sh.j2
│   ├── demo2.yml
│   └── demo3.yml
├── .gitignore
├── templates
│   ├── s6-log.j2
│   ├── consul-profile-path.sh.j2
│   ├── haproxys6.j2
│   ├── consul-s6.j2
│   ├── consul-template-s6.j2
│   ├── consul-template-run.sh.j2
│   ├── haproxy-initial.cfg.j2
│   ├── consul-template.json.j2
│   ├── consul-template-sudo.j2
│   ├── consul-agent-run.sh.j2
│   ├── haproxy-reload.j2
│   ├── consul-agent.json.j2
│   ├── consul-service.j2
│   ├── consul-init.d.sh.j2
│   ├── consul-template-init.d.sh.j2
│   └── haproxy.ctmp.j2
├── .github
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── ISSUE_TEMPLATE.md
├── test
│   ├── integration
│   │   ├── basic-server
│   │   │   ├── serverspec
│   │   │   │   ├── spec_helper.rb
│   │   │   │   ├── consul_service_spec.rb
│   │   │   │   ├── consul_server_ui_spec.rb
│   │   │   │   └── consul_server_spec.rb
│   │   │   └── server.yml
│   │   ├── cluster-server1
│   │   │   ├── serverspec
│   │   │   │   ├── spec_helper.rb
│   │   │   │   ├── consul_server_ui_spec.rb
│   │   │   │   ├── consul_service_spec.rb
│   │   │   │   └── consul_server_spec.rb
│   │   │   └── server.yml
│   │   ├── cluster-server2
│   │   │   ├── serverspec
│   │   │   │   ├── spec_helper.rb
│   │   │   │   ├── consul_server_ui_spec.rb
│   │   │   │   ├── consul_service_spec.rb
│   │   │   │   └── consul_server_spec.rb
│   │   │   └── server.yml
│   │   ├── cluster-server3
│   │   │   ├── serverspec
│   │   │   │   ├── spec_helper.rb
│   │   │   │   ├── consul_server_ui_spec.rb
│   │   │   │   ├── consul_service_spec.rb
│   │   │   │   └── consul_server_spec.rb
│   │   │   └── server.yml
│   │   ├── tags
│   │   │   ├── tags.yml
│   │   │   ├── serverspec
│   │   │   │   └── consul_service_spec.rb
│   │   │   └── tags_vars.yml
│   │   ├── basic-agent
│   │   │   ├── agent.yml
│   │   │   ├── serverspec
│   │   │   │   ├── consul_template_spec.rb
│   │   │   │   ├── haproxy_spec.rb
│   │   │   │   ├── consul_agent_spec.rb
│   │   │   │   └── consul_service_spec.rb
│   │   │   └── agent_vars.yml
│   │   ├── cluster-agent1
│   │   │   ├── serverspec
│   │   │   │   ├── spec_helper.rb
│   │   │   │   ├── consul_template_spec.rb
│   │   │   │   ├── haproxy_spec.rb
│   │   │   │   ├── consul_agent_spec.rb
│   │   │   │   └── consul_service_spec.rb
│   │   │   └── agent.yml
│   │   ├── cluster-agent2
│   │   │   ├── serverspec
│   │   │   │   ├── spec_helper.rb
│   │   │   │   ├── consul_template_spec.rb
│   │   │   │   ├── haproxy_spec.rb
│   │   │   │   ├── consul_agent_spec.rb
│   │   │   │   └── consul_service_spec.rb
│   │   │   └── agent.yml
│   │   ├── cluster-agent3
│   │   │   ├── serverspec
│   │   │   │   ├── spec_helper.rb
│   │   │   │   ├── consul_template_spec.rb
│   │   │   │   ├── haproxy_spec.rb
│   │   │   │   ├── consul_agent_spec.rb
│   │   │   │   └── consul_service_spec.rb
│   │   │   └── agent.yml
│   │   ├── helper_spec.rb
│   │   └── nginx.yml
│   ├── ansible.cfg
│   ├── cluster-test.sh
│   └── ansible-setup.sh
├── tasks
│   ├── install
│   │   ├── packages.yml
│   │   ├── main.yml
│   │   ├── consul-common.yml
│   │   ├── consul-agent.yml
│   │   ├── haproxy.yml
│   │   └── consul-template.yml
│   ├── ad-hocs
│   │   ├── main.yml
│   │   ├── cleardata-dir.yml
│   │   └── build-raft-peers.yml
│   ├── consul-services.yml
│   ├── ip-match.yml
│   └── main.yml
├── Gemfile
├── meta
│   └── main.yml
├── LICENSE
├── vars
│   └── main.yml
├── .travis.yml
├── .kitchen.yml
├── handlers
│   └── main.yml
├── CONTRIBUTING.md
├── defaults
│   └── main.yml
├── library
│   └── s6.py
└── README.md
/demo/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | roles_path = ../../
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | .kitchen
3 | .galaxy_install_info
4 | *.retry
--------------------------------------------------------------------------------
/templates/s6-log.j2:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | exec logutil-service {{ item.log_dir }}
3 |
--------------------------------------------------------------------------------
/templates/consul-profile-path.sh.j2:
--------------------------------------------------------------------------------
1 | # bin path for consul
2 | export PATH="{{ consul_bin_dir }}:${PATH}"
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # What this PR changes:
2 | -
3 |
4 | # When reviewing, please consider:
5 | -
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # How to reproduce
2 | -
3 |
4 | # Expected behaviour
5 | -
6 |
7 | # Actual behaviour
8 | -
--------------------------------------------------------------------------------
/templates/haproxys6.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/with-contenv sh
2 |
3 | exec haproxy -db -f /etc/haproxy/haproxy.cfg -sf $(pidof haproxy)
4 |
--------------------------------------------------------------------------------
/templates/consul-s6.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/execlineb -P
2 |
3 | s6-setuidgid {{ consul_user }}
4 | {{ consul_bin_dir }}/consul_agent_run.sh
5 |
--------------------------------------------------------------------------------
/test/integration/basic-server/serverspec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | # Required by serverspec
4 | set :backend, :exec
5 |
--------------------------------------------------------------------------------
/test/integration/cluster-server1/serverspec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | # Required by serverspec
4 | set :backend, :exec
5 |
--------------------------------------------------------------------------------
/test/integration/cluster-server2/serverspec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | # Required by serverspec
4 | set :backend, :exec
5 |
--------------------------------------------------------------------------------
/test/integration/cluster-server3/serverspec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | # Required by serverspec
4 | set :backend, :exec
5 |
--------------------------------------------------------------------------------
/templates/consul-template-s6.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/execlineb -P
2 |
3 | s6-setuidgid {{ consul_user }}
4 | {{ consul_bin_dir }}/consul-template-run.sh
5 |
--------------------------------------------------------------------------------
/demo/vars/demo1_var.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | color : "#D80000"
4 | consul_server : True
5 | consul_bootstrap_expect : 1
6 | consul_ui : true
7 |
--------------------------------------------------------------------------------
/test/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | roles_path=../:../../:./test
3 |
4 | # V2
5 | callback_whitelist = changes
6 |
7 | ansible_managed="Ansible managed, Don't modify manually"
8 | retry_files_enabled = false
9 |
--------------------------------------------------------------------------------
/tasks/install/packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: packages | Ensure prerequisite packages exist
4 | apt:
5 | name="{{ item }}"
6 | with_items:
7 | - unzip
8 | when: ansible_distribution == 'Ubuntu'
9 |
--------------------------------------------------------------------------------
/tasks/ad-hocs/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: adhocs | include build raft peers
4 | include: build-raft-peers.yml
5 | when: consul_adhoc_build_raft_peers
6 |
7 | - name: adhocs | include clear data dir
8 | include: cleardata-dir.yml
9 | when: consul_adhoc_clear_data_dir
10 |
--------------------------------------------------------------------------------
/test/integration/tags/tags.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: python2 check
4 | hosts: all
5 | become: True
6 | gather_facts: false
7 |
8 | - name: tags
9 | hosts: all
10 | become: True
11 | gather_facts: true
12 | vars_files:
13 | - tags_vars.yml
14 | roles :
15 | - ansible-consul
16 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 |
3 | group :development do
4 | gem 'kitchen-ansiblepush'
5 | gem 'kitchen-docker', :git => 'https://github.com/ahelal/kitchen-docker.git',
6 | :branch => 'feature/alpine'
7 | gem 'kitchen-verifier-serverspec'
8 | gem 'net-ssh'
9 | gem 'serverspec'
10 | gem 'test-kitchen'
11 | end
12 |
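13 | # Typical workflow (a sketch, assuming Docker is available for the kitchen-docker driver):
14 | #   bundle install
15 | #   bundle exec kitchen test basic-server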
--------------------------------------------------------------------------------
/test/integration/cluster-server1/serverspec/consul_server_ui_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul UI' do
4 |
5 | describe command "curl -s http://127.0.0.1:8500/ui/ | grep ''" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain "Consul by HashiCorp" }
8 | end
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/test/integration/cluster-server1/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul service' do
4 |
5 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/consul -v" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"consul"' }
8 | end
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/test/integration/cluster-server2/serverspec/consul_server_ui_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul UI' do
4 |
5 | describe command "curl -s http://127.0.0.1:8500/ui/ | grep ''" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain "Consul by HashiCorp" }
8 | end
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/test/integration/cluster-server2/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul service' do
4 |
5 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/consul -v" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"consul"' }
8 | end
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/test/integration/cluster-server3/serverspec/consul_server_ui_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul UI' do
4 |
5 | describe command "curl -s http://127.0.0.1:8500/ui/ | grep ''" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain "Consul by HashiCorp" }
8 | end
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/test/integration/cluster-server3/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul service' do
4 |
5 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/consul -v" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"consul"' }
8 | end
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Adham Helal
4 | description:
5 | company: Hellofresh
6 |
7 | license: license (MIT)
8 | min_ansible_version: 1.9
9 | platforms:
10 | - name: Ubuntu
11 | versions:
12 | - trusty
13 | categories:
14 | - cloud
15 | - monitoring
16 | - networking
17 | - system
18 |
19 | dependencies: []
20 |
21 |
--------------------------------------------------------------------------------
/test/integration/basic-server/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | describe 'consul service' do
4 |
5 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/consul -v" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"consul"' }
8 | end
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/test/integration/basic-agent/agent.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: python2 check
4 | hosts: all
5 | become: True
6 | gather_facts: false
7 |
8 | - name: agent
9 | hosts: all
10 | become: True
11 | gather_facts: true
12 | vars_files:
13 | - agent_vars.yml
14 |
15 | roles :
16 | - ansible-consul
17 |
18 | post_tasks:
19 | - name: Install nginx
20 | include: ../nginx.yml
21 |
--------------------------------------------------------------------------------
/test/integration/basic-server/server.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: python2 check
4 | hosts: all
5 | become: True
6 | gather_facts: false
7 |
8 | - name: Server
9 | hosts: all
10 | become: True
11 | vars:
12 | consul_server : True
13 | consul_bootstrap_expect : 1
14 | consul_ui : true
15 | consul_haproxy_user : "kitchen"
16 | roles :
17 | - ansible-consul
--------------------------------------------------------------------------------
/test/integration/cluster-server2/server.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Server
4 | hosts: all
5 | become: True
6 | vars:
7 | consul_server : True
8 | consul_bootstrap_expect : 1
9 | consul_ui : true
10 |
11 | pre_tasks:
12 | - name: Update apt cache
13 | apt:
14 | update_cache=yes
15 | cache_valid_time=360
16 |
17 | roles :
18 | - ansible-consul
--------------------------------------------------------------------------------
/test/integration/cluster-server3/server.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Server
4 | hosts: all
5 | become: True
6 | vars:
7 | consul_server : True
8 | consul_bootstrap_expect : 1
9 | consul_ui : true
10 |
11 | pre_tasks:
12 | - name: Update apt cache
13 | apt:
14 | update_cache=yes
15 | cache_valid_time=360
16 |
17 | roles :
18 | - ansible-consul
--------------------------------------------------------------------------------
/tasks/install/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install | main | include consul common
4 | include: consul-common.yml
5 |
6 | - name: install | main | include consul agent
7 | include: consul-agent.yml
8 |
9 | - name: install | main | include consul template
10 | include: consul-template.yml
11 | when: consul_consumer
12 |
13 | - name: install | main | include HA-Proxy
14 | include: haproxy.yml
15 | when: consul_consumer
16 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent1/serverspec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | # Required by serverspec
4 | set :backend, :exec
5 |
6 |
7 | RSpec.configure do |config|
8 | # Use color in STDOUT
9 | config.color = true
10 |
11 | # Use color not only in STDOUT but also in pagers and files
12 | config.tty = true
13 |
14 | # Use the specified formatter
15 | config.formatter = :documentation # :progress, :html, :textmate
16 | end
--------------------------------------------------------------------------------
/test/integration/cluster-agent2/serverspec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | # Required by serverspec
4 | set :backend, :exec
5 |
6 |
7 | RSpec.configure do |config|
8 | # Use color in STDOUT
9 | config.color = true
10 |
11 | # Use color not only in STDOUT but also in pagers and files
12 | config.tty = true
13 |
14 | # Use the specified formatter
15 | config.formatter = :documentation # :progress, :html, :textmate
16 | end
--------------------------------------------------------------------------------
/test/integration/cluster-agent3/serverspec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | # Required by serverspec
4 | set :backend, :exec
5 |
6 |
7 | RSpec.configure do |config|
8 | # Use color in STDOUT
9 | config.color = true
10 |
11 | # Use color not only in STDOUT but also in pagers and files
12 | config.tty = true
13 |
14 | # Use the specified formatter
15 | config.formatter = :documentation # :progress, :html, :textmate
16 | end
--------------------------------------------------------------------------------
/test/cluster-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
4 | echo "Phase 1 (basic-server) Converge & Verify"
5 | bundle exec kitchen test basic-server
6 |
7 | echo "Phase 2 (basic-agent) Converge & Verify"
8 | bundle exec kitchen test basic-agent
9 |
10 | echo "Phase 3 (cluster-) converge"
11 | bundle exec kitchen converge cluster-*
12 |
13 | echo "Phase 4 (cluster-) verify"
14 | bundle exec kitchen verify cluster-*
15 |
16 | echo "Phase 5 (cluster-) destroy"
17 | bundle exec kitchen destroy cluster-*
18 |
--------------------------------------------------------------------------------
/test/integration/basic-server/serverspec/consul_server_ui_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | describe 'consul UI' do
4 | ui_port = 8500
5 |
6 | describe port(ui_port) do
7 | it { should be_listening }
8 | end
9 |
10 | describe command "curl -s http://127.0.0.1:#{ui_port}/ui/ | grep ''" do
11 | its(:exit_status) { should eq 0 }
12 | its(:stdout) { should contain "Consul by HashiCorp" }
13 | end
14 |
15 | end
16 |
--------------------------------------------------------------------------------
/demo/vars/demo3_var.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | color : "#808080"
4 | consul_start_join : [ "192.168.56.150" ]
5 |
6 | consul_producer : True
7 | consul_producer_services : [ 'superdb' ]
8 |
9 | #Since this is a demo we will make a list of packages to install
10 | packages_to_install :
11 | - "postgresql"
12 | - "python-psycopg2"
13 | - "vim"
14 | - "curl"
--------------------------------------------------------------------------------
/demo/vars/demo2_var.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | color : "#808080"
4 | consul_start_join : [ "192.168.56.150" ]
5 |
6 | consul_producer : True
7 | consul_producer_services : [ 'superapp' ]
8 | consul_consumer : True
9 | consul_consumer_services : [ 'superdb' ]
10 |
11 | packages_to_install :
12 | - "python-psycopg2"
13 | - "vim"
14 | - "curl"
15 |
16 | superapp_user : "{{ localuser }}"
17 | superapp_pwd : "{{ localhome }}"
18 | superapp_bin : "/usr/bin/python"
19 | superapp_opts : "superapp.py"
20 |
--------------------------------------------------------------------------------
/test/ansible-setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | echo "Running travis "
4 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
5 |
6 | SETUP_VERSION="v0.0.3"
7 | #SETUP_VERBOSITY="vv"
8 |
9 | ## Install Ansible 2.3
10 | ANSIBLE_VERSIONS[0]="2.3.2.0"
11 | INSTALL_TYPE[0]="pip"
12 | ANSIBLE_LABEL[0]="v2.3"
13 |
14 | # What's the default version?
15 | ANSIBLE_DEFAULT_VERSION="v2.3"
16 |
17 | ## Create a temp dir
18 | filename=$( echo ${0} | sed 's|/||g' )
19 | my_temp_dir="$(mktemp -dt ${filename}.XXXX)"
20 |
21 | curl -s https://raw.githubusercontent.com/ahelal/avm/${SETUP_VERSION}/setup.sh -o $my_temp_dir/setup.sh
22 |
23 | ## Run the setup
24 | . $my_temp_dir/setup.sh
25 |
--------------------------------------------------------------------------------
/templates/consul-template-run.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # THIS SCRIPT IS NOT intended to run directly. Use service instead
4 | #
5 |
6 | NAME="consul-template"
7 | CONSUL_LOG_FILE="{{ consul_template_log_file }}"
8 | CONSUL_BIN="{{ consul_bin_dir }}"
9 |
10 | # Make sure to use all available proc
11 | # https://groups.google.com/forum/#!topic/consul-tool/qewFEqgAoF8
12 | export GOMAXPROCS="$(grep -c ^processor /proc/cpuinfo)"
13 |
14 | # Run
15 | echo "$(date) **** Consul-template start" >> "${CONSUL_LOG_FILE}"
16 | exec "${CONSUL_BIN}"/"${NAME}" -config=/etc/consul-template.conf >> "${CONSUL_LOG_FILE}" 2>&1
17 | RC="$?"
18 | echo "$(date) **** Consul-template ended with ${RC}" >> "${CONSUL_LOG_FILE}"
19 | exit "${RC}"
20 |
--------------------------------------------------------------------------------
/test/integration/cluster-server1/server.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Server
4 | hosts: all
5 | become: True
6 | vars:
7 | consul_server : True
8 | consul_bootstrap_expect : 1
9 | consul_ui : true
10 | consul_node_name : "cluster_server1"
11 | consul_datacenter : "cluster"
12 | consul_domain : "cluster.example.com"
13 | consul_encrypt : "ZiDakXKv9D3MvL1UKkO1ew=="
14 | consul_server_port_server : 8300
15 | consul_http_port : 8500
16 | pre_tasks:
17 | - name: Update apt cache
18 | apt:
19 | update_cache=yes
20 | cache_valid_time=360
21 |
22 | roles :
23 | - ansible-consul
--------------------------------------------------------------------------------
/templates/haproxy-initial.cfg.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | # This is a temporary (but valid) haproxy config used to boot HAProxy until consul-template kicks in
3 |
4 | global
5 | {% if ansible_distribution == 'Ubuntu' %}
6 | log /dev/log local0
7 | log /dev/log local1 notice
8 | {% endif %}
9 | chroot /var/lib/haproxy
10 | user haproxy
11 | group haproxy
12 | daemon
13 |
14 | listen stats
15 | bind 127.0.0.1:3212
16 | mode http
17 | timeout connect 5000
18 | timeout check 5000
19 | timeout client 30000
20 | timeout server 30000
21 |
22 | defaults
23 | log global
24 | mode http
25 | option httplog
26 | option dontlognull
27 | timeout connect 5000
28 | timeout client 50000
29 | timeout server 50000
--------------------------------------------------------------------------------
/test/integration/cluster-agent1/serverspec/consul_template_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 |
4 | describe 'Consul template' do
5 |
6 | describe service('consul-template') do
7 | it { should be_enabled }
8 | it { should be_running }
9 | end
10 |
11 | describe file('/etc/consul-template.conf') do
12 | it { should be_file }
13 | it { should be_mode 640 }
14 | it { should be_owned_by 'consul' }
15 | it { should be_grouped_into 'consul' }
16 | end
17 |
18 | describe file('/etc/haproxy/haproxy.cfg') do
19 | its(:content) { should contain "# First template : Ansible managed, Don't modify manually\n# Second template : consul template"}
20 | end
21 |
22 | end
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent2/serverspec/consul_template_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 |
4 | describe 'Consul template' do
5 |
6 | describe service('consul-template') do
7 | it { should be_enabled }
8 | it { should be_running }
9 | end
10 |
11 | describe file('/etc/consul-template.conf') do
12 | it { should be_file }
13 | it { should be_mode 640 }
14 | it { should be_owned_by 'consul' }
15 | it { should be_grouped_into 'consul' }
16 | end
17 |
18 | describe file('/etc/haproxy/haproxy.cfg') do
19 | its(:content) { should contain "# First template : Ansible managed, Don't modify manually\n# Second template : consul template"}
20 | end
21 |
22 | end
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent3/serverspec/consul_template_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 |
4 | describe 'Consul template' do
5 |
6 | describe service('consul-template') do
7 | it { should be_enabled }
8 | it { should be_running }
9 | end
10 |
11 | describe file('/etc/consul-template.conf') do
12 | it { should be_file }
13 | it { should be_mode 640 }
14 | it { should be_owned_by 'consul' }
15 | it { should be_grouped_into 'consul' }
16 | end
17 |
18 | describe file('/etc/haproxy/haproxy.cfg') do
19 | its(:content) { should contain "# First template : Ansible managed, Don't modify manually\n# Second template : consul template"}
20 | end
21 |
22 | end
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/tasks/ad-hocs/cleardata-dir.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: adhoc | clear data dir | stop consul
4 | service:
5 | name="consul"
6 | state="stopped"
7 | when: "consul_service == 'service'"
8 |
9 | - name: adhoc | clear data dir | remove consul data dir
10 | file:
11 | state="absent"
12 | path="{{ consul_data_dir }}"
13 | owner="{{ consul_user }}"
14 | group="{{ consul_group }}"
15 |
16 | - name: adhoc | clear data dir | Create consul data dir
17 | file:
18 | state="directory"
19 | path="{{ consul_data_dir }}"
20 | owner="{{ consul_user }}"
21 | group="{{ consul_group }}"
22 |
23 | - name: adhoc | clear data dir | start consul
24 | service:
25 | name="consul"
26 | state="started"
27 | when: "consul_service == 'service'"
28 |
--------------------------------------------------------------------------------
/templates/consul-template.json.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | consul = "{{ consul_template_consul_server }}:{{ consul_template_consul_port }}"
3 |
4 | # log level
5 | log_level = "{{ consul_template_log_level }}"
6 | {% if consul_template_templates %}
7 | {% for template in consul_template_templates %}
8 |
9 | template {
10 | source = "{{ template.source }}"
11 | destination = "{{ template.destination }}"
12 | {% if template.command is defined %}command = "{{ template.command }}"{% endif %}
13 |
14 | {% if template.perms is defined %}perms = {{ template.perms }}{% endif %}
15 |
16 | {% if template.backup is defined %}backup = {{ template.backup | lower }}{% endif %}
17 | left_delimiter = "<%"
18 | right_delimiter = "%>"
19 | }{% endfor %}
20 | {% endif %}
21 |
22 |
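23 | # Example rendered block (hypothetical template entry, paths borrowed from this role's tests):
24 | #   template {
25 | #     source = "/opt/consul/templates/haproxy.ctmp"
26 | #     destination = "/etc/haproxy/haproxy.cfg"
27 | #     command = "sudo service haproxy reload"
28 | #     left_delimiter = "<%"
29 | #     right_delimiter = "%>"
30 | #   }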
--------------------------------------------------------------------------------
/test/integration/basic-agent/serverspec/consul_template_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | describe 'Consul template' do
4 | if %w(ubuntu).include? os[:family]
5 | describe service('consul-template') do
6 | it { should be_enabled }
7 | it { should be_running }
8 | end
9 | end
10 |
11 | describe file('/etc/consul-template.conf') do
12 | it { should be_file }
13 | it { should be_mode 640 }
14 | it { should be_owned_by 'consul' }
15 | it { should be_grouped_into 'consul' }
16 | end
17 |
18 | describe file('/etc/haproxy/haproxy.cfg') do
19 | its(:content) { should contain "# First template : Ansible managed, Don't modify manually\n# Second template : consul template" }
20 | end
21 | end
22 |
23 | # TODO: check consul template logs for keywords
24 | # TODO: check haproxy logs that it was reloaded
--------------------------------------------------------------------------------
/test/integration/cluster-server1/serverspec/consul_server_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul server' do
4 | describe service('consul') do
5 | it { should be_enabled }
6 | it { should be_running }
7 | end
8 |
9 | describe user('consul') do
10 | it { should exist }
11 | it { should belong_to_group 'consul' }
12 | end
13 |
14 | %w(/opt/consul /opt/consul/bin /opt/consul/data /etc/consul.d ).each do |dir|
15 | describe file(dir) do
16 | it { should be_directory }
17 | it { should be_owned_by('consul') }
18 | end
19 | end
20 |
21 | describe file('/etc/consul.conf') do
22 | it { should be_file }
23 | its (:content) { should match /"server": true/ }
24 | end
25 |
26 | describe file('/var/log/consul/consul-agent.log') do
27 | it { should be_file }
28 | it { should be_owned_by('consul') }
29 | its (:content) { should contain "New leader elected:" }
30 | end
31 |
32 | end
33 |
--------------------------------------------------------------------------------
/test/integration/cluster-server2/serverspec/consul_server_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul server' do
4 | describe service('consul') do
5 | it { should be_enabled }
6 | it { should be_running }
7 | end
8 |
9 | describe user('consul') do
10 | it { should exist }
11 | it { should belong_to_group 'consul' }
12 | end
13 |
14 | %w(/opt/consul /opt/consul/bin /opt/consul/data /etc/consul.d ).each do |dir|
15 | describe file(dir) do
16 | it { should be_directory }
17 | it { should be_owned_by('consul') }
18 | end
19 | end
20 |
21 | describe file('/etc/consul.conf') do
22 | it { should be_file }
23 | its (:content) { should match /"server": true/ }
24 | end
25 |
26 | describe file('/var/log/consul/consul-agent.log') do
27 | it { should be_file }
28 | it { should be_owned_by('consul') }
29 | its (:content) { should contain "New leader elected:" }
30 | end
31 |
32 | end
33 |
--------------------------------------------------------------------------------
/test/integration/cluster-server3/serverspec/consul_server_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul server' do
4 | describe service('consul') do
5 | it { should be_enabled }
6 | it { should be_running }
7 | end
8 |
9 | describe user('consul') do
10 | it { should exist }
11 | it { should belong_to_group 'consul' }
12 | end
13 |
14 | %w(/opt/consul /opt/consul/bin /opt/consul/data /etc/consul.d ).each do |dir|
15 | describe file(dir) do
16 | it { should be_directory }
17 | it { should be_owned_by('consul') }
18 | end
19 | end
20 |
21 | describe file('/etc/consul.conf') do
22 | it { should be_file }
23 | its (:content) { should match /"server": true/ }
24 | end
25 |
26 | describe file('/var/log/consul/consul-agent.log') do
27 | it { should be_file }
28 | it { should be_owned_by('consul') }
29 | its (:content) { should contain "New leader elected:" }
30 | end
31 |
32 | end
33 |
--------------------------------------------------------------------------------
/demo/Readme.md:
--------------------------------------------------------------------------------
1 | Simple Consul Demo
2 | ------------------
3 |
4 | This demo will create three instances:
5 |
6 | * demo1
7 | * demo2
8 | * demo3
9 |
10 |
11 | All instances share a [common var file](https://github.com/hellofresh/ansible-consul/blob/master/demo/vars/common_var.yml)
12 |
13 |
14 | **Demo1**
15 | - The consul server, with the UI enabled ([demo1 vars](https://github.com/hellofresh/ansible-consul/blob/master/demo/vars/demo1_var.yml))
16 | - Accessible via **192.168.56.150**
17 |
18 | **Demo2**
19 | - An app server hosting our **superapp** ([demo2 vars](https://github.com/hellofresh/ansible-consul/blob/master/demo/vars/demo2_var.yml))
20 | - A producer of **superapp** and a consumer of **superdb**
21 | - Accessible via **192.168.56.151**
22 |
23 | **Demo3**
24 | - Our DB server, hosting **superdb** ([demo3 vars](https://github.com/hellofresh/ansible-consul/blob/master/demo/vars/demo3_var.yml))
25 | - A producer of **superdb**
26 | - Accessible via **192.168.56.152**
27 |
28 |
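29 | **Running the demo**
30 |
31 | A minimal way to bring the demo up, assuming Vagrant and VirtualBox are installed:
32 |
33 | ```sh
34 | # from the demo/ directory: boot and provision all three nodes
35 | vagrant up
36 |
37 | # the consul UI is served by demo1
38 | curl -s http://192.168.56.150:8500/ui/
39 | ```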
--------------------------------------------------------------------------------
/demo/demo1.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Demo1 play
4 | hosts: all
5 | become: True
6 | vars_files:
7 | - "vars/common_var.yml"
8 | - "vars/{{inventory_hostname}}_var.yml"
9 | pre_tasks:
10 |
11 | - name: Create your local user
12 | user:
13 | name="{{ localuser }}"
14 | home="{{ localhome }}"
15 | shell="/bin/bash"
16 | append="true"
17 | group="admin"
18 | comment="{{localuser}}"
19 |
20 | - name: Putting your authorized_key
21 | authorized_key:
22 | key="{{lookup('file', '~/.ssh/id_rsa.pub')}}"
23 | user="{{localuser}}"
24 | manage_dir=yes
25 | ignore_errors: yes
26 |
27 | - name : Change PS1
28 | lineinfile:
29 | dest="{{ localhome }}/.bashrc"
30 | insertafter="EOF"
31 | line="export PS1=\"{{ PS1 }}\""
32 |
33 | - name: Update apt cache
34 | apt:
35 | update_cache=yes
36 | cache_valid_time=360
37 |
38 | roles :
39 | - consul
--------------------------------------------------------------------------------
/test/integration/cluster-agent1/serverspec/haproxy_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 |
4 | describe 'HAPROXY' do
5 |
6 | describe service('haproxy') do
7 | it { should be_enabled }
8 | it { should be_running }
9 | end
10 |
11 | describe file('/etc/haproxy/haproxy.cfg') do
12 | it { should be_file }
13 | it { should be_mode 644 }
14 | it { should be_owned_by 'consul' }
15 | it { should be_grouped_into 'consul' }
16 | end
17 |
18 | describe file('/etc/haproxy/') do
19 | it { should be_directory}
20 | it { should be_mode 775 }
21 | it { should be_owned_by 'root' }
22 | it { should be_grouped_into 'consul' }
23 | end
24 |
25 | describe "stats page" do
26 | describe port(3212) do
27 | it { should be_listening.with('tcp') }
28 | end
29 |
30 | describe command "curl -s http://127.0.0.1:3212" do
31 | its(:exit_status) { should eq 0 }
32 | its(:stdout) { should contain 'Statistics Report' }
33 | end
34 | end
35 | end
--------------------------------------------------------------------------------
/test/integration/cluster-agent2/serverspec/haproxy_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 |
4 | describe 'HAPROXY' do
5 |
6 | describe service('haproxy') do
7 | it { should be_enabled }
8 | it { should be_running }
9 | end
10 |
11 | describe file('/etc/haproxy/haproxy.cfg') do
12 | it { should be_file }
13 | it { should be_mode 644 }
14 | it { should be_owned_by 'consul' }
15 | it { should be_grouped_into 'consul' }
16 | end
17 |
18 | describe file('/etc/haproxy/') do
19 | it { should be_directory}
20 | it { should be_mode 775 }
21 | it { should be_owned_by 'root' }
22 | it { should be_grouped_into 'consul' }
23 | end
24 |
25 | describe "stats page" do
26 | describe port(3212) do
27 | it { should be_listening.with('tcp') }
28 | end
29 |
30 | describe command "curl -s http://127.0.0.1:3212" do
31 | its(:exit_status) { should eq 0 }
32 | its(:stdout) { should contain 'Statistics Report' }
33 | end
34 | end
35 | end
--------------------------------------------------------------------------------
/test/integration/cluster-agent3/serverspec/haproxy_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 |
4 | describe 'HAPROXY' do
5 |
6 | describe service('haproxy') do
7 | it { should be_enabled }
8 | it { should be_running }
9 | end
10 |
11 | describe file('/etc/haproxy/haproxy.cfg') do
12 | it { should be_file }
13 | it { should be_mode 644 }
14 | it { should be_owned_by 'consul' }
15 | it { should be_grouped_into 'consul' }
16 | end
17 |
18 | describe file('/etc/haproxy/') do
19 | it { should be_directory}
20 | it { should be_mode 775 }
21 | it { should be_owned_by 'root' }
22 | it { should be_grouped_into 'consul' }
23 | end
24 |
25 | describe "stats page" do
26 | describe port(3212) do
27 | it { should be_listening.with('tcp') }
28 | end
29 |
30 | describe command "curl -s http://127.0.0.1:3212" do
31 | its(:exit_status) { should eq 0 }
32 | its(:stdout) { should contain 'Statistics Report' }
33 | end
34 | end
35 | end
--------------------------------------------------------------------------------
/test/integration/basic-agent/serverspec/haproxy_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | describe 'HAPROXY' do
4 |
5 | if %w(ubuntu).include? os[:family]
6 | describe service('haproxy') do
7 | it { should be_enabled }
8 | it { should be_running }
9 | end
10 | end
11 |
12 | describe file('/etc/haproxy/haproxy.cfg') do
13 | it { should be_file }
14 | it { should be_mode 644 }
15 | it { should be_owned_by 'consul' }
16 | it { should be_grouped_into 'consul' }
17 | end
18 |
19 | describe file('/etc/haproxy/') do
20 | it { should be_directory }
21 | it { should be_mode 775 }
22 | it { should be_owned_by 'root' }
23 | it { should be_grouped_into 'consul' }
24 | end
25 |
26 | describe 'stats page' do
27 | describe port(3212) do
28 | it { should be_listening.with('tcp') }
29 | end
30 |
31 | describe command 'curl -s http://127.0.0.1:3212' do
32 | its(:exit_status) { should eq 0 }
33 | its(:stdout) { should contain 'Statistics Report' }
34 | end
35 | end
36 | end
37 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 HelloFresh
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ## Compatibility issue with paths in v1 and v2
4 | path_for_template : ""
5 |
6 | ## Structure of Archive and URL
7 | consul_agent_archive : "consul_{{ consul_agent_version }}_linux_amd64.zip"
8 | consul_agent_download_url : "https://releases.hashicorp.com/consul/{{ consul_agent_version }}/{{ consul_agent_archive }}"
9 | consul_template_archive : "consul-template_{{ consul_template_version }}_linux_amd64.zip"
10 | consul_template_download_url : "https://releases.hashicorp.com/consul-template/{{ consul_template_version }}/{{ consul_template_archive }}"
11 |
12 | # For adhoc
13 | consul_list_of_servers_variables : []
14 | consul_raft_file : "{{ consul_data_dir }}/raft/peers.json"
15 |
16 | # empty old service
17 | old_services : []
18 |
19 | # Build consul_template_service_options_str: if a template service option is defined,
20 | # prepend the "|" filter syntax. See https://github.com/hashicorp/consul-template#service
21 | consul_template_service_options_str : "{{ '' if consul_template_service_options == '' else '|' ~ consul_template_service_options }}"
22 |
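23 | # Example (hypothetical consul_agent_version "0.7.5"): consul_agent_download_url
24 | # resolves to https://releases.hashicorp.com/consul/0.7.5/consul_0.7.5_linux_amd64.zip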
--------------------------------------------------------------------------------
/demo/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | Vagrant.configure("2") do |config|
5 | # boxes at https://atlas.hashicorp.com/search.
6 | config.vm.box = "ubuntu/trusty64"
7 |
8 | ## Number of nodes
9 | nodes = 3
10 | rangeofips = 149
11 | (1..nodes).each do |n|
12 |
13 | vmip = "192.168.56.#{rangeofips + n.to_i}"
14 | name = "demo#{n}"
15 | ##Node Conf
16 | config.vm.provider "virtualbox" do |v|
17 | v.customize ["modifyvm", :id, "--cpus", "1", "--memory", "512"]
18 | end
19 |
20 | config.vm.define name do |cfg|
21 | cfg.vm.box = name
22 | cfg.vm.host_name = name
23 |
24 | #Comment public network to disable bridge
25 | #cfg.vm.network :public_network
26 | cfg.vm.network :private_network, ip: vmip
27 |
28 | cfg.ssh.forward_agent = true
29 |
30 | ##headless or non headless machine
31 | cfg.vm.provider "virtualbox" do |vb|
32 | vb.gui = false
33 | end
34 | ##Ansible Provisioning
35 | cfg.vm.provision :ansible do |ansible|
36 | ansible.playbook = "#{name}.yml"
37 | ##Debugging
38 | ansible.verbose = true
39 | end
40 | end
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/test/integration/helper_spec.rb:
--------------------------------------------------------------------------------
1 | require 'rubygems'
2 | require 'bundler/setup'
3 | require 'serverspec'
4 | require 'pathname'
5 | require 'net/ssh'
6 |
7 | RSpec.configure do |config|
8 | # Use color in STDOUT
9 | config.color = true
10 | # Use color not only in STDOUT but also in pagers and files
11 | config.tty = true
12 |
13 | # By default use ssh
14 | verify_conn = ENV['KITCHEN_VERIFY_CONN'] || 'ssh'
15 | if verify_conn == 'ssh'
16 | set :host, ENV['KITCHEN_HOSTNAME']
17 | # ssh options at http://net-ssh.github.io/net-ssh/Net/SSH.html#method-c-start
18 | set :ssh_options,
19 | user: ENV['KITCHEN_USERNAME'],
20 | port: ENV['KITCHEN_PORT'],
21 | auth_methods: ['publickey'],
22 | keys: [ENV['KITCHEN_SSH_KEY']],
23 | keys_only: true,
24 | paranoid: false,
25 | use_agent: false,
26 | verbose: :error
27 | set :backend, :ssh
28 | set :request_pty, true
29 | puts "serverspec config ssh '#{ENV['KITCHEN_USERNAME']}@#{ENV['KITCHEN_HOSTNAME']} -p #{ENV['KITCHEN_PORT']} -i #{ENV['KITCHEN_SSH_KEY']}'"
30 | elsif verify_conn == 'exec'
31 | puts 'serverspec :backend, :exec'
32 | set :backend, :exec
33 | else
34 | puts "invalid serverspec backend #{verify_conn}"
35 | end
36 | end
37 |
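38 | # Typical invocations (a sketch, mirroring the env vars kitchen-verifier-serverspec sets):
39 | #   KITCHEN_VERIFY_CONN=ssh  -> specs run from the workstation over net-ssh
40 | #   KITCHEN_VERIFY_CONN=exec -> specs run directly on the instance under test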
--------------------------------------------------------------------------------
/test/integration/nginx.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install nginx ubuntu
4 | apt:
5 | name="{{ item }}"
6 | with_items:
7 | - "nginx"
8 | when: ansible_distribution == 'Ubuntu'
9 |
10 | - name: start nginx
11 | service:
12 | name="nginx"
13 | state="started"
14 | when: "consul_service == 'service'"
15 | - block:
16 | - name: install nginx alpine
17 | apk:
18 | name="{{ item }}"
19 | state="latest"
20 | with_items:
21 | - "nginx"
22 | when: ansible_distribution == 'Alpine'
23 |
24 | - name: nginx s6 service dir
25 | file:
26 | path="/var/run/s6/services/nginx"
27 | state="directory"
28 | mode=0755
29 |
30 | - name: nginx s6 service file
31 | copy:
32 | content="#!/bin/sh\nexec nginx -c /etc/nginx/nginx.conf -g 'pid /tmp/nginx.pid; daemon off;'\n"
33 | dest="/var/run/s6/services/nginx/run"
34 | mode=0755
35 |
36 | - name: Reload s6 service
37 | shell: s6-svscanctl -a {{ consul_s6_servicedir }}
38 | changed_when: false
39 |
40 | when: ansible_distribution == 'Alpine'
41 |
42 | - name: Pause till everything is updated
43 | pause:
44 | prompt="Make sure that health checks pass and our services are part of the game"
45 | seconds=20
46 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: ruby
3 | rvm:
4 | - 2.5
5 |
6 | sudo: required
7 | services:
8 | - docker
9 |
10 | env:
11 | - TEST_COMMAND="bundle exec kitchen test basic-server-alpine34"
12 | - TEST_COMMAND="bundle exec kitchen test basic-server-ubuntu1404"
13 | - TEST_COMMAND="bundle exec kitchen test basic-server-ubuntu1604"
14 | - TEST_COMMAND="bundle exec kitchen test basic-agent-alpine34"
15 | - TEST_COMMAND="bundle exec kitchen test basic-agent-ubuntu1404"
16 | - TEST_COMMAND="bundle exec kitchen test basic-agent-ubuntu1604"
17 | - TEST_COMMAND="bundle exec kitchen test tags-alpine34"
18 | - TEST_COMMAND="bundle exec kitchen test tags-ubuntu1404"
19 | - TEST_COMMAND="bundle exec kitchen test tags-ubuntu1604"
20 | #- TEST_COMMAND="./test/cluster-test.sh"
21 |
22 | before_install:
23 | # Make sure everything's up to date.
24 | - sudo apt-get update -qq
25 | - sudo apt-get install -qq python-apt python-pycurl git python-pip build-essential autoconf
26 |
27 | install:
28 | - bash test/ansible-setup.sh
29 | - bundle install
30 | - ~/.venv_ansible/v2.3/venv/bin/pip install netaddr
31 |
32 | script:
33 | - ansible --version
34 | - ruby --version
35 | - python --version
36 | - pip --version
37 | - bundler --version
38 | - bundle show
39 | - $TEST_COMMAND
40 |
--------------------------------------------------------------------------------
/templates/consul-template-sudo.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | {% if consul_service == 'service' %}
3 | {{ consul_user }} ALL=NOPASSWD:/usr/sbin/service haproxy reload
4 | {% elif consul_service == 's6' %}
5 | # TODO: Should probably use aliases. Keeping permissions this open is not cool :(
6 | {{ consul_user }} ALL=NOPASSWD:/bin/s6-svc
7 | {{ consul_user }} ALL=NOPASSWD:/bin/s6-svok
8 | {{ consul_user }} ALL=NOPASSWD:/bin/ln
9 |
10 | # {{ consul_user }} ALL=NOPASSWD:/bin/s6-svc -O /haproxy-current
11 | # {{ consul_user }} ALL=NOPASSWD:/bin/s6-svc -u /haproxy-current
12 | # {{ consul_user }} ALL=NOPASSWD:/bin/s6-svok /haproxy-current
13 |
14 | # {{ consul_user }} ALL=NOPASSWD:/bin/s6-svc -O /haproxy-alt
15 | # {{ consul_user }} ALL=NOPASSWD:/bin/s6-svc -u /haproxy-alt
16 | # {{ consul_user }} ALL=NOPASSWD:/bin/s6-svok /haproxy-alt
17 |
18 | # {{ consul_user }} ALL=NOPASSWD:/bin/ln -sfn {{ consul_s6_servicedir }}/haproxy1 /haproxy-current
19 | # {{ consul_user }} ALL=NOPASSWD:/bin/ln -sfn {{ consul_s6_servicedir }}/haproxy2 /haproxy-current
20 | # {{ consul_user }} ALL=NOPASSWD:/bin/ln -sfn {{ consul_s6_servicedir }}/haproxy1 /haproxy-alt
21 | # {{ consul_user }} ALL=NOPASSWD:/bin/ln -sfn {{ consul_s6_servicedir }}/haproxy2 /haproxy-alt
22 | {{ consul_user }} ALL=NOPASSWD:/usr/bin/whoami
23 | Defaults:{{ consul_user }} !requiretty
24 | {% endif %}
25 |
--------------------------------------------------------------------------------
/tasks/consul-services.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: consul services | Get current services
4 | shell: "ls sv* | sed 's/sv_//g;s/.json//g'"
5 | changed_when: False
6 | args:
7 | chdir: "{{ consul_config_dir }}"
8 | register: registered_consul_services
9 |
10 | - name: consul services | Initialize a flat list
11 | set_fact:
12 | consul_producer_services_flat: []
13 |
14 | - name: consul services | Populate a flat list
15 | set_fact:
16 | consul_producer_services_flat: "{{ consul_producer_services_flat + [ item.name if item is mapping else item ] }}"
17 | with_items: "{{ consul_producer_services }}"
18 |
19 | - name: consul services | Check if any services are old
20 | set_fact:
21 | old_services: "{{ registered_consul_services.stdout_lines | difference(consul_producer_services_flat) }}"
22 |
23 | - name: consul services | Ensure old consul services JSON (if any) are deregistered
24 | file:
25 | path="{{ consul_config_dir }}/sv_{{ item }}.json"
26 | state="absent"
27 | with_items: "{{ old_services }}"
28 | notify:
29 | - reload consul service
30 |
31 | - name: consul services | Ensure consul services JSON are registered
32 | template:
33 | src="consul-service.j2"
34 | dest="{{ consul_config_dir }}/sv_{{ item.name if item is mapping else item }}.json"
35 | with_items: "{{ consul_producer_services }}"
36 | notify:
37 | - reload consul service
38 |
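39 | # Note: service definitions live as sv_<name>.json in consul_config_dir (e.g. a
40 | # hypothetical sv_superapp.json); the ls|sed task above recovers the bare names so
41 | # stale definitions can be removed before the current ones are templated.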
--------------------------------------------------------------------------------
/demo/vars/common_var.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | localuser : "{{ lookup('env','USER') }}"
4 | localhome : "/home/{{ localuser }}"
5 | PS1 : '\[\e]0;\u@\h: \w\a\]${debian_chroot:+($debian_chroot)}\u@\[\e[0;31m\]\h\[\e[m\]:\w\$ '
6 |
7 | environment_name : "demo"
8 |
9 | consul_network_autobind_range : "192.168.56.0/24"
10 |
11 | # Definition of our services
12 | consul_services :
13 | superapp :
14 | name : "superapp"
15 | tags :
16 | - "test"
17 | - "{{ environment_name }}"
18 | port : 8000
19 | check :
20 | script : "curl localhost:8000 > /dev/null 2>&1"
21 | interval : "10s"
22 | haproxy :
23 | server_options : "check inter 10s fastinter 5s downinter 8s rise 3 fall 2"
24 |
25 | superdb :
26 | name : "superdb"
27 | tags :
28 | - "test"
29 | - "{{ environment_name }}"
30 | port : 5432
31 | check :
32 | script : "netstat -ant | grep 5432 | grep -v grep > /dev/null 2>&1"
33 | interval : "10s"
34 | haproxy :
35 | server_options : "check inter 10s fastinter 5s downinter 8s rise 3 fall 2"
36 | service_mode : "tcp"
37 |
--------------------------------------------------------------------------------
/templates/consul-agent-run.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # THIS SCRIPT IS NOT intended to run directly. Use service instead
4 | #
5 |
6 | NAME="consul"
7 | LOCAL_IP="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" # EC2-style instance metadata service
8 | LOCAL_HOSTNAME="$(curl -s http://169.254.169.254/latest/meta-data/local-hostname | cut -d "." -f 1)"
9 | CONSUL_CONFIG_DIR="{{ consul_config_dir }}"
10 | CONSUL_DATA_DIR="{{ consul_data_dir }}"
11 | CONSUL_LOG_FILE="{{ consul_agent_log_file }}"
12 | CONSUL_CONFIG_FILE="{{ consul_config_agent_file }}"
13 | CONSUL_BIN="{{ consul_bin_dir }}"
14 | CONSUL_BIND="{{ '${LOCAL_IP}' if consul_network_bind == 'auto' else consul_network_bind }}"
15 | CONSUL_NODE="{{ (consul_node_name_prefix if consul_node_name_prefix is defined else '') + '${LOCAL_HOSTNAME}' if consul_node_name == 'auto' else consul_node_name }}"
16 |
17 | # Make sure to use all available proc
18 | # https://groups.google.com/forum/#!topic/consul-tool/qewFEqgAoF8
19 | export GOMAXPROCS="$(grep -c ^processor /proc/cpuinfo)"
20 |
21 | echo "$(date) **** Consul agent start ****" >> "${CONSUL_LOG_FILE}"
22 | exec "${CONSUL_BIN}"/"${NAME}" agent -config-dir="${CONSUL_CONFIG_DIR}" -config-file="${CONSUL_CONFIG_FILE}" -data-dir="${CONSUL_DATA_DIR}" -bind="${CONSUL_BIND}" -node="${CONSUL_NODE}" >> "${CONSUL_LOG_FILE}" 2>&1
23 | RC="$?"
24 | echo "$(date) **** Consul-agent ended with ${RC} ****" >> "${CONSUL_LOG_FILE}"
25 | exit "${RC}"
26 |
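27 | # Example (hypothetical values): with consul_node_name == 'auto' and
28 | # consul_node_name_prefix == 'web-', a host whose metadata hostname is
29 | # 'ip-10-0-0-5.eu-west-1.compute.internal' runs as node 'web-ip-10-0-0-5'.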
--------------------------------------------------------------------------------
/tasks/install/consul-common.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install | consul common | Ensure consul group exists
4 | group:
5 | name="{{ consul_group }}"
6 | state=present
7 |
8 | - name: install | consul common | Ensure consul user exists
9 | user:
10 | home="{{ consul_home_dir }}"
11 | name="{{ consul_user }}"
12 | group="{{ consul_group }}"
13 | system="yes"
14 |
15 | - name: install | consul common | Ensure consul directory exists
16 | file:
17 | state="directory"
18 | path="{{ item }}"
19 | owner="{{ consul_user }}"
20 | group="{{ consul_group }}"
21 | with_items:
22 | - "{{ consul_tmp_dir }}"
23 | - "{{ consul_home_dir }}"
24 | - "{{ consul_data_dir }}"
25 | - "{{ consul_config_dir }}"
26 | - "{{ consul_template_dir }}"
27 | - "{{ consul_bin_dir }}"
28 | - "{{ consul_log_dir }}"
29 |
30 | - name: install | consul common | Ensure log file is writable by consul user/group
31 | file:
32 | state="touch"
33 | path="{{ item }}"
34 | owner="{{ consul_user }}"
35 | group="{{ consul_group }}"
36 | changed_when: false
37 | with_items:
38 | - "{{ consul_agent_log_file }}"
39 | - "{{ consul_template_log_file }}"
40 |
41 | - name: install | consul common | Ensure consul bin path is resolvable through profile.d
42 | template:
43 | src="consul-profile-path.sh.j2"
44 | dest="/etc/profile.d/consul.sh"
45 | owner="root"
46 | group="root"
47 | mode="0755"
48 |
--------------------------------------------------------------------------------
/tasks/ip-match.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: ip_match | (1) Match range address
4 | set_fact:
5 | consul_bind_tmp="{{ ansible_all_ipv4_addresses | ipaddr(consul_network_autobind_range) }}"
6 | consul_autobind_type_set="Range"
7 | when: "consul_network_autobind_range is defined"
8 |
9 | - name: ip_match | (2) Match private or public address
10 | set_fact:
11 | consul_bind_tmp="{{ ansible_all_ipv4_addresses | ipaddr(consul_network_autobind_type) }}"
12 | consul_autobind_type_set="{{ consul_network_autobind_type }}"
13 | when: "consul_network_autobind_type is defined and consul_autobind_type_set is not defined"
14 | connection: local
15 | delegate_to: localhost
16 |
17 | - name: ip_match | WARNING More than one match found
18 | debug:
19 | msg="Your rule matched more than one IP {{ consul_bind_tmp }}. Sorry will use the first IP '{{ consul_bind_tmp | first }}'"
20 | when: consul_bind_tmp | length > 1
21 |
22 | - name: ip_match | Failed to match any rule
23 | fail: msg="Could not find an IP in {{ ansible_all_ipv4_addresses }} that match your rules. Please check your rules."
24 | when: consul_bind_tmp | length == 0
25 |
26 | - name: ip_match | Set consul bind address to match your rule
27 | set_fact:
28 | consul_network_bind="{{ consul_bind_tmp | first }}"
29 |
30 | - name: ip_match | Matched service_ip
31 | debug:
32 | msg="Will use the following IP {{ consul_network_bind }} because of rule '{{ consul_autobind_type_set }}'"
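33 |
34 | # Example: with ansible_all_ipv4_addresses == ['10.0.2.15', '192.168.56.150'] and
35 | # consul_network_autobind_range == '192.168.56.0/24', the ipaddr filter keeps only
36 | # '192.168.56.150', which becomes consul_network_bind.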
--------------------------------------------------------------------------------
/test/integration/cluster-agent1/serverspec/consul_agent_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul server' do
4 | describe service('consul') do
5 | it { should be_enabled }
6 | it { should be_running }
7 | end
8 |
9 | describe user('consul') do
10 | it { should exist }
11 | it { should belong_to_group 'consul' }
12 | end
13 |
14 | %w(/opt/consul /opt/consul/bin /opt/consul/data /etc/consul.d ).each do |dir|
15 | describe file(dir) do
16 | it { should be_directory }
17 | it { should be_owned_by('consul') }
18 | end
19 | end
20 |
21 | describe file('/etc/consul.conf') do
22 | it { should be_file }
23 | its (:content) { should match /"server": true/ }
24 | end
25 |
26 | describe file('/var/log/consul/consul-agent.log') do
27 | it { should be_file }
28 | it { should be_owned_by('consul') }
29 | its (:content) { should contain "New leader elected:" }
30 | end
31 |
32 | describe port(8300) do
33 | it { should be_listening.with('tcp') }
34 | end
35 | describe port(8301) do
36 | it { should be_listening.with('tcp') }
37 | end
38 |
39 | describe port(8500) do
40 | it { should be_listening.on('127.0.0.1').with('tcp') }
41 | end
42 |
43 | describe 'UI should be disabled' do
44 | describe command "curl -s -I http://127.0.0.1:8500/ui/" do
45 | its(:exit_status) { should eq 0 }
46 | its(:stdout) { should contain 'HTTP/1.1 404 Not Found' }
47 | end
48 | end
49 |
50 | end
51 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent2/serverspec/consul_agent_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul server' do
4 | describe service('consul') do
5 | it { should be_enabled }
6 | it { should be_running }
7 | end
8 |
9 | describe user('consul') do
10 | it { should exist }
11 | it { should belong_to_group 'consul' }
12 | end
13 |
14 | %w(/opt/consul /opt/consul/bin /opt/consul/data /etc/consul.d ).each do |dir|
15 | describe file(dir) do
16 | it { should be_directory }
17 | it { should be_owned_by('consul') }
18 | end
19 | end
20 |
21 | describe file('/etc/consul.conf') do
22 | it { should be_file }
23 |     its(:content) { should match /"server": true/ }
24 | end
25 |
26 | describe file('/var/log/consul/consul-agent.log') do
27 | it { should be_file }
28 | it { should be_owned_by('consul') }
29 |     its(:content) { should contain 'New leader elected:' }
30 | end
31 |
32 | describe port(8300) do
33 | it { should be_listening.with('tcp') }
34 | end
35 | describe port(8301) do
36 | it { should be_listening.with('tcp') }
37 | end
38 |
39 | describe port(8500) do
40 | it { should be_listening.on('127.0.0.1').with('tcp') }
41 | end
42 |
43 | describe 'UI should be disabled' do
44 | describe command 'curl -s -I http://127.0.0.1:8500/ui/' do
45 | its(:exit_status) { should eq 0 }
46 | its(:stdout) { should contain 'HTTP/1.1 404 Not Found' }
47 | end
48 | end
49 |
50 | end
51 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent3/serverspec/consul_agent_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'consul server' do
4 | describe service('consul') do
5 | it { should be_enabled }
6 | it { should be_running }
7 | end
8 |
9 | describe user('consul') do
10 | it { should exist }
11 | it { should belong_to_group 'consul' }
12 | end
13 |
14 | %w(/opt/consul /opt/consul/bin /opt/consul/data /etc/consul.d ).each do |dir|
15 | describe file(dir) do
16 | it { should be_directory }
17 | it { should be_owned_by('consul') }
18 | end
19 | end
20 |
21 | describe file('/etc/consul.conf') do
22 | it { should be_file }
23 |     its(:content) { should match /"server": true/ }
24 | end
25 |
26 | describe file('/var/log/consul/consul-agent.log') do
27 | it { should be_file }
28 | it { should be_owned_by('consul') }
29 |     its(:content) { should contain "New leader elected:" }
30 | end
31 |
32 | describe port(8300) do
33 | it { should be_listening.with('tcp') }
34 | end
35 | describe port(8301) do
36 | it { should be_listening.with('tcp') }
37 | end
38 |
39 | describe port(8500) do
40 | it { should be_listening.on('127.0.0.1').with('tcp') }
41 | end
42 |
43 | describe 'UI should be disabled' do
44 | describe command "curl -s -I http://127.0.0.1:8500/ui/" do
45 | its(:exit_status) { should eq 0 }
46 | its(:stdout) { should contain 'HTTP/1.1 404 Not Found' }
47 | end
48 | end
49 |
50 | end
51 |
--------------------------------------------------------------------------------
/test/integration/basic-server/serverspec/consul_server_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | server_port = 8300
4 |
5 | describe 'consul server' do
6 |
7 | describe 'Ports' do
8 | describe port(server_port) do
9 | it { should be_listening }
10 | end
11 | describe port(server_port) do
12 | it { should_not be_listening.on('127.0.0.1') }
13 | end
14 | end
15 | if %w(ubuntu).include? os[:family]
16 | describe service('consul') do
17 | it { should be_enabled }
18 | it { should be_running }
19 | end
20 | end
21 |
22 | describe user('consul') do
23 | it { should exist }
24 | it { should belong_to_group 'consul' }
25 | end
26 |
27 | %w(/opt/consul /opt/consul/bin /opt/consul/data /etc/consul.d ).each do |dir|
28 | describe file(dir) do
29 | it { should be_directory }
30 | it { should be_owned_by('consul') }
31 | end
32 | end
33 |
34 | describe file('/etc/consul.conf') do
35 | it { should be_file }
36 |     its(:content) { should match /"server": true/ }
37 | end
38 |
39 | describe file('/var/log/consul/consul-agent.log') do
40 | it { should be_file }
41 | it { should be_owned_by('consul') }
42 |     its(:content) { should contain "New leader elected:" }
43 | end
44 |
45 | describe 'datacenter' do
46 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/datacenters" do
47 | its(:exit_status) { should eq 0 }
48 | its(:stdout) { should contain "default" }
49 | end
50 | end
51 |
52 | end
53 |
--------------------------------------------------------------------------------
/test/integration/basic-agent/serverspec/consul_agent_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | describe 'consul agent' do
4 |
5 | if %w(ubuntu).include? os[:family]
6 | describe service('consul') do
7 | it { should be_enabled }
8 | it { should be_running }
9 | end
10 | end
11 |
12 | describe user('consul') do
13 | it { should exist }
14 | it { should belong_to_group 'consul' }
15 | end
16 |
17 | %w(/opt/consul /opt/consul/bin /opt/consul/data /etc/consul.d).each do |dir|
18 | describe file(dir) do
19 | it { should be_directory }
20 | it { should be_owned_by('consul') }
21 | end
22 | end
23 |
24 | describe file('/etc/consul.conf') do
25 | it { should be_file }
26 |     its(:content) { should match /"server": true/ }
27 | end
28 |
29 | describe file('/var/log/consul/consul-agent.log') do
30 | it { should be_file }
31 | it { should be_owned_by('consul') }
32 |     its(:content) { should contain 'New leader elected:' }
33 | end
34 |
35 | describe port(8300) do
36 | it { should be_listening.with('tcp') }
37 | end
38 | describe port(8301) do
39 | it { should be_listening.with('tcp') }
40 | end
41 |
42 | describe port(8500) do
43 | it { should be_listening.on('127.0.0.1').with('tcp') }
44 | end
45 |
46 | describe 'UI should be disabled' do
47 | describe command 'curl -s -I http://127.0.0.1:8500/ui/' do
48 | its(:exit_status) { should eq 0 }
49 | its(:stdout) { should contain 'HTTP/1.1 404 Not Found' }
50 | end
51 | end
52 |
53 | end
54 |
--------------------------------------------------------------------------------
/tasks/ad-hocs/build-raft-peers.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: adhoc | rebuild peers | Group by consul server
4 | group_by:
5 | key="consul_server"
6 |
7 | - name: adhoc | rebuild peers | Construct IPs
8 | set_fact:
9 |     consul_list_of_servers_variables: "{{ [ hostvars[item]['ansible_eth0']['ipv4']['address'] ] | union(consul_list_of_servers_variables | default([])) }}"
10 | with_items: "{{ groups['consul_server'] | default([]) }}"
11 |
12 | - name: adhoc | rebuild peers | Construct IPs with Ports
13 | set_fact:
14 | list_of_servers: "{{ consul_list_of_servers_variables | join(':' + consul_server_port_server | string + ',') }}:{{ consul_server_port_server | string }}"
15 |
16 | - name: adhoc | rebuild peers | stop consul
17 | service:
18 | name="consul"
19 | state="stopped"
20 | run_once: true
21 | delegate_to: "{{ item }}"
22 | with_items: "{{ groups['consul_server'] | default([]) }}"
23 | when: "consul_service == 'service'"
24 |
25 | - name: adhoc | rebuild peers | copy new peer
26 | copy:
27 | content="{{ list_of_servers.split(',') | to_json }}"
28 | dest="{{ consul_raft_file }}"
29 | owner="{{ consul_user }}"
30 | group="{{ consul_group }}"
31 | mode="0755"
32 | backup=yes
33 | run_once: true
34 | delegate_to: "{{ item }}"
35 | with_items: "{{ groups['consul_server'] | default([]) }}"
36 |
37 | - name: adhoc | rebuild peers | start consul
38 | service:
39 | name="consul"
40 | state="started"
41 | run_once: true
42 | delegate_to: "{{ item }}"
43 | with_items: "{{ groups['consul_server'] | default([]) }}"
44 | when: "consul_service == 'service'"
45 |
--------------------------------------------------------------------------------
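To make the join/split arithmetic above concrete: with three servers at
hypothetical addresses 10.0.0.1-10.0.0.3 and consul_server_port_server set to
8300, list_of_servers renders as "10.0.0.1:8300,10.0.0.2:8300,10.0.0.3:8300",
and the copy task splits it back apart and writes the raft peers file as JSON:

    ["10.0.0.1:8300", "10.0.0.2:8300", "10.0.0.3:8300"]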
/demo/templates/superapp.py.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from wsgiref.simple_server import make_server
3 | import psycopg2
4 |
5 | def build_response():
6 | conn_string = "host='localhost' dbname='superapp' user='superapp' password='superapp'"
7 | print "Connecting to database\n ->%s" % (conn_string)
8 |
9 | try:
10 | conn = psycopg2.connect(conn_string)
11 | postgres_status = " RUNNING "
12 |         print "Connected to psql"
13 |     except psycopg2.Error:
14 |         postgres_status = " NOT RUNNING :("
15 |         print "NOT connected to psql"
16 |
17 | machine = "{{ inventory_hostname }}"
18 | my_color = "{{ color }}"
19 |
20 |     body = "<html>"
21 |     body += "\n"
22 |     body += "<head>"
23 |     body += "<title>"
24 |     body += "A Small Hello"
25 |     body += "</title>"
26 |     body += "</head><body>"
27 |     body += "Hi I am " + machine + "<br>"
28 |     body += "Postgresql status " + postgres_status + "<br>"
29 |     body += "</body>"
30 |     body += "</html>"
31 |     return body
32 |
33 | def hello_world_app(environ, start_response):
34 | machine = "{{ inventory_hostname }}"
35 | status = '200 OK' # HTTP Status
36 | headers = [('Content-type', 'text/html'), ('x-server',machine)] # HTTP Headers
37 | start_response(status, headers)
38 |
39 | # The returned object is going to be printed
40 | body = build_response()
41 | return [body]
42 |
43 | httpd = make_server('', 8000, hello_world_app)
44 | print "Serving on port 8000..."
45 |
46 | # Serve until process is killed
47 | httpd.serve_forever()
48 |
--------------------------------------------------------------------------------
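Once rendered and started (demo2 installs it as an init.d service), the app
answers on port 8000 and reports the serving host in an x-server response
header, so curl -si http://localhost:8000/ against a load balancer shows
which backend answered and whether that backend's local PostgreSQL is
reachable.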
/demo/demo2.yml:
--------------------------------------------------------------------------------
1 |
2 | ---
3 |
4 | - name: Demo2 play
5 | hosts: all
6 | become: True
7 | vars_files:
8 | - "vars/common_var.yml"
9 | - "vars/{{inventory_hostname}}_var.yml"
10 | pre_tasks:
11 |
12 | - name: Create your local user
13 | user:
14 | name="{{ localuser }}"
15 | home="{{ localhome }}"
16 | shell="/bin/bash"
17 | append="true"
18 | group="admin"
19 | comment="{{localuser}}"
20 |
21 |     - name: Put your authorized_key
22 | authorized_key:
23 | key="{{lookup('file', '~/.ssh/id_rsa.pub')}}"
24 | user="{{localuser}}"
25 | manage_dir=yes
26 | ignore_errors: yes
27 |
28 | - name : Change PS1
29 | lineinfile:
30 | dest="{{ localhome }}/.bashrc"
31 | insertafter="EOF"
32 | line="export PS1=\"{{ PS1 }}\""
33 |
34 | - name: Update apt cache
35 | apt:
36 | update_cache=yes
37 | cache_valid_time=360
38 |
39 | # Install test super app service
40 | - name: Install superapp
41 | template:
42 | src="templates/superapp.py.j2"
43 | dest="{{superapp_pwd}}/superapp.py"
44 |
45 | - name: Install superapp service
46 | template:
47 | src="templates/superapp.sh.j2"
48 | dest="/etc/init.d/superapp"
49 | mode=0755
50 |
51 | - name: Install demo packages
52 | apt:
53 | name="{{item}}"
54 |       with_items: "{{ packages_to_install }}"
55 | when : packages_to_install is defined
56 |
57 | - name: Enable superapp service
58 | service:
59 | name=superapp
60 | state=started
61 |
62 | roles :
63 | - consul
--------------------------------------------------------------------------------
/templates/haproxy-reload.j2:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | LOG_FILE="{{ consul_template_log_file }}"
4 | exec >> "${LOG_FILE}"
5 | exec 2>&1
6 |
7 | ## Log to consul template
8 | log(){
9 | echo "$(date) haproxy-reload script: ${1}"
10 | }
11 |
12 | ## Run sudo command
13 | sudo_cmd(){
14 | sudo ${*}
15 | }
16 |
17 | ## Get current pids
18 | log "Pre-reload HAproxy $(pidof haproxy)"
19 |
20 | ## Check HAProxy config
21 | HA_PROXY_CONFIG_ERROR=$( haproxy -f /etc/haproxy/haproxy.cfg -c 2>&1 )
22 | HA_PROXY_RC="$?"
23 | if [ "${HA_PROXY_RC}" != "0" ]; then
24 | log "HAProxy configtest failure"
25 | log "${HA_PROXY_CONFIG_ERROR}"
26 | exit 1
27 | fi
28 |
29 | ## Reload HA Proxy
30 | #{% if consul_service == "service" %}
31 | # Service
32 | HA_PROXY_ERROR=$( sudo /usr/sbin/service haproxy reload 2>&1 )
33 | HA_PROXY_RC="$?"
34 | #{% elif consul_service == "s6" %}
35 | # s6
36 | # https://www.mail-archive.com/supervision@list.skarnet.org/msg01213.html
37 | # TODO in s6 we must check config file
38 | HAPROXY_CURRENT=$(readlink /haproxy-current)
39 | HAPROXY_ALT=$(readlink /haproxy-alt)
40 |
41 | sudo_cmd s6-svc -O "${HAPROXY_CURRENT}"
42 | sudo_cmd s6-svok "${HAPROXY_CURRENT}"
43 | HA_PROXY_ERROR_1="$?"
44 |
45 | sudo_cmd s6-svc -u "${HAPROXY_ALT}"
46 | sudo_cmd s6-svok "${HAPROXY_ALT}"
47 | HA_PROXY_ERROR_2="$?"
48 |
49 | HA_PROXY_RC=$(( HA_PROXY_ERROR_1 || HA_PROXY_ERROR_2 ))  # non-zero if either instance failed its check
50 |
51 | sudo_cmd ln -sfn "${HAPROXY_ALT}" /haproxy-current
52 | sudo_cmd ln -sfn "${HAPROXY_CURRENT}" /haproxy-alt
53 | #{% endif %}
54 |
55 | if [ "${HA_PROXY_RC}" != "0" ]; then
56 | log "HAProxy reload failure"
57 | log "${HA_PROXY_ERROR}"
58 | exit 1
59 | fi
60 |
61 | log "Post-reload HAproxy $(pidof haproxy)"
62 | log "HAProxy reloaded successfully"
63 |
64 | exit 0
65 |
--------------------------------------------------------------------------------
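The s6 branch above implements the two-directory reload trick from the linked
mail-archive thread: /haproxy-current and /haproxy-alt are symlinks to two
alternating haproxy service directories. On reload, the current instance is
marked "once at most" (s6-svc -O, so s6 will not restart it when it exits),
the alternate instance is brought up (s6-svc -u) with the freshly written
config, and the two symlinks are swapped so the next reload flips back the
other way.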
/demo/demo3.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Demo3 play
4 | hosts: all
5 | become: True
6 | vars_files:
7 | - "vars/common_var.yml"
8 | - "vars/{{inventory_hostname}}_var.yml"
9 | pre_tasks:
10 |
11 | - name: Create your local user
12 | user:
13 | name="{{ localuser }}"
14 | home="{{ localhome }}"
15 | shell="/bin/bash"
16 | append="true"
17 | group="admin"
18 | comment="{{localuser}}"
19 |
20 |     - name: Put your authorized_key
21 | authorized_key:
22 | key="{{lookup('file', '~/.ssh/id_rsa.pub')}}"
23 | user="{{localuser}}"
24 | manage_dir=yes
25 | ignore_errors: yes
26 |
27 | - name : Change PS1
28 | lineinfile:
29 | dest="{{ localhome }}/.bashrc"
30 | insertafter="EOF"
31 | line="export PS1=\"{{ PS1 }}\""
32 |
33 | - name: Update apt cache
34 | apt:
35 | update_cache=yes
36 | cache_valid_time=360
37 |
38 | - name: Install demo packages
39 | apt:
40 | name="{{item}}"
41 |       with_items: "{{ packages_to_install }}"
42 |
43 | - name : Configure postgresql
44 | lineinfile:
45 | dest=/etc/postgresql/9.1/main/postgresql.conf
46 | regexp='^listen_addresses'
47 | line="listen_addresses = '*'"
48 | register: postgresql_conf
49 |
50 | - name : Configure postgresql hba.conf
51 | lineinfile:
52 | dest=/etc/postgresql/9.1/main/pg_hba.conf
53 | insertafter="EOF"
54 | line="host all all 0.0.0.0/0 md5"
55 |
56 | - name: Create postgresql db
57 | postgresql_db:
58 | name=superapp
59 | become_user: postgres
60 | when: "'postgresql' in packages_to_install"
61 |
62 | - name : Create postgresql user
63 | postgresql_user:
64 | db=superapp
65 | name=superapp
66 | password=superapp
67 | become_user: postgres
68 |
69 | - name : Service postgresql
70 | service:
71 | name=postgresql
72 | state=restarted
73 |
74 | roles :
75 | - consul
--------------------------------------------------------------------------------
/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Include Ad-hoc
4 | include: ad-hocs/main.yml
5 |   when: (consul_adhoc_build_raft_peers is defined and consul_adhoc_build_raft_peers) or (consul_adhoc_clear_data_dir is defined and consul_adhoc_clear_data_dir)
6 |
7 | - name: Include Packages
8 | include: install/packages.yml
9 |
10 | - name: Include IP match
11 | include: ip-match.yml
12 | when: consul_network_autobind is defined and consul_network_autobind
13 |
14 | - name: Include install main file
15 | include: install/main.yml
16 |
17 | - name: Include consul service
18 | include: consul-services.yml
19 | when: consul_producer
20 |
21 | # At this point we should flush handler
22 | - meta: flush_handlers
23 |
24 | # block using service service
25 | - block:
26 |
27 | - name: Ensure consul service is running (service)
28 | service:
29 | name="consul"
30 | state="started"
31 |
32 |   - name: Ensure haproxy service is Enabled and Started (service)
33 | service:
34 | name="haproxy"
35 | state="started"
36 | when: consul_consumer
37 |
38 |   - name: Ensure consul-template service is Enabled and Started (service)
39 | service:
40 | name="consul-template"
41 | state="started"
42 | when: consul_consumer
43 |
44 | when: "consul_service == 'service'"
45 | # end block using service service
46 |
47 |
48 | # block using service s6
49 | - block:
50 |
51 | - name: Ensure consul service is running (s6)
52 | s6:
53 | name="consul"
54 | state="started"
55 | service_src="{{ consul_s6_servicedir }}"
56 | service_dir="{{ consul_s6_servicedir }}"
57 |
58 |   - name: Ensure haproxy-current service is Enabled and Started (s6)
59 | s6:
60 | name="haproxy-current"
61 | state="started"
62 | service_src="/"
63 | service_dir="/"
64 | when: consul_consumer
65 |
66 | - name: Ensure haproxy-alt service is Disabled (s6)
67 | s6:
68 | name="haproxy-alt"
69 | state="stopped"
70 | service_src="/"
71 | service_dir="/"
72 | when: consul_consumer
73 |
74 |   - name: Ensure consul-template service is Enabled and Started (s6)
75 | s6:
76 | name="consul-template"
77 | state="started"
78 | service_src="{{ consul_s6_servicedir }}"
79 | service_dir="{{ consul_s6_servicedir }}"
80 | when: consul_consumer
81 |
82 | when: "consul_service == 's6'"
83 | # end block using service s6
84 |
--------------------------------------------------------------------------------
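The meta: flush_handlers above is load-bearing: it forces any restart/reload
handlers queued by the install and service tasks to run immediately, so the
"Ensure ... is running" blocks that follow observe the final state instead of
racing handlers that would otherwise only fire at the end of the play.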
/templates/consul-agent.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "datacenter": "{{ consul_datacenter }}",
3 | "client_addr": "{{ consul_client_addr }}",
4 | "domain": "{{ consul_domain }}",
5 | "data_dir": "{{ consul_data_dir }}",
6 | "log_level": "{{ consul_log_level }}",
7 | "enable_syslog": {{ consul_log_syslog | to_json }},
8 | "rejoin_after_leave": {{ consul_rejoin_after_leave | to_json }},
9 |
10 | "ui": {{ consul_ui | to_json }},
11 | {# Either use consul_start_join or consul_retry_join #}
12 | {% if consul_retry_join is defined and consul_retry_join %}
13 | "retry_join": {{ consul_servers_list | to_json }},
14 | "retry_interval": "{{ consul_retry_interval }}",
15 | "retry_max": {{ consul_retry_max }},
16 | {% elif consul_start_join is defined and consul_start_join %}
17 | "start_join": {{ consul_start_join | to_json }},
18 | {% endif %}
19 | "enable_script_checks": true,
20 | {# Ports #}
21 | "ports": {
22 | "http": {{ consul_http_port }},
23 | "https": {{ consul_https_port }},
24 | "server": {{ consul_server_port_server }}
25 | },
26 |
27 | {# Telemetry #}
28 | {% if consul_telemetry is defined and consul_telemetry %}
29 | "telemetry": {
30 | {% if consul_statsd_address is defined and consul_statsd_address %}
31 | "statsd_address": {{ consul_statsd_address | to_json }},
32 | {% endif %}
33 | {% if consul_statsite_address is defined and consul_statsite_address %}
34 | "statsite_address": {{ consul_statsite_address | to_json }},
35 | {% endif %}
36 | {% if consul_statsite_prefix is defined and consul_statsite_prefix %}
37 | "statsite_prefix": {{ consul_statsite_prefix | to_json }},
38 | {% endif %}
39 | "disable_hostname": {{ consul_disable_hostname | to_json }}
40 | },
41 | {% endif %}
42 |
43 | {# LOGGING #}
44 | {% if consul_syslog is defined and consul_syslog %}
45 | "enable_syslog": true,
46 | {% endif %}
47 | {% if consul_syslog_facility is defined and consul_syslog_facility %}
48 | "syslog_facility": "{{ consul_syslog_facility}}",
49 | {% endif %}
50 |
51 | {% if consul_encrypt is defined %}
52 | "encrypt": "{{ consul_encrypt }}",
53 | {% endif %}
54 | {% if consul_server %}
55 | "server": {{ consul_server | to_json }},
56 | {% if consul_bootstrap_expect is defined %}
57 | "bootstrap_expect": {{ consul_bootstrap_expect | default(1) }},
58 | {% endif %}
59 | {% endif %}
60 | "leave_on_terminate": {{ consul_leave_on_terminate | to_json }}
61 | }
62 |
--------------------------------------------------------------------------------
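As the template comment notes, exactly one of consul_retry_join or
consul_start_join drives the join stanza. A minimal sketch with hypothetical
server addresses:

    consul_retry_join: true
    consul_servers_list: ["10.0.0.1", "10.0.0.2", "10.0.0.3"]
    consul_retry_interval: "30s"
    consul_retry_max: 0

renders "retry_join": ["10.0.0.1", "10.0.0.2", "10.0.0.3"] together with the
retry interval and retry cap in the generated agent config.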
/templates/consul-service.j2:
--------------------------------------------------------------------------------
1 | {% if item is mapping %}
2 |
3 | {% if 'weight' in consul_services[item.name] %}
4 | {% set weight_value = consul_services[item.name].pop('weight') %}
5 | {% if 'tags' in consul_services[item.name] %}
6 | {% set _ = consul_services[item.name]['tags'].append("WEIGHT:" ~ weight_value ) %}
7 | {% else %}
8 | {% set _ = consul_services[item.name].update({'tags': ['WEIGHT:' ~ weight_value]}) %}
9 | {% endif %}
10 | {% endif %}
11 |
12 | {% if 'local_port' in consul_services[item.name] %}
13 | {% set local_port = consul_services[item.name].pop('local_port') %}
14 | {% else %}
15 | {% set local_port = consul_services[item.name].get('port') %}
16 | {% endif %}
17 |
18 | {% if 'tags' in consul_services[item.name] %}
19 | {% set _ = consul_services[item.name]['tags'].append("local_port:" ~ local_port ) %}
20 | {% else %}
21 | {% set _ = consul_services[item.name].update({'tags': ['local_port:' ~ local_port]}) %}
22 | {% endif %}
23 |
24 | {% if 'tags' in consul_services[item.name] %}
25 | {% set _ = consul_services[item.name]['tags'].extend(item.add_tags) %}
26 | {% else %}
27 | {% set _ = consul_services[item.name].update({'tags': item.add_tags}) %}
28 | {% endif %}
29 |
30 | { {{ '' if consul_services[item.name].pop('haproxy', '') else '' }}
31 | "service": {{ consul_services[item.name] | to_nice_json }}
32 | }
33 |
34 | {% else %}
35 |
36 | {% if 'weight' in consul_services[item] %}
37 | {% set weight_value = consul_services[item].pop('weight') %}
38 | {% if 'tags' in consul_services[item] %}
39 | {% set _ = consul_services[item]['tags'].append("WEIGHT:" ~ weight_value ) %}
40 | {% else %}
41 | {% set _ = consul_services[item].update({'tags': ['WEIGHT:' ~ weight_value]}) %}
42 | {% endif %}
43 | {% endif %}
44 |
45 | {% if 'local_port' in consul_services[item] %}
46 | {% set local_port = consul_services[item].pop('local_port') %}
47 | {% else %}
48 | {% set local_port = consul_services[item].get('port') %}
49 | {% endif %}
50 |
51 | {% if 'tags' in consul_services[item] %}
52 | {% set _ = consul_services[item]['tags'].append("local_port:" ~ local_port ) %}
53 | {% else %}
54 | {% set _ = consul_services[item].update({'tags': ['local_port:' ~ local_port]}) %}
55 | {% endif %}
56 |
57 | { {{ '' if consul_services[item].pop('haproxy', '') else '' }}
58 | "service": {{ consul_services[item] | to_nice_json }}
59 | }
60 |
61 | {% endif %}
62 |
--------------------------------------------------------------------------------
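For illustration, a consul_services entry such as (hypothetical values):

    superssh:
      name: "superssh"
      port: 22
      local_port: 2222
      weight: 77
      tags: ["test"]

passes through the pops and tag rewrites above and renders roughly as:

    {
      "service": {
        "name": "superssh",
        "port": 22,
        "tags": ["test", "WEIGHT:77", "local_port:2222"]
      }
    }

weight and local_port are stripped from the registration payload and survive
only as tags, and any haproxy block is dropped from it entirely.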
/test/integration/tags/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 |
4 | describe 'hellofresh service' do
5 | describe command 'curl -s -v http://127.0.0.1:8500/v1/health/service/hellofresh' do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"hellofresh"' }
8 | its(:stdout) { should match /"Tags":\[.*"test".*/ }
9 | its(:stdout) { should match /"Tags":\[.*"v1\.1\.5".*/ }
10 | its(:stdout) { should match /"Tags":\[.*"WEIGHT:77".*/ }
11 | its(:stdout) { should match /"Tags":\[.*"env:staging".*/ }
12 | its(:stdout) { should contain '"Port":80' }
13 | its(:stdout) { should contain '"Status":"passing"' }
14 | end
15 | end
16 |
17 | describe 'superssh-different-name service' do
18 | describe command 'curl -s -v http://127.0.0.1:8500/v1/health/service/superssh-different-name' do
19 | its(:exit_status) { should eq 0 }
20 | its(:stdout) { should contain '"Service":"superssh-different-name"' }
21 | its(:stdout) { should match /"Tags":\[.*"env:staging".*/ }
22 | its(:stdout) { should match /"Tags":\[.*"test".*/ }
23 | its(:stdout) { should match /"Tags":\[.*"v2\.1\.2".*/ }
24 | its(:stdout) { should contain '"Port":22' }
25 | its(:stdout) { should contain '"Status":"passing"' }
26 | end
27 | end
28 |
29 | describe 'superdb' do
30 | describe command 'curl -s -v http://127.0.0.1:8500/v1/health/service/superdb' do
31 | its(:exit_status) { should eq 0 }
32 | its(:stdout) { should contain '"Service":"superdb"' }
33 | its(:stdout) { should match /"Tags":\[.*"env:staging".*/ }
34 | its(:stdout) { should match /"Tags":\[.*"test".*/ }
35 | its(:stdout) { should match /"Tags":\[.*"v3\.9\.2".*/ }
36 | its(:stdout) { should contain '"Port":2122' }
37 |     its(:stdout) { should match /"Status":"(warning|critical)"/ }
38 | end
39 | end
40 |
41 | describe 'supertaggedapp' do
42 | describe command 'curl -s -v http://127.0.0.1:8500/v1/health/service/supertaggedapp' do
43 | its(:exit_status) { should eq 0 }
44 | its(:stdout) { should contain '"Service":"supertaggedapp"' }
45 | its(:stdout) { should match /"Tags":\[.*"env:staging".*/ }
46 | its(:stdout) { should match /"Tags":\[.*"test".*/ }
47 | its(:stdout) { should match /"Tags":\[.*"v0\.1\.1".*/ }
48 | its(:stdout) { should match /"Tags":\[.*"from:producer".*/ }
49 | its(:stdout) { should contain '"Port":9998' }
50 |     its(:stdout) { should match /"Status":"(warning|critical)"/ }
51 | end
52 | end
53 |
--------------------------------------------------------------------------------
/templates/consul-init.d.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ### BEGIN INIT INFO
3 | # Provides: consul
4 | # Required-Start: $local_fs $network $remote_fs $syslog
5 | # Required-Stop: $local_fs $remote_fs $syslog
6 | # Default-Start: 2 3 4 5
7 | # Default-Stop: 0 1 6
8 | # Short-Description: consul agent
9 | # Description: This script starts and stops the consul service daemon
10 | ### END INIT INFO
11 |
12 | NAME="consul"
13 | DESC="consul-service"
14 |
15 | RUN_AS_USER="{{ consul_user }}"
16 | RUN_AS_GROUP="{{ consul_group }}"
17 |
18 | BASE_DIR="{{ consul_home_dir }}"
19 | GREP_NAME="consul agent"
20 |
21 | RUN_CMD="{{ consul_bin_dir }}/consul_agent_run.sh"
22 | #How many sec to wait before checking if proc is up
23 | SLEEP_FOR=1
24 | STOP_TIMEOUT=10
25 |
26 | IGNORE_PLAY_PID=1
27 |
28 | PROG_PID() {
29 | check_prog=$(ps aux| grep -e "$GREP_NAME" | grep -v grep | awk '{ print $2 }' )
30 | echo $check_prog
31 | }
32 |
33 | start() {
34 | PID=$(PROG_PID)
35 | if [ -n "$PID" ] ; then
36 | echo "$NAME is already running (PID: $PID)"
37 | else
38 | echo -n "Starting $NAME "
39 | [ $IGNORE_PLAY_PID == 1 ] && rm -f $BASE_DIR/RUNNING_PID
40 |     # Start quietly in the background with the configured uid and gid
41 | start-stop-daemon --start --background --name $NAME --chdir $BASE_DIR --chuid $RUN_AS_USER --group $RUN_AS_GROUP --exec $RUN_CMD
42 |
43 | [ "$?" -ne 0 ] && echo "[ FAILED ]" && exit 1
44 | let kwait=$SLEEP_FOR
45 | count=0;
46 | until [ $count -gt $kwait ]
47 | do
48 | echo -n "."
49 | sleep 1
50 | let count=$count+1;
51 | done
52 | PID=$(PROG_PID)
53 | if [ -n "$PID" ]; then
54 | echo "[ OK ]"
55 | else
56 | echo "[ FAILED ]"
57 | exit 1
58 | fi
59 | fi
60 | }
61 |
62 | stop() {
63 | PID=$(PROG_PID)
64 | if [ -n "$PID" ]; then
65 |     echo -n "Stopping $NAME "
66 | kill -INT $PID
67 | timeout $STOP_TIMEOUT tail --pid=$PID -f /dev/null
68 | [ "$?" -ne 0 ] && echo "[ FAILED ]" && exit 1
69 | echo "[ OK ]"
70 | fi
71 | }
72 |
73 | status() {
74 | PID=$(PROG_PID)
75 | if [ -n "$PID" ]; then
76 | echo "$NAME is running with PID:$PID"
77 | else
78 | echo "$NAME is not running"
79 | fi
80 | }
81 |
82 | case "$1" in
83 | start)
84 | start ;;
85 | stop)
86 | stop ;;
87 | restart)
88 | stop
89 | start ;;
90 | status)
91 | status ;;
92 | *)
93 | echo "Usage: $0 {start|stop|restart|status}"
94 | exit 1 ;;
95 | esac
96 | exit 0
97 |
--------------------------------------------------------------------------------
/.kitchen.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | driver :
4 | name : docker
5 | socket : unix:///var/run/docker.sock
6 | use_sudo : false
7 | # forward:
8 | # - 3212:3212
9 |
10 | verifier :
11 | name : serverspec
12 | remote_exec : false
13 | default_pattern : true
14 |
15 | provisioner:
16 | name : ansible_push
17 | ansible_config : "test/ansible.cfg"
18 | chef_bootstrap_url : nil
19 | idempotency_test : True
20 | diff : True
21 | sudo : True
22 | verbose : "vvvv"
23 |
24 | platforms :
25 |
26 | - name : "alpine3.4-v2.3"
27 | driver_config :
28 | image : harningt/base-alpine-s6-overlay:3.4
29 | platform : alpine
30 | provision_command:
31 | - apk add python iproute2 net-tools socat
32 | - mkdir -p /opt # needs to be in ansible
33 |
34 | provisioner :
35 | ansible_playbook_bin : "$(avm path v2.3)ansible-playbook"
36 | extra_vars : {consul_service: s6, consul_s6_servicedir: /var/run/s6/services}
37 |
38 | - name : "ubuntu1404-v2.3"
39 | driver_config :
40 | image : ubuntu:14.04
41 | platform : ubuntu
42 | provision_command:
43 | - test -e /usr/bin/socat || (apt-get -y update && apt-get install -y iproute2 net-tools netcat socat)
44 |
45 | provisioner :
46 | ansible_playbook_bin : "$(avm path v2.3)ansible-playbook"
47 |
48 | - name : "ubuntu1604-v2.3"
49 | driver_config :
50 | image : ubuntu:16.04
51 | platform : ubuntu
52 | provision_command:
53 | - test -e /usr/bin/python || (apt-get -y update && apt-get install -y python-minimal)
54 | - test -e /usr/bin/socat || (apt-get -y update && apt-get install -y iproute2 net-tools netcat socat)
55 | provisioner :
56 | ansible_playbook_bin : "$(avm path v2.3)ansible-playbook"
57 |
58 | suites :
59 | - name : basic-server
60 | provisioner :
61 | playbook : "test/integration/basic-server/server.yml"
62 |
63 | - name : basic-agent
64 | provisioner :
65 | playbook : "test/integration/basic-agent/agent.yml"
66 |
67 | - name : tags
68 | provisioner :
69 | playbook : "test/integration/tags/tags.yml"
70 |
--------------------------------------------------------------------------------
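Each suite/platform pair above becomes one test-kitchen instance: kitchen
converges the suite's playbook on the platform's Docker image with the pinned
ansible-playbook binary, and idempotency_test: True makes the provisioner run
the play a second time and fail if anything still reports changed.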
/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Place holder to dispatch multi handler
4 | - name: restart consul service
5 | debug:
6 | msg="Notifier restart consul service"
7 | changed_when: true
8 | notify:
9 | - enable consul service (service)
10 | - restart consul service (service)
11 | - Reload s6 service
12 | - restart consul service (s6)
13 |
14 | - name: enable consul service (service)
15 | service:
16 | name="consul"
17 | enabled="yes"
18 | when: "consul_service == 'service'"
19 |
20 | - name: restart consul service (service)
21 | service:
22 | name="consul"
23 | state="restarted"
24 | when: "consul_service == 'service' and consul_start_service"
25 |
26 | - name: Reload s6 service
27 | shell: s6-svscanctl -a {{ consul_s6_servicedir }}
28 | when: "consul_service == 's6'"
29 |
30 | - name: restart consul service (s6)
31 | s6:
32 | name="consul"
33 | state="restarted"
34 | service_src="{{ consul_s6_servicedir }}"
35 | service_dir="{{ consul_s6_servicedir }}"
36 | when: "consul_service == 's6'"
37 |
38 | - name: reload consul service
39 | debug:
40 | msg="Notifier reload consul service"
41 | changed_when: true
42 | notify:
43 | - reload consul service (service)
44 | - Reload s6 service
45 | - restart consul service (s6)
46 |
47 | #TODO: At the moment restart and reload do same thing until we fix kill -HUP option
48 | - name: reload consul service (service)
49 | service:
50 | name="consul"
51 | state="restarted"
52 | when: "consul_service == 'service' and consul_start_service"
53 |
54 | - name: restart consul-template service
55 | debug:
56 | msg="Notifier restart consul-template service"
57 | changed_when: true
58 | notify:
59 | - restart consul-template service (service)
60 | - restart consul-template service (s6)
61 |
62 | - name: restart consul-template service (service)
63 | service:
64 | name="consul-template"
65 | state="restarted"
66 | enabled="yes"
67 | when: "consul_service == 'service'"
68 |
69 | - name: restart consul-template service (s6)
70 | s6:
71 | name="consul-template"
72 | state="restarted"
73 | service_src="{{ consul_s6_servicedir }}"
74 | service_dir="{{ consul_s6_servicedir }}"
75 | when: "consul_service == 's6'"
76 |
77 | - name: reload haproxy service
78 | debug:
79 |     msg="Notifier reload haproxy service"
80 | changed_when: true
81 | notify:
82 | - reload haproxy service (service)
83 | - Reload s6 service
84 | - reload haproxy service (s6)
85 |
86 | - name: reload haproxy service (service)
87 | service:
88 | name="haproxy"
89 | state="reloaded"
90 | enabled="yes"
91 | when: "consul_service == 'service'"
92 |
93 | - name: reload haproxy service (s6)
94 | shell: "{{ consul_bin_dir }}/haproxy-reload"
95 | when: "consul_service == 's6'"
96 |
97 | - name: reload consul service config
98 | command: pkill -1 consul
99 |
--------------------------------------------------------------------------------
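The debug + notify placeholders above act as dispatchers: task files only ever
notify the generic names ("restart consul service", "restart consul-template
service", "reload haproxy service"), and the when: guards on the concrete
handlers pick whichever supervisor (init.d service or s6) the host actually
uses.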
/templates/consul-template-init.d.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ### BEGIN INIT INFO
3 | # Provides: consul-template
4 | # Required-Start: $local_fs $network $remote_fs $syslog
5 | # Required-Stop: $local_fs $remote_fs $syslog
6 | # Default-Start: 2 3 4 5
7 | # Default-Stop: 0 1 6
8 | # Short-Description: consul template
9 | # Description: This script starts and stops the consul-template service daemon
10 | ### END INIT INFO
11 |
12 | NAME="consul-template"
13 | DESC="consul-template-service"
14 |
15 | RUN_AS_USER="{{ consul_user }}"
16 | RUN_AS_GROUP="{{ consul_group }}"
17 |
18 | BASE_DIR="{{ consul_home_dir }}"
19 | GREP_NAME="bin/consul-template"
20 |
21 | RUN_CMD="{{ consul_bin_dir }}/consul-template-run.sh"
22 | #
23 | STOPCMD="kill -9"
24 | #How many sec to wait before checking if proc is up
25 | SLEEP_FOR=1
26 |
27 | IGNORE_PLAY_PID=1
28 |
29 | PROG_PID() {
30 | check_prog=$(ps aux| grep -e "$GREP_NAME" | grep -v grep | awk '{ print $2 }' )
31 | echo $check_prog
32 | }
33 |
34 | start() {
35 | PID=$(PROG_PID)
36 | if [ -n "$PID" ] ; then
37 | echo "$NAME is already running (PID: $PID)"
38 | else
39 | echo -n "Starting $NAME "
40 | [ $IGNORE_PLAY_PID == 1 ] && rm -f $BASE_DIR/RUNNING_PID
41 |     # Start quietly in the background with the configured uid and gid
42 | start-stop-daemon --start --background --name $NAME --chdir $BASE_DIR --chuid $RUN_AS_USER --group $RUN_AS_GROUP --exec $RUN_CMD
43 |
44 | [ "$?" -ne 0 ] && echo "[ FAILED ]" && exit 1
45 | let kwait=$SLEEP_FOR
46 | count=0;
47 | until [ $count -gt $kwait ]
48 | do
49 | echo -n "."
50 | sleep 1
51 | let count=$count+1;
52 | done
53 | PID=$(PROG_PID)
54 | if [ -n "$PID" ]; then
55 | echo "[ OK ]"
56 | else
57 | echo "[ FAILED ]"
58 | exit 1
59 | fi
60 | fi
61 | }
62 |
63 | stop() {
64 | PID=$(PROG_PID)
65 | if [ -n "$PID" ]; then
66 |     echo -n "Stopping $NAME "
67 | kill -9 $PID
68 | [ "$?" -ne 0 ] && echo "[ FAILED ]" && exit 1
69 |
70 | let kwait=$SLEEP_FOR
71 | count=0;
72 | until [ $count -gt $kwait ]
73 | do
74 | echo -n ".";
75 | sleep 1
76 | let count=$count+1;
77 | done
78 | PID=$(PROG_PID)
79 | if [ -n "$PID" ]; then
80 | echo "[ FAILED ]"
81 | exit 1
82 | else
83 | echo "[ OK ]"
84 | fi
85 | else
86 | echo "$NAME not running."
87 | fi
88 | }
89 |
90 | status() {
91 | PID=$(PROG_PID)
92 | if [ -n "$PID" ]; then
93 | echo "$NAME is running with PID:$PID"
94 | else
95 | echo "$NAME is not running"
96 | fi
97 | }
98 |
99 | case "$1" in
100 | start)
101 | start ;;
102 | stop)
103 | stop ;;
104 | restart)
105 | stop
106 | start ;;
107 | status)
108 | status ;;
109 | *)
110 | echo "Usage: $0 {start|stop|restart|status}"
111 | exit 1 ;;
112 | esac
113 | exit 0
114 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent1/agent.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: agent
4 | hosts: all
5 | become: True
6 | vars:
7 | consul_server : True
8 | consul_bootstrap_expect : 1
9 | consul_ui : false
10 |
11 | consul_services :
12 | # External service i.e. hellofresh
13 | hellofresh :
14 | name : "hellofresh"
15 | port : 80
16 | address : "hellofresh.com"
17 | check :
18 | script : "curl http://www.hellofresh.com > /dev/null"
19 | interval : "120s"
20 | haproxy :
21 | server_options : "check inter 120s fastinter 5s downinter 8s rise 3 fall 2"
22 | service_mode : "http"
23 | # A local service superssh that uses localport
24 | superssh :
25 | name : "superssh-different-name"
26 | tags :
27 | - "test"
28 | port : 22
29 | local_port : 2222
30 | check :
31 | script : "netstat -ltnp | grep ':22 ' > /dev/null 2>&1"
32 | interval : "60s"
33 | haproxy :
34 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
35 | service_mode : "tcp"
36 | # A local service superdb that is failing
37 | superdb :
38 | name : "superdb"
39 | tags :
40 | - "userdb"
41 | - "v1.2"
42 | port : 2122 # failing port :)
43 | check :
44 | script : "netstat -ltnp | grep ':2123 ' > /dev/null 2>&1"
45 | interval : "60s"
46 | haproxy :
47 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
48 | service_mode : "tcp"
49 | # A local service superdb that is failing
50 | superapp :
51 | name : "superapp"
52 | tags :
53 | - "v1"
54 | port : 9999
55 | check :
56 | script : "curl localhost:9999 > /dev/null 2>&1"
57 | interval : "60s"
58 | haproxy :
59 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
60 | service_mode : "tcp"
61 |
62 | consul_producer : True
63 | consul_producer_services : [ 'superssh', 'hellofresh', "superdb" ]
64 |
65 | consul_consumer : True
66 | consul_consumer_services : [ 'superdb','superssh', "hellofresh", "superapp" ]
67 |
68 | pre_tasks:
69 | - name: Update apt cache
70 | apt:
71 | update_cache=yes
72 | cache_valid_time=360
73 |
74 | roles :
75 | - ansible-consul
76 |
77 | post_tasks:
78 | - name: Install socat
79 | apt:
80 | name="socat"
81 |
82 |     - name: Pause until everything is updated
83 |       pause:
84 |         prompt="Make sure that health checks pass and our services are part of the game"
85 |         seconds=20
--------------------------------------------------------------------------------
/test/integration/cluster-agent2/agent.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: agent
4 | hosts: all
5 | become: True
6 | vars:
7 | consul_server : True
8 | consul_bootstrap_expect : 1
9 | consul_ui : false
10 |
11 | consul_services :
12 | # External service i.e. hellofresh
13 | hellofresh :
14 | name : "hellofresh"
15 | port : 80
16 | address : "hellofresh.com"
17 | check :
18 | script : "curl http://www.hellofresh.com > /dev/null"
19 | interval : "120s"
20 | haproxy :
21 | server_options : "check inter 120s fastinter 5s downinter 8s rise 3 fall 2"
22 | service_mode : "http"
23 | # A local service superssh that uses localport
24 | superssh :
25 | name : "superssh-different-name"
26 | tags :
27 | - "test"
28 | port : 22
29 | local_port : 2222
30 | check :
31 | script : "netstat -ltnp | grep ':22 ' > /dev/null 2>&1"
32 | interval : "60s"
33 | haproxy :
34 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
35 | service_mode : "tcp"
36 | # A local service superdb that is failing
37 | superdb :
38 | name : "superdb"
39 | tags :
40 | - "userdb"
41 | - "v1.2"
42 | port : 2122 # failing port :)
43 | check :
44 | script : "netstat -ltnp | grep ':2123 ' > /dev/null 2>&1"
45 | interval : "60s"
46 | haproxy :
47 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
48 | service_mode : "tcp"
49 | # A local service superdb that is failing
50 | superapp :
51 | name : "superapp"
52 | tags :
53 | - "v1"
54 | port : 9999
55 | check :
56 | script : "curl localhost:9999 > /dev/null 2>&1"
57 | interval : "60s"
58 | haproxy :
59 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
60 | service_mode : "tcp"
61 |
62 | consul_producer : True
63 | consul_producer_services : [ 'superssh', 'hellofresh', "superdb" ]
64 |
65 | consul_consumer : True
66 | consul_consumer_services : [ 'superdb','superssh', "hellofresh", "superapp" ]
67 |
68 | pre_tasks:
69 | - name: Update apt cache
70 | apt:
71 | update_cache=yes
72 | cache_valid_time=360
73 |
74 | roles :
75 | - ansible-consul
76 |
77 | post_tasks:
78 | - name: Install socat
79 | apt:
80 | name="socat"
81 |
82 |     - name: Pause until everything is updated
83 |       pause:
84 |         prompt="Make sure that health checks pass and our services are part of the game"
85 |         seconds=20
--------------------------------------------------------------------------------
/test/integration/cluster-agent3/agent.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: agent
4 | hosts: all
5 | become: True
6 | vars:
7 | consul_server : True
8 | consul_bootstrap_expect : 1
9 | consul_ui : false
10 |
11 | consul_services :
12 | # External service i.e. hellofresh
13 | hellofresh :
14 | name : "hellofresh"
15 | port : 80
16 | address : "hellofresh.com"
17 | check :
18 | script : "curl http://www.hellofresh.com > /dev/null"
19 | interval : "120s"
20 | haproxy :
21 | server_options : "check inter 120s fastinter 5s downinter 8s rise 3 fall 2"
22 | service_mode : "http"
23 | # A local service superssh that uses localport
24 | superssh :
25 | name : "superssh-different-name"
26 | tags :
27 | - "test"
28 | port : 22
29 | local_port : 2222
30 | check :
31 | script : "netstat -ltnp | grep ':22 ' > /dev/null 2>&1"
32 | interval : "60s"
33 | haproxy :
34 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
35 | service_mode : "tcp"
36 | # A local service superdb that is failing
37 | superdb :
38 | name : "superdb"
39 | tags :
40 | - "userdb"
41 | - "v1.2"
42 | port : 2122 # failing port :)
43 | check :
44 | script : "netstat -ltnp | grep ':2123 ' > /dev/null 2>&1"
45 | interval : "60s"
46 | haproxy :
47 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
48 | service_mode : "tcp"
49 | # A local service superdb that is failing
50 | superapp :
51 | name : "superapp"
52 | tags :
53 | - "v1"
54 | port : 9999
55 | check :
56 | script : "curl localhost:9999 > /dev/null 2>&1"
57 | interval : "60s"
58 | haproxy :
59 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
60 | service_mode : "tcp"
61 |
62 | consul_producer : True
63 | consul_producer_services : [ 'superssh', 'hellofresh', "superdb" ]
64 |
65 | consul_consumer : True
66 | consul_consumer_services : [ 'superdb','superssh', "hellofresh", "superapp" ]
67 |
68 | pre_tasks:
69 | - name: Update apt cache
70 | apt:
71 | update_cache=yes
72 | cache_valid_time=360
73 |
74 | roles :
75 | - ansible-consul
76 |
77 | post_tasks:
78 | - name: Install socat
79 | apt:
80 | name="socat"
81 |
82 |     - name: Pause until everything is updated
83 |       pause:
84 |         prompt="Make sure that health checks pass and our services are part of the game"
85 |         seconds=20
--------------------------------------------------------------------------------
/test/integration/basic-agent/agent_vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_server : True
3 | consul_bootstrap_expect : 1
4 |
5 | consul_ui : false
6 | consul_haproxy_stats_add : "0.0.0.0"
7 | environment_name : "testing"
8 |
9 | consul_services :
10 | # External service i.e. hellofresh
11 | hellofresh :
12 | name : "hellofresh"
13 | port : 80
14 | local_port : 8080
15 | address : "127.0.0.1"
16 | tags :
17 | - "env:testing"
18 | check :
19 | script : "curl http://127.0.0.1:80 > /dev/null"
20 | interval : "10s"
21 | haproxy :
22 | server_options : "check inter 10s fastinter 5s downinter 8s rise 3 fall 2"
23 | service_mode : "http"
24 | # A local service superssh that uses localport
25 | superssh :
26 | name : "superssh-testing"
27 | tags :
28 | - "test"
29 | - "env:testing"
30 | weight : 77
31 | port : 22
32 | local_port : 2222
33 | check :
34 | script : "netstat -ltnp | grep ':22 ' > /dev/null 2>&1"
35 | interval : "60s"
36 | haproxy :
37 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
38 | service_mode : "tcp"
39 | # A local service superdb that is failing
40 | superdb :
41 | name : "superdb"
42 | tags :
43 | - "userdb"
44 | - "v1.2"
45 | - "env:testing"
46 | port : 2122 # failing port :)
47 | check :
48 | script : "netstat -ltnp | grep ':2123 ' > /dev/null 2>&1"
49 | interval : "60s"
50 | haproxy :
51 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
52 | service_mode : "tcp"
53 | # A local service superapp that is failing (not installed)
54 | superapp :
55 | name : "superapp"
56 | tags :
57 | - "v1"
58 | - "env:testing"
59 | port : 9999
60 | check :
61 | script : "curl localhost:9999 > /dev/null 2>&1"
62 | interval : "60s"
63 | haproxy :
64 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
65 | service_mode : "tcp"
66 |
67 | consul_producer : True
68 | consul_producer_services : [ 'superssh', 'hellofresh', "superdb" ]
69 |
70 | consul_consumer : True
71 | consul_consumer_services :
72 | - name: 'superdb'
73 | - 'superssh'
74 | - "hellofresh"
75 | - name: "superapp"
76 | tags_contains: "v1.2"
77 |
--------------------------------------------------------------------------------
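Note the two consumer-entry shapes in consul_consumer_services above: a bare
string consumes the service as defined, while the mapping form takes a name
plus optional filters such as tags_contains, so a consumer can restrict itself
to instances carrying a given tag.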
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Request for contributions
2 |
3 | Please contribute to this repository if any of the following is true:
4 | - You have expertise in community development, communication, or education
5 | - You want open source communities to be more collaborative and inclusive
6 | - You want to help lower the burden to first time contributors
7 |
8 | # How to contribute
9 |
10 | Prerequisites:
11 |
12 | - familiarity with [GitHub PRs](https://help.github.com/articles/using-pull-requests) (pull requests) and issues
13 | - knowledge of Markdown for editing `.md` documents
14 |
15 | In particular, this community seeks the following types of contributions:
16 |
17 | - ideas: participate in an Issues thread or start your own to have your voice
18 | heard
19 | - resources: submit a PR to add to [docs README.md](README.md) with links to related content
20 | - outline sections: help us ensure that this repository is comprehensive. If
21 | there is a topic that is overlooked, please add it, even if it is just a stub
22 | in the form of a header and single sentence. Initially, most things fall into
23 | this category
24 | - write: contribute your expertise in an area by helping us expand the included
25 | content
26 | - copy editing: fix typos, clarify language, and generally improve the quality
27 | of the content
28 | - formatting: help keep content easy to read with consistent formatting
29 | - code: Fix issues or contribute new features to this or any related projects
30 |
31 | # Conduct
32 |
33 | We are committed to providing a friendly, safe and welcoming environment for
34 | all, regardless of gender, sexual orientation, disability, ethnicity, religion,
35 | or similar personal characteristic.
36 |
37 | Please be kind and courteous. There's no need to be mean or rude.
38 | Respect that people have differences of opinion and that every design or
39 | implementation choice carries a trade-off and numerous costs. There is seldom
40 | a right answer, merely an optimal answer given a set of values and
41 | circumstances.
42 |
43 | Please keep unstructured critique to a minimum. If you have solid ideas you
44 | want to experiment with, make a fork and see how it works.
45 |
46 | We will exclude you from interaction if you insult, demean or harass anyone.
47 | That is not welcome behavior. We interpret the term "harassment" as
48 | including the definition in the
49 | [Citizen Code of Conduct](http://citizencodeofconduct.org/);
50 | if you have any lack of clarity about what might be included in that concept,
51 | please read their definition. In particular, we don't tolerate behavior that
52 | excludes people in socially marginalized groups.
53 |
54 | Private harassment is also unacceptable. No matter who you are, if you feel
55 | you have been or are being harassed or made uncomfortable by a community
56 | member, please contact one of the
57 | [hellofresh](https://github.com/orgs/hellofresh/people) core team
58 | immediately. Whether you're a regular contributor or a newcomer, we care about
59 | making this community a safe place for you and we've got your back.
60 |
61 | Likewise any spamming, trolling, flaming, baiting or other attention-stealing
62 | behavior is not welcome.
63 |
64 | # Communication
65 |
66 | GitHub issues are the primary way for communicating about specific proposed
67 | changes to this project.
68 |
69 | In all communication, please follow the conduct guidelines above. Language issues
70 | are often contentious and we'd like to keep discussion brief, civil and focused
71 | on what we're actually doing, not wandering off into too much imaginary stuff.
--------------------------------------------------------------------------------
/demo/templates/superapp.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ### BEGIN INIT INFO
3 | # Provides: superapp
4 | # Required-Start: $local_fs $remote_fs $network
5 | # Required-Stop: $local_fs $remote_fs $network
6 | # Default-Start: 2 3 4 5
7 | # Default-Stop: 0 1 6
8 | # Short-Description: Generic Program
9 | # Description: Generic Program is a generic program to do generic things with
10 | ### END INIT INFO
11 |
12 |
13 | # Documentation available at
14 | # http://refspecs.linuxfoundation.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptfunc.html
15 | # Debian provides some extra functions though
16 | . /lib/lsb/init-functions
17 |
18 |
19 | DAEMON_NAME="superapp"
20 | DAEMON_USER="{{ superapp_user }}"
21 | DAEMON_PATH="{{ superapp_bin }}"
22 | DAEMON_OPTS="{{ superapp_opts }}"
23 | DAEMON_PWD="{{ superapp_pwd }}"
24 | DAEMON_DESC=$(get_lsb_header_val $0 "Short-Description")
25 | DAEMON_PID="/var/run/${DAEMON_NAME}.pid"
26 | DAEMON_NICE=0
27 | DAEMON_LOG='/var/log/generic-prog'
28 |
29 | [ -r "/etc/default/${DAEMON_NAME}" ] && . "/etc/default/${DAEMON_NAME}"
30 |
31 | do_start() {
32 | local result
33 |
34 | pidofproc -p "${DAEMON_PID}" "${DAEMON_PATH}" > /dev/null
35 | if [ $? -eq 0 ]; then
36 | log_warning_msg "${DAEMON_NAME} is already started"
37 | result=0
38 | else
39 | log_daemon_msg "Starting ${DAEMON_DESC}" "${DAEMON_NAME}"
40 | touch "${DAEMON_LOG}"
41 | chown $DAEMON_USER "${DAEMON_LOG}"
42 | chmod u+rw "${DAEMON_LOG}"
43 | if [ -z "${DAEMON_USER}" ]; then
44 | start-stop-daemon --start --quiet --oknodo --background \
45 | --nicelevel $DAEMON_NICE \
46 | --chdir "${DAEMON_PWD}" \
47 | --pidfile "${DAEMON_PID}" --make-pidfile \
48 | --exec "${DAEMON_PATH}" -- $DAEMON_OPTS
49 | result=$?
50 | else
51 | start-stop-daemon --start --quiet --oknodo --background \
52 | --nicelevel $DAEMON_NICE \
53 | --chdir "${DAEMON_PWD}" \
54 | --pidfile "${DAEMON_PID}" --make-pidfile \
55 | --chuid "${DAEMON_USER}" \
56 | --exec "${DAEMON_PATH}" -- $DAEMON_OPTS
57 | result=$?
58 | fi
59 | log_end_msg $result
60 | fi
61 | return $result
62 | }
63 |
64 | do_stop() {
65 | local result
66 |
67 | pidofproc -p "${DAEMON_PID}" "${DAEMON_PATH}" > /dev/null
68 | if [ $? -ne 0 ]; then
69 | log_warning_msg "${DAEMON_NAME} is not started"
70 | result=0
71 | else
72 | log_daemon_msg "Stopping ${DAEMON_DESC}" "${DAEMON_NAME}"
73 | killproc -p "${DAEMON_PID}" "${DAEMON_PATH}"
74 | result=$?
75 | log_end_msg $result
76 | rm "${DAEMON_PID}"
77 | fi
78 | return $result
79 | }
80 |
81 | do_restart() {
82 | local result
83 | do_stop
84 | result=$?
85 | if [ $result = 0 ]; then
86 | do_start
87 | result=$?
88 | fi
89 | return $result
90 | }
91 |
92 | do_status() {
93 | local result
94 | status_of_proc -p "${DAEMON_PID}" "${DAEMON_PATH}" "${DAEMON_NAME}"
95 | result=$?
96 | return $result
97 | }
98 |
99 | do_usage() {
100 | echo $"Usage: $0 {start | stop | restart | status}"
101 | exit 1
102 | }
103 |
104 | case "$1" in
105 | start) do_start; exit $? ;;
106 | stop) do_stop; exit $? ;;
107 | restart) do_restart; exit $? ;;
108 | status) do_status; exit $? ;;
109 | *) do_usage; exit 1 ;;
110 | esac
--------------------------------------------------------------------------------
/tasks/install/consul-agent.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install | consul agent | Check agent binary
4 | stat:
5 | path="{{ consul_bin_dir }}/consul-{{consul_agent_version}}"
6 | register: consul_agent_version_binary
7 |
8 | - name: install | consul agent | Ensure download directory exists
9 | file:
10 | path="{{ consul_tmp_dir }}/consul-{{ consul_agent_version }}"
11 | state=directory
12 | mode=0755
13 |
14 | - name: install | consul agent | Download and unpack consul archive (if needed)
15 | unarchive:
16 | src="{{ consul_agent_download_url }}"
17 | dest="{{ consul_tmp_dir }}/consul-{{ consul_agent_version }}/"
18 | copy=false
19 | owner="{{ consul_user }}"
20 | register: agent_download
21 | ignore_errors: true
22 | when: not consul_agent_version_binary.stat.exists
23 |
24 | - name: install | consul agent | Download and unpack consul archive (if needed with curl)
25 | shell: |
26 | curl {{ consul_agent_download_url }} -o {{ consul_tmp_dir }}/consul-{{ consul_agent_version }}/consul.zip
27 | unzip {{ consul_tmp_dir }}/consul-{{ consul_agent_version }}/consul.zip -d {{ consul_tmp_dir }}/consul-{{ consul_agent_version }}/
28 | when: agent_download | failed
29 |
30 | - name: install | consul agent | Move versioned binary into place (if needed)
31 | command: mv {{ consul_tmp_dir }}/consul-{{ consul_agent_version }}/consul {{ consul_bin_dir }}/consul-{{consul_agent_version}}
32 | when: not consul_agent_version_binary.stat.exists
33 | notify:
34 | - restart consul service
35 |
36 | - name: install | consul agent | Check target file
37 | stat:
38 | path="{{ consul_bin_dir }}/consul"
39 | register: consul_agent_target_link
40 |
41 | - name: install | consul agent | Remove target file (if not symlink)
42 | file:
43 | path="{{ consul_bin_dir }}/consul"
44 | state="absent"
45 | when: not consul_agent_target_link.stat.islnk is defined or not consul_agent_target_link.stat.islnk
46 |
47 | - name: install | consul agent | Consul link binary
48 | file:
49 | src="{{ consul_bin_dir }}/consul-{{consul_agent_version}}"
50 | dest="{{ consul_bin_dir }}/consul"
51 | state="link"
52 | notify:
53 | - restart consul service
54 |
55 | - name: install | consul agent | Ensure consul configuration is deployed
56 | template:
57 | src="consul-agent.json.j2"
58 | dest="{{ consul_config_agent_file }}"
59 | owner="{{ consul_user }}"
60 | group="{{ consul_group }}"
61 | mode="0640"
62 | notify:
63 | - restart consul service
64 |
65 | - name: install | consul agent | Ensure consul run script is deployed
66 | template:
67 | src="consul-agent-run.sh.j2"
68 | dest="{{ consul_bin_dir }}/consul_agent_run.sh"
69 | owner="{{ consul_user }}"
70 | group="{{ consul_group }}"
71 | mode="0755"
72 | notify:
73 | - restart consul service
74 |
75 | - name: install | consul agent | Ensure consul init.d script is deployed
76 | template:
77 | src="consul-init.d.sh.j2"
78 | dest="/etc/init.d/consul"
79 | owner="{{ consul_user }}"
80 | group="{{ consul_group }}"
81 | mode="0755"
82 | when: "consul_service == 'service'"
83 | notify:
84 | - restart consul service
85 |
86 | - name: install | consul agent | Ensure consul s6 service directory exists
87 | file:
88 | path="{{ consul_s6_servicedir }}/consul"
89 | state="directory"
90 | mode=0755
91 | when: "consul_service == 's6'"
92 |
93 | - name: install | consul agent | Ensure consul s6 script is deployed
94 | template:
95 | src="consul-s6.j2"
96 | dest="{{ consul_s6_servicedir }}/consul/run"
97 | owner="{{ consul_user }}"
98 | group="{{ consul_group }}"
99 | mode="0755"
100 | when: "consul_service == 's6'"
101 | notify:
102 | - restart consul service
103 |
--------------------------------------------------------------------------------
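The stat / move / symlink sequence above pins every installed release as
consul-<version> in the bin directory and points the plain consul name at it
through a symlink. Bumping consul_agent_version therefore downloads the new
binary alongside the old one and atomically repoints the link, and both steps
notify the restart handler so the agent picks up the new build.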
/test/integration/tags/tags_vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_server : True
3 | consul_bootstrap_expect : 1
4 | consul_ui : true # enable the web UI
5 | consul_client_addr : "0.0.0.0"
6 | fallback_environment : "staging"
7 | consul_services :
8 | #curl -s -v http://127.0.0.1:8500/v1/health/service/hellofresh
9 | # External service i.e. hellofresh
10 | hellofresh :
11 | name : "hellofresh"
12 | tags :
13 | - "test"
14 | - "v1.1.5"
15 | - "env:staging"
16 | port : 80
17 | address : "hellofresh.com"
18 | weight : "77"
19 | check :
20 | script : "curl https://www.hellofresh.com > /dev/null"
21 | interval : "120s"
22 | haproxy :
23 | server_options : "check inter 120s fastinter 5s downinter 8s rise 3 fall 2"
24 | service_mode : "http"
25 | # A local service superssh that uses localport
26 | superssh :
27 | name : "superssh-different-name"
28 | tags :
29 | - "test"
30 | - "v2.1.2"
31 | - "env:staging"
32 | port : 22
33 | local_port : 2222
34 | check :
35 | script : "netstat -ltnp | grep ':22 ' > /dev/null 2>&1"
36 | interval : "60s"
37 | haproxy :
38 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
39 | service_mode : "tcp"
40 | # A local service superdb that is failing
41 | superdb :
42 | name : "superdb"
43 | tags :
44 | - "test"
45 | - "v3.9.2"
46 | - "env:staging"
47 | port : 2122 # failing port :)
48 | check :
49 | script : "netstat -ltnp | grep ':2123 ' > /dev/null 2>&1"
50 | interval : "60s"
51 | haproxy :
52 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
53 | service_mode : "tcp"
54 | # A local service superapp that is not advertised by the producer
55 | superapp :
56 | name : "superapp"
57 | tags :
58 | - "test"
59 | - "v0.1.1"
60 | - "env:staging"
61 | port : 9999
62 | check :
63 | script : "curl localhost:9999 > /dev/null 2>&1"
64 | interval : "60s"
65 | haproxy :
66 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
67 | service_mode : "tcp"
68 | # A local service supertaggedapp that uses producer tags
69 | supertaggedapp :
70 | name : "supertaggedapp"
71 | tags :
72 | - "test"
73 | - "v0.1.1"
74 | - "env:staging"
75 | port : 9998
76 | check :
77 | script : "curl localhost:9998 > /dev/null 2>&1"
78 | interval : "60s"
79 | haproxy :
80 | server_options : "check inter 60s fastinter 5s downinter 8s rise 3 fall 2"
81 | service_mode : "tcp"
82 |
83 |
84 | consul_producer : True
85 | consul_producer_services :
86 | - 'superssh'
87 | - 'hellofresh'
88 | - 'superdb'
89 | - name: supertaggedapp
90 | add_tags: ['from:producer']
91 |
92 | consul_consumer : True
93 | consul_consumer_services :
94 | - 'superdb'
95 | - name: 'superssh'
96 | - name: 'supertaggedapp'
97 | - name: "hellofresh"
98 | tags_contains: "test"
99 | tag_regex: "v1.1.*"
100 |
--------------------------------------------------------------------------------
/tasks/install/haproxy.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Ubuntu Block
4 | - block:
5 | - name: install | haproxy | Add the HAproxy repository (ubuntu)
6 | apt_repository:
7 | repo="{{ consul_haproxy_ppa_url }}"
8 | update_cache="yes"
9 | when: consul_haproxy_ppa_install | bool
10 |
11 | - name: install | haproxy | Install the HAProxy packages (ubuntu)
12 | apt:
13 | name="haproxy"
14 | register: haproxy_install_ubuntu
15 |
16 | - name: install | haproxy | Enable haproxy initd
17 | replace:
18 | dest='/etc/default/haproxy'
19 | regexp='ENABLED=0'
20 | replace='ENABLED=1'
21 |
22 | when: ansible_distribution == 'Ubuntu'
23 | # End of Ubuntu Block
24 |
25 | # Alpine Block
26 | - block:
27 | - name: install | haproxy | Install the HAProxy packages (alpine)
28 | apk:
29 | name="haproxy"
30 | update_cache="yes"
31 | register: haproxy_install_alpine
32 |
33 | when: ansible_distribution == 'Alpine'
34 | # End of Alpine Block
35 |
36 |
37 | # Deploy a minimal valid haproxy.cfg so haproxy does not fail to start when consul-template is delayed:
38 | # haproxy runs with this config until consul-template kicks in, rewrites it and triggers a reload.
39 | - name: install | haproxy | Deploy a valid haproxy.cfg (if haproxy installation just changed)
40 | template:
41 | src="haproxy-initial.cfg.j2"
42 | dest="/etc/haproxy/haproxy.cfg"
43 | owner="{{ consul_user }}"
44 | group="{{ consul_group }}"
45 | mode="0640"
46 | when: haproxy_install_ubuntu | changed or haproxy_install_alpine | changed
47 |
48 | - name: install | haproxy | Make sure /var/haproxy/ exists
49 | file:
50 | state="directory"
51 | path="{{ item }}"
52 | owner="root"
53 | group="root"
54 | with_items:
55 | - /var/haproxy
56 |
57 | - name: install | haproxy | Ensure haproxy config file is writable by consul group
58 | file:
59 | path="/etc/haproxy/haproxy.cfg"
60 | owner="{{ consul_user }}"
61 | group="{{ consul_group }}"
62 | mode="0644"
63 |
64 | - name: install | haproxy | Ensure haproxy config dir is writable by consul group
65 | file:
66 | path="/etc/haproxy/"
67 | owner="root"
68 | group="{{ consul_group }}"
69 | mode="0775"
70 |
71 | # s6 service Block
72 | - block:
73 | - name: install | haproxy | Ensure haproxy s6 service directories exist
74 | file:
75 | path="{{ item }}"
76 | state="directory"
77 | owner="{{ consul_haproxy_user }}"
78 | mode=0755
79 | with_items:
80 | - "{{ consul_s6_servicedir }}/haproxy1"
81 | - "{{ consul_s6_servicedir }}/haproxy2"
82 | - "{{ consul_s6_servicedir }}/haproxy1/log"
83 | - "{{ consul_s6_servicedir }}/haproxy2/log"
84 |
85 | - name: install | haproxy | Ensure haproxy s6 log directories exist
86 | file:
87 | path="{{ item }}"
88 | state="directory"
89 | owner="nobody"
90 | group="nobody"
91 | mode=0755
92 | with_items:
93 | - "/var/log/haproxy1/"
94 | - "/var/log/haproxy2/"
95 |
96 | - name: install | haproxy | Ensure haproxy s6 scripts are deployed
97 | template:
98 | src="{{ item.src }}"
99 | dest="{{ item.dest }}"
100 | owner="root"
101 | group="root"
102 | mode="0755"
103 | with_items:
104 | - { src: "haproxys6.j2", dest: "{{ consul_s6_servicedir }}/haproxy1/run" }
105 | - { src: "haproxys6.j2", dest: "{{ consul_s6_servicedir }}/haproxy2/run" }
106 | - { src: "s6-log.j2", dest: "{{ consul_s6_servicedir }}/haproxy1/log/run", log_dir: "/var/log/haproxy1" }
107 | - { src: "s6-log.j2", dest: "{{ consul_s6_servicedir }}/haproxy2/log/run", log_dir: "/var/log/haproxy2" }
108 |
109 | - name: install | haproxy | Ensure haproxy s6 service initial links (first time only)
110 | file:
111 | src="{{ item.src }}"
112 | dest="{{ item.dest }}"
113 | state="link"
114 | with_items:
115 | - src: "{{ consul_s6_servicedir }}/haproxy1"
116 | dest: "/haproxy-current"
117 | - src: "{{ consul_s6_servicedir }}/haproxy2"
118 | dest: "/haproxy-alt"
119 | when: haproxy_install_alpine | changed
120 |
121 | when: consul_service == 's6'
122 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent1/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'superssh service (localport option)' do
4 | describe "definition by name" do
5 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superssh-different-name -v" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"superssh-different-name"' }
8 | its(:stdout) { should contain '"ServiceTags":\["test"]' }
9 | its(:stdout) { should contain '"ServicePort":22' }
10 | end
11 | end
12 |
13 | describe "health is passing" do
14 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/superssh-different-name -v" do
15 | its(:exit_status) { should eq 0 }
16 | its(:stdout) { should contain '"Service":"superssh-different-name"'}
17 | its(:stdout) { should contain '"Status":"passing"'}
18 | end
19 | end
20 |
21 | describe "definition by key should be empty" do
22 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superssh -v" do
23 | its(:exit_status) { should eq 0 }
24 | its(:stdout) { should match '\[\]' }
25 | end
26 | end
27 |
28 | describe "localport 2222 is open by haproxy" do
29 | describe port(2222) do
30 | it { should be_listening.on('127.0.0.1').with('tcp') }
31 | end
32 | end
33 |
34 | describe "ssh is working on 2222" do
35 | describe command "echo X | nc -v 127.0.0.1 2222 2>&1 | grep SSH" do
36 | its(:exit_status) { should eq 0 }
37 | end
38 | end
39 | end
40 |
41 | describe 'superdb service (A failing service)' do
42 | describe "definition" do
43 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superdb -v" do
44 | its(:exit_status) { should eq 0 }
45 | its(:stdout) { should contain '"ServiceName":"superdb"' }
46 | its(:stdout) { should contain '"ServiceTags":\["userdb","v1.2"]' }
47 | its(:stdout) { should contain '"ServicePort":2122' }
48 | end
49 | end
50 |
51 | describe "health is failing" do
52 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/superdb -v" do
53 | its(:exit_status) { should eq 0 }
54 | its(:stdout) { should contain '"Service":"superdb"' }
55 | its(:stdout) { should contain '"Status":"(warning|critical)"' }
56 | end
57 | end
58 |
59 | describe "localport 2122 is open by haproxy" do
60 | describe port(2122) do
61 | it { should be_listening.on('127.0.0.1').with('tcp') }
62 | end
63 | end
64 |
65 | end
66 |
67 |
68 | describe 'superapp service (a non advertised service)' do
69 |
70 | describe "definition should not exist" do
71 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superapp -v" do
72 | its(:exit_status) { should eq 0 }
73 | its(:stdout) { should match '\[\]' }
74 | end
75 | end
76 |
77 | describe "localport 9999 is open by haproxy" do
78 | describe port(9999) do
79 | it { should be_listening.on('127.0.0.1').with('tcp') }
80 | end
81 | end
82 |
83 | end
84 |
85 | describe 'hellofresh service (normal port option)' do
86 | describe "definition" do
87 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/hellofresh" do
88 | its(:exit_status) { should eq 0 }
89 | its(:stdout) { should contain '"ServiceName":"hellofresh"' }
90 | its(:stdout) { should contain '"ServiceAddress":"hellofresh.com"' }
91 | its(:stdout) { should contain '"ServicePort":80' }
92 | end
93 | end
94 |
95 | describe "health is passing" do
96 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/hellofresh -v" do
97 | its(:exit_status) { should eq 0 }
98 | its(:stdout) { should contain '"Service":"hellofresh"'}
99 | its(:stdout) { should contain '"Status":"passing"' }
100 | end
101 | end
102 |
103 | describe "localport 80 is open by haproxy" do
104 | describe port(80) do
105 | it { should be_listening.on('127.0.0.1').with('tcp') }
106 | end
107 | end
108 |
109 | describe "HAProxy server backend should be on active" do
110 | describe command "echo 'show stat' | socat unix-connect:/var/haproxy/stats.sock stdio | grep hellofresh,hellofresh" do
111 | its(:exit_status) { should eq 0 }
112 | end
113 | end
114 |
115 |
116 | describe "curling to hellofresh is working on 80" do
117 | describe command "curl -I --resolve hellofresh.com:80:127.0.0.1 -H 'Host: hellofresh.com' http://hellofresh.com" do
118 | its(:exit_status) { should eq 0 }
119 | its(:stdout) { should contain 'HTTP/1.1 301 Moved Permanently' }
120 | end
121 | end
122 | end
123 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent2/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'superssh service (localport option)' do
4 | describe "definition by name" do
5 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superssh-different-name -v" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"superssh-different-name"' }
8 | its(:stdout) { should contain '"ServiceTags":\["test"]' }
9 | its(:stdout) { should contain '"ServicePort":22' }
10 | end
11 | end
12 |
13 | describe "health is passing" do
14 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/superssh-different-name -v" do
15 | its(:exit_status) { should eq 0 }
16 | its(:stdout) { should contain '"Service":"superssh-different-name"'}
17 | its(:stdout) { should contain '"Status":"passing"'}
18 | end
19 | end
20 |
21 | describe "definition by key should be empty" do
22 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superssh -v" do
23 | its(:exit_status) { should eq 0 }
24 | its(:stdout) { should match '\[\]' }
25 | end
26 | end
27 |
28 | describe "localport 2222 is open by haproxy" do
29 | describe port(2222) do
30 | it { should be_listening.on('127.0.0.1').with('tcp') }
31 | end
32 | end
33 |
34 | describe "ssh is working on 2222" do
35 | describe command "echo X | nc -v 127.0.0.1 2222 2>&1 | grep SSH" do
36 | its(:exit_status) { should eq 0 }
37 | end
38 | end
39 | end
40 |
41 | describe 'superdb service (A failing service)' do
42 | describe "definition" do
43 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superdb -v" do
44 | its(:exit_status) { should eq 0 }
45 | its(:stdout) { should contain '"ServiceName":"superdb"' }
46 | its(:stdout) { should contain '"ServiceTags":\["userdb","v1.2"]' }
47 | its(:stdout) { should contain '"ServicePort":2122' }
48 | end
49 | end
50 |
51 | describe "health is failing" do
52 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/superdb -v" do
53 | its(:exit_status) { should eq 0 }
54 | its(:stdout) { should contain '"Service":"superdb"' }
55 | its(:stdout) { should contain '"Status":"(warning|critical)"' }
56 | end
57 | end
58 |
59 | describe "localport 2122 is open by haproxy" do
60 | describe port(2122) do
61 | it { should be_listening.on('127.0.0.1').with('tcp') }
62 | end
63 | end
64 |
65 | end
66 |
67 |
68 | describe 'superapp service (a non advertised service)' do
69 |
70 | describe "definition should not exist" do
71 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superapp -v" do
72 | its(:exit_status) { should eq 0 }
73 | its(:stdout) { should match '\[\]' }
74 | end
75 | end
76 |
77 | describe "localport 9999 is open by haproxy" do
78 | describe port(9999) do
79 | it { should be_listening.on('127.0.0.1').with('tcp') }
80 | end
81 | end
82 |
83 | end
84 |
85 | describe 'hellofresh service (normal port option)' do
86 | describe "definition" do
87 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/hellofresh" do
88 | its(:exit_status) { should eq 0 }
89 | its(:stdout) { should contain '"ServiceName":"hellofresh"' }
90 | its(:stdout) { should contain '"ServiceAddress":"hellofresh.com"' }
91 | its(:stdout) { should contain '"ServicePort":80' }
92 | end
93 | end
94 |
95 | describe "health is passing" do
96 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/hellofresh -v" do
97 | its(:exit_status) { should eq 0 }
98 | its(:stdout) { should contain '"Service":"hellofresh"'}
99 | its(:stdout) { should contain '"Status":"passing"' }
100 | end
101 | end
102 |
103 | describe "localport 80 is open by haproxy" do
104 | describe port(80) do
105 | it { should be_listening.on('127.0.0.1').with('tcp') }
106 | end
107 | end
108 |
109 | describe "HAProxy server backend should be on active" do
110 | describe command "echo 'show stat' | socat unix-connect:/var/haproxy/stats.sock stdio | grep hellofresh,hellofresh" do
111 | its(:exit_status) { should eq 0 }
112 | end
113 | end
114 |
115 |
116 | describe "curling to hellofresh is working on 80" do
117 | describe command "curl -I --resolve hellofresh.com:80:127.0.0.1 -H 'Host: hellofresh.com' http://hellofresh.com" do
118 | its(:exit_status) { should eq 0 }
119 | its(:stdout) { should contain 'HTTP/1.1 301 Moved Permanently' }
120 | end
121 | end
122 | end
123 |
--------------------------------------------------------------------------------
/test/integration/cluster-agent3/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'superssh service (localport option)' do
4 | describe "definition by name" do
5 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superssh-different-name -v" do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"superssh-different-name"' }
8 | its(:stdout) { should contain '"ServiceTags":\["test"]' }
9 | its(:stdout) { should contain '"ServicePort":22' }
10 | end
11 | end
12 |
13 | describe "health is passing" do
14 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/superssh-different-name -v" do
15 | its(:exit_status) { should eq 0 }
16 | its(:stdout) { should contain '"Service":"superssh-different-name"'}
17 | its(:stdout) { should contain '"Status":"passing"'}
18 | end
19 | end
20 |
21 | describe "definition by key should be empty" do
22 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superssh -v" do
23 | its(:exit_status) { should eq 0 }
24 | its(:stdout) { should match '\[\]' }
25 | end
26 | end
27 |
28 | describe "localport 2222 is open by haproxy" do
29 | describe port(2222) do
30 | it { should be_listening.on('127.0.0.1').with('tcp') }
31 | end
32 | end
33 |
34 | describe "ssh is working on 2222" do
35 | describe command "echo X | nc -v 127.0.0.1 2222 2>&1 | grep SSH" do
36 | its(:exit_status) { should eq 0 }
37 | end
38 | end
39 | end
40 |
41 | describe 'superdb service (A failing service)' do
42 | describe "definition" do
43 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superdb -v" do
44 | its(:exit_status) { should eq 0 }
45 | its(:stdout) { should contain '"ServiceName":"superdb"' }
46 | its(:stdout) { should contain '"ServiceTags":\["userdb","v1.2"]' }
47 | its(:stdout) { should contain '"ServicePort":2122' }
48 | end
49 | end
50 |
51 | describe "health is failing" do
52 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/superdb -v" do
53 | its(:exit_status) { should eq 0 }
54 | its(:stdout) { should contain '"Service":"superdb"' }
55 | its(:stdout) { should contain '"Status":"(warning|critical)"' }
56 | end
57 | end
58 |
59 | describe "localport 2122 is open by haproxy" do
60 | describe port(2122) do
61 | it { should be_listening.on('127.0.0.1').with('tcp') }
62 | end
63 | end
64 |
65 | end
66 |
67 |
68 | describe 'superapp service (a non advertised service)' do
69 |
70 | describe "definition should not exist" do
71 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/superapp -v" do
72 | its(:exit_status) { should eq 0 }
73 | its(:stdout) { should match '\[\]' }
74 | end
75 | end
76 |
77 | describe "localport 9999 is open by haproxy" do
78 | describe port(9999) do
79 | it { should be_listening.on('127.0.0.1').with('tcp') }
80 | end
81 | end
82 |
83 | end
84 |
85 | describe 'hellofresh service (normal port option)' do
86 | describe "definition" do
87 | describe command "curl -s http://127.0.0.1:8500/v1/catalog/service/hellofresh" do
88 | its(:exit_status) { should eq 0 }
89 | its(:stdout) { should contain '"ServiceName":"hellofresh"' }
90 | its(:stdout) { should contain '"ServiceAddress":"hellofresh.com"' }
91 | its(:stdout) { should contain '"ServicePort":80' }
92 | end
93 | end
94 |
95 | describe "health is passing" do
96 | describe command "curl -s http://127.0.0.1:8500/v1/health/service/hellofresh -v" do
97 | its(:exit_status) { should eq 0 }
98 | its(:stdout) { should contain '"Service":"hellofresh"'}
99 | its(:stdout) { should contain '"Status":"passing"' }
100 | end
101 | end
102 |
103 | describe "localport 80 is open by haproxy" do
104 | describe port(80) do
105 | it { should be_listening.on('127.0.0.1').with('tcp') }
106 | end
107 | end
108 |
109 | describe "HAProxy server backend should be on active" do
110 | describe command "echo 'show stat' | socat unix-connect:/var/haproxy/stats.sock stdio | grep hellofresh,hellofresh" do
111 | its(:exit_status) { should eq 0 }
112 | end
113 | end
114 |
115 |
116 | describe "curling to hellofresh is working on 80" do
117 | describe command "curl -I --resolve hellofresh.com:80:127.0.0.1 -H 'Host: hellofresh.com' http://hellofresh.com" do
118 | its(:exit_status) { should eq 0 }
119 | its(:stdout) { should contain 'HTTP/1.1 301 Moved Permanently' }
120 | end
121 | end
122 | end
123 |
--------------------------------------------------------------------------------
/tasks/install/consul-template.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install | consul template | Check template binary
4 | stat:
5 | path="{{ consul_bin_dir }}/consul-template-{{ consul_template_version }}"
6 | register: consul_template_version_binary
7 |
8 | - name: install | consul template | Ensure download directory exists
9 | file:
10 | path="{{ consul_tmp_dir }}/consul-template-{{ consul_template_version }}"
11 | state=directory
12 | mode=0755
13 |
14 | - name: install | consul template | Download and unpack consul-template archive (if needed)
15 | unarchive:
16 | src="{{ consul_template_download_url }}"
17 | dest="{{ consul_tmp_dir }}/consul-template-{{ consul_template_version }}"
18 | copy=false
19 | owner="{{ consul_user }}"
20 | register: template_download
21 | ignore_errors: true
22 | when: not consul_template_version_binary.stat.exists
23 |
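   | # Fallback: mirrors the consul agent install -- if unarchive failed, fetch the
   | # zip with curl and unzip it manually.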
24 | - name: install | consul template | Download and unpack consul-template archive with curl (fallback)
25 | shell: |
26 | curl -s {{ consul_template_download_url }} -o {{ consul_tmp_dir }}/consul-template-{{ consul_template_version }}/consul-template.zip
27 | unzip {{ consul_tmp_dir }}/consul-template-{{ consul_template_version }}/consul-template.zip -d {{ consul_tmp_dir }}/consul-template-{{ consul_template_version }}/
28 | when: template_download | failed
29 |
30 | - name: install | consul template | Move versioned binary into place (if needed)
31 | command: mv {{ consul_tmp_dir }}/consul-template-{{ consul_template_version }}/consul-template {{ consul_bin_dir }}/consul-template-{{consul_template_version}}
32 | when: not consul_template_version_binary.stat.exists
33 | notify:
34 | - restart consul service
35 |
36 | - name: install | consul template | Check target file
37 | stat:
38 | path="{{ consul_bin_dir }}/consul-template"
39 | register: consul_template_target_link
40 |
41 | - name: install | consul template | Remove target file (if not symlink)
42 | file:
43 | path="{{ consul_bin_dir }}/consul-template"
44 | state="absent"
45 | when: not consul_template_target_link.stat.islnk is defined or not consul_template_target_link.stat.islnk
46 |
47 | - name: install | consul template | Link consul-template binary
48 | file:
49 | src="{{ consul_bin_dir }}/consul-template-{{ consul_template_version }}"
50 | dest="{{ consul_bin_dir }}/consul-template"
51 | state="link"
52 | notify:
53 | - restart consul service
54 |
55 | - name: install | consul template | Ensure consul-template configuration file is deployed
56 | template:
57 | src="consul-template.json.j2"
58 | dest="{{ consul_config_template_file }}"
59 | owner="{{ consul_user }}"
60 | group="{{ consul_group }}"
61 | mode="0640"
62 | notify:
63 | - restart consul-template service
64 |
65 | - name: install | consul template | Ensure consul-template run script is deployed
66 | template:
67 | src="consul-template-run.sh.j2"
68 | dest="{{ consul_bin_dir }}/consul-template-run.sh"
69 | owner="{{ consul_user }}"
70 | group="{{ consul_group }}"
71 | mode="0755"
72 | notify:
73 | - restart consul-template service
74 |
75 | - name: install | consul template | Ensure consul-template init.d script is deployed
76 | template:
77 | src="consul-template-init.d.sh.j2"
78 | dest="/etc/init.d/consul-template"
79 | owner="{{ consul_user }}"
80 | group="{{ consul_group }}"
81 | mode="0755"
82 | when: "consul_service == 'service'"
83 | notify:
84 | - restart consul-template service
85 |
86 | # s6 service Block
87 | - block:
88 | - name: install | consul template | Ensure consul-template s6 service directories exist
89 | file:
90 | path="{{ item }}"
91 | state="directory"
92 | owner="{{ consul_user }}"
93 | group="{{ consul_group }}"
94 | mode=0755
95 | with_items:
96 | - "{{ consul_s6_servicedir }}/consul-template"
97 | - "{{ consul_s6_servicedir }}/consul-template/log"
98 |
99 | - name: install | consul template | Ensure consul-template s6 log directory exists
100 | file:
101 | path="/var/log/s6-consul-template"
102 | state="directory"
103 | owner="nobody"
104 | group="nobody"
105 | mode=0755
106 |
107 | - name: install | consul template | Ensure consul-template s6 scripts are deployed
108 | template:
109 | src="{{ item.src }}"
110 | dest="{{ item.dest }}"
111 | owner="{{ consul_user }}"
112 | group="{{ consul_group }}"
113 | mode="0755"
114 | with_items:
115 | - { src: "consul-template-s6.j2", dest: "{{ consul_s6_servicedir }}/consul-template/run"}
116 | - { src: "s6-log.j2", dest: "{{ consul_s6_servicedir }}/consul-template/log/run", log_dir: "/var/log/s6-consul-template", log_user: "consul"}
117 | notify:
118 | - restart consul-template service
119 |
120 | when: "consul_service == 's6'"
121 |
122 | - name: install | consul template | Ensure consul-template templates are deployed
123 | template:
124 | src="{{ item.jtemplate }}"
125 | dest="{{ item.source }}"
126 | owner="{{ consul_user }}"
127 | group="{{ consul_group }}"
128 | mode="0755"
129 | with_items: "{{ consul_template_templates }}"
130 | notify:
131 | - restart consul-template service
132 | - reload haproxy service
133 |
134 | - name: install | consul template | Ensure HAProxy reload script is deployed
135 | template:
136 | src="haproxy-reload.j2"
137 | dest="{{ consul_bin_dir }}/haproxy-reload"
138 | owner="{{ consul_user }}"
139 | group="{{ consul_group }}"
140 | mode="0755"
141 | notify:
142 | - restart consul-template service
143 | - reload haproxy service
144 |
145 | - name: install | consul template | Ensure sudoers file to allow consul template to reload haproxy
146 | template:
147 | src="consul-template-sudo.j2"
148 | dest="/etc/sudoers.d/{{ consul_user }}"
149 | owner="root"
150 | group="root"
151 | mode="0440"
152 | validate='visudo -cf %s'
--------------------------------------------------------------------------------
/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Versions to install
3 | consul_agent_version : "0.9.2"
4 | consul_template_version : "0.19.0"
5 |
6 | ## Start service right away, during provision
7 | consul_start_service: true
8 |
9 | ## Node name
10 | consul_node_name : "{{ inventory_hostname }}"
11 |
12 | ## Type of installation
13 | consul_server : false
14 | consul_consumer : false
15 | consul_producer : false
16 | consul_ui : false
17 |
18 | ## Services
19 | consul_consumer_services : []
20 | consul_producer_services : []
21 |
22 | ## Consul Domain
23 | consul_datacenter : "default"
24 | consul_domain : "consul."
25 |
26 | ## Consul agent configuration
27 | #consul_start_join : [ "127.0.0.1" ]
28 | consul_servers_list : [ "127.0.0.1" ] # you can use ansible groups "{{ groups['consul_servers'] }}"
29 | consul_rejoin_after_leave : true
30 | consul_leave_on_terminate : false
31 | # Retry Options
32 | consul_retry_join : false
33 | consul_retry_interval : 30s
34 | consul_retry_max : 0
35 | # Consul log
36 | consul_log_level : "INFO"
37 | consul_log_syslog : false
38 |
39 | consul_server_port_server : 8300
40 | consul_http_port : 8500
41 | consul_https_port : -1
42 |
43 | # Consul Network :
44 | consul_network_bind : "" # "0.0.0.0"
45 | consul_network_autobind : true
46 | #consul_network_autobind_range : "192.168.56.0/24"
47 | consul_network_autobind_type : "private" # "" or private or public
48 | consul_client_addr : "127.0.0.1"
49 |
50 | # Consul dir structure
51 | consul_home_dir : "/opt/consul"
52 | consul_bin_dir : "{{ consul_home_dir }}/bin"
53 | consul_tmp_dir : "{{ consul_home_dir }}/tmp"
54 | consul_data_dir : "{{ consul_home_dir }}/data"
55 | consul_template_dir : "{{ consul_home_dir }}/templates"
56 | consul_log_dir : "/var/log/consul"
57 | consul_config_dir : "/etc/consul.d"
58 | # If left empty, only healthy/passing services are returned. You can also use "passing,warning" or "all".
59 | # For more info check https://github.com/hashicorp/consul-template#service
60 | consul_template_service_options : ""
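   | # e.g. consul_template_service_options : "passing,warning"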
61 |
62 | # Consul files
63 | consul_config_agent_file : "/etc/consul.conf"
64 | consul_config_template_file : "/etc/consul-template.conf"
65 | consul_agent_log_file : "{{ consul_log_dir }}/consul-agent.log"
66 | consul_template_log_file : "{{ consul_log_dir }}/consul-template.log"
67 | consul_template_haproxy_file : "{{ consul_template_dir }}/consul_template.cnf"
68 |
69 | # Consul user/Group
70 | consul_user : "consul"
71 | consul_group : "consul"
72 |
73 | # Consul template
74 | consul_template_consul_server : "127.0.0.1"
75 | consul_template_consul_port : "8500"
76 | consul_template_templates :
77 | - source : "{{ consul_template_haproxy_file }}"
78 | destination : "/etc/haproxy/haproxy.cfg"
79 | command : "{{ consul_bin_dir }}/haproxy-reload"
80 | jtemplate : "haproxy.ctmp.j2"
81 |
82 | consul_template_log_level : "warn"
83 |
84 | consul_encrypt : "Tq/xU3fTPyBRoA4R4CxOKg=="
85 |
86 | ## Telemetry
87 | consul_telemetry : False
88 | consul_statsd_address : ""
89 | consul_statsite_address : ""
90 | consul_statsite_prefix : "consul"
91 | consul_disable_hostname : True
92 |
93 | ## HA Proxy
94 | consul_haproxy_ppa_install : False # By default, use the distribution-packaged version of HAProxy
95 | consul_haproxy_ppa_url : "ppa:vbernat/haproxy-1.6"
96 | ## Config global
97 | consul_haproxy_user : "haproxy"
98 | consul_haproxy_group : "haproxy"
99 | consul_haproxy_maxconn : "8192"
100 | consul_haproxy_log : [ "/dev/log local0", "/dev/log local1 info" ]
101 |
102 | consul_haproxy_stats_socket : "socket /var/lib/haproxy/stats.sock group {{ consul_group }} mode 660 level admin"
103 | ## Extra global key, value
104 | consul_haproxy_extra_global :
105 | chroot: "/var/lib/haproxy"
106 | ## Config defaults
107 | consul_haproxy_default_log : "global"
108 | consul_haproxy_default_options :
109 | - "dontlognull"
110 | - "log-separate-errors"
111 | - "redispatch"
112 | consul_haproxy_default_timeout :
113 | - "connect 5s"
114 | - "check 5s"
115 | - "client 120s"
116 | - "server 120s"
117 | consul_haproxy_default_maxconn : 2000
118 | consul_haproxy_default_retries : 3
119 | consul_haproxy_default_balance : "roundrobin"
120 | ## Extra default key, value
121 | #consul_haproxy_extra_default :
122 |
123 | consul_haproxy_default_server_options : "check inter 10s fastinter 5s downinter 8s rise 3 fall 2"
124 | ## Config Stats page by default HAproxy will have default stats
125 | consul_haproxy_stats_enable : True
126 | consul_haproxy_stats_mode : "http"
127 | consul_haproxy_stats_port : 3212
128 | consul_haproxy_stats_uri : "/"
129 |
130 | # Ad-hoc commands RUN WITH CARE
131 | consul_adhoc_build_raft_peers : False
132 |
133 | # Consul supervisor service (service, s6)
134 | consul_service : "service"
135 | consul_s6_servicedir : "/var/run/s6/services"
136 |
--------------------------------------------------------------------------------
/templates/haproxy.ctmp.j2:
--------------------------------------------------------------------------------
1 | # First template : {{ ansible_managed }}
2 | # Second template : consul template
3 |
4 | # Info on config http://www.haproxy.org/download/1.5/doc/configuration.txt
5 | {% macro flatten_value(key, value) -%}
6 | {%- if value is string -%}{# Value is a string #}
7 | {{ key }} {{ value }}
8 | {%- elif value is iterable -%}{# Value is a list #}
9 | {% for nested_value in value %}
10 | {{ key }} {{ nested_value }}
11 | {% endfor %}
12 | {%- endif -%}
13 | {%- endmacro %}
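   | {# e.g. flatten_value('timeout', ['connect 5s', 'client 120s']) emits one "timeout" line per entry. #}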
14 |
15 | global
16 | daemon
17 | spread-checks 2
18 | user {{ consul_haproxy_user }}
19 | group {{ consul_haproxy_group }}
20 | maxconn {{ consul_haproxy_maxconn }}
21 | {# TODO: find a way around it. Horrible hack to avoid having alpine install rsyslog #}
22 | {{ flatten_value('log', consul_haproxy_log) if ansible_distribution == 'Ubuntu' else '' }}
23 | stats {{ consul_haproxy_stats_socket }}
24 | {% if consul_haproxy_extra_global is defined %}
25 | {% for key, value in consul_haproxy_extra_global.iteritems() %}
26 | {{ key }} {{ value }}
27 | {% endfor %}
28 | {% endif %}
29 |
30 | defaults
31 | {{ flatten_value('log', consul_haproxy_default_log) }}
32 | {{ flatten_value('option', consul_haproxy_default_options) }}
33 | {{ flatten_value('maxconn', consul_haproxy_default_maxconn) }}
34 | {{ flatten_value('timeout', consul_haproxy_default_timeout) }}
35 | {{ flatten_value('retries', consul_haproxy_default_retries) }}
36 | {{ flatten_value('balance', consul_haproxy_default_balance) }}
37 | {% if consul_haproxy_extra_default is defined %}
38 | {% for key, value in consul_haproxy_extra_default.iteritems() %}
39 | {{ key }} {{ value }}
40 | {% endfor %}
41 | {% endif %}
42 |
43 | {% if consul_haproxy_stats_enable == True %}
44 | listen stats
45 | bind {{ consul_haproxy_stats_add | default("127.0.0.1") }}:{{ consul_haproxy_stats_port }}
46 | mode {{ consul_haproxy_stats_mode }}
47 | stats enable
48 | stats uri {{ consul_haproxy_stats_uri }}
49 | {% endif %}
50 |
51 | # consul_services
52 | {% if consul_consumer_services is defined %}
53 | {% for consumer_service in consul_consumer_services %}
54 | {% if consumer_service is mapping %}
55 | {# Support new style of config #}
56 | {% set service_name = consumer_service.name %}
57 | {% else %}
58 | {% set service_name = consumer_service %}
59 | {% endif %}
60 |
61 | {% set env_name = consumer_service.environment_override | default(false) %}
62 |
63 | {% if not env_name %}
64 | {% if environment_name is defined %}
65 | {% set env_name = environment_name %}
66 | {% else %}
67 | {% set env_name = fallback_environment %}
68 | {% endif %}
69 | {% endif %}
70 |
71 | {% set tags_contains = consumer_service.tags_contains | default(false) %}
72 | {% set tag_regex = consumer_service.tag_regex | default(false) %}
73 | {% set haproxy_setting = consul_services[service_name].haproxy | default({}) %}
74 | # Service Config for {{ service_name }}
75 | backend {{ service_name }}
76 | mode {{ haproxy_setting.service_mode | default('http') }}
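   | {# Two-pass lookup: first query the plain service name and keep only instances
   |    tagged "env:<env_name>"; if none matched, fall back below to the
   |    "<name>-<env_name>" service. WEIGHT:<n> and local_port:<n> tags override defaults. #}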
77 | <% scratch.Set "serviceFound" "False" -%>
78 | <% scratch.Set "localPort" "{{ consul_services[service_name].local_port | default(consul_services[service_name].port) }}" -%>
79 | <% range service "{{ service_name }}{{ consul_template_service_options_str }}" -%>
80 | <% scratch.Set "weightValue" "100" -%>
81 | <% range .Tags -%>
82 | <% if . | contains "WEIGHT:" -%>
83 | <% $weightValue := . | split ":" -%>
84 | <% $weightValue := index $weightValue 1 -%>
85 | <% scratch.Set "weightValue" $weightValue -%>
86 | <% end -%>
87 | <% if . | contains "local_port:" -%>
88 | <% $localPort := . | split ":" -%>
89 | <% $localPort := index $localPort 1 -%>
90 | <% scratch.Set "localPort" $localPort -%>
91 | <% end -%>
92 | <% end -%>
93 | <% if .Tags | contains "env:{{ env_name }}" -%> # Match envrionment tag
94 | <% scratch.Set "serviceFound" "True" -%>
95 | {% if tags_contains %}<%if .Tags | contains "{{ tags_contains}}"%> # Match tags_contains {{ tags_contains }}{% endif %}
96 | {% if tag_regex %}<%if .Tags | join " " | regexMatch "{{ tag_regex }}"%> # Match tag_regex {{ tag_regex }}{% endif %}
97 | server <%.ID%>_<%.Address%>:<%.Port%> <%.Address%>:<%.Port%> {% if consul_haproxy_default_balance == 'roundrobin' %}weight <% scratch.Get "weightValue" %> {% endif %} {{ haproxy_setting.server_options | default(consul_haproxy_default_server_options) }}
98 | {% if tag_regex %}<%else%> # Did not match tag_regex {{ tag_regex }}<%end%>{% endif %}
99 | {% if tags_contains %}<%else%> # Did not match tags_contains {{ tags_contains }}<%end%>{% endif %}
100 | <%end%>
101 | <% end -%>
102 | <% $serviceFound := scratch.Get "serviceFound" -%>
103 | <% if eq $serviceFound "False" -%>
104 | <% range service "{{ service_name }}-{{ env_name }}{{ consul_template_service_options_str }}" %>
105 | <% scratch.Set "weightValue" "100" -%>
106 | <% range .Tags -%>
107 | <% if . | contains "WEIGHT:" -%>
108 | <% $weightValue := . | split ":" -%>
109 | <% $weightValue := index $weightValue 1 -%>
110 | <% scratch.Set "weightValue" $weightValue -%>
111 | <% end -%>
112 | <% if . | contains "local_port:" -%>
113 | <% $localPort := . | split ":" -%>
114 | <% $localPort := index $localPort 1 -%>
115 | <% scratch.Set "localPort" $localPort -%>
116 | <% end -%><% end -%>
117 | <% scratch.Set "serviceFound" "True" -%>
118 | {% if tags_contains %}<%if .Tags | contains "{{ tags_contains}}"%> # Match tags_contains {{ tags_contains }}{% endif %}
119 | {% if tag_regex %}<%if .Tags | join " " | regexMatch "{{ tag_regex }}"%> # Match tag_regex {{ tag_regex }}{% endif %}
120 | server <%.ID%>_<%.Address%>:<%.Port%> <%.Address%>:<%.Port%> {% if consul_haproxy_default_balance == 'roundrobin' %}weight <% scratch.Get "weightValue" %> {% endif %} {{ haproxy_setting.server_options | default(consul_haproxy_default_server_options) }}
121 | {% if tag_regex %}<%else%> # Did not match tag_regex {{ tag_regex }}<%end%>{% endif %}
122 | {% if tags_contains %}<%else%> # Did not match tags_contains {{ tags_contains }}<%end%>{% endif %}
123 | <%- end %>
124 | <%- end %>
125 | frontend {{ service_name }}
126 | mode {{ haproxy_setting.service_mode | default('http') }}
127 | bind localhost:<% scratch.Get "localPort" %>
128 | default_backend {{ service_name }}
129 | {% endfor %}
130 | {% endif %}
131 |
--------------------------------------------------------------------------------
/test/integration/basic-agent/serverspec/consul_service_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | describe 'superssh service (localport option)' do
4 | describe 'definition by name' do
5 | describe command 'curl -s -v http://127.0.0.1:8500/v1/catalog/service/superssh-testing' do
6 | its(:exit_status) { should eq 0 }
7 | its(:stdout) { should contain '"ServiceName":"superssh-testing"' }
8 | its(:stdout) { should match /"ServiceTags":\[.*"env:testing".*/ }
9 | its(:stdout) { should match /"ServiceTags":\[.*"WEIGHT:77".*/ }
10 | its(:stdout) { should match /"ServiceTags":\[.*"test".*/ }
11 | its(:stdout) { should match /"ServiceTags":\[.*"local_port:2222".*/ }
12 | its(:stdout) { should contain '"ServicePort":22' }
13 | end
14 | end
15 |
16 | describe 'health is passing' do
17 | describe command 'curl -s -v http://127.0.0.1:8500/v1/health/service/superssh-testing' do
18 | its(:exit_status) { should eq 0 }
19 | its(:stdout) { should contain '"Service":"superssh-testing"' }
20 | its(:stdout) { should contain '"Status":"passing"' }
21 | end
22 | end
23 |
24 | describe 'definition by key should be empty' do
25 | describe command 'curl -s -v http://127.0.0.1:8500/v1/catalog/service/superssh' do
26 | its(:exit_status) { should eq 0 }
27 | its(:stdout) { should match '\[\]' }
28 | end
29 | end
30 |
31 | describe 'localport 2222 is open by haproxy' do
32 | describe port(2222) do
33 | it { should be_listening.on('127.0.0.1').with('tcp') }
34 | end
35 | end
36 |
37 | describe 'ssh is working on 2222' do
38 | describe command 'echo X | nc 127.0.0.1 2222 2>&1 | grep SSH' do
39 | its(:exit_status) { should eq 0 }
40 | end
41 | end
42 | end
43 |
44 | describe 'superdb service (A failing service)' do
45 | describe 'definition' do
46 | describe command 'curl -s -v http://127.0.0.1:8500/v1/catalog/service/superdb' do
47 | its(:exit_status) { should eq 0 }
48 | its(:stdout) { should contain '"ServiceName":"superdb"' }
49 | its(:stdout) { should match /"ServiceTags":\[.*"userdb".*/ }
50 | its(:stdout) { should match /"ServiceTags":\[.*"v1\.2".*/ }
51 | its(:stdout) { should match /"ServiceTags":\[.*"env:testing".*/ }
52 | its(:stdout) { should match /"ServiceTags":\[.*"local_port:2122".*/ }
53 | its(:stdout) { should contain '"ServicePort":2122' }
54 | end
55 | end
56 |
57 | describe 'health is failing' do
58 | describe command 'curl -s -v http://127.0.0.1:8500/v1/health/service/superdb' do
59 | its(:exit_status) { should eq 0 }
60 | its(:stdout) { should contain '"Service":"superdb"' }
61 | its(:stdout) { should contain '"Status":"(warning|critical)"' }
62 | end
63 | end
64 |
65 | describe 'localport 2122 is open by haproxy' do
66 | describe port(2122) do
67 | it { should be_listening.on('127.0.0.1').with('tcp') }
68 | end
69 | end
70 | end
71 |
72 | describe 'superapp service (a non advertised service)' do
73 | describe 'definition should not exist' do
74 | describe command 'curl -s -v http://127.0.0.1:8500/v1/catalog/service/superapp' do
75 | its(:exit_status) { should eq 0 }
76 | its(:stdout) { should match '\[\]' }
77 | end
78 | end
79 |
80 | describe 'localport 9999 is open by haproxy' do
81 | describe port(9999) do
82 | it { should be_listening.on('127.0.0.1').with('tcp') }
83 | end
84 | end
85 | end
86 |
87 | describe 'hellofresh service (normal port option)' do
88 | describe 'definition' do
89 | describe command 'curl -s -v http://127.0.0.1:8500/v1/catalog/service/hellofresh' do
90 | its(:exit_status) { should eq 0 }
91 | its(:stdout) { should contain '"ServiceName":"hellofresh"' }
92 | its(:stdout) { should contain '"ServiceAddress":"127.0.0.1"' }
93 | its(:stdout) { should match /"ServiceTags":\[.*"local_port:8080".*/ }
94 | its(:stdout) { should contain '"ServicePort":80' }
95 | end
96 | end
97 |
98 | describe 'health is passing' do
99 | describe command 'curl -s -v http://127.0.0.1:8500/v1/health/service/hellofresh' do
100 | its(:exit_status) { should eq 0 }
101 | its(:stdout) { should contain '"Service":"hellofresh"' }
102 | its(:stdout) { should contain '"Status":"passing"' }
103 | end
104 | end
105 |
106 | describe 'localport 8080 is open by haproxy' do
107 | describe port(8080) do
108 | it { should be_listening.on('127.0.0.1').with('tcp') }
109 | end
110 | end
111 |
112 | describe 'HAProxy stats unix-connect working' do
113 | describe command "echo 'show stat' | socat unix-connect:/var/lib/haproxy/stats.sock stdio" do
114 | its(:exit_status) { should eq 0 }
115 | end
116 | end
117 |
118 | describe 'HAProxy server backend should be listed and up' do
119 | let(:pre_command) { 'sleep 2' }
120 | describe command "echo 'show stat' | socat unix-connect:/var/lib/haproxy/stats.sock stdio | grep hellofresh,hellofresh | grep UP" do
121 | its(:exit_status) { should eq 0 }
122 | end
123 | end
124 |
125 | describe 'hellofresh backend should have the default weight' do
126 | describe command 'echo "get weight hellofresh/`cat /etc/haproxy/haproxy.cfg | grep "server hellofresh" | awk \'{print $2}\'`" | socat unix-connect:/var/lib/haproxy/stats.sock stdio | grep 100' do
127 | its(:stdout) { should contain '100 \(initial 100\)'}
128 | end
129 | end
130 |
131 | describe 'superdb backend should exist' do
132 | describe command "echo 'show stat' | socat unix-connect:/var/lib/haproxy/stats.sock stdio | grep superdb,BACKEND" do
133 | its(:exit_status) { should eq 0 }
134 | end
135 | end
136 |
137 | describe 'superssh-testing backend should have the configured weight' do
138 | describe command 'echo "get weight superssh/`cat /etc/haproxy/haproxy.cfg | grep "server superssh-testing" | awk \'{print $2}\'`" | socat unix-connect:/var/lib/haproxy/stats.sock stdio | grep 77' do
139 | its(:stdout) { should contain '77 \(initial 77\)'}
140 | end
141 | end
142 |
143 | describe 'Curl hellofresh upstream service is working on 80' do
144 | describe command 'curl http://127.0.0.1:80' do
145 | its(:exit_status) { should eq 0 }
146 | its(:stdout) { should contain 'Thank you for using nginx' }
147 | end
148 | end
149 |
150 | describe 'Curl hellofresh service is working on 8080' do
151 | describe command 'curl http://127.0.0.1:8080' do
152 | its(:exit_status) { should eq 0 }
153 | its(:stdout) { should contain 'Thank you for using nginx' }
154 | end
155 | end
156 | end
157 |
--------------------------------------------------------------------------------
/library/s6.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | #
4 | # (c) 2015, Brian Coca
5 | #
6 | # This file is part of Ansible
7 | #
8 | # Ansible is free software: you can redistribute it and/or modify
9 | # it under the terms of the GNU General Public License as published by
10 | # the Free Software Foundation, either version 3 of the License, or
11 | # (at your option) any later version.
12 | #
13 | # Ansible is distributed in the hope that it will be useful,
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 | # GNU General Public License for more details.
17 | #
18 | # You should have received a copy of the GNU General Public License
19 | # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20 |
21 | ANSIBLE_METADATA = {'metadata_version': '1.0',
22 | 'status': ['stableinterface'],
23 | 'supported_by': 'community'}
24 |
25 |
26 | # This is a modification of @bcoca's `svc` module
27 |
28 | DOCUMENTATION = '''
29 | ---
30 | module: s6
31 | author: "James Sumners (@jsumners)"
32 | version_added: "2.3"
33 | short_description: Manage s6 services.
34 | description:
35 | - Controls s6 services on remote hosts using the s6-svc utility.
36 | options:
37 | name:
38 | required: true
39 | description:
40 | - Name of the service to manage.
41 | state:
42 | required: false
43 | choices: [ started, stopped, restarted, killed, once ]
44 | description:
45 | - C(started)/C(stopped) are idempotent actions that will not run
46 | commands unless necessary. C(restarted) will always bounce the
47 | service (stop it, then bring it back up) and C(killed) will always
48 | send the service a kill signal.
49 | C(once) will run a normally downed service once, which is not
50 | really an idempotent operation.
51 | enabled:
52 | required: false
53 | choices: [ "yes", "no" ]
54 | description:
55 | - Whether the service is enabled or not; if disabled it also implies stopped.
56 | service_dir:
57 | required: false
58 | default: /var/service
59 | description:
60 | - directory the s6 supervision tree watches for services
61 | service_src:
62 | required: false
63 | default: /etc/sv
64 | description:
65 | - directory where services are defined, the source of symlinks to service_dir.
66 | '''
67 |
68 | EXAMPLES = '''
69 | # Example action to start s6 service dnscache, if not running
70 | - s6:
71 | name: dnscache
72 | state: started
73 |
74 | # Example action to stop s6 service dnscache, if running
75 | - s6:
76 | name: dnscache
77 | state: stopped
78 |
79 | # Example action to kill s6 service dnscache, in all cases
80 | - s6:
81 | name: dnscache
82 | state: killed
83 |
84 | # Example action to restart s6 service dnscache, in all cases
85 | - s6:
86 | name: dnscache
87 | state: restarted
88 |
89 | # Example action to enable s6 service dnscache
90 | - s6:
91 | name: dnscache
92 | enabled: yes
93 |
94 | # Example using alt service directory location
95 | - s6:
96 | name: dnscache
97 | state: restarted
98 | service_dir: /run/service
99 | '''
100 |
101 | import platform
102 | import shlex
103 | from ansible.module_utils.pycompat24 import get_exception
104 | from ansible.module_utils.basic import *
105 |
106 | def _load_dist_subclass(cls, *args, **kwargs):
107 | '''
108 | Used for derivative implementations
109 | '''
110 | subclass = None
111 |
112 | distro = kwargs['module'].params['dist']
113 |
114 | # get the most specific superclass for this platform
115 | if distro is not None:
116 | for sc in cls.__subclasses__():
117 | if sc.distro is not None and sc.distro == distro:
118 | subclass = sc
119 | if subclass is None:
120 | subclass = cls
121 |
122 | return super(cls, subclass).__new__(subclass)
123 |
124 | class Sv(object):
125 | """
126 | Main class that drives the s6 (daemontools-style) supervision suite; it can be
127 | subclassed and overridden for other derivatives like runit or encore.
128 | """
129 |
130 |
131 | #def __new__(cls, *args, **kwargs):
132 | # return _load_dist_subclass(cls, args, kwargs)
133 |
134 | def __init__(self, module):
135 | self.extra_paths = []
136 | self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
137 |
138 | self.module = module
139 |
140 | self.name = module.params['name']
141 | self.service_dir = module.params['service_dir']
142 | self.service_src = module.params['service_src']
143 | self.enabled = None
144 | self.full_state = None
145 | self.state = None
146 | self.pid = None
147 | self.duration = None
148 | self.wants_down = False
149 |
150 | self.svc_cmd = module.get_bin_path('s6-svc', opt_dirs=self.extra_paths)
151 | self.svstat_cmd = module.get_bin_path('s6-svstat', opt_dirs=self.extra_paths)
152 | self.svc_full = '/'.join([ self.service_dir, self.name ])
153 | self.src_full = '/'.join([ self.service_src, self.name ])
154 |
155 | self.enabled = os.path.lexists(self.svc_full)
156 | if self.enabled:
157 | self.get_status()
158 | else:
159 | self.state = 'stopped'
160 |
161 |
162 | def enable(self):
163 | if os.path.exists(self.src_full):
164 | try:
165 | os.symlink(self.src_full, self.svc_full)
166 | except OSError:
167 | e = get_exception()
168 | self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
169 | else:
170 | self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
171 |
172 | def disable(self):
173 | self.execute_command([self.svc_cmd, 'force-stop',self.src_full])
174 | try:
175 | os.unlink(self.svc_full)
176 | except OSError:
177 | e = get_exception()
178 | self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
179 |
180 | def check_return(self, action, result):
181 | (rc, out, err) = result
182 | if rc != 0:
183 | self.module.fail_json(msg="s6 '{}' failed.".format(action), error=err)
184 | return (rc, out, err)
185 |
186 | def get_status(self):
187 |
188 | (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
189 |
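   | # s6-svstat prints e.g. "up (pid 1234) 56 seconds" or "down (exitcode 0)
   | # 12 seconds, normally up, want up"; extract the pid, the uptime and the
   | # "want down" flag from that output below.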
190 | if err is not None and err:
191 | self.full_state = self.state = err
192 | else:
193 | self.full_state = out
194 |
195 | m = re.search('\(pid (\d+)\)', out)
196 | if m:
197 | self.pid = m.group(1)
198 |
199 | m = re.search(' (\d+)s', out)
200 | if m:
201 | self.duration = m.group(1)
202 |
203 | if re.search('want down', out):
204 | self.wants_down = True
205 |
206 | if re.search('^up', out):
207 | self.state = 'started'
208 | elif re.search('^down', out):
209 | self.state = 'stopped'
210 | else:
211 | self.state = 'unknown'
212 | return
213 |
214 | def started(self):
215 | return self.check_return("started", self.start())
216 |
217 | def start(self):
218 | return self.execute_command([self.svc_cmd, '-u', self.svc_full])
219 |
220 | def stopped(self):
221 | return self.check_return("stopped", self.stop())
222 |
223 | def stop(self):
224 | return self.execute_command([self.svc_cmd, '-d', self.svc_full])
225 |
226 | def once(self):
227 | return self.check_return("started once", self.execute_command([self.svc_cmd, '-O', self.svc_full]))
228 |
229 | # def reloaded(self):
230 | # return self.reload()
231 |
232 | # def reload(self):
233 | # return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
234 |
235 | def restarted(self):
236 | if self.state == "started":
237 | self.killed()
238 | elif self.state == 'unknown':
239 | self.module.fail_json(msg="Service is in unknown state. duno what to do")
240 | # Lets start (dep)
241 | if self.wants_down:
242 | return self.once()
243 | else:
244 | return self.start()
245 |
246 | def restart(self):
247 | return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
248 |
249 | def killed(self):
250 | return self.check_return("killed", self.kill())
251 |
252 | def kill(self):
253 | return self.execute_command([self.svc_cmd, '-k', self.svc_full])
254 |
255 | def execute_command(self, cmd):
256 | try:
257 | (rc, out, err) = self.module.run_command(' '.join(cmd))
258 | except Exception:
259 | e = get_exception()
260 | self.module.fail_json(msg="failed to execute: %s" % str(e))
261 | return (rc, out, err)
262 |
263 | def report(self):
264 | self.get_status()
265 | states = {}
266 | for k in self.report_vars:
267 | states[k] = self.__dict__[k]
268 | return states
269 |
270 | # ===========================================
271 | # Main control flow
272 |
273 | def main():
274 | module = AnsibleModule(
275 | argument_spec = dict(
276 | name = dict(required=True),
277 | state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'once']),
278 | enabled = dict(required=False, type='bool'),
279 | dist = dict(required=False, default='runit'),
280 | service_dir = dict(required=False, default='/var/service'),
281 | service_src = dict(required=False, default='/etc/sv'),
282 | ),
283 | supports_check_mode=True,
284 | )
285 |
286 | module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
287 |
288 | state = module.params['state']
289 | enabled = module.params['enabled']
290 |
291 | sv = Sv(module)
292 | changed = False
293 | orig_state = sv.report()
294 |
295 | if enabled is not None and enabled != sv.enabled:
296 | changed = True
297 | if not module.check_mode:
298 | try:
299 | if enabled:
300 | sv.enable()
301 | else:
302 | sv.disable()
303 | except (OSError, IOError):
304 | e = get_exception()
305 | module.fail_json(msg="Could not change service link: %s" % str(e))
306 |
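   | # The requested state maps to a method of the same name on Sv
   | # (started/stopped/restarted/killed/once), dispatched via getattr below.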
307 | if state is not None and state != sv.state:
308 | changed = True
309 | if not module.check_mode:
310 | getattr(sv,state)()
311 |
312 | module.exit_json(changed=changed, sv=sv.report())
313 |
314 |
315 |
316 |
317 | if __name__ == '__main__':
318 | main()
319 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
2 | # Ansible Consul Role
3 | [](https://travis-ci.org/hellofresh/ansible-consul)
4 |
5 | `consul` is an Ansible role that installs and manages Consul services and client-side load balancing.
6 |
7 |
8 | ## Overview
9 |
10 | This is an opinionated setup of Consul that manages service discovery and client-side load balancing. It deploys HAProxy next to each consumer and uses consul-template to keep the HAProxy configuration in sync with the Consul catalog.
11 |
12 | ```
13 | +--------------+
14 | | Consul UI |
15 | | Consul Server|
16 | | |
17 | +--------------+
18 | (Consul Server)
19 |
20 | +----------------+ +--------------+
21 | |HAproxy | | Consul(Agent)|
22 | |Consul(Agent) | | App(Producer)|
23 | |consul(template)| | |
24 | |App(Consumer) | | |
25 | +----------------+ +--------------+
26 | (Consumer) (Producer)
27 | ```
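   |
   | As a sketch, wiring the three node types together could look like the playbook
   | below (the host group names and the `superapp` service are illustrative; the
   | service definitions themselves live in `consul_services`, see Role Variables
   | and the test variable files for complete examples):
   |
   | ```yaml
   | # site.yml -- illustrative example, not shipped with the role
   | - hosts: consul_servers
   |   roles:
   |     - role: consul
   |       consul_server: true
   |       consul_bootstrap_expect: 1
   |       consul_ui: true
   |
   | - hosts: producers
   |   roles:
   |     - role: consul
   |       consul_producer: true
   |       consul_producer_services: ['superapp']
   |
   | - hosts: consumers
   |   roles:
   |     - role: consul
   |       consul_consumer: true
   |       consul_consumer_services: ['superapp']
   | ```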
28 |
29 | Using with AMI
30 | --------------
31 |
32 | When the role is used to provision an AMI, ensure the following variables are set to these specific values:
33 |
34 | ```yaml
35 | consul_start_service: false # Required: Prevent consul from starting at provision time
36 | consul_node_name: auto # Required: read node name from cloud meta-data
37 | consul_network_bind: auto # Required: read private IP from cloud meta-data
38 | consul_network_autobind: false # Required: disable provision time IP address discovery
39 | consul_node_name_prefix: "service-" # Optional: node name prefix
40 | ```
41 |
42 | Requirements
43 | ------------
44 |
45 | - Developed for Ansible 2.X
46 | - Controller should have python-netaddr installed (e.g. `pip install netaddr`)
47 |
48 | Role Variables
49 | --------------
50 |
51 | ```yaml
52 | ---
53 |
119 | consul_agent_version : "0.7.5"
120 | consul_template_version : "0.18.1"
121 |
122 | consul_node_name : "{{ inventory_hostname }}"
123 |
124 | # Type of installation
125 | consul_server : false
126 | consul_consumer : false
127 | consul_producer : false
128 | consul_ui : false
129 |
130 | #
131 | consul_consumer_services : []
132 | consul_producer_services : []
133 |
134 | # Consul Domain
135 | consul_datacenter : "default"
136 | consul_domain : "consul."
137 |
138 | #consul_start_join : [ "127.0.0.1" ]
139 | consul_servers_list : [ "127.0.0.1" ] # you can use ansible groups "{{ groups['consul_servers'] }}"
140 | consul_rejoin_after_leave : true
141 | consul_leave_on_terminate : false
142 | # Retry Options
143 | consul_retry_join : false
144 | consul_retry_interval : 30s
145 | consul_retry_max : 0
146 | # Consul log
147 | consul_log_level : "INFO"
148 | consul_log_syslog : false
149 |
150 | consul_server_port_server : 8300
151 | consul_http_port : 8500
152 | consul_https_port : -1
153 |
154 | # Consul Network :
155 | consul_network_bind : "" # "0.0.0.0"
156 | consul_network_autobind : true
157 | #consul_network_autobind_range : "192.168.56.0/24"
158 | consul_network_autobind_type : "private" # "" or private or public
159 | consul_client_addr : "127.0.0.1"
160 |
161 | # Consul dir structure
162 | consul_home_dir : "/opt/consul"
163 | consul_bin_dir : "{{ consul_home_dir }}/bin"
164 | consul_tmp_dir : "{{ consul_home_dir }}/tmp"
165 | consul_data_dir : "{{ consul_home_dir }}/data"
166 | consul_template_dir : "{{ consul_home_dir }}/templates"
167 | consul_log_dir : "/var/log/consul"
168 | consul_config_dir : "/etc/consul.d"
169 | # If you leave this empty, only "healthy" and passing services will be returned. You can also use "passing,warning" or "all".
170 | # For more info check https://github.com/hashicorp/consul-template#service
171 | consul_template_service_options : ""
172 |
173 | # Consul files
174 | consul_config_agent_file : "/etc/consul.conf"
175 | consul_config_template_file : "/etc/consul-template.conf"
176 | consul_agent_log_file : "{{ consul_log_dir }}/consul-agent.log"
177 | consul_template_log_file : "{{ consul_log_dir }}/consul-template.log"
178 | consul_template_haproxy_file : "{{ consul_template_dir }}/consul_template.cnf"
179 |
180 | # Consul user/Group
181 | consul_user : "consul"
182 | consul_group : "consul"
183 |
184 | # Consul template
185 | consul_template_consul_server : "127.0.0.1"
186 | consul_template_consul_port : "8500"
187 | consul_template_templates :
188 | - source : "{{ consul_template_haproxy_file }}"
189 | destination : "/etc/haproxy/haproxy.cfg"
190 | command : "{{ consul_bin_dir }}/haproxy-reload"
191 | jtemplate : "haproxy.ctmp.j2"
192 |
193 | consul_template_log_level : "warn"
194 |
195 | consul_encrypt : "Tq/xU3fTPyBRoA4R4CxOKg=="
196 |
197 | ## Telemetry
198 | consul_telemetry : False
199 | consul_statsd_address : ""
200 | consul_statsite_address : ""
201 | consul_statsite_prefix : "consul"
202 | consul_disable_hostname : True
203 |
204 | ## HA Proxy
205 | consul_haproxy_ppa_install   : False # By default, use the distribution-packaged version of HAProxy
206 | consul_haproxy_ppa_url : "ppa:vbernat/haproxy-1.6"
207 | ## Config global
208 | consul_haproxy_user : "haproxy"
209 | consul_haproxy_group : "haproxy"
210 | consul_haproxy_maxconn : "8192"
211 | consul_haproxy_log : [ "/dev/log local0", "/dev/log local1 info" ]
212 |
213 | consul_haproxy_stats_socket : "socket /var/lib/haproxy/stats.sock group {{ consul_group }} mode 660 level admin"
214 | ## Extra global key, value
215 | consul_haproxy_extra_global :
216 | chroot: "/var/lib/haproxy"
217 | ## Config defaults
218 | consul_haproxy_default_log : "global"
219 | consul_haproxy_default_options :
220 | - "dontlognull"
221 | - "log-separate-errors"
222 | - "redispatch"
223 | consul_haproxy_default_timeout :
224 | - "connect 5s"
225 | - "check 5s"
226 | - "client 120s"
227 | - "server 120s"
228 | consul_haproxy_default_maxconn : 2000
229 | consul_haproxy_default_retries : 3
230 | consul_haproxy_default_balance : "roundrobin"
231 | ## Extra default key, value
232 | #consul_haproxy_extra_default :
233 |
234 | consul_haproxy_default_server_options : "check inter 10s fastinter 5s downinter 8s rise 3 fall 2"
235 | ## Config Stats page by default HAproxy will have default stats
236 | consul_haproxy_stats_enable : True
237 | consul_haproxy_stats_mode : "http"
238 | consul_haproxy_stats_port : 3212
239 | consul_haproxy_stats_uri : "/"
240 |
241 | # Ad-hoc commands RUN WITH CARE
242 | consul_adhoc_build_raft_peers : False
243 | ```
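In practice you rarely set all of these. A typical override in `group_vars` might look like the following sketch; all values are illustrative, and `vault_consul_encrypt` is a hypothetical vaulted variable:

```yaml
# Illustrative group_vars override sketch.
consul_datacenter: "eu-west"
consul_servers_list: "{{ groups['consul_servers'] }}"
consul_retry_join: true
consul_retry_max: 5
consul_encrypt: "{{ vault_consul_encrypt }}"  # hypothetical vaulted key instead of the shipped default
```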
244 |
245 |
246 | ## Usage
247 |
248 | Service definition
249 | ----
250 | The role expects all services to be listed in the `consul_services` dictionary:
251 |
252 | ```yaml
253 | consul_services:
254 | hello-app:
255 | name: "hello-app"
256 | tags:
257 | - "env:live"
258 | port: 80
259 | local_port: 8032
260 | check:
261 | script: "curl localhost:80 > /dev/null 2>&1"
262 | interval: "10s"
263 | haproxy:
264 | server_options: "check inter 10s fastinter 5s downinter 8s rise 3 fall 2"
265 |
266 | hello-db:
267 | name: "hello-db"
268 | tags:
269 | - "env:live"
270 | port: 3306
271 | check:
272 | script: "netstat -ant | grep 3306 | grep -v grep > /dev/null 2>&1"
273 | interval: "10s"
274 | haproxy:
275 | server_options: "check inter 10s fastinter 5s downinter 8s rise 3 fall 2"
276 | ```
277 |
278 | Service example
279 | ---------------
280 | ```yaml
281 | hello-app:
282 | name: "hello-app"
283 | tags:
284 | - "env:live"
285 | port: 80
286 | local_port: 8032
287 | check:
288 | script: "curl localhost:80 > /dev/null 2>&1"
289 | interval: "10s"
290 | haproxy:
291 | server_options: "check inter 10s fastinter 5s downinter 8s rise 3 fall 2"
292 | ```
293 | `hello-app`:
294 |
295 | `name`: service name to announce
296 |
297 | `tags`: list of tags to filter by (see tags section)
298 |
299 | `port`: port number the server side listens on
300 |
301 | `local_port`: port number the client-side load-balancing agent (HAProxy) will listen on; if absent, it defaults to `port`
302 |
303 | `check`: health-check script and interval; most of the time as simple as in this example
304 |
305 | `haproxy`: haproxy server check definition (https://cbonte.github.io/haproxy-dconv/1.7/configuration.html#5.2-check)
306 |
307 |
308 | ### Producer configuration
309 | Define the list of services you produce (offer for consumers to connect to):
310 | ```yaml
311 | consul_producer: True
312 | consul_producer_services:
313 | - hello-app
314 | - other-service
315 | ```
316 | The role will install the Consul agent and configure it to announce the specified services.
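Put together, a minimal producer play could look like this sketch; the host group and role name are assumptions, and `consul_services` from the section above must also be defined (e.g. in group_vars):

```yaml
# Hypothetical producer play announcing hello-app.
- hosts: producers
  become: true
  roles:
    - role: ansible-consul
      consul_producer: True
      consul_producer_services:
        - hello-app
```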
317 |
318 | ### Consumer configuration
319 | Define the list of services you consume (want to connect to):
320 | ```yaml
321 | consul_consumer: True
322 | consul_consumer_services:
323 | - hello-app
324 | - hello-db
325 | ```
326 | The role will configure the Consul agent, consul-template and HAProxy. HAProxy will listen on the specified ports, so you can connect to a specific service via `127.0.0.1:port`.
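For instance, with the example services above, a consumer-side application points at the local HAProxy endpoints instead of any remote address. A hypothetical application config sketch:

```yaml
# Hypothetical app settings on a consumer host.
hello_app_url: "http://127.0.0.1:8032"  # hello-app's local_port
hello_db_host: "127.0.0.1"
hello_db_port: 3306                     # hello-db sets no local_port, so it equals port
```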
327 |
328 | ### Extended syntax
329 | If you want to specify additional parameters, use the extended syntax:
330 | ```yaml
331 | ---
332 | consul_producer: True
333 | consul_producer_services:
334 | # simple syntax
335 | - hello-app
336 | # extended syntax
337 | - name: hello-app
338 | add_tags: ['host-specific-tag']
339 | ```
340 |
341 |
342 | ```yaml
343 | ---
344 | consul_consumer: True
345 | consul_consumer_services:
346 | # simple syntax
347 | - hello-app
348 | # extended syntax
349 | - name: hello-app
350 | tags_contains: "test"
351 | ```
352 |
353 |
354 | ### Using tags
355 |
356 | #### Producer
357 | You can specify additional tags per group/host. These tags will be added to the set of tags globally defined for the service.
358 |
359 | ```yaml
360 | consul_producer: True
361 | consul_producer_services:
362 | - name: hello-app
363 | add_tags: ['host-specific-tag']
364 | ```
365 |
366 | #### Consumer
367 | On the consumer side, you can use additional parameters to filter services/nodes by tags.
368 |
369 | ```yaml
370 | consul_consumer: True
371 | consul_consumer_services:
372 | - name: hello-app
373 | tags_contains: "test"
374 | tag_regex: "v1.1.*"
375 | ```
376 |
377 |
378 | Roadmap
379 | -----
380 | - Support agent retry
381 | - Support template retry
382 |
383 | License
384 | -------
385 |
386 | MIT
387 |
388 |
389 | Contributors (sorted alphabetically by first name)
390 | ------------------
391 | * [Adham Helal](https://github.com/ahelal)
392 |
393 |
394 | Snippets
395 | -------
396 | Some code snippets were taken from various sources; we will do our best to credit them.
397 |
398 | --------
399 |
404 | HelloFresh - More Than Food.
405 |
406 |
--------------------------------------------------------------------------------