├── docs ├── images │ └── rancher-example-01.png └── rancher-example.md ├── docker-compose.yml ├── docker-keepalived ├── Dockerfile ├── .bashrc ├── keepalived.conf └── keepalived.sh ├── LICENSE └── README.md /docs/images/rancher-example-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeoAssist/docker-keepalived/HEAD/docs/images/rancher-example-01.png -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # SERVICES 2 | keepalived: 3 | labels: 4 | io.rancher.scheduler.global: 'true' 5 | environment: 6 | - VIRTUAL_IP=172.17.8.150 7 | - VIRTUAL_MASK=24 8 | - CHECK_IP=any 9 | - CHECK_PORT=80 10 | - VRID=150 11 | - INTERFACE=eth0 12 | tty: true 13 | build: docker-keepalived 14 | stdin_open: true 15 | net: host 16 | cap_add: 17 | - NET_ADMIN 18 | - NET_BROADCAST 19 | -------------------------------------------------------------------------------- /docker-keepalived/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.8 2 | LABEL maintainer "Neoassist and Steven Iveson " 3 | LABEL source "https://github.com/NeoAssist/docker-keepalived" 4 | LABEL branch "master" 5 | COPY Dockerfile /Dockerfile 6 | COPY .bashrc /root/.bashrc 7 | 8 | RUN apk --update -t add keepalived iproute2 grep bash tcpdump sed && \ 9 | rm -f /var/cache/apk/* /tmp/* && \ 10 | rm -f /sbin/halt /sbin/poweroff /sbin/reboot 11 | 12 | COPY keepalived.sh /usr/bin/keepalived.sh 13 | COPY keepalived.conf /etc/keepalived/keepalived.conf 14 | 15 | RUN chmod +x /usr/bin/keepalived.sh; chown root:root /usr/bin/keepalived.sh 16 | 17 | ENTRYPOINT ["/usr/bin/keepalived.sh"] 18 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT 
License (MIT) 2 | 3 | Copyright (c) 2016 NeoAssist.com 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docker-keepalived/.bashrc: -------------------------------------------------------------------------------- 1 | # General Aliases 2 | alias apk='apk --progress' 3 | alias ll="ls -ltan" 4 | alias status="pidof keepalived | kill -s USR1; cat /tmp/keepalived.data" 5 | 6 | alias hosts='cat /etc/hosts' 7 | alias ..="cd .." 8 | alias ...="cd ../.." 9 | alias ....="cd ../../.." 
10 | alias untar="tar xzvkf" 11 | alias mv="mv -nv" 12 | alias cp="cp -i" 13 | alias ip4="ip -4 addr" 14 | alias ip6="ip -6 addr" 15 | 16 | COL_YEL="\[\e[1;33m\]" 17 | COL_GRA="\[\e[0;37m\]" 18 | COL_WHI="\[\e[1;37m\]" 19 | COL_GRE="\[\e[1;32m\]" 20 | COL_RED="\[\e[1;31m\]" 21 | 22 | # Bash Prompt 23 | if test "$UID" -eq 0 ; then 24 | _COL_USER=$COL_RED 25 | _p=" #" 26 | else 27 | _COL_USER=$COL_GRE 28 | _p=">" 29 | fi 30 | COLORIZED_PROMPT="${_COL_USER}\u${COL_WHI}@${COL_YEL}\h${COL_WHI}:\w${_p} \[\e[m\]" 31 | case $TERM in 32 | *term | rxvt | screen ) 33 | PS1="${COLORIZED_PROMPT}\[\e]0;\u@\h:\w\007\]" ;; 34 | linux ) 35 | PS1="${COLORIZED_PROMPT}" ;; 36 | * ) 37 | PS1="\u@\h:\w${_p} " ;; 38 | esac 39 | -------------------------------------------------------------------------------- /docker-keepalived/keepalived.conf: -------------------------------------------------------------------------------- 1 | global_defs { 2 | #Hostname will be used by default 3 | #router_id your_name 4 | vrrp_version 2 5 | vrrp_garp_master_delay 1 6 | vrrp_garp_master_refresh 60 7 | #Uncomment the next line if you'd like to use unique multicast groups 8 | #vrrp_mcast_group4 224.0.0.{{VRID}} 9 | script_user root 10 | enable_script_security 11 | } 12 | 13 | vrrp_script chk_haproxy { 14 | script "{{CHECK_SCRIPT}}" 15 | timeout 1 16 | interval 1 # check every 1 second 17 | fall 2 # require 2 failures for KO 18 | rise 2 # require 2 successes for OK 19 | } 20 | 21 | vrrp_instance lb-vips { 22 | state BACKUP 23 | interface {{INTERFACE}} 24 | virtual_router_id {{VRID}} 25 | priority 100 26 | advert_int 1 27 | nopreempt #Prevent fail-back 28 | track_script { 29 | chk_haproxy 30 | } 31 | authentication { 32 | auth_type PASS 33 | auth_pass blahblah 34 | } 35 | virtual_ipaddress { 36 | {{VIRTUAL_IP}}/{{VIRTUAL_MASK}} dev {{INTERFACE}} 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /docker-keepalived/keepalived.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Setup check script 4 | if [[ -z ${CHECK_SCRIPT} ]]; then 5 | if [[ -z ${CHECK_IP} ]] || [[ ${CHECK_IP} = 'any' ]]; then 6 | CHECK_SCRIPT="iptables -t nat -nL CATTLE_PREROUTING | grep ':${CHECK_PORT}'" 7 | else 8 | CHECK_SCRIPT="iptables -nL | grep '${CHECK_IP}' && iptables -t nat -nL CATTLE_PREROUTING | grep ':${CHECK_PORT}'" 9 | fi 10 | fi 11 | 12 | # Substitute variables in config file. 13 | /bin/sed -i "s/{{VIRTUAL_IP}}/${VIRTUAL_IP}/g" /etc/keepalived/keepalived.conf 14 | /bin/sed -i "s/{{VIRTUAL_MASK}}/${VIRTUAL_MASK}/g" /etc/keepalived/keepalived.conf 15 | /bin/sed -i "s/{{CHECK_SCRIPT}}/${CHECK_SCRIPT}/g" /etc/keepalived/keepalived.conf 16 | /bin/sed -i "s/{{VRID}}/${VRID}/g" /etc/keepalived/keepalived.conf 17 | /bin/sed -i "s/{{INTERFACE}}/${INTERFACE}/g" /etc/keepalived/keepalived.conf 18 | 19 | # Make sure we react to these signals by running stop() when we see them - for clean shutdown 20 | # And then exiting 21 | trap "stop; exit 0;" SIGTERM SIGINT 22 | 23 | stop() 24 | { 25 | # We're here because we've seen SIGTERM, likely via a Docker stop command or similar 26 | # Let's shutdown cleanly 27 | echo "SIGTERM caught, terminating keepalived process..." 28 | # Record PIDs 29 | pid=$(pidof keepalived) 30 | # Kill them 31 | kill -TERM $pid > /dev/null 2>&1 32 | # Wait until processes have been killed. 33 | # Use 'wait $pid' instead if you dislike using sleep (the wait command has less OS support) 34 | sleep 1 35 | echo "Terminated." 36 | exit 0 37 | } 38 | 39 | # Make sure the variables we need to run are populated and (roughly) valid 40 | 41 | if ! [[ $VIRTUAL_IP =~ ^(([1-9]|[1-9][0-9]|1[0-9]{2}|2[0-2][0-3])\.)(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-5][0-5])\.){2}([1-9]|[1-9][0-9]|1[0-9]{2}|2[0-5][0-5])$ ]]; then 42 | echo "The VIRTUAL_IP environment variable is null or not a valid IP address, exiting..." 43 | exit 1 44 | fi 45 | 46 | if ! 
[[ $VIRTUAL_MASK =~ ^([0-9]|[1-2][0-9]|3[0-2])$ ]]; then 47 | echo "The VIRTUAL_MASK environment variable is null or not a valid subnet mask, exiting..." 48 | exit 1 49 | fi 50 | 51 | if ! [[ $VRID =~ ^([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-5][0-5])$ ]]; then 52 | echo "The VRID environment variable is null or not a number between 1 and 255, exiting..." 53 | exit 1 54 | fi 55 | 56 | # Possibly some interfaces are named and don't end in a number so beware of this one 57 | if ! [[ $INTERFACE =~ ^.*[0-9]$ ]]; then 58 | echo "The INTERFACE environment variable is null or doesn't end in a number, exiting..." 59 | exit 1 60 | fi 61 | 62 | # Make sure to clean up VIP before start (in case of ungraceful shutdown) 63 | if [[ $(ip addr | grep $INTERFACE | grep $VIRTUAL_IP) ]] 64 | then 65 | ip addr del $VIRTUAL_IP/$VIRTUAL_MASK dev $INTERFACE 66 | fi 67 | 68 | # This loop runs till until we've started up successfully 69 | while true; do 70 | 71 | # Check if Keepalived is running by recording it's PID (if it's not running $pid will be null): 72 | pid=$(pidof keepalived) 73 | 74 | # If $pid is null, do this to start or restart Keepalived: 75 | while [ -z "$pid" ]; do 76 | #Obviously optional: 77 | #echo "Starting Confd population of files..." 78 | #/usr/bin/confd -onetime 79 | echo "Displaying resulting /etc/keepalived/keepalived.conf contents..." 80 | cat /etc/keepalived/keepalived.conf 81 | echo "Starting Keepalived in the background..." 82 | /usr/sbin/keepalived --dont-fork --dump-conf --log-console --log-detail --vrrp & 83 | # Check if Keepalived is now running by recording it's PID (if it's not running $pid will be null): 84 | pid=$(pidof keepalived) 85 | 86 | # If $pid is null, startup failed; log the fact and sleep for 2s 87 | # We'll then automatically loop through and try again 88 | if [ -z "$pid" ]; then 89 | echo "Startup of Keepalived failed, sleeping for 2s, then retrying..." 
90 | sleep 2 91 | fi 92 | 93 | done 94 | 95 | # Break this outer loop once we've started up successfully 96 | # Otherwise, we'll silently restart and Rancher won't know 97 | break 98 | 99 | done 100 | 101 | while true; do 102 | 103 | # Check if Keepalived is STILL running by recording it's PID (if it's not running $pid will be null): 104 | pid=$(pidof keepalived) 105 | # If it is not, lets kill our PID1 process (this script) by breaking out of this while loop: 106 | # This ensures Docker 'sees' the failure and handles it as necessary 107 | if [ -z "$pid" ]; then 108 | echo "Keepalived is no longer running, exiting so Docker can restart the container..." 109 | break 110 | fi 111 | 112 | # If it is, give the CPU a rest 113 | sleep 0.5 114 | 115 | done 116 | 117 | exit 1 118 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # docker-keepalived 2 | --- 3 | ## Purpose 4 | 5 | A Dockerized Keepalived designed for simple high availability (HA) in multi-host container deployments. [Keepalived](http://www.keepalived.org/) provides failover for one or more Virtual IP addresses (VIPs) so they are always available, even if a host fails. 6 | 7 | It has been designed specifically for use within [Rancher](http://rancher.com/) environments using HAProxy 'front ends', but should work with most types of multi-host container deployments that require HA and IP address failover for any kind of listening service (Apache, Nginx etc.). 8 | 9 | ## Services & Address Binding 10 | 11 | HAProxy (and most other listening services) won't bind to an address that doesn't exist within the host's network stack. As Keepalived will only host any particular VIP on a single host, the service(s) on the remaining ones will not be able to bind to the VIP address and will likely fail. 
Keepalived on those hosts will also fail as it is performing a health check on the service itself (by checking for a listener on the VIP address and the service port you specify). 12 | 13 | In order to avoid this issue, you can either; 14 | 15 | - Configure HAProxy (or whatever service you are using) without an address (so it binds to all of them) with, for example; 16 | - `bind :80` 17 | - `bind *:80` 18 | - `bind 0.0.0.0:80` 19 | 20 | - Enable binding to non-existent addresses by setting the `net.ipv4.ip_nonlocal_bind` kernel parameter to 1 21 | 22 | ### Enabling Non-local Binding - Most Distros 23 | 24 | On Debian, RHEL & most Linux variants simply add `net.ipv4.ip_nonlocal_bind=1` to the end of the **/etc/sysctl.conf** file and force a reload of the file with the `[sudo] sysctl -p` command 25 | 26 | ### Enabling Non-local Binding - RancherOS v0.5.0 and later 27 | 28 | Edit the **/var/lib/rancher/conf/cloud-config.d/user_config.yml** file and add this in an appropriate place: 29 | ``` 30 | rancher: 31 | sysctl: 32 | net.ipv4.ip_nonlocal_bind: 1 33 | ``` 34 | 35 | ### Enabling Non-local Binding - RancherOS v0.4.5 and earlier 36 | 37 | If you're not using the default console, see the prior section for Most Distros. If you are, read on. 38 | 39 | If you don't already have a **/opt/rancher/bin/start.sh** startup file, edit the **/var/lib/rancher/conf/cloud-config.d/user_config.yml** file and add this to it to create a suitable file which will run the `sysctl -p` command: 40 | ``` 41 | write_files: 42 | - encoding: b64 43 | content: IyEvYmluL3NoCnN5c2N0bCAtcApleGl0Cg== 44 | owner: root:root 45 | path: /opt/rancher/bin/start.sh 46 | permissions: '0744' 47 | ``` 48 | If you do already have this file, add the `sysctl -p` command to it.
49 | 50 | In either case, add this to the end of the **/var/lib/rancher/conf/cloud-config.d/user_config.yml** file to create a suitable **/etc/sysctl.conf** file: 51 | ``` 52 | write_files: 53 | - encoding: b64 54 | content: bmV0LmlwdjQuY29uZi5hbGwuYXJwX2FjY2VwdCA9IDEgCm5ldC5pcHY0LmlwX25vbmxvY2FsX2JpbmQgPSAxIApuZXQuaXB2NC5jb25mLmFsbC5wcm9tb3RlX3NlY29uZGFyaWVzID0gMQo= 55 | owner: root:root 56 | path: /etc/sysctl.conf 57 | permissions: '0644' 58 | ``` 59 | Reboot to have the files written and executed. 60 | 61 | ### Enabling Non-local Binding - CoreOS 62 | 63 | Use this command: `#/bin/sh -c "/usr/sbin/sysctl -w net.ipv4.ip_nonlocal_bind=1` or add this to a unit file with a oneshot execution. 64 | 65 | *Other distributions may have slightly different commands or syntax...google is your friend!* 66 | 67 | **This is still a work in progress, constantly being changed and probably not ready, even for any real testing...** 68 | 69 | ## Health Checks 70 | 71 | If you'd like the health check to only check for something listening on a specified port, rather than an address and port, only set the CHECK_PORT variable, not the CHECK_IP variable. 72 | 73 | If you do want to check the address and port combination, set the CHECK_IP variable to the same value as the VIRTUAL_IP variable. 74 | 75 | If you want to use your own custom script, set the CHECK_SCRIPT variable. 76 | 77 | If using a custom script, note that due to the way Rancher works, you cannot perform a port check for another Rancher service which has a port mapped/bound to/from the host. This won't show up in the output of the `ss` or `netstat` commands. To overcome this; 78 | - Switch to a different kind of monitor, such as a HTTP check. 79 | - Configure the other service to use host mode. 
80 | 81 | If you plan on using some other kind of health check which relies on the ability to use DNS names to connect to another Rancher service, ensure you add the `io.rancher.container.dns` label to this service's compose definition and set its value to `'true'`. 82 | 83 | ## Status Checking 84 | 85 | You can check the status of Keepalived by opening an interactive shell in the container and typing `status`. This is an alias for `pidof keepalived | kill -s USR1; cat /tmp/keepalived.data`. As you can surmise, sending the USR1 signal to the keepalived process causes it to write a status file to **/tmp/keepalived.data**. 86 | 87 | You can also confirm Keepalived is running on any particular host by confirming a process is listening on protocol number 112 with command `ss -lwn`. You can confirm that process is keepalived with `sudo ss -lwnp`. 88 | 89 | ## Using the Docker Run Command 90 | 91 | If you'd like to quickly test the built image at the CLI using the `docker run` command, something like this will work: 92 | 93 | ``` 94 | docker run -d --privileged --net host --name keepalived -e VIRTUAL_IP=10.11.12.99 -e CHECK_PORT=443 -e VIRTUAL_MASK=24 -e VRID=99 -e INTERFACE=eth0 docker-keepalived 95 | ``` 96 | 97 | ## Thanks & Inspiration 98 | 99 | This has come to be as a result of a discussion held on the Rancher Forums (https://forums.rancher.com/t/rancher-keepalived/1508/16). 100 | 101 | Most if not all credit is due to Steven Iveson (@sjiveson ) - including the explanation above to enable nonlocal_bind. His scripts are at the heart of this. Thanks also to @fabiorauber for bringing up the issue so we could improve the readme.
102 | -------------------------------------------------------------------------------- /docs/rancher-example.md: -------------------------------------------------------------------------------- 1 | # High-availability load balancer with Rancher 2 | 3 | This example demonstrates one way of setting up a high-availability web service 4 | using *Docker*, *Rancher* and *Keepalived*. The following diagram is a 5 | representation of what we are going to achieve: 6 | 7 | ![Overall diagram](images/rancher-example-01.png) 8 | 9 | ## Server/hosts infrastructure 10 | In this example, we are using 4 servers. In the concrete example this is based 11 | on, two are virtual (for running the load balancer containers) and two are 12 | physical (to run multiple web applications which are CPU and RAM intensive). 13 | While this is the concrete set up, you can have 4 virtual machines, or 4 14 | physical machines. It could possibly even run well with only 2 machines (virtual 15 | or physical). 16 | 17 | For the purpose of scheduling container execution through Rancher, each server 18 | is configured with a `role` label. The two virtual machines have the `role` 19 | label set to `load-balancer` and the two physical are set to `apps`. 20 | 21 | ## Web application service 22 | The web application service is your business payload - whatever it is. In my 23 | concrete example, it is a Java based web application. The service is configured 24 | in Rancher, setting the appropriate environment variables for the application, 25 | etc. 26 | 27 | The key point on this service is the scheduling. In the Rancher's scheduling 28 | tab, we select *the host `must` have a `host label` of `role` = `app`*. We also 29 | tell Rancher to run 2 containers - however this could be *one container per host* 30 | as well.
31 | 32 | In my example, I could have more servers set up to run the applications and I 33 | don't want more than 2 container instances per service, regardless of how many 34 | servers I add to the pool. I run multiple services and each service will start 35 | 2 containers on any which 2 hosts that have the `role = app` label. To that 36 | effect, I also set a scheduling rule that states: *The host `should not` have a 37 | `service with the name` `(fill in the blank)`*. That way, I can distribute the 38 | load around. 39 | 40 | ## Load balancer service 41 | Next comes the load balancer service. This is done using the standard Rancher 42 | load balancer service. An instance is configured, mapping the requests to the 43 | web application service. 44 | 45 | The key point here is again the scheduling. Here we select *the host `must` have 46 | a `host label` of `role` = `load-balancer`* so that Rancher starts the containers 47 | for the load balancer on the right servers. I also tell Rancher to start a 48 | container instance on every hosts (which match the scheduling rule). 49 | 50 | With this done, Rancher starts two load balancer containers - one on each host. 51 | This is great because now we have two load balancers, but they are both different 52 | IP addresses. 53 | 54 | We could do a DNS round-robin, that would be ok but wouldn't give us a real 55 | failsafe scenario or even a zero-downtime upgrade. That's why we use *Keepalived*. 56 | 57 | ## Keepalived 58 | What we want to do now is make it that one of the two servers on which we run 59 | the load balancer, share a single *public* IP address. *Keepalived* makes it 60 | easy to set that up. Now we could run the daemon locally on each machine but 61 | let's do this as a Docker container. 62 | 63 | In Rancher, we create a *Keepalived* service, using the same scheduling rules 64 | as the load balancer. 
It would be great to be able to put a sidekick container 65 | attached to the load balancer service but that's not available. 66 | 67 | So to repeat, our scheduling rules are *the host `must` have 68 | a `host label` of `role` = `load-balancer`* and we tell rancher to start a 69 | container instance on every hosts (which match the scheduling rule). 70 | 71 | For *Keepalived* to work however, we must also set the *Networking* to *Host* 72 | instead of *Managed* and add the `NET_ADMIN` and `NET_BROADCAST` *Capabilities* 73 | under *Security/Host*. 74 | 75 | ### Keepalived container image 76 | You can use the `Dockerfile` provided in this project for this or build your 77 | own based on it. 78 | 79 | In my example, I used a static configuration file which is stored in a Git 80 | repository and is downloaded when the container first starts. This is very 81 | peculiar to my infrastructure so won't be detailed here, however, here is the 82 | configuration file that is used: 83 | 84 | ``` 85 | global_defs { 86 | router_id {{ROUTER_ID}} 87 | vrrp_version 2 88 | vrrp_garp_master_delay 1 89 | vrrp_garp_master_refresh 90 | notification_email { 91 | joe@example.com 92 | } 93 | notification_email_from keepalived@example.com 94 | smtp_server smtp.example.com 95 | smtp_connect_timeout 30 96 | } 97 | 98 | vrrp_script chk_haproxy { 99 | script "ss -ltn | grep 80" 100 | timeout 3 # timeout script if it takes more than 3s 101 | interval 2 # check every 2 seconds 102 | fall 2 # require 2 failures for KO 103 | rise 2 # require 2 successes for OK 104 | } 105 | 106 | vrrp_instance apps-vips { 107 | state BACKUP 108 | interface eth0 109 | virtual_router_id 1 110 | priority 100 111 | advert_int 1 112 | nopreempt 113 | dont_track_primary 114 | track_script { 115 | chk_haproxy 116 | } 117 | authentication { 118 | auth_type PASS 119 | auth_pass incommsys-vips 120 | } 121 | virtual_ipaddress { 122 | 10.10.10.2/24 dev eth0 123 | } 124 | smtp_alert 125 | } 126 | ``` 127 | 128 | ``{{ROUTER_ID}}`` 
is replaced at the container start-up with the container 129 | hostname: 130 | 131 | ``` 132 | ROUTER_ID=$(hostname) 133 | echo "Setting Router ID to: $ROUTER_ID" 134 | sed -i "s/{{ROUTER_ID}}/${ROUTER_ID}/" /etc/keepalived/keepalived.conf 135 | ``` 136 | 137 | Once the containers are started, they will contact each other, establish a 138 | *MASTER* and you will be able to access the application, through the load 139 | balancer, by browsing to the virtual IP address (`10.10.10.2` in the example 140 | configuration above). 141 | 142 | ## References 143 | 144 | - [Rancher + Keepalived](https://forums.rancher.com/t/rancher-keepalived/1508/23) 145 | - [VRRP On Linux Using Keepalived - The Basics](https://packetpushers.net/vrrp-linux-using-keepalived-2/) 146 | - [Fix for: Keepalived router enters fault state on link down](http://s.co.tt/2014/06/06/fix-for-keepalived-router-enters-fault-state-on-link-down/) 147 | - [High-availability with HAProxy and keepalived on Ubuntu 12.04](https://deviantony.wordpress.com/2014/07/18/load-balancing-and-high-availability-with-haproxy-and-keepalived/) 148 | - [keepalived.conf.SYNOPSIS](https://github.com/acassen/keepalived/blob/master/doc/keepalived.conf.SYNOPSIS) 149 | - [Virtual Router Redundancy Protocol](https://en.wikipedia.org/wiki/Virtual_Router_Redundancy_Protocol) --------------------------------------------------------------------------------