├── jq ├── node.cnf ├── Dockerfile ├── report_status.sh ├── entrypoint.sh └── README.md /jq: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/severalnines/galera-docker-pxc56/HEAD/jq -------------------------------------------------------------------------------- /node.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | 3 | datadir=/var/lib/mysql 4 | 5 | default_storage_engine=InnoDB 6 | binlog_format=ROW 7 | 8 | innodb_flush_log_at_trx_commit = 0 9 | innodb_flush_method = O_DIRECT 10 | innodb_file_per_table = 1 11 | innodb_autoinc_lock_mode=2 12 | 13 | bind_address = 0.0.0.0 14 | 15 | wsrep_slave_threads=2 16 | wsrep_cluster_address=gcomm:// 17 | wsrep_provider=/usr/lib64/galera3/libgalera_smm.so 18 | wsrep_node_address=WSREP_NODE_ADDRESS 19 | 20 | wsrep_cluster_name=Theistareykjarbunga 21 | 22 | wsrep_sst_method=xtrabackup-v2 23 | wsrep_sst_auth="root:" 24 | 25 | 26 | !includedir /etc/my.cnf.d 27 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:7 2 | MAINTAINER Severalnines 3 | 4 | RUN rpmkeys --import https://www.percona.com/downloads/RPM-GPG-KEY-percona 5 | 6 | ARG PXC_VERSION 7 | ENV PXC_VERSION ${PS_VERSION:-5.6.32} 8 | 9 | RUN yum install -y http://www.percona.com/downloads/percona-release/redhat/0.1-3/percona-release-0.1-3.noarch.rpm 10 | RUN yum install -y which Percona-XtraDB-Cluster-56-${PXC_VERSION} 11 | 12 | ADD node.cnf /etc/my.cnf 13 | VOLUME /var/lib/mysql 14 | 15 | COPY entrypoint.sh /entrypoint.sh 16 | COPY report_status.sh /report_status.sh 17 | COPY jq /usr/bin/jq 18 | RUN chmod a+x /usr/bin/jq 19 | 20 | ENTRYPOINT ["/entrypoint.sh"] 21 | 22 | EXPOSE 3306 4567 4568 23 | ONBUILD RUN yum update -y 24 | 25 | CMD [""] 26 | 
-------------------------------------------------------------------------------- /report_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Report Galera status to etcd periodically. 3 | # report_status.sh [mysql user] [mysql password] [cluster name] [interval] [comma separated etcd hosts] 4 | # Example: 5 | # report_status.sh root myS3cret galera_cluster 15 192.168.55.111:2379,192.168.55.112:2379,192.168.55.113:2379 6 | 7 | USER=$1 8 | PASSWORD=$2 9 | CLUSTER_NAME=$3 10 | TTL=$4 11 | ETCD_HOSTS=$5 12 | 13 | function check_etcd() 14 | { 15 | etcd_hosts=$(echo $ETCD_HOSTS | tr ',' ' ') 16 | flag=1 17 | 18 | # Loop to find a healthy etcd host 19 | for i in $etcd_hosts 20 | do 21 | curl -s http://$i/health > /dev/null || continue 22 | if curl -s http://$i/health | jq -e 'contains({ "health": "true"})' > /dev/null; then 23 | healthy_etcd=$i 24 | flag=0 25 | break 26 | fi 27 | done 28 | 29 | # Flag is 0 if there is a healthy etcd host 30 | [ $flag -ne 0 ] && echo "report>> Couldn't reach healthy etcd nodes." 31 | } 32 | 33 | function report_status() 34 | { 35 | var=$1 36 | 37 | if [ ! -z $var ]; then 38 | check_etcd 39 | 40 | URL="http://$healthy_etcd/v2/keys/galera/$CLUSTER_NAME" 41 | output=$(mysql --user=$USER --password=$PASSWORD -A -Bse "show status like '$var'" 2> /dev/null) 42 | key=$(echo $output | awk {'print $1'}) 43 | value=$(echo $output | awk {'print $2'}) 44 | ipaddr=$(hostname -I | awk {'print $1'}) 45 | 46 | if [ ! 
-z $value ]; then 47 | curl -s $URL/$ipaddr/$key -X PUT -d "value=$value&ttl=$TTL" > /dev/null 48 | fi 49 | fi 50 | 51 | } 52 | 53 | while true; 54 | do 55 | report_status wsrep_local_state_comment 56 | report_status wsrep_last_committed 57 | # report every ttl - 2 to ensure value does not expired 58 | sleep $(($TTL - 2)) 59 | done 60 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # if command starts with an option, prepend mysqld 5 | if [ "${1:0:1}" = '-' ]; then 6 | CMDARG="$@" 7 | fi 8 | 9 | [ -z "$TTL" ] && TTL=30 10 | 11 | if [ -z "$CLUSTER_NAME" ]; then 12 | echo >&2 'Error: You need to specify CLUSTER_NAME' 13 | exit 1 14 | fi 15 | # Get config 16 | DATADIR="$("mysqld" --verbose --help 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')" 17 | echo "Content of $DATADIR:" 18 | ls -al $DATADIR 19 | 20 | if [ ! -s "$DATADIR/grastate.dat" ]; then 21 | if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" -a -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then 22 | echo >&2 'error: database is uninitialized and password option is not specified ' 23 | echo >&2 ' You need to specify one of MYSQL_ROOT_PASSWORD, MYSQL_ALLOW_EMPTY_PASSWORD and MYSQL_RANDOM_ROOT_PASSWORD' 24 | exit 1 25 | fi 26 | mkdir -p "$DATADIR" 27 | chown -R mysql:mysql "$DATADIR" 28 | 29 | echo 'Running mysql_install_db' 30 | mysql_install_db --user=mysql --datadir="$DATADIR" --rpm --keep-my-cnf 31 | echo 'Finished mysql_install_db' 32 | 33 | mysqld --user=mysql --datadir="$DATADIR" --skip-networking & 34 | pid="$!" 35 | 36 | mysql=( mysql --protocol=socket -uroot ) 37 | 38 | for i in {30..0}; do 39 | if echo 'SELECT 1' | "${mysql[@]}" &> /dev/null; then 40 | break 41 | fi 42 | echo 'MySQL init process in progress...' 43 | sleep 1 44 | done 45 | if [ "$i" = 0 ]; then 46 | echo >&2 'MySQL init process failed.' 
47 | exit 1 48 | fi 49 | 50 | # sed is for https://bugs.mysql.com/bug.php?id=20545 51 | mysql_tzinfo_to_sql /usr/share/zoneinfo | sed 's/Local time zone must be set--see zic manual page/FCTY/' | "${mysql[@]}" mysql 52 | if [ ! -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then 53 | MYSQL_ROOT_PASSWORD="$(pwmake 128)" 54 | echo "GENERATED ROOT PASSWORD: $MYSQL_ROOT_PASSWORD" 55 | fi 56 | "${mysql[@]}" <<-EOSQL 57 | -- What's done in this file shouldn't be replicated 58 | -- or products like mysql-fabric won't work 59 | SET @@SESSION.SQL_LOG_BIN=0; 60 | DELETE FROM mysql.user ; 61 | CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ; 62 | GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ; 63 | CREATE USER 'xtrabackup'@'localhost' IDENTIFIED BY '$XTRABACKUP_PASSWORD'; 64 | GRANT RELOAD,LOCK TABLES,REPLICATION CLIENT ON *.* TO 'xtrabackup'@'localhost'; 65 | GRANT REPLICATION CLIENT ON *.* TO monitor@'%' IDENTIFIED BY 'monitor'; 66 | DROP DATABASE IF EXISTS test ; 67 | FLUSH PRIVILEGES ; 68 | EOSQL 69 | if [ ! -z "$MYSQL_ROOT_PASSWORD" ]; then 70 | mysql+=( -p"${MYSQL_ROOT_PASSWORD}" ) 71 | fi 72 | 73 | if [ "$MYSQL_DATABASE" ]; then 74 | echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" | "${mysql[@]}" 75 | mysql+=( "$MYSQL_DATABASE" ) 76 | fi 77 | 78 | if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then 79 | echo "CREATE USER '"$MYSQL_USER"'@'%' IDENTIFIED BY '"$MYSQL_PASSWORD"' ;" | "${mysql[@]}" 80 | 81 | if [ "$MYSQL_DATABASE" ]; then 82 | echo "GRANT ALL ON \`"$MYSQL_DATABASE"\`.* TO '"$MYSQL_USER"'@'%' ;" | "${mysql[@]}" 83 | fi 84 | 85 | echo 'FLUSH PRIVILEGES ;' | "${mysql[@]}" 86 | fi 87 | 88 | if [ ! -z "$MYSQL_ONETIME_PASSWORD" ]; then 89 | "${mysql[@]}" <<-EOSQL 90 | ALTER USER 'root'@'%' PASSWORD EXPIRE; 91 | EOSQL 92 | fi 93 | if ! kill -s TERM "$pid" || ! wait "$pid"; then 94 | echo >&2 'MySQL init process failed.' 95 | exit 1 96 | fi 97 | 98 | echo 99 | echo 'MySQL init process done. Ready for start up.' 
100 | echo 101 | fi 102 | chown -R mysql:mysql "$DATADIR" 103 | 104 | function join { local IFS="$1"; shift; echo "$*"; } 105 | 106 | if [ -z "$DISCOVERY_SERVICE" ]; then 107 | cluster_join=$CLUSTER_JOIN 108 | else 109 | echo 110 | echo '>> Registering in the discovery service' 111 | 112 | etcd_hosts=$(echo $DISCOVERY_SERVICE | tr ',' ' ') 113 | flag=1 114 | 115 | echo 116 | # Loop to find a healthy etcd host 117 | for i in $etcd_hosts 118 | do 119 | echo ">> Connecting to http://$i" 120 | curl -s http://$i/health || continue 121 | if curl -s http://$i/health | jq -e 'contains({ "health": "true"})'; then 122 | healthy_etcd=$i 123 | flag=0 124 | break 125 | else 126 | echo >&2 ">> Node $i is unhealty. Proceed to the next node." 127 | fi 128 | done 129 | 130 | # Flag is 0 if there is a healthy etcd host 131 | if [ $flag -ne 0 ]; then 132 | echo ">> Couldn't reach healthy etcd nodes." 133 | exit 1 134 | fi 135 | 136 | echo 137 | echo ">> Selected healthy etcd: $healthy_etcd" 138 | 139 | if [ ! -z "$healthy_etcd" ]; then 140 | URL="http://$healthy_etcd/v2/keys/galera/$CLUSTER_NAME" 141 | 142 | set +e 143 | # Read the list of registered IP addresses 144 | echo >&2 ">> Retrieving list of keys for $CLUSTER_NAME" 145 | sleep $[ ( $RANDOM % 5 ) + 1 ]s 146 | addr=$(curl -s $URL | jq -r '.node.nodes[]?.key' | awk -F'/' '{print $(NF)}') 147 | cluster_join=$(join , $addr) 148 | 149 | ipaddr=$(hostname -i | tr -d ' ') 150 | [ -z $ipaddr ] && ipaddr=$(hostname -I | awk {'print $1'}) 151 | 152 | echo 153 | if [ -z $cluster_join ]; then 154 | echo >&2 ">> Cluster address is empty. This is a the first node to come up." 155 | echo 156 | echo >&2 ">> Registering $ipaddr in http://$healthy_etcd" 157 | curl -s $URL/$ipaddr/ipaddress -X PUT -d "value=$ipaddr" 158 | else 159 | curl -s ${URL}?recursive=true\&sorted=true > /tmp/out 160 | running_nodes=$(cat /tmp/out | jq -r '.node.nodes[].nodes[]? 
| select(.key | contains ("wsrep_local_state_comment")) | select(.value == "Synced") | .key' | awk -F'/' '{print $(NF-1)}' | tr "\n" ' '| sed -e 's/[[:space:]]*$//') 161 | echo 162 | echo ">> Running nodes: [${running_nodes}]" 163 | 164 | if [ -z "$running_nodes" ]; then 165 | # if there is no Synced node, determine the sequence number. 166 | TMP=/tmp/wsrep-recover 167 | echo >&2 ">> There is no node in synced state." 168 | echo >&2 ">> It's unsafe to bootstrap unless the sequence number is the latest." 169 | echo >&2 ">> Determining the Galera last committed seqno.." 170 | echo 171 | mysqld_safe --wsrep-recover 2>&1 | tee $TMP 172 | seqno=$(cat $TMP | tr ' ' "\n" | grep -e '[a-z0-9]*-[a-z0-9]*:[0-9]' | head -1 | cut -d ":" -f 2) 173 | echo 174 | if [ ! -z $seqno ]; then 175 | echo ">> Reporting seqno:$seqno to ${healthy_etcd}." 176 | WAIT=$(($TTL * 2)) 177 | curl -s $URL/$ipaddr/seqno -X PUT -d "value=$seqno&ttl=$WAIT" 178 | else 179 | echo ">> Unable to determine Galera sequence number." 180 | exit 1 181 | fi 182 | rm $TMP 183 | 184 | echo 185 | echo ">> Sleeping for $TTL seconds to wait for other nodes to report." 186 | sleep $TTL 187 | 188 | echo 189 | echo >&2 ">> Retrieving list of seqno for $CLUSTER_NAME" 190 | bootstrap_flag=1 191 | 192 | # Retrieve seqno from etcd 193 | curl -s ${URL}?recursive=true\&sorted=true > /tmp/out 194 | cluster_seqno=$(cat /tmp/out | jq -r '.node.nodes[].nodes[]? | select(.key | contains ("seqno")) | .value' | tr "\n" ' '| sed -e 's/[[:space:]]*$//') 195 | 196 | for i in $cluster_seqno; do 197 | if [ $i -gt $seqno ]; then 198 | bootstrap_flag=0 199 | echo >&2 ">> Found another node holding a greater seqno ($i/$seqno)" 200 | fi 201 | done 202 | 203 | echo 204 | if [ $bootstrap_flag -eq 1 ]; then 205 | # Find the earliest node to report if there is no higher seqno 206 | node_to_bootstrap=$(cat /tmp/out | jq -c '.node.nodes[].nodes[]?' 
| grep seqno | tr ',:\"' ' ' | sort -k 11 | head -1 | awk -F'/' '{print $(NF-1)}') 207 | if [ "$node_to_bootstrap" == "$ipaddr" ]; then 208 | echo >&2 ">> This node is safe to bootstrap." 209 | cluster_join= 210 | else 211 | echo >&2 ">> Based on timestamp, $node_to_bootstrap is the chosen node to bootstrap." 212 | echo >&2 ">> Wait again for $TTL seconds to look for a bootstrapped node." 213 | sleep $TTL 214 | curl -s ${URL}?recursive=true\&sorted=true > /tmp/out 215 | 216 | # Look for a synced node again 217 | running_nodes2=$(cat /tmp/out | jq -r '.node.nodes[].nodes[]? | select(.key | contains ("wsrep_local_state_comment")) | select(.value == "Synced") | .key' | awk -F'/' '{print $(NF-1)}' | tr "\n" ' '| sed -e 's/[[:space:]]*$//') 218 | 219 | echo 220 | echo ">> Running nodes: [${running_nodes2}]" 221 | 222 | if [ ! -z "$running_nodes2" ]; then 223 | cluster_join=$(join , $running_nodes2) 224 | else 225 | echo 226 | echo >&2 ">> Unable to find a bootstrapped node to join." 227 | echo >&2 ">> Exiting." 228 | exit 1 229 | fi 230 | 231 | fi 232 | else 233 | echo >&2 ">> Refusing to start for now because there is a node holding higher seqno." 234 | echo >&2 ">> Wait again for $TTL seconds to look for a bootstrapped node." 235 | sleep $TTL 236 | 237 | # Look for a synced node again 238 | curl -s ${URL}?recursive=true\&sorted=true > /tmp/out 239 | running_nodes3=$(cat /tmp/out | jq -r '.node.nodes[].nodes[]? | select(.key | contains ("wsrep_local_state_comment")) | select(.value == "Synced") | .key' | awk -F'/' '{print $(NF-1)}' | tr "\n" ' '| sed -e 's/[[:space:]]*$//') 240 | 241 | echo 242 | echo ">> Running nodes: [${running_nodes3}]" 243 | 244 | if [ ! -z "$running_nodes2" ]; then 245 | cluster_join=$(join , $running_nodes3) 246 | else 247 | echo 248 | echo >&2 ">> Unable to find a bootstrapped node to join." 249 | echo >&2 ">> Exiting." 
250 | exit 1 251 | fi 252 | fi 253 | else 254 | # if there is a Synced node, join the address 255 | cluster_join=$(join , $running_nodes) 256 | fi 257 | 258 | fi 259 | set -e 260 | 261 | echo 262 | echo >&2 ">> Cluster address is gcomm://$cluster_join" 263 | else 264 | echo 265 | echo >&2 '>> No healthy etcd host detected. Refused to start.' 266 | exit 1 267 | fi 268 | fi 269 | 270 | echo 271 | echo ">> Starting reporting script in the background" 272 | nohup /report_status.sh root $MYSQL_ROOT_PASSWORD $CLUSTER_NAME $TTL $DISCOVERY_SERVICE & 273 | 274 | # set IP address based on the primary interface 275 | sed -i "s|WSREP_NODE_ADDRESS|$ipaddr|g" /etc/my.cnf 276 | 277 | echo 278 | echo ">> Starting mysqld process" 279 | mysqld --user=mysql --wsrep_cluster_name=$CLUSTER_NAME --wsrep_cluster_address="gcomm://$cluster_join" --wsrep_sst_method=xtrabackup-v2 --wsrep_sst_auth="xtrabackup:$XTRABACKUP_PASSWORD" --log-error=${DATADIR}error.log $CMDARG 280 | 281 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Percona XtraDB Cluster 5.6 Docker Image # 2 | 3 | 4 | ## Table of Contents ## 5 | 6 | 1. [Overview](#overview) 7 | 2. [Requirement](#requirement) 8 | 3. [Image Description](#image-description) 9 | 4. [Run Container](#run-container) 10 | 5. [Build Image](#build-image) 11 | 6. [Discovery Service](#discovery-service) 12 | 7. [Known Limitations](#known-limitations) 13 | 8. [Development](#development) 14 | 15 | 16 | ## Overview ## 17 | 18 | Derived from [perconalab/percona-xtradb-cluster](https://github.com/Percona-Lab/percona-xtradb-cluster-docker), the image supports running Percona XtraDB Cluster 5.6 with Docker orchestration tool like Docker Engine Swarm Mode and Kubernetes and requires etcd to operate correctly. It can also run on a standalone environment. 
19 | 20 | Example deployment at Severalnines' [blog post](http://severalnines.com/blog/mysql-docker-deploy-homogeneous-galera-cluster-etcd). 21 | 22 | 23 | ## Requirement ## 24 | 25 | A healthy etcd cluster. Please refer to Severalnines' [blog post](http://severalnines.com/blog/mysql-docker-deploy-homogeneous-galera-cluster-etcd) on how to setup this. 26 | 27 | 28 | ## Image Description ## 29 | 30 | To pull the image, simply: 31 | 32 | ```bash 33 | $ docker pull severalnines/pxc56 34 | ``` 35 | 36 | The image consists of Percona XtraDB Cluster 5.6 and all of its components: 37 | * MySQL client package. 38 | * Percona Xtrabackup. 39 | * jq - Lightweight and flexible command-line JSON processor. 40 | * report_status.sh - report Galera status to etcd every `TTL`. 41 | 42 | 43 | ## Run Container ## 44 | 45 | The Docker image accepts the following parameters: 46 | 47 | * One of `MYSQL_ROOT_PASSWORD` must be defined. 48 | * The image will create the user `xtrabackup@localhost` for the XtraBackup SST method. If you want to use a password for the `xtrabackup` user, set `XTRABACKUP_PASSWORD`. 49 | * If you want to use the discovery service (right now only `etcd` is supported), set the address (ip:port format) to `DISCOVERY_SERVICE`. It can accept multiple addresses separated by a comma. The image will automatically find a running cluser by `CLUSTER_NAME` and join to the existing cluster (or start a new one). 50 | * If you want to start without the discovery service, use the `CLUSTER_JOIN` variable. Empty variables will start a new cluster. To join an existing cluster, set `CLUSTER_JOIN` to the list of IP addresses running cluster nodes. 51 | * `TTL` by default is 30 seconds. Container will report every `TTL - 2` seconds when it's alive (wsrep_cluster_state\_comment=Synced) via `report_status.sh`. If a container is down, it will no longer send an update to etcd thus the key (wsrep_cluster_state_comment) is removed after expiration. 
This simply indicates that the registered node is no longer synced with the cluster and it will be skipped when constructing the Galera communication address. 52 | 53 | Minimum of 3 containers is recommended for high availability. Running standalone is also possible with standard "docker run" command as shown further down. 54 | 55 | 56 | ### Docker Engine Swarm Mode ### 57 | 58 | 59 | #### Ephemeral Storage #### 60 | 61 | Assuming: 62 | 63 | * etcd cluster is running on 192.168.55.111:2379, 192.168.55.112:2379 and 192.168.55.113:2379. 64 | * Created an overlay network called ``galera-net``. 65 | 66 | Then, to run a three-node Percona XtraDB Cluster on Docker Swarm mode (with ephemeral storage): 67 | 68 | ```bash 69 | $ docker service create \ 70 | --name mysql-galera \ 71 | --replicas 3 \ 72 | -p 3306:3306 \ 73 | --network galera-net \ 74 | --env MYSQL_ROOT_PASSWORD=mypassword \ 75 | --env DISCOVERY_SERVICE=192.168.55.111:2379,192.168.55.112:2379,192.168.55.113:2379 \ 76 | --env XTRABACKUP_PASSWORD=mypassword \ 77 | --env CLUSTER_NAME=my_wsrep_cluster \ 78 | severalnines/pxc56 79 | ``` 80 | 81 | 82 | #### Persistent Storage #### 83 | 84 | Assuming: 85 | 86 | * etcd cluster is running on 192.168.55.111:2379, 192.168.55.112:2379 and 192.168.55.113:2379. 87 | * Created an overlay network called ``galera-net``. 
88 | 89 | Then, to run a three-node Percona XtraDB Cluster on Docker Swarm mode (with persistent storage): 90 | 91 | ```bash 92 | $ docker service create \ 93 | --name mysql-galera \ 94 | --replicas 3 \ 95 | -p 3306:3306 \ 96 | --network galera-net \ 97 | --mount type=volume,source=galera-vol,destination=/var/lib/mysql \ 98 | --env MYSQL_ROOT_PASSWORD=mypassword \ 99 | --env DISCOVERY_SERVICE=192.168.55.111:2379,192.168.55.112:2379,192.168.55.113:2379 \ 100 | --env XTRABACKUP_PASSWORD=mypassword \ 101 | --env CLUSTER_NAME=my_wsrep_cluster \ 102 | severalnines/pxc56 103 | ``` 104 | 105 | 106 | #### Custom my.cnf #### 107 | 108 | Assuming: 109 | 110 | * Directory ``/mnt/docker/mysql-config`` is exist on all Docker host for data volume mapping. All custom `my.cnf` should be located under this directory. 111 | * etcd cluster is running on 192.168.55.111:2379, 192.168.55.112:2379 and 192.168.55.113:2379. 112 | * Created an overlay network called ``galera-net``. 113 | 114 | Then, to run a three-node Percona XtraDB Cluster on Docker Swarm mode: 115 | 116 | ```bash 117 | $ docker service create \ 118 | --name mysql-galera \ 119 | --replicas 3 \ 120 | -p 3306:3306 \ 121 | --network galera-net \ 122 | --mount type=volume,source=galera-vol,destination=/var/lib/mysql \ 123 | --mount type=bind,src=/mnt/docker/mysql-config,dst=/etc/my.cnf.d \ 124 | --env MYSQL_ROOT_PASSWORD=mypassword \ 125 | --env DISCOVERY_SERVICE=192.168.55.111:2379,192.168.55.112:2379,192.168.55.113:2379 \ 126 | --env XTRABACKUP_PASSWORD=mypassword \ 127 | --env CLUSTER_NAME=my_wsrep_cluster \ 128 | severalnines/pxc56 129 | ``` 130 | 131 | Verify with: 132 | 133 | ``` 134 | $ docker service ps mysql-galera 135 | ``` 136 | 137 | External applications/clients can connect to any Docker host IP address or hostname on port 3306, requests will be load balanced between the Galera containers. 
The connection gets NATed to a Virtual IP address for each service "task" (container, in this case) using the Linux kernel's built-in load balancing functionality, IPVS. If the application containers reside in the same overlay network (galera-net), then use the assigned virtual IP address instead. 138 | 139 | You can retrieve it using the inspect option: 140 | 141 | ```bash 142 | $ docker service inspect mysql-galera -f "{{ .Endpoint.VirtualIPs }}" 143 | ``` 144 | 145 | 146 | ### Kubernetes ### 147 | 148 | Coming soon. 149 | 150 | 151 | 152 | ### Without Orchestration Tool ### 153 | 154 | To run a standalone Galera node, the command would be: 155 | 156 | ```bash 157 | $ docker run -d \ 158 | -p 3306 \ 159 | --name=galera \ 160 | -e MYSQL_ROOT_PASSWORD=mypassword \ 161 | -e DISCOVERY_SERVICE=192.168.55.111:2379,192.168.55.112:2379,192.168.55.113:2379 \ 162 | -e CLUSTER_NAME=my_wsrep_cluster \ 163 | -e XTRABACKUP_PASSWORD=mypassword \ 164 | severalnines/pxc56 165 | ``` 166 | 167 | With some iterations, you can create a three-node Galera cluster, as shown in the following example: 168 | 169 | ```bash 170 | $ for i in 1 2 3; 171 | do \ 172 | docker run -d \ 173 | -p 3306 \ 174 | --name=galera${i} \ 175 | -e MYSQL_ROOT_PASSWORD=mypassword \ 176 | -e DISCOVERY_SERVICE=192.168.55.111:2379,192.168.55.112:2379,192.168.55.113:2379 \ 177 | -e CLUSTER_NAME=my_wsrep_cluster \ 178 | -e XTRABACKUP_PASSWORD=mypassword \ 179 | severalnines/pxc56; 180 | done 181 | ``` 182 | 183 | Verify with: 184 | 185 | ```bash 186 | $ docker ps 187 | ``` 188 | 189 | 190 | ## Build Image ## 191 | 192 | To build Docker image, download the Docker related files available at [our Github repository](https://github.com/severalnines/galera-docker-pxc56): 193 | 194 | ```bash 195 | $ git clone https://github.com/severalnines/galera-docker-pxc56 196 | $ cd galera-docker-pxc56 197 | $ docker build -t --rm=true severalnines/pxc56 . 
198 | ``` 199 | 200 | Verify with: 201 | 202 | ```bash 203 | $ docker images 204 | ``` 205 | 206 | 207 | ## Discovery Service ## 208 | 209 | All nodes should report to etcd periodically with an expiring key. The default `TTL` value is 30 seconds. Container will report every `TTL - 2` seconds when it's alive (wsrep_cluster_state\_comment=Synced) via `report_status.sh`. If a container is down, it will no longer send an update to etcd thus the key (wsrep_cluster_state_comment) is removed after expiration. This simply indicates that the registered node is no longer synced with the cluster and it will be skipped when constructing the Galera communication address. 210 | 211 | To check the list of running nodes via etcd, run the following (assuming CLUSTER_NAME="my_wsrep_cluster"): 212 | 213 | ```javascript 214 | $ curl -s "http://192.168.55.111:2379/v2/keys/galera/my_wsrep_cluster?recursive=true" | python -m json.tool 215 | { 216 | "action": "get", 217 | "node": { 218 | "createdIndex": 10049, 219 | "dir": true, 220 | "key": "/galera/my_wsrep_cluster", 221 | "modifiedIndex": 10049, 222 | "nodes": [ 223 | { 224 | "createdIndex": 10067, 225 | "dir": true, 226 | "key": "/galera/my_wsrep_cluster/10.255.0.6", 227 | "modifiedIndex": 10067, 228 | "nodes": [ 229 | { 230 | "createdIndex": 10075, 231 | "expiration": "2016-11-29T10:55:35.37622336Z", 232 | "key": "/galera/my_wsrep_cluster/10.255.0.6/wsrep_last_committed", 233 | "modifiedIndex": 10075, 234 | "ttl": 10, 235 | "value": "0" 236 | }, 237 | { 238 | "createdIndex": 10073, 239 | "expiration": "2016-11-29T10:55:34.788170259Z", 240 | "key": "/galera/my_wsrep_cluster/10.255.0.6/wsrep_local_state_comment", 241 | "modifiedIndex": 10073, 242 | "ttl": 10, 243 | "value": "Synced" 244 | } 245 | ] 246 | }, 247 | { 248 | "createdIndex": 10049, 249 | "dir": true, 250 | "key": "/galera/my_wsrep_cluster/10.255.0.7", 251 | "modifiedIndex": 10049, 252 | "nodes": [ 253 | { 254 | "createdIndex": 10049, 255 | "key": 
"/galera/my_wsrep_cluster/10.255.0.7/ipaddress", 256 | "modifiedIndex": 10049, 257 | "value": "10.255.0.7" 258 | }, 259 | { 260 | "createdIndex": 10074, 261 | "expiration": "2016-11-29T10:55:35.218496083Z", 262 | "key": "/galera/my_wsrep_cluster/10.255.0.7/wsrep_last_committed", 263 | "modifiedIndex": 10074, 264 | "ttl": 10, 265 | "value": "0" 266 | }, 267 | { 268 | "createdIndex": 10072, 269 | "expiration": "2016-11-29T10:55:34.650574629Z", 270 | "key": "/galera/my_wsrep_cluster/10.255.0.7/wsrep_local_state_comment", 271 | "modifiedIndex": 10072, 272 | "ttl": 10, 273 | "value": "Synced" 274 | } 275 | ] 276 | }, 277 | { 278 | "createdIndex": 10070, 279 | "dir": true, 280 | "key": "/galera/my_wsrep_cluster/10.255.0.8", 281 | "modifiedIndex": 10070, 282 | "nodes": [ 283 | { 284 | "createdIndex": 10077, 285 | "expiration": "2016-11-29T10:55:39.681757381Z", 286 | "key": "/galera/my_wsrep_cluster/10.255.0.8/wsrep_last_committed", 287 | "modifiedIndex": 10077, 288 | "ttl": 15, 289 | "value": "0" 290 | }, 291 | { 292 | "createdIndex": 10076, 293 | "expiration": "2016-11-29T10:55:38.638268679Z", 294 | "key": "/galera/my_wsrep_cluster/10.255.0.8/wsrep_local_state_comment", 295 | "modifiedIndex": 10076, 296 | "ttl": 14, 297 | "value": "Synced" 298 | } 299 | ] 300 | } 301 | ] 302 | } 303 | } 304 | ``` 305 | 306 | 307 | ## Known Limitations ## 308 | 309 | * The image are tested and built using Docker version 1.12.3, build 6b644ec on CentOS 7.1. 310 | 311 | * There will be no automatic recovery if a split-brain happens (where all nodes are in Non-Primary state). This is because the MySQL service is still running, yet it will refuse to serve any data and will return error to the client. Docker has no capability to detect this since what it cares about is the foreground MySQL process which is not terminated, killed or stopped. Automating this process is risky, especially if the service discovery is co-located with the Docker host (etcd would also lose contact with other members). 
Although if the service discovery is healthy externally, it is probably unreachable from the Galera containers' perspective, preventing the containers from seeing each other's status correctly during the glitch.