├── .gitignore
├── 1_install_core.sh
├── README.md
├── boot_local_docker_services.sh
├── cleanup_existing_cluster_containers.sh
├── consul
│   └── restart_consul.sh
├── cst.sh
├── docker-compose.yml
├── docker
│   └── start_docker_daemon.sh
├── ssh_master_1.sh
├── ssh_master_2.sh
├── ssh_master_3.sh
├── ssh_replica_1.sh
├── ssh_replica_2.sh
├── ssh_replica_3.sh
├── start_cluster.sh
├── stop_cluster.sh
└── swarm
    ├── restart_single_swarm_manager.sh
    └── swarm_join_local_consul.sh

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
nohup.out

*.pyc
*.pyo
*.swp
*.db

*/*.pyc
*/*.pyo
*/*.swp
*/*.db

*/*/*.pyc
*/*/*.pyo
*/*/*.swp
*/*/*.db

*/*/*/*.pyc
*/*/*/*.pyo
*/*/*/*.swp
*/*/*/*.db

--------------------------------------------------------------------------------
/1_install_core.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Docker 1.9 is not yet available as an official RPM, so these components are
# installed from the upstream release binaries to try out the upcoming features

# Toggle which components to install and update with these flags
updateconsul=1
updatedocker=1
updatedockermachine=1
updatedockercompose=1
updatedockerswarm=1

consulversion="0.5.2"
dockerversion="1.9.0"
dockermachineversion="v0.5.0"
dockercomposeversion="1.5.1"
dockerswarmversion="1.0.0"

# Docker Swarm is built with "go get", which needs a Go workspace; the original
# script expected this path from a common.sh, so default it here if it is unset
goworkspacepath=${goworkspacepath:-"/opt/goworkspace"}

echo ""
echo "Updating Consul($updateconsul)"
echo "Updating Docker($updatedocker)"
echo "Updating Docker Machine($updatedockermachine)"
echo "Updating Docker Compose($updatedockercompose)"
echo "Updating Docker Swarm($updatedockerswarm)"

echo ""
echo "Starting Install"
echo ""

if [ $updateconsul -eq 1 ]; then

    echo ""
    echo "Installing Consul version: $consulversion"
    echo ""
    pushd /tmp >> /dev/null
    rm -f /tmp/consul.zip
    rm -f /tmp/consul
    wget https://releases.hashicorp.com/consul/${consulversion}/consul_${consulversion}_linux_amd64.zip -O /tmp/consul.zip
    unzip consul.zip
    cp ./consul /usr/local/bin
    cp ./consul /usr/bin
    rm -f /tmp/consul.zip
    rm -f /tmp/consul
    popd >> /dev/null
    echo ""
    echo "Done Installing Consul version: $consulversion"
    echo ""
fi

# Docker 1.9 is not shipped in an rpm yet:
if [ $updatedocker -eq 1 ]; then
    echo ""
    echo "Installing Docker version: $dockerversion"
    echo ""
    # To install, run the following command as root:
    curl -sSL -O https://get.docker.com/builds/Linux/x86_64/docker-${dockerversion} && chmod +x docker-${dockerversion} && mv docker-${dockerversion} /usr/local/bin/docker
    echo ""
    echo "Done Installing Docker version: $dockerversion"
    echo ""
fi

# Now install Docker Machine: https://github.com/docker/machine/releases/
if [ $updatedockermachine -eq 1 ]; then
    echo ""
    echo "Installing Docker Machine version: $dockermachineversion"
    echo ""
    curl -L https://github.com/docker/machine/releases/download/${dockermachineversion}/docker-machine_linux-amd64.zip > machine.zip && \
        unzip machine.zip && \
        rm machine.zip && \
        mv -f docker-machine* /usr/local/bin
    echo ""
    echo "Done Installing Docker Machine version: $dockermachineversion"
    echo ""
fi

# Now install Docker Compose: https://github.com/docker/compose/releases/
if [ $updatedockercompose -eq 1 ]; then
    echo ""
    echo "Installing Docker Compose version: $dockercomposeversion"
    echo ""
    curl -L https://github.com/docker/compose/releases/download/${dockercomposeversion}/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
    chmod +x /usr/local/bin/docker-compose
    echo ""
    echo "Done Installing Docker Compose version: $dockercomposeversion"
    echo ""
fi

# Now install Docker Swarm: https://github.com/docker/swarm/releases
if [ $updatedockerswarm -eq 1 ]; then
    echo ""
    echo "Installing Docker Swarm version: $dockerswarmversion"
    echo ""
    pushd /opt >> /dev/null
    if [ ! -e $goworkspacepath ]; then
        mkdir $goworkspacepath >> /dev/null
        chmod 777 $goworkspacepath >> /dev/null
    fi
    export GOPATH=$goworkspacepath
    echo " - Using GOPATH($GOPATH); export goworkspacepath before running this script to override"
    go get github.com/docker/swarm
    chmod 777 $GOPATH/bin/swarm

    rm -f /usr/local/bin/swarm >> /dev/null
    ln -s $GOPATH/bin/swarm /usr/local/bin/swarm

    popd >> /dev/null
    echo ""
    echo "Done Installing Docker Swarm version: $dockerswarmversion"
    echo ""
fi

exit 0

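Once the installer finishes, it is worth confirming that each binary actually landed on the `PATH`. The loop below is only a suggested check, not part of this repository, and the exact version flags can differ between releases of these tools:

```
# optional sanity check: print where each tool resolved and its reported version
for tool in consul docker docker-machine docker-compose swarm; do
    if command -v "$tool" > /dev/null 2>&1; then
        echo "$tool -> $(command -v "$tool")"
        "$tool" --version 2> /dev/null || "$tool" version
    else
        echo "MISSING: $tool"
    fi
done
```
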
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## A Distributed Redis Cluster using Docker Swarm, Docker Compose, and Supervisor

### Overview

This repository starts a distributed 6-node Redis Cluster using Docker Swarm with Docker Compose and Supervisor for high availability. The Redis instances listen on host ports 6379-6384. There are 3 master nodes and 3 replica nodes. If you are running a local Docker Swarm, you can run the entire 6-node cluster locally.

### How to Install

1. Make sure Swarm is installed

```
docker-redis-cluster $ sudo ./1_install_core.sh
```

1. Restart the local consul, docker daemon, swarm manager, and swarm join

```
docker-redis-cluster $ sudo ./boot_local_docker_services.sh
```

1. Point to the Docker Swarm

Set the terminal environment to use the running Docker Swarm:

```
$ export DOCKER_HOST=localhost:4000
$ env | grep DOCKER
DOCKER_HOST=localhost:4000
$
```

1. Confirm the Docker Swarm Membership

When running the swarm locally you should see only 1 node, with output similar to:

```
$ docker info
Containers: 0
Images: 0
Role: primary
Strategy: spread
Filters: health, port, dependency, affinity, constraint
Nodes: 1
 localhost.localdomain: localhost:2375
  └ Containers: 0
  └ Reserved CPUs: 0 / 2
  └ Reserved Memory: 0 B / 4.053 GiB
  └ Labels: executiondriver=native-0.2, kernelversion=4.1.7-200.fc22.x86_64, operatingsystem=Fedora 22 (Twenty Two), storagedriver=devicemapper
CPUs: 2
Total Memory: 4.053 GiB
Name: localhost.localdomain
$
```
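
If `docker info` hangs or errors out, it can help to verify that each local endpoint is actually listening before going further. These probes are not part of the repository's scripts; they use the same ports the scripts configure (consul's HTTP API on 8500, the docker daemon on 2375, and the swarm manager on 4000):

```
$ curl -s http://localhost:8500/v1/status/leader
$ docker -H localhost:2375 version
$ docker -H localhost:4000 info
```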

### Start the Redis Cluster

Assuming consul, the docker daemon, the swarm manager, and the swarm join are running, `ps` should show something similar to:

```
$ ps auwwx | grep consul | grep -v grep
root 29447  0.4  0.4 34110388 19204 pts/4 Sl  19:39  0:14 consul agent -server -data-dir=/tmp/consul -bind=0.0.0.0 -bootstrap-expect 1
root 31650 12.9  1.2  1329604 51208 pts/4 Sl  20:00  3:42 /usr/local/bin/docker daemon -H localhost:2375 --cluster-advertise 0.0.0.0:2375 --cluster-store consul://localhost:8500/developmentswarm
root 31738  0.0  0.5   488084 20512 pts/1 Sl  20:02  0:01 /usr/local/bin/swarm manage -H tcp://localhost:4000 --advertise localhost:4000 consul://localhost:8500/developmentswarm
root 31749  0.0  0.3   128416 14304 pts/1 Sl  20:02  0:00 /usr/local/bin/swarm join --addr=localhost:2375 consul://localhost:8500/developmentswarm
$
```

1. Make sure no other Redis nodes are running

```
$ docker ps -a
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
$
```

1. Start the Redis Cluster

```
docker-redis-cluster $ ./start_cluster.sh
Starting the Cluster on Docker Swarm
Creating redismaster1
Creating redismaster3
Creating redismaster2
Creating redisreplica1
Creating redisreplica2
Creating redisreplica3
Done
docker-redis-cluster $
```

1. Confirm the Containers are running

```
$ docker ps
CONTAINER ID        IMAGE                          COMMAND                  CREATED             STATUS              PORTS                                                                               NAMES
89f8ab731348        jayjohnson/redis-clusterable   "/bin/sh -c '. /bin/s"   4 minutes ago       Up 4 minutes        127.0.0.1:6384->6379/tcp, 127.0.0.1:16384->16379/tcp, 127.0.0.1:26384->26379/tcp   localhost.localdomain/redisreplica3
6ff759f42bb6        jayjohnson/redis-clusterable   "/bin/sh -c '. /bin/s"   4 minutes ago       Up 4 minutes        127.0.0.1:6383->6379/tcp, 127.0.0.1:16383->16379/tcp, 127.0.0.1:26383->26379/tcp   localhost.localdomain/redisreplica2
fe97da8b920a        jayjohnson/redis-clusterable   "/bin/sh -c '. /bin/s"   4 minutes ago       Up 4 minutes        127.0.0.1:6382->6379/tcp, 127.0.0.1:16382->16379/tcp, 127.0.0.1:26382->26379/tcp   localhost.localdomain/redisreplica1
8ed2a60663f6        jayjohnson/redis-clusterable   "/bin/sh -c '. /bin/s"   4 minutes ago       Up 4 minutes        127.0.0.1:6380->6379/tcp, 127.0.0.1:16380->16379/tcp, 127.0.0.1:26380->26379/tcp   localhost.localdomain/redismaster2
3a779ae52bd7        jayjohnson/redis-clusterable   "/bin/sh -c '. /bin/s"   4 minutes ago       Up 4 minutes        127.0.0.1:6381->6379/tcp, 127.0.0.1:16381->16379/tcp, 127.0.0.1:26381->26379/tcp   localhost.localdomain/redismaster3
085b750909d5        jayjohnson/redis-clusterable   "/bin/sh -c '. /bin/s"   4 minutes ago       Up 4 minutes        127.0.0.1:6379->6379/tcp, 127.0.0.1:16379->16379/tcp, 127.0.0.1:26379->26379/tcp   localhost.localdomain/redismaster1
$
```
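
To look inside a running node, the standard docker commands work through the swarm endpoint as well. This is only an illustration, not a script in this repository, and it assumes the `jayjohnson/redis-clusterable` image writes its Supervisor and Redis activity to the container log:

```
$ docker logs redismaster1
$ docker exec redismaster1 env | grep REDISNODE
```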

### Confirm the Redis Cluster using the Command Line Tool

```
$ redis-cli -h 127.0.0.1 -p 6384 cluster nodes
aa81e0dc13f4985dd0b70647d338f899454326af :6379 myself,master - 0 0 6 connected
$
```
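
For a quick read and write smoke test, `redis-cli` can be run in cluster mode (`-c`) so it follows any MOVED redirects between the masters. This example is not part of the repository and assumes the hash slots have already been assigned across the three masters, as the `./cst.sh` output below shows:

```
$ redis-cli -c -h 127.0.0.1 -p 6379 set testkey hello
$ redis-cli -c -h 127.0.0.1 -p 6379 get testkey
```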

### Inspect the Redis Cluster Details

```
$ ./cst.sh

Printing Redis Cluster Status:

redismaster1(6379):
399de28f4b010ae4395db704887bfdea92aa2144 :6379 myself,master - 0 0 1 connected 0-5460

redismaster2(6380):
1c62661d9235463b5f3906a687ddc4010444f4ac :6379 myself,master - 0 0 2 connected 5461-10922

redismaster3(6381):
6fc1dda1fec41b4fbb0654d926dbfe17700c412b :6379 myself,master - 0 0 3 connected 10923-16383

redisreplica1(6382):
fffa24ec3a9d8485e63bf4e0d09bb2428ca7851b :6379 myself,master - 0 0 4 connected

redisreplica2(6383):
bb6844f7b7a3ccc1c0bc392461d342037916b903 :6379 myself,master - 0 0 5 connected

redisreplica3(6384):
c05e9a770e8aa11d82793f2d1076b7a94334d83c :6379 myself,master - 0 0 6 connected


Done
$
```
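
Beyond `cluster nodes`, each node's `cluster info` output summarizes overall health, including `cluster_state` and `cluster_known_nodes`. The loop below is only a convenience sketch, not a script shipped in this repository:

```
for port in 6379 6380 6381 6382 6383 6384; do
    echo "port ${port}:"
    redis-cli -h 127.0.0.1 -p ${port} cluster info | grep -E 'cluster_state|cluster_known_nodes'
done
```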

### Stop the Redis Cluster

```
$ ./stop_cluster.sh
Stopping the Cluster on Docker Swarm
Stopping redisreplica3 ... done
Stopping redisreplica2 ... done
Stopping redisreplica1 ... done
Stopping redismaster2 ... done
Stopping redismaster3 ... done
Stopping redismaster1 ... done
Cleaning up old containers for ports
Done
$
```

--------------------------------------------------------------------------------
/boot_local_docker_services.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Exclude the grep processes themselves so the kill list only holds the service pids
consulpid=`ps auwwx | grep "consul agent -server" | grep -v grep | awk '{print $2}'`
dockerpid=`ps auwwx | grep "docker daemon" | grep -v grep | awk '{print $2}'`
swarmmanpid=`ps auwwx | grep "swarm manage" | grep -v grep | awk '{print $2}'`
swarmjoinpid=`ps auwwx | grep "swarm join" | grep -v grep | awk '{print $2}'`

echo "Stopping all services"
kill -9 $consulpid $dockerpid $swarmmanpid $swarmjoinpid

echo ""
echo "Starting Consul"
pushd consul >> /dev/null
./restart_consul.sh
popd >> /dev/null

echo "Waiting on consul to start"
sleep 5

echo "Starting Docker"
pushd docker >> /dev/null
./start_docker_daemon.sh
popd >> /dev/null

echo "Waiting on docker daemon to start"
sleep 2
echo "Starting Swarm Manager"
pushd swarm >> /dev/null
./restart_single_swarm_manager.sh
sleep 2
./swarm_join_local_consul.sh
popd >> /dev/null

exit 0

--------------------------------------------------------------------------------
/cleanup_existing_cluster_containers.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Stop and remove any previous cluster containers so the host ports are free again
docker stop localhost.localdomain/redisreplica3 localhost.localdomain/redisreplica2 localhost.localdomain/redisreplica1 localhost.localdomain/redismaster1 localhost.localdomain/redismaster2 localhost.localdomain/redismaster3 &> /dev/null

docker rm localhost.localdomain/redisreplica3 localhost.localdomain/redisreplica2 localhost.localdomain/redisreplica1 localhost.localdomain/redismaster1 localhost.localdomain/redismaster2 localhost.localdomain/redismaster3 &> /dev/null

exit 0

--------------------------------------------------------------------------------
/consul/restart_consul.sh:
--------------------------------------------------------------------------------
#!/bin/bash

log="/tmp/local-consul.log"

echo "" > $log
echo "" >> $log
echo "Restarting Consul Server Agent" >> $log

# Make sure to stop the current ones if any
countconsul=`ps auwwx | grep 'consul agent' | grep -v grep | wc -l`

echo "Found Consul Agents that need to be stopped: $countconsul" >> $log
if [ $countconsul -gt 0 ]; then
    echo "Stopping Previous Consul" >> $log
    kill -9 `ps auwwx | grep 'consul agent' | grep -v grep | awk '{print $2}'` >> /dev/null
    echo "Done Stopping Previous Consul" >> $log
fi

# For HA consul needs to be running first:
consulinstances=1
consuldatadir="/opt/consul/data"
consullogdir="/opt/consul/logs"
consullogfile="/opt/consul/logs/consul.log"
if [ ! -d $consuldatadir ]; then
    mkdir -p $consuldatadir >> $log
    chmod 777 $consuldatadir >> $log
fi
if [ ! -d $consullogdir ]; then
    mkdir -p $consullogdir >> $log
    chmod 777 $consullogdir >> $log
fi
echo "" > $consullogfile
chmod 666 $consullogfile

echo "" >> $log
echo "Running as Bootstrap Server Node" >> $log
echo "" >> $log
# Note: the agent keeps its state under /tmp/consul even though /opt/consul/data is prepared above
consul agent -server -data-dir="/tmp/consul" -bind=0.0.0.0 -bootstrap-expect 1 &> $consullogfile &

exit 0

--------------------------------------------------------------------------------
/cst.sh:
--------------------------------------------------------------------------------
#!/bin/bash

echo ""
echo "Printing Redis Cluster Status:"
echo ""

echo "redismaster1(6379):"
redis-cli -h 127.0.0.1 -p 6379 cluster nodes
echo ""
echo "redismaster2(6380):"
redis-cli -h 127.0.0.1 -p 6380 cluster nodes
echo ""
echo "redismaster3(6381):"
redis-cli -h 127.0.0.1 -p 6381 cluster nodes
echo ""
echo "redisreplica1(6382):"
redis-cli -h 127.0.0.1 -p 6382 cluster nodes
echo ""
echo "redisreplica2(6383):"
redis-cli -h 127.0.0.1 -p 6383 cluster nodes
echo ""
echo "redisreplica3(6384):"
redis-cli -h 127.0.0.1 -p 6384 cluster nodes
echo ""

echo ""
echo "Done"
echo ""

exit 0

--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
redismaster1:
  image: jayjohnson/redis-clusterable
  container_name: "redismaster1"
  hostname: "redismaster1"
  environment:
    - NODESUPERVISORNAME=redis-master-1
    - REDISNODENAME=redismaster1
    - REDISNODEPORT=6379
    - REDISNODEREPLICAS=redismaster1:6379 redismaster2:6379 redismaster3:6379 redisreplica1:6379 redisreplica2:6379 redisreplica3:6379
  ports:
    - "6379:6379"
    - "16379:16379"
    - "26379:26379"
  cap_add:
    - ALL
    - NET_ADMIN
    - SYS_ADMIN

redismaster2:
  image: jayjohnson/redis-clusterable
  container_name: "redismaster2"
  hostname: "redismaster2"
  environment:
    - NODESUPERVISORNAME=redis-master-2
    - REDISNODENAME=redismaster2
    - REDISNODEPORT=6380
    - REDISNODEREPLICAS=redismaster1:6379 redismaster2:6379 redismaster3:6379 redisreplica1:6379 redisreplica2:6379 redisreplica3:6379
  ports:
    - "6380:6379"
    - "16380:16379"
    - "26380:26379"
  cap_add:
    - ALL
    - NET_ADMIN
    - SYS_ADMIN

redismaster3:
  image: jayjohnson/redis-clusterable
  container_name: "redismaster3"
  hostname: "redismaster3"
  environment:
    - NODESUPERVISORNAME=redis-master-3
    - REDISNODENAME=redismaster3
    - REDISNODEPORT=6381
    - REDISNODEREPLICAS=redismaster1:6379 redismaster2:6379 redismaster3:6379 redisreplica1:6379 redisreplica2:6379 redisreplica3:6379
  ports:
    - "6381:6379"
    - "16381:16379"
    - "26381:26379"
  cap_add:
    - ALL
    - NET_ADMIN
    - SYS_ADMIN

redisreplica1:
  image: jayjohnson/redis-clusterable
  container_name: "redisreplica1"
  hostname: "redisreplica1"
  environment:
    - NODESUPERVISORNAME=redis-replica-1
    - REDISNODENAME=redisreplica1
    - REDISNODEPORT=6382
    - REDISNODEREPLICAS=redismaster1:6379 redismaster2:6379 redismaster3:6379 redisreplica1:6379 redisreplica2:6379 redisreplica3:6379
  ports:
    - "6382:6379"
    - "16382:16379"
    - "26382:26379"
  cap_add:
    - ALL
    - NET_ADMIN
    - SYS_ADMIN

redisreplica2:
  image: jayjohnson/redis-clusterable
  container_name: "redisreplica2"
  hostname: "redisreplica2"
  environment:
    - NODESUPERVISORNAME=redis-replica-2
    - REDISNODENAME=redisreplica2
    - REDISNODEPORT=6383
    - REDISNODEREPLICAS=redismaster1:6379 redismaster2:6379 redismaster3:6379 redisreplica1:6379 redisreplica2:6379 redisreplica3:6379
  ports:
    - "6383:6379"
    - "16383:16379"
    - "26383:26379"
  cap_add:
    - ALL
    - NET_ADMIN
    - SYS_ADMIN

redisreplica3:
  image: jayjohnson/redis-clusterable
  container_name: "redisreplica3"
  hostname: "redisreplica3"
  environment:
    - NODESUPERVISORNAME=redis-replica-3
    - REDISNODENAME=redisreplica3
    - REDISNODEPORT=6384
    - REDISNODEREPLICAS=redismaster1:6379 redismaster2:6379 redismaster3:6379 redisreplica1:6379 redisreplica2:6379 redisreplica3:6379
  ports:
    - "6384:6379"
    - "16384:16379"
    - "26384:26379"
  cap_add:
    - ALL
    - NET_ADMIN
    - SYS_ADMIN

--------------------------------------------------------------------------------
/docker/start_docker_daemon.sh:
--------------------------------------------------------------------------------
#!/bin/bash

log="/tmp/local-daemon.log"

hostfqdn="localhost"
swarmdiscoveryport=4000
consuldiscoveryport=8500
dockerdiscoveryport=2375
uniqueswarmprefix="developmentswarm"

echo "Starting Docker Daemon: $hostfqdn:$dockerdiscoveryport " > $log

nohup /usr/local/bin/docker daemon -H $hostfqdn:$dockerdiscoveryport --cluster-advertise 0.0.0.0:$dockerdiscoveryport --cluster-store consul://$hostfqdn:$consuldiscoveryport/$uniqueswarmprefix &> $log &

echo "Done Starting Docker Daemon" >> $log

exit 0

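Because the daemon listens on a TCP socket instead of the default unix socket, every client call needs `-H` (or the `DOCKER_HOST` environment variable). A quick way to tell the plain engine endpoint apart from the swarm manager endpoint, not part of this repository's scripts:

```
$ docker -H localhost:2375 info    # the single local engine started by this script
$ docker -H localhost:4000 info    # the swarm manager (reports Role, Strategy, and Nodes)
```
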
--------------------------------------------------------------------------------
/ssh_master_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

nodename="redismaster1"

echo "SSHing into $nodename"

docker exec -t -i $nodename /bin/bash

--------------------------------------------------------------------------------
/ssh_master_2.sh:
--------------------------------------------------------------------------------
#!/bin/bash

nodename="redismaster2"

echo "SSHing into $nodename"

docker exec -t -i $nodename /bin/bash

--------------------------------------------------------------------------------
/ssh_master_3.sh:
--------------------------------------------------------------------------------
#!/bin/bash

nodename="redismaster3"

echo "SSHing into $nodename"

docker exec -t -i $nodename /bin/bash

--------------------------------------------------------------------------------
/ssh_replica_1.sh:
--------------------------------------------------------------------------------
#!/bin/bash

nodename="redisreplica1"

echo "SSHing into $nodename"

docker exec -t -i $nodename /bin/bash

--------------------------------------------------------------------------------
/ssh_replica_2.sh:
--------------------------------------------------------------------------------
#!/bin/bash

nodename="redisreplica2"

echo "SSHing into $nodename"

docker exec -t -i $nodename /bin/bash

--------------------------------------------------------------------------------
/ssh_replica_3.sh:
--------------------------------------------------------------------------------
#!/bin/bash

nodename="redisreplica3"

echo "SSHing into $nodename"

docker exec -t -i $nodename /bin/bash

--------------------------------------------------------------------------------
/start_cluster.sh:
--------------------------------------------------------------------------------
#!/bin/bash

./cleanup_existing_cluster_containers.sh

echo "Starting the Cluster on Docker Swarm"
export DOCKER_HOST=localhost:4000
docker-compose --x-networking --x-network-driver overlay up -d
echo "Done"

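Since the cluster comes up with Compose's experimental `--x-networking --x-network-driver overlay` flags, the containers should share a Compose-managed overlay network. A quick way to check it through the swarm endpoint (not part of this repository's scripts; the generated network name depends on the project directory name, and the experimental networking behavior varies between Compose releases):

```
$ docker -H localhost:4000 network ls
$ docker -H localhost:4000 network inspect NETWORK_NAME    # substitute the name shown by "network ls"
```
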
--------------------------------------------------------------------------------
/stop_cluster.sh:
--------------------------------------------------------------------------------
#!/bin/bash

echo "Stopping the Cluster on Docker Swarm"
docker-compose stop

echo "Cleaning up old containers for ports"
./cleanup_existing_cluster_containers.sh

echo "Done"

exit 0

--------------------------------------------------------------------------------
/swarm/restart_single_swarm_manager.sh:
--------------------------------------------------------------------------------
#!/bin/bash

log="/tmp/local-swarm.log"
hostfqdn="localhost"
swarmdiscoveryport=4000
consuldiscoveryport=8500
uniqueswarmprefix="developmentswarm"

echo "Starting Single Swarm Manager Listening on: $hostfqdn:$swarmdiscoveryport "
nohup /usr/local/bin/swarm manage -H tcp://$hostfqdn:$swarmdiscoveryport --advertise $hostfqdn:$swarmdiscoveryport consul://$hostfqdn:$consuldiscoveryport/$uniqueswarmprefix &> $log &

echo "Done Starting Swarm Manager"

exit 0

--------------------------------------------------------------------------------
/swarm/swarm_join_local_consul.sh:
--------------------------------------------------------------------------------
#!/bin/bash

log="/tmp/local-joinswarm.log"
hostfqdn="localhost"
swarmdiscoveryport=4000
consuldiscoveryport=8500
uniqueswarmprefix="developmentswarm"

echo "Starting Swarm Join on: consul://$hostfqdn:$consuldiscoveryport/$uniqueswarmprefix" > $log
nohup /usr/local/bin/swarm join --addr=$hostfqdn:2375 consul://$hostfqdn:$consuldiscoveryport/$uniqueswarmprefix &> $log &

echo "Done Starting Swarm Join" >> $log

exit 0

--------------------------------------------------------------------------------
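
Once the join is running, the discovery backend can be queried to confirm that the engine registered itself under the same consul prefix. This is only a suggested check, not a script in this repository; it should print the advertised engine address (localhost:2375):

```
$ swarm list consul://localhost:8500/developmentswarm
```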