├── README.md
├── etc
│   └── supervisord.conf
├── Dockerfile
└── bin
    ├── run.sh
    ├── join-gluster.sh
    ├── manage-gluster-quota.sh
    └── add-gluster-peer.sh
/README.md:
--------------------------------------------------------------------------------
1 | # rancher-glusterfs-server
2 | Gluster FS Cluster for being used with Rancher Server
3 |
--------------------------------------------------------------------------------
/etc/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | nodaemon=true
3 |
4 | [program:sshd]
5 | command=/usr/sbin/sshd -D
6 |
7 | [program:glusterd]
8 | command=/usr/sbin/glusterd -p /var/run/glusterd.pid
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 |
3 | MAINTAINER Manel Martinez
[NOTE(review): the maintainer e-mail address, originally wrapped in <angle brackets>, was stripped during extraction — recover it from the upstream repository]
4 |
5 | RUN apt-get update && \
6 |     apt-get install -y python-software-properties software-properties-common
7 | RUN add-apt-repository -y ppa:gluster/glusterfs-3.7 && \
8 |     apt-get update && \
9 |     apt-get install -y glusterfs-server supervisor openssh-server dnsutils sshpass
10 |
11 | ENV ROOT_PASSWORD **ChangeMe**
12 |
13 | ENV SSH_PORT 2222
14 | ENV SSH_USER root
15 | ENV SSH_OPTS -p ${SSH_PORT} -o ConnectTimeout=20 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
16 | ENV GLUSTER_VOL ranchervol
17 | ENV GLUSTER_BRICK_PATH /gluster_volume
18 | ENV GLUSTER_CONF_FLAG /etc/gluster.env
19 | ENV SERVICE_NAME gluster
20 |
21 | ENV DEBUG 0
22 |
23 | VOLUME ["${GLUSTER_BRICK_PATH}"]
24 | VOLUME /var/lib/glusterd
25 |
26 | RUN mkdir -p /var/run/sshd /root/.ssh /var/log/supervisor /var/run/gluster
27 | RUN perl -p -i -e "s/^Port .*/Port ${SSH_PORT}/g" /etc/ssh/sshd_config
28 | RUN perl -p -i -e "s/#?PasswordAuthentication .*/PasswordAuthentication yes/g" /etc/ssh/sshd_config
29 | RUN perl -p -i -e "s/#?PermitRootLogin .*/PermitRootLogin yes/g"
/etc/ssh/sshd_config
30 | RUN grep ClientAliveInterval /etc/ssh/sshd_config >/dev/null 2>&1 || echo "ClientAliveInterval 60" >> /etc/ssh/sshd_config
31 |
32 | RUN mkdir -p /usr/local/bin
33 | ADD ./bin /usr/local/bin
34 | RUN chmod +x /usr/local/bin/*.sh
35 | ADD ./etc/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
36 |
37 | CMD ["/usr/local/bin/run.sh"]
38 |
--------------------------------------------------------------------------------
/bin/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | [ "$DEBUG" == "1" ] && set -x && set +e
6 |
7 | if [ "${ROOT_PASSWORD}" == "**ChangeMe**" -o -z "${ROOT_PASSWORD}" ]; then
8 |   echo "*** ERROR: you need to define ROOT_PASSWORD environment variable - Exiting ..."
9 |   exit 1
10 | fi
11 |
12 | if [ "${SERVICE_NAME}" == "**ChangeMe**" -o -z "${SERVICE_NAME}" ]; then
13 |   echo "*** ERROR: you need to define SERVICE_NAME environment variable - Exiting ..."
14 |   exit 1
15 | fi
16 |
17 | # Required stuff to work
18 | sleep 5
19 | export GLUSTER_PEERS=`dig +short ${SERVICE_NAME} | sort`
20 | if [ -z "${GLUSTER_PEERS}" ]; then
21 |   echo "*** ERROR: Could not determine which containers are part of this service."
22 |   echo "*** Is this service named \"${SERVICE_NAME}\"? If not, please regenerate the service"
23 |   echo "*** and add SERVICE_NAME environment variable which value should be equal to this service name"
24 |   echo "*** Exiting ..."
25 |   exit 1
26 | fi
27 | export MY_RANCHER_IP=`ip addr | grep inet | grep 10.42 | tail -1 | awk '{print $2}' | awk -F\/ '{print $1}'`
28 | if [ -z "${MY_RANCHER_IP}" ]; then
29 |   echo "*** ERROR: Could not determine this container Rancher IP - Exiting ..."
30 |   exit 1
31 | fi
32 |
33 | echo "root:${ROOT_PASSWORD}" | chpasswd
34 |
35 | # Prepare a shell to initialize docker environment variables for ssh
36 | echo "#!/bin/bash" > ${GLUSTER_CONF_FLAG}
37 | echo "ROOT_PASSWORD=\"${ROOT_PASSWORD}\"" >> ${GLUSTER_CONF_FLAG}
38 | echo "SSH_PORT=\"${SSH_PORT}\"" >> ${GLUSTER_CONF_FLAG}
39 | echo "SSH_USER=\"${SSH_USER}\"" >> ${GLUSTER_CONF_FLAG}
40 | echo "SSH_OPTS=\"${SSH_OPTS}\"" >> ${GLUSTER_CONF_FLAG}
41 | echo "GLUSTER_VOL=\"${GLUSTER_VOL}\"" >> ${GLUSTER_CONF_FLAG}
42 | echo "GLUSTER_BRICK_PATH=\"${GLUSTER_BRICK_PATH}\"" >> ${GLUSTER_CONF_FLAG}
43 | echo "DEBUG=\"${DEBUG}\"" >> ${GLUSTER_CONF_FLAG}
44 | echo "MY_RANCHER_IP=\"${MY_RANCHER_IP}\"" >> ${GLUSTER_CONF_FLAG}
45 |
46 | join-gluster.sh &
47 | /usr/bin/supervisord
48 |
--------------------------------------------------------------------------------
/bin/join-gluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | [ "$DEBUG" == "1" ] && set -x
6 |
7 | function check_if_already_joined {
8 |   # Check if I'm part of the cluster
9 |   NUMBER_OF_PEERS=`gluster peer status | grep "Number of Peers:" | awk -F: '{print $2}'`
10 |   if [ ${NUMBER_OF_PEERS} -ne 0 ]; then
11 |     # This container is already part of the cluster
12 |     echo "=> This container is already joined with nodes ${GLUSTER_PEERS}, skipping joining ..."
13 |     exit 0
14 |   fi
15 | }
16 |
17 | echo "=> Waiting for glusterd to start..."
18 | sleep 10
19 |
20 | check_if_already_joined
21 |
22 | # Join the cluster - choose a suitable container
23 | ALIVE=0
24 | for PEER in ${GLUSTER_PEERS}; do
25 |   # Skip myself
26 |   if [ "${MY_RANCHER_IP}" == "${PEER}" ]; then
27 |     continue
28 |   fi
29 |   echo "=> Checking if I can reach gluster container ${PEER} ..."
30 |   if sshpass -p ${ROOT_PASSWORD} ssh ${SSH_OPTS} ${SSH_USER}@${PEER} "hostname" >/dev/null 2>&1; then
31 |     echo "=> Gluster container ${PEER} is alive"
32 |     ALIVE=1
33 |     break
34 |   else
35 |     echo "*** Could not reach gluster container ${PEER} ..."
36 |   fi
37 | done
38 |
39 | if [ ${ALIVE} -eq 0 ]; then
40 |   echo "Could not reach any GlusterFS container from this list: ${GLUSTER_PEERS} - Maybe I am the first node in the cluster? Well, I keep waiting for new containers to join me ..."
41 |   exit 0
42 | fi
43 |
44 | # If PEER has requested me to join him, just wait for a while
45 | SEMAPHORE_FILE=/tmp/adding-gluster-node.${PEER}
46 | if [ -e ${SEMAPHORE_FILE} ]; then
47 |   echo "=> Seems like peer ${PEER} has just requested me to join him"
48 |   echo "=> So I'm waiting for 60 seconds to finish it..."
49 |   sleep 60
50 | fi
51 | check_if_already_joined
52 |
53 | echo "=> Joining cluster with container ${PEER} ..."
54 | sshpass -p ${ROOT_PASSWORD} ssh ${SSH_OPTS} ${SSH_USER}@${PEER} "add-gluster-peer.sh ${MY_RANCHER_IP}"
55 | if [ $? -eq 0 ]; then
56 |   echo "=> Successfully joined cluster with container ${PEER} ..."
57 | else
58 |   echo "=> Error joining cluster with container ${PEER} ..."
59 | fi
60 |
--------------------------------------------------------------------------------
/bin/manage-gluster-quota.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Exit status = 0 means success
4 | # Exit status = 1 means error
5 |
6 | set -e
7 |
8 | EXIT_OK=0
9 | EXIT_ERROR=1
10 |
11 | GLUSTER_CONF_FLAG=/etc/gluster.env
12 | source ${GLUSTER_CONF_FLAG}
13 |
14 | [ "$DEBUG" == "1" ] && set -x && set +e
15 |
16 | function exit_msg() {
17 |   echo $(basename $0): [From container ${MY_RANCHER_IP}]
18 |   echo "$1"
19 |   exit $2
20 | }
21 |
22 | # Params:
23 | #   -o OPERATION
24 | #   -d DIRECTORY
25 | #   -q QUOTA (-s SERVICE is also accepted; used by the DELETE operation)
26 |
27 | while getopts ":o:d:s:q:" PARAMS; do
28 |   case $PARAMS in
29 |     o)
30 |       OPERATION=`echo $OPTARG | tr '[:lower:]' '[:upper:]'`
31 |       ;;
32 |     d)
33 |       DIRECTORY=$OPTARG
34 |       ;;
35 |     s)
36 |       SERVICE=$OPTARG
37 |       ;;
38 |     q)
39 |       QUOTA=$OPTARG
40 |       ;;
41 |   esac
42 | done
43 |
44 | [ -z "$OPERATION" ] && exit_msg "Error, operation parameter is missing (parameter -o)" $EXIT_ERROR
45 |
46 | case $OPERATION in
47 |   SUMMARY)
48 |     msg=`gluster volume quota ${GLUSTER_VOL} list | grep "^/"`
49 |     exit_msg "$msg" $?
50 |     ;;
51 |   SET)
52 |     [ -z "$DIRECTORY" ] && exit_msg "Error, directory parameter is missing (parameter -d)" $EXIT_ERROR
53 |     [ -z "$QUOTA" ] && exit_msg "Error, quota parameter is missing (parameter -q)" $EXIT_ERROR
54 |
55 |     # Set quota on directory
56 |     if ! mount | grep "on /run/gluster/${GLUSTER_VOL} type" >/dev/null; then
57 |       gluster volume quota ${GLUSTER_VOL} list >/dev/null
58 |       sleep 5
59 |     fi
60 |     if [ ! -d /run/gluster/${GLUSTER_VOL}/${DIRECTORY} ]; then
61 |       mkdir /run/gluster/${GLUSTER_VOL}/${DIRECTORY}
62 |       chown www-data:www-data /run/gluster/${GLUSTER_VOL}/${DIRECTORY}
63 |     fi
64 |     msg=`gluster volume quota ${GLUSTER_VOL} limit-usage /${DIRECTORY} $QUOTA`
65 |     exit_msg "$msg" $?
66 |     ;;
67 |   FREE)
68 |     [ -z "$DIRECTORY" ] && exit_msg "Error, directory parameter is missing (parameter -d)" $EXIT_ERROR
69 |
70 |     msg=`gluster volume quota ${GLUSTER_VOL} list /${DIRECTORY} | grep "^/" | awk '{print $5}'`
71 |     exit_msg "$msg" $?
72 |     ;;
73 |   USED)
74 |     [ -z "$DIRECTORY" ] && exit_msg "Error, directory parameter is missing (parameter -d)" $EXIT_ERROR
75 |
76 |     msg=`gluster volume quota ${GLUSTER_VOL} list /${DIRECTORY} | grep "^/" | awk '{print $4}'`
77 |     exit_msg "$msg" $?
78 |     ;;
79 |   TOTAL)
80 |     [ -z "$DIRECTORY" ] && exit_msg "Error, directory parameter is missing (parameter -d)" $EXIT_ERROR
81 |
82 |     msg=`gluster volume quota ${GLUSTER_VOL} list /${DIRECTORY} | grep "^/" | awk '{print $2}'`
83 |     exit_msg "$msg" $?
84 |     ;;
85 |   DELETE)
86 |     [ -z "$DIRECTORY" ] && exit_msg "Error, directory parameter is missing (parameter -d)" $EXIT_ERROR
87 |     [ -z "$SERVICE" ] && exit_msg "Error, service parameter is missing (parameter -s)" $EXIT_ERROR
88 |
89 |     if ! mount | grep "on /run/gluster/${GLUSTER_VOL} type" >/dev/null; then
90 |       gluster volume quota ${GLUSTER_VOL} list >/dev/null
91 |       sleep 5
92 |     fi
93 |     if [ ! -d /run/gluster/${GLUSTER_VOL}/${DIRECTORY} ]; then
94 |       exit_msg "Could not delete directory ${DIRECTORY}/${SERVICE} - Exiting..." 1
95 |     fi
96 |     msg=`rm -rf /run/gluster/${GLUSTER_VOL}/${DIRECTORY}/${SERVICE}`
97 |     exit_msg "$msg" $?
98 |     ;;
99 |   *)
100 |     exit_msg "ERROR: unknown operation $OPERATION" $EXIT_ERROR
101 |     ;;
102 | esac
103 |
--------------------------------------------------------------------------------
/bin/add-gluster-peer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Exit status = 0 means the peer was successfully joined
4 | # Exit status = 1 means there was an error while joining the peer to the cluster
5 |
6 | set -e
7 |
8 | [ "$DEBUG" == "1" ] && set -x && set +e
9 |
10 | PEER=$1
11 |
12 | if [ -z "${PEER}" ]; then
13 |   echo "=> ERROR: I was supposed to add a new gluster peer to the cluster but no IP was specified, doing nothing ..."
14 |   exit 1
15 | fi
16 |
17 | GLUSTER_CONF_FLAG=/etc/gluster.env
18 | SEMAPHORE_FILE_DIR=/tmp
19 | SEMAPHORE_FILE_NAME=adding-gluster-node.
20 | SEMAPHORE_FILE=/${SEMAPHORE_FILE_DIR}/${SEMAPHORE_FILE_NAME}${PEER}
21 | SEMAPHORE_TIMEOUT=120
22 | source ${GLUSTER_CONF_FLAG}
23 |
24 | function echo() {
25 |   builtin echo $(basename $0): [From container ${MY_RANCHER_IP}] $1
26 | }
27 |
28 | function detach() {
29 |   echo "=> Some error occurred while trying to add peer ${PEER} to the cluster - detaching it ..."
30 |   gluster peer detach ${PEER} force
31 |   rm -f ${SEMAPHORE_FILE}
32 |   exit 1
33 | }
34 |
35 | echo "=> Checking if I can reach gluster container ${PEER} ..."
36 | if sshpass -p ${ROOT_PASSWORD} ssh ${SSH_OPTS} ${SSH_USER}@${PEER} "hostname" >/dev/null 2>&1; then
37 |   echo "=> Gluster container ${PEER} is alive"
38 | else
39 |   echo "*** Could not reach gluster master container ${PEER} - exiting ..."
40 |   exit 1
41 | fi
42 |
43 | # Gluster does not like to add two nodes at once
44 | for ((SEMAPHORE_RETRY=0; SEMAPHORE_RETRY/dev/null; then
[NOTE(review): the content of original lines 44-65 was lost during extraction — everything between the '<' in the for-loop condition (presumably 'SEMAPHORE_RETRY<${SEMAPHORE_TIMEOUT}') and the '>' of a '>/dev/null' redirection on line 65 was stripped as markup. The missing span appears to have contained the semaphore wait loop, the semaphore-file creation, and the PEER_STATUS peer-status check referenced below on line 75. Do not use this copy of the script; recover the missing lines from the upstream repository.]
66 |   echo "=> Peer container ${PEER} was part of this cluster but must be dropped now ..."
67 |     gluster --mode=script volume remove-brick ${GLUSTER_VOL} replica $((NUMBER_OF_REPLICAS-1)) ${PEER}:${GLUSTER_BRICK_PATH} force
68 |     sleep 5
69 |   fi
70 |   gluster peer detach ${PEER} force
71 |   sleep 5
72 | fi
73 |
74 | # Probe the peer
75 | if ! echo "${PEER_STATUS}" | grep "Peer in Cluster" >/dev/null; then
76 |   # Peer probe
77 |   echo "=> Probing peer ${PEER} ..."
78 |   gluster peer probe ${PEER}
79 |   sleep 5
80 | fi
81 |
82 | # Check how many peers are already joined in the cluster - needed to add a replica
83 | NUMBER_OF_REPLICAS=`gluster volume info ${GLUSTER_VOL} | grep "Number of Bricks:" | awk '{print $6}'`
84 | # Create the volume
85 | if ! gluster volume list | grep "^${GLUSTER_VOL}$" >/dev/null; then
86 |   echo "=> Creating GlusterFS volume ${GLUSTER_VOL}..."
87 |   gluster volume create ${GLUSTER_VOL} replica 2 ${MY_RANCHER_IP}:${GLUSTER_BRICK_PATH} ${PEER}:${GLUSTER_BRICK_PATH} force || detach
88 |   sleep 1
89 | fi
90 |
91 | # Start the volume
92 | if ! gluster volume status ${GLUSTER_VOL} >/dev/null; then
93 |   echo "=> Starting GlusterFS volume ${GLUSTER_VOL}..."
94 |   gluster volume start ${GLUSTER_VOL}
95 |   sleep 1
96 |   # Enable quota on this volume
97 |   gluster volume quota ${GLUSTER_VOL} enable
98 | fi
99 |
100 | if ! gluster volume info ${GLUSTER_VOL} | grep ": ${PEER}:${GLUSTER_BRICK_PATH}$" >/dev/null; then
101 |   echo "=> Adding brick ${PEER}:${GLUSTER_BRICK_PATH} to the cluster (replica=$((NUMBER_OF_REPLICAS+1)))..."
102 |   gluster volume add-brick ${GLUSTER_VOL} replica $((NUMBER_OF_REPLICAS+1)) ${PEER}:${GLUSTER_BRICK_PATH} force || detach
103 | fi
104 |
105 | rm -f ${SEMAPHORE_FILE}
106 | exit 0
107 |
--------------------------------------------------------------------------------