├── .gitignore
├── makefile
├── local-compose.yml
├── docker-compose.yml
├── Dockerfile
├── etc
│   └── containerpilot.json
├── demo.sh
├── setup.sh
├── README.md
└── bin
    └── manage.sh

/.gitignore:
--------------------------------------------------------------------------------
.DS_Store
_env
--------------------------------------------------------------------------------

/makefile:
--------------------------------------------------------------------------------
MAKEFLAGS += --warn-undefined-variables
SHELL := /bin/bash
.SHELLFLAGS := -eu -o pipefail
.DEFAULT_GOAL := build

TAG?=latest

# run the Docker build
build:
	docker build -t="autopilotpattern/couchbase:${TAG}" .

# push our image to the public registry
ship: build
	docker push "autopilotpattern/couchbase:${TAG}"
--------------------------------------------------------------------------------

/local-compose.yml:
--------------------------------------------------------------------------------
couchbase:
  extends:
    file: docker-compose.yml
    service: couchbase
  build: .
  mem_limit: 1g
  environment:
    - CONSUL=consul
  links:
    - consul:consul
  ports:
    - "8091:8091"
    - "8092:8092"

consul:
  extends:
    file: docker-compose.yml
    service: consul
  ports:
    - "8500:8500"
--------------------------------------------------------------------------------
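For development outside Triton, `local-compose.yml` above extends the production definitions, builds the image from this directory, and publishes the Couchbase and Consul ports on localhost. A minimal sketch of a local session:

```bash
# build the image and start one Couchbase node plus a Consul server locally
docker-compose -f local-compose.yml -p cb up -d

# once the first node has bootstrapped, scale the Couchbase tier
docker-compose -f local-compose.yml -p cb scale couchbase=3
```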
/docker-compose.yml:
--------------------------------------------------------------------------------
# The first instance will automatically bootstrap the cluster.
# Scale this tier and each additional instance will automatically
# self-configure as a member of the cluster.
couchbase:
  image: autopilotpattern/couchbase:enterprise-4.0.0-r2
  restart: always
  mem_limit: 4096m
  ports:
    - 8091
    - 8092
  env_file: _env
  labels:
    - triton.cns.services=couchbase


# Start with a single Consul host, which will bootstrap the cluster.
# In production we'll want to use an HA Consul cluster.
consul:
  image: progrium/consul:latest
  restart: always
  mem_limit: 128m
  ports:
    - 8500 # expose Consul's UI on the public IP
  dns:
    - 127.0.0.1
  labels:
    - triton.cns.services=consul
  command: -server -bootstrap -ui-dir /ui
--------------------------------------------------------------------------------

/Dockerfile:
--------------------------------------------------------------------------------
# Autopilot Pattern Couchbase
FROM couchbase/server:enterprise-4.0.0

# install jq
RUN apt-get update && \
    apt-get install -y \
        jq \
    && rm -rf /var/lib/apt/lists/*

# get the ContainerPilot release
ENV CONTAINERPILOT_VERSION 2.0.0
ENV CONTAINERPILOT file:///etc/containerpilot.json

RUN export CP_SHA1=a82b1257328551eb93fc9a8cc1dd3f3e64664dd5 \
    && curl -Lso /tmp/containerpilot.tar.gz \
        "https://github.com/joyent/containerpilot/releases/download/${CONTAINERPILOT_VERSION}/containerpilot-${CONTAINERPILOT_VERSION}.tar.gz" \
    && echo "${CP_SHA1}  /tmp/containerpilot.tar.gz" | sha1sum -c \
    && tar zxf /tmp/containerpilot.tar.gz -C /bin \
    && rm /tmp/containerpilot.tar.gz

# Add ContainerPilot configuration files and handlers
COPY etc/containerpilot.json /etc/containerpilot.json
COPY bin/* /usr/local/bin/

# Metadata
EXPOSE 8091 8092 11207 11210 11211 18091 18092
VOLUME /opt/couchbase/var

# pass -noinput to couchbase-server so we don't get dropped into the Erlang shell
CMD ["/bin/containerpilot", \
     "/usr/sbin/runsvdir-start", \
     "couchbase-server", \
     "--", \
     "-noinput"]
--------------------------------------------------------------------------------

/etc/containerpilot.json:
--------------------------------------------------------------------------------
{
  "consul": "{{ .CONSUL }}:8500",
  "services": [
    {
      "name": "couchbase-api",
      "port": 8092,
      "health": ["/usr/local/bin/manage.sh", "health"],
      "poll": 5,
      "ttl": 15
    },
    {
      "name": "couchbase-web",
      "port": 8091,
      "health": ["curl", "-Ls", "-o", "/dev/null", "--fail", "http://localhost:8091"],
      "poll": 5,
      "ttl": 15
    }
  ],
  "telemetry": {
    "port": 9090,
    "sensors": [
      {
        "name": "couchbase_cpu_utilization",
        "help": "Couchbase CPU utilization rate",
        "type": "gauge",
        "poll": 5,
        "check": ["/usr/local/bin/manage.sh", "stats", "systemStats.cpu_utilization_rate"]
      },
      {
        "name": "couchbase_mem_free",
        "help": "Couchbase node memory free",
        "type": "gauge",
        "poll": 5,
        "check": ["/usr/local/bin/manage.sh", "stats", "systemStats.mem_free"]
      },
      {
        "name": "couchbase_swap_used",
        "help": "Couchbase swap usage",
        "type": "gauge",
        "poll": 5,
        "check": ["/usr/local/bin/manage.sh", "stats", "systemStats.swap_used"]
      }
    ]
  }
}
--------------------------------------------------------------------------------
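The `telemetry` block above tells ContainerPilot to serve those three gauges in Prometheus format on port 9090, running `manage.sh stats` for each on every poll. A quick spot-check from inside a running container (a sketch; assumes ContainerPilot's usual `/metrics` endpoint):

```bash
# scrape ContainerPilot's Prometheus endpoint and show the Couchbase gauges
curl -s http://localhost:9090/metrics | grep '^couchbase_'
```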
"Dashboard: $CONSUL" 21 | command -v open >/dev/null 2>&1 && `open http://$CONSUL/ui/` 22 | 23 | CBDASHBOARD="$(triton ip ${PREFIX}_couchbase_1):8091" 24 | echo 25 | echo 'Couchbase cluster running and bootstrapped' 26 | echo "Dashboard: $CBDASHBOARD" 27 | command -v open >/dev/null 2>&1 && `open http://$CBDASHBOARD/index.html#sec=servers` 28 | 29 | echo 30 | echo 'Creating couchbase bucket' 31 | # we're specifying a bucket with 2 replicas and using 70% of the 4096MB 32 | # we specified for the container in our docker-compose.yml 33 | curl -s -XPOST -u ${COUCHBASE_USER}:${COUCHBASE_PASS} \ 34 | -d 'name=couchbase' \ 35 | -d 'authType=none' \ 36 | -d 'ramQuotaMB=2856' \ 37 | -d 'replicaNumber=2' \ 38 | -d 'proxyPort=11222' \ 39 | "http://${CBDASHBOARD}/pools/default/buckets" 40 | 41 | echo 42 | echo 'Scaling Couchbase cluster to three nodes' 43 | echo "docker-compose --project-name=$PREFIX scale couchbase=3" 44 | docker-compose --project-name=${PREFIX} scale couchbase=3 45 | 46 | echo 47 | echo "Go ahead, try a lucky 7 node cluster:" 48 | echo "docker-compose --project-name=$PREFIX scale couchbase=7" 49 | -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | help() { 5 | echo 'Usage ./setup.sh [-f docker-compose.yml] [-p project]' 6 | echo 7 | echo 'Checks that your Triton and Docker environment is sane and configures' 8 | echo 'an environment file to use.' 9 | echo 10 | echo 'Optional flags:' 11 | echo ' -f use this file as the docker-compose config file' 12 | echo ' -p use this name as the project prefix for docker-compose' 13 | } 14 | 15 | 16 | # default values which can be overriden by -f or -p flags 17 | export COMPOSE_PROJECT_NAME=cb 18 | export COMPOSE_FILE= 19 | 20 | # give the docker remote api more time before timeout 21 | export COMPOSE_HTTP_TIMEOUT=300 22 | 23 | # populated by `check` function whenever we're using Triton 24 | TRITON_USER= 25 | TRITON_DC= 26 | TRITON_ACCOUNT= 27 | 28 | # --------------------------------------------------- 29 | # Top-level commmands 30 | 31 | 32 | # Check for correct configuration 33 | check() { 34 | 35 | command -v docker >/dev/null 2>&1 || { 36 | echo 37 | tput rev # reverse 38 | tput bold # bold 39 | echo 'Docker is required, but does not appear to be installed.' 40 | tput sgr0 # clear 41 | echo 'See https://docs.joyent.com/public-cloud/api-access/docker' 42 | exit 1 43 | } 44 | command -v json >/dev/null 2>&1 || { 45 | echo 46 | tput rev # reverse 47 | tput bold # bold 48 | echo 'Error! JSON CLI tool is required, but does not appear to be installed.' 49 | tput sgr0 # clear 50 | echo 'See https://apidocs.joyent.com/cloudapi/#getting-started' 51 | exit 1 52 | } 53 | 54 | # if we're not testing on Triton, don't bother checking Triton config 55 | if [ ! -z "${COMPOSE_FILE}" ]; then 56 | exit 0 57 | fi 58 | 59 | command -v triton >/dev/null 2>&1 || { 60 | echo 61 | tput rev # reverse 62 | tput bold # bold 63 | echo 'Error! Joyent Triton CLI is required, but does not appear to be installed.' 64 | tput sgr0 # clear 65 | echo 'See https://www.joyent.com/blog/introducing-the-triton-command-line-tool' 66 | exit 1 67 | } 68 | 69 | # make sure Docker client is pointed to the same place as the Triton client 70 | local docker_user=$(docker info 2>&1 | awk -F": " '/SDCAccount:/{print $2}') 71 | local docker_dc=$(echo $DOCKER_HOST | awk -F"/" '{print $3}' | awk -F'.' 
/setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e -o pipefail

help() {
    echo 'Usage: ./setup.sh [-f docker-compose.yml] [-p project]'
    echo
    echo 'Checks that your Triton and Docker environment is sane and configures'
    echo 'an environment file to use.'
    echo
    echo 'Optional flags:'
    echo '  -f  use this file as the docker-compose config file'
    echo '  -p  use this name as the project prefix for docker-compose'
}


# default values which can be overridden by -f or -p flags
export COMPOSE_PROJECT_NAME=cb
export COMPOSE_FILE=

# give the Docker remote API more time before timeout
export COMPOSE_HTTP_TIMEOUT=300

# populated by the `check` function whenever we're using Triton
TRITON_USER=
TRITON_DC=
TRITON_ACCOUNT=

# ---------------------------------------------------
# Top-level commands


# Check for correct configuration
check() {

    command -v docker >/dev/null 2>&1 || {
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Docker is required, but does not appear to be installed.'
        tput sgr0 # clear
        echo 'See https://docs.joyent.com/public-cloud/api-access/docker'
        exit 1
    }
    command -v json >/dev/null 2>&1 || {
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Error! JSON CLI tool is required, but does not appear to be installed.'
        tput sgr0 # clear
        echo 'See https://apidocs.joyent.com/cloudapi/#getting-started'
        exit 1
    }

    # if we're not testing on Triton, don't bother checking Triton config
    if [ ! -z "${COMPOSE_FILE}" ]; then
        exit 0
    fi

    command -v triton >/dev/null 2>&1 || {
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Error! Joyent Triton CLI is required, but does not appear to be installed.'
        tput sgr0 # clear
        echo 'See https://www.joyent.com/blog/introducing-the-triton-command-line-tool'
        exit 1
    }

    # make sure Docker client is pointed to the same place as the Triton client
    local docker_user=$(docker info 2>&1 | awk -F": " '/SDCAccount:/{print $2}')
    local docker_dc=$(echo $DOCKER_HOST | awk -F"/" '{print $3}' | awk -F'.' '{print $1}')
    TRITON_USER=$(triton profile get | awk -F": " '/account:/{print $2}')
    TRITON_DC=$(triton profile get | awk -F"/" '/url:/{print $3}' | awk -F'.' '{print $1}')
    TRITON_ACCOUNT=$(triton account get | awk -F": " '/id:/{print $2}')
    if [ ! "$docker_user" = "$TRITON_USER" ] || [ ! "$docker_dc" = "$TRITON_DC" ]; then
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Error! The Triton CLI configuration does not match the Docker CLI configuration.'
        tput sgr0 # clear
        echo
        echo "Docker user: ${docker_user}"
        echo "Triton user: ${TRITON_USER}"
        echo "Docker data center: ${docker_dc}"
        echo "Triton data center: ${TRITON_DC}"
        exit 1
    fi

    local triton_cns_enabled=$(triton account get | awk -F": " '/cns/{print $2}')
    if [ ! "true" == "$triton_cns_enabled" ]; then
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Error! Triton CNS is required and not enabled.'
        tput sgr0 # clear
        echo
        exit 1
    fi

    if [ ! -f "_env" ]; then
        echo "Creating a configuration file..."
        echo 'COUCHBASE_USER=' > _env
        echo 'COUCHBASE_PASS=' >> _env
        echo >> _env
        echo CONSUL=consul.svc.${TRITON_ACCOUNT}.${TRITON_DC}.cns.joyent.com >> _env
        echo 'Edit the _env file to include a COUCHBASE_USER and COUCHBASE_PASS'
    fi
}

# ---------------------------------------------------
# parse arguments

while getopts "f:p:h" optchar; do
    case "${optchar}" in
        f) export COMPOSE_FILE=${OPTARG} ;;
        p) export COMPOSE_PROJECT_NAME=${OPTARG} ;;
        h) help; exit 0 ;;
    esac
done
shift $(expr $OPTIND - 1 )

until
    cmd=$1
    if [ ! -z "$cmd" ]; then
        shift 1
        $cmd "$@"
        if [ $? == 127 ]; then
            help
        fi
        exit
    fi
do
    echo
done

# default behavior
check
--------------------------------------------------------------------------------
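When `check` succeeds on Triton it writes an `_env` file like the sketch below; the account UUID and data center in the CNS name come from your own Triton profile, and the credentials are whatever you fill in (all values here are placeholders):

```bash
# _env (example; edit with your own values)
COUCHBASE_USER=Administrator
COUCHBASE_PASS=<strong password>

CONSUL=consul.svc.<your-account-uuid>.<your-data-center>.cns.joyent.com
```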
/README.md:
--------------------------------------------------------------------------------
# Autopilot Pattern Couchbase

[![DockerPulls](https://img.shields.io/docker/pulls/autopilotpattern/couchbase.svg)](https://registry.hub.docker.com/u/autopilotpattern/couchbase/)
[![DockerStars](https://img.shields.io/docker/stars/autopilotpattern/couchbase.svg)](https://registry.hub.docker.com/u/autopilotpattern/couchbase/)
[![ImageLayers](https://badge.imagelayers.io/autopilotpattern/couchbase:latest.svg)](https://imagelayers.io/?images=autopilotpattern/couchbase:latest)
[![Join the chat at https://gitter.im/autopilotpattern/general](https://badges.gitter.im/autopilotpattern/general.svg)](https://gitter.im/autopilotpattern/general)

This repo is a demonstration of the [Autopilot Pattern](http://autopilotpattern.io/) as applied to [Couchbase](http://www.couchbase.com/). Couchbase's built-in cluster awareness and automatic management of data, including sharding and cross-datacenter replication, make it ideal for deployment as part of an entire stack using the Autopilot Pattern.

Included here is everything you need to deploy a Couchbase cluster that can be scaled just by running `docker-compose scale couchbase=$n`. The repo consists of a Dockerfile to build a Couchbase container image, a couple of shell scripts to set up your environment and assist orchestration, and a Docker Compose file to tie it all together.

### Bootstrapping Couchbase

A new Couchbase node only needs to know where to find one other node in order to join a cluster. In this pattern we're using a ContainerPilot `health` check handler to coordinate the creation of the cluster, with [Consul](https://www.consul.io/) as the service discovery layer. Consul runs in its default configuration as delivered in [Jeff Lindsay's excellent image](https://registry.hub.docker.com/u/progrium/consul/); the magic lives in the Couchbase image's ContainerPilot health check handler [here](https://github.com/autopilotpattern/couchbase/blob/master/bin/manage.sh).

Each time the [`health` handler](https://github.com/autopilotpattern/couchbase/blob/master/bin/manage.sh) runs, it first checks whether the node has already joined a cluster. If so, it continues on to health check the node and then sends a heartbeat to Consul. If not, the handler needs to figure out whether the cluster has been initialized. The steps are as follows:

1. Has another node been registered with Consul for the cluster? If so, we can join to it.
1. Is another node in the middle of bootstrapping the cluster? If so, wait for it and then join to it.
1. Otherwise, bootstrap the cluster, but let any other nodes know that we're doing it by writing a lock in Consul.

### Getting started

1. [Get a Joyent account](https://my.joyent.com/landing/signup/) and [add your SSH key](https://docs.joyent.com/public-cloud/getting-started).
1. Install the [Docker Toolbox](https://docs.docker.com/installation/mac/) (including `docker` and `docker-compose`) on your laptop or other environment, as well as the [Joyent Triton CLI](https://www.joyent.com/blog/introducing-the-triton-command-line-tool) (`triton` replaces our old `sdc-*` CLI tools).
1. [Configure Docker and Docker Compose for use with Joyent](https://docs.joyent.com/public-cloud/api-access/docker):

```bash
curl -O https://raw.githubusercontent.com/joyent/sdc-docker/master/tools/sdc-docker-setup.sh && chmod +x sdc-docker-setup.sh
./sdc-docker-setup.sh -k us-east-1.api.joyent.com <ACCOUNT> ~/.ssh/<ssh key file>
```

Check that everything is configured correctly by running `./setup.sh`. If it returns without an error you're all set. This script will create an `_env` file that includes the Triton CNS name for the Consul service. You'll want to edit this file to set the username and password for Couchbase.

### Running the cluster

Once you've cloned the repo and run `./setup.sh`, you can start a new cluster with just Docker Compose:

```bash
$ docker-compose --project-name=cb up -d
Creating cb_consul_1
Creating cb_couchbase_1
```

Because one Couchbase container can get lonely, we can use Docker Compose to give it some friends:

```bash
$ docker-compose -p cb scale couchbase=3
Creating cb_couchbase_2
Creating cb_couchbase_3

$ docker-compose -p cb ps
Name             Command                         State  Ports
-------------------------------------------------------------------------------------
cb_consul_1      /bin/start -server -bootst ...  Up     53/tcp, 53/udp, 8300/tcp...
cb_couchbase_1   /bin/containerpilot /usr/...    Up     11207/tcp, 11210/tcp,
                                                        11211/tcp, 18091/tcp,
                                                        18092/tcp, 8093/tcp,
                                                        0.0.0.0:8091/tcp->8091/tcp,
                                                        0.0.0.0:8092/tcp->8092/tcp,
cb_couchbase_2   /bin/containerpilot /usr/...    Up     11207/tcp, 11210/tcp,
                                                        11211/tcp, 18091/tcp,
                                                        18092/tcp, 8093/tcp,
                                                        0.0.0.0:8091/tcp->8091/tcp,
                                                        0.0.0.0:8092/tcp->8092/tcp,
cb_couchbase_3   /bin/containerpilot /usr/...    Up     11207/tcp, 11210/tcp,
                                                        11211/tcp, 18091/tcp,
                                                        18092/tcp, 8093/tcp,
                                                        0.0.0.0:8091/tcp->8091/tcp,
                                                        0.0.0.0:8092/tcp->8092/tcp,
```
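As the new nodes come up, each one registers itself with Consul and joins the cluster. You can watch the registrations arrive through the Consul HTTP API; a sketch using the `couchbase-api` service name from `etc/containerpilot.json` and the `CONSUL` hostname from your `_env` file:

```bash
# list the IPs of all passing couchbase-api instances known to Consul
source ./_env
curl -s "http://${CONSUL}:8500/v1/health/service/couchbase-api?passing" | \
    json -a Service.Address
```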

A shell script (`./demo.sh`) has been provided to run these two commands as well as to find and open the Couchbase dashboard in your web browser. Sign in with the username and password you provided in the environment file to see the working cluster. As the cluster scales up you'll be able to watch the new nodes join it.

### Initializing a bucket

Standing up the cluster does not initialize any Couchbase buckets, because these are specific to your application(s). The `./demo.sh` script will create a Couchbase bucket using the Couchbase REST API as an example of what your application's `preStart` handler should do.

### Consul notes

See the Consul docs on [bootstrapping](https://www.consul.io/docs/guides/bootstrapping.html), [Consul clusters](https://www.consul.io/intro/getting-started/join.html), and the details about [adding and removing nodes](https://www.consul.io/docs/guides/servers.html). The [CLI](https://www.consul.io/docs/commands/index.html) and [HTTP](https://www.consul.io/docs/agent/http.html) API are also documented.

[Check for registered instances of a named service](https://www.consul.io/docs/agent/http/catalog.html#catalog_service):

```bash
curl -v http://consul:8500/v1/catalog/service/couchbase | json -aH ServiceAddress
```

[Register an instance of a service](https://www.consul.io/docs/agent/http/catalog.html#catalog_register):

```bash
export MYIP=$(ip addr show eth0 | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}')
curl http://consul:8500/v1/agent/service/register -d "$(printf '{"ID": "couchbase-%s","Name": "couchbase","Address": "%s"}' $MYIP $MYIP)"
```
--------------------------------------------------------------------------------
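ContainerPilot drives everything below through `bin/manage.sh`: the `health` check in `etc/containerpilot.json` calls `manage.sh health` every 5 seconds, and the telemetry sensors call `manage.sh stats` with a key path. You can also run it by hand for debugging; a sketch that assumes the `cb` project prefix from the examples above:

```bash
# run the health check and a stats query manually in the first Couchbase node
docker exec -it cb_couchbase_1 /usr/local/bin/manage.sh health
docker exec -it cb_couchbase_1 /usr/local/bin/manage.sh stats systemStats.mem_free
```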
/bin/manage.sh:
--------------------------------------------------------------------------------
#!/bin/bash

help() {
    echo "Setup and run a Couchbase cluster node. Uses Consul to find other"
    echo "nodes in the cluster or bootstraps the cluster if it does not yet"
    echo "exist."
    echo
    echo "Usage: ./manage.sh health => runs health check and bootstrap."
    echo "       ./manage.sh <command> => run another function for debugging."
}

trap cleanup EXIT

# This container's private IP
export IP_PRIVATE=$(ip addr show eth0 | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}')

# Discovery vars
COUCHBASE_SERVICE_NAME=${COUCHBASE_SERVICE_NAME:-couchbase-api}
export CONSUL=${CONSUL:-consul}

# Couchbase username and password
export COUCHBASE_USER=${COUCHBASE_USER:-Administrator}
export COUCHBASE_PASS=${COUCHBASE_PASS:-password}
CB_CONN="-c 127.0.0.1:8091 -u ${COUCHBASE_USER} -p ${COUCHBASE_PASS}"

# -------------------------------------------
# Top-level health check handler


health() {
    # if we're already initialized and joined to a cluster,
    # we can just run the health check and exit
    checkLock
    initNode
    isNodeInCluster
    if [ $? -eq 0 ]; then
        doHealthCheck
        exit $?
    fi

    # if there is a healthy cluster we join it, otherwise we try
    # to create a new cluster. If another node is in the process
    # of creating a new cluster, we'll wait for it instead.
    echo 'Looking for an existing cluster...'
    while true; do
        local node=$(getHealthyClusterIp)
        if [[ ${node} != "null" ]]; then
            joinCluster $node
        else
            obtainBootstrapLock
            if [ $? -eq 0 ]; then
                initCluster
            else
                sleep 3
            fi
        fi
    done
}


# -------------------------------------------
# Status checking


# The couchbase-cli provides no documented mechanism to verify that we've
# initialized the node. But if we try to node-init with the default password
# and it fails, then we know we've previously initialized this node.
# Either way we can merrily continue.
initNode() {
    # couchbase takes a while to become responsive on start, so we need to
    # make sure it's up first.
    while true; do
        # an uninitialized node will have default creds
        couchbase-cli server-info -c 127.0.0.1:8091 -u access -p password &>/dev/null
        if [ $? -eq 0 ]; then
            break
        fi
        # check the initialized creds as well
        couchbase-cli server-info ${CB_CONN} &>/dev/null
        if [ $? -eq 0 ]; then
            break
        fi
        echo -n '.'
        sleep 1
    done
    couchbase-cli node-init -c 127.0.0.1:8091 -u access -p password \
        --node-init-data-path=/opt/couchbase/var/lib/couchbase/data \
        --node-init-index-path=/opt/couchbase/var/lib/couchbase/data \
        --node-init-hostname=${IP_PRIVATE} &>/dev/null \
        && echo '# Node initialized'
}

isNodeInCluster() {
    couchbase-cli server-list ${CB_CONN} | grep ${IP_PRIVATE} &>/dev/null
    return $?
}

doHealthCheck() {
    local status=$(couchbase-cli server-info ${CB_CONN} | jq -r .status)
    if [[ $status != "healthy" ]]; then
        echo "Node not healthy, status was: $status"
        return 1
    fi
    return 0
}


# -------------------------------------------
# Joining a cluster


# We only need one IP from the healthy cluster in order to join it.
getHealthyClusterIp() {
    echo $(curl -Lsf http://${CONSUL}:8500/v1/health/service/${COUCHBASE_SERVICE_NAME}?passing | jq -r .[0].Service.Address)
}

# If we fail to join the cluster, bail out and retry on the
# next health check.
joinCluster(){
    echo '# Joining cluster...'
    local node=$1
    curl -Lsif -u ${COUCHBASE_USER}:${COUCHBASE_PASS} \
        -d "hostname=${IP_PRIVATE}&user=admin&password=password" \
        "http://${node}:8091/controller/addNode" || exit 1
    echo 'Joined cluster!'
    rebalance
    exit 0
}

# We need to rebalance for each node because we can't guarantee that we won't
# try to rebalance while another node is coming up. Doing this in a loop because
# we can't queue up rebalances -- the rebalance command cannot be called while a
# rebalance is in progress.
rebalance() {
    echo '# Rebalancing cluster...'
    while true; do
        echo -n '.'
        couchbase-cli rebalance ${CB_CONN} && return
        sleep .7
    done
}


# -------------------------------------------
# Bootstrapping a cluster

# Try to obtain a lock in Consul. If we can't get the lock then another node
# is trying to bootstrap the cluster. The cluster-init node will have 120s
# to show up as healthy in Consul.
obtainBootstrapLock() {
    echo 'No cluster nodes found, trying to obtain lock on bootstrap...'
    local session=$(curl -Lsf -XPUT -d '{"Name": "couchbase-bootstrap", "TTL": "120s"}' http://${CONSUL}:8500/v1/session/create | jq -r .ID) || return $?
    local lock=$(curl -Lsf -XPUT http://${CONSUL}:8500/v1/kv/couchbase-bootstrap?acquire=$session)
    if [[ $lock == "true" ]]; then
        return 0
    else
        return 1
    fi
}
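# The lock above is an ordinary Consul session-based lock: the 120s session TTL
# means that if the bootstrapping node dies before the cluster becomes healthy,
# the session expires and the lock is released for the next contender. A sketch
# of inspecting the lock from inside a container, mirroring the call above:
#
#   # a non-null Session field means a node currently holds the bootstrap lock
#   curl -s http://${CONSUL}:8500/v1/kv/couchbase-bootstrap | jq -r '.[0].Session'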

# Bootstrap the Couchbase cluster and set resource limits.
initCluster() {
    echo
    echo '# Bootstrapping cluster...'

    # Couchbase resource limits: use 70% of the memory available to the container
    local avail_memory=$(free -m | grep -o "Mem:\s*[0-9]*" | grep -o "[0-9]*")
    local cb_memory=$((($avail_memory/10)*7))

    couchbase-cli cluster-init -c 127.0.0.1:8091 -u access -p password \
        --cluster-init-username=${COUCHBASE_USER} \
        --cluster-init-password=${COUCHBASE_PASS} \
        --cluster-init-port=8091 \
        --cluster-init-ramsize=${cb_memory} \
        --services=data,index,query

    echo '# Cluster bootstrapped'
    echo
    exit 0
}

# Filters the JSON coming back from the REST API for this node by the key path
# given as an argument. Examples:
#   stats mcdMemoryAllocated
#   stats systemStats.mem_free
stats() {
    curl -s --fail -u ${COUCHBASE_USER}:${COUCHBASE_PASS} \
        http://127.0.0.1:8091/pools/default | \
        jq -r "$(printf '.nodes[] | select(.hostname | contains("%s")) | .%s' "${IP_PRIVATE}" "$1")"
}


# -------------------------------------------
# helpers

# make sure we're running only one init process at a time,
# even with overlapping health check handlers
checkLock() {
    if ! mkdir /var/lock/couchbase-init 2>/dev/null; then
        echo 'couchbase-init lock in place, skipping'
        trap - EXIT # leave the other handler's lock in place
        exit 0
    fi
}

cleanup() {
    rm -rf /var/lock/couchbase-init
}

# -------------------------------------------

until
    cmd=$1
    if [ -z "$cmd" ]; then
        help
    fi
    shift 1
    $cmd "$@"
    [ "$?" -ne 127 ]
do
    help
    exit
done
--------------------------------------------------------------------------------