├── files
│   ├── hbase_jmx_config.yaml
│   ├── prometheus.yml
│   ├── hdfs
│   │   ├── hdfs-site.xml
│   │   └── core-site.xml
│   ├── hbase
│   │   ├── hbase-site.xml
│   │   └── hbase
│   └── hbasedashboard.json
├── README.md
└── run-demo.sh

/files/hbase_jmx_config.yaml:
--------------------------------------------------------------------------------
rules:
  - pattern: Hadoop<service=HBase, name=RegionServer, sub=Regions><>Namespace_([^\W_]+)_table_([^\W_]+)_region_([^\W_]+)_metric_(\w+)
    name: HBase_metric_$4
    labels:
      namespace: "$1"
      table: "$2"
      region: "$3"
  - pattern: Hadoop<service=(\w+), name=(\w+), sub=(\w+)><>(\w+)
    name: HBase_$2_$3_$4
--------------------------------------------------------------------------------
/files/prometheus.yml:
--------------------------------------------------------------------------------
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  - job_name: 'prometheus'

    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'hbase'

    static_configs:
      - targets: ['192.168.59.3:7000', '192.168.59.3:7001', '192.168.59.3:7002', '192.168.59.3:7003', '192.168.59.3:7004', '192.168.59.3:7005']
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Monitoring HBase with Prometheus demo

This repo accompanies [this blog post](https://blog.godatadriven.com/hbase-prometheus-monitoring). Running `./run-demo.sh` sets up a pseudo-distributed HBase cluster with 2 masters and 4 regionservers, plus Prometheus and Grafana. Browse to http://localhost:3000/dashboard/db/hbase to view the metrics.

The script has several dependencies; for example, it uses Docker (on OS X) to run Prometheus and Grafana. You may need to install tools or edit the configs for your environment.

To simulate some usage and get interesting graphs, you can generate load on the HBase cluster with [HBase performance evaluation](http://hbase.apache.org/book.html#__code_hbase_pe_code).
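
For example, a short random-write run against the demo cluster could look like this (a sketch; see `hbase pe --help` for the full option list, and adjust row and client counts to taste):

```bash
# Write 100k rows from 4 concurrent clients, without MapReduce
/tmp/demo/hbase-1.2.4/bin/hbase pe --nomapred --rows=100000 randomWrite 4
```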

## Software & versions used
* Prometheus 1.5.0
* Grafana 4.1.0-beta1
* HBase 1.2.4
* Docker
--------------------------------------------------------------------------------
/files/hdfs/hdfs-site.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
</configuration>
--------------------------------------------------------------------------------
/files/hdfs/core-site.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://localhost:8020</value>
  </property>
</configuration>
--------------------------------------------------------------------------------
/files/hbase/hbase-site.xml:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://localhost:8020/hbase</value>
  </property>
</configuration>
--------------------------------------------------------------------------------
/run-demo.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Demo installation script for HBase monitoring with Prometheus JMX exporter and Grafana
# This script will install in /tmp/demo

# Manually stop any running HBase & HDFS processes first; use jps to check
rm -v -rf /tmp/demo
rm -v -rf /tmp/hadoop-${USER} # cleanup HDFS
docker rm --force demo_prometheus demo_grafana

GREEN='\033[0;32m'
NOCOLOR='\033[0m'
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
DEMO_DIR=/tmp/demo

# Run Prometheus
docker run --name demo_prometheus -d -p 9090:9090 prom/prometheus:v1.5.0 &&
echo -e "${GREEN}You can now browse to http://localhost:9090 for the Prometheus UI${NOCOLOR}"

# Run Grafana
docker run --name demo_grafana -d -i -p 3000:3000 grafana/grafana:4.1.0-beta1 &&
echo -e "${GREEN}You can now browse to http://localhost:3000 for the Grafana UI${NOCOLOR}"

# Create directory for local files
mkdir -p $DEMO_DIR
cd $DEMO_DIR

# Download HBase
wget http://apache.proserve.nl/hbase/1.2.4/hbase-1.2.4-bin.tar.gz
tar zxvf hbase-1.2.4-bin.tar.gz
rm hbase-1.2.4-bin.tar.gz

# Download Hadoop
wget http://apache.proserve.nl/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
tar zxvf hadoop-2.7.3.tar.gz
rm hadoop-2.7.3.tar.gz

# Start HDFS
cp $SCRIPT_DIR/files/hdfs/hdfs-site.xml $DEMO_DIR/hadoop-2.7.3/etc/hadoop/
cp $SCRIPT_DIR/files/hdfs/core-site.xml $DEMO_DIR/hadoop-2.7.3/etc/hadoop/
$DEMO_DIR/hadoop-2.7.3/bin/hdfs namenode -format -force -nonInterActive
$DEMO_DIR/hadoop-2.7.3/sbin/start-dfs.sh
echo -e "${GREEN}Namenode UI available at http://localhost:50070${NOCOLOR}"

# Download Prometheus JMX exporter & copy config
wget http://central.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.7/jmx_prometheus_javaagent-0.7.jar
cp $SCRIPT_DIR/files/hbase_jmx_config.yaml $DEMO_DIR
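
# Note: the patched bin/hbase copied below attaches the exporter to each master and
# regionserver JVM with an agent flag of the form (port picked per process, see
# files/hbase/hbase):
#   -javaagent:/tmp/demo/jmx_prometheus_javaagent-0.7.jar=<port>:/tmp/demo/hbase_jmx_config.yaml
# Once a daemon is up, a quick sanity check of one exporter endpoint could be:
#   curl -s http://localhost:7000/metrics | grep HBase_Master_Server_clusterRequests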

# Start HBase with the Prometheus JMX exporter: start-hbase.sh starts 1 master and
# 1 regionserver; we then add 1 backup master and 3 extra regionservers
# (2 masters & 4 regionservers in total)
cp $SCRIPT_DIR/files/hbase/hbase-site.xml $DEMO_DIR/hbase-1.2.4/conf/
cp $SCRIPT_DIR/files/hbase/hbase $DEMO_DIR/hbase-1.2.4/bin/hbase
echo "export JAVA_HOME=${JAVA_HOME}" | cat - $DEMO_DIR/hbase-1.2.4/conf/hbase-env.sh > $DEMO_DIR/hbase-1.2.4/conf/hbase-env.sh.tmp && mv $DEMO_DIR/hbase-1.2.4/conf/hbase-env.sh.tmp $DEMO_DIR/hbase-1.2.4/conf/hbase-env.sh
${DEMO_DIR}/hbase-1.2.4/bin/start-hbase.sh
${DEMO_DIR}/hbase-1.2.4/bin/local-master-backup.sh start 1
${DEMO_DIR}/hbase-1.2.4/bin/local-regionservers.sh start 2 3 4
echo -e "${GREEN}You can now browse to http://localhost:16010 for the HBase master UI${NOCOLOR}"

# Configure Prometheus
docker cp $SCRIPT_DIR/files/prometheus.yml demo_prometheus:/etc/prometheus/prometheus.yml
curl -X POST http://localhost:9090/-/reload

# Add Prometheus datasource to Grafana
curl 'http://admin:admin@localhost:3000/api/datasources' -X POST -H 'Content-Type: application/json;charset=UTF-8' --data-binary '{"name":"Demo Prometheus","type":"prometheus","url":"http://localhost:9090","access":"direct","isDefault":true}'
curl 'http://admin:admin@localhost:3000/api/dashboards/db' -X POST -H 'Content-Type: application/json;charset=UTF-8' -d @$SCRIPT_DIR/files/hbasedashboard.json
echo -e "${GREEN}You can now browse to http://localhost:3000/dashboard/db/hbase for the demo dashboard${NOCOLOR}"
--------------------------------------------------------------------------------
/files/hbasedashboard.json:
--------------------------------------------------------------------------------
{"dashboard":
  {
    "__inputs": [
      {
        "name": "Demo Prometheus",
        "label": "Demo Prometheus",
        "description": "",
        "type": "datasource",
        "pluginId": "prometheus",
        "pluginName": "Prometheus"
      }
    ],
    "__requires": [
      { "type": "panel", "id": "graph", "name": "Graph", "version": "" },
      { "type": "panel", "id": "table", "name": "Table", "version": "" },
      { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "4.1.0-beta1" },
      { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" }
    ],
    "id": null,
    "title": "HBase",
    "tags": [],
    "style": "dark",
    "timezone": "browser",
    "editable": true,
    "graphTooltip": 0,
    "hideControls": false,
    "time": { "from": "now-1m", "to": "now" },
    "timepicker": {
      "refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"],
      "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"]
    },
    "templating": { "list": [] },
    "annotations": { "list": [] },
    "refresh": "5s",
    "schemaVersion": 14,
    "version": 7,
    "links": [],
    "gnetId": null,
    "rows": [
      {
        "title": "Dashboard Row",
        "panels": [
          {
            "id": 2,
            "title": "Number of requests",
            "span": 12,
            "type": "graph",
            "targets": [
              {
                "expr": "sum(HBase_Master_Server_clusterRequests)",
                "intervalFactor": 2,
                "refId": "A",
                "step": 2,
                "legendFormat": "Total requests to both HMasters"
              }
            ],
            "datasource": "Demo Prometheus",
            "renderer": "flot",
            "yaxes": [
              {
                "label": null,
"show": true, 111 | "logBase": 1, 112 | "min": null, 113 | "max": null, 114 | "format": "short" 115 | }, 116 | { 117 | "label": null, 118 | "show": true, 119 | "logBase": 1, 120 | "min": null, 121 | "max": null, 122 | "format": "short" 123 | } 124 | ], 125 | "xaxis": { 126 | "show": true, 127 | "mode": "time", 128 | "name": null, 129 | "values": [] 130 | }, 131 | "lines": true, 132 | "fill": 1, 133 | "linewidth": 1, 134 | "points": false, 135 | "pointradius": 5, 136 | "bars": false, 137 | "stack": false, 138 | "percentage": false, 139 | "legend": { 140 | "show": true, 141 | "values": false, 142 | "min": false, 143 | "max": false, 144 | "current": false, 145 | "total": false, 146 | "avg": false 147 | }, 148 | "nullPointMode": "null", 149 | "steppedLine": false, 150 | "tooltip": { 151 | "value_type": "individual", 152 | "shared": true, 153 | "sort": 0 154 | }, 155 | "timeFrom": null, 156 | "timeShift": null, 157 | "aliasColors": {}, 158 | "seriesOverrides": [], 159 | "thresholds": [], 160 | "links": [] 161 | } 162 | ], 163 | "showTitle": false, 164 | "titleSize": "h6", 165 | "height": 250, 166 | "repeat": null, 167 | "repeatRowId": null, 168 | "repeatIteration": null, 169 | "collapse": false 170 | }, 171 | { 172 | "title": "Dashboard Row", 173 | "panels": [ 174 | { 175 | "aliasColors": {}, 176 | "bars": false, 177 | "datasource": "Demo Prometheus", 178 | "fill": 1, 179 | "id": 1, 180 | "legend": { 181 | "avg": false, 182 | "current": false, 183 | "max": false, 184 | "min": false, 185 | "show": true, 186 | "total": false, 187 | "values": false 188 | }, 189 | "lines": true, 190 | "linewidth": 1, 191 | "links": [], 192 | "nullPointMode": "null", 193 | "percentage": false, 194 | "pointradius": 5, 195 | "points": false, 196 | "renderer": "flot", 197 | "seriesOverrides": [], 198 | "span": 6, 199 | "stack": false, 200 | "steppedLine": false, 201 | "targets": [ 202 | { 203 | "expr": "sum(HBase_metric_storeFileSize) by (instance)", 204 | "intervalFactor": 2, 205 | "legendFormat": "{{instance}}", 206 | "metric": "HBase_metric_storeFileSize", 207 | "refId": "A", 208 | "step": 2 209 | } 210 | ], 211 | "thresholds": [], 212 | "timeFrom": null, 213 | "timeShift": null, 214 | "title": "Storefilesize per regionserver", 215 | "tooltip": { 216 | "shared": true, 217 | "sort": 0, 218 | "value_type": "individual" 219 | }, 220 | "type": "graph", 221 | "xaxis": { 222 | "mode": "time", 223 | "name": null, 224 | "show": true, 225 | "values": [] 226 | }, 227 | "yaxes": [ 228 | { 229 | "format": "decbits", 230 | "label": null, 231 | "logBase": 1, 232 | "max": null, 233 | "min": null, 234 | "show": true 235 | }, 236 | { 237 | "format": "short", 238 | "label": null, 239 | "logBase": 1, 240 | "max": null, 241 | "min": null, 242 | "show": true 243 | } 244 | ] 245 | }, 246 | { 247 | "id": 3, 248 | "title": "Storefilesize per regionserver", 249 | "span": 6, 250 | "type": "table", 251 | "targets": [ 252 | { 253 | "expr": "sum(HBase_metric_storeFileSize) by (instance)", 254 | "intervalFactor": 2, 255 | "refId": "A", 256 | "step": 2, 257 | "legendFormat": "{{instance}}" 258 | } 259 | ], 260 | "styles": [ 261 | { 262 | "type": "date", 263 | "pattern": "Time", 264 | "dateFormat": "YYYY-MM-DD HH:mm:ss" 265 | }, 266 | { 267 | "unit": "bits", 268 | "type": "number", 269 | "decimals": 2, 270 | "colors": [ 271 | "rgba(245, 54, 54, 0.9)", 272 | "rgba(237, 129, 40, 0.89)", 273 | "rgba(50, 172, 45, 0.97)" 274 | ], 275 | "colorMode": null, 276 | "pattern": "/.*/", 277 | "thresholds": [] 278 | } 279 | ], 280 | "transform": 
"timeseries_to_columns", 281 | "pageSize": 5, 282 | "showHeader": true, 283 | "columns": [], 284 | "scroll": true, 285 | "fontSize": "100%", 286 | "sort": { 287 | "col": 0, 288 | "desc": true 289 | }, 290 | "links": [] 291 | } 292 | ], 293 | "showTitle": false, 294 | "titleSize": "h6", 295 | "height": "250px", 296 | "repeat": null, 297 | "repeatRowId": null, 298 | "repeatIteration": null, 299 | "collapse": false 300 | } 301 | ] 302 | } 303 | } 304 | -------------------------------------------------------------------------------- /files/hbase/hbase: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | # 3 | #/** 4 | # * Licensed to the Apache Software Foundation (ASF) under one 5 | # * or more contributor license agreements. See the NOTICE file 6 | # * distributed with this work for additional information 7 | # * regarding copyright ownership. The ASF licenses this file 8 | # * to you under the Apache License, Version 2.0 (the 9 | # * "License"); you may not use this file except in compliance 10 | # * with the License. You may obtain a copy of the License at 11 | # * 12 | # * http://www.apache.org/licenses/LICENSE-2.0 13 | # * 14 | # * Unless required by applicable law or agreed to in writing, software 15 | # * distributed under the License is distributed on an "AS IS" BASIS, 16 | # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # * See the License for the specific language governing permissions and 18 | # * limitations under the License. 19 | # */ 20 | # 21 | # The hbase command script. Based on the hadoop command script putting 22 | # in hbase classes, libs and configurations ahead of hadoop's. 23 | # 24 | # TODO: Narrow the amount of duplicated code. 25 | # 26 | # Environment Variables: 27 | # 28 | # JAVA_HOME The java implementation to use. Overrides JAVA_HOME. 29 | # 30 | # HBASE_CLASSPATH Extra Java CLASSPATH entries. 31 | # 32 | # HBASE_CLASSPATH_PREFIX Extra Java CLASSPATH entries that should be 33 | # prefixed to the system classpath. 34 | # 35 | # HBASE_HEAPSIZE The maximum amount of heap to use. 36 | # Default is unset and uses the JVMs default setting 37 | # (usually 1/4th of the available memory). 38 | # 39 | # HBASE_LIBRARY_PATH HBase additions to JAVA_LIBRARY_PATH for adding 40 | # native libraries. 41 | # 42 | # HBASE_OPTS Extra Java runtime options. 43 | # 44 | # HBASE_CONF_DIR Alternate conf dir. Default is ${HBASE_HOME}/conf. 45 | # 46 | # HBASE_ROOT_LOGGER The root appender. Default is INFO,console 47 | # 48 | # JRUBY_HOME JRuby path: $JRUBY_HOME/lib/jruby.jar should exist. 49 | # Defaults to the jar packaged with HBase. 50 | # 51 | # JRUBY_OPTS Extra options (eg '--1.9') passed to the hbase shell. 52 | # Empty by default. 53 | # 54 | # HBASE_SHELL_OPTS Extra options passed to the hbase shell. 55 | # Empty by default. 56 | # 57 | bin=`dirname "$0"` 58 | bin=`cd "$bin">/dev/null; pwd` 59 | 60 | # This will set HBASE_HOME, etc. 61 | . "$bin"/hbase-config.sh 62 | 63 | cygwin=false 64 | case "`uname`" in 65 | CYGWIN*) cygwin=true;; 66 | esac 67 | 68 | # Detect if we are in hbase sources dir 69 | in_dev_env=false 70 | if [ -d "${HBASE_HOME}/target" ]; then 71 | in_dev_env=true 72 | fi 73 | 74 | # if no args specified, show usage 75 | if [ $# = 0 ]; then 76 | echo "Usage: hbase [] []" 77 | echo "Options:" 78 | echo " --config DIR Configuration direction to use. 
Default: ./conf" 79 | echo " --hosts HOSTS Override the list in 'regionservers' file" 80 | echo " --auth-as-server Authenticate to ZooKeeper using servers configuration" 81 | echo "" 82 | echo "Commands:" 83 | echo "Some commands take arguments. Pass no args or -h for usage." 84 | echo " shell Run the HBase shell" 85 | echo " hbck Run the hbase 'fsck' tool" 86 | echo " snapshot Create a new snapshot of a table" 87 | echo " snapshotinfo Tool for dumping snapshot information" 88 | echo " wal Write-ahead-log analyzer" 89 | echo " hfile Store file analyzer" 90 | echo " zkcli Run the ZooKeeper shell" 91 | echo " upgrade Upgrade hbase" 92 | echo " master Run an HBase HMaster node" 93 | echo " regionserver Run an HBase HRegionServer node" 94 | echo " zookeeper Run a Zookeeper server" 95 | echo " rest Run an HBase REST server" 96 | echo " thrift Run the HBase Thrift server" 97 | echo " thrift2 Run the HBase Thrift2 server" 98 | echo " clean Run the HBase clean up script" 99 | echo " classpath Dump hbase CLASSPATH" 100 | echo " mapredcp Dump CLASSPATH entries required by mapreduce" 101 | echo " pe Run PerformanceEvaluation" 102 | echo " ltt Run LoadTestTool" 103 | echo " version Print the version" 104 | echo " CLASSNAME Run the class named CLASSNAME" 105 | exit 1 106 | fi 107 | 108 | # get arguments 109 | COMMAND=$1 110 | shift 111 | 112 | JAVA=$JAVA_HOME/bin/java 113 | 114 | # override default settings for this command, if applicable 115 | if [ -f "$HBASE_HOME/conf/hbase-env-$COMMAND.sh" ]; then 116 | . "$HBASE_HOME/conf/hbase-env-$COMMAND.sh" 117 | fi 118 | 119 | add_size_suffix() { 120 | # add an 'm' suffix if the argument is missing one, otherwise use whats there 121 | local val="$1" 122 | local lastchar=${val: -1} 123 | if [[ "mMgG" == *$lastchar* ]]; then 124 | echo $val 125 | else 126 | echo ${val}m 127 | fi 128 | } 129 | 130 | if [[ -n "$HBASE_HEAPSIZE" ]]; then 131 | JAVA_HEAP_MAX="-Xmx$(add_size_suffix $HBASE_HEAPSIZE)" 132 | fi 133 | 134 | if [[ -n "$HBASE_OFFHEAPSIZE" ]]; then 135 | JAVA_OFFHEAP_MAX="-XX:MaxDirectMemorySize=$(add_size_suffix $HBASE_OFFHEAPSIZE)" 136 | fi 137 | 138 | # so that filenames w/ spaces are handled correctly in loops below 139 | ORIG_IFS=$IFS 140 | IFS= 141 | 142 | # CLASSPATH initially contains $HBASE_CONF_DIR 143 | CLASSPATH="${HBASE_CONF_DIR}" 144 | CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar 145 | 146 | add_to_cp_if_exists() { 147 | if [ -d "$@" ]; then 148 | CLASSPATH=${CLASSPATH}:"$@" 149 | fi 150 | } 151 | 152 | # For releases, add hbase & webapps to CLASSPATH 153 | # Webapps must come first else it messes up Jetty 154 | if [ -d "$HBASE_HOME/hbase-webapps" ]; then 155 | add_to_cp_if_exists "${HBASE_HOME}" 156 | fi 157 | #add if we are in a dev environment 158 | if [ -d "$HBASE_HOME/hbase-server/target/hbase-webapps" ]; then 159 | add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target" 160 | fi 161 | 162 | add_maven_deps_to_classpath() { 163 | f="${HBASE_HOME}/target/cached_classpath.txt" 164 | if [ ! 
-f "${f}" ] 165 | then 166 | echo "As this is a development environment, we need ${f} to be generated from maven (command: mvn install -DskipTests)" 167 | exit 1 168 | fi 169 | CLASSPATH=${CLASSPATH}:`cat "${f}"` 170 | } 171 | 172 | 173 | #Add the development env class path stuff 174 | if $in_dev_env; then 175 | add_maven_deps_to_classpath 176 | fi 177 | 178 | #add the hbase jars for each module 179 | for f in $HBASE_HOME/hbase-jars/hbase*.jar; do 180 | if [[ $f = *sources.jar ]] 181 | then 182 | : # Skip sources.jar 183 | elif [ -f $f ] 184 | then 185 | CLASSPATH=${CLASSPATH}:$f; 186 | fi 187 | done 188 | 189 | # Add libs to CLASSPATH 190 | for f in $HBASE_HOME/lib/*.jar; do 191 | CLASSPATH=${CLASSPATH}:$f; 192 | done 193 | 194 | # default log directory & file 195 | if [ "$HBASE_LOG_DIR" = "" ]; then 196 | HBASE_LOG_DIR="$HBASE_HOME/logs" 197 | fi 198 | if [ "$HBASE_LOGFILE" = "" ]; then 199 | HBASE_LOGFILE='hbase.log' 200 | fi 201 | 202 | function append_path() { 203 | if [ -z "$1" ]; then 204 | echo $2 205 | else 206 | echo $1:$2 207 | fi 208 | } 209 | 210 | JAVA_PLATFORM="" 211 | 212 | # if HBASE_LIBRARY_PATH is defined lets use it as first or second option 213 | if [ "$HBASE_LIBRARY_PATH" != "" ]; then 214 | JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" "$HBASE_LIBRARY_PATH") 215 | fi 216 | 217 | #If avail, add Hadoop to the CLASSPATH and to the JAVA_LIBRARY_PATH 218 | # Allow this functionality to be disabled 219 | if [ "$HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP" != "true" ] ; then 220 | HADOOP_IN_PATH=$(PATH="${HADOOP_HOME:-${HADOOP_PREFIX}}/bin:$PATH" which hadoop 2>/dev/null) 221 | if [ -f ${HADOOP_IN_PATH} ]; then 222 | HADOOP_JAVA_LIBRARY_PATH=$(HADOOP_CLASSPATH="$CLASSPATH" ${HADOOP_IN_PATH} \ 223 | org.apache.hadoop.hbase.util.GetJavaProperty java.library.path 2>/dev/null) 224 | if [ -n "$HADOOP_JAVA_LIBRARY_PATH" ]; then 225 | JAVA_LIBRARY_PATH=$(append_path "${JAVA_LIBRARY_PATH}" "$HADOOP_JAVA_LIBRARY_PATH") 226 | fi 227 | CLASSPATH=$(append_path "${CLASSPATH}" `${HADOOP_IN_PATH} classpath 2>/dev/null`) 228 | fi 229 | fi 230 | 231 | # Add user-specified CLASSPATH last 232 | if [ "$HBASE_CLASSPATH" != "" ]; then 233 | CLASSPATH=${CLASSPATH}:${HBASE_CLASSPATH} 234 | fi 235 | 236 | # Add user-specified CLASSPATH prefix first 237 | if [ "$HBASE_CLASSPATH_PREFIX" != "" ]; then 238 | CLASSPATH=${HBASE_CLASSPATH_PREFIX}:${CLASSPATH} 239 | fi 240 | 241 | # cygwin path translation 242 | if $cygwin; then 243 | CLASSPATH=`cygpath -p -w "$CLASSPATH"` 244 | HBASE_HOME=`cygpath -d "$HBASE_HOME"` 245 | HBASE_LOG_DIR=`cygpath -d "$HBASE_LOG_DIR"` 246 | fi 247 | 248 | if [ -d "${HBASE_HOME}/build/native" -o -d "${HBASE_HOME}/lib/native" ]; then 249 | if [ -z $JAVA_PLATFORM ]; then 250 | JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"` 251 | fi 252 | if [ -d "$HBASE_HOME/build/native" ]; then 253 | JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" ${HBASE_HOME}/build/native/${JAVA_PLATFORM}/lib) 254 | fi 255 | 256 | if [ -d "${HBASE_HOME}/lib/native" ]; then 257 | JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" ${HBASE_HOME}/lib/native/${JAVA_PLATFORM}) 258 | fi 259 | fi 260 | 261 | # cygwin path translation 262 | if $cygwin; then 263 | JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"` 264 | fi 265 | 266 | # restore ordinary behaviour 267 | unset IFS 268 | 269 | #Set the right GC options based on the what we are running 270 | declare -a server_cmds=("master" "regionserver" "thrift" "thrift2" "rest" "avro" "zookeeper") 271 | 
for cmd in ${server_cmds[@]}; do
  if [[ $cmd == $COMMAND ]]; then
    server=true
    break
  fi
done

if [[ $server ]]; then
  HBASE_OPTS="$HBASE_OPTS $SERVER_GC_OPTS"
else
  HBASE_OPTS="$HBASE_OPTS $CLIENT_GC_OPTS"
fi

if [ "$AUTH_AS_SERVER" == "true" ] || [ "$COMMAND" = "hbck" ]; then
  if [ -n "$HBASE_SERVER_JAAS_OPTS" ]; then
    HBASE_OPTS="$HBASE_OPTS $HBASE_SERVER_JAAS_OPTS"
  else
    HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS"
  fi
fi

# figure out which class to run
if [ "$COMMAND" = "shell" ] ; then
  # eg export JRUBY_HOME=/usr/local/share/jruby
  if [ "$JRUBY_HOME" != "" ] ; then
    CLASSPATH="$JRUBY_HOME/lib/jruby.jar:$CLASSPATH"
    HBASE_OPTS="$HBASE_OPTS -Djruby.home=$JRUBY_HOME -Djruby.lib=$JRUBY_HOME/lib"
  fi
  # find the hbase ruby sources
  if [ -d "$HBASE_HOME/lib/ruby" ]; then
    HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/lib/ruby"
  else
    HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-shell/src/main/ruby"
  fi
  HBASE_OPTS="$HBASE_OPTS $HBASE_SHELL_OPTS"
  CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb"
elif [ "$COMMAND" = "hbck" ] ; then
  CLASS='org.apache.hadoop.hbase.util.HBaseFsck'
# TODO remove old 'hlog' version
elif [ "$COMMAND" = "hlog" -o "$COMMAND" = "wal" ] ; then
  CLASS='org.apache.hadoop.hbase.wal.WALPrettyPrinter'
elif [ "$COMMAND" = "hfile" ] ; then
  CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
elif [ "$COMMAND" = "zkcli" ] ; then
  CLASS="org.apache.hadoop.hbase.zookeeper.ZooKeeperMainServer"
elif [ "$COMMAND" = "upgrade" ] ; then
  CLASS="org.apache.hadoop.hbase.migration.UpgradeTo96"
elif [ "$COMMAND" = "snapshot" ] ; then
  CLASS="org.apache.hadoop.hbase.snapshot.CreateSnapshot"
elif [ "$COMMAND" = "snapshotinfo" ] ; then
  CLASS="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
elif [ "$COMMAND" = "master" ] ; then
  CLASS='org.apache.hadoop.hbase.master.HMaster'
  if [ "$1" != "stop" ] && [ "$1" != "clear" ] ; then
    HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS"
  fi
elif [ "$COMMAND" = "regionserver" ] ; then
  CLASS='org.apache.hadoop.hbase.regionserver.HRegionServer'
  if [ "$1" != "stop" ] ; then
    HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS"
  fi
elif [ "$COMMAND" = "thrift" ] ; then
  CLASS='org.apache.hadoop.hbase.thrift.ThriftServer'
  if [ "$1" != "stop" ] ; then
    HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS"
  fi
elif [ "$COMMAND" = "thrift2" ] ; then
  CLASS='org.apache.hadoop.hbase.thrift2.ThriftServer'
  if [ "$1" != "stop" ] ; then
    HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS"
  fi
elif [ "$COMMAND" = "rest" ] ; then
  CLASS='org.apache.hadoop.hbase.rest.RESTServer'
  if [ "$1" != "stop" ] ; then
    HBASE_OPTS="$HBASE_OPTS $HBASE_REST_OPTS"
  fi
elif [ "$COMMAND" = "zookeeper" ] ; then
  CLASS='org.apache.hadoop.hbase.zookeeper.HQuorumPeer'
  if [ "$1" != "stop" ] ; then
    HBASE_OPTS="$HBASE_OPTS $HBASE_ZOOKEEPER_OPTS"
  fi
elif [ "$COMMAND" = "clean" ] ; then
  case $1 in
    --cleanZk|--cleanHdfs|--cleanAll)
      matches="yes" ;;
    *) ;;
  esac
  if [ $# -ne 1 -o "$matches" = "" ]; then
    echo "Usage: hbase clean (--cleanZk|--cleanHdfs|--cleanAll)"
    echo "Options: "
echo " --cleanZk cleans hbase related data from zookeeper." 362 | echo " --cleanHdfs cleans hbase related data from hdfs." 363 | echo " --cleanAll cleans hbase related data from both zookeeper and hdfs." 364 | exit 1; 365 | fi 366 | "$bin"/hbase-cleanup.sh --config ${HBASE_CONF_DIR} $@ 367 | exit $? 368 | elif [ "$COMMAND" = "mapredcp" ] ; then 369 | CLASS='org.apache.hadoop.hbase.util.MapreduceDependencyClasspathTool' 370 | elif [ "$COMMAND" = "classpath" ] ; then 371 | echo $CLASSPATH 372 | exit 0 373 | elif [ "$COMMAND" = "pe" ] ; then 374 | CLASS='org.apache.hadoop.hbase.PerformanceEvaluation' 375 | HBASE_OPTS="$HBASE_OPTS $HBASE_PE_OPTS" 376 | elif [ "$COMMAND" = "ltt" ] ; then 377 | CLASS='org.apache.hadoop.hbase.util.LoadTestTool' 378 | HBASE_OPTS="$HBASE_OPTS $HBASE_LTT_OPTS" 379 | elif [ "$COMMAND" = "version" ] ; then 380 | CLASS='org.apache.hadoop.hbase.util.VersionInfo' 381 | else 382 | CLASS=$COMMAND 383 | fi 384 | 385 | # Have JVM dump heap if we run out of memory. Files will be 'launch directory' 386 | # and are named like the following: java_pid21612.hprof. Apparently it doesn't 387 | # 'cost' to have this flag enabled. Its a 1.6 flag only. See: 388 | # http://blogs.sun.com/alanb/entry/outofmemoryerror_looks_a_bit_better 389 | HBASE_OPTS="$HBASE_OPTS -Dhbase.log.dir=$HBASE_LOG_DIR" 390 | HBASE_OPTS="$HBASE_OPTS -Dhbase.log.file=$HBASE_LOGFILE" 391 | HBASE_OPTS="$HBASE_OPTS -Dhbase.home.dir=$HBASE_HOME" 392 | HBASE_OPTS="$HBASE_OPTS -Dhbase.id.str=$HBASE_IDENT_STRING" 393 | HBASE_OPTS="$HBASE_OPTS -Dhbase.root.logger=${HBASE_ROOT_LOGGER:-INFO,console}" 394 | if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then 395 | HBASE_OPTS="$HBASE_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" 396 | export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH" 397 | fi 398 | 399 | # Enable security logging on the master and regionserver only 400 | if [ "$COMMAND" = "master" ] || [ "$COMMAND" = "regionserver" ]; then 401 | HBASE_OPTS="$HBASE_OPTS -Dhbase.security.logger=${HBASE_SECURITY_LOGGER:-INFO,RFAS}" 402 | else 403 | HBASE_OPTS="$HBASE_OPTS -Dhbase.security.logger=${HBASE_SECURITY_LOGGER:-INFO,NullAppender}" 404 | fi 405 | 406 | HEAP_SETTINGS="$JAVA_HEAP_MAX $JAVA_OFFHEAP_MAX" 407 | # Exec unless HBASE_NOEXEC is set. 408 | export CLASSPATH 409 | 410 | if [ "$COMMAND" = "master" ] || [ "$COMMAND" = "regionserver" ]; then 411 | for port in {7000..7010}; do 412 | if [ `lsof -n -i:$port | grep LISTEN | wc -l` == "1" ]; then 413 | echo "Checking port $port - port $port in use" 414 | else 415 | echo "Checking port $port - port $port not in use - using port $port" 416 | HBASE_OPTS="$HBASE_OPTS -javaagent:/tmp/demo/jmx_prometheus_javaagent-0.7.jar=$port:/tmp/demo/hbase_jmx_config.yaml" 417 | break 418 | fi 419 | done 420 | fi 421 | 422 | if [ "${HBASE_NOEXEC}" != "" ]; then 423 | "$JAVA" -Dproc_$COMMAND -XX:OnOutOfMemoryError="kill -9 %p" $HEAP_SETTINGS $HBASE_OPTS $CLASS "$@" 424 | else 425 | exec "$JAVA" -Dproc_$COMMAND -XX:OnOutOfMemoryError="kill -9 %p" $HEAP_SETTINGS $HBASE_OPTS $CLASS "$@" 426 | fi 427 | --------------------------------------------------------------------------------