├── .gitignore ├── LICENSE ├── README.md ├── es-index-template.sh ├── lib ├── default_format.js ├── elasticsearch.js └── regex_format.js ├── package.json ├── send-test.sh └── utils └── httpReq.js /.gitignore: -------------------------------------------------------------------------------- 1 | lib-cov 2 | *.seed 3 | *.log 4 | *.csv 5 | *.dat 6 | *.out 7 | *.pid 8 | *.gz 9 | 10 | pids 11 | logs 12 | results 13 | 14 | npm-debug.log 15 | node_modules 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 markkimsal 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | statsd-elasticsearch-backend 2 | ============================ 3 | 4 | Elasticsearch backend for statsd 5 | 6 | ## Overview 7 | 8 | This backend allows [Statsd][statsd] to save to [Elasticsearch][elasticsearch]. Supports dynamic index creation per day and follows the logstash naming convention of statsd-YYYY.MM.DD for index creation. 9 | 10 | ## History 11 | 12 | Originally written by Github user rameshpy, this library was created as a feature branch of etsy/statsd. The statsd project recommended that this library be converted to its own repository as all other backends currently do. This repository started as a restructuring of the existing feature branch into a standalone backend repository. 13 | 14 | ## Installation 15 | 16 | $ cd /path/to/statsd 17 | $ npm install git://github.com/markkimsal/statsd-elasticsearch-backend.git 18 | 19 | To install from behind a proxy server: 20 | 21 | $ export https_proxy=http://your.proxyserver.org:8080 22 | $ export http_proxy=http://your.proxyserver.org:8080 23 | $ cd /path/to/statsd 24 | $ npm install git+https://github.com/markkimsal/statsd-elasticsearch-backend.git 25 | 26 | 27 | ## Configuration 28 | 29 | Merge the following configuration into your top-level existing configuration. 
30 | Add a structure to your configuration called "elasticsearch" 31 | 32 | ```js 33 | 34 | backends: [ 'statsd-elasticsearch-backend', 'other-backends'], 35 | debug: true, 36 | elasticsearch: { 37 | port: 9200, 38 | host: "localhost", 39 | path: "/", 40 | indexPrefix: "statsd", 41 | //indexTimestamp: "year", //for index statsd-2015 42 | //indexTimestamp: "month", //for index statsd-2015.01 43 | indexTimestamp: "day", //for index statsd-2015.01.01 44 | countType: "counter", 45 | timerType: "timer", 46 | timerDataType: "timer_data", 47 | gaugeDataType: "gauge", 48 | formatter: "default_format" 49 | } 50 | ``` 51 | 52 | The field _path_ is equal to "/" if you directly connect to ES. 53 | But when ES is behind a proxy (nginx, haproxy), for example http://domain.com/elastic-proxy/, then the following settings are required: 54 | ``` 55 | port: 80, 56 | host: "domain.com", 57 | path: "/elastic-proxy/", 58 | ``` 59 | Nginx config proxy example: 60 | ``` 61 | location /elastic-proxy/ { 62 | proxy_pass http://localhost:9200/; 63 | } 64 | ``` 65 | 66 | The field _indexPrefix_ is used as the prefix for your dynamic indices: for example "statsd-2014.02.04" 67 | 68 | The field _indexTimestamp_ allows you to determine the timestamping for your dynamic index. "year", "month" and "day" would produce "statsd-2014", "statsd-2014.02", "statsd-2014.02.04" respectively. 69 | 70 | The type configuration options allow you to specify different elasticsearch \_types for each statsd measurement. 71 | 72 | ## Important upgrade from 0.2 to 0.3 73 | 74 | Previously, the config value for timerDataType was always ignored and timer data was always saved as the timerType + '\_stats'. If you are upgrading a live instance from 0.2 please be aware that the value of timerDataType will now be respected and you should ensure that you have the proper type mappings (especially for @timestamp) or that your timerDataType is set to timerType + '\_stats'. 
75 | 76 | In addition to the above, the value of timerDataType was always overwriting timerData, so all timer information was being saved to the type "timer\_data" when the sample configuration would lead you to believe that it was being saved to the type "timer". 77 | 78 | In summary, the ES \_types of "timer_data" and "timer_data_stats" will now be "timer" and "timer_data" if the sample configuration is used. 79 | 80 | ## Template Mapping (basically required) 81 | 82 | To configure Elasticsearch to automatically apply index template settings based on a naming pattern, look at the es-index-template.sh file. It will probably need customization (the timer_data type) for your particular statsd configuration (re: threshold pct and bins). 83 | 84 | From your etc/statsd installation type the following to get the basic template mapping 85 | ``` 86 | sh node_modules/statsd-elasticsearch-backend/es-index-template.sh 87 | # if your ES is on another machine or port 88 | ES_HOST=10.1.10.200 ES_PORT=9201 sh node_modules/statsd-elasticsearch-backend/es-index-template.sh 89 | ``` 90 | Without this, your timestamps will not be interpreted as timestamps. 91 | 92 | ## Test your installation 93 | 94 | Send a UDP packet that statsd understands with netcat. 95 | 96 | ``` 97 | echo "accounts.authentication.password.failed:1|c" | nc -u -w0 127.0.0.1 8125 98 | echo "accounts.authentication.login.time:320|ms|@0.1" | nc -u -w0 127.0.0.1 8125 99 | echo "accounts.authentication.login.num_users:333|g" | nc -u -w0 127.0.0.1 8125 100 | echo "accounts.authentication.login.num_users:-10|g" | nc -u -w0 127.0.0.1 8125 101 | ``` 102 | 103 | ## Default Metric Name Mapping 104 | 105 | Each key sent to the elasticsearch backend will be broken up by dots (.) and each part of the key will be treated as a document property in Elasticsearch. The first four keys will be treated as namespace, group, target, and action, with any remaining keys concatenated into the "action" key with dots. 
106 | For example: 107 | 108 | ```js 109 | accounts.authentication.password.failure.count:1|c 110 | ``` 111 | 112 | The above would be mapped into a JSON document like this: 113 | ```js 114 | { 115 | "_type":"counter", 116 | "ns":"accounts", 117 | "grp":"authentication", 118 | "tgt":"password", 119 | "act":"failure.count", 120 | "val":"1", 121 | "@timestamp":"1393853783000" 122 | } 123 | ``` 124 | 125 | Currently the keys are hardcoded to: namespace, group, target, and action, as in the above example. Having configurable naming conventions is the goal of a 1.0 release. 126 | The idea for mapping came mostly from: [http://matt.aimonetti.net/posts/2013/06/26/practical-guide-to-graphite-monitoring/] 127 | 128 | ## Configurable Metric Formatters 129 | 130 | As of 0.4.0 you can now choose from a selection of metric key formatters or write your own. 131 | 132 | The config value _formatter_ will resolve to the name of a file under lib/ with a .js extension added to it. 133 | 134 | ``` 135 | formatter: my_own_format # this will require('lib/' + 'my_own_format' + '.js'); 136 | ``` 137 | In this module you will need to export a number of functions. The 4 that are supported right now are: 138 | ``` 139 | counters( key, value, ts, array ) 140 | timers( key, value, ts, array ) 141 | timer_data( key, value, ts, array ) 142 | gauges( key, value, ts, array ) 143 | ``` 144 | 145 | Look at lib/default\_format.js for a template to build your own. 146 | 147 | ## Basic Auth 148 | 149 | In order to use basic auth in your application, add two keys to the configuration of your application: 150 | 151 | ```js 152 | backends: [ 'statsd-elasticsearch-backend', 'other-backends'], 153 | debug: true, 154 | elasticsearch: { 155 | ... 156 | username: "username", 157 | password: "password" 158 | ... 
159 | } 160 | ``` 161 | -------------------------------------------------------------------------------- /es-index-template.sh: -------------------------------------------------------------------------------- 1 | curl -XPUT "${ES_HOST:-localhost}:${ES_PORT:-9200}/_template/statsd-template" -d ' 2 | { 3 | "template" : "statsd-*", 4 | "settings" : { 5 | "number_of_shards" : 1 6 | }, 7 | "mappings" : { 8 | "counter" : { 9 | "_source" : { "enabled" : true }, 10 | "properties": { 11 | "@timestamp": { 12 | "type": "date" 13 | }, 14 | "val": { 15 | "type": "double", 16 | "index": "not_analyzed" 17 | }, 18 | "ns": { 19 | "type": "string", 20 | "index": "not_analyzed" 21 | }, 22 | "grp": { 23 | "type": "string", 24 | "index": "not_analyzed" 25 | }, 26 | "tgt": { 27 | "type": "string", 28 | "index": "not_analyzed" 29 | }, 30 | "act": { 31 | "type": "string", 32 | "index": "not_analyzed" 33 | } 34 | } 35 | }, 36 | "gauge" : { 37 | "_source" : { "enabled" : true }, 38 | "properties": { 39 | "@timestamp": { 40 | "type": "date" 41 | }, 42 | "val": { 43 | "type": "double", 44 | "index": "not_analyzed" 45 | }, 46 | "ns": { 47 | "type": "string", 48 | "index": "not_analyzed" 49 | }, 50 | "grp": { 51 | "type": "string", 52 | "index": "not_analyzed" 53 | }, 54 | "tgt": { 55 | "type": "string", 56 | "index": "not_analyzed" 57 | }, 58 | "act": { 59 | "type": "string", 60 | "index": "not_analyzed" 61 | } 62 | } 63 | }, 64 | "timer" : { 65 | "_source" : { "enabled" : true }, 66 | "properties": { 67 | "@timestamp": { 68 | "type": "date" 69 | }, 70 | "val": { 71 | "type": "double", 72 | "index": "not_analyzed" 73 | }, 74 | "ns": { 75 | "type": "string", 76 | "index": "not_analyzed" 77 | }, 78 | "grp": { 79 | "type": "string", 80 | "index": "not_analyzed" 81 | }, 82 | "tgt": { 83 | "type": "string", 84 | "index": "not_analyzed" 85 | }, 86 | "act": { 87 | "type": "string", 88 | "index": "not_analyzed" 89 | } 90 | } 91 | }, 92 | "timer_data" : { 93 | "_source" : { "enabled" : true }, 94 | 
"properties": { 95 | "@timestamp": { 96 | "type": "date" 97 | }, 98 | "count_ps": { 99 | "type": "float", 100 | "index": "not_analyzed" 101 | }, 102 | "count": { 103 | "type": "float", 104 | "index": "not_analyzed" 105 | }, 106 | "upper": { 107 | "type": "float", 108 | "index": "not_analyzed" 109 | }, 110 | "lower": { 111 | "type": "float", 112 | "index": "not_analyzed" 113 | }, 114 | "mean": { 115 | "type": "float", 116 | "index": "not_analyzed" 117 | }, 118 | "median": { 119 | "type": "float", 120 | "index": "not_analyzed" 121 | }, 122 | "mean": { 123 | "type": "float", 124 | "index": "not_analyzed" 125 | }, 126 | "upper": { 127 | "type": "float", 128 | "index": "not_analyzed" 129 | }, 130 | "std": { 131 | "type": "float", 132 | "index": "not_analyzed" 133 | }, 134 | "sum": { 135 | "type": "float", 136 | "index": "not_analyzed" 137 | }, 138 | "mean_90": { 139 | "type": "float", 140 | "index": "not_analyzed" 141 | }, 142 | "upper_90": { 143 | "type": "float", 144 | "index": "not_analyzed" 145 | }, 146 | "sum_90": { 147 | "type": "float", 148 | "index": "not_analyzed" 149 | }, 150 | "bin_100": { 151 | "type": "integer", 152 | "index": "not_analyzed" 153 | }, 154 | "bin_500": { 155 | "type": "integer", 156 | "index": "not_analyzed" 157 | }, 158 | "bin_1000": { 159 | "type": "integer", 160 | "index": "not_analyzed" 161 | }, 162 | "bin_inf": { 163 | "type": "integer", 164 | "index": "not_analyzed" 165 | }, 166 | "ns": { 167 | "type": "string", 168 | "index": "not_analyzed" 169 | }, 170 | "grp": { 171 | "type": "string", 172 | "index": "not_analyzed" 173 | }, 174 | "tgt": { 175 | "type": "string", 176 | "index": "not_analyzed" 177 | }, 178 | "act": { 179 | "type": "string", 180 | "index": "not_analyzed" 181 | } 182 | } 183 | } 184 | } 185 | }' 186 | -------------------------------------------------------------------------------- /lib/default_format.js: -------------------------------------------------------------------------------- 1 | 2 | var counters = function (key, 
value, ts, bucket) { 3 | var listKeys = key.split('.'); 4 | var act = listKeys.slice(3, listKeys.length).join('.'); 5 | bucket.push({ 6 | "ns": listKeys[0] || '', 7 | "grp":listKeys[1] || '', 8 | "tgt":listKeys[2] || '', 9 | "act":act || '', 10 | "val":value, 11 | "@timestamp": ts 12 | }); 13 | return 1; 14 | } 15 | 16 | var timers = function (key, series, ts, bucket) { 17 | var listKeys = key.split('.'); 18 | var act = listKeys.slice(3, listKeys.length).join('.'); 19 | for (keyTimer in series) { 20 | bucket.push({ 21 | "ns": listKeys[0] || '', 22 | "grp":listKeys[1] || '', 23 | "tgt":listKeys[2] || '', 24 | "act":act || '', 25 | "val":series[keyTimer], 26 | "@timestamp": ts 27 | }); 28 | } 29 | return series.length; 30 | } 31 | 32 | var timer_data = function (key, value, ts, bucket) { 33 | var listKeys = key.split('.'); 34 | var act = listKeys.slice(3, listKeys.length).join('.'); 35 | value["@timestamp"] = ts; 36 | value["ns"] = listKeys[0] || ''; 37 | value["grp"] = listKeys[1] || ''; 38 | value["tgt"] = listKeys[2] || ''; 39 | value["act"] = act || ''; 40 | if (value['histogram']) { 41 | for (var keyH in value['histogram']) { 42 | value[keyH] = value['histogram'][keyH]; 43 | } 44 | delete value['histogram']; 45 | } 46 | bucket.push(value); 47 | } 48 | 49 | exports.counters = counters; 50 | exports.timers = timers; 51 | exports.timer_data = timer_data; 52 | exports.gauges = counters; 53 | -------------------------------------------------------------------------------- /lib/elasticsearch.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Flush stats to ElasticSearch (http://www.elasticsearch.org/) 3 | * 4 | * To enable this backend, include 'elastic' in the backends 5 | * configuration array: 6 | * 7 | * backends: ['./backends/elastic'] 8 | * (if the config file is in the statsd folder) 9 | * 10 | * A sample configuration can be found in exampleElasticConfig.js 11 | * 12 | * This backend supports the following config 
options: 13 | * 14 | * host: hostname or IP of ElasticSearch server 15 | * port: port of Elastic Search Server 16 | * path: http path of Elastic Search Server (default: '/') 17 | * indexPrefix: Prefix of the dynamic index to be created (default: 'statsd') 18 | * indexTimestamp: Timestamping format of the index, either "year", "month", "day", or "hour" 19 | * indexType: The dociment type of the saved stat (default: 'stat') 20 | */ 21 | 22 | var net = require('net'), 23 | util = require('util'), 24 | http = require('http'); 25 | // this will be instantiated to the logger 26 | var lg; 27 | var debug; 28 | var flushInterval; 29 | var elasticHost; 30 | var elasticPort; 31 | var elasticPath; 32 | var elasticIndex; 33 | var elasticIndexTimestamp; 34 | var elasticCountType; 35 | var elasticTimerType; 36 | var elasticUsername; 37 | var elasticPassword; 38 | 39 | var elasticStats = {}; 40 | 41 | 42 | var es_bulk_insert = function elasticsearch_bulk_insert(listCounters, listTimers, listTimerData, listGaugeData) { 43 | 44 | var renderKV = function(k, v) { 45 | if (typeof v == 'number') { 46 | return '"'+k+'":'+v; 47 | } 48 | return '"'+k+'":"'+v+'"'; 49 | /* 50 | if (k === '@timestamp') { 51 | var s = new Date(v).toISOString(); 52 | return '"'+k+'":"'+s+'"'; 53 | } else if (k === 'val') { 54 | return '"'+k+'":'+v; 55 | } else { 56 | return '"'+k+'":"'+v+'"'; 57 | } 58 | */ 59 | }; 60 | 61 | var indexDate = new Date(); 62 | 63 | var statsdIndex = elasticIndex + '-' + indexDate.getUTCFullYear() 64 | 65 | if (elasticIndexTimestamp == 'month' || elasticIndexTimestamp == 'day' || elasticIndexTimestamp == 'hour'){ 66 | var indexMo = indexDate.getUTCMonth() +1; 67 | if (indexMo < 10) { 68 | indexMo = '0'+indexMo; 69 | } 70 | statsdIndex += '.' + indexMo; 71 | } 72 | 73 | if (elasticIndexTimestamp == 'day' || elasticIndexTimestamp == 'hour'){ 74 | var indexDt = indexDate.getUTCDate(); 75 | if (indexDt < 10) { 76 | indexDt = '0'+indexDt; 77 | } 78 | statsdIndex += '.' 
+ indexDt; 79 | } 80 | 81 | if (elasticIndexTimestamp == 'hour'){ 82 | var indexDt = indexDate.getUTCHours(); 83 | if (indexDt < 10) { 84 | indexDt = '0'+indexDt; 85 | } 86 | statsdIndex += '.' + indexDt; 87 | } 88 | 89 | var payload = ''; 90 | for (key in listCounters) { 91 | payload += '{"index":{"_index":"'+statsdIndex+'","_type":"'+elasticCountType+'"}}'+"\n"; 92 | payload += '{'; 93 | innerPayload = ''; 94 | for (statKey in listCounters[key]){ 95 | if (innerPayload) innerPayload += ','; 96 | innerPayload += renderKV(statKey, listCounters[key][statKey]); 97 | //innerPayload += '"'+statKey+'":"'+listCounters[key][statKey]+'"'; 98 | } 99 | payload += innerPayload +'}'+"\n"; 100 | } 101 | for (key in listTimers) { 102 | payload += '{"index":{"_index":"'+statsdIndex+'","_type":"'+elasticTimerType+'"}}'+"\n"; 103 | payload += '{'; 104 | innerPayload = ''; 105 | for (statKey in listTimers[key]){ 106 | if (innerPayload) innerPayload += ','; 107 | innerPayload += renderKV(statKey, listTimers[key][statKey]); 108 | //innerPayload += '"'+statKey+'":"'+listTimers[key][statKey]+'"'; 109 | } 110 | payload += innerPayload +'}'+"\n"; 111 | } 112 | for (key in listTimerData) { 113 | payload += '{"index":{"_index":"'+statsdIndex+'","_type":"'+elasticTimerDataType+'"}}'+"\n"; 114 | payload += '{'; 115 | innerPayload = ''; 116 | for (statKey in listTimerData[key]){ 117 | if (innerPayload) innerPayload += ','; 118 | innerPayload += renderKV(statKey, listTimerData[key][statKey]); 119 | //innerPayload += '"'+statKey+'":"'+listTimerData[key][statKey]+'"'; 120 | } 121 | payload += innerPayload +'}'+"\n"; 122 | } 123 | for (key in listGaugeData) { 124 | payload += '{"index":{"_index":"'+statsdIndex+'","_type":"'+elasticGaugeDataType+'"}}'+"\n"; 125 | payload += '{'; 126 | innerPayload = ''; 127 | for (statKey in listGaugeData[key]){ 128 | if (innerPayload) innerPayload += ','; 129 | innerPayload += renderKV(statKey, listGaugeData[key][statKey]); 130 | //innerPayload += 
'"'+statKey+'":"'+listGaugeData[key][statKey]+'"'; 131 | } 132 | payload += innerPayload +'}'+"\n"; 133 | } 134 | 135 | 136 | var optionsPost = { 137 | host: elasticHost, 138 | port: elasticPort, 139 | path: elasticPath + statsdIndex + '/' + '/_bulk', 140 | method: 'POST', 141 | headers: { 142 | 'Content-Type': 'application/json', 143 | 'Content-Length': payload.length 144 | } 145 | }; 146 | 147 | if(elasticUsername && elasticPassword) { 148 | optionsPost.auth = elasticUsername + ':' + elasticPassword; 149 | } 150 | 151 | var req = http.request(optionsPost, function(res) { 152 | res.on('data', function(d) { 153 | if (Math.floor(res.statusCode / 100) == 5){ 154 | var errdata = "HTTP " + res.statusCode + ": " + d; 155 | lg.log('error', errdata); 156 | } 157 | }); 158 | }).on('error', function(err) { 159 | lg.log('error', 'Error with HTTP request, no stats flushed.'); 160 | console.log(err); 161 | }); 162 | 163 | if (debug) { 164 | lg.log('ES payload:'); 165 | lg.log(payload); 166 | } 167 | req.write(payload); 168 | req.end(); 169 | } 170 | 171 | var flush_stats = function elastic_flush(ts, metrics) { 172 | var statString = ''; 173 | var numStats = 0; 174 | var key; 175 | var array_counts = new Array(); 176 | var array_timers = new Array(); 177 | var array_timer_data = new Array(); 178 | var array_gauges = new Array(); 179 | 180 | ts = ts*1000; 181 | /* 182 | var gauges = metrics.gauges; 183 | var pctThreshold = metrics.pctThreshold; 184 | */ 185 | 186 | for (key in metrics.counters) { 187 | numStats += fm.counters(key, metrics.counters[key], ts, array_counts); 188 | } 189 | 190 | for (key in metrics.timers) { 191 | numStats += fm.timers(key, metrics.timers[key], ts, array_timers); 192 | } 193 | 194 | if (array_timers.length > 0) { 195 | for (key in metrics.timer_data) { 196 | fm.timer_data(key, metrics.timer_data[key], ts, array_timer_data); 197 | } 198 | } 199 | 200 | for (key in metrics.gauges) { 201 | numStats += fm.gauges(key, metrics.gauges[key], ts, 
array_gauges); 202 | } 203 | if (debug) { 204 | lg.log('metrics:'); 205 | lg.log( JSON.stringify(metrics) ); 206 | } 207 | 208 | es_bulk_insert(array_counts, array_timers, array_timer_data, array_gauges); 209 | 210 | if (debug) { 211 | lg.log("debug", "flushed " + numStats + " stats to ES"); 212 | } 213 | }; 214 | 215 | var elastic_backend_status = function (writeCb) { 216 | for (stat in elasticStats) { 217 | writeCb(null, 'elastic', stat, elasticStats[stat]); 218 | } 219 | }; 220 | 221 | exports.init = function elasticsearch_init(startup_time, config, events, logger) { 222 | 223 | debug = config.debug; 224 | lg = logger; 225 | 226 | var configEs = config.elasticsearch || { }; 227 | 228 | elasticHost = configEs.host || 'localhost'; 229 | elasticPort = configEs.port || 9200; 230 | elasticPath = configEs.path || '/'; 231 | elasticIndex = configEs.indexPrefix || 'statsd'; 232 | elasticIndexTimestamp = configEs.indexTimestamp || 'day'; 233 | elasticCountType = configEs.countType || 'counter'; 234 | elasticTimerType = configEs.timerType || 'timer'; 235 | elasticTimerDataType = configEs.timerDataType || elasticTimerType + '_stats'; 236 | elasticGaugeDataType = configEs.gaugeDataType || 'gauge'; 237 | elasticFormatter = configEs.formatter || 'default_format'; 238 | elasticUsername = configEs.username || undefined; 239 | elasticPassword = configEs.password || undefined; 240 | 241 | fm = require('./' + elasticFormatter + '.js') 242 | if (debug) { 243 | lg.log("debug", "loaded formatter " + elasticFormatter); 244 | } 245 | 246 | if (fm.init) { 247 | fm.init(configEs); 248 | } 249 | flushInterval = config.flushInterval; 250 | 251 | elasticStats.last_flush = startup_time; 252 | elasticStats.last_exception = startup_time; 253 | 254 | 255 | events.on('flush', flush_stats); 256 | events.on('status', elastic_backend_status); 257 | 258 | return true; 259 | }; 260 | 261 | -------------------------------------------------------------------------------- /lib/regex_format.js: 
-------------------------------------------------------------------------------- 1 | /* 2 | 3 | Remember that setting a new regix in the config for statsd uses 4 | a native regular expression data type, not a string! 5 | */ 6 | var named = require('named-regexp').named 7 | , extend = require('util')._extend; 8 | 9 | var keyRegex; 10 | 11 | var init = function(config) { 12 | keyRegex = named(config.keyRegex || /^(:[^.]+)\.(:[^.]+)\.(:[^.]+)(?:\.(:[^.]+))?/); 13 | } 14 | 15 | var counters = function (key, value, ts, bucket) { 16 | var matched = keyRegex.exec(key); 17 | if (matched === null) return 0; 18 | 19 | bucket.push(extend(matched.captures, { 20 | "val": value, 21 | "@timestamp": ts 22 | })); 23 | return 1; 24 | } 25 | 26 | var timers = function (key, series, ts, bucket) { 27 | var matched = keyRegex.exec(key); 28 | if (matched === null) return 0; 29 | for (keyTimer in series) { 30 | bucket.push(extend({ 31 | "val": series[keyTimer], 32 | "@timestamp": ts 33 | }, matched.captures)); 34 | } 35 | return series.length; 36 | } 37 | 38 | var timer_data = function (key, value, ts, bucket) { 39 | var matched = keyRegex.exec(key); 40 | if (matched === null) return; 41 | 42 | var value = extend(value, matched.captures); 43 | 44 | value["@timestamp"] = ts; 45 | if (value['histogram']) { 46 | for (var keyH in value['histogram']) { 47 | value[keyH] = value['histogram'][keyH]; 48 | } 49 | delete value['histogram']; 50 | } 51 | bucket.push(value); 52 | } 53 | 54 | exports.counters = counters; 55 | exports.gauges = counters; 56 | exports.timers = timers; 57 | exports.timer_data = timer_data; 58 | exports.init = init; 59 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "author": "Ramesh Perumalsamy", 3 | "contributors": [ 4 | { 5 | "name": "Mark Kimsal" 6 | } 7 | ], 8 | "name": "statsd-elasticsearch-backend", 9 | "description": "A StatsD 
backend for Elasticsearch", 10 | "version": "0.4.2", 11 | "homepage": "https://github.com/markkimsal/statsd-elasticsearch-backend", 12 | "repository": { 13 | "type": "git", 14 | "url": "git://github.com/markkimsal/statsd-elasticsearch-backend.git" 15 | }, 16 | "keywords": [ 17 | "elasticsearch", 18 | "metrics", 19 | "statsd" 20 | ], 21 | "engines": { 22 | "node": ">=0.8" 23 | }, 24 | "dependencies": { 25 | "named-regexp": "^0.1.1" 26 | }, 27 | "devDependencies": {}, 28 | "main": "lib/elasticsearch.js" 29 | } 30 | -------------------------------------------------------------------------------- /send-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "accounts.authentication.password.failed:1|c" | nc -u -w0 127.0.0.1 8125 3 | echo "accounts.authentication.login.time:320|ms|@0.1" | nc -u -w0 127.0.0.1 8125 4 | echo "accounts.authentication.login.num_users:333|g" | nc -u -w0 127.0.0.1 8125 5 | echo "accounts.authentication.login.num_users:-10|g" | nc -u -w0 127.0.0.1 8125 6 | 7 | -------------------------------------------------------------------------------- /utils/httpReq.js: -------------------------------------------------------------------------------- 1 | // module dependencies 2 | var http = require('http'), 3 | url = require('url'); 4 | 5 | 6 | /** 7 | * UrlReq - Wraps the http.request function making it nice for unit testing APIs. 
8 | * 9 | * @param {string} reqUrl The required url in any form 10 | * @param {object} options An options object (this is optional) 11 | * @param {Function} cb This is passed the 'res' object from your request 12 | * 13 | * Credits : https://gist.github.com/1943352 14 | * Picked it up from the above URL 15 | */ 16 | exports.urlReq = function(reqUrl, options, cb){ 17 | if(typeof options === "function"){ cb = options; options = {}; }// incase no options passed in 18 | 19 | // parse url to chunks 20 | reqUrl = url.parse(reqUrl); 21 | 22 | // http.request settings 23 | var settings = { 24 | host: reqUrl.hostname, 25 | port: reqUrl.port || 80, 26 | path: reqUrl.pathname, 27 | headers: options.headers || {}, 28 | method: options.method || 'GET' 29 | }; 30 | 31 | // if there are params: 32 | if(options.params){ 33 | settings.headers['Content-Type'] = 'application/json'; 34 | }; 35 | 36 | // MAKE THE REQUEST 37 | var req = http.request(settings); 38 | 39 | // if there are params: write them to the request 40 | if(options.params){ req.write(options.params) }; 41 | 42 | // when the response comes back 43 | req.on('response', function(res){ 44 | res.body = ''; 45 | res.setEncoding('utf-8'); 46 | 47 | // concat chunks 48 | res.on('data', function(chunk){ 49 | res.body += chunk ; 50 | }); 51 | 52 | // when the response has finished 53 | res.on('end', function(){ 54 | 55 | // fire callback 56 | cb(res.body, res); 57 | }); 58 | }); 59 | 60 | // end the request 61 | req.end(); 62 | } 63 | 64 | 65 | --------------------------------------------------------------------------------