├── .gitignore ├── exampleConfig.js ├── CHANGELOG.md ├── LICENSE ├── package.json ├── README.md ├── test └── test.js └── lib └── newrelic-infra.js /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules -------------------------------------------------------------------------------- /exampleConfig.js: -------------------------------------------------------------------------------- 1 | { 2 | debug: false, 3 | port: 8125, 4 | backends: [ "@newrelic/statsd-infra-backend" ], 5 | newrelic: { 6 | port: 8001, 7 | host: "localhost", 8 | rules: [ 9 | { 10 | matchExpression: "app1.production.localhost.sample_metric", 11 | metricSchema: "{app}.{environment}.{hostname}.{metricName}", 12 | eventType: "MyorgApplicationSample", 13 | labels: { 14 | role: "test", 15 | environment: "{environment}" 16 | } 17 | }, 18 | ] 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](http://keepachangelog.com/) 5 | and this project adheres to [Semantic Versioning](http://semver.org/). 
6 | 7 | ## 1.0.4 8 | ### Fixed 9 | - Fix `handlebars`, `uglify-js` and `commander` security vulnerabilities 10 | 11 | ## 1.0.3 12 | ### Fixed 13 | - Fix `lodash` and `lodash.template` security vulnerabilities 14 | 15 | ## 1.0.2 16 | - Fix metrics overlapping buffer by appending events 17 | 18 | ## 1.0.1 19 | ### Added 20 | - Configuration example file 21 | 22 | ### Fixed 23 | - Fix stats admin command 24 | 25 | ## 1.0.0 26 | ### Added 27 | - Initial release: Reporting StatsD metrics as New Relic Infrastructure metrics 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 New Relic, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@newrelic/statsd-infra-backend", 3 | "version": "1.0.3", 4 | "description": "New Relic Infrastructure backend for StatsD", 5 | "main": "lib/newrelic-infra.js", 6 | "scripts": { 7 | "test": "mocha", 8 | "release": "standard-version -m 'Release v%s'" 9 | }, 10 | "repository": { 11 | "type": "git", 12 | "url": "https://github.com/newrelic/statsd-infra-backend.git" 13 | }, 14 | "keywords": [ 15 | "newrelic", 16 | "statsd", 17 | "metrics", 18 | "infrastructure" 19 | ], 20 | "author": "New Relic Infrastructure team", 21 | "contributors": [ 22 | { 23 | "name": "Toni Reina ", 24 | "web": "https://github.com/areina" 25 | } 26 | ], 27 | "license": "SEE LICENSE IN LICENSE", 28 | "engines": { 29 | "node": ">=0.10" 30 | }, 31 | "devDependencies": { 32 | "mocha": "^5.2.0", 33 | "nock": "^9.6.1", 34 | "standard-version": "^6.0.1" 35 | }, 36 | "standard-version": { 37 | "skip": { 38 | "changelog": true 39 | } 40 | }, 41 | "dependencies": { 42 | "lodash": "^4.17.13", 43 | "lodash.template": "^4.5.0" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | StatsD backend for sending metrics to New Relic Infrastructure 4 | 5 | ## Requirements 6 | 7 | * StatsD versions >= 0.3.0. 8 | * New Relic Infrastructure Agent >= v1.0.818 9 | 10 | ## Installation 11 | 12 | ```sh 13 | $ cd /path/to/statsd 14 | $ npm install @newrelic/statsd-infra-backend 15 | ``` 16 | 17 | ## Enabling 18 | 19 | 1. Add `@newrelic/statsd-infra-backend` backend to the list of StatsD backends in the StatsD configuration file. 20 | 21 | ```js 22 | { 23 | backends: ["@newrelic/statsd-infra-backend"], 24 | } 25 | ``` 26 | 27 | 2. 
Configure the necessary configuration values for running this backend:

```js
newrelic: {
  port: 8001,
  rules: [
    {
      matchExpression: "myapp.*redis.*",
      metricSchema: "{app}.{environment}.{service}.{serviceName}.{metricName}",
      eventType: "RedisStatsdSample",
      labels: {
        role: "cache",
        environment: "{environment}"
      }
    }
  ]
}
```

See our [example config file](exampleConfig.js) for a complete StatsD configuration.

3. Start/restart the StatsD daemon and your metrics should now be pushed to your
New Relic Infrastructure account.

## Development

- Fork and clone this project
- Download project dependencies using `npm`
- Modify the code
- Ensure everything is running properly by executing the tests: `npm test`
- Push the code to your fork
- Send a Pull Request

## Release

- Update the `CHANGELOG.md` file with all the info about the new release.
- Run `npm run release`. Check
  [this](https://github.com/conventional-changelog/standard-version#release-as-a-target-type-imperatively-like-npm-version)
  to see all the different options for this command.
- Run `git push --follow-tags origin master && npm publish` to publish the package
- Create the GitHub release pointing to the tag created by `npm run release`

## License

New Relic Infrastructure Backend for StatsD is distributed under the MIT
license. Please see the full license (found in [LICENSE](LICENSE) in this
distribution) for details on its license and the licenses of its dependencies.
74 | -------------------------------------------------------------------------------- /test/test.js: -------------------------------------------------------------------------------- 1 | process.env.NODE_ENV = 'test'; 2 | 3 | const assert = require('assert'); 4 | const events = require('events'); 5 | const nri = require('../lib/newrelic-infra.js'); 6 | const util = require('util'); 7 | const nock = require('nock'); 8 | 9 | describe('Unit testing indexForMetric', function () { 10 | 11 | it('indexes a metric by all the attributes except the value field', function(done){ 12 | let fields = { 13 | "app": "foo", 14 | "client": "bar", 15 | "foo": 0.13 16 | }; 17 | 18 | assert.equal(nri.indexForMetric(fields, "foo"), "app=foo;client=bar"); 19 | done(); 20 | }); 21 | 22 | it('indexes by sorting alphabetically', function(done){ 23 | let fields = { 24 | "client": "bar", 25 | "app": "foo", 26 | "foo": 0.13 27 | }; 28 | 29 | assert.equal(nri.indexForMetric(fields, "foo"), "app=foo;client=bar"); 30 | done(); 31 | }); 32 | 33 | }); 34 | 35 | describe('New Relic Infrastructure StatsD Backend', function () { 36 | before(function () { 37 | nock.disableNetConnect(); 38 | }); 39 | const defaultConfig = { 40 | debug: false, 41 | newrelic: { 42 | port: 9070, 43 | rules: [{ 44 | matchExpression: '.*', 45 | metricSchema: '{metricName}', 46 | entityType: 'foo', 47 | entityName: 'bar', 48 | eventType: 'Example' 49 | }] 50 | } 51 | }; 52 | const defaultIntegration = { 53 | name: 'com.newrelic.statsd', 54 | integration_version: '0.1.0', 55 | protocol_version: '1', 56 | metrics: [], 57 | inventory: {}, 58 | events: [] 59 | }; 60 | 61 | describe('nriInitBackend', function () { 62 | const timestamp = 12345; 63 | 64 | it('no matching rules', function () { 65 | const emitter = new events.EventEmitter(); 66 | const config = Object.assign({}, defaultConfig); 67 | config.newrelic.rules = [{ 68 | matchExpression: '.*redis.*', 69 | metricSchema: '{app}.{service}.{metricName}', 70 | entityType: 'Redis 
Cluster', 71 | entityName: 'Production Host1', 72 | eventType: 'RedisSample' 73 | }]; 74 | 75 | const metrics = { 76 | gauges: {my_gauge: 1}, 77 | counters: {my_counter: 10}, 78 | counter_rates: {my_counter: 1} 79 | }; 80 | const httpserver = nock('http://localhost:9070') 81 | .post('/v1/data') 82 | .reply(204); 83 | 84 | nri.init(null, config, emitter, util); 85 | emitter.emit('flush', timestamp, metrics); 86 | assert.equal(httpserver.isDone(), false); 87 | nock.cleanAll(); 88 | }); 89 | 90 | it('valid matching rules', function (done) { 91 | const emitter = new events.EventEmitter(); 92 | const config = Object.assign({}, defaultConfig); 93 | config.newrelic.rules = [{ 94 | matchExpression: '.*redis.*', 95 | metricSchema: '{app}.{service}.{metricName}', 96 | entityType: 'Redis Cluster', 97 | entityName: 'Production Host1', 98 | eventType: 'RedisSample' 99 | }]; 100 | const metrics = { 101 | gauges: {'myapp.redis.my_gauge': 1}, 102 | counters: {'myapp.redis.my_counter': 10}, 103 | counter_rates: {'myapp.redis.my_counter': 1}, 104 | timer_data: { 105 | 'myapp.redis.my_timer': { 106 | sum: 10, 107 | mean: 10 108 | } 109 | } 110 | }; 111 | const expected = defaultIntegration; 112 | expected.metrics = [ 113 | { 114 | "event_type": "RedisSample", 115 | "app": "myapp", 116 | "service": "redis", 117 | "my_counter": 10, 118 | "my_counterPerSecond": 1, 119 | "my_gauge": 1, 120 | "my_timer.mean": 10, 121 | "my_timer.sum": 10, 122 | } 123 | ]; 124 | 125 | const httpserver = nock('http://localhost:9070') 126 | .post('/v1/data') 127 | .reply(204, function (uri, requestBody) { 128 | assert.deepEqual(requestBody, expected); 129 | done(); 130 | }); 131 | nri.init(null, config, emitter, util); 132 | emitter.emit('flush', timestamp, metrics); 133 | assert.equal(httpserver.isDone(), true); 134 | }); 135 | 136 | it('matching rules with invalid metricSchema', function () { 137 | const emitter = new events.EventEmitter(); 138 | const config = Object.assign({}, defaultConfig); 139 | 
config.newrelic.rules = [{ 140 | matchExpression: '.*redis.*', 141 | metricSchema: '{app}.{service}.{metricName}', 142 | entityType: 'Redis Cluster', 143 | entityName: 'Production Host1', 144 | eventType: 'RedisSample' 145 | }]; 146 | 147 | const metrics = { 148 | gauges: {'redis.my_gauge': 1} 149 | }; 150 | const httpserver = nock('http://localhost:9070') 151 | .post('/v1/data') 152 | .reply(204); 153 | 154 | nri.init(null, config, emitter, util); 155 | emitter.emit('flush', timestamp, metrics); 156 | assert.equal(httpserver.isDone(), false); 157 | nock.cleanAll(); 158 | }); 159 | 160 | it('nomad telemetry', function (done) { 161 | const emitter = new events.EventEmitter(); 162 | const config = Object.assign({}, defaultConfig); 163 | config.newrelic.rules = [{ 164 | matchExpression: 'nomad.client.allocs.cpu.*', 165 | metricSchema: "{app}.{client}.{type}.{resource}.{metricName}.{jobName}.{taskGroupName}.{allocationID}.{taskName}.{ipAddress}", 166 | entityType: 'Nomad telemetry', 167 | entityName: 'Production Host1', 168 | eventType: 'eventFoo' 169 | }]; 170 | const metrics = { 171 | gauges: { 172 | 'nomad.client.allocs.cpu.total_percent.job-a.task-group-a.xxx-yyy.task-a.ip-foo-bar': 0.58028 173 | ,'nomad.client.allocs.cpu.total_percent.job-b.task-group-b.yyy-zzz.task-b.ip-foo-bar': 0.026463 174 | ,'nomad.client.allocs.cpu.foo_bar_baz_q.job-b.task-group-b.yyy-zzz.task-b.ip-foo-bar': 0.01 175 | } 176 | }; 177 | const expected = defaultIntegration; 178 | expected.metrics = [ 179 | { 180 | event_type: "eventFoo", 181 | app: "nomad", 182 | client: "client", 183 | type: "allocs", 184 | ipAddress: "ip-foo-bar", 185 | jobName: "job-a", 186 | resource: "cpu", 187 | taskGroupName: "task-group-a", 188 | allocationID: "xxx-yyy", 189 | taskName: "task-a", 190 | total_percent: 0.58028 191 | }, { 192 | event_type: "eventFoo", 193 | app: "nomad", 194 | type: "allocs", 195 | client: "client", 196 | ipAddress: "ip-foo-bar", 197 | jobName: "job-b", 198 | resource: "cpu", 199 | 
taskGroupName: "task-group-b", 200 | allocationID: "yyy-zzz", 201 | taskName: "task-b", 202 | foo_bar_baz_q: 0.01, 203 | total_percent: 0.026463 204 | } 205 | ]; 206 | 207 | const httpserver = nock('http://localhost:9070') 208 | .post('/v1/data') 209 | .reply(204, function (uri, requestBody) { 210 | assert.deepEqual(requestBody, expected); 211 | done(); 212 | }); 213 | nri.init(null, config, emitter, util); 214 | emitter.emit('flush', timestamp, metrics); 215 | assert.equal(httpserver.isDone(), true); 216 | }); 217 | 218 | it('limit of keys exceeded', function (done) { 219 | const emitter = new events.EventEmitter(); 220 | const config = Object.assign({}, defaultConfig); 221 | const metricsLimit = 2; 222 | config.newrelic.rules = [{ 223 | matchExpression: '.*redis.*', 224 | metricSchema: '{app}.{service}.{metricName}', 225 | entityType: 'Redis Cluster', 226 | entityName: 'Production Host1', 227 | eventType: 'RedisSample' 228 | }]; 229 | config.newrelic.metricsLimit = metricsLimit; 230 | const metrics = { 231 | gauges: {'myapp.redis.my_gauge': 1}, 232 | counters: {'myapp.redis.my_counter': 10}, 233 | counter_rates: {'myapp.redis.my_counter': 1}, 234 | timer_data: { 235 | 'myapp.redis.my_timer': { 236 | sum: 10, 237 | mean: 10 238 | } 239 | } 240 | }; 241 | const expected = defaultIntegration; 242 | expected.metrics = [ 243 | { 244 | event_type: 'StatsdLimitErrorSample', 245 | numberOfMetrics: 8, 246 | configuredLimit: metricsLimit 247 | }]; 248 | const httpserver = nock('http://localhost:9070') 249 | .post('/v1/data') 250 | .reply(204, function (uri, requestBody) { 251 | assert.deepEqual(requestBody, expected); 252 | done(); 253 | }); 254 | nri.init(null, config, emitter, util); 255 | emitter.emit('flush', timestamp, metrics); 256 | assert.equal(httpserver.isDone(), true); 257 | }); 258 | 259 | }); 260 | }); 261 | -------------------------------------------------------------------------------- /lib/newrelic-infra.js: 
-------------------------------------------------------------------------------- 1 | /* jshint node:true, laxcomma:true */ 2 | 3 | /* 4 | * Flush stats to New Relic Infrastucture Agent. 5 | * 6 | * To enable this backend, include 'newrelic-infra' in the backends 7 | * configuration array: 8 | * 9 | * backends: ['newrelic-infra'] 10 | * 11 | * This backend supports the following config options in 'newrelic' key: 12 | * 13 | * port: Port where Infrastructure Agent is listening. Defaults to '8001'. 14 | * rules: A list of rules to convert StatsD metrics and compose New Relic 15 | * Infrastructure payloads. 16 | * 17 | */ 18 | 19 | if (!Object.assign) { 20 | Object.defineProperty(Object, 'assign', { 21 | enumerable: false, 22 | configurable: true, 23 | writable: true, 24 | value: function(target) { 25 | 'use strict'; 26 | if (target === undefined || target === null) { 27 | throw new TypeError('Cannot convert first argument to object'); 28 | } 29 | 30 | var to = Object(target); 31 | for (var i = 1; i < arguments.length; i++) { 32 | var nextSource = arguments[i]; 33 | if (nextSource === undefined || nextSource === null) { 34 | continue; 35 | } 36 | nextSource = Object(nextSource); 37 | 38 | var keysArray = Object.keys(Object(nextSource)); 39 | for (var nextIndex = 0, len = keysArray.length; nextIndex < len; nextIndex++) { 40 | var nextKey = keysArray[nextIndex]; 41 | var desc = Object.getOwnPropertyDescriptor(nextSource, nextKey); 42 | if (desc !== undefined && desc.enumerable) { 43 | to[nextKey] = nextSource[nextKey]; 44 | } 45 | } 46 | } 47 | return to; 48 | } 49 | }); 50 | } 51 | 52 | var http = require('http'); 53 | 54 | var globalLogger; 55 | var debug; 56 | var host; 57 | var port; 58 | var sendTimeoutInSeconds = 1; 59 | var rules = []; 60 | var metricsLimit; 61 | var sendLimitErrors; 62 | var nriStats = {}; 63 | 64 | var logRequestInfo = function nriRequestInfo(req) { 65 | return req.method + ' http://' + req.getHeader('host') + '' + req.path 66 | }; 67 | 68 | var 
sendPayload = function nriSend(host, port, payload) { 69 | if (debug) { 70 | globalLogger.log('Sending payload: ' + payload); 71 | } 72 | var startTime = Date.now(); 73 | var options = { 74 | host: host, 75 | port: port, 76 | path: '/v1/data', 77 | method: 'POST', 78 | headers: { 79 | 'Content-Length': Buffer.byteLength(payload), 80 | 'Content-Type': 'application/json', 81 | 'User-Agent': 'StatsD-backend' 82 | } 83 | }; 84 | var req = http.request(options, function(res) { 85 | var resData = ""; 86 | res.on('data', function(d) { 87 | resData += d; 88 | }); 89 | res.on('end', function() { 90 | switch (Math.floor(res.statusCode / 100)) { 91 | case 5: 92 | if (debug) { 93 | globalLogger.log('Unexpected error from New Relic Infrastructure Agent. HTTP ' + res.statusCode + ' error: ' + resData); 94 | } 95 | nriStats.last_exception = Math.round(Date.now() / 1000); 96 | break; 97 | case 4: 98 | if (debug) { 99 | globalLogger.log('Error sending JSON payload to New Relic Infrastructure Agent (' + logRequestInfo(req) + '). HTTP ' + res.statusCode + ' error: ' + resData); 100 | } 101 | nriStats.last_exception = Math.round(Date.now() / 1000); 102 | break; 103 | case 2: 104 | if (debug) { 105 | globalLogger.log('Payload sent successfully'); 106 | } 107 | break; 108 | default: 109 | if (debug) { 110 | globalLogger.log('Unexpected response from New Relic Infrastructure Agent. HTTP ' + res.statusCode + ' error: ' + resData); 111 | } 112 | } 113 | }); 114 | }); 115 | req.on('error', function(e) { 116 | if (debug) { 117 | globalLogger.log('Unexepected error requesting New Relic Infrastructure Agent. 
Error: ' + e.message); 118 | } 119 | }); 120 | req.setTimeout(sendTimeoutInSeconds * 1000, function() { 121 | if (debug) { 122 | globalLogger.log('Request timed out sending JSON payload to New Relic Infrastructure agent'); 123 | } 124 | req.end(); 125 | }); 126 | req.write(payload); 127 | req.end(); 128 | nriStats.flush_time = Math.round(Date.now() - startTime); 129 | nriStats.flush_length = payload.length; 130 | nriStats.last_flush = Math.round(Date.now() / 1000); 131 | }; 132 | 133 | function indexForMetric(metricFields, valueField) { 134 | var idx = ''; 135 | var keys = Object.keys(metricFields); 136 | keys.sort(); 137 | var separator = ''; 138 | keys.forEach(function (key) { 139 | if (valueField === key) { 140 | return 141 | } 142 | idx = idx + separator + key + '=' + metricFields[key]; 143 | separator = ';' 144 | }); 145 | 146 | return idx; 147 | } 148 | 149 | var collectMetrics = function nriCollectMetrics(rawMetrics, rules) { 150 | var gauges = rawMetrics.gauges || {}; 151 | var counters = rawMetrics.counters || {}; 152 | var counterRates = rawMetrics.counter_rates; 153 | var timerData = rawMetrics.timer_data || {}; 154 | var sets = rawMetrics.sets || {}; 155 | var data = {}; 156 | 157 | var ruleTemplate = function evalSchema(tpl, metricData) { 158 | var re = /{([^}]+)?}/g; 159 | var match; 160 | var result = tpl; 161 | while ((match = re.exec(tpl))) { 162 | result = result.replace(match[0], metricData[match[1]]); 163 | } 164 | return result; 165 | }; 166 | var validateKeyWithSchema = function validate(key, schema) { 167 | var splittedSchema = schema.split('.'); 168 | var splittedKey = key.split('.'); 169 | 170 | return splittedKey.length >= splittedSchema.length; 171 | }; 172 | var extractSchemaFields = function(key, schema) { 173 | var splittedSchema = schema.split('.'); 174 | var splittedKey = key.split('.'); 175 | var fields = {}; 176 | splittedSchema.forEach(function(sword, idx) { 177 | var word = sword.match(/{([^}]*).*/); 178 | var value; 179 | if 
(word !== null) { 180 | word = word[1]; 181 | if (idx + 1 === splittedSchema.length && idx + 1 < splittedKey.length) { 182 | value = splittedKey.slice(idx, splittedKey.length).join('.'); 183 | } else { 184 | value = splittedKey[idx]; 185 | } 186 | fields[word] = value; 187 | } 188 | }); 189 | return fields; 190 | }; 191 | var evalRule = function evalRule(rule, metricName, value) { 192 | var re = new RegExp(rule.matchExpression); 193 | var found = re.test(metricName); 194 | 195 | if (found) { 196 | if (validateKeyWithSchema(metricName, rule.metricSchema)) { 197 | var metricFields = extractSchemaFields(metricName, rule.metricSchema); 198 | var eventType = rule.eventType; 199 | if (debug) { 200 | if (/{.*}/.test(eventType)) { 201 | globalLogger.log('You can\'t use variable substitutions for EventType: ${rule.eventType}'); 202 | } 203 | } 204 | var entityName = ruleTemplate(rule.entityName, metricFields); 205 | var entityType = ruleTemplate(rule.entityType, metricFields); 206 | var entityId = entityType + ':' + entityName; 207 | 208 | var valueField = metricFields.metricName; 209 | metricFields[metricFields.metricName] = value; 210 | metricFields['event_type'] = eventType; 211 | delete metricFields.metricName; 212 | 213 | var idx = indexForMetric(metricFields, valueField); 214 | 215 | if (data.hasOwnProperty(entityId)) { 216 | if (!data[entityId].metrics.hasOwnProperty(idx)) { 217 | data[entityId].metrics[idx] = metricFields; 218 | } else { 219 | data[entityId].metrics[idx][valueField] = value; 220 | } 221 | } else { 222 | data[entityId] = { 223 | entity: { name: entityName, type: entityType }, 224 | metrics: {} 225 | }; 226 | data[entityId].metrics[idx] = metricFields; 227 | } 228 | 229 | Object.keys(rule.labels || {}).forEach(function(label) { 230 | data[entityId].metrics[idx]['label.' + label] = ruleTemplate(rule.labels[label], metricFields); 231 | }); 232 | } else if (debug) { 233 | globalLogger.log("It isn't possible to compose an event for key " + metricName + ". 
It has less elements than metric schema: " + rule.metricSchema); 234 | } 235 | } 236 | 237 | return found; 238 | }; 239 | 240 | if (debug) { 241 | var expressions = rules.map(function(rule) { return rule.matchExpression; }); 242 | globalLogger.log('Matching keys against rule expressions: [' + expressions.join(', ') + ']'); 243 | } 244 | 245 | var matchedKeys = 0; 246 | 247 | Object.keys(counters).forEach(function(metricName) { 248 | rules.forEach(function(rule) { 249 | if (evalRule(rule, metricName, counters[metricName])) { 250 | evalRule(rule, metricName + 'PerSecond', counterRates[metricName]); 251 | matchedKeys++; 252 | } 253 | }); 254 | }); 255 | 256 | Object.keys(timerData).forEach(function(metricName) { 257 | rules.forEach(function(rule) { 258 | Object.keys(timerData[metricName]).forEach(function(timerKey) { 259 | evalRule(rule, metricName + '.' + timerKey, timerData[metricName][timerKey]); 260 | }); 261 | }); 262 | }); 263 | 264 | Object.keys(gauges).forEach(function(metricName) { 265 | rules.forEach(function(rule) { 266 | evalRule(rule, metricName, gauges[metricName]); 267 | }); 268 | }); 269 | 270 | Object.keys(sets).forEach(function(metricName) { 271 | rules.forEach(function(rule) { 272 | evalRule(rule, metricName + '.count', sets[metricName].size()); 273 | }); 274 | }); 275 | 276 | if (debug) { 277 | var totalKeys = Object.keys(counters).length + Object.keys(timerData).length + Object.keys(gauges).length + Object.keys(sets).length; 278 | globalLogger.log('Matched keys ' + matchedKeys + '. 
Total keys: ' + totalKeys); 279 | } 280 | 281 | return data; 282 | }; 283 | 284 | var composePayload = function nriPayload(data) { 285 | var defaultIntegration = { 286 | name: 'com.newrelic.statsd', 287 | integration_version: '0.1.0', 288 | protocol_version: '1' 289 | }; 290 | 291 | var v1Payload = function (v1data) { 292 | var integration = Object.assign({}, defaultIntegration); 293 | var metricSets = []; 294 | 295 | Object.keys(v1data).forEach(function (entityId) { 296 | var metrics = v1data[entityId].metrics; 297 | Object.keys(metrics).forEach(function (idx) { 298 | var values = metrics[idx]; 299 | var metricsLength = Object.keys(values).length; 300 | 301 | if (metricsLength > metricsLimit) { 302 | if (sendLimitErrors) { 303 | metricSets.push( 304 | { 305 | event_type: "StatsdLimitErrorSample", 306 | numberOfMetrics: metricsLength, 307 | configuredLimit: metricsLimit, 308 | } 309 | ) 310 | } 311 | if (debug) { 312 | globalLogger.log("The event has more than " + metricsLimit + " metrics and can't be processed. 
Metrics length: " + metricsLength); 313 | } 314 | nriStats.last_exception = Math.round(Date.now() / 1000); 315 | } else { 316 | metricSets.push(values); 317 | } 318 | }); 319 | }); 320 | return Object.assign(integration, {metrics: metricSets, inventory: {}, events: []}); 321 | }; 322 | 323 | var v2Payload = function(v2data) { 324 | var integration = Object.assign({}, defaultIntegration); 325 | var entitiesData = []; 326 | 327 | Object.keys(v2data).forEach(function(entityId) { 328 | var metrics = v2data[entityId].metrics; 329 | var metricSets = []; 330 | Object.keys(metrics).forEach(function(eventType) { 331 | var entityName = v2data[entityId].entity.type + ':' + v2data[entityId].entity.name; 332 | var displayName = v2data[entityId].entity.name; 333 | var values = metrics[eventType]; 334 | var metricsLength = Object.keys(values).length; 335 | if (metricsLength > metricsLimit) { 336 | if (sendLimitErrors) { 337 | metricSets.push( 338 | { 339 | event_type: "StatsdLimitErrorSample", 340 | entityName: entityName, 341 | displayName: displayName, 342 | numberOfMetrics: metricsLength, 343 | configuredLimit: metricsLimit 344 | } 345 | ); 346 | } 347 | if (debug) { 348 | globalLogger.log("The event has more than " + metricsLimit + " metrics and can't be processed. 
Metrics length: " + metricsLength); 349 | } 350 | } else { 351 | var event = Object.assign({ event_type: eventType }, values); 352 | Object.assign(event, 353 | { entityName: entityName, 354 | displayName: displayName 355 | }); 356 | metricSets.push(event); 357 | } 358 | }); 359 | 360 | entitiesData.push({ 361 | entity: v2data[entityId].entity, 362 | metrics: metricSets, 363 | events: [], 364 | inventory: {} 365 | }); 366 | }); 367 | 368 | return Object.assign({}, integration, { protocol_version: '2', data: entitiesData }); 369 | }; 370 | 371 | return v1Payload(data); 372 | }; 373 | 374 | var flushMetrics = function nriFlush(timestamp, rawMetrics) { 375 | if (rules.length === 0 && debug) { 376 | globalLogger.log("There are not rules configured for backend 'newrelic'. Without rules, we can not know how to process and send StatsD metrics to New Relic Infrastructure."); 377 | } 378 | var metricsByEntity = collectMetrics(rawMetrics, rules); 379 | var payload = composePayload(metricsByEntity); 380 | 381 | if ((payload.metrics && payload.metrics.length > 0) || (payload.data && payload.data.length > 0)) { 382 | sendPayload(host, port, JSON.stringify(payload)); 383 | } 384 | 385 | return payload; 386 | }; 387 | 388 | var backendStatus = function nriBackendStatus(writeCb) { 389 | Object.keys(nriStats).forEach(function(stat) { 390 | writeCb(null, 'newrelic', stat, nriStats[stat]); 391 | }); 392 | }; 393 | 394 | exports.indexForMetric = indexForMetric; 395 | 396 | exports.init = function nriInitBackend(startupTime, config, events, logger) { 397 | debug = config.debug; 398 | if (logger == null) { 399 | globalLogger = require('util'); 400 | } else { 401 | globalLogger = logger; 402 | } 403 | 404 | if (config.newrelic) { 405 | host = config.newrelic.host || 'localhost'; 406 | port = parseInt(config.newrelic.port, 10) || 8001; 407 | rules = config.newrelic.rules || []; 408 | metricsLimit = Number(config.newrelic.metricsLimit || 150) 409 | if (config.newrelic.sendLimitErrors == 
null) { 410 | sendLimitErrors = true; 411 | } else { 412 | sendLimitErrors = config.newrelic.sendLimitErrors; 413 | } 414 | } 415 | 416 | nriStats.last_flush = startupTime; 417 | nriStats.last_exception = startupTime; 418 | 419 | events.on('flush', flushMetrics); 420 | events.on('status', backendStatus); 421 | return true; 422 | }; 423 | --------------------------------------------------------------------------------