├── package.json ├── .github └── workflows │ ├── node.js.yml │ └── bump.yml ├── LICENSE ├── .gitignore ├── lib ├── splunkdriver.test.js ├── splunkdriver.metrics.test.js └── splunkdriver.js └── README.md /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "author": "Eric Miller", 3 | "description": "A backend for StatsD to emit stats to Splunk HTTP Event Collector", 4 | "name": "splunk-statsd-backend", 5 | "version": "0.2.17", 6 | "homepage": "https://github.com/emiller42/splunk-statsd-backend", 7 | "repository": { 8 | "type": "git", 9 | "url": "https://github.com/emiller42/splunk-statsd-backend.git" 10 | }, 11 | "main": "lib/splunkdriver.js", 12 | "dependencies": { 13 | "axios": "^1.6.0" 14 | }, 15 | "devDependencies": { 16 | "jest": "^29.7.0" 17 | }, 18 | "engines": { 19 | "node": "*" 20 | }, 21 | "scripts": { 22 | "test": "jest" 23 | }, 24 | "keywords": [ 25 | "metrics", 26 | "statsd", 27 | "splunk" 28 | ], 29 | "license": "MIT", 30 | "jest": { 31 | "clearMocks": true 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /.github/workflows/node.js.yml: -------------------------------------------------------------------------------- 1 | # This workflow will do a clean install of node dependencies, build the source code and run tests across different versions of node 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-nodejs-with-github-actions 3 | 4 | name: Node.js CI 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | strategy: 18 | matrix: 19 | node-version: [18.x, 20.x, 21.x] 20 | # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ 21 | 22 | steps: 23 | - uses: actions/checkout@v2 24 | - name: Use Node.js ${{ matrix.node-version }} 25 | uses: actions/setup-node@v2 26 | with: 27 | 
node-version: ${{ matrix.node-version }} 28 | - run: npm ci 29 | - run: npm run build --if-present 30 | - run: npm test 31 | -------------------------------------------------------------------------------- /.github/workflows/bump.yml: -------------------------------------------------------------------------------- 1 | name: 'Bump Version' 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'main' 7 | 8 | jobs: 9 | bump-version: 10 | name: 'Bump Version on main' 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: 'Checkout source code' 15 | uses: 'actions/checkout@v2' 16 | with: 17 | ref: ${{ github.ref }} 18 | - name: 'cat package.json' 19 | run: cat ./package.json 20 | - name: 'Automated Version Bump' 21 | id: version-bump 22 | uses: 'phips28/gh-action-bump-version@master' 23 | with: 24 | tag-prefix: 'v' 25 | commit-message: 'CI: bumps version to {{version}} [skip ci]' 26 | env: 27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 28 | - name: 'cat package.json' 29 | run: cat ./package.json 30 | - name: 'Output Step' 31 | env: 32 | NEW_TAG: ${{ steps.version-bump.outputs.newTag }} 33 | run: echo "new tag $NEW_TAG" 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Eric Miller 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # File created using '.gitignore Generator' for Visual Studio Code: https://bit.ly/vscode-gig 2 | 3 | # Created by https://www.toptal.com/developers/gitignore/api/visualstudiocode,macos,node 4 | # Edit at https://www.toptal.com/developers/gitignore?templates=visualstudiocode,macos,node 5 | 6 | ### macOS ### 7 | # General 8 | .DS_Store 9 | .AppleDouble 10 | .LSOverride 11 | 12 | # Icon must end with two \r 13 | Icon 14 | 15 | 16 | # Thumbnails 17 | ._* 18 | 19 | # Files that might appear in the root of a volume 20 | .DocumentRevisions-V100 21 | .fseventsd 22 | .Spotlight-V100 23 | .TemporaryItems 24 | .Trashes 25 | .VolumeIcon.icns 26 | .com.apple.timemachine.donotpresent 27 | 28 | # Directories potentially created on remote AFP share 29 | .AppleDB 30 | .AppleDesktop 31 | Network Trash Folder 32 | Temporary Items 33 | .apdisk 34 | 35 | ### Node ### 36 | # Logs 37 | logs 38 | *.log 39 | npm-debug.log* 40 | yarn-debug.log* 41 | yarn-error.log* 42 | lerna-debug.log* 43 | 44 | # Diagnostic reports (https://nodejs.org/api/report.html) 45 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 46 | 47 | # Runtime data 48 | pids 49 | *.pid 50 | *.seed 51 | *.pid.lock 52 | 53 | # Directory for instrumented libs generated by jscoverage/JSCover 54 | lib-cov 55 | 56 | # Coverage directory used by tools like istanbul 57 
| coverage 58 | *.lcov 59 | 60 | # nyc test coverage 61 | .nyc_output 62 | 63 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 64 | .grunt 65 | 66 | # Bower dependency directory (https://bower.io/) 67 | bower_components 68 | 69 | # node-waf configuration 70 | .lock-wscript 71 | 72 | # Compiled binary addons (https://nodejs.org/api/addons.html) 73 | build/Release 74 | 75 | # Dependency directories 76 | node_modules/ 77 | jspm_packages/ 78 | 79 | # TypeScript v1 declaration files 80 | typings/ 81 | 82 | # TypeScript cache 83 | *.tsbuildinfo 84 | 85 | # Optional npm cache directory 86 | .npm 87 | 88 | # Optional eslint cache 89 | .eslintcache 90 | 91 | # Optional stylelint cache 92 | .stylelintcache 93 | 94 | # Microbundle cache 95 | .rpt2_cache/ 96 | .rts2_cache_cjs/ 97 | .rts2_cache_es/ 98 | .rts2_cache_umd/ 99 | 100 | # Optional REPL history 101 | .node_repl_history 102 | 103 | # Output of 'npm pack' 104 | *.tgz 105 | 106 | # Yarn Integrity file 107 | .yarn-integrity 108 | 109 | # dotenv environment variables file 110 | .env 111 | .env.test 112 | .env*.local 113 | 114 | # parcel-bundler cache (https://parceljs.org/) 115 | .cache 116 | .parcel-cache 117 | 118 | # Next.js build output 119 | .next 120 | 121 | # Nuxt.js build / generate output 122 | .nuxt 123 | dist 124 | 125 | # Storybook build outputs 126 | .out 127 | .storybook-out 128 | storybook-static 129 | 130 | # rollup.js default build output 131 | dist/ 132 | 133 | # Gatsby files 134 | .cache/ 135 | # Comment in the public line in if your project uses Gatsby and not Next.js 136 | # https://nextjs.org/blog/next-9-1#public-directory-support 137 | # public 138 | 139 | # vuepress build output 140 | .vuepress/dist 141 | 142 | # Serverless directories 143 | .serverless/ 144 | 145 | # FuseBox cache 146 | .fusebox/ 147 | 148 | # DynamoDB Local files 149 | .dynamodb/ 150 | 151 | # TernJS port file 152 | .tern-port 153 | 154 | # Stores VSCode versions used for testing VSCode 
extensions 155 | .vscode-test 156 | 157 | # Temporary folders 158 | tmp/ 159 | temp/ 160 | 161 | ### VisualStudioCode ### 162 | .vscode/* 163 | !.vscode/settings.json 164 | !.vscode/tasks.json 165 | !.vscode/launch.json 166 | !.vscode/extensions.json 167 | *.code-workspace 168 | 169 | ### VisualStudioCode Patch ### 170 | # Ignore all local history of files 171 | .history 172 | .ionide 173 | 174 | # End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,macos,node 175 | 176 | # Custom rules (everything added below won't be overriden by 'Generate .gitignore File' if you use 'Update' option) 177 | 178 | -------------------------------------------------------------------------------- /lib/splunkdriver.test.js: -------------------------------------------------------------------------------- 1 | const splunkdriver = require('./splunkdriver.js'); 2 | const events = require('events'); 3 | const axios = require('axios'); 4 | 5 | jest.mock('../node_modules/axios'); 6 | axios.post = jest.fn().mockResolvedValue({}); 7 | 8 | const emitter = new events.EventEmitter() 9 | 10 | describe('legacy json events', () => { 11 | test('successful init', () => { 12 | const config = { 13 | splunk: { 14 | splunkToken: 'mySplunkToken' 15 | } 16 | }; 17 | expect(splunkdriver.init(Date.now(), config, emitter, console)).toBeTruthy(); 18 | }); 19 | 20 | test('empty flush', () => { 21 | flush({}); 22 | expect(axios.post).toHaveBeenCalledTimes(1); 23 | expect(axios.post).toHaveBeenCalledWith('https://127.0.0.1:8088/services/collector/event', expect.any(String), { 24 | headers: { 25 | Authorization: 'Splunk mySplunkToken' 26 | } 27 | }); 28 | }); 29 | 30 | test('flush with counters', () => { 31 | const metrics = { 32 | counter_rates: { 33 | "foo": 10.0, 34 | "bar": 15.0, 35 | }, 36 | counters: { 37 | "foo": 100, 38 | "bar": 150, 39 | }, 40 | }; 41 | flush(metrics); 42 | expect(axios.post).toHaveBeenCalledTimes(1); 43 | const body = axios.post.mock.calls[0][1]; 44 | 
expect(body).toEqual(expectedCounter("foo", 100, 10)); 45 | expect(body).toEqual(expectedCounter("bar", 150, 15.0)); 46 | }); 47 | 48 | test('flush with gauges', () => { 49 | const metrics = { 50 | gauges: { 51 | "foo": 90, 52 | "bar": 102, 53 | "baz": 2.5, 54 | }, 55 | }; 56 | flush(metrics); 57 | expect(axios.post).toHaveBeenCalledTimes(1); 58 | const body = axios.post.mock.calls[0][1]; 59 | expect(body).toEqual(expectedGauge("foo", 90)); 60 | expect(body).toEqual(expectedGauge("bar", 102)); 61 | expect(body).toEqual(expectedGauge("baz", 2.5)); 62 | }); 63 | 64 | test('flush with sets', () => { 65 | const metrics = { 66 | sets: { 67 | foo: { 68 | size() { 69 | return 3 70 | }, 71 | }, 72 | bar: { 73 | size() { 74 | return 1 75 | }, 76 | } 77 | } 78 | }; 79 | 80 | flush(metrics); 81 | expect(axios.post).toHaveBeenCalledTimes(1); 82 | const body = axios.post.mock.calls[0][1]; 83 | expect(body).toEqual(expectedSet("foo", 3)); 84 | expect(body).toEqual(expectedSet("bar", 1)); 85 | }); 86 | 87 | test('flush with timers', () => { 88 | const data = { 89 | std: 81.64965809277261, 90 | upper: 300, 91 | lower: 100, 92 | count: 3, 93 | count_ps: 30, 94 | sum: 600, 95 | sum_squares: 140000, 96 | mean: 200, 97 | median: 200, 98 | }; 99 | 100 | const metrics = { 101 | timer_data: { 102 | "foo": data 103 | } 104 | }; 105 | 106 | flush(metrics); 107 | expect(axios.post).toHaveBeenCalledTimes(1); 108 | const body = axios.post.mock.calls[0][1]; 109 | data.metricType = "timer"; 110 | data.metricName = "foo"; 111 | expect(body).toEqual(expectedMetric(data)); 112 | 113 | }); 114 | }); 115 | 116 | 117 | function flush(metrics, eventEmitter = emitter) { 118 | eventEmitter.emit('flush', Date.now(), metrics); 119 | } 120 | 121 | function expectedMetric(metric) { 122 | return expect.stringContaining(JSON.stringify(metric)); 123 | } 124 | 125 | function expectedSet(metricName, count, metricType = "set") { 126 | return expectedMetric({ 127 | count, 128 | metricType, 129 | metricName, 130 | 
}); 131 | } 132 | 133 | function expectedCounter(metricName, count, rate, metricType = "counter") { 134 | return expectedMetric({ 135 | rate, 136 | count, 137 | metricType, 138 | metricName, 139 | }); 140 | } 141 | 142 | function expectedGauge(metricName, value, metricType = "gauge") { 143 | return expectedMetric({ 144 | value, 145 | metricType, 146 | metricName, 147 | }); 148 | } -------------------------------------------------------------------------------- /lib/splunkdriver.metrics.test.js: -------------------------------------------------------------------------------- 1 | const splunkdriver = require('./splunkdriver.js'); 2 | const events = require('events'); 3 | const axios = require('axios'); 4 | 5 | jest.mock('../node_modules/axios'); 6 | axios.post = jest.fn().mockResolvedValue({}); 7 | 8 | const timestamp = Date.now(); 9 | Date.now = jest.fn(() => timestamp); 10 | 11 | const emitter = new events.EventEmitter(); 12 | 13 | describe('Splunk Metrics events', () => { 14 | test('successful init', () => { 15 | const config = { 16 | splunk: { 17 | splunkToken: 'mySplunkToken', 18 | useMetrics: true, 19 | } 20 | }; 21 | expect(splunkdriver.init(Date.now(), config, emitter, console)).toBeTruthy(); 22 | 23 | }); 24 | 25 | test('empty flush', () => { 26 | flush({}); 27 | expect(axios.post).toHaveBeenCalledTimes(1); 28 | expect(axios.post).toHaveBeenCalledWith('https://127.0.0.1:8088/services/collector', expect.any(String), { 29 | headers: { 30 | Authorization: 'Splunk mySplunkToken' 31 | } 32 | }); 33 | }); 34 | 35 | test('flush with counters', () => { 36 | const metrics = { 37 | counter_rates: { 38 | "foo": 10.0, 39 | "bar": 15.0, 40 | }, 41 | counters: { 42 | "foo": 100, 43 | "bar": 150, 44 | }, 45 | }; 46 | 47 | const expected = { 48 | time: timestamp, 49 | event: 'metric', 50 | source: 'statsd', 51 | sourcetype: '_json', 52 | fields: { 53 | metric_type: 'counter', 54 | 'metric_name:foo.rate': 10, 55 | 'metric_name:foo.count': 100, 56 | 'metric_name:bar.rate': 
15, 57 | 'metric_name:bar.count': 150 58 | } 59 | }; 60 | 61 | flush(metrics); 62 | const parsedBody = parseBody(axios.post.mock.calls[0][1]); 63 | expect(parsedBody).toContainEqual(expected); 64 | }); 65 | 66 | test('flush with gauges', () => { 67 | const metrics = { 68 | gauges: { 69 | "foo": 90, 70 | "bar": 102, 71 | "baz": 2.5, 72 | }, 73 | }; 74 | const expected = { 75 | time: timestamp, 76 | event: 'metric', 77 | source: 'statsd', 78 | sourcetype: '_json', 79 | fields: { 80 | metric_type: 'gauge', 81 | 'metric_name:foo': 90, 82 | 'metric_name:bar': 102, 83 | 'metric_name:baz': 2.5 84 | } 85 | }; 86 | 87 | flush(metrics); 88 | const parsedBody = parseBody(axios.post.mock.calls[0][1]); 89 | expect(parsedBody).toContainEqual(expected); 90 | }); 91 | 92 | test('flush with sets', () => { 93 | const metrics = { 94 | sets: { 95 | foo: { 96 | size() { 97 | return 3 98 | }, 99 | }, 100 | bar: { 101 | size() { 102 | return 1 103 | }, 104 | } 105 | } 106 | }; 107 | 108 | const expected = { 109 | time: timestamp, 110 | event: 'metric', 111 | source: 'statsd', 112 | sourcetype: '_json', 113 | fields: { 114 | metric_type: 'set', 115 | 'metric_name:foo': 3, 116 | 'metric_name:bar': 1 117 | } 118 | }; 119 | 120 | flush(metrics); 121 | const parsedBody = parseBody(axios.post.mock.calls[0][1]); 122 | expect(parsedBody).toContainEqual(expected); 123 | }); 124 | 125 | test('flush with timers', () => { 126 | const data = { 127 | std: 81.64965809277261, 128 | upper: 300, 129 | lower: 100, 130 | count: 3, 131 | count_ps: 30, 132 | sum: 600, 133 | sum_squares: 140000, 134 | mean: 200, 135 | median: 200, 136 | }; 137 | 138 | const metrics = { 139 | timer_data: { 140 | "foo": data 141 | } 142 | }; 143 | 144 | const expected = { 145 | time: timestamp, 146 | event: 'metric', 147 | source: 'statsd', 148 | sourcetype: '_json', 149 | fields: { 150 | metric_type: 'timer', 151 | 'metric_name:foo.std': data.std, 152 | 'metric_name:foo.upper': data.upper, 153 | 'metric_name:foo.lower': 
data.lower, 154 | 'metric_name:foo.count': data.count, 155 | 'metric_name:foo.count_ps': data.count_ps, 156 | 'metric_name:foo.sum': data.sum, 157 | 'metric_name:foo.sum_squares': data.sum_squares, 158 | 'metric_name:foo.mean': data.mean, 159 | 'metric_name:foo.median': data.median, 160 | } 161 | }; 162 | 163 | flush(metrics); 164 | const parsedBody = parseBody(axios.post.mock.calls[0][1]); 165 | expect(parsedBody).toContainEqual(expected); 166 | 167 | }); 168 | }); 169 | 170 | function parseBody(body) { 171 | return JSON.parse(`[${body.replace(/}{/g, '},{')}]`); 172 | } 173 | 174 | function flush(metrics, eventEmitter = emitter) { 175 | eventEmitter.emit('flush', Date.now(), metrics); 176 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | splunk-statsd-backend 2 | ===================== 3 | Backend plugin for [statsd](https://github.com/statsd/statsd) to output metrics to [Splunk](https://www.splunk.com) HTTP Event Collector (HEC) 4 | 5 | # Installation 6 | ```bash 7 | $ cd /path/to/statsd/install 8 | $ npm install splunk-statsd-backend 9 | ``` 10 | 11 | # Configuration 12 | ```js 13 | { 14 | backends: ['splunk-statsd-backend', 'other-backends'], 15 | splunk: { 16 | splunkHost: '127.0.0.1', // the hostname of the Splunk Collector you wish to send metrics (default: 127.0.0.1) 17 | splunkPort: 8088, // port that the event collector is listening on (Default: 8088) 18 | useSSL: true, // HEC is using SSL (Default: true) 19 | strictSSL: true, // Should collectd should validate ssl certificates. Set to false if Splunk is using self-signed certs. 
(Default: true) 20 | splunkToken: 'abcde', // HEC token for authentication with Splunk (required) 21 | // the following are somewhat equivalent to the 'prefix*' options for the graphite backend 22 | timerLabel: 'timer', // Label applied to all timer metrics (default: 'timer') 23 | counterLabel: 'counter', // Label applied to all counter metrics (default: 'counter') 24 | gaugeLabel: 'gauge', // Label applied to all gauge metrics (default: 'gauge') 25 | setLabel: 'set', // Label applied to all set metrics (default: 'set') 26 | // the following populate splunk-specific fields 27 | host: 'foo', // Specify a 'host' value for the events sent to Splunk. Leave unset to let Splunk infer this value. 28 | source: 'statsd', // Specify a 'source' value for the events sent to Splunk. (default: statsd) 29 | sourcetype: '_json', // Specify a 'sourcetype' value for the events sent to Splunk. (default: _json) 30 | index: 'main', // Specify the target index for the events sent to Splunk. Leave unset to let Splunk control destination index. 31 | useMetrics: false // Send data in Splunk Metrics format. (default: false) 32 | } 33 | } 34 | ``` 35 | 36 | # Implementation Details and Examples (JSON formatted events) 37 | This backend will transform statsd metrics into a format suitable for batch collection by the Splunk HTTP Event Collector. Further, the events are properly formed JSON, allowing ['Indexed Extractions'](http://dev.splunk.com/view/event-collector/SP-CAAAFB6) to be applied out of the box. All metrics are sent in a single HTTP POST request to the collector. 38 | 39 | A batch event follows this format: 40 | ```js 41 | { "time": , "source": "my_source", "sourcetype": "my_sourcetype", "index": "my_index", "event": {...event payload...} } 42 | ``` 43 | 44 | Where the event payload will contain all relevant fields for the metrics. (Examples further down) 45 | 46 | ## Field Names 47 | * `metricType` will be set according to the *Label fields. ('timer', 'counter', etc.) 
48 | * `metricName` will be a direct passthrough of the metric name provided to statsd. (`my.counter:123|c` sets `metricName = "my.counter"`) 49 | * Other event field names are derived from the stats they represent. 50 | 51 | ## Example Counter 52 | ```js 53 | { 54 | "event": { 55 | "rate": 1704.6, 56 | "count": 17046, 57 | "metricType": "counter", 58 | "metricName": "foo.requests" 59 | }, 60 | "time": 1485314310, 61 | "source": "statsd", 62 | "sourcetype": "_json" 63 | } 64 | ``` 65 | 66 | ## Example Timer (with Histogram) 67 | ```js 68 | { 69 | "event": { 70 | "count_90": 304, 71 | "mean_90": 143.07236842105263, 72 | "upper_90": 280, 73 | "sum_90": 43494, 74 | "sum_squares_90": 8083406, 75 | "std": 86.5952973729948, 76 | "upper": 300, 77 | "lower": 1, 78 | "count": 338, 79 | "count_ps": 33.8, 80 | "sum": 53402, 81 | "sum_squares": 10971776, 82 | "mean": 157.9940828402367, 83 | "median": 157.5, 84 | "histogram": { 85 | "bin_50": 49, 86 | "bin_100": 45, 87 | "bin_150": 66, 88 | "bin_200": 60, 89 | "bin_inf": 118 90 | }, 91 | "metricType": "timer", 92 | "metricName": "foo.duration" 93 | }, 94 | "time": 1485314310, 95 | "source": "statsd", 96 | "sourcetype": "_json" 97 | } 98 | ``` 99 | 100 | ## Example Gauge 101 | ```js 102 | { 103 | "event": { 104 | "value": 2, 105 | "metricType": "gauge", 106 | "metricName": "foo.pct_util" 107 | }, 108 | "time": 1485314310, 109 | "source": "statsd", 110 | "sourcetype": "_json" 111 | } 112 | ``` 113 | 114 | ## Example Set 115 | ```js 116 | { 117 | "event": { 118 | "count": 98, 119 | "metricType": "set", 120 | "metricName": "foo.uniques" 121 | }, 122 | "time": 1485314310, 123 | "source": "statsd", 124 | "sourcetype": "_json" 125 | } 126 | ``` 127 | 128 | # Implementation Details and Examples (Splunk Metrics) 129 | when setting `useMetrics: true` in your config, the backend will format StatsD metrics in a way suitable for ingestion as [Splunk Metrics](https://docs.splunk.com/Documentation/Splunk/8.2.0/Metrics/Overview). 
All metrics of a given type will be included in a single event object using the [multiple-metric JSON format](https://docs.splunk.com/Documentation/Splunk/8.2.0/Metrics/GetMetricsInOther#The_multiple-metric_JSON_format) and all objects will be sent in a single POST request to the collector. 130 | 131 | The event object follows this format: 132 | ```js 133 | { 134 | "time": "", 135 | "event": "metric", 136 | "host": "", 137 | "source": "", 138 | "sourcetype": "", 139 | "index": "", 140 | "fields": { 141 | "metric_type": "", 142 | // repeated metrics 143 | "metric_name:": "" 144 | } 145 | } 146 | ``` 147 | 148 | ## Notes on Sourcetype 149 | 150 | Splunk has built-in handling of some sourcetypes when processing metrics. The [`statsd` sourcetype](https://docs.splunk.com/Documentation/Splunk/8.2.0/Metrics/GetMetricsInStatsd) in particular is used to send raw StatsD data directly to Splunk, rather than to a StatsD server for processing. It is recommended that you avoid these built-in sourcetypes when using this backend. 151 | 152 | ## Field Names 153 | 154 | * The `metric_type` dimension will be set according to the *Label fields (`timer`, `counter`, etc) 155 | * In cases where the metric has a single value (gauges, sets) the metric name will be a direct passthrough of the metric name provided by StatsD. (`my.gauge:97|g` becomes `"metric_name:my.gauge": 97`) 156 | * In cases where the metric has multiple values (counters, timers) the specific measurement will be appended following dot-notation. (`my.counter:123|c` sets `metric_name:my.counter.count` and `metric_name:my.counter.rate`) 157 | 158 | ## Example Counters 159 | 160 | ```js 161 | { 162 | "time": 1485314310, 163 | "event": "metric", 164 | "source": "statsd", 165 | "sourcetype": "_json", 166 | "fields": { 167 | "metric_type": "counter", 168 | "metric_name:foo.count": 17046, 169 | "metric_name:foo.rate": 1704.6, 170 | "metric_name:bar.count": 32567, 171 | "metric_name:bar.rate": 3256.7, 172 | // etc. 
173 | } 174 | } 175 | ``` 176 | 177 | ## Example Timer (with Histogram) 178 | 179 | ```js 180 | { 181 | "time": 1485314310, 182 | "event": "metric", 183 | "source": "statsd", 184 | "sourcetype": "_json", 185 | "fields": { 186 | "metric_type": "timer", 187 | "metric_name:foo.duration.count_90": 304, 188 | "metric_name:foo.duration.mean_90": 143.07236842105263, 189 | "metric_name:foo.duration.upper_90": 280, 190 | "metric_name:foo.duration.sum_90": 43494, 191 | "metric_name:foo.duration.sum_squares_90": 8083406, 192 | "metric_name:foo.duration.std": 86.5952973729948, 193 | "metric_name:foo.duration.upper": 300, 194 | "metric_name:foo.duration.lower": 1, 195 | "metric_name:foo.duration.count": 338, 196 | "metric_name:foo.duration.count_ps": 33.8, 197 | "metric_name:foo.duration.sum": 53402, 198 | "metric_name:foo.duration.sum_squares": 10971776, 199 | "metric_name:foo.duration.mean": 157.9940828402367, 200 | "metric_name:foo.duration.median": 157.5, 201 | "metric_name:foo.duration.histogram.bin_50": 49, 202 | "metric_name:foo.duration.histogram.bin_100": 45, 203 | "metric_name:foo.duration.histogram.bin_150": 66, 204 | "metric_name:foo.duration.histogram.bin_200": 60, 205 | "metric_name:foo.duration.histogram.bin_inf": 118 206 | // etc. 207 | } 208 | } 209 | ``` 210 | 211 | ## Example Gauges 212 | 213 | ```js 214 | { 215 | "time": 1485314310, 216 | "event": "metric", 217 | "source": "statsd", 218 | "sourcetype": "_json", 219 | "fields": { 220 | "metric_type": "gauge", 221 | "metric_name:foo.pct_util": 2, 222 | "metric_name:bar.pct_util": 17, 223 | // etc. 224 | } 225 | } 226 | ``` 227 | 228 | ## Example Sets 229 | 230 | ```js 231 | { 232 | "time": 1485314310, 233 | "event": "metric", 234 | "source": "statsd", 235 | "sourcetype": "_json", 236 | "fields": { 237 | "metric_type": "set", 238 | "metric_name:foo.uniques": 98, 239 | "metric_name:bar.uniques": 127, 240 | // etc. 
241 | } 242 | } 243 | ``` 244 | 245 | # Backend Metrics 246 | The following internal metrics are calculated and emitted under the `splunkStats` metricName 247 | * `calculationTime` - time spent parsing metrics in ms 248 | * `numStats` - The number of metrics processed 249 | * `flush_length` - the length of the event payload sent to Splunk 250 | * `flush_time` - the response time of the POST request to Splunk 251 | * `last_exception` - the timestamp of the last time a POST failed 252 | * `last_flush` - the timestamp of the last flush 253 | 254 | # Running tests 255 | ```sh 256 | $ cd /path/to/splunk-statsd-backend 257 | $ npm install 258 | $ npm test 259 | ``` -------------------------------------------------------------------------------- /lib/splunkdriver.js: -------------------------------------------------------------------------------- 1 | /*jshint node:true, laxcomma:true */ 2 | 3 | /* 4 | * Flush stats to Splunk HEC (http://dev.splunk.com/view/event-collector/SP-CAAAE6M) 5 | * 6 | * backends: ["splunk-statsd-backend"] 7 | * splunk: 8 | * splunkHost: '127.0.0.1', // the hostname of the Splunk Collector you wish to send metrics (default: 127.0.0.1) 9 | * splunkPort: 8088, // port that the event collector is listening on (Default: 8088) 10 | * useSSL: true, // HEC is using SSL (Default: true) 11 | * strictSSL: true, // Should collectd should validate ssl certificates. Set to false if Splunk is using self-signed certs. 
(Default: true) 12 | * splunkToken: 'abcde', // HEC token for authentication with Splunk (required) 13 | * // the following are somewhat equivalent to the 'prefix*' options for the graphite backend 14 | * timerLabel: 'timer', // Label applied to all timer metrics (default: 'timer') 15 | * counterLabel: 'counter', // Label applied to all counter metrics (default: 'counter') 16 | * gaugeLabel: 'gauge', // Label applied to all gauge metrics (default: 'gauge') 17 | * setLabel: 'set', // Label applied to all set metrics (default: 'set') 18 | * // the following populate splunk-specific fields 19 | * host: 'foo', // Specify a 'host' value for the events sent to Splunk. Leave unset to let Splunk infer this value. 20 | * source: 'statsd', // Specify a 'source' value for the events sent to Splunk. (default: statsd) 21 | * sourcetype: '_json', // Specify a 'sourcetype' value for the events sent to Splunk. (default: _json) 22 | * index: 'main' // Specify the target index for the events sent to Splunk. Leave unset to let Splunk control destination index. 23 | * 24 | * This backend has been adapted using the backends provided with the 25 | * main statsd distribution for guidance. 
(https://github.com/etsy/statsd) 26 | */ 27 | 28 | const axios = require('axios'); 29 | const https = require('https'); 30 | 31 | // this will be instantiated to the logger 32 | let l; 33 | 34 | let flushCounts; 35 | let splunkHost; 36 | let splunkPort; 37 | let useSSL; 38 | let strictSSL; 39 | let splunkToken; 40 | let counterLabel; 41 | let timerLabel; 42 | let gaugeLabel; 43 | let setLabel; 44 | let host; 45 | let source; 46 | let sourcetype; 47 | let index; 48 | let prefixStats; 49 | let useMetrics; 50 | 51 | const splunkStats = {}; 52 | 53 | class JsonMetric { 54 | constructor(metricType, key, metrics, ts) { 55 | this.event = metrics; 56 | this.event.metricType = metricType; 57 | this.event.metricName = key; 58 | this.time = ts; 59 | if (host) this.host = host; 60 | if (source) this.source = source; 61 | if (sourcetype) this.sourcetype = sourcetype; 62 | if (index) this.index = index; 63 | } 64 | toString() { 65 | return JSON.stringify(this); 66 | } 67 | } 68 | 69 | class JsonMetrics { 70 | constructor() { 71 | this.metrics = []; 72 | } 73 | addCounters(counters, counterRates, ts) { 74 | for (const counter in counters) { 75 | const metric = { 76 | rate: counterRates[counter] 77 | }; 78 | if (flushCounts) metric.count = counters[counter]; 79 | this.add(counterLabel, counter, metric, ts); 80 | } 81 | } 82 | addTimers(timerData, ts) { 83 | for (const timer in timerData) { 84 | this.add(timerLabel, timer, timerData[timer], ts); 85 | } 86 | } 87 | addGauges(gauges, ts) { 88 | for (const gauge in gauges) { 89 | const metric = { 90 | value: gauges[gauge] 91 | }; 92 | this.add(gaugeLabel, gauge, metric, ts); 93 | } 94 | } 95 | addSets(sets, ts) { 96 | for (const set in sets) { 97 | const metric = { 98 | count: sets[set].size() 99 | }; 100 | this.add(setLabel, set, metric, ts); 101 | } 102 | } 103 | addStatsdMetrics(statsdMetrics, ts) { 104 | this.add(prefixStats, 'statsd', statsdMetrics, ts); 105 | } 106 | addSplunkStats(splunkStats, ts) { 107 | this.add(prefixStats, 
'statsd.splunkStats', splunkStats, ts); 108 | } 109 | add(metricType, key, metrics, ts) { 110 | this.metrics.push(new JsonMetric(metricType, key, metrics, ts)); 111 | } 112 | toString() { 113 | return this.metrics.map((m) => m.toString()).join(''); 114 | } 115 | } 116 | 117 | class SplunkMetric { 118 | constructor(metricType, ts) { 119 | this.time = ts; 120 | this.event = "metric"; 121 | if (host) this.host = host; 122 | if (source) this.source = source; 123 | if (sourcetype) this.sourcetype = sourcetype; 124 | if (index) this.index = index; 125 | this.fields = {}; 126 | this.fields.metric_type = metricType; 127 | } 128 | add(metricName, value) { 129 | this.fields[`metric_name:${metricName}`] = value; 130 | } 131 | toString() { 132 | return JSON.stringify(this); 133 | } 134 | } 135 | 136 | class SplunkMetrics { 137 | constructor() { 138 | this.metrics = {}; 139 | } 140 | addCounters(counters, counterRates, ts) { 141 | const collection = this.getMultiMetricCollection(counterLabel, ts); 142 | for (const counter in counters) { 143 | collection.add(`${counter}.rate`, counterRates[counter]); 144 | if (flushCounts) collection.add(`${counter}.count`, counters[counter]); 145 | } 146 | } 147 | addTimers(timerData, ts) { 148 | const collection = this.getMultiMetricCollection(timerLabel, ts); 149 | for (const timer in timerData) { 150 | for (const [metric, value] of Object.entries(timerData[timer])) { 151 | if (metric == 'histogram') { 152 | for (const bin in value) { 153 | collection.add(`${timer}.histogram.${bin}`, value[bin]); 154 | } 155 | } else { 156 | collection.add(`${timer}.${metric}`, value); 157 | } 158 | } 159 | } 160 | } 161 | addGauges(gauges, ts) { 162 | const collection = this.getMultiMetricCollection(gaugeLabel, ts); 163 | for (const gauge in gauges) { 164 | collection.add(gauge, gauges[gauge]); 165 | } 166 | } 167 | addSets(sets, ts) { 168 | const collection = this.getMultiMetricCollection(setLabel, ts); 169 | for (const set in sets) { 170 | 
collection.add(set, sets[set].size()); 171 | } 172 | } 173 | addStatsdMetrics(statsdMetrics, ts) { 174 | const collection = this.getMultiMetricCollection(prefixStats, ts); 175 | for (const metric in statsdMetrics) { 176 | collection.add(`statsd.${metric}`, statsdMetrics[metric]); 177 | } 178 | } 179 | addSplunkStats(splunkStats, ts) { 180 | const collection = this.getMultiMetricCollection(prefixStats, ts); 181 | for (const stat in splunkStats) { 182 | collection.add(`statsd.splunkStats.${stat}`, splunkStats[stat]); 183 | } 184 | } 185 | getMultiMetricCollection(metricType, ts) { 186 | if (!this.metrics[metricType]) { 187 | this.metrics[metricType] = new SplunkMetric(metricType, ts); 188 | } 189 | return this.metrics[metricType]; 190 | } 191 | toString() { 192 | return Object.values(this.metrics).map((m) => m.toString()).join(''); 193 | } 194 | } 195 | 196 | function hecOutput(stats, ts) { 197 | stats.addSplunkStats(splunkStats, ts); 198 | const statsPayload = stats.toString(); 199 | const splunkUrl = new URL(`http://${splunkHost}:${splunkPort}`); 200 | if (useSSL) splunkUrl.protocol = 'https'; 201 | splunkUrl.pathname = useMetrics ? 
'/services/collector' : '/services/collector/event'; 202 | const options = { 203 | headers: { 204 | Authorization: `Splunk ${splunkToken}` 205 | } 206 | }; 207 | 208 | if (!strictSSL) { 209 | options.httpsAgent = new https.Agent({ 210 | rejectUnauthorized: false 211 | }); 212 | } 213 | 214 | const starttime = Date.now(); 215 | axios.post(splunkUrl.href, statsPayload, options) 216 | .catch((error) => { 217 | splunkStats.last_exception = Math.round(Date.now() / 1000); 218 | l.log(error, 'ERROR'); 219 | }) 220 | .finally(() => { 221 | splunkStats.flush_time = (Date.now() - starttime); 222 | splunkStats.flush_length = statsPayload.length; 223 | splunkStats.last_flush = Math.round(Date.now() / 1000); 224 | }); 225 | } 226 | 227 | function flushStats(ts, metrics) { 228 | const starttime = Date.now(); 229 | let numStats = 0; 230 | const counters = metrics.counters; 231 | const gauges = metrics.gauges; 232 | const sets = metrics.sets; 233 | const counterRates = metrics.counter_rates; 234 | const timerData = metrics.timer_data; 235 | const statsdMetrics = metrics.statsd_metrics; 236 | /* unused metrics fields: 237 | * metrics.timers - raw timer data 238 | * metrics.timer_counters - number of datapoints in each timer. (Equivalent to timer_data[timer].count) 239 | * metrics.pct_threshold - equivalent to config.percentThreshold of timers 240 | */ 241 | 242 | const stats = useMetrics ? 
new SplunkMetrics() : new JsonMetrics(); 243 | 244 | if (counters) { 245 | stats.addCounters(counters, counterRates, ts); 246 | numStats += Object.keys(counters).length; 247 | } 248 | 249 | if (timerData) { 250 | stats.addTimers(timerData, ts); 251 | numStats += Object.keys(timerData).length; 252 | } 253 | 254 | if (gauges) { 255 | stats.addGauges(gauges, ts); 256 | numStats += Object.keys(gauges).length; 257 | } 258 | 259 | if (sets) { 260 | stats.addSets(sets, ts); 261 | numStats += Object.keys(sets).length; 262 | } 263 | 264 | splunkStats.numStats = numStats; 265 | splunkStats.calculationTime = (Date.now() - starttime); 266 | 267 | if (statsdMetrics) { 268 | stats.addStatsdMetrics(statsdMetrics, ts); 269 | } 270 | 271 | hecOutput(stats, ts); 272 | } 273 | 274 | function splunkInit(startup_time, config, events, logger) { 275 | l = logger; 276 | splunkHost = config.splunk.splunkHost || '127.0.0.1'; 277 | splunkPort = config.splunk.splunkPort || 8088; 278 | useSSL = typeof (config.splunk.useSSL) === 'undefined' ? true : config.splunk.useSSL; 279 | strictSSL = typeof (config.splunk.strictSSL) === 'undefined' ? true : config.splunk.strictSSL; 280 | splunkToken = config.splunk.splunkToken; 281 | counterLabel = config.splunk.counterLabel || 'counter'; 282 | timerLabel = config.splunk.timerLabel || 'timer'; 283 | gaugeLabel = config.splunk.gaugeLabel || 'gauge'; 284 | setLabel = config.splunk.setLabel || 'set'; 285 | source = config.splunk.source || 'statsd'; 286 | sourcetype = config.splunk.sourcetype || '_json'; 287 | host = config.splunk.host; 288 | index = config.splunk.index; 289 | flushCounts = typeof (config.flush_counts) === "undefined" ? true : config.flush_counts; 290 | prefixStats = config.prefixStats || 'statsd'; 291 | useMetrics = typeof (config.splunk.useMetrics) === 'undefined' ? 
false : config.splunk.useMetrics; 292 | 293 | splunkStats.last_flush = startup_time; 294 | splunkStats.last_exception = startup_time; 295 | splunkStats.flush_time = 0; 296 | splunkStats.flush_length = 0; 297 | 298 | if (useMetrics && sourcetype == 'statsd') { 299 | l.log('You are attempting to send metrics events with sourcetype=statsd. ' + 300 | 'Splunk attempts to process these events as raw StatsD input, ' + 301 | 'which will *not* behave as expected. (https://splk.it/34HZe2k)', 'WARN') 302 | } 303 | 304 | events.on('flush', flushStats); 305 | //events.on('status', backend_status); 306 | 307 | return true; 308 | } 309 | 310 | exports.init = splunkInit; --------------------------------------------------------------------------------