├── .npmrc ├── .gitignore ├── .github ├── labeler-config.yml └── workflows │ ├── labeler.yml │ ├── opentelemetry.yml │ ├── lint.yml │ └── test.yml ├── test ├── fixtures │ ├── ecs-container-metadata.json │ ├── cgroup │ └── cgroup_result.js ├── lib │ ├── unref-client.js │ ├── call-me-back-maybe.js │ └── utils.js ├── metadata-filter.test.js ├── extraMetadata.test.js ├── abort.test.js ├── backoff-delay.test.js ├── stringify.test.js ├── expectExtraMetadata.test.js ├── writev.test.js ├── side-effects.test.js ├── central-config.test.js ├── lambda-usage.test.js ├── apm-server-version.test.js ├── k8s.test.js ├── container-info.test.js ├── truncate.test.js ├── basic.test.js ├── config.test.js └── edge-cases.test.js ├── scripts ├── license-header.js └── run-tests.js ├── lib ├── ndjson.js ├── logging.js ├── central-config.js ├── detect-hostname.js ├── container-info.js └── truncate.js ├── .eslintrc.json ├── LICENSE ├── NOTICE.md ├── package.json ├── Makefile └── CHANGELOG.md /.npmrc: -------------------------------------------------------------------------------- 1 | package-lock=false 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | /package-lock.json 3 | /.nyc_output 4 | /node_modules 5 | /target 6 | /tmp 7 | -------------------------------------------------------------------------------- /.github/labeler-config.yml: -------------------------------------------------------------------------------- 1 | # add 'agent-nodejs' label to all new issues 2 | agent-nodejs: 3 | - '.*' 4 | -------------------------------------------------------------------------------- /test/fixtures/ecs-container-metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "ContainerID": "34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376" 3 | } 4 | -------------------------------------------------------------------------------- /scripts/license-header.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 
5 | */ 6 | -------------------------------------------------------------------------------- /.github/workflows/labeler.yml: -------------------------------------------------------------------------------- 1 | name: "Issue Labeler" 2 | on: 3 | issues: 4 | types: [opened] 5 | pull_request_target: 6 | types: [opened] 7 | 8 | jobs: 9 | triage: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: AlexanderWert/issue-labeler@v2.3 13 | with: 14 | repo-token: "${{ secrets.GITHUB_TOKEN }}" 15 | configuration-path: .github/labeler-config.yml 16 | enable-versioned-regex: 0 17 | -------------------------------------------------------------------------------- /.github/workflows/opentelemetry.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: OpenTelemetry Export Trace 3 | 4 | on: 5 | workflow_run: 6 | workflows: 7 | - Lint 8 | - Test 9 | types: [completed] 10 | 11 | jobs: 12 | otel-export-trace: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: elastic/apm-pipeline-library/.github/actions/opentelemetry@current 16 | with: 17 | vaultUrl: ${{ secrets.VAULT_ADDR }} 18 | vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} 19 | vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} 20 | -------------------------------------------------------------------------------- /lib/ndjson.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const stringify = require('fast-safe-stringify') 10 | 11 | exports.serialize = function serialize (obj) { 12 | const str = tryJSONStringify(obj) || stringify(obj) 13 | return str + '\n' 14 | } 15 | 16 | function tryJSONStringify (obj) { 17 | try { 18 | return JSON.stringify(obj) 19 | } catch (e) {} 20 | } 21 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | # Lint this change with the latest node LTS. 2 | name: Lint 3 | 4 | # https://github.community/t/how-to-trigger-an-action-on-push-or-pull-request-but-not-both/16662/2 5 | on: 6 | push: 7 | branches: 8 | - main 9 | paths-ignore: 10 | - '*.md' 11 | pull_request: 12 | branches: 13 | - main 14 | paths-ignore: 15 | - '*.md' 16 | 17 | jobs: 18 | lint: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v2 22 | - uses: actions/setup-node@v2 23 | with: 24 | node-version: '18' 25 | - run: npm install 26 | - run: npm run lint 27 | -------------------------------------------------------------------------------- /lib/logging.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // Logging utilities for the APM http client. 10 | 11 | // A logger that does nothing and supports enough of the pino API 12 | // (https://getpino.io/#/docs/api?id=logger) for use as a fallback in 13 | // this package. 
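Aside: the `NoopLogger` below implements just enough of the pino logger surface (trace/debug/info/warn/error/fatal, `child`, `isLevelEnabled`) to stand in when no real logger is configured; the client does accept a pino-style `logger` option (visible, commented out, in test/lib/unref-client.js later in this dump). A minimal sketch of that fallback pattern — `resolveLogger` is a hypothetical helper for illustration, not code from this repo:

```js
const { NoopLogger } = require('./logging')

// Prefer a caller-supplied pino-style logger; otherwise fall back to the
// no-op logger so logging calls are always safe to make.
function resolveLogger (opts) {
  return (opts && opts.logger) ? opts.logger : new NoopLogger()
}

const log = resolveLogger({})                 // no logger given -> NoopLogger
log.debug('this call does nothing')           // silently ignored
console.log(log.isLevelEnabled('trace'))      // false
console.log(log.child({ mod: 'x' }) === log)  // true: child() returns itself
```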
14 | class NoopLogger { 15 | trace () {} 16 | debug () {} 17 | info () {} 18 | warn () {} 19 | error () {} 20 | fatal () {} 21 | child () { return this } 22 | isLevelEnabled (_level) { return false } 23 | } 24 | 25 | module.exports = { 26 | NoopLogger 27 | } 28 | -------------------------------------------------------------------------------- /test/lib/unref-client.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // This is used in test/side-effects.js to ensure that a Client with a 10 | // (sometimes long-lived) request open to APM server does *not* keep a node 11 | // process alive. 12 | 13 | const Client = require('../../') 14 | 15 | const client = new Client({ 16 | // logger: require('pino')({ level: 'trace' }, process.stderr), // uncomment for debugging 17 | serverUrl: process.argv[2], 18 | secretToken: 'secret', 19 | agentName: 'my-agent-name', 20 | agentVersion: 'my-agent-version', 21 | serviceName: 'my-service-name', 22 | userAgent: 'my-user-agent' 23 | }) 24 | 25 | process.stdout.write(String(Date.now()) + '\n') 26 | 27 | client.sendSpan({ hello: 'world' }) // Don't end the stream 28 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "env": { 4 | "node": true 5 | }, 6 | "extends": "standard", 7 | "plugins": [ 8 | "license-header" 9 | ], 10 | "rules": { 11 | "license-header/header": ["error", "./scripts/license-header.js"], 12 | // Regarding "no-var": The opinion of this repository's maintainers is that 13 | // while const/let are useful, the use of `var` is not bad and therefore 14 | // does not need to be ruled out. Eliminating the use of `var` would be a 15 | // large diff that (a) could theoretically cause bugs due to lexical scoping 16 | // changes and (b) could theoretically impact perf (e.g. see 17 | // https://github.com/microsoft/TypeScript/issues/52924). New code MAY 18 | // prefer const/let over `var`. Code in "examples/" MUST use const/let -- 19 | // this is enforced by "examples/.eslintrc.json". 20 | "no-var": "off" 21 | }, 22 | "ignorePatterns": [ 23 | "/*.example.js", // a pattern for uncommited local dev files to avoid linting 24 | "/*.example.mjs", // a pattern for uncommited local dev files to avoid linting 25 | "/.nyc_output" 26 | ] 27 | } 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015-2019 Elasticsearch BV 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | # Test with all supported node versions. 2 | name: Test 3 | 4 | # https://github.community/t/how-to-trigger-an-action-on-push-or-pull-request-but-not-both/16662/2 5 | on: 6 | push: 7 | branches: 8 | - main 9 | paths-ignore: 10 | - '*.md' 11 | pull_request: 12 | branches: 13 | - main 14 | paths-ignore: 15 | - '*.md' 16 | 17 | jobs: 18 | test-vers: 19 | strategy: 20 | fail-fast: false 21 | matrix: 22 | node: ['8.6', '8', '10', '12', '14', '15', '16', '18', '19', '20'] 23 | runs-on: ubuntu-latest 24 | steps: 25 | - uses: actions/checkout@v3 26 | - uses: actions/setup-node@v3 27 | with: 28 | node-version: ${{ matrix.node }} 29 | - run: npm install 30 | - run: npm test 31 | 32 | test-windows: 33 | runs-on: windows-latest 34 | steps: 35 | - uses: actions/checkout@v3 36 | - uses: actions/setup-node@v3 37 | with: 38 | # What Node.js version to test on Windows is a balance between which 39 | # is the current LTS version (https://github.com/nodejs/release) and 40 | # which version more of our users are using. 41 | node-version: 16 42 | - run: npm install 43 | - run: npm test 44 | -------------------------------------------------------------------------------- /lib/central-config.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // Central config-related utilities for the APM http client. 10 | 11 | const INTERVAL_DEFAULT_S = 300 // 5 min 12 | const INTERVAL_MIN_S = 5 13 | const INTERVAL_MAX_S = 86400 // 1d 14 | 15 | /** 16 | * Determine an appropriate delay until the next fetch of Central Config. 17 | * Default to 5 minutes, minimum 5s, max 1d. 18 | * 19 | * The maximum of 1d ensures we don't get surprised by an overflow value to 20 | * `setTimeout` per https://developer.mozilla.org/en-US/docs/Web/API/setTimeout#maximum_delay_value 21 | * 22 | * @param {Number|undefined} seconds - A number of seconds, typically pulled 23 | * from a `Cache-Control: max-age=${seconds}` header on a previous central 24 | * config request. 25 | * @returns {Number} 26 | */ 27 | function getCentralConfigIntervalS (seconds) { 28 | if (typeof seconds !== 'number' || isNaN(seconds) || seconds <= 0) { 29 | return INTERVAL_DEFAULT_S 30 | } 31 | return Math.min(Math.max(seconds, INTERVAL_MIN_S), INTERVAL_MAX_S) 32 | } 33 | 34 | module.exports = { 35 | getCentralConfigIntervalS, 36 | 37 | // These are exported for testing. 
38 | INTERVAL_DEFAULT_S, 39 | INTERVAL_MIN_S, 40 | INTERVAL_MAX_S 41 | } 42 | -------------------------------------------------------------------------------- /test/metadata-filter.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const test = require('tape') 10 | const utils = require('./lib/utils') 11 | 12 | const APMServer = utils.APMServer 13 | const processIntakeReq = utils.processIntakeReq 14 | 15 | test('addMetadataFilter', function (t) { 16 | let theMetadata 17 | 18 | const server = APMServer(function (req, res) { 19 | const objStream = processIntakeReq(req) 20 | let n = 0 21 | objStream.on('data', function (obj) { 22 | if (++n === 1) { 23 | theMetadata = obj.metadata 24 | } 25 | }) 26 | objStream.on('end', function () { 27 | res.statusCode = 202 28 | res.end() 29 | }) 30 | }) 31 | 32 | server.client({ apmServerVersion: '8.0.0' }, function (client) { 33 | client.addMetadataFilter(function (md) { 34 | delete md.process.argv 35 | md.labels = { foo: 'bar' } 36 | return md 37 | }) 38 | 39 | client.sendSpan({ foo: 42 }) 40 | client.flush(function () { 41 | t.ok(theMetadata, 'APM server got metadata') 42 | t.equal(theMetadata.process.argv, undefined, 'metadata.process.argv was removed') 43 | t.equal(theMetadata.labels.foo, 'bar', 'metadata.labels.foo was added') 44 | client.end() 45 | server.close() 46 | t.end() 47 | }) 48 | }) 49 | }) 50 | -------------------------------------------------------------------------------- /test/extraMetadata.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // Test usage of `extraMetadata: ...`. 
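The test below exercises the `extraMetadata` client option: the given object is merged into the metadata event that starts every intake request, with nested fields overriding the detected defaults (as the assertions on `metadata.foo` and `metadata.service.language.name` check). A hedged sketch of the option from a caller's point of view — the `serverUrl` and agent/service identifiers are placeholder values, not taken from the test:

```js
const Client = require('elastic-apm-http-client')

const client = new Client({
  serverUrl: 'http://localhost:8200',
  agentName: 'my-agent',
  agentVersion: '1.0.0',
  serviceName: 'my-service',
  userAgent: 'my-agent/1.0.0',
  // Extra fields to merge into the metadata object that precedes every
  // intake request; nested fields override the detected defaults.
  extraMetadata: {
    foo: 'bar',
    service: { language: { name: 'spam' } }
  }
})

client.sendTransaction({ name: 'GET /', type: 'request' })
client.flush(() => client.end())
```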
10 | 11 | const test = require('tape') 12 | const utils = require('./lib/utils') 13 | 14 | const APMServer = utils.APMServer 15 | const processIntakeReq = utils.processIntakeReq 16 | 17 | test('extraMetadata', function (t) { 18 | const apmEvents = [] 19 | const extraMetadata = { 20 | foo: 'bar', 21 | service: { 22 | language: { 23 | name: 'spam' 24 | } 25 | } 26 | } 27 | 28 | const server = APMServer(function (req, res) { 29 | const objStream = processIntakeReq(req) 30 | objStream.on('data', function (obj) { 31 | apmEvents.push(obj) 32 | }) 33 | objStream.on('end', function () { 34 | res.statusCode = 202 35 | res.end() 36 | }) 37 | }).client({ extraMetadata, apmServerVersion: '8.0.0' }, function (client) { 38 | client.sendTransaction({ req: 1 }) 39 | 40 | client.flush(() => { 41 | t.equal(apmEvents.length, 2, 'APM Server got 2 events') 42 | t.ok(apmEvents[0].metadata, 'event 0 is metadata') 43 | t.equal(apmEvents[0].metadata.foo, 'bar', 'extraMetadata added "foo" field') 44 | t.equal(apmEvents[0].metadata.service.language.name, 'spam', 45 | 'extraMetadata overrode nested service.language.name field properly') 46 | t.ok(apmEvents[1].transaction, 'event 1 is a transaction') 47 | 48 | client.end() 49 | server.close() 50 | t.end() 51 | }) 52 | }) 53 | }) 54 | -------------------------------------------------------------------------------- /test/lib/call-me-back-maybe.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | // A script, used by test/side-effects.js, to test that the client.flush 8 | // callback is called. 9 | // 10 | // We expect both `console.log`s to write their output. 11 | // 12 | // Two important things are required to reproduce the issue: 13 | // 1. There cannot be other activities going on that involve active libuv 14 | // handles. For this Client that means: 15 | // - ensure no `_pollConfig` requests via `centralConfig: false` 16 | // - do not provide a `cloudMetadataFetcher` 17 | // - set `apmServerVersion` to not have an APM Server version fetch request 18 | // 2. There must be a listening APM server to which to send data. 19 | 20 | const Client = require('../../') // elastic-apm-http-client 21 | 22 | const serverUrl = process.argv[2] 23 | 24 | const client = new Client({ 25 | // logger: require('pino')({ level: 'trace', ...require('@elastic/ecs-pino-format')() }, process.stderr), // uncomment for debugging 26 | serverUrl, 27 | serviceName: 'call-me-back-maybe', 28 | agentName: 'my-nodejs-agent', 29 | agentVersion: '1.2.3', 30 | userAgent: 'my-nodejs-agent/1.2.3', 31 | centralConfig: false, // important for repro, see above 32 | apmServerVersion: '8.0.0' // important for repro, see above 33 | }) 34 | 35 | const e = { exception: { message: 'boom', type: 'Error' } } 36 | 37 | client.sendError(e, function sendCb () { 38 | console.log('sendCb called') 39 | client.flush(function flushCb () { 40 | console.log('flushCb called') 41 | }) 42 | }) 43 | -------------------------------------------------------------------------------- /NOTICE.md: -------------------------------------------------------------------------------- 1 | apm-nodejs-http-client 2 | 3 | Copyright 2015-2022 Elasticsearch B.V. 4 | 5 | # Notice 6 | 7 | This project contains several dependencies which have been vendored in 8 | due to a need for minor changes. 
Where possible changes have been 9 | contributed back to the original project. 10 | 11 | ## container-info 12 | 13 | - **path:** [lib/container-info.js](lib/container-info.js) 14 | - **author:** Stephen Belanger 15 | - **project url:** https://github.com/Qard/container-info 16 | - **original file:** https://github.com/Qard/container-info/blob/master/index.js 17 | - **license:** MIT License (MIT), http://opensource.org/licenses/MIT 18 | 19 | ``` 20 | ### Copyright (c) 2018 Stephen Belanger 21 | 22 | #### Licensed under MIT License 23 | 24 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 25 | 26 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 27 | 28 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 29 | ``` 30 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "elastic-apm-http-client", 3 | "version": "12.0.0", 4 | "description": "A low-level HTTP client for communicating with the Elastic APM intake API", 5 | "main": "index.js", 6 | "directories": { 7 | "test": "test" 8 | }, 9 | "files": [ 10 | "lib" 11 | ], 12 | "scripts": { 13 | "lint": "eslint --ext=js,mjs,cjs . # requires node >=16.0.0", 14 | "lint:fix": "eslint --ext=js,mjs,cjs --fix . 
# requires node >=16.0.0", 15 | "test": "nyc node ./scripts/run-tests.js" 16 | }, 17 | "engines": { 18 | "node": "^8.6.0 || 10 || >=12" 19 | }, 20 | "author": "Thomas Watson (https://twitter.com/wa7son)", 21 | "license": "MIT", 22 | "dependencies": { 23 | "agentkeepalive": "^4.2.1", 24 | "breadth-filter": "^2.0.0", 25 | "end-of-stream": "^1.4.4", 26 | "fast-safe-stringify": "^2.0.7", 27 | "fast-stream-to-buffer": "^1.0.0", 28 | "object-filter-sequence": "^1.0.0", 29 | "readable-stream": "^3.4.0", 30 | "semver": "^6.3.0", 31 | "stream-chopper": "^3.0.1" 32 | }, 33 | "devDependencies": { 34 | "eslint": "^8.42.0", 35 | "eslint-config-standard": "^17.1.0", 36 | "eslint-plugin-import": "^2.27.5", 37 | "eslint-plugin-license-header": "^0.6.0", 38 | "eslint-plugin-n": "^16.0.1", 39 | "eslint-plugin-promise": "^6.1.1", 40 | "glob": "^7.2.3", 41 | "ndjson": "^1.5.0", 42 | "nyc": "^14.1.1", 43 | "tape": "^4.11.0" 44 | }, 45 | "repository": { 46 | "type": "git", 47 | "url": "git+https://github.com/elastic/apm-nodejs-http-client.git" 48 | }, 49 | "bugs": { 50 | "url": "https://github.com/elastic/apm-nodejs-http-client/issues" 51 | }, 52 | "homepage": "https://github.com/elastic/apm-nodejs-http-client", 53 | "keywords": [ 54 | "elastic", 55 | "apm", 56 | "http", 57 | "client" 58 | ] 59 | } 60 | -------------------------------------------------------------------------------- /test/fixtures/cgroup: -------------------------------------------------------------------------------- 1 | 14:pids:/kubepods/kubepods/besteffort/pod0e886e9a-3879-45f9-b44d-86ef9df03224/244a65edefdffe31685c42317c9054e71dc1193048cf9459e2a4dd35cbc1dba4 2 | 13:cpuset:/kubepods/pod5eadac96-ab58-11ea-b82b-0242ac110009/7fe41c8a2d1da09420117894f11dd91f6c3a44dfeb7d125dc594bd53468861df 3 | 12:freezer:/kubepods.slice/kubepods-pod22949dce_fd8b_11ea_8ede_98f2b32c645c.slice/docker-b15a5bdedd2e7645c3be271364324321b908314e4c77857bbfd32a041148c07f.scope 4 | 11:devices:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 5 | 10:perf_event:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 6 | 9:memory:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 7 | 8:freezer:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 8 | 7:hugetlb:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 9 | 6:cpuset:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 10 | 5:blkio:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 11 | 4:cpu,cpuacct:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 12 | 3:net_cls,net_prio:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 13 | 2:pids:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 14 | 1:name=systemd:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376 15 | 
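The fixture above is a captured `/proc/<pid>/cgroup` file from a Kubernetes pod. `lib/container-info.js` (shown later in this dump) parses each `id:controllers:path` line and pulls the 64-hex-character container id and the pod UUID out of the path; `test/fixtures/cgroup_result.js` holds the expected parse of this fixture. A small sketch of that use with a single fixture line — the relative require path assumes the repo root as the working directory:

```js
const containerInfo = require('./lib/container-info')

// One line from the fixture above, in "<id>:<controllers>:<path>" form.
const line =
  '11:devices:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/' +
  '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376'

const info = containerInfo.parse(line)
console.log(info.containerId)
// => '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376'
console.log(info.podId)
// => '74c13223-5a00-11e9-b385-42010a80018d'
```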
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Some targets depend on https://github/trentm/json being on the PATH. 2 | JSON ?= json 3 | 4 | .PHONY: all 5 | all: 6 | npm install 7 | 8 | .PHONY: test 9 | test: 10 | npm test 11 | 12 | .PHONY: lint 13 | lint: 14 | npm run lint 15 | 16 | # Ensure CHANGELOG.md and package.json have the same version for a release. 17 | .PHONY: check-version 18 | check-version: 19 | @echo version is: $(shell cat package.json | json version) 20 | [[ v`cat package.json | $(JSON) version` == `grep '^## ' CHANGELOG.md | head -1 | tail -1 | awk '{print $$2}'` ]] 21 | 22 | .PHONY: check 23 | check: lint check-version 24 | 25 | # Tag and release a new release based on the current package.json#version. 26 | # This long bit of Makefile does the following: 27 | # - ensure the repo isn't dirty (changed files) 28 | # - warn if we have a tag for this release already 29 | # - interactively confirm 30 | # - git tag 31 | # - npm publish 32 | .PHONY: cutarelease 33 | cutarelease: check 34 | [[ `basename $(shell git symbolic-ref HEAD)` == 'main' ]] # Can only release from 'main' branch. 35 | [[ -z `git status --short` ]] # If this fails, the working dir is dirty. 36 | @which $(JSON) 2>/dev/null 1>/dev/null && \ 37 | ver=$(shell $(JSON) -f package.json version) && \ 38 | name=$(shell $(JSON) -f package.json name) && \ 39 | publishedVer=$(shell npm view -j $(shell $(JSON) -f package.json name)@$(shell $(JSON) -f package.json version) version 2>/dev/null) && \ 40 | if [[ -n "$$publishedVer" ]]; then \ 41 | echo "error: $$name@$$ver is already published to npm"; \ 42 | exit 1; \ 43 | fi && \ 44 | echo "** Are you sure you want to tag and publish $$name@$$ver to npm?" && \ 45 | echo "** Enter to continue, Ctrl+C to abort." && \ 46 | read 47 | ver=$(shell cat package.json | $(JSON) version) && \ 48 | date=$(shell date -u "+%Y-%m-%d") && \ 49 | git tag -a "v$$ver" -m "version $$ver ($$date)" && \ 50 | git push origin "v$$ver" && \ 51 | npm publish 52 | -------------------------------------------------------------------------------- /scripts/run-tests.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // Run all "test/**/*.test.js" files, each in a separate process. 10 | 11 | var spawn = require('child_process').spawn 12 | 13 | var glob = require('glob') 14 | 15 | // ---- support functions 16 | 17 | // Run a single test file. 
18 | function runTestFile (testFile, cb) { 19 | console.log(`# running test: node ${testFile}`) 20 | var ps = spawn('node', [testFile], { stdio: 'inherit' }) 21 | ps.on('error', cb) 22 | ps.on('close', function (code) { 23 | if (code !== 0) { 24 | const err = new Error('non-zero error code') 25 | err.code = 'ENONZERO' 26 | err.exitCode = code 27 | return cb(err) 28 | } 29 | cb() 30 | }) 31 | } 32 | 33 | function series (tasks, cb) { 34 | var results = [] 35 | var pos = 0 36 | 37 | function done (err, result) { 38 | if (err) return cb(err) 39 | results.push(result) 40 | 41 | if (++pos === tasks.length) { 42 | cb(null, results) 43 | } else { 44 | tasks[pos](done) 45 | } 46 | } 47 | 48 | setImmediate(tasks[pos], done) 49 | } 50 | 51 | function handlerBind (handler) { 52 | return function (task) { 53 | return handler.bind(null, task) 54 | } 55 | } 56 | 57 | function mapSeries (tasks, handler, cb) { 58 | series(tasks.map(handlerBind(handler)), cb) 59 | } 60 | 61 | // ---- mainline 62 | 63 | function main () { 64 | var testFiles = glob.sync( 65 | // Find all ".test.js" files, except those in "fixtures" dirs and in 66 | // "node_modules" dirs created for test packages. 67 | 'test/**/*.test.js', 68 | { ignore: ['**/node_modules/**', '**/fixtures/**'] } 69 | ) 70 | 71 | mapSeries(testFiles, runTestFile, function (err) { 72 | if (err) throw err 73 | }) 74 | } 75 | 76 | main() 77 | -------------------------------------------------------------------------------- /test/abort.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const test = require('tape') 10 | const utils = require('./lib/utils') 11 | 12 | const APMServer = utils.APMServer 13 | const processIntakeReq = utils.processIntakeReq 14 | const assertIntakeReq = utils.assertIntakeReq 15 | const assertMetadata = utils.assertMetadata 16 | const assertEvent = utils.assertEvent 17 | 18 | test('abort request if server responds early', function (t) { 19 | t.plan(assertIntakeReq.asserts * 2 + assertMetadata.asserts + assertEvent.asserts + 2) 20 | 21 | let reqs = 0 22 | let client 23 | 24 | const datas = [ 25 | assertMetadata, 26 | assertEvent({ span: { foo: 2 } }) 27 | ] 28 | 29 | const timer = setTimeout(function () { 30 | throw new Error('the test got stuck') 31 | }, 5000) 32 | 33 | const server = APMServer(function (req, res) { 34 | const reqNo = ++reqs 35 | 36 | assertIntakeReq(t, req) 37 | 38 | if (reqNo === 1) { 39 | res.writeHead(500) 40 | res.end('bad') 41 | 42 | // Wait a little to ensure the current stream have ended, so the next 43 | // span will force a new stream to be created 44 | setTimeout(function () { 45 | client.sendSpan({ foo: 2 }) 46 | client.flush() 47 | }, 50) 48 | } else if (reqNo === 2) { 49 | req = processIntakeReq(req) 50 | req.on('data', function (obj) { 51 | datas.shift()(t, obj) 52 | }) 53 | req.on('end', function () { 54 | res.end() 55 | clearTimeout(timer) 56 | server.close() 57 | client.destroy() // Destroy keep-alive agent. 
58 | t.end() 59 | }) 60 | } else { 61 | t.fail('should not get more than two requests') 62 | } 63 | }).client({ apmServerVersion: '8.0.0' }, function (_client) { 64 | client = _client 65 | client.sendSpan({ foo: 1 }) 66 | client.on('request-error', function (err) { 67 | t.equal(err.code, 500, 'should generate request-error with 500 status code') 68 | t.equal(err.response, 'bad', 'should generate request-error with expected body') 69 | }) 70 | }) 71 | }) 72 | -------------------------------------------------------------------------------- /test/backoff-delay.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // Test Client.prototype._getBackoffDelay. 10 | 11 | const test = require('tape') 12 | 13 | const Client = require('../') 14 | const { validOpts } = require('./lib/utils') 15 | 16 | function assertDelayWithinTenPercentOf (t, value, target, context) { 17 | const jitter = target * 0.10 18 | t.ok(target - jitter <= value && value <= target + jitter, 19 | `delay ~ ${target}ms ${context}, got ${value}`) 20 | } 21 | 22 | test('_getBackoffDelay', function (t) { 23 | const client = new Client(validOpts()) 24 | 25 | // From https://github.com/elastic/apm/blob/main/specs/agents/transport.md#transport-errors 26 | // "The grace period should be calculated in seconds using the algorithm 27 | // min(reconnectCount++, 6) ** 2 ± 10%, where reconnectCount starts at zero. 28 | // So the delay after the first error is 0 seconds, then circa 1, 4, 9, 16, 25 29 | // and finally 36 seconds. We add ±10% jitter to the calculated grace period 30 | // in case multiple agents entered the grace period simultaneously." 31 | t.equal(client._getBackoffDelay(false), 0, 'no backoff delay with no errors') 32 | t.equal(client._getBackoffDelay(true), 0, 'delay=0 after one error') 33 | assertDelayWithinTenPercentOf(t, client._getBackoffDelay(true), 1000, 'after one error') 34 | assertDelayWithinTenPercentOf(t, client._getBackoffDelay(true), 4000, 'after two errors') 35 | assertDelayWithinTenPercentOf(t, client._getBackoffDelay(true), 9000, 'after three errors') 36 | assertDelayWithinTenPercentOf(t, client._getBackoffDelay(true), 16000, 'after four errors') 37 | assertDelayWithinTenPercentOf(t, client._getBackoffDelay(true), 25000, 'after five errors') 38 | assertDelayWithinTenPercentOf(t, client._getBackoffDelay(true), 36000, 'after six errors') 39 | assertDelayWithinTenPercentOf(t, client._getBackoffDelay(true), 36000, 'after seven or more errors') 40 | assertDelayWithinTenPercentOf(t, client._getBackoffDelay(true), 36000, 'after seven or more errors') 41 | t.equal(client._getBackoffDelay(false), 0, 'delay back to 0ms after a success') 42 | 43 | t.end() 44 | }) 45 | -------------------------------------------------------------------------------- /test/stringify.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 
5 | */ 6 | 7 | 'use strict' 8 | 9 | const test = require('tape') 10 | const utils = require('./lib/utils') 11 | 12 | const APMServer = utils.APMServer 13 | const processIntakeReq = utils.processIntakeReq 14 | const assertIntakeReq = utils.assertIntakeReq 15 | const assertMetadata = utils.assertMetadata 16 | const assertEvent = utils.assertEvent 17 | 18 | const dataTypes = ['transaction', 'error'] 19 | const properties = ['request', 'response'] 20 | 21 | const upper = { 22 | transaction: 'Transaction', 23 | error: 'Error' 24 | } 25 | 26 | dataTypes.forEach(function (dataType) { 27 | properties.forEach(function (prop) { 28 | const sendFn = 'send' + upper[dataType] 29 | 30 | test(`stringify ${dataType} ${prop} headers`, function (t) { 31 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 32 | const datas = [ 33 | assertMetadata, 34 | assertEvent({ 35 | [dataType]: { 36 | context: { 37 | [prop]: { 38 | headers: { 39 | string: 'foo', 40 | number: '42', 41 | bool: 'true', 42 | nan: 'NaN', 43 | object: '[object Object]', 44 | array: ['foo', '42', 'true', 'NaN', '[object Object]'] 45 | } 46 | } 47 | } 48 | } 49 | }) 50 | ] 51 | const server = APMServer(function (req, res) { 52 | assertIntakeReq(t, req) 53 | req = processIntakeReq(req) 54 | req.on('data', function (obj) { 55 | datas.shift()(t, obj) 56 | }) 57 | req.on('end', function () { 58 | res.end() 59 | server.close() 60 | t.end() 61 | }) 62 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 63 | client[sendFn]({ 64 | context: { 65 | [prop]: { 66 | headers: { 67 | string: 'foo', 68 | number: 42, 69 | bool: true, 70 | nan: NaN, 71 | object: { foo: 'bar' }, 72 | array: ['foo', 42, true, NaN, { foo: 'bar' }] 73 | } 74 | } 75 | } 76 | }) 77 | client.flush(function () { 78 | client.destroy() // Destroy keep-alive agent when done on client-side. 79 | }) 80 | }) 81 | }) 82 | }) 83 | }) 84 | -------------------------------------------------------------------------------- /lib/detect-hostname.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // Detect the current hostname, preferring the FQDN if possible. 10 | // Spec: https://github.com/elastic/apm/blob/main/specs/agents/metadata.md#hostname 11 | 12 | const os = require('os') 13 | const { spawnSync } = require('child_process') 14 | 15 | /** 16 | * *Synchronously* detect the current hostname, preferring the FQDN. 17 | * This is sent to APM server as `metadata.system.detected_hostname` 18 | * and is intended to fit the ECS `host.name` value 19 | * (https://www.elastic.co/guide/en/ecs/current/ecs-host.html#field-host-name). 
20 | * 21 | * @returns {String} 22 | */ 23 | function detectHostname () { 24 | let hostname = null 25 | let out 26 | const fallback = os.hostname() 27 | 28 | switch (os.platform()) { 29 | case 'win32': 30 | // https://learn.microsoft.com/en-us/dotnet/api/system.net.dns.gethostentry 31 | out = spawnSync( 32 | 'powershell.exe', 33 | ['[System.Net.Dns]::GetHostEntry($env:computerName).HostName'], 34 | { encoding: 'utf8', shell: true, timeout: 2000 } 35 | ) 36 | if (!out.error) { 37 | hostname = out.stdout.trim() 38 | break 39 | } 40 | 41 | // https://learn.microsoft.com/en-us/windows-server/administration/windows-commands/hostname 42 | out = spawnSync( 43 | 'hostname.exe', 44 | { encoding: 'utf8', shell: true, timeout: 2000 } 45 | ) 46 | if (!out.error) { 47 | hostname = out.stdout.trim() 48 | break 49 | } 50 | 51 | if ('COMPUTERNAME' in process.env) { 52 | hostname = process.env['COMPUTERNAME'].trim() // eslint-disable-line dot-notation 53 | } 54 | break 55 | 56 | default: 57 | out = spawnSync('/bin/hostname', ['-f'], { encoding: 'utf8', shell: false, timeout: 500 }) 58 | if (!out.error) { 59 | hostname = out.stdout.trim() 60 | } 61 | // I'm going a little off of the APM spec here by *not* falling back to 62 | // HOSTNAME or HOST envvars. Latest discussion point is here: 63 | // https://github.com/elastic/apm/pull/517#issuecomment-940973458 64 | // My understanding is HOSTNAME is a *Bash*-set envvar. 65 | break 66 | } 67 | 68 | if (!hostname) { 69 | hostname = fallback 70 | } 71 | hostname = hostname.trim().toLowerCase() 72 | return hostname 73 | } 74 | 75 | module.exports = { 76 | detectHostname 77 | } 78 | 79 | // ---- main 80 | 81 | if (require.main === module) { 82 | console.log(detectHostname()) 83 | } 84 | -------------------------------------------------------------------------------- /test/expectExtraMetadata.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // Test usage of `expectExtraMetadata: true` and `setExtraMetadata()`. 
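The tests below cover `expectExtraMetadata: true`: the client stays corked (no intake request is started) until `setExtraMetadata()` supplies the remaining metadata, which lets a Lambda-style agent delay sending until runtime details are known. A hedged sketch of that flow — the `serverUrl` and agent/service identifiers are placeholder values:

```js
const Client = require('elastic-apm-http-client')

const client = new Client({
  serverUrl: 'http://localhost:8200',
  agentName: 'my-agent',
  agentVersion: '1.0.0',
  serviceName: 'my-service',
  userAgent: 'my-agent/1.0.0',
  // Tell the client that more metadata will arrive later; until
  // setExtraMetadata() is called, nothing is sent to APM server.
  expectExtraMetadata: true
})

// Events can be queued immediately...
client.sendTransaction({ name: 'lambda invocation', type: 'lambda' })

// ...and are only flushed once the extra metadata has been provided.
client.setExtraMetadata({ service: { runtime: { name: 'MyLambda' } } })
client.flush(() => client.end())
```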
10 | 11 | const test = require('tape') 12 | const utils = require('./lib/utils') 13 | 14 | const APMServer = utils.APMServer 15 | const processIntakeReq = utils.processIntakeReq 16 | 17 | test('expectExtraMetadata and setExtraMetadata used properly', function (t) { 18 | const apmEvents = [] 19 | 20 | const server = APMServer(function (req, res) { 21 | const objStream = processIntakeReq(req) 22 | objStream.on('data', function (obj) { 23 | apmEvents.push(obj) 24 | }) 25 | objStream.on('end', function () { 26 | res.statusCode = 202 27 | res.end() 28 | }) 29 | }).client({ expectExtraMetadata: true, apmServerVersion: '8.0.0' }, function (client) { 30 | client.setExtraMetadata({ 31 | foo: 'bar', 32 | service: { 33 | runtime: { 34 | name: 'MyLambda' 35 | } 36 | } 37 | }) 38 | client.sendTransaction({ req: 1 }) 39 | 40 | client.flush(() => { 41 | t.equal(apmEvents.length, 2, 'APM Server got 2 events') 42 | t.ok(apmEvents[0].metadata, 'event 0 is metadata') 43 | t.equal(apmEvents[0].metadata.foo, 'bar', 'setExtraMetadata added "foo" field') 44 | t.equal(apmEvents[0].metadata.service.runtime.name, 'MyLambda', 45 | 'setExtraMetadata set nested service.runtime.name field properly') 46 | t.ok(apmEvents[1].transaction, 'event 1 is a transaction') 47 | 48 | client.end() 49 | server.close() 50 | t.end() 51 | }) 52 | }) 53 | }) 54 | 55 | test('empty setExtraMetadata is fine, and calling after send* is fine', function (t) { 56 | const apmEvents = [] 57 | 58 | const server = APMServer(function (req, res) { 59 | const objStream = processIntakeReq(req) 60 | objStream.on('data', function (obj) { 61 | apmEvents.push(obj) 62 | }) 63 | objStream.on('end', function () { 64 | res.statusCode = 202 65 | res.end() 66 | }) 67 | }).client({ expectExtraMetadata: true, apmServerVersion: '8.0.0' }, function (client) { 68 | client.sendTransaction({ req: 1 }) 69 | client.setExtraMetadata() 70 | 71 | client.flush(() => { 72 | t.equal(apmEvents.length, 2, 'APM Server got 2 events') 73 | t.ok(apmEvents[0].metadata, 'event 0 is metadata') 74 | t.ok(apmEvents[1].transaction, 'event 1 is a transaction') 75 | 76 | client.end() 77 | server.close() 78 | t.end() 79 | }) 80 | }) 81 | }) 82 | 83 | test('expectExtraMetadata:true with *no* setExtraMetadata call results in a corked client', function (t) { 84 | const server = APMServer(function (req, res) { 85 | t.fail('do NOT expect to get intake request to APM server') 86 | }).client({ expectExtraMetadata: true, apmServerVersion: '8.0.0' }, function (client) { 87 | // Explicitly *not* calling setExtraMetadata(). 88 | client.sendTransaction({ req: 1 }) 89 | 90 | client.flush(() => { 91 | t.fail('should *not* callback from flush') 92 | }) 93 | setTimeout(() => { 94 | t.pass('hit timeout without an intake request to APM server') 95 | client.destroy() 96 | server.close() 97 | t.end() 98 | }, 1000) 99 | }) 100 | }) 101 | -------------------------------------------------------------------------------- /lib/container-info.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 
5 | */ 6 | 7 | 'use strict' 8 | 9 | const fs = require('fs') 10 | 11 | const uuidSource = '[0-9a-f]{8}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{12}' 12 | const containerSource = '[0-9a-f]{64}' 13 | const taskSource = '[0-9a-f]{32}' 14 | const awsEcsSource = '[0-9a-f]{32}-[0-9]{10}' 15 | 16 | const lineReg = /^(\d+):([^:]*):(.+)$/ 17 | const podReg = new RegExp(`pod(${uuidSource})(?:.slice)?$`) 18 | const containerReg = new RegExp(`(${uuidSource}|${containerSource})(?:.scope)?$`) 19 | const taskReg = new RegExp(`^/ecs/(${taskSource})/.*$`) 20 | 21 | let ecsMetadata 22 | resetEcsMetadata(process.env.ECS_CONTAINER_METADATA_FILE) 23 | 24 | function resetEcsMetadata (file) { 25 | ecsMetadata = ecsMetadataSync(file) 26 | } 27 | 28 | function parseLine (line) { 29 | const [id, groups, path] = (line.match(lineReg) || []).slice(1) 30 | const data = { id, groups, path } 31 | const parts = path.split('/') 32 | const basename = parts.pop() 33 | const controllers = groups.split(',') 34 | if (controllers) data.controllers = controllers 35 | 36 | const containerId = (basename.match(containerReg) || [])[1] 37 | if (containerId) data.containerId = containerId 38 | 39 | const podId = (parts.pop().match(podReg) || [])[1] 40 | if (podId) data.podId = podId.replace(/_/g, '-') 41 | 42 | const taskId = (path.match(taskReg) || [])[1] 43 | if (taskId) data.taskId = taskId 44 | 45 | // if we reach the end and there's still no containerId match 46 | // and there's no ECS metadata file, try the ECS regular 47 | // expression in order to get a container id in Fargate 48 | if (!containerId && !ecsMetadata) { 49 | if (basename.match(awsEcsSource)) { 50 | data.containerId = basename 51 | } 52 | } 53 | return data 54 | } 55 | 56 | function parse (contents) { 57 | const data = { 58 | entries: [] 59 | } 60 | 61 | for (let line of contents.split('\n')) { 62 | line = line.trim() 63 | if (line) { 64 | const lineData = parseLine(line) 65 | data.entries.push(lineData) 66 | if (lineData.containerId) { 67 | data.containerId = lineData.containerId 68 | } 69 | if (lineData.podId) { 70 | data.podId = lineData.podId 71 | } 72 | if (lineData.taskId) { 73 | data.taskId = lineData.taskId 74 | if (ecsMetadata) { 75 | data.containerId = ecsMetadata.ContainerID 76 | } 77 | } 78 | } 79 | } 80 | 81 | return data 82 | } 83 | 84 | function containerInfo (pid = 'self') { 85 | return new Promise((resolve) => { 86 | fs.readFile(`/proc/${pid}/cgroup`, (err, data) => { 87 | resolve(err ? undefined : parse(data.toString())) 88 | }) 89 | }) 90 | } 91 | 92 | function containerInfoSync (pid = 'self') { 93 | try { 94 | const data = fs.readFileSync(`/proc/${pid}/cgroup`) 95 | return parse(data.toString()) 96 | } catch (err) {} 97 | } 98 | 99 | function ecsMetadataSync (ecsMetadataFile) { 100 | try { 101 | return ecsMetadataFile && JSON.parse(fs.readFileSync(ecsMetadataFile)) 102 | } catch (err) {} 103 | } 104 | 105 | module.exports = containerInfo 106 | containerInfo.sync = containerInfoSync 107 | containerInfo.parse = parse 108 | containerInfo.resetEcsMetadata = resetEcsMetadata // Exported for testing only. 109 | -------------------------------------------------------------------------------- /test/writev.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License.
5 | */ 6 | 7 | 'use strict' 8 | 9 | const test = require('tape') 10 | const utils = require('./lib/utils') 11 | 12 | const APMServer = utils.APMServer 13 | 14 | const dataTypes = ['span', 'transaction', 'error'] 15 | 16 | dataTypes.forEach(function (dataType) { 17 | const sendFn = 'send' + dataType.charAt(0).toUpperCase() + dataType.substr(1) 18 | 19 | test(`bufferWindowSize - default value (${dataType})`, function (t) { 20 | const server = APMServer().client(function (client) { 21 | // Send one less span than bufferWindowSize 22 | for (let n = 1; n <= 50; n++) { 23 | client[sendFn]({ req: n }) 24 | t.ok(client._writableState.corked, 'should be corked') 25 | } 26 | 27 | // This span should trigger the uncork 28 | client[sendFn]({ req: 51 }) 29 | 30 | // Wait a little to allow the above write to finish before destroying 31 | process.nextTick(function () { 32 | t.notOk(client._writableState.corked, 'should be uncorked') 33 | 34 | client.destroy() 35 | server.close() 36 | t.end() 37 | }) 38 | }) 39 | }) 40 | 41 | test(`bufferWindowSize - custom value (${dataType})`, function (t) { 42 | const server = APMServer().client({ bufferWindowSize: 5 }, function (client) { 43 | // Send one less span than bufferWindowSize 44 | for (let n = 1; n <= 5; n++) { 45 | client[sendFn]({ req: n }) 46 | t.ok(client._writableState.corked, 'should be corked') 47 | } 48 | 49 | // This span should trigger the uncork 50 | client[sendFn]({ req: 6 }) 51 | 52 | // Wait a little to allow the above write to finish before destroying 53 | process.nextTick(function () { 54 | t.notOk(client._writableState.corked, 'should be uncorked') 55 | 56 | client.destroy() 57 | server.close() 58 | t.end() 59 | }) 60 | }) 61 | }) 62 | 63 | test(`bufferWindowTime - default value (${dataType})`, function (t) { 64 | const server = APMServer().client(function (client) { 65 | client[sendFn]({ req: 1 }) 66 | t.ok(client._writableState.corked, 'should be corked') 67 | 68 | // Wait twice as long as bufferWindowTime 69 | setTimeout(function () { 70 | t.notOk(client._writableState.corked, 'should be uncorked') 71 | client.destroy() 72 | server.close() 73 | t.end() 74 | }, 40) 75 | }) 76 | }) 77 | 78 | test(`bufferWindowTime - custom value (${dataType})`, function (t) { 79 | const server = APMServer().client({ bufferWindowTime: 150 }, function (client) { 80 | client[sendFn]({ req: 1 }) 81 | t.ok(client._writableState.corked, 'should be corked') 82 | 83 | // Wait twice as long as the default bufferWindowTime 84 | setTimeout(function () { 85 | t.ok(client._writableState.corked, 'should be corked') 86 | }, 40) 87 | 88 | // Wait twice as long as the custom bufferWindowTime 89 | setTimeout(function () { 90 | t.notOk(client._writableState.corked, 'should be uncorked') 91 | client.destroy() 92 | server.close() 93 | t.end() 94 | }, 300) 95 | }) 96 | }) 97 | 98 | test(`write on destroyed (${dataType})`, function (t) { 99 | const server = APMServer(function (req, res) { 100 | t.fail('should not send anything to the APM Server') 101 | }).client({ bufferWindowSize: 1, apmServerVersion: '8.0.0' }, function (client) { 102 | client.on('error', function (err) { 103 | t.error(err) 104 | }) 105 | 106 | client[sendFn]({ req: 1 }) 107 | client[sendFn]({ req: 2 }) 108 | 109 | // Destroy the client before the _writev function have a chance to be called 110 | client.destroy() 111 | 112 | setTimeout(function () { 113 | server.close() 114 | t.end() 115 | }, 10) 116 | }) 117 | }) 118 | }) 119 | -------------------------------------------------------------------------------- 
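The writev tests above drive the client's write buffering: events are corked until either a configurable number of queued events (`bufferWindowSize`) or a short time window (`bufferWindowTime`) is reached, at which point the buffered events are written together via `_writev`. A sketch of tuning those options, using the same custom values as the tests above — the `serverUrl` and agent/service identifiers are placeholders:

```js
const Client = require('elastic-apm-http-client')

const client = new Client({
  serverUrl: 'http://localhost:8200',
  agentName: 'my-agent',
  agentVersion: '1.0.0',
  serviceName: 'my-service',
  userAgent: 'my-agent/1.0.0',
  // Buffer writes until 5 events are queued or 150 ms have passed,
  // whichever comes first.
  bufferWindowSize: 5,
  bufferWindowTime: 150
})

for (let n = 1; n <= 5; n++) {
  client.sendSpan({ n }) // still corked while the buffer window is open
}
// The next event (or the 150 ms timer) uncorks the stream and the buffered
// events are flushed to the intake request in one batch.
client.sendSpan({ n: 6 })
client.flush(() => client.destroy())
```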
/test/side-effects.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const path = require('path') 10 | const exec = require('child_process').exec 11 | const test = require('tape') 12 | const utils = require('./lib/utils') 13 | 14 | const APMServer = utils.APMServer 15 | const processIntakeReq = utils.processIntakeReq 16 | const assertIntakeReq = utils.assertIntakeReq 17 | const assertMetadata = utils.assertMetadata 18 | const assertEvent = utils.assertEvent 19 | 20 | // Exec a script that uses `client.sendSpan(...)` and then finishes. The 21 | // script process should finish quickly. This is exercising the "beforeExit" 22 | // (to end an ongoing intake request) and Client.prototype._unref (to hold the 23 | // process to complete sending to APM server) handling in the client. 24 | test('client should not hold the process open', function (t) { 25 | t.plan(1 + assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 26 | 27 | const thingsToAssert = [ 28 | assertMetadata, 29 | assertEvent({ span: { hello: 'world' } }) 30 | ] 31 | 32 | const server = APMServer(function (req, res) { 33 | // Handle the server info endpoint. 34 | if (req.method === 'GET' && req.url === '/') { 35 | req.resume() 36 | res.statusCode = 200 37 | res.end(JSON.stringify({ build_date: '...', build_sha: '...', version: '8.0.0' })) 38 | return 39 | } 40 | 41 | // Handle an intake request. 42 | assertIntakeReq(t, req) 43 | req = processIntakeReq(req) 44 | req.on('data', function (obj) { 45 | thingsToAssert.shift()(t, obj) 46 | }) 47 | req.on('end', function () { 48 | res.statusCode = 202 49 | res.end() 50 | server.close() 51 | }) 52 | }) 53 | 54 | server.listen(function () { 55 | const url = 'http://localhost:' + server.address().port 56 | const file = path.join(__dirname, 'lib', 'unref-client.js') 57 | exec(`node ${file} ${url}`, function (err, stdout, stderr) { 58 | if (stderr.trim()) { 59 | t.comment('stderr from unref-client.js:\n' + stderr) 60 | } 61 | if (err) { 62 | throw err 63 | } 64 | const end = Date.now() 65 | const start = Number(stdout) 66 | const duration = end - start 67 | t.ok(duration < 300, `should not take more than 300ms to complete (was: ${duration}ms)`) 68 | t.end() 69 | }) 70 | }) 71 | }) 72 | 73 | // This is the same test as the previous, except this time the APM server is 74 | // not responding. Normally the `intakeResTimeout` value is used to handle 75 | // timing out intake requests. However, that timeout defaults to 10s, which is 76 | // very long to hold a closing process open. `makeIntakeRequest` overrides 77 | // `intakeResTimeout` to *1s* if the client is ending. We test that ~1s timeout 78 | // here. 79 | test('client should not hold the process open even if APM server not responding', function (t) { 80 | t.plan(2 + assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 81 | 82 | const thingsToAssert = [ 83 | assertMetadata, 84 | assertEvent({ span: { hello: 'world' } }) 85 | ] 86 | 87 | const server = APMServer(function (req, res) { 88 | // Handle the server info endpoint. 89 | if (req.method === 'GET' && req.url === '/') { 90 | req.resume() 91 | // Intentionally do not respond. 92 | return 93 | } 94 | 95 | // Handle an intake request. 
96 | assertIntakeReq(t, req) 97 | req = processIntakeReq(req) 98 | req.on('data', function (obj) { 99 | thingsToAssert.shift()(t, obj) 100 | }) 101 | req.on('end', function () { 102 | res.statusCode = 202 103 | // Here the server is intentionally not responding: 104 | // res.end() 105 | // server.close() 106 | }) 107 | }) 108 | 109 | server.listen(function () { 110 | const url = 'http://localhost:' + server.address().port 111 | const file = path.join(__dirname, 'lib', 'unref-client.js') 112 | exec(`node ${file} ${url}`, function (err, stdout, stderr) { 113 | if (stderr.trim()) { 114 | t.comment('stderr from unref-client.js:\n' + stderr) 115 | } 116 | t.ifErr(err, `no error from executing ${file}`) 117 | const end = Date.now() 118 | const start = Number(stdout) 119 | const duration = end - start 120 | t.ok(duration > 700 && duration < 1300, 121 | `should take approximately 1000ms to timeout (was: ${duration}ms)`) 122 | 123 | server.close() 124 | t.end() 125 | }) 126 | }) 127 | }) 128 | -------------------------------------------------------------------------------- /test/fixtures/cgroup_result.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | module.exports = { 10 | entries: [ 11 | { 12 | id: '14', 13 | groups: 'pids', 14 | path: '/kubepods/kubepods/besteffort/pod0e886e9a-3879-45f9-b44d-86ef9df03224/244a65edefdffe31685c42317c9054e71dc1193048cf9459e2a4dd35cbc1dba4', 15 | controllers: ['pids'], 16 | containerId: '244a65edefdffe31685c42317c9054e71dc1193048cf9459e2a4dd35cbc1dba4', 17 | podId: '0e886e9a-3879-45f9-b44d-86ef9df03224' 18 | }, 19 | { 20 | id: '13', 21 | groups: 'cpuset', 22 | path: '/kubepods/pod5eadac96-ab58-11ea-b82b-0242ac110009/7fe41c8a2d1da09420117894f11dd91f6c3a44dfeb7d125dc594bd53468861df', 23 | controllers: ['cpuset'], 24 | containerId: '7fe41c8a2d1da09420117894f11dd91f6c3a44dfeb7d125dc594bd53468861df', 25 | podId: '5eadac96-ab58-11ea-b82b-0242ac110009' 26 | }, 27 | { 28 | id: '12', 29 | groups: 'freezer', 30 | path: '/kubepods.slice/kubepods-pod22949dce_fd8b_11ea_8ede_98f2b32c645c.slice/docker-b15a5bdedd2e7645c3be271364324321b908314e4c77857bbfd32a041148c07f.scope', 31 | controllers: ['freezer'], 32 | containerId: 'b15a5bdedd2e7645c3be271364324321b908314e4c77857bbfd32a041148c07f', 33 | podId: '22949dce-fd8b-11ea-8ede-98f2b32c645c' 34 | }, 35 | { 36 | id: '11', 37 | groups: 'devices', 38 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 39 | controllers: ['devices'], 40 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 41 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 42 | }, 43 | { 44 | id: '10', 45 | groups: 'perf_event', 46 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 47 | controllers: ['perf_event'], 48 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 49 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 50 | }, 51 | { 52 | id: '9', 53 | groups: 'memory', 54 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 55 | controllers: ['memory'], 56 | containerId: 
'34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 57 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 58 | }, 59 | { 60 | id: '8', 61 | groups: 'freezer', 62 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 63 | controllers: ['freezer'], 64 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 65 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 66 | }, 67 | { 68 | id: '7', 69 | groups: 'hugetlb', 70 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 71 | controllers: ['hugetlb'], 72 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 73 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 74 | }, 75 | { 76 | id: '6', 77 | groups: 'cpuset', 78 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 79 | controllers: ['cpuset'], 80 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 81 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 82 | }, 83 | { 84 | id: '5', 85 | groups: 'blkio', 86 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 87 | controllers: ['blkio'], 88 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 89 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 90 | }, 91 | { 92 | id: '4', 93 | groups: 'cpu,cpuacct', 94 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 95 | controllers: ['cpu', 'cpuacct'], 96 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 97 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 98 | }, 99 | { 100 | id: '3', 101 | groups: 'net_cls,net_prio', 102 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 103 | controllers: ['net_cls', 'net_prio'], 104 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 105 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 106 | }, 107 | { 108 | id: '2', 109 | groups: 'pids', 110 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 111 | controllers: ['pids'], 112 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 113 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 114 | }, 115 | { 116 | id: '1', 117 | groups: 'name=systemd', 118 | path: '/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 119 | controllers: ['name=systemd'], 120 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 121 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 122 | } 123 | ], 124 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 125 | podId: '74c13223-5a00-11e9-b385-42010a80018d' 126 | } 127 | -------------------------------------------------------------------------------- /test/central-config.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 
3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const test = require('tape') 10 | 11 | const { APMServer, validOpts, assertConfigReq } = require('./lib/utils') 12 | const { 13 | getCentralConfigIntervalS, 14 | INTERVAL_DEFAULT_S, 15 | INTERVAL_MIN_S, 16 | INTERVAL_MAX_S 17 | } = require('../lib/central-config') 18 | const Client = require('../') 19 | 20 | test('getCentralConfigIntervalS', function (t) { 21 | const testCases = [ 22 | // [ <input arg>, <expected interval in seconds> ] 23 | [-4, INTERVAL_DEFAULT_S], 24 | [-1, INTERVAL_DEFAULT_S], 25 | [0, 300], 26 | [1, INTERVAL_MIN_S], 27 | [2, INTERVAL_MIN_S], 28 | [3, INTERVAL_MIN_S], 29 | [4, INTERVAL_MIN_S], 30 | [5, INTERVAL_MIN_S], 31 | [6, 6], 32 | [7, 7], 33 | [8, 8], 34 | [9, 9], 35 | [10, 10], 36 | [86398, 86398], 37 | [86399, 86399], 38 | [86400, 86400], 39 | [86401, INTERVAL_MAX_S], 40 | [86402, INTERVAL_MAX_S], 41 | [86403, INTERVAL_MAX_S], 42 | [86404, INTERVAL_MAX_S], 43 | [NaN, INTERVAL_DEFAULT_S], 44 | [null, INTERVAL_DEFAULT_S], 45 | [undefined, INTERVAL_DEFAULT_S], 46 | [false, INTERVAL_DEFAULT_S], 47 | [true, INTERVAL_DEFAULT_S], 48 | ['a string', INTERVAL_DEFAULT_S], 49 | [{}, INTERVAL_DEFAULT_S], 50 | [[], INTERVAL_DEFAULT_S] 51 | ] 52 | 53 | testCases.forEach(testCase => { 54 | t.equal(getCentralConfigIntervalS(testCase[0]), testCase[1], 55 | `getCentralConfigIntervalS(${testCase[0]}) -> ${testCase[1]}`) 56 | }) 57 | t.end() 58 | }) 59 | 60 | test('central config disabled', function (t) { 61 | const origPollConfig = Client.prototype._pollConfig 62 | Client.prototype._pollConfig = function () { 63 | t.fail('should not call _pollConfig') 64 | } 65 | 66 | t.on('end', function () { 67 | Client.prototype._pollConfig = origPollConfig 68 | }) 69 | 70 | Client(validOpts()) 71 | t.end() 72 | }) 73 | 74 | test('central config enabled', function (t) { 75 | t.plan(1) 76 | 77 | const origPollConfig = Client.prototype._pollConfig 78 | Client.prototype._pollConfig = function () { 79 | t.pass('should call _pollConfig') 80 | } 81 | 82 | t.on('end', function () { 83 | Client.prototype._pollConfig = origPollConfig 84 | }) 85 | 86 | Client(validOpts({ centralConfig: true })) 87 | t.end() 88 | }) 89 | 90 | // Test central-config handling of Etag and If-None-Match headers using a mock 91 | // apm-server that uses the `Cache-Control: max-age=1 ...` header to speed up 92 | // the polling interval of the client. (This is foiled by `INTERVAL_MIN_S = 5`.)
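//
// Roughly, the happy-path portion of that exchange looks like this (a sketch of
// the HTTP traffic only; the paths, headers and values come from the assertions
// in this test and in ./lib/utils.js):
//
//   GET /config/v1/agents?service.name=...        (first poll: no If-None-Match)
//   <- 200, Etag: "42", Cache-Control: max-age=1, must-revalidate, body {"foo":"bar"}
//   ... the client waits, clamped to no less than INTERVAL_MIN_S seconds ...
//   GET /config/v1/agents?service.name=...        (now with If-None-Match: "42")
//   <- 304 Not Modified                           (config unchanged)
//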
93 | test('polling', function (t) { 94 | const expectedConf = { foo: 'bar' } 95 | const headers = { 'Cache-Control': 'max-age=1, must-revalidate' } 96 | let reqs = 0 97 | let client 98 | 99 | const server = APMServer(function (req, res) { 100 | assertConfigReq(t, req) 101 | 102 | switch (++reqs) { 103 | case 1: 104 | t.ok(!('if-none-match' in req.headers), 'should not have If-None-Match header') 105 | res.writeHead(500, Object.assign({ 'Content-Type': 'application/json' }, headers)) 106 | res.end('{"invalid JSON"}') 107 | break 108 | case 2: 109 | t.ok(!('if-none-match' in req.headers), 'should not have If-None-Match header') 110 | res.writeHead(503, Object.assign({ 'Content-Type': 'application/json' }, headers)) 111 | res.end(JSON.stringify('valid JSON')) 112 | break 113 | case 3: 114 | t.ok(!('if-none-match' in req.headers), 'should not have If-None-Match header') 115 | res.writeHead(503, Object.assign({ 'Content-Type': 'application/json' }, headers)) 116 | res.end(JSON.stringify({ error: 'from error property' })) 117 | break 118 | case 4: 119 | t.ok(!('if-none-match' in req.headers), 'should not have If-None-Match header') 120 | res.writeHead(403, headers) 121 | res.end() 122 | break 123 | case 5: 124 | t.ok(!('if-none-match' in req.headers), 'should not have If-None-Match header') 125 | res.writeHead(404, headers) 126 | res.end() 127 | break 128 | case 6: 129 | t.ok(!('if-none-match' in req.headers), 'should not have If-None-Match header') 130 | res.writeHead(200, Object.assign({ Etag: '"42"' }, headers)) 131 | res.end(JSON.stringify(expectedConf)) 132 | break 133 | case 7: 134 | t.equal(req.headers['if-none-match'], '"42"') 135 | res.writeHead(304, Object.assign({ Etag: '"42"' }, headers)) 136 | res.end() 137 | client.destroy() 138 | server.close() 139 | t.end() 140 | break 141 | default: 142 | t.fail('too many request') 143 | } 144 | }).client({ centralConfig: true, apmServerVersion: '8.0.0' }, function (_client) { 145 | client = _client 146 | client.on('config', function (conf) { 147 | t.equal(reqs, 6, 'should emit config after 6th request') 148 | t.deepEqual(conf, expectedConf) 149 | }) 150 | client.on('request-error', function (err) { 151 | if (reqs === 1) { 152 | t.equal(err.code, 500) 153 | t.equal(err.message, 'Unexpected APM Server response when polling config') 154 | t.equal(err.response, '{"invalid JSON"}') 155 | } else if (reqs === 2) { 156 | t.equal(err.code, 503) 157 | t.equal(err.message, 'Unexpected APM Server response when polling config') 158 | t.equal(err.response, 'valid JSON') 159 | } else if (reqs === 3) { 160 | t.equal(err.code, 503) 161 | t.equal(err.message, 'Unexpected APM Server response when polling config') 162 | t.equal(err.response, 'from error property') 163 | } else if (reqs === 7) { 164 | // The mock APMServer above hard-destroys the connection on req 7. If 165 | // the client's keep-alive agent has an open socket, we expect a 166 | // "socket hang up" (ECONNRESET) error here. 167 | t.equal(err.message, 'socket hang up') 168 | } else { 169 | t.error(err, 'got an err on req ' + reqs + ', err=' + err.message) 170 | } 171 | }) 172 | }) 173 | }) 174 | -------------------------------------------------------------------------------- /test/lambda-usage.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 
5 | */ 6 | 7 | 'use strict' 8 | 9 | // Test the expected usage of this Client in an AWS Lambda environment. 10 | // The "Notes on Lambda usage" section in the README.md describes the 11 | // expected usage. 12 | // 13 | // Note: This test file needs to be run in its own process. 14 | 15 | // Must set this before the Client is imported so it thinks it is in a Lambda env. 16 | process.env.AWS_LAMBDA_FUNCTION_NAME = 'myFn' 17 | 18 | const { URL } = require('url') 19 | const zlib = require('zlib') 20 | const test = require('tape') 21 | const { APMServer } = require('./lib/utils') 22 | 23 | test('lambda usage', suite => { 24 | let server 25 | let client 26 | let reqsToServer = [] 27 | let lateSpanInSameTickCallbackCalled = false 28 | let lateSpanInNextTickCallbackCalled = false 29 | 30 | test('setup mock APM server', t => { 31 | server = APMServer(function (req, res) { 32 | if (req.method === 'POST' && req.url === '/register/transaction') { 33 | req.resume() 34 | req.on('end', () => { 35 | res.writeHead(200) 36 | res.end() 37 | }) 38 | return 39 | } else if (!(req.method === 'POST' && req.url.startsWith('/intake/v2/events'))) { 40 | req.resume() 41 | req.on('end', () => { 42 | res.writeHead(404) 43 | res.end() 44 | }) 45 | return 46 | } 47 | 48 | // Capture intake req data to this mock APM server to `reqsToServer`. 49 | const reqInfo = { 50 | method: req.method, 51 | path: req.url, 52 | url: new URL(req.url, 'http://localhost'), 53 | headers: req.headers, 54 | events: [] 55 | } 56 | let instream = req 57 | if (req.headers['content-encoding'] === 'gzip') { 58 | instream = req.pipe(zlib.createGunzip()) 59 | } else { 60 | instream.setEncoding('utf8') 61 | } 62 | let body = '' 63 | instream.on('data', chunk => { 64 | body += chunk 65 | }) 66 | instream.on('end', () => { 67 | body 68 | .split(/\n/g) // parse each line 69 | .filter(line => line.trim()) // ... if it is non-empty 70 | .forEach(line => { 71 | reqInfo.events.push(JSON.parse(line)) // ... append to reqInfo.events 72 | }) 73 | reqsToServer.push(reqInfo) 74 | res.writeHead(202) // the expected response from intake API endpoint 75 | res.end('{}') 76 | }) 77 | }) 78 | 79 | server.client({ 80 | apmServerVersion: '8.0.0', 81 | centralConfig: false 82 | }, function (client_) { 83 | client = client_ 84 | t.end() 85 | }) 86 | }) 87 | 88 | test('clients stays corked before .lambdaStart()', t => { 89 | // Add more events than `bufferWindowSize` and wait for more than 90 | // `bufferWindowTime`, and the Client should *still* be corked. 91 | const aTrans = { name: 'aTrans', type: 'custom', result: 'success' /* ... */ } 92 | for (let i = 0; i < client._conf.bufferWindowSize + 1; i++) { 93 | client.sendTransaction(aTrans) 94 | } 95 | setTimeout(() => { 96 | t.equal(client._writableState.corked, 1, 97 | 'corked after bufferWindowSize events and bufferWindowTime') 98 | t.equal(reqsToServer.length, 0, 'no intake request was made to APM Server') 99 | t.end() 100 | }, client._conf.bufferWindowTime + 10) 101 | }) 102 | 103 | test('lambda invocation', async (t) => { 104 | client.lambdaStart() // 1. start of invocation 105 | 106 | // 2. Registering transaction 107 | t.equal(client.lambdaShouldRegisterTransactions(), true, '.lambdaShouldRegisterTransactions() is true') 108 | await client.lambdaRegisterTransaction( 109 | { name: 'GET /aStage/myFn', type: 'lambda', outcome: 'unknown' /* ... 
*/ }, 110 | '063de0d2-1705-4eeb-9dfd-045d76b8cdec') 111 | t.equal(client.lambdaShouldRegisterTransactions(), true, '.lambdaShouldRegisterTransactions() is true after register') 112 | 113 | setTimeout(() => { 114 | client.sendTransaction({ name: 'GET /aStage/myFn', type: 'lambda', result: 'success' /* ... */ }) 115 | client.sendSpan({ name: 'mySpan', type: 'custom', result: 'success' /* ... */ }) 116 | 117 | // 3. Flush at end of invocation 118 | client.flush({ lambdaEnd: true }, function () { 119 | t.ok(reqsToServer.length > 1, 'at least 2 intake requests to APM Server') 120 | t.equal(reqsToServer[reqsToServer.length - 1].url.searchParams.get('flushed'), 'true', 121 | 'the last intake request had "?flushed=true" query param') 122 | 123 | let allEvents = [] 124 | reqsToServer.forEach(r => { allEvents = allEvents.concat(r.events) }) 125 | t.equal(allEvents[allEvents.length - 2].transaction.name, 'GET /aStage/myFn', 126 | 'second last event is the lambda transaction') 127 | t.equal(allEvents[allEvents.length - 1].span.name, 'mySpan', 128 | 'last event is the lambda span') 129 | 130 | reqsToServer = [] // reset 131 | t.end() 132 | }) 133 | 134 | // Explicitly send late events and flush *after* the 135 | // `client.flush({lambdaEnd:true})` -- both in the same tick and next 136 | // ticks -- to test that these get buffered until the next lambda 137 | // invocation. 138 | client.sendSpan({ name: 'lateSpanInSameTick', type: 'custom' /* ... */ }) 139 | client.flush(function () { 140 | lateSpanInSameTickCallbackCalled = true 141 | }) 142 | setImmediate(() => { 143 | client.sendSpan({ name: 'lateSpanInNextTick', type: 'custom' /* ... */ }) 144 | client.flush(function () { 145 | lateSpanInNextTickCallbackCalled = true 146 | }) 147 | }) 148 | }, 10) 149 | }) 150 | 151 | // Give some time to make sure there isn't some unexpected short async 152 | // interaction. 
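// Specifically: the late span and flush calls issued above must stay buffered
// across this pause. Their flush callbacks are only expected to fire once the
// *next* invocation ends with `client.flush({ lambdaEnd: true })`, as asserted
// in the 'second lambda invocation' test below.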
153 | test('pause between lambda invocations', t => { 154 | setTimeout(() => { 155 | t.end() 156 | }, 1000) 157 | }) 158 | 159 | test('second lambda invocation', t => { 160 | t.equal(lateSpanInSameTickCallbackCalled, false, 'lateSpanInSameTick flush callback not yet called') 161 | t.equal(lateSpanInNextTickCallbackCalled, false, 'lateSpanInNextTick flush callback not yet called') 162 | t.equal(reqsToServer.length, 0, 'no intake request was made to APM Server since last lambdaEnd') 163 | 164 | client.lambdaStart() 165 | setTimeout(() => { 166 | client.flush({ lambdaEnd: true }, () => { 167 | t.equal(reqsToServer.length, 3, '3 intake requests to APM Server') 168 | t.equal(lateSpanInSameTickCallbackCalled, true, 'lateSpanInSameTick flush callback has now been called') 169 | t.equal(lateSpanInNextTickCallbackCalled, true, 'lateSpanInNextTick flush callback has now been called') 170 | 171 | t.equal(reqsToServer[0].events.length, 2, 172 | 'the first intake request has 2 events') 173 | t.equal(reqsToServer[0].events[1].span.name, 'lateSpanInSameTick', 174 | 'of which the second event is the lateSpanInSameTick') 175 | t.equal(reqsToServer[1].events.length, 2, 176 | 'the second intake request has 2 events') 177 | t.equal(reqsToServer[1].events[1].span.name, 'lateSpanInNextTick', 178 | 'of which the second event is the lateSpanInNextTick') 179 | t.equal(reqsToServer[reqsToServer.length - 1].url.searchParams.get('flushed'), 'true', 180 | 'the last intake request had "?flushed=true" query param') 181 | t.end() 182 | }) 183 | }, 10) 184 | }) 185 | 186 | test('teardown', t => { 187 | server.close() 188 | client.destroy() 189 | t.end() 190 | }) 191 | 192 | suite.end() 193 | }) 194 | -------------------------------------------------------------------------------- /test/lib/utils.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const http = require('http') 10 | const https = require('https') 11 | const { URL } = require('url') 12 | const zlib = require('zlib') 13 | const semver = require('semver') 14 | const ndjson = require('ndjson') 15 | const Client = require('../../') 16 | 17 | exports.APMServer = APMServer 18 | exports.processIntakeReq = processIntakeReq 19 | exports.assertIntakeReq = assertIntakeReq 20 | exports.assertConfigReq = assertConfigReq 21 | exports.assertMetadata = assertMetadata 22 | exports.assertEvent = assertEvent 23 | exports.validOpts = validOpts 24 | 25 | function APMServer (opts, onreq) { 26 | if (typeof opts === 'function') return APMServer(null, opts) 27 | opts = opts || {} 28 | 29 | const secure = !!opts.secure 30 | 31 | const server = secure 32 | ? https.createServer({ cert: tlsCert, key: tlsKey }, onreq) 33 | : http.createServer(onreq) 34 | 35 | // Because we use a keep-alive agent in the client, we need to unref the 36 | // sockets received by the server. 
If not, the server would hold the app open 37 | // even after the tests have completed. 38 | server.on('connection', function (socket) { 39 | socket.unref() 40 | }) 41 | 42 | server.client = function (clientOpts, onclient) { 43 | if (typeof clientOpts === 'function') { 44 | onclient = clientOpts 45 | clientOpts = {} 46 | } 47 | server.listen(function () { 48 | onclient(new Client(validOpts(Object.assign({ 49 | // logger: require('pino')({ level: 'trace' }), // uncomment for debugging 50 | serverUrl: `http${secure ? 's' : ''}://localhost:${server.address().port}`, 51 | secretToken: 'secret' 52 | }, clientOpts)))) 53 | }) 54 | return server 55 | } 56 | 57 | return server 58 | } 59 | 60 | function processIntakeReq (req) { 61 | return req.pipe(zlib.createGunzip()).pipe(ndjson.parse()) 62 | } 63 | 64 | function assertIntakeReq (t, req) { 65 | t.equal(req.method, 'POST', 'should make a POST request') 66 | t.equal(req.url, '/intake/v2/events', 'should send request to /intake/v2/events') 67 | t.equal(req.headers.authorization, 'Bearer secret', 'should add secret token') 68 | t.equal(req.headers['content-type'], 'application/x-ndjson', 'should send request as ndjson') 69 | t.equal(req.headers['content-encoding'], 'gzip', 'should compress request') 70 | t.equal(req.headers.accept, 'application/json', 'should expect json in response') 71 | t.equal(req.headers['user-agent'], 'my-user-agent', 'should add proper User-Agent') 72 | } 73 | assertIntakeReq.asserts = 7 74 | 75 | function assertConfigReq (t, req) { 76 | const url = new URL(req.url, 'relative:///') 77 | 78 | t.equal(req.method, 'GET', 'should make a GET request') 79 | t.equal(url.pathname, '/config/v1/agents', 'should send request to /config/v1/agents') 80 | t.equal(url.search, '?service.name=my-service-name&service.environment=development', 'should encode query in query params') 81 | t.equal(req.headers.authorization, 'Bearer secret', 'should add secret token') 82 | t.equal(req.headers['user-agent'], 'my-user-agent', 'should add proper User-Agent') 83 | } 84 | assertConfigReq.asserts = 5 85 | 86 | function assertMetadata (t, obj) { 87 | t.deepEqual(Object.keys(obj), ['metadata']) 88 | const metadata = obj.metadata 89 | const metadataKeys = new Set(Object.keys(metadata)) 90 | t.ok(metadataKeys.has('service')) 91 | t.ok(metadataKeys.has('process')) 92 | t.ok(metadataKeys.has('system')) 93 | const service = metadata.service 94 | t.equal(service.name, 'my-service-name') 95 | t.equal(service.runtime.name, 'node') 96 | t.equal(service.runtime.version, process.versions.node) 97 | t.ok(semver.valid(service.runtime.version)) 98 | t.equal(service.language.name, 'javascript') 99 | t.equal(service.agent.name, 'my-agent-name') 100 | t.equal(service.agent.version, 'my-agent-version') 101 | const _process = metadata.process 102 | t.ok(_process.pid > 0, `pid should be > 0, was ${_process.pid}`) 103 | if (semver.gte(process.version, '8.10.0')) { 104 | t.ok(_process.ppid > 0, `ppid should be > 0, was ${_process.ppid}`) 105 | } else { 106 | t.equal(_process.ppid, undefined) 107 | } 108 | 109 | if (_process.title.length === 1) { 110 | // because of truncation test 111 | t.equal(_process.title, process.title[0]) 112 | } else { 113 | const regex = /(node|cmd.exe)/ 114 | t.ok(regex.test(_process.title), `process.title should match ${regex} (was: ${_process.title})`) 115 | } 116 | 117 | t.ok(Array.isArray(_process.argv), 'process.argv should be an array') 118 | t.ok(_process.argv.length >= 2, 'process.argv should contain at least two elements') 119 | var regex =
/node(\.exe)?$/i 120 | t.ok(regex.test(_process.argv[0]), `process.argv[0] should match ${regex} (was: ${_process.argv[0]})`) 121 | regex = /(test.*\.js|tape)$/ 122 | t.ok(regex.test(_process.argv[1]), `process.argv[1] should match ${regex} (was: ${_process.argv[1]})"`) 123 | const system = metadata.system 124 | if ('detected_hostname' in system) { 125 | t.ok(typeof system.detected_hostname, 'string') 126 | t.ok(system.detected_hostname.length > 0) 127 | } else { 128 | t.ok(typeof system.hostname, 'string') 129 | t.ok(system.hostname.length > 0) 130 | } 131 | t.ok(typeof system.architecture, 'string') 132 | t.ok(system.architecture.length > 0) 133 | t.ok(typeof system.platform, 'string') 134 | t.ok(system.platform.length > 0) 135 | } 136 | assertMetadata.asserts = 24 137 | 138 | function assertEvent (expect) { 139 | return function (t, obj) { 140 | const key = Object.keys(expect)[0] 141 | const val = expect[key] 142 | switch (key) { 143 | case 'transaction': 144 | if (!('name' in val)) val.name = 'undefined' 145 | if (!('type' in val)) val.type = 'undefined' 146 | if (!('result' in val)) val.result = 'undefined' 147 | break 148 | case 'span': 149 | if (!('name' in val)) val.name = 'undefined' 150 | if (!('type' in val)) val.type = 'undefined' 151 | break 152 | case 'error': 153 | break 154 | case 'metricset': 155 | break 156 | default: 157 | t.fail('unexpected event type: ' + key) 158 | } 159 | t.deepEqual(obj, expect) 160 | } 161 | } 162 | assertEvent.asserts = 1 163 | 164 | function validOpts (opts) { 165 | return Object.assign({ 166 | agentName: 'my-agent-name', 167 | agentVersion: 'my-agent-version', 168 | serviceName: 'my-service-name', 169 | userAgent: 'my-user-agent' 170 | }, opts) 171 | } 172 | 173 | // tlsCert and tlsKey were generated via the same method as Go's builtin 174 | // test certificate/key pair, using 175 | // https://github.com/golang/go/blob/master/src/crypto/tls/generate_cert.go: 176 | // 177 | // go run generate_cert.go --rsa-bits 1024 --host 127.0.0.1,::1,localhost \ 178 | // --ca --start-date "Jan 1 00:00:00 1970" \ 179 | // --duration=1000000h 180 | // 181 | // The certificate is valid for 127.0.0.1, ::1, and localhost; and expires in the year 2084. 
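//
// The PEM pair below is what `APMServer({ secure: true }, ...)` above feeds to
// `https.createServer({ cert: tlsCert, key: tlsKey }, ...)`, so a TLS-enabled
// test server can be set up with roughly (a sketch):
//
//   APMServer({ secure: true }, function (req, res) { /* assert and respond */ })
//     .client({ /* client opts */ }, function (client) { /* exercise the client */ })
//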
182 | 183 | const tlsCert = `-----BEGIN CERTIFICATE----- 184 | MIICETCCAXqgAwIBAgIQQalo5z3llnTiwERMPZQxujANBgkqhkiG9w0BAQsFADAS 185 | MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw 186 | MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB 187 | iQKBgQDrW9Z8jSgTMeN9Dt36HBj/kbU/aeFp10GshKm8IKWBpyyWKrTSjiYJIpTK 188 | l/6sdC77UCDokYAk66T+IXIvvRvqOtD1HUt+KLlqZ7acunTp1Qq4PnASHBm9fdKs 189 | F1c8gWlEXOMzCsC5BmokcijW7z8JTKszAVi2vpq5MHbtYxZXKQIDAQABo2YwZDAO 190 | BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw 191 | AwEB/zAsBgNVHREEJTAjgglsb2NhbGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAA 192 | AAEwDQYJKoZIhvcNAQELBQADgYEA4yzI/6gjkACdvrnlFm/MJlDQztPYYEAtQ6Sp 193 | 0q0PMQcynLfhH94KMjxJb31HNPJYXr7UrE6gwL2sUnfioXUTQTk35okpphR8MGu2 194 | hZ704px4wdeK/9B5Vh96oMZLYhm9SXizRVAZz7bPFYNMrhyk9lrWZXOaX526w4wI 195 | Y5LTiUQ= 196 | -----END CERTIFICATE-----` 197 | 198 | const tlsKey = `-----BEGIN PRIVATE KEY----- 199 | MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAOtb1nyNKBMx430O 200 | 3focGP+RtT9p4WnXQayEqbwgpYGnLJYqtNKOJgkilMqX/qx0LvtQIOiRgCTrpP4h 201 | ci+9G+o60PUdS34ouWpntpy6dOnVCrg+cBIcGb190qwXVzyBaURc4zMKwLkGaiRy 202 | KNbvPwlMqzMBWLa+mrkwdu1jFlcpAgMBAAECgYEAtZc9LQooIm86izHeWOw26XD9 203 | u/iwf94igL42y70QlbFreE1pCI++jwvMa2fMijh2S1bunSIuEc5yldUuaeDp2FtJ 204 | k7U9orbJspnWy6ixk1KgpjffdHP73r4S3a5G81G8sq9Uvwl0vxF90eTvg9C7kUfk 205 | J1YMy4zcpLtwkCHEkNUCQQDx79t6Dqswi8vDoS0+MCIJNCO4J49ZchL8aXE8n9GT 206 | mF+eOsKy6e5qYH0oYPpeXchwf1tWhX1gBCb3fXrtOoPTAkEA+QoX9S1XofY8YS1M 207 | iNVVSkLjpKgVoTQVe4j+vj16NHouVQ+oOvEUca2LTrHRx+utdar1NSexl51NO0Lj 208 | 3sqnkwJAPNWCC3Pqyb8tEljRxoRV2piYrrKL0gLkEUH2LjdFfGZhDKlb0Z8OywLO 209 | Fbwk2FuejeMINX5FY0JIBg0wPrxq7wJAMoot2n/dLO0/y6jZw1sn9+4jLKM/4Hsl 210 | cPCYYhsv1b6F8JVA2tVaBMfnYY0MubnGdf6/zI3FqLMvnTsx62DNKQJBAMYUaw/D 211 | plXTexeEU/c0BRxQdOkGmDqOQtnuRQUCQq6gu+occTeilgFoKHWT3QcZHIpHxawJ 212 | N2K67EWPRgr3suE= 213 | -----END PRIVATE KEY-----` 214 | -------------------------------------------------------------------------------- /test/apm-server-version.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | // Test that fetching the APM Server version works as expected. 10 | // 11 | // Notes: 12 | // - Testing that the APM Server version fetch request does not hold the 13 | // process open is tested in "side-effects.test.js". 
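//
// For orientation, the behavior exercised here is roughly the following (a
// sketch; the method names are real ones on this client, and the outcomes
// follow the assertions below):
//
//   const client = new Client(validOpts({ serverUrl }))  // no apmServerVersion given
//   // ...the client GETs the APM Server information API ('/') in the background...
//   client._apmServerVersion                       // a parsed version, or null on fetch error
//   client.supportsKeepingUnsampledTransaction()   // false once a version >= 8.0.0 is known
//   client.supportsActivationMethodField()         // false e.g. for the problematic 8.7.0
//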
14 | 15 | const test = require('tape') 16 | const { APMServer, validOpts } = require('./lib/utils') 17 | const Client = require('../') 18 | 19 | test('no APM server version fetch if apmServerVersion is given', function (t) { 20 | t.plan(1) 21 | const server = APMServer(function (req, res) { 22 | t.fail(`there should not be an APM server request: ${req.method} ${req.url}`) 23 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 24 | setTimeout(() => { 25 | t.pass('made it to timeout with no APM server request') 26 | server.close() 27 | client.destroy() 28 | t.end() 29 | }, 100) 30 | }) 31 | }) 32 | 33 | test('APM server version fetch works for "6.6.0"', function (t) { 34 | const server = APMServer(function (req, res) { 35 | t.equal(req.method, 'GET') 36 | t.equal(req.url, '/', 'got APM Server information API request') 37 | 38 | res.writeHead(200) 39 | const verInfo = { 40 | build_date: '2021-09-16T02:05:39Z', 41 | build_sha: 'a183f675ecd03fca4a897cbe85fda3511bc3ca43', 42 | version: '6.6.0' 43 | } 44 | // Pre-7.0.0 versions of APM Server responded with this body: 45 | res.end(JSON.stringify({ ok: verInfo })) 46 | }).client({}, function (client) { 47 | t.strictEqual(client._apmServerVersion, undefined, 48 | 'client._apmServerVersion is undefined immediately after client creation') 49 | t.equal(client.supportsKeepingUnsampledTransaction(), true, 50 | 'client.supportsKeepingUnsampledTransaction() defaults to true before fetch') 51 | t.equal(client.supportsActivationMethodField(), true, 52 | 'client.supportsActivationMethodField() defaults to true before fetch') 53 | 54 | // Currently there isn't a mechanism to wait for the fetch request, so for 55 | // now just wait a bit. 56 | setTimeout(() => { 57 | t.ok(client._apmServerVersion, 'client._apmServerVersion is set') 58 | t.equal(client._apmServerVersion.toString(), '6.6.0') 59 | t.equal(client.supportsKeepingUnsampledTransaction(), true, 60 | 'client.supportsKeepingUnsampledTransaction() is true after fetch') 61 | t.equal(client.supportsActivationMethodField(), false, 62 | 'client.supportsActivationMethodField() is false after fetch') 63 | 64 | server.close() 65 | client.destroy() 66 | t.end() 67 | }, 200) 68 | }) 69 | }) 70 | 71 | test('APM server version fetch works for "7.16.0"', function (t) { 72 | const server = APMServer(function (req, res) { 73 | t.equal(req.method, 'GET') 74 | t.equal(req.url, '/', 'got APM Server information API request') 75 | 76 | res.writeHead(200) 77 | const verInfo = { 78 | build_date: '2021-09-16T02:05:39Z', 79 | build_sha: 'a183f675ecd03fca4a897cbe85fda3511bc3ca43', 80 | version: '7.16.0' 81 | } 82 | res.end(JSON.stringify(verInfo, null, 2)) 83 | }).client({}, function (client) { 84 | t.strictEqual(client._apmServerVersion, undefined, 85 | 'client._apmServerVersion is undefined immediately after client creation') 86 | t.equal(client.supportsKeepingUnsampledTransaction(), true, 87 | 'client.supportsKeepingUnsampledTransaction() defaults to true before fetch') 88 | 89 | // Currently there isn't a mechanism to wait for the fetch request, so for 90 | // now just wait a bit. 
91 | setTimeout(() => { 92 | t.ok(client._apmServerVersion, 'client._apmServerVersion is set') 93 | t.equal(client._apmServerVersion.toString(), '7.16.0') 94 | t.equal(client.supportsKeepingUnsampledTransaction(), true, 95 | 'client.supportsKeepingUnsampledTransaction() is true after fetch') 96 | 97 | server.close() 98 | client.destroy() 99 | t.end() 100 | }, 200) 101 | }) 102 | }) 103 | 104 | test('APM server version fetch works for "8.0.0"', function (t) { 105 | const server = APMServer(function (req, res) { 106 | t.equal(req.method, 'GET') 107 | t.equal(req.url, '/', 'got APM Server information API request') 108 | 109 | res.writeHead(200) 110 | const verInfo = { 111 | build_date: '2021-09-16T02:05:39Z', 112 | build_sha: 'a183f675ecd03fca4a897cbe85fda3511bc3ca43', 113 | version: '8.0.0' 114 | } 115 | res.end(JSON.stringify(verInfo, null, 2)) 116 | }).client({}, function (client) { 117 | t.strictEqual(client._apmServerVersion, undefined, 118 | 'client._apmServerVersion is undefined immediately after client creation') 119 | t.equal(client.supportsKeepingUnsampledTransaction(), true, 120 | 'client.supportsKeepingUnsampledTransaction() defaults to true before fetch') 121 | 122 | // Currently there isn't a mechanism to wait for the fetch request, so for 123 | // now just wait a bit. 124 | setTimeout(() => { 125 | t.ok(client._apmServerVersion, 'client._apmServerVersion is set') 126 | t.equal(client._apmServerVersion.toString(), '8.0.0') 127 | t.equal(client.supportsKeepingUnsampledTransaction(), false, 128 | 'client.supportsKeepingUnsampledTransaction() is false after fetch') 129 | 130 | server.close() 131 | client.destroy() 132 | t.end() 133 | }, 200) 134 | }) 135 | }) 136 | 137 | /** 138 | * APM server 8.7.0 included a bug where APM agents sending `activation_method` 139 | * was harmful. This test ensures we don't send that field to v8.7.0. 140 | * 141 | * See https://github.com/elastic/apm/pull/780 142 | */ 143 | test('APM server version fetch works for "8.7.0"', function (t) { 144 | const server = APMServer(function (req, res) { 145 | res.writeHead(200) 146 | const verInfo = { 147 | build_date: '2023-03-30T22:17:50Z', 148 | build_sha: 'a183f675ecd03fca4a897cbe85fda3511bc3ca43', 149 | version: '8.7.0' 150 | } 151 | res.end(JSON.stringify(verInfo, null, 2)) 152 | }).client({ 153 | agentActivationMethod: 'env-attach' 154 | }, function (client) { 155 | t.strictEqual(client._apmServerVersion, undefined, 156 | 'client._apmServerVersion is undefined immediately after client creation') 157 | t.equal(client._conf.agentActivationMethod, 'env-attach', '_conf.agentActivationMethod') 158 | t.equal(client.supportsActivationMethodField(), true, 159 | 'client.supportsActivationMethodField() defaults to true before fetch') 160 | t.ok('activation_method' in JSON.parse(client._encodedMetadata).metadata.service.agent, 161 | 'metadata includes "activation_method" before fetch') 162 | 163 | // Currently there isn't a mechanism to wait for the fetch request, so for 164 | // now just wait a bit. 
165 | setTimeout(() => { 166 | t.ok(client._apmServerVersion, 'client._apmServerVersion is set') 167 | t.equal(client._apmServerVersion.toString(), '8.7.0') 168 | t.equal(client.supportsActivationMethodField(), false, 169 | 'client.supportsActivationMethodField() is false after fetch') 170 | t.equal(JSON.parse(client._encodedMetadata).metadata.service.agent.activation_method, undefined, 171 | 'metadata does not include "activation_method" after fetch') 172 | 173 | server.close() 174 | client.destroy() 175 | t.end() 176 | }, 200) 177 | }) 178 | }) 179 | 180 | /** 181 | * Starting with APM server 8.7.1, `activation_method` should be sent. 182 | * See https://github.com/elastic/apm/pull/780 183 | */ 184 | test('APM server version fetch works for "8.7.1"', function (t) { 185 | const server = APMServer(function (req, res) { 186 | res.writeHead(200) 187 | const verInfo = { 188 | build_date: '2023-03-30T22:17:50Z', 189 | build_sha: 'a183f675ecd03fca4a897cbe85fda3511bc3ca43', 190 | version: '8.7.1' 191 | } 192 | res.end(JSON.stringify(verInfo, null, 2)) 193 | }).client({ 194 | agentActivationMethod: 'env-attach' 195 | }, function (client) { 196 | t.strictEqual(client._apmServerVersion, undefined, 197 | 'client._apmServerVersion is undefined immediately after client creation') 198 | t.equal(client._conf.agentActivationMethod, 'env-attach', '_conf.agentActivationMethod') 199 | t.equal(client.supportsActivationMethodField(), true, 200 | 'client.supportsActivationMethodField() defaults to true before fetch') 201 | t.ok('activation_method' in JSON.parse(client._encodedMetadata).metadata.service.agent, 202 | 'metadata includes "activation_method" before fetch') 203 | 204 | // Currently there isn't a mechanism to wait for the fetch request, so for 205 | // now just wait a bit. 206 | setTimeout(() => { 207 | t.ok(client._apmServerVersion, 'client._apmServerVersion is set') 208 | t.equal(client._apmServerVersion.toString(), '8.7.1') 209 | t.equal(client.supportsActivationMethodField(), true, 210 | 'client.supportsActivationMethodField() is true after fetch') 211 | t.equal(JSON.parse(client._encodedMetadata).metadata.service.agent.activation_method, 'env-attach', 212 | 'metadata includes "activation_method" after fetch') 213 | 214 | server.close() 215 | client.destroy() 216 | t.end() 217 | }, 200) 218 | }) 219 | }) 220 | 221 | test('APM server version is null on fetch error', function (t) { 222 | const HOPEFULLY_UNUSED_PORT_HACK = 62345 223 | const client = new Client(validOpts({ 224 | serverUrl: 'http://localhost:' + HOPEFULLY_UNUSED_PORT_HACK 225 | })) 226 | client.on('request-error', err => { 227 | t.ok(err, 'got a "request-error" event') 228 | t.ok(/error fetching APM Server version/.test(err.message), 229 | 'error message is about APM Server version fetching') 230 | t.strictEqual(client._apmServerVersion, null, 'client._apmServerVersion') 231 | t.equal(client.supportsKeepingUnsampledTransaction(), true, 232 | 'client.supportsKeepingUnsampledTransaction() defaults to true after failed fetch') 233 | 234 | client.destroy() 235 | t.end() 236 | }) 237 | }) 238 | -------------------------------------------------------------------------------- /test/k8s.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 
5 | */ 6 | 7 | 'use strict' 8 | 9 | const test = require('tape') 10 | const { APMServer, processIntakeReq } = require('./lib/utils') 11 | const getContainerInfo = require('../lib/container-info') 12 | 13 | test('no environment variables', function (t) { 14 | t.plan(1) 15 | 16 | const server = APMServer(function (req, res) { 17 | req = processIntakeReq(req) 18 | req.once('data', function (obj) { 19 | t.equal(obj.metadata.kubernetes, undefined) 20 | }) 21 | req.on('end', function () { 22 | res.end() 23 | server.close() 24 | t.end() 25 | }) 26 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 27 | client.sendError({}) 28 | client.flush(() => { client.destroy() }) 29 | }) 30 | }) 31 | 32 | test('kubernetesNodeName only', function (t) { 33 | t.plan(1) 34 | 35 | const server = APMServer(function (req, res) { 36 | req = processIntakeReq(req) 37 | req.once('data', function (obj) { 38 | t.deepEqual(obj.metadata.system.kubernetes, { node: { name: 'foo' } }) 39 | }) 40 | req.on('end', function () { 41 | res.end() 42 | server.close() 43 | t.end() 44 | }) 45 | }).client({ kubernetesNodeName: 'foo', apmServerVersion: '8.0.0' }, function (client) { 46 | client.sendError({}) 47 | client.flush(() => { client.destroy() }) 48 | }) 49 | }) 50 | 51 | test('kubernetesNamespace only', function (t) { 52 | t.plan(1) 53 | 54 | const server = APMServer(function (req, res) { 55 | req = processIntakeReq(req) 56 | req.once('data', function (obj) { 57 | t.deepEqual(obj.metadata.system.kubernetes, { namespace: 'foo' }) 58 | }) 59 | req.on('end', function () { 60 | res.end() 61 | server.close() 62 | t.end() 63 | }) 64 | }).client({ kubernetesNamespace: 'foo', apmServerVersion: '8.0.0' }, function (client) { 65 | client.sendError({}) 66 | client.flush(() => { client.destroy() }) 67 | }) 68 | }) 69 | 70 | test('kubernetesPodName only', function (t) { 71 | t.plan(1) 72 | 73 | const server = APMServer(function (req, res) { 74 | req = processIntakeReq(req) 75 | req.once('data', function (obj) { 76 | t.deepEqual(obj.metadata.system.kubernetes, { pod: { name: 'foo' } }) 77 | }) 78 | req.on('end', function () { 79 | res.end() 80 | server.close() 81 | t.end() 82 | }) 83 | }).client({ kubernetesPodName: 'foo', apmServerVersion: '8.0.0' }, function (client) { 84 | client.sendError({}) 85 | client.flush(() => { client.destroy() }) 86 | }) 87 | }) 88 | 89 | test('kubernetesPodUID only', function (t) { 90 | t.plan(1) 91 | 92 | const server = APMServer(function (req, res) { 93 | req = processIntakeReq(req) 94 | req.once('data', function (obj) { 95 | t.deepEqual(obj.metadata.system.kubernetes, { pod: { uid: 'foo' } }) 96 | }) 97 | req.on('end', function () { 98 | res.end() 99 | server.close() 100 | t.end() 101 | }) 102 | }).client({ kubernetesPodUID: 'foo', apmServerVersion: '8.0.0' }, function (client) { 103 | client.sendError({}) 104 | client.flush(() => { client.destroy() }) 105 | }) 106 | }) 107 | 108 | test('all', function (t) { 109 | t.plan(1) 110 | 111 | const server = APMServer(function (req, res) { 112 | req = processIntakeReq(req) 113 | req.once('data', function (obj) { 114 | t.deepEqual(obj.metadata.system.kubernetes, { 115 | namespace: 'bar', 116 | node: { name: 'foo' }, 117 | pod: { name: 'baz', uid: 'qux' } 118 | }) 119 | }) 120 | req.on('end', function () { 121 | res.end() 122 | server.close() 123 | t.end() 124 | }) 125 | }).client({ kubernetesNodeName: 'foo', kubernetesNamespace: 'bar', kubernetesPodName: 'baz', kubernetesPodUID: 'qux', apmServerVersion: '8.0.0' }, function (client) { 126 | 
client.sendError({}) 127 | client.flush(() => { client.destroy() }) 128 | }) 129 | }) 130 | 131 | test('all except kubernetesNodeName', function (t) { 132 | t.plan(1) 133 | 134 | const server = APMServer(function (req, res) { 135 | req = processIntakeReq(req) 136 | req.once('data', function (obj) { 137 | t.deepEqual(obj.metadata.system.kubernetes, { 138 | namespace: 'bar', 139 | pod: { name: 'baz', uid: 'qux' } 140 | }) 141 | }) 142 | req.on('end', function () { 143 | res.end() 144 | server.close() 145 | t.end() 146 | }) 147 | }).client({ kubernetesNamespace: 'bar', kubernetesPodName: 'baz', kubernetesPodUID: 'qux', apmServerVersion: '8.0.0' }, function (client) { 148 | client.sendError({}) 149 | client.flush(() => { client.destroy() }) 150 | }) 151 | }) 152 | 153 | test('all except kubernetesNamespace', function (t) { 154 | t.plan(1) 155 | 156 | const server = APMServer(function (req, res) { 157 | req = processIntakeReq(req) 158 | req.once('data', function (obj) { 159 | t.deepEqual(obj.metadata.system.kubernetes, { 160 | node: { name: 'foo' }, 161 | pod: { name: 'baz', uid: 'qux' } 162 | }) 163 | }) 164 | req.on('end', function () { 165 | res.end() 166 | server.close() 167 | t.end() 168 | }) 169 | }).client({ kubernetesNodeName: 'foo', kubernetesPodName: 'baz', kubernetesPodUID: 'qux', apmServerVersion: '8.0.0' }, function (client) { 170 | client.sendError({}) 171 | client.flush(() => { client.destroy() }) 172 | }) 173 | }) 174 | 175 | test('all except kubernetesPodName', function (t) { 176 | t.plan(1) 177 | 178 | const server = APMServer(function (req, res) { 179 | req = processIntakeReq(req) 180 | req.once('data', function (obj) { 181 | t.deepEqual(obj.metadata.system.kubernetes, { 182 | namespace: 'bar', 183 | node: { name: 'foo' }, 184 | pod: { uid: 'qux' } 185 | }) 186 | }) 187 | req.on('end', function () { 188 | res.end() 189 | server.close() 190 | t.end() 191 | }) 192 | }).client({ kubernetesNodeName: 'foo', kubernetesNamespace: 'bar', kubernetesPodUID: 'qux', apmServerVersion: '8.0.0' }, function (client) { 193 | client.sendError({}) 194 | client.flush(() => { client.destroy() }) 195 | }) 196 | }) 197 | 198 | test('all except kubernetesPodUID', function (t) { 199 | t.plan(1) 200 | 201 | const server = APMServer(function (req, res) { 202 | req = processIntakeReq(req) 203 | req.once('data', function (obj) { 204 | t.deepEqual(obj.metadata.system.kubernetes, { 205 | namespace: 'bar', 206 | node: { name: 'foo' }, 207 | pod: { name: 'baz' } 208 | }) 209 | }) 210 | req.on('end', function () { 211 | res.end() 212 | server.close() 213 | t.end() 214 | }) 215 | }).client({ kubernetesNodeName: 'foo', kubernetesNamespace: 'bar', kubernetesPodName: 'baz', apmServerVersion: '8.0.0' }, function (client) { 216 | client.sendError({}) 217 | client.flush(() => { client.destroy() }) 218 | }) 219 | }) 220 | 221 | test('Tests for ../lib/container-info', function (t) { 222 | const fixtures = [ 223 | { 224 | source: '12:freezer:/kubepods.slice/kubepods-pod22949dce_fd8b_11ea_8ede_98f2b32c645c.slice/docker-b15a5bdedd2e7645c3be271364324321b908314e4c77857bbfd32a041148c07f.scope', 225 | expectedPodId: '22949dce-fd8b-11ea-8ede-98f2b32c645c' 226 | }, 227 | { 228 | source: '11:devices:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 229 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 230 | }, 231 | { 232 | source: 
'10:perf_event:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 233 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 234 | }, 235 | { 236 | source: '9:memory:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 237 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 238 | }, 239 | { 240 | source: '8:freezer:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 241 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 242 | }, 243 | { 244 | source: '7:hugetlb:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 245 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 246 | }, 247 | { 248 | source: '6:cpuset:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 249 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 250 | }, 251 | { 252 | source: '5:blkio:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 253 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 254 | }, 255 | { 256 | source: '4:cpu,cpuacct:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 257 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 258 | }, 259 | { 260 | source: '3:net_cls,net_prio:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 261 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 262 | }, 263 | { 264 | source: '2:pids:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 265 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 266 | }, 267 | { 268 | source: '1:name=systemd:/kubepods/besteffort/pod74c13223-5a00-11e9-b385-42010a80018d/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 269 | expectedPodId: '74c13223-5a00-11e9-b385-42010a80018d' 270 | } 271 | ] 272 | for (const [, fixture] of fixtures.entries()) { 273 | const info = getContainerInfo.parse(fixture.source) 274 | t.equals(info.podId, fixture.expectedPodId, 'expected pod ID returned') 275 | } 276 | 277 | t.end() 278 | }) 279 | -------------------------------------------------------------------------------- /lib/truncate.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | var breadthFilter = require('breadth-filter') 10 | 11 | exports.metadata = truncMetadata 12 | exports.transaction = truncTransaction 13 | exports.span = truncSpan 14 | exports.error = truncError 15 | exports.metricset = truncMetricSet 16 | 17 | // Truncate the string `s` to a `max` maximum number of JavaScript characters. 18 | // 19 | // Note that JavaScript uses UCS-2 internally, so characters outside of the 20 | // BMP are represented as surrogate pairs. These count as *two* characters. 
21 | // The result is that a string with surrogate pairs will appear to be truncated 22 | // shorter than expected: 23 | // unitrunc('aaaa', 4) // => 'aaaa' 24 | // unitrunc('😂😂😂😂', 4) // => '😂😂' 25 | // 26 | // This will avoid truncating in the middle of a surrogate pair by truncating 27 | // one character earlier. For example: 28 | // unitrunc('foo😂bar', 4) // => 'foo' 29 | function unitrunc (s, max) { 30 | if (s.length > max) { 31 | if (max <= 0) { 32 | return '' 33 | } 34 | // If the last character is a "high" surrogate (D800–DBFF) per 35 | // https://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates 36 | // then we would truncate in the middle of a surrogate pair. Move back one 37 | // char to have a clean(er) truncation. 38 | const endChar = s.charCodeAt(max - 1) 39 | if (endChar >= 0xd800 && endChar <= 0xdbff) { 40 | return s.slice(0, max - 1) 41 | } else { 42 | return s.slice(0, max) 43 | } 44 | } 45 | return s 46 | } 47 | 48 | function truncMetadata (metadata, opts) { 49 | return breadthFilter(metadata, { 50 | onArray, 51 | onObject, 52 | onValue (value, key, path) { 53 | if (typeof value !== 'string') { 54 | return value 55 | } 56 | 57 | let max = opts.truncateStringsAt 58 | switch (path[0]) { 59 | case 'service': 60 | switch (path[1]) { 61 | case 'name': 62 | case 'version': 63 | case 'environment': 64 | max = opts.truncateKeywordsAt 65 | break 66 | 67 | case 'agent': 68 | case 'framework': 69 | case 'language': 70 | case 'runtime': 71 | switch (path[2]) { 72 | case 'name': 73 | case 'version': 74 | max = opts.truncateKeywordsAt 75 | break 76 | } 77 | break 78 | } 79 | break 80 | 81 | case 'process': 82 | if (path[1] === 'title') { 83 | max = opts.truncateKeywordsAt 84 | } 85 | break 86 | 87 | case 'system': 88 | switch (path[1]) { 89 | case 'architecture': 90 | case 'hostname': 91 | case 'platform': 92 | max = opts.truncateKeywordsAt 93 | break 94 | } 95 | break 96 | case 'cloud': 97 | switch (path[1]) { 98 | case 'availability_zone': 99 | case 'provider': 100 | case 'region': 101 | max = opts.truncateKeywordsAt 102 | break 103 | case 'account': 104 | switch (path[2]) { 105 | case 'id': 106 | case 'name': 107 | max = opts.truncateKeywordsAt 108 | break 109 | } 110 | break 111 | case 'instance': 112 | switch (path[2]) { 113 | case 'id': 114 | case 'name': 115 | max = opts.truncateKeywordsAt 116 | break 117 | } 118 | break 119 | case 'machine': 120 | switch (path[2]) { 121 | case 'type': 122 | max = opts.truncateKeywordsAt 123 | break 124 | } 125 | break 126 | case 'project': 127 | switch (path[2]) { 128 | case 'id': 129 | case 'name': 130 | max = opts.truncateKeywordsAt 131 | break 132 | } 133 | } 134 | break 135 | } 136 | 137 | return unitrunc(value, max) 138 | } 139 | }) 140 | } 141 | 142 | function truncTransaction (trans, opts) { 143 | const result = breadthFilter(trans, { 144 | onArray, 145 | onObject: onObjectWithHeaders, 146 | onValue (value, key, path) { 147 | if (typeof value !== 'string') { 148 | if (isHeader(path)) return String(value) 149 | 150 | return value 151 | } 152 | 153 | let max = opts.truncateStringsAt 154 | switch (path[0]) { 155 | case 'name': 156 | case 'type': 157 | case 'result': 158 | case 'id': 159 | case 'trace_id': 160 | case 'parent_id': 161 | max = opts.truncateKeywordsAt 162 | break 163 | 164 | case 'context': 165 | max = contextLength(path, opts) 166 | break 167 | } 168 | 169 | return unitrunc(value, max) 170 | } 171 | }) 172 | 173 | return Object.assign({ 174 | name: 'undefined', 175 | type: 'undefined', 176 | result: 
'undefined' 177 | }, result) 178 | } 179 | 180 | function truncSpan (span, opts) { 181 | let result = breadthFilter(span, { 182 | onArray, 183 | onObject, 184 | onValue (value, key, path) { 185 | if (typeof value !== 'string') { 186 | return value 187 | } 188 | 189 | let max = opts.truncateStringsAt 190 | switch (path[0]) { 191 | case 'name': 192 | case 'type': 193 | case 'id': 194 | case 'trace_id': 195 | case 'parent_id': 196 | case 'transaction_id': 197 | case 'subtype': 198 | case 'action': 199 | max = opts.truncateKeywordsAt 200 | break 201 | 202 | case 'context': 203 | max = contextLength(path, opts) 204 | break 205 | } 206 | 207 | return unitrunc(value, max) 208 | } 209 | }) 210 | 211 | result = truncateCustomKeys( 212 | result, 213 | opts.truncateCustomKeysAt, 214 | [ 215 | 'name', 216 | 'type', 217 | 'id', 218 | 'trace_id', 219 | 'parent_id', 220 | 'transaction_id', 221 | 'subtype', 222 | 'action', 223 | 'context' 224 | ] 225 | ) 226 | 227 | return Object.assign({ 228 | name: 'undefined', 229 | type: 'undefined' 230 | }, result) 231 | } 232 | 233 | function truncError (error, opts) { 234 | return breadthFilter(error, { 235 | onArray, 236 | onObject: onObjectWithHeaders, 237 | onValue (value, key, path) { 238 | if (typeof value !== 'string') { 239 | if (isHeader(path)) return String(value) 240 | 241 | return value 242 | } 243 | 244 | let max = opts.truncateStringsAt 245 | switch (path[0]) { 246 | case 'id': 247 | case 'trace_id': 248 | case 'parent_id': 249 | case 'transaction_id': 250 | max = opts.truncateKeywordsAt 251 | break 252 | 253 | case 'context': 254 | max = contextLength(path, opts) 255 | break 256 | 257 | case 'log': 258 | switch (path[1]) { 259 | case 'level': 260 | case 'logger_name': 261 | case 'param_message': 262 | max = opts.truncateKeywordsAt 263 | break 264 | 265 | case 'message': 266 | if (opts.truncateErrorMessagesAt === undefined) { 267 | max = opts.truncateLongFieldsAt 268 | } else if (opts.truncateErrorMessagesAt < 0) { 269 | return value // skip truncation 270 | } else { 271 | max = opts.truncateErrorMessagesAt 272 | } 273 | break 274 | } 275 | break 276 | 277 | case 'exception': 278 | switch (path[1]) { 279 | case 'type': 280 | case 'code': 281 | case 'module': 282 | max = opts.truncateKeywordsAt 283 | break 284 | case 'message': 285 | if (opts.truncateErrorMessagesAt === undefined) { 286 | max = opts.truncateLongFieldsAt 287 | } else if (opts.truncateErrorMessagesAt < 0) { 288 | return value // skip truncation 289 | } else { 290 | max = opts.truncateErrorMessagesAt 291 | } 292 | break 293 | } 294 | break 295 | } 296 | 297 | return unitrunc(value, max) 298 | } 299 | }) 300 | } 301 | 302 | function truncMetricSet (metricset, opts) { 303 | return breadthFilter(metricset, { 304 | onArray, 305 | onObject, 306 | onValue (value, key, path) { 307 | if (typeof value !== 'string') { 308 | return value 309 | } 310 | 311 | const max = path[0] === 'tags' 312 | ? 
opts.truncateKeywordsAt 313 | : opts.truncateStringsAt 314 | 315 | return unitrunc(value, max) 316 | } 317 | }) 318 | } 319 | 320 | function contextLength (path, opts) { 321 | switch (path[1]) { 322 | case 'db': 323 | if (path[2] === 'statement') { 324 | return opts.truncateLongFieldsAt 325 | } 326 | break 327 | 328 | case 'message': 329 | if (path[2] === 'body') { 330 | return opts.truncateLongFieldsAt 331 | } 332 | break 333 | 334 | case 'request': 335 | switch (path[2]) { 336 | case 'method': 337 | case 'http_version': 338 | return opts.truncateKeywordsAt 339 | 340 | case 'body': 341 | return opts.truncateLongFieldsAt 342 | 343 | case 'url': 344 | switch (path[3]) { 345 | case 'protocol': 346 | case 'hostname': 347 | case 'port': 348 | case 'pathname': 349 | case 'search': 350 | case 'hash': 351 | case 'raw': 352 | case 'full': 353 | return opts.truncateKeywordsAt 354 | } 355 | break 356 | } 357 | break 358 | 359 | case 'user': 360 | switch (path[2]) { 361 | case 'id': 362 | case 'email': 363 | case 'username': 364 | return opts.truncateKeywordsAt 365 | } 366 | break 367 | 368 | case 'tags': 369 | return opts.truncateKeywordsAt 370 | 371 | case 'destination': 372 | switch (path[2]) { 373 | case 'address': 374 | return opts.truncateKeywordsAt 375 | 376 | case 'service': 377 | switch (path[3]) { 378 | case 'name': 379 | case 'resource': 380 | case 'type': 381 | return opts.truncateKeywordsAt 382 | } 383 | break 384 | } 385 | break 386 | } 387 | 388 | return opts.truncateStringsAt 389 | } 390 | 391 | function isHeader (path) { 392 | return path[0] === 'context' && (path[1] === 'request' || path[1] === 'response') && path[2] === 'headers' && path[3] 393 | } 394 | 395 | function onObjectWithHeaders (value, key, path, isNew) { 396 | if (isHeader(path)) return String(value) 397 | return onObject(value, key, path, isNew) 398 | } 399 | 400 | function onObject (value, key, path, isNew) { 401 | return isNew ? {} : '[Circular]' 402 | } 403 | 404 | function onArray (value, key, path, isNew) { 405 | return isNew ? [] : '[Circular]' 406 | } 407 | 408 | function truncateCustomKeys (value, max, keywords) { 409 | if (typeof value !== 'object' || value === null) { 410 | return value 411 | } 412 | const result = value 413 | const keys = Object.keys(result) 414 | const truncatedKeys = keys.map(k => { 415 | if (keywords.includes(k)) { 416 | return k 417 | } 418 | return unitrunc(k, max) 419 | }) 420 | 421 | for (const [index, k] of keys.entries()) { 422 | const value = result[k] 423 | delete result[k] 424 | const newKey = truncatedKeys[index] 425 | result[newKey] = truncateCustomKeys(value, max, keywords) 426 | } 427 | return result 428 | } 429 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # elastic-apm-http-client changelog 2 | 3 | ## v12.0.0 4 | 5 | - **Breaking change.** The `hostname` configuration option has been renamed to 6 | `configuredHostname`. As well, the hostname detection has changed to prefer 7 | using a FQDN, if available. See [the spec](https://github.com/elastic/apm/blob/main/specs/agents/metadata.md#hostname). 8 | (https://github.com/elastic/apm-agent-nodejs/issues/3310) 9 | 10 | - The APM client will send `metadata.system.detected_hostname` and 11 | `metadata.system.configured_hostname` as appropriate for APM server versions 12 | >=7.4, rather than the now deprecated `metadata.system.hostname`. 
13 | See [the spec](https://github.com/elastic/apm/blob/main/specs/agents/metadata.md#hostname). 14 | 15 | ## v11.4.0 16 | 17 | - Add support for pre-registering of partial transactions for AWS Lambda. 18 | This adds `client.lambdaShouldRegisterTransactions()` and 19 | `client.lambdaRegisterTransaction(transaction, awsRequestId)` so the 20 | APM agent can register a partial transaction with the Elastic Lambda 21 | extension before executing the user's handler. In some error cases 22 | (`uncaughtException`, `unhandledRejection`, Lambda timeout), the extension 23 | can report that transaction when the APM agent is unable. 24 | (https://github.com/elastic/apm-agent-nodejs/issues/3136) 25 | 26 | ## v11.3.1 27 | 28 | - Tweak logic to only exclude `metadata.service.agent.activation_method` when 29 | the APM server version is known to be 8.7.0 -- i.e. optimistically assume 30 | it is a version that is fine. The APM server 8.7.0 issue isn't so severe that 31 | we want a fast first serverless function invocation to not send the field. 32 | (https://github.com/elastic/apm/pull/783) 33 | 34 | ## v11.3.0 35 | 36 | - Ensure `metadata.service.agent.activation_method` is only sent for APM 37 | server version 8.7.1 or later. APM server 8.7.0 included a bug where 38 | receiving `activation_method` is harmful. 39 | (https://github.com/elastic/apm-agent-nodejs/issues/3230) 40 | 41 | This change adds the `client.supportsActivationMethodField()` method. 42 | 43 | ## v11.2.0 44 | 45 | - Support a new `agentActivationMethod` string config var that is added to 46 | `metadata.service.agent.activation_method`. Spec: 47 | https://github.com/elastic/apm/blob/main/specs/agents/metadata.md#activation-method 48 | 49 | ## v11.1.0 50 | 51 | - Add an `extraMetadata` config option, which is an object to merge into the 52 | built metadata object. This is an alternative to the existing 53 | `cloudMetadataFetcher` and `expectExtraMetadata` options which provide ways 54 | to asynchronously provide metadata. Only one (or zero) of these three options 55 | may be used. 56 | 57 | ## v11.0.4 58 | 59 | - Update the default `serverUrl` to "http://127.0.0.1:8200". We no longer use 60 | "localhost" to avoid ambiguity if localhost resolves to multiple addresses 61 | (e.g. IPv4 and IPv6). APM server only listens on IPv4 by default. 62 | (https://github.com/elastic/apm-agent-nodejs/pull/3049) 63 | 64 | ## v11.0.3 65 | 66 | - Prevent a possible tight loop in central config fetching. 67 | (https://github.com/elastic/apm-agent-nodejs/issues/3029) 68 | 69 | ## v11.0.2 70 | 71 | **Bad release. Upgrade to 11.0.3.** 72 | 73 | - Add guards to ensure that a crazy `Cache-Control: max-age=...` response 74 | header cannot accidentally result in inappropriate intervals for fetching 75 | central config. The re-fetch delay is clamped to `[5 seconds, 1 day]`. 76 | (https://github.com/elastic/apm-agent-nodejs/issues/2941) 77 | 78 | - Improve container-info gathering to support AWS ECS/Fargate environments. 79 | (https://github.com/elastic/apm-agent-nodejs/issues/2914) 80 | 81 | ## v11.0.1 82 | 83 | - Fix an issue when running in a Lambda function, where a missing or erroring 84 | APM Lambda extension could result in apmclient back-off such that (a) the 85 | end-of-lambda-invocation signaling (`?flushed=true`) would not happen and 86 | (b) premature "beforeExit" event could result in the Lambda Runtime 87 | responding `null` before the Lambda function could respond 88 | (https://github.com/elastic/apm-agent-nodejs/issues/1831). 
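Taken together, the Lambda-related changes in the 11.x releases (v11.4.0 above,
v11.0.0 below) imply a per-invocation flow roughly like the following sketch.
This is illustrative only: the handler wiring is hypothetical, and the
"Notes on Lambda usage" section of the README is the authoritative description.

```js
// Sketch of expected per-invocation Client usage in an AWS Lambda environment,
// where `client` is an instance of this APM HTTP client.
async function onInvocation (partialTrans, awsRequestId) {
  client.lambdaStart() // invocation begins; buffered events may now be sent

  // Pre-register a partial transaction with the Elastic Lambda extension so it
  // can report the transaction if the agent cannot (timeout, crash, ...).
  if (client.lambdaShouldRegisterTransactions()) {
    await client.lambdaRegisterTransaction(partialTrans, awsRequestId)
  }

  // ... run the user's handler; send transactions, spans and errors as usual ...

  // At the end of the invocation: flush and signal the extension (`?flushed=true`).
  await new Promise(resolve => client.flush({ lambdaEnd: true }, resolve))
}
```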
89 | 90 | ## v11.0.0 91 | 92 | - Add support for coordinating data flushing in an AWS Lambda environment. The 93 | following two API additions are used to ensure that (a) the Elastic Lambda 94 | extension is signaled at invocation end [per spec](https://github.com/elastic/apm/blob/main/specs/agents/tracing-instrumentation-aws-lambda.md#data-flushing) 95 | and (b) a new intake request is not started when a Lambda function invocation 96 | is not active. 97 | 98 | - `Client#lambdaStart()` should be used to indicate when a Lambda function 99 | invocation begins. 100 | - `Client#flush([opts,] cb)` now supports an optional `opts.lambdaEnd` 101 | boolean. Set it to true to indicate this is a flush at the end of a Lambda 102 | function invocation. 103 | 104 | This is a **BREAKING CHANGE**, because current versions of elastic-apm-node 105 | depend on `^10.4.0`. If this were released as another 10.x, then usage of 106 | current elastic-apm-node with this version of the client would break 107 | behavior in a Lambda environment. 108 | 109 | - Add the `freeSocketTimeout` option, with a default of 4000 (ms), and switch 110 | from Node.js's core `http.Agent` to the [agentkeepalive package](https://github.com/node-modules/agentkeepalive) 111 | to fix ECONNRESET issues with HTTP Keep-Alive usage talking to APM Server 112 | (https://github.com/elastic/apm-agent-nodejs/issues/2594). 113 | 114 | ## v10.4.0 115 | 116 | - Add APM Server version checking to the client. On creation the client will 117 | call the [APM Server Information API](https://www.elastic.co/guide/en/apm/server/current/server-info.html) 118 | to get the server version and save that. 119 | 120 | The new `Client#supportsKeepingUnsampledTransaction()` boolean method returns 121 | `true` if APM Server is a version that requires unsampled transactions to 122 | be sent. This will be used by the APM Agent to [drop unsampled transactions 123 | for newer APM Servers](https://github.com/elastic/apm-agent-nodejs/issues/2455). 124 | 125 | There is a new `apmServerVersion: ` config option to tell the Client 126 | to skip fetching the APM Server version and use the given value. This config 127 | option is intended mainly for internal test suite usage. 128 | 129 | ## v10.3.0 130 | 131 | - Add the `expectExtraMetadata: true` configuration option and 132 | `Client#setExtraMetadata(metadata)` method to provide a mechanism for the 133 | Node.js APM Agent to pass in metadata asynchronously and be sure that the 134 | client will not begin an intake request until that metadata is provided. 135 | This is to support passing in [AWS Lambda metadata that cannot be gathered 136 | until the first Lambda function 137 | invocation](https://github.com/elastic/apm-agent-nodejs/issues/2404). 138 | (Note: The `expectExtraMetadata` option cannot be used in combination with 139 | `cloudMetadataFetcher`.) 140 | 141 | - Use `Z_BEST_SPEED` for gzip compression per 142 | https://github.com/elastic/apm/blob/main/specs/agents/transport.md#compression 143 | 144 | ## v10.2.0 145 | 146 | - The client will no longer append data to the configured `userAgent` string. 147 | Before this it would append " elastic-apm-http-client/$ver node/$ver". This 148 | is to support [the APM agents spec for 149 | User-Agent](https://github.com/elastic/apm/blob/main/specs/agents/transport.md#user-agent). 150 | 151 | 152 | ## v10.1.0 153 | 154 | - Fix client handling of an AWS Lambda environment: 155 | 1. `client.flush()` will initiate a quicker completion of the current intake 156 | request. 157 | 2. 
The process 'beforeExit' event is *not* used to start a graceful shutdown 158 | of the client, because the Lambda Runtime sometimes uses 'beforeExit' to 159 | handle *freezing* of the Lambda VM instance. That VM instance is typically 160 | unfrozen and used again, for which this Client is still needed. 161 | 162 | ## v10.0.0 163 | 164 | - All truncation of string fields (per `truncate*At` config options) has 165 | changed to truncating at a number of unicode chars, rather than a number 166 | of bytes. This is both faster and matches [the json-schema spec](https://json-schema.org/draft/2019-09/json-schema-validation.html#rfc.section.6.3.1) 167 | for [apm-server intake fields](https://www.elastic.co/guide/en/apm/server/current/events-api.html#events-api-schema-definition) 168 | that specify `maxLength`. 169 | - BREAKING CHANGE: The `truncateQueriesAt` config option has been removed. 170 | - In its place the `truncateLongFieldsAt` config option has been added to cover 171 | `span.context.db.statement` and a number of other possibly-long fields (per 172 | [spec](https://github.com/elastic/apm/blob/main/specs/agents/field-limits.md#long_field_max_length-configuration)). 173 | This *does* mean that in rare cases of long field values longer than the 174 | default 10000 chars, this change will result in those values being truncated. 175 | - The `truncateErrorMessagesAt` config option has been deprecated, in favor 176 | of `truncateLongFieldsAt`. Note, however, that `truncateLongFieldsAt` does 177 | *not* support the special case `-1` value to disable truncation. If 178 | `truncateErrorMessagesAt` is not specified, the value for 179 | `truncateLongFieldsAt` is used. This means the effective default is now 10000, 180 | no longer 2048. 181 | 182 | ## v9.9.0 183 | 184 | - feat: Use uninstrumented HTTP(S) client request functions to avoid tracing 185 | requests made by the APM agent itself. 186 | ([#161](https://github.com/elastic/apm-nodejs-http-client/pull/161)) 187 | 188 | ## v9.8.1 189 | 190 | - perf: eliminate encodeObject stack and faster loop in `_writeBatch` 191 | ([#159](https://github.com/elastic/apm-nodejs-http-client/pull/159)) 192 | - test: start testing with node 16 193 | ([#157](https://github.com/elastic/apm-nodejs-http-client/pull/157)) 194 | 195 | ## v9.8.0 196 | 197 | - Add `client.addMetadataFilter(fn)`. See the 198 | [APM agent issue](https://github.com/elastic/apm-agent-nodejs/issues/1916). 199 | 200 | ## v9.7.1 201 | 202 | - Fix to ensure the `client.flush(cb)` callback is called in the (expected to 203 | be rare) case where there are no active handles -- i.e., the process is 204 | exiting. 205 | ([#150](https://github.com/elastic/apm-nodejs-http-client/issues/150)) 206 | 207 | ## v9.7.0 208 | 209 | - A number of changes were made to fix issues with the APM agent under heavy 210 | load and with a slow or non-responsive APM server. 211 | ([#144](https://github.com/elastic/apm-nodejs-http-client/pull/144)) 212 | 213 | 1. A new `maxQueueSize` config option is added (default 1024 for now) to 214 | control how many events (transactions, spans, errors, metricsets) 215 | will be queued before being dropped if events are incoming faster 216 | than can be sent to APM server. This ensures the APM agent memory usage 217 | does not grow unbounded. 218 | 219 | 2. JSON encoding of events (when uncorking) is done in limited size 220 | batches to control the amount of single chunk CPU eventloop blocking 221 | time. (See MAX_WRITE_BATCH_SIZE in Client._writev.)
Internal stats 222 | are collected to watch for long(est) batch processing times. 223 | 224 | 3. The handling of individual requests to the APM Server intake API has 225 | been rewritten to handle some error cases -- especially from a 226 | non-responsive APM server -- and to ensure that only one intake 227 | request is being performed at a time. Two new config options -- 228 | `intakeResTimeout` and `intakeResTimeoutOnEnd` -- have been added to 229 | allow fine control over some parts of this handling. See the comment on 230 | `makeIntakeRequest` for the best overview. 231 | 232 | 4. Support for backoff on intake API requests has been implemented per 233 | https://github.com/elastic/apm/blob/main/specs/agents/transport.md#transport-errors 234 | 235 | - Started testing against node v15 in preparation for supporting the coming 236 | node v16. 237 | 238 | ## v9.6.0 239 | 240 | - Fix config initialization such that the keep-alive agent is used all the 241 | time, as intended. Before this change the keep-alive HTTP(S) agent would only 242 | be used if a second call to `client.config(...)` was made. For the [Elastic 243 | APM Agent](https://github.com/elastic/apm-agent-nodejs)'s usage of this 244 | module, that was when any of the express, fastify, restify, hapi, or koa 245 | modules was instrumented. ([#139](https://github.com/elastic/apm-nodejs-http-client/pull/139)) 246 | 247 | A compatibility note for direct users of this APM http-client: 248 | Options passed to the 249 | [`Writable`](https://nodejs.org/api/stream.html#stream_new_stream_writable_options) 250 | and [`http[s].Agent`](https://nodejs.org/api/http.html#http_new_agent_options) 251 | constructors no longer include the full options object passed to the 252 | [Client constructor](https://github.com/elastic/apm-nodejs-http-client/blob/main/README.md#new-clientoptions). 253 | Therefore *undocumented* options can no longer be used. 254 | 255 | ## v9.5.1 256 | 257 | - Fix possible crash when polling apm-server for config. Specifically it 258 | could happen with the Elastic Node.js APM agent when: 259 | 260 | 1. using node.js v12; 261 | 2. instrumenting one of hapi, restify, koa, express, or fastify; and 262 | 3. on a *second* request to APM server *that fails* (non-200 response). 263 | 264 | https://github.com/elastic/apm-agent-nodejs/issues/1749 265 | 266 | ## v9.5.0 267 | 268 | (This changelog was started after the 9.5.0 release.) 269 | -------------------------------------------------------------------------------- /test/container-info.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 
5 | */ 6 | 7 | 'use strict' 8 | 9 | const fs = require('fs') 10 | const path = require('path') 11 | const tape = require('tape') 12 | 13 | const data = fs.readFileSync(path.join(__dirname, 'fixtures', 'cgroup')) 14 | const expected = require('./fixtures/cgroup_result') 15 | 16 | process.env.ECS_CONTAINER_METADATA_FILE = path.join(__dirname, 'fixtures', 'ecs-container-metadata.json') 17 | 18 | const containerInfo = require('../lib/container-info') 19 | const { parse, sync } = containerInfo 20 | 21 | tape.test('basics', t => { 22 | t.deepEqual(parse(` 23 | 12:devices:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 24 | 11:hugetlb:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 25 | 10:memory:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 26 | 9:freezer:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 27 | 8:perf_event:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 28 | 7:blkio:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 29 | 6:pids:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 30 | 5:rdma:/ 31 | 4:cpuset:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 32 | 3:net_cls,net_prio:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 33 | 2:cpu,cpuacct:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 34 | 1:name=systemd:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76 35 | 0::/system.slice/docker.service 36 | `), { 37 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 38 | entries: [ 39 | { 40 | id: '12', 41 | groups: 'devices', 42 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 43 | controllers: ['devices'], 44 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 45 | }, 46 | { 47 | id: '11', 48 | groups: 'hugetlb', 49 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 50 | controllers: ['hugetlb'], 51 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 52 | }, 53 | { 54 | id: '10', 55 | groups: 'memory', 56 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 57 | controllers: ['memory'], 58 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 59 | }, 60 | { 61 | id: '9', 62 | groups: 'freezer', 63 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 64 | controllers: ['freezer'], 65 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 66 | }, 67 | { 68 | id: '8', 69 | groups: 'perf_event', 70 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 71 | controllers: ['perf_event'], 72 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 73 | }, 74 | { 75 | id: '7', 76 | groups: 'blkio', 77 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 78 | controllers: ['blkio'], 79 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 80 | }, 81 | { 82 | id: '6', 83 | groups: 'pids', 84 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 85 | controllers: ['pids'], 86 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 87 | }, 88 | { 89 | id: '5', 90 | groups: 'rdma', 91 | path: '/', 92 
| controllers: ['rdma'] 93 | }, 94 | { 95 | id: '4', 96 | groups: 'cpuset', 97 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 98 | controllers: ['cpuset'], 99 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 100 | }, 101 | { 102 | id: '3', 103 | groups: 'net_cls,net_prio', 104 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 105 | controllers: ['net_cls', 'net_prio'], 106 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 107 | }, 108 | { 109 | id: '2', 110 | groups: 'cpu,cpuacct', 111 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 112 | controllers: ['cpu', 'cpuacct'], 113 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 114 | }, 115 | { 116 | id: '1', 117 | groups: 'name=systemd', 118 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76', 119 | controllers: ['name=systemd'], 120 | containerId: '051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76' 121 | }, 122 | { 123 | id: '0', 124 | groups: '', 125 | path: '/system.slice/docker.service', 126 | controllers: [''] 127 | } 128 | ] 129 | }) 130 | 131 | t.deepEqual(parse(` 132 | 3:cpuacct:/ecs/eb9d3d0c-8936-42d7-80d8-f82b2f1a629e/7e9139716d9e5d762d22f9f877b87d1be8b1449ac912c025a984750c5dbff157 133 | `), { 134 | containerId: '7e9139716d9e5d762d22f9f877b87d1be8b1449ac912c025a984750c5dbff157', 135 | entries: [ 136 | { 137 | id: '3', 138 | groups: 'cpuacct', 139 | path: '/ecs/eb9d3d0c-8936-42d7-80d8-f82b2f1a629e/7e9139716d9e5d762d22f9f877b87d1be8b1449ac912c025a984750c5dbff157', 140 | controllers: ['cpuacct'], 141 | containerId: '7e9139716d9e5d762d22f9f877b87d1be8b1449ac912c025a984750c5dbff157' 142 | } 143 | ] 144 | }) 145 | 146 | t.deepEqual(parse(` 147 | 1:name=systemd:/system.slice/docker-cde7c2bab394630a42d73dc610b9c57415dced996106665d427f6d0566594411.scope 148 | `), { 149 | containerId: 'cde7c2bab394630a42d73dc610b9c57415dced996106665d427f6d0566594411', 150 | entries: [ 151 | { 152 | id: '1', 153 | groups: 'name=systemd', 154 | path: '/system.slice/docker-cde7c2bab394630a42d73dc610b9c57415dced996106665d427f6d0566594411.scope', 155 | controllers: ['name=systemd'], 156 | containerId: 'cde7c2bab394630a42d73dc610b9c57415dced996106665d427f6d0566594411' 157 | } 158 | ] 159 | }) 160 | 161 | t.deepEqual(parse(` 162 | 1:name=systemd:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76/not_hex 163 | `), { 164 | entries: [ 165 | { 166 | id: '1', 167 | groups: 'name=systemd', 168 | path: '/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76/not_hex', 169 | controllers: ['name=systemd'] 170 | } 171 | ] 172 | }) 173 | 174 | t.deepEqual(parse(` 175 | 1:name=systemd:/kubepods/besteffort/pode9b90526-f47d-11e8-b2a5-080027b9f4fb/15aa6e53-b09a-40c7-8558-c6c31e36c88a 176 | `), { 177 | containerId: '15aa6e53-b09a-40c7-8558-c6c31e36c88a', 178 | podId: 'e9b90526-f47d-11e8-b2a5-080027b9f4fb', 179 | entries: [ 180 | { 181 | id: '1', 182 | groups: 'name=systemd', 183 | path: '/kubepods/besteffort/pode9b90526-f47d-11e8-b2a5-080027b9f4fb/15aa6e53-b09a-40c7-8558-c6c31e36c88a', 184 | controllers: ['name=systemd'], 185 | containerId: '15aa6e53-b09a-40c7-8558-c6c31e36c88a', 186 | podId: 'e9b90526-f47d-11e8-b2a5-080027b9f4fb' 187 | } 188 | ] 189 | }) 190 | 191 | t.deepEqual(parse(` 192 | 
1:name=systemd:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90d81341_92de_11e7_8cf2_507b9d4141fa.slice/crio-2227daf62df6694645fee5df53c1f91271546a9560e8600a525690ae252b7f63.scope 193 | `), { 194 | containerId: '2227daf62df6694645fee5df53c1f91271546a9560e8600a525690ae252b7f63', 195 | podId: '90d81341-92de-11e7-8cf2-507b9d4141fa', 196 | entries: [ 197 | { 198 | id: '1', 199 | groups: 'name=systemd', 200 | path: '/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90d81341_92de_11e7_8cf2_507b9d4141fa.slice/crio-2227daf62df6694645fee5df53c1f91271546a9560e8600a525690ae252b7f63.scope', 201 | controllers: ['name=systemd'], 202 | containerId: '2227daf62df6694645fee5df53c1f91271546a9560e8600a525690ae252b7f63', 203 | podId: '90d81341-92de-11e7-8cf2-507b9d4141fa' 204 | } 205 | ] 206 | }) 207 | 208 | t.deepEqual(parse(` 209 | 1:name=systemd:/ecs/46686c7c701cdfdf2549f88f7b9575e9/46686c7c701cdfdf2549f88f7b9575e9-2574839563 210 | `), { 211 | containerId: '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 212 | taskId: '46686c7c701cdfdf2549f88f7b9575e9', 213 | entries: [ 214 | { 215 | id: '1', 216 | groups: 'name=systemd', 217 | path: '/ecs/46686c7c701cdfdf2549f88f7b9575e9/46686c7c701cdfdf2549f88f7b9575e9-2574839563', 218 | controllers: ['name=systemd'], 219 | taskId: '46686c7c701cdfdf2549f88f7b9575e9' 220 | } 221 | ] 222 | }) 223 | 224 | t.deepEqual(parse(` 225 | 12:devices:/user.slice 226 | 11:hugetlb:/ 227 | 10:memory:/user.slice 228 | 9:freezer:/ 229 | 8:perf_event:/ 230 | 7:blkio:/user.slice 231 | 6:pids:/user.slice/user-1000.slice/session-2.scope 232 | 5:rdma:/ 233 | 4:cpuset:/ 234 | 3:net_cls,net_prio:/ 235 | 2:cpu,cpuacct:/user.slice 236 | 1:name=systemd:/user.slice/user-1000.slice/session-2.scope 237 | 0::/user.slice/user-1000.slice/session-2.scope 238 | `), { 239 | entries: [ 240 | { 241 | id: '12', 242 | groups: 'devices', 243 | path: '/user.slice', 244 | controllers: ['devices'] 245 | }, 246 | { 247 | id: '11', 248 | groups: 'hugetlb', 249 | path: '/', 250 | controllers: ['hugetlb'] 251 | }, 252 | { 253 | id: '10', 254 | groups: 'memory', 255 | path: '/user.slice', 256 | controllers: ['memory'] 257 | }, 258 | { 259 | id: '9', 260 | groups: 'freezer', 261 | path: '/', 262 | controllers: ['freezer'] 263 | }, 264 | { 265 | id: '8', 266 | groups: 'perf_event', 267 | path: '/', 268 | controllers: ['perf_event'] 269 | }, 270 | { 271 | id: '7', 272 | groups: 'blkio', 273 | path: '/user.slice', 274 | controllers: ['blkio'] 275 | }, 276 | { 277 | id: '6', 278 | groups: 'pids', 279 | path: '/user.slice/user-1000.slice/session-2.scope', 280 | controllers: ['pids'] 281 | }, 282 | { 283 | id: '5', 284 | groups: 'rdma', 285 | path: '/', 286 | controllers: ['rdma'] 287 | }, 288 | { 289 | id: '4', 290 | groups: 'cpuset', 291 | path: '/', 292 | controllers: ['cpuset'] 293 | }, 294 | { 295 | id: '3', 296 | groups: 'net_cls,net_prio', 297 | path: '/', 298 | controllers: ['net_cls', 'net_prio'] 299 | }, 300 | { 301 | id: '2', 302 | groups: 'cpu,cpuacct', 303 | path: '/user.slice', 304 | controllers: ['cpu', 'cpuacct'] 305 | }, 306 | { 307 | id: '1', 308 | groups: 'name=systemd', 309 | path: '/user.slice/user-1000.slice/session-2.scope', 310 | controllers: ['name=systemd'] 311 | }, 312 | { 313 | id: '0', 314 | groups: '', 315 | path: '/user.slice/user-1000.slice/session-2.scope', 316 | controllers: [''] 317 | } 318 | ] 319 | }) 320 | 321 | t.end() 322 | }) 323 | 324 | tape.test('containerInfo()', t => { 325 | t.plan(2) 326 | 327 | const readFile = fs.readFile 328 | 
fs.readFile = function (path, cb) { 329 | fs.readFile = readFile 330 | t.equal(path, '/proc/self/cgroup') 331 | cb(null, data) 332 | } 333 | 334 | containerInfo().then(result => { 335 | t.deepEqual(result, expected) 336 | t.end() 337 | }) 338 | }) 339 | 340 | tape.test('containerInfo(123)', t => { 341 | t.plan(2) 342 | 343 | const readFile = fs.readFile 344 | fs.readFile = function (path, cb) { 345 | fs.readFile = readFile 346 | t.equal(path, '/proc/123/cgroup') 347 | cb(null, data) 348 | } 349 | 350 | containerInfo(123).then(result => { 351 | t.deepEqual(result, expected) 352 | t.end() 353 | }) 354 | }) 355 | 356 | tape.test('containerInfo() - error', t => { 357 | t.plan(1) 358 | 359 | const readFile = fs.readFile 360 | fs.readFile = function (path, cb) { 361 | fs.readFile = readFile 362 | cb(new Error('boom')) 363 | } 364 | 365 | containerInfo(123).then(result => { 366 | t.deepEqual(result, undefined) 367 | t.end() 368 | }) 369 | }) 370 | 371 | tape.test('containerInfoSync()', t => { 372 | t.plan(2) 373 | 374 | const readFileSync = fs.readFileSync 375 | fs.readFileSync = function (path) { 376 | fs.readFileSync = readFileSync 377 | t.equal(path, '/proc/self/cgroup') 378 | return data 379 | } 380 | 381 | t.deepEqual(sync(), expected) 382 | t.end() 383 | }) 384 | 385 | tape.test('containerInfoSync(123)', t => { 386 | t.plan(2) 387 | 388 | const readFileSync = fs.readFileSync 389 | fs.readFileSync = function (path) { 390 | fs.readFileSync = readFileSync 391 | t.equal(path, '/proc/123/cgroup') 392 | return data 393 | } 394 | 395 | t.deepEqual(sync(123), expected) 396 | t.end() 397 | }) 398 | 399 | tape.test('containerInfoSync() - error', t => { 400 | t.plan(1) 401 | 402 | const readFileSync = fs.readFileSync 403 | fs.readFileSync = function () { 404 | fs.readFileSync = readFileSync 405 | throw new Error('boom') 406 | } 407 | 408 | t.deepEqual(sync(), undefined) 409 | t.end() 410 | }) 411 | 412 | tape.test('ecs without metadata file present', t => { 413 | const originalEcsFile = process.env.ECS_CONTAINER_METADATA_FILE 414 | containerInfo.resetEcsMetadata(null) 415 | 416 | t.equals( 417 | containerInfo.parse('15:name=systemd:/ecs/03752a671e744971a862edcee6195646/03752a671e744971a862edcee6195646-4015103728').containerId, 418 | '03752a671e744971a862edcee6195646-4015103728', 419 | 'fargate id parsed' 420 | ) 421 | 422 | containerInfo.resetEcsMetadata(originalEcsFile) 423 | t.equals( 424 | containerInfo.parse('15:name=systemd:/ecs/03752a671e744971a862edcee6195646/03752a671e744971a862edcee6195646-4015103728').containerId, 425 | '34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376', 426 | 'container id from metadata file' 427 | ) 428 | t.end() 429 | }) 430 | -------------------------------------------------------------------------------- /test/truncate.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 
5 | */ 6 | 7 | 'use strict' 8 | 9 | const test = require('tape') 10 | const utils = require('./lib/utils') 11 | 12 | const APMServer = utils.APMServer 13 | const processIntakeReq = utils.processIntakeReq 14 | const assertIntakeReq = utils.assertIntakeReq 15 | const assertMetadata = utils.assertMetadata 16 | const assertEvent = utils.assertEvent 17 | const truncate = require('../lib/truncate') 18 | 19 | const options = [ 20 | {}, // default options 21 | { truncateKeywordsAt: 100, truncateErrorMessagesAt: 200, truncateStringsAt: 300, truncateLongFieldsAt: 400 }, 22 | { truncateErrorMessagesAt: -1 } 23 | ] 24 | 25 | options.forEach(function (opts) { 26 | const clientOpts = Object.assign({ apmServerVersion: '8.0.0' }, opts) 27 | const veryLong = 12000 28 | const lineLen = opts.truncateStringsAt || 1024 29 | const longFieldLen = opts.truncateLongFieldsAt || 10000 30 | const keywordLen = opts.truncateKeywordsAt || 1024 31 | const customKeyLen = opts.truncateCustomKeysAt || 1024 32 | const errMsgLen = opts.truncateErrorMessagesAt === -1 33 | ? veryLong 34 | : (opts.truncateErrorMessagesAt || longFieldLen) 35 | 36 | test('truncate transaction', function (t) { 37 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 38 | const datas = [ 39 | assertMetadata, 40 | assertEvent({ 41 | transaction: { 42 | id: 'abc123', 43 | name: genStr('a', keywordLen), 44 | type: genStr('b', keywordLen), 45 | result: genStr('c', keywordLen), 46 | sampled: true, 47 | context: { 48 | request: { 49 | method: genStr('d', keywordLen), 50 | url: { 51 | protocol: genStr('e', keywordLen), 52 | hostname: genStr('f', keywordLen), 53 | port: genStr('g', keywordLen), 54 | pathname: genStr('h', keywordLen), 55 | search: genStr('i', keywordLen), 56 | hash: genStr('j', keywordLen), 57 | raw: genStr('k', keywordLen), 58 | full: genStr('l', keywordLen) 59 | } 60 | }, 61 | user: { 62 | id: genStr('m', keywordLen), 63 | email: genStr('n', keywordLen), 64 | username: genStr('o', keywordLen) 65 | }, 66 | custom: { 67 | foo: genStr('p', lineLen) 68 | } 69 | } 70 | } 71 | }) 72 | ] 73 | const server = APMServer(function (req, res) { 74 | assertIntakeReq(t, req) 75 | req = processIntakeReq(req) 76 | req.on('data', function (obj) { 77 | datas.shift()(t, obj) 78 | }) 79 | req.on('end', function () { 80 | res.end() 81 | server.close() 82 | t.end() 83 | }) 84 | }).client(clientOpts, function (client) { 85 | client.sendTransaction({ 86 | id: 'abc123', 87 | name: genStr('a', veryLong), 88 | type: genStr('b', veryLong), 89 | result: genStr('c', veryLong), 90 | sampled: true, 91 | context: { 92 | request: { 93 | method: genStr('d', veryLong), 94 | url: { 95 | protocol: genStr('e', veryLong), 96 | hostname: genStr('f', veryLong), 97 | port: genStr('g', veryLong), 98 | pathname: genStr('h', veryLong), 99 | search: genStr('i', veryLong), 100 | hash: genStr('j', veryLong), 101 | raw: genStr('k', veryLong), 102 | full: genStr('l', veryLong) 103 | } 104 | }, 105 | user: { 106 | id: genStr('m', veryLong), 107 | email: genStr('n', veryLong), 108 | username: genStr('o', veryLong) 109 | }, 110 | custom: { 111 | foo: genStr('p', veryLong) 112 | } 113 | } 114 | }) 115 | client.flush(() => { client.destroy() }) 116 | }) 117 | }) 118 | 119 | test('truncate span', function (t) { 120 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 121 | const datas = [ 122 | assertMetadata, 123 | assertEvent({ 124 | span: { 125 | id: 'abc123', 126 | name: genStr('a', keywordLen), 127 | type: genStr('b', keywordLen), 128 | 
stacktrace: [ 129 | { pre_context: [genStr('c', lineLen), genStr('d', lineLen)], context_line: genStr('e', lineLen), post_context: [genStr('f', lineLen), genStr('g', lineLen)] }, 130 | { pre_context: [genStr('h', lineLen), genStr('i', lineLen)], context_line: genStr('j', lineLen), post_context: [genStr('k', lineLen), genStr('l', lineLen)] } 131 | ], 132 | context: { 133 | custom: { 134 | foo: genStr('m', lineLen) 135 | }, 136 | db: { 137 | statement: genStr('n', longFieldLen) 138 | }, 139 | destination: { 140 | address: genStr('o', keywordLen), 141 | port: 80, 142 | service: { 143 | name: genStr('p', keywordLen), 144 | resource: genStr('q', keywordLen), 145 | type: genStr('r', keywordLen) 146 | } 147 | } 148 | } 149 | } 150 | }) 151 | ] 152 | const server = APMServer(function (req, res) { 153 | assertIntakeReq(t, req) 154 | req = processIntakeReq(req) 155 | req.on('data', function (obj) { 156 | datas.shift()(t, obj) 157 | }) 158 | req.on('end', function () { 159 | res.end() 160 | server.close() 161 | t.end() 162 | }) 163 | }).client(clientOpts, function (client) { 164 | client.sendSpan({ 165 | id: 'abc123', 166 | name: genStr('a', veryLong), 167 | type: genStr('b', veryLong), 168 | stacktrace: [ 169 | { pre_context: [genStr('c', veryLong), genStr('d', veryLong)], context_line: genStr('e', veryLong), post_context: [genStr('f', veryLong), genStr('g', veryLong)] }, 170 | { pre_context: [genStr('h', veryLong), genStr('i', veryLong)], context_line: genStr('j', veryLong), post_context: [genStr('k', veryLong), genStr('l', veryLong)] } 171 | ], 172 | context: { 173 | custom: { 174 | foo: genStr('m', veryLong) 175 | }, 176 | db: { 177 | statement: genStr('n', veryLong) 178 | }, 179 | destination: { 180 | address: genStr('o', veryLong), 181 | port: 80, 182 | service: { 183 | name: genStr('p', veryLong), 184 | resource: genStr('q', veryLong), 185 | type: genStr('r', veryLong) 186 | } 187 | } 188 | } 189 | }) 190 | client.flush(() => { client.destroy() }) 191 | }) 192 | }) 193 | 194 | test('truncate span custom keys', function (t) { 195 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 196 | const datas = [ 197 | assertMetadata, 198 | assertEvent({ 199 | span: { 200 | id: 'abc123', 201 | name: 'cool-name', 202 | type: 'cool-type', 203 | context: { 204 | custom: { 205 | [genStr('a', customKeyLen)]: 'truncate my key', 206 | [genStr('b', customKeyLen)]: null 207 | }, 208 | db: { 209 | statement: 'SELECT * FROM USERS' 210 | } 211 | } 212 | } 213 | }) 214 | ] 215 | const server = APMServer(function (req, res) { 216 | assertIntakeReq(t, req) 217 | req = processIntakeReq(req) 218 | req.on('data', function (obj) { 219 | datas.shift()(t, obj) 220 | }) 221 | req.on('end', function () { 222 | res.end() 223 | server.close() 224 | t.end() 225 | }) 226 | }).client(clientOpts, function (client) { 227 | client.sendSpan({ 228 | id: 'abc123', 229 | name: 'cool-name', 230 | type: 'cool-type', 231 | context: { 232 | custom: { 233 | [genStr('a', veryLong)]: 'truncate my key', 234 | [genStr('b', veryLong)]: null 235 | }, 236 | db: { 237 | statement: 'SELECT * FROM USERS' 238 | } 239 | } 240 | }) 241 | client.flush(() => { client.destroy() }) 242 | }) 243 | }) 244 | 245 | test('truncate error', function (t) { 246 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 247 | const datas = [ 248 | assertMetadata, 249 | assertEvent({ 250 | error: { 251 | id: 'abc123', 252 | log: { 253 | level: genStr('a', keywordLen), 254 | logger_name: genStr('b', keywordLen), 255 | 
message: genStr('c', errMsgLen), 256 | param_message: genStr('d', keywordLen), 257 | stacktrace: [ 258 | { pre_context: [genStr('e', lineLen), genStr('f', lineLen)], context_line: genStr('g', lineLen), post_context: [genStr('h', lineLen), genStr('i', lineLen)] }, 259 | { pre_context: [genStr('j', lineLen), genStr('k', lineLen)], context_line: genStr('l', lineLen), post_context: [genStr('m', lineLen), genStr('n', lineLen)] } 260 | ] 261 | }, 262 | exception: { 263 | message: genStr('o', errMsgLen), 264 | type: genStr('p', keywordLen), 265 | code: genStr('q', keywordLen), 266 | module: genStr('r', keywordLen), 267 | stacktrace: [ 268 | { pre_context: [genStr('s', lineLen), genStr('t', lineLen)], context_line: genStr('u', lineLen), post_context: [genStr('v', lineLen), genStr('w', lineLen)] }, 269 | { pre_context: [genStr('x', lineLen), genStr('y', lineLen)], context_line: genStr('z', lineLen), post_context: [genStr('A', lineLen), genStr('B', lineLen)] } 270 | ] 271 | }, 272 | context: { 273 | request: { 274 | method: genStr('C', keywordLen), 275 | url: { 276 | protocol: genStr('D', keywordLen), 277 | hostname: genStr('E', keywordLen), 278 | port: genStr('F', keywordLen), 279 | pathname: genStr('G', keywordLen), 280 | search: genStr('H', keywordLen), 281 | hash: genStr('I', keywordLen), 282 | raw: genStr('J', keywordLen), 283 | full: genStr('K', keywordLen) 284 | } 285 | }, 286 | user: { 287 | id: genStr('L', keywordLen), 288 | email: genStr('M', keywordLen), 289 | username: genStr('N', keywordLen) 290 | }, 291 | custom: { 292 | foo: genStr('O', lineLen) 293 | }, 294 | tags: { 295 | bar: genStr('P', keywordLen) 296 | } 297 | } 298 | } 299 | }) 300 | ] 301 | const server = APMServer(function (req, res) { 302 | assertIntakeReq(t, req) 303 | req = processIntakeReq(req) 304 | req.on('data', function (obj) { 305 | datas.shift()(t, obj) 306 | }) 307 | req.on('end', function () { 308 | res.end() 309 | server.close() 310 | t.end() 311 | }) 312 | }).client(clientOpts, function (client) { 313 | client.sendError({ 314 | id: 'abc123', 315 | log: { 316 | level: genStr('a', veryLong), 317 | logger_name: genStr('b', veryLong), 318 | message: genStr('c', veryLong), 319 | param_message: genStr('d', veryLong), 320 | stacktrace: [ 321 | { pre_context: [genStr('e', veryLong), genStr('f', veryLong)], context_line: genStr('g', veryLong), post_context: [genStr('h', veryLong), genStr('i', veryLong)] }, 322 | { pre_context: [genStr('j', veryLong), genStr('k', veryLong)], context_line: genStr('l', veryLong), post_context: [genStr('m', veryLong), genStr('n', veryLong)] } 323 | ] 324 | }, 325 | exception: { 326 | message: genStr('o', veryLong), 327 | type: genStr('p', veryLong), 328 | code: genStr('q', veryLong), 329 | module: genStr('r', veryLong), 330 | stacktrace: [ 331 | { pre_context: [genStr('s', veryLong), genStr('t', veryLong)], context_line: genStr('u', veryLong), post_context: [genStr('v', veryLong), genStr('w', veryLong)] }, 332 | { pre_context: [genStr('x', veryLong), genStr('y', veryLong)], context_line: genStr('z', veryLong), post_context: [genStr('A', veryLong), genStr('B', veryLong)] } 333 | ] 334 | }, 335 | context: { 336 | request: { 337 | method: genStr('C', veryLong), 338 | url: { 339 | protocol: genStr('D', veryLong), 340 | hostname: genStr('E', veryLong), 341 | port: genStr('F', veryLong), 342 | pathname: genStr('G', veryLong), 343 | search: genStr('H', veryLong), 344 | hash: genStr('I', veryLong), 345 | raw: genStr('J', veryLong), 346 | full: genStr('K', veryLong) 347 | } 348 | }, 349 | user: { 350 
| id: genStr('L', veryLong), 351 | email: genStr('M', veryLong), 352 | username: genStr('N', veryLong) 353 | }, 354 | custom: { 355 | foo: genStr('O', veryLong) 356 | }, 357 | tags: { 358 | bar: genStr('P', veryLong) 359 | } 360 | } 361 | }) 362 | client.flush(() => { client.destroy() }) 363 | }) 364 | }) 365 | 366 | test('truncate metricset', function (t) { 367 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 368 | const datas = [ 369 | assertMetadata, 370 | assertEvent({ 371 | metricset: { 372 | timestamp: 1496170422281000, 373 | tags: { 374 | foo: genStr('a', keywordLen) 375 | }, 376 | samples: { 377 | metric_name: { 378 | value: 4 379 | } 380 | } 381 | } 382 | }) 383 | ] 384 | const server = APMServer(function (req, res) { 385 | assertIntakeReq(t, req) 386 | req = processIntakeReq(req) 387 | req.on('data', function (obj) { 388 | datas.shift()(t, obj) 389 | }) 390 | req.on('end', function () { 391 | res.end() 392 | server.close() 393 | t.end() 394 | }) 395 | }).client(clientOpts, function (client) { 396 | client.sendMetricSet({ 397 | timestamp: 1496170422281000, 398 | tags: { 399 | foo: genStr('a', veryLong) 400 | }, 401 | samples: { 402 | metric_name: { 403 | value: 4 404 | } 405 | } 406 | }) 407 | client.flush(() => { client.destroy() }) 408 | }) 409 | }) 410 | }) 411 | 412 | function genStr (ch, length) { 413 | return new Array(length + 1).join(ch) 414 | } 415 | 416 | test('truncate cloud metadata', function (t) { 417 | // tests that each cloud metadata field is truncated 418 | // at `truncateKeywordsAt` values 419 | const opts = { 420 | truncateKeywordsAt: 100, 421 | truncateStringsAt: 50 422 | } 423 | 424 | const longString = (new Array(500).fill('x').join('')) 425 | const toTruncate = { 426 | cloud: { 427 | account: { 428 | id: longString, 429 | name: longString 430 | }, 431 | availability_zone: longString, 432 | instance: { 433 | id: longString, 434 | name: longString 435 | }, 436 | machine: { 437 | type: longString 438 | }, 439 | project: { 440 | id: longString, 441 | name: longString 442 | }, 443 | provider: longString, 444 | region: longString 445 | } 446 | } 447 | const { cloud } = truncate.metadata(toTruncate, opts) 448 | 449 | t.ok(cloud.account.id.length === 100, 'account.id.length was truncated') 450 | t.ok(cloud.account.name.length === 100, 'account.name.length was truncated') 451 | t.ok(cloud.availability_zone.length === 100, 'availability_zone was truncated') 452 | t.ok(cloud.instance.id.length === 100, 'instance.id was truncated') 453 | t.ok(cloud.instance.name.length === 100, 'instance.name was truncated') 454 | t.ok(cloud.machine.type.length === 100, 'machine.type was truncated') 455 | t.ok(cloud.project.id.length === 100, 'project.id was truncated') 456 | t.ok(cloud.project.name.length === 100, 'project.name was truncated') 457 | t.ok(cloud.provider.length === 100, 'provider was truncated') 458 | t.ok(cloud.region.length === 100, 'region was truncated') 459 | 460 | t.end() 461 | }) 462 | 463 | test('do not break surrogate pairs in truncation', function (t) { 464 | const span = { 465 | name: 'theSpan', 466 | type: 'theType', 467 | context: { 468 | db: { 469 | statement: 'foo🎉bar' 470 | } 471 | } 472 | } 473 | const truncateLongFieldsAt = 4 474 | const truncatedSpan = truncate.span(span, { truncateLongFieldsAt }) 475 | t.ok(truncatedSpan.context.db.statement.length <= truncateLongFieldsAt, 476 | 'context.db.statement was truncated') 477 | t.equal(truncatedSpan.context.db.statement, 'foo', 478 | 'context.db.statement was truncated 
without breaking a surrogate pair') 479 | t.end() 480 | }) 481 | -------------------------------------------------------------------------------- /test/basic.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const test = require('tape') 10 | const utils = require('./lib/utils') 11 | const Client = require('..') 12 | const APMServer = utils.APMServer 13 | const processIntakeReq = utils.processIntakeReq 14 | const assertIntakeReq = utils.assertIntakeReq 15 | const assertMetadata = utils.assertMetadata 16 | const assertEvent = utils.assertEvent 17 | 18 | const dataTypes = ['span', 'transaction', 'error', 'metricset'] 19 | 20 | const upper = { 21 | span: 'Span', 22 | transaction: 'Transaction', 23 | error: 'Error', 24 | metricset: 'MetricSet' 25 | } 26 | 27 | dataTypes.forEach(function (dataType) { 28 | const sendFn = 'send' + upper[dataType] 29 | 30 | test(`client.${sendFn}() + client.flush()`, function (t) { 31 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 32 | const datas = [ 33 | assertMetadata, 34 | assertEvent({ [dataType]: { foo: 42 } }) 35 | ] 36 | const server = APMServer(function (req, res) { 37 | assertIntakeReq(t, req) 38 | req = processIntakeReq(req) 39 | req.on('data', function (obj) { 40 | datas.shift()(t, obj) 41 | }) 42 | req.on('end', function () { 43 | res.end() 44 | server.close() 45 | t.end() 46 | }) 47 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 48 | client[sendFn]({ foo: 42 }) 49 | client.flush(() => { client.destroy() }) 50 | }) 51 | }) 52 | 53 | test(`client.${sendFn}(callback) + client.flush()`, function (t) { 54 | t.plan(1 + assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 55 | const datas = [ 56 | assertMetadata, 57 | assertEvent({ [dataType]: { foo: 42 } }) 58 | ] 59 | const server = APMServer(function (req, res) { 60 | assertIntakeReq(t, req) 61 | req = processIntakeReq(req) 62 | req.on('data', function (obj) { 63 | datas.shift()(t, obj) 64 | }) 65 | req.on('end', function () { 66 | res.end() 67 | }) 68 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 69 | let nexttick = false 70 | client[sendFn]({ foo: 42 }, function () { 71 | t.ok(nexttick, 'should call callback') 72 | }) 73 | client.flush(() => { 74 | client.end() 75 | server.close() 76 | t.end() 77 | }) 78 | nexttick = true 79 | }) 80 | }) 81 | 82 | test(`client.${sendFn}() + client.end()`, function (t) { 83 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 84 | let client 85 | const datas = [ 86 | assertMetadata, 87 | assertEvent({ [dataType]: { foo: 42 } }) 88 | ] 89 | const server = APMServer(function (req, res) { 90 | assertIntakeReq(t, req) 91 | req = processIntakeReq(req) 92 | req.on('data', function (obj) { 93 | datas.shift()(t, obj) 94 | }) 95 | req.on('end', function () { 96 | res.end() 97 | server.close() 98 | client.destroy() 99 | t.end() 100 | }) 101 | }).client({ apmServerVersion: '8.0.0' }, function (client_) { 102 | client = client_ 103 | client[sendFn]({ foo: 42 }) 104 | client.end() 105 | }) 106 | }) 107 | 108 | test(`single client.${sendFn}`, function (t) { 109 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 110 | let client 111 | const datas = [ 112 | 
assertMetadata, 113 | assertEvent({ [dataType]: { foo: 42 } }) 114 | ] 115 | const server = APMServer(function (req, res) { 116 | assertIntakeReq(t, req) 117 | req = processIntakeReq(req) 118 | req.on('data', function (obj) { 119 | datas.shift()(t, obj) 120 | }) 121 | req.on('end', function () { 122 | res.end() 123 | server.close() 124 | client.destroy() 125 | t.end() 126 | }) 127 | }).client({ time: 100, apmServerVersion: '8.0.0' }, function (client_) { 128 | client = client_ 129 | client[sendFn]({ foo: 42 }) 130 | }) 131 | }) 132 | 133 | test(`multiple client.${sendFn} (same request)`, function (t) { 134 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts * 3) 135 | let client 136 | const datas = [ 137 | assertMetadata, 138 | assertEvent({ [dataType]: { req: 1 } }), 139 | assertEvent({ [dataType]: { req: 2 } }), 140 | assertEvent({ [dataType]: { req: 3 } }) 141 | ] 142 | const server = APMServer(function (req, res) { 143 | assertIntakeReq(t, req) 144 | req = processIntakeReq(req) 145 | req.on('data', function (obj) { 146 | datas.shift()(t, obj) 147 | }) 148 | req.on('end', function () { 149 | res.end() 150 | server.close() 151 | client.destroy() 152 | t.end() 153 | }) 154 | }).client({ time: 100, apmServerVersion: '8.0.0' }, function (client_) { 155 | client = client_ 156 | client[sendFn]({ req: 1 }) 157 | client[sendFn]({ req: 2 }) 158 | client[sendFn]({ req: 3 }) 159 | }) 160 | }) 161 | 162 | test(`multiple client.${sendFn} (multiple requests)`, function (t) { 163 | t.plan(assertIntakeReq.asserts * 2 + assertMetadata.asserts * 2 + assertEvent.asserts * 6) 164 | 165 | let clientReqNum = 0 166 | let clientSendNum = 0 167 | let serverReqNum = 0 168 | let client 169 | 170 | const datas = [ 171 | assertMetadata, 172 | assertEvent({ [dataType]: { req: 1, send: 1 } }), 173 | assertEvent({ [dataType]: { req: 1, send: 2 } }), 174 | assertEvent({ [dataType]: { req: 1, send: 3 } }), 175 | assertMetadata, 176 | assertEvent({ [dataType]: { req: 2, send: 4 } }), 177 | assertEvent({ [dataType]: { req: 2, send: 5 } }), 178 | assertEvent({ [dataType]: { req: 2, send: 6 } }) 179 | ] 180 | 181 | const server = APMServer(function (req, res) { 182 | const reqNum = ++serverReqNum 183 | assertIntakeReq(t, req) 184 | req = processIntakeReq(req) 185 | req.on('data', function (obj) { 186 | datas.shift()(t, obj) 187 | }) 188 | req.on('end', function () { 189 | res.end() 190 | if (reqNum === 1) { 191 | send() 192 | } else { 193 | server.close() 194 | client.destroy() 195 | t.end() 196 | } 197 | }) 198 | }).client({ time: 100, apmServerVersion: '8.0.0' }, function (_client) { 199 | client = _client 200 | send() 201 | }) 202 | 203 | function send () { 204 | clientReqNum++ 205 | for (let n = 0; n < 3; n++) { 206 | client[sendFn]({ req: clientReqNum, send: ++clientSendNum }) 207 | } 208 | } 209 | }) 210 | }) 211 | 212 | test('client.flush(callback) - with active request', function (t) { 213 | t.plan(4 + assertIntakeReq.asserts + assertMetadata.asserts) 214 | const datas = [ 215 | assertMetadata, 216 | { span: { foo: 42, name: 'undefined', type: 'undefined' } } 217 | ] 218 | const server = APMServer(function (req, res) { 219 | assertIntakeReq(t, req) 220 | req = processIntakeReq(req) 221 | req.on('data', function (obj) { 222 | const expect = datas.shift() 223 | if (typeof expect === 'function') expect(t, obj) 224 | else t.deepEqual(obj, expect) 225 | }) 226 | req.on('end', function () { 227 | res.end() 228 | }) 229 | }).client({ bufferWindowTime: -1, apmServerVersion: '8.0.0' }, function 
(client) { 230 | t.equal(client._activeIntakeReq, false, 'no outgoing HTTP request to begin with') 231 | client.sendSpan({ foo: 42 }) 232 | t.equal(client._activeIntakeReq, true, 'an outgoing HTTP request should be active') 233 | client.flush(function () { 234 | t.equal(client._activeIntakeReq, false, 'the outgoing HTTP request should be done') 235 | client.end() 236 | server.close() 237 | t.end() 238 | }) 239 | }) 240 | }) 241 | 242 | test('client.flush(callback) - with queued request', function (t) { 243 | t.plan(4 + assertIntakeReq.asserts * 2 + assertMetadata.asserts * 2) 244 | const datas = [ 245 | assertMetadata, 246 | { span: { req: 1, name: 'undefined', type: 'undefined' } }, 247 | assertMetadata, 248 | { span: { req: 2, name: 'undefined', type: 'undefined' } } 249 | ] 250 | const server = APMServer(function (req, res) { 251 | assertIntakeReq(t, req) 252 | req = processIntakeReq(req) 253 | req.on('data', function (obj) { 254 | const expect = datas.shift() 255 | if (typeof expect === 'function') expect(t, obj) 256 | else t.deepEqual(obj, expect) 257 | }) 258 | req.on('end', function () { 259 | res.end() 260 | }) 261 | }).client({ bufferWindowTime: -1, apmServerVersion: '8.0.0' }, function (client) { 262 | client.sendSpan({ req: 1 }) 263 | client.flush() 264 | client.sendSpan({ req: 2 }) 265 | t.equal(client._activeIntakeReq, true, 'an outgoing HTTP request should be active') 266 | client.flush(function () { 267 | t.equal(client._activeIntakeReq, false, 'the outgoing HTTP request should be done') 268 | client.end() 269 | server.close() 270 | t.end() 271 | }) 272 | }) 273 | }) 274 | 275 | test('2nd flush before 1st flush have finished', function (t) { 276 | t.plan(4 + assertIntakeReq.asserts * 2 + assertMetadata.asserts * 2) 277 | let requestStarts = 0 278 | let requestEnds = 0 279 | const datas = [ 280 | assertMetadata, 281 | { span: { req: 1, name: 'undefined', type: 'undefined' } }, 282 | assertMetadata, 283 | { span: { req: 2, name: 'undefined', type: 'undefined' } } 284 | ] 285 | const server = APMServer(function (req, res) { 286 | requestStarts++ 287 | assertIntakeReq(t, req) 288 | req = processIntakeReq(req) 289 | req.on('data', function (obj) { 290 | const expect = datas.shift() 291 | if (typeof expect === 'function') expect(t, obj) 292 | else t.deepEqual(obj, expect) 293 | }) 294 | req.on('end', function () { 295 | requestEnds++ 296 | res.end() 297 | }) 298 | }).client({ bufferWindowTime: -1, apmServerVersion: '8.0.0' }, function (client) { 299 | client.sendSpan({ req: 1 }) 300 | client.flush() 301 | client.sendSpan({ req: 2 }) 302 | client.flush(() => { client.destroy() }) 303 | setTimeout(function () { 304 | t.equal(requestStarts, 2, 'should have received 2 requests') 305 | t.equal(requestEnds, 2, 'should have received 2 requests completely') 306 | t.end() 307 | server.close() 308 | }, 200) 309 | }) 310 | }) 311 | 312 | test('client.end(callback)', function (t) { 313 | t.plan(1 + assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 314 | let client 315 | const datas = [ 316 | assertMetadata, 317 | assertEvent({ span: { foo: 42 } }) 318 | ] 319 | const server = APMServer(function (req, res) { 320 | assertIntakeReq(t, req) 321 | req = processIntakeReq(req) 322 | req.on('data', function (obj) { 323 | datas.shift()(t, obj) 324 | }) 325 | req.on('end', function () { 326 | res.end() 327 | server.close() 328 | client.destroy() 329 | t.end() 330 | }) 331 | }).client({ apmServerVersion: '8.0.0' }, function (client_) { 332 | client = client_ 333 | client.sendSpan({ 
foo: 42 }) 334 | client.end(function () { 335 | t.pass('should call callback') 336 | }) 337 | }) 338 | }) 339 | 340 | test('client.sent', function (t) { 341 | t.plan(4) 342 | const server = APMServer(function (req, res) { 343 | t.comment('APM server got a request') 344 | req.resume() 345 | req.on('end', function () { 346 | res.end() 347 | }) 348 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 349 | client.sendError({ foo: 42 }) 350 | client.sendSpan({ foo: 42 }) 351 | client.sendTransaction({ foo: 42 }) 352 | t.equal(client.sent, 0, 'sent=0 after 1st round of sending') 353 | client.flush(function () { 354 | t.equal(client.sent, 3, 'sent=3 after 1st flush') 355 | client.sendError({ foo: 42 }) 356 | client.sendSpan({ foo: 42 }) 357 | client.sendTransaction({ foo: 42 }) 358 | t.equal(client.sent, 3, 'sent=3 after 2nd round of sending') 359 | client.flush(function () { 360 | t.equal(client.sent, 6, 'sent=6 after 2nd flush') 361 | client.end() 362 | server.close() 363 | t.end() 364 | }) 365 | }) 366 | }) 367 | }) 368 | 369 | test('should not open new request until it\'s needed after flush', function (t) { 370 | let client 371 | let requests = 0 372 | let expectRequest = false 373 | const server = APMServer(function (req, res) { 374 | t.equal(expectRequest, true, 'should only send new request when expected') 375 | expectRequest = false 376 | 377 | req.resume() 378 | req.on('end', function () { 379 | res.end() 380 | 381 | if (++requests === 2) { 382 | server.close() 383 | client.destroy() 384 | t.end() 385 | } else { 386 | setTimeout(sendData, 250) 387 | } 388 | }) 389 | }).client({ apmServerVersion: '8.0.0' }, function (_client) { 390 | client = _client 391 | sendData() 392 | }) 393 | 394 | function sendData () { 395 | expectRequest = true 396 | client.sendError({ foo: 42 }) 397 | client.flush() 398 | } 399 | }) 400 | 401 | test('should not open new request until it\'s needed after timeout', function (t) { 402 | let client 403 | let requests = 0 404 | let expectRequest = false 405 | const server = APMServer(function (req, res) { 406 | t.equal(expectRequest, true, 'should only send new request when expected') 407 | expectRequest = false 408 | 409 | req.resume() 410 | req.on('end', function () { 411 | res.end() 412 | 413 | if (++requests === 2) { 414 | server.close() 415 | client.destroy() 416 | t.end() 417 | } else { 418 | setTimeout(sendData, 250) 419 | } 420 | }) 421 | }).client({ time: 1, apmServerVersion: '8.0.0' }, function (_client) { 422 | client = _client 423 | sendData() 424 | }) 425 | 426 | function sendData () { 427 | expectRequest = true 428 | client.sendError({ foo: 42 }) 429 | } 430 | }) 431 | 432 | test('cloud metadata: _encodedMetadata maintains cloud info after re-config', function (t) { 433 | const conf = { 434 | agentName: 'a', 435 | agentVersion: 'b', 436 | serviceName: 'c', 437 | userAgent: 'd' 438 | } 439 | const client = new Client(conf) 440 | 441 | // test initial values 442 | const metadataPreUpdate = JSON.parse(client._encodedMetadata).metadata 443 | t.equals(metadataPreUpdate.service.name, conf.serviceName, 'initial service name set') 444 | t.equals(metadataPreUpdate.service.agent.name, conf.agentName, 'initial agent name set') 445 | t.equals(metadataPreUpdate.service.agent.version, conf.agentVersion, 'initial agent version set') 446 | t.ok(!metadataPreUpdate.cloud, 'no cloud metadata set initially') 447 | 448 | // Simulate cloud metadata having been gathered. 
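// (The `_cloudMetadata` and `_resetEncodedMetadata()` members used below are
// internal: in normal use the cloud info would arrive via the
// `cloudMetadataFetcher` option, as exercised by the later tests in this file.
// Calling `_resetEncodedMetadata()` rebuilds `_encodedMetadata`, so the
// assertions that follow can check that the cloud block is now included.)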
449 | client._cloudMetadata = { foo: 'bar' } 450 | client._resetEncodedMetadata() 451 | 452 | // Ensure cloud metadata is on `_encodedMetadata`. 453 | const metadataPostCloud = JSON.parse(client._encodedMetadata).metadata 454 | t.equals(metadataPostCloud.service.name, conf.serviceName, 'service name still set') 455 | t.equals(metadataPostCloud.service.agent.name, conf.agentName, 'agent name still set') 456 | t.equals(metadataPostCloud.service.agent.version, conf.agentVersion, 'agent version still set') 457 | t.ok(metadataPostCloud.cloud, 'cloud metadata set after fetch') 458 | t.equals(metadataPostCloud.cloud.foo, 'bar', 'cloud metadata set after fetch') 459 | 460 | // Simulate an update of some metadata from re-config. 461 | client.config({ 462 | frameworkName: 'superFastify', 463 | frameworkVersion: '1.0.0' 464 | }) 465 | 466 | // Ensure _encodedMetadata keeps cloud info and updates appropriately. 467 | const metadataPostUpdate = JSON.parse(client._encodedMetadata).metadata 468 | t.equals(metadataPostUpdate.service.name, conf.serviceName, 'service name still set') 469 | t.equals(metadataPostUpdate.service.agent.name, conf.agentName, 'agent name still set') 470 | t.equals(metadataPostUpdate.service.agent.version, conf.agentVersion, 'agent version still set') 471 | t.equals(metadataPostUpdate.service.framework.name, 'superFastify', 'service.framework.name properly set') 472 | t.equals(metadataPostUpdate.service.framework.version, '1.0.0', 'service.framework.version properly set') 473 | t.ok(metadataPostUpdate.cloud, 'cloud metadata still set after re-config') 474 | t.equals(metadataPostUpdate.cloud.foo, 'bar', 'cloud metadata "passed through" after re-config') 475 | t.end() 476 | }) 477 | 478 | test('cloud metadata: _fetchAndEncodeMetadata with fetcher configured ', function (t) { 479 | // test with a fetcher configured 480 | const conf = { 481 | agentName: 'a', 482 | agentVersion: 'b', 483 | serviceName: 'c', 484 | userAgent: 'd' 485 | } 486 | conf.cloudMetadataFetcher = {} 487 | conf.cloudMetadataFetcher.getCloudMetadata = function (cb) { 488 | process.nextTick(cb, null, { foo: 'bar' }) 489 | } 490 | const client = new Client(conf) 491 | client._fetchAndEncodeMetadata(function () { 492 | const metadata = JSON.parse(client._encodedMetadata).metadata 493 | t.equals(metadata.service.name, conf.serviceName, 'service name set') 494 | t.equals(metadata.service.agent.name, conf.agentName, 'agent name set') 495 | t.equals(metadata.service.agent.version, conf.agentVersion, 'agent version set') 496 | t.ok(metadata.cloud, 'cloud metadata set with a fetcher configured') 497 | t.equals(metadata.cloud.foo, 'bar', 'cloud metadata value represented') 498 | t.end() 499 | }) 500 | }) 501 | 502 | test('cloud metadata: _fetchAndEncodeMetadata with fetcher configured but an error', function (t) { 503 | // fetcher configured but its callback returns an error 504 | const conf = { 505 | agentName: 'a', 506 | agentVersion: 'b', 507 | serviceName: 'c', 508 | userAgent: 'd' 509 | } 510 | conf.cloudMetadataFetcher = {} 511 | conf.cloudMetadataFetcher.getCloudMetadata = function (cb) { 512 | const error = new Error('whoops') 513 | process.nextTick(cb, error, { foo: 'bar' }) 514 | } 515 | const client = new Client(conf) 516 | client._fetchAndEncodeMetadata(function () { 517 | const metadata = JSON.parse(client._encodedMetadata).metadata 518 | t.equals(metadata.service.name, conf.serviceName, 'service name set') 519 | t.equals(metadata.service.agent.name, conf.agentName, 'agent name set') 520 | 
t.equals(metadata.service.agent.version, conf.agentVersion, 'agent version set') 521 | t.ok(!metadata.cloud, 'cloud metadata not set when there is a fetcher error') 522 | t.end() 523 | }) 524 | }) 525 | -------------------------------------------------------------------------------- /test/config.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const fs = require('fs') 10 | const getContainerInfo = require('../lib/container-info') 11 | const http = require('http') 12 | const ndjson = require('ndjson') 13 | const os = require('os') 14 | const path = require('path') 15 | const semver = require('semver') 16 | const test = require('tape') 17 | const URL = require('url').URL 18 | 19 | const utils = require('./lib/utils') 20 | const pkg = require('../package') 21 | const Client = require('../') 22 | const { detectHostname } = require('../lib/detect-hostname') 23 | 24 | const APMServer = utils.APMServer 25 | const processIntakeReq = utils.processIntakeReq 26 | const validOpts = utils.validOpts 27 | 28 | const detectedHostname = detectHostname() 29 | 30 | test('package', function (t) { 31 | // these values are in the User-Agent header tests, so we need to make sure 32 | // they are as we expect 33 | t.equal(pkg.name, 'elastic-apm-http-client') 34 | t.ok(semver.valid(pkg.version)) 35 | t.end() 36 | }) 37 | 38 | test('throw if missing required options', function (t) { 39 | t.throws(() => new Client(), 'throws if no options are provided') 40 | t.throws(() => new Client({ agentName: 'foo' }), 'throws if only agentName is provided') 41 | t.throws(() => new Client({ agentVersion: 'foo' }), 'throws if only agentVersion is provided') 42 | t.throws(() => new Client({ serviceName: 'foo' }), 'throws if only serviceName is provided') 43 | t.throws(() => new Client({ userAgent: 'foo' }), 'throws if only userAgent is provided') 44 | t.throws(() => new Client({ agentName: 'foo', agentVersion: 'foo', serviceName: 'foo' }), 'throws if userAgent is missing') 45 | t.throws(() => new Client({ agentName: 'foo', agentVersion: 'foo', userAgent: 'foo' }), 'throws if serviceName is missing') 46 | t.throws(() => new Client({ agentName: 'foo', serviceName: 'foo', userAgent: 'foo' }), 'throws if agentVersion is missing') 47 | t.throws(() => new Client({ agentVersion: 'foo', serviceName: 'foo', userAgent: 'foo' }), 'throws if agentName is missing') 48 | t.doesNotThrow(() => new Client({ agentName: 'foo', agentVersion: 'foo', serviceName: 'foo', userAgent: 'foo' }), 'doesn\'t throw if required options are provided') 49 | t.end() 50 | }) 51 | 52 | test('should work without new', function (t) { 53 | const client = Client(validOpts()) 54 | t.ok(client instanceof Client) 55 | t.end() 56 | }) 57 | 58 | test('null value config options shouldn\'t throw', function (t) { 59 | t.doesNotThrow(function () { 60 | new Client(validOpts({ // eslint-disable-line no-new 61 | size: null, 62 | time: null, 63 | serverTimeout: null, 64 | type: null, 65 | serverUrl: null, 66 | keepAlive: null, 67 | labels: null 68 | })) 69 | }) 70 | t.end() 71 | }) 72 | 73 | test('no secretToken or apiKey', function (t) { 74 | t.plan(1) 75 | let client 76 | const server = APMServer(function (req, res) { 77 | t.notOk('authorization' in req.headers, 'no Authorization header') 78 | 
res.end() 79 | server.close() 80 | client.destroy() 81 | t.end() 82 | }) 83 | server.listen(function () { 84 | client = new Client(validOpts({ 85 | serverUrl: 'http://localhost:' + server.address().port, 86 | apmServerVersion: '8.0.0' 87 | })) 88 | client.sendSpan({ foo: 42 }) 89 | client.end() 90 | }) 91 | }) 92 | 93 | test('has apiKey', function (t) { 94 | t.plan(1) 95 | let client 96 | const server = APMServer(function (req, res) { 97 | t.equal(req.headers.authorization, 'ApiKey FooBar123', 'should use apiKey in authorization header') 98 | res.end() 99 | server.close() 100 | client.destroy() 101 | t.end() 102 | }) 103 | server.listen(function () { 104 | client = new Client(validOpts({ 105 | serverUrl: 'http://localhost:' + server.address().port, 106 | apiKey: 'FooBar123', 107 | apmServerVersion: '8.0.0' 108 | })) 109 | client.sendSpan({ foo: 42 }) 110 | client.end() 111 | }) 112 | }) 113 | 114 | test('custom headers', function (t) { 115 | t.plan(1) 116 | 117 | let client 118 | const server = APMServer(function (req, res) { 119 | t.equal(req.headers['x-foo'], 'bar') 120 | res.end() 121 | server.close() 122 | client.destroy() 123 | t.end() 124 | }).listen(function () { 125 | client = new Client(validOpts({ 126 | serverUrl: 'http://localhost:' + server.address().port, 127 | headers: { 128 | 'X-Foo': 'bar' 129 | }, 130 | apmServerVersion: '8.0.0' 131 | })) 132 | client.sendSpan({ foo: 42 }) 133 | client.end() 134 | }) 135 | }) 136 | 137 | test('serverUrl is invalid', function (t) { 138 | t.throws(function () { 139 | new Client(validOpts({ // eslint-disable-line no-new 140 | serverUrl: 'invalid', 141 | apmServerVersion: '8.0.0' 142 | })) 143 | }) 144 | t.end() 145 | }) 146 | 147 | test('serverUrl contains path', function (t) { 148 | t.plan(1) 149 | let client 150 | const server = APMServer(function (req, res) { 151 | t.equal(req.url, '/subpath/intake/v2/events') 152 | res.end() 153 | server.close() 154 | client.destroy() 155 | t.end() 156 | }).listen(function () { 157 | client = new Client(validOpts({ 158 | serverUrl: 'http://localhost:' + server.address().port + '/subpath', 159 | apmServerVersion: '8.0.0' 160 | })) 161 | client.sendSpan({ foo: 42 }) 162 | client.end() 163 | }) 164 | }) 165 | 166 | test('reject unauthorized TLS by default', function (t) { 167 | t.plan(3) 168 | const server = APMServer({ secure: true }, function (req, res) { 169 | t.fail('should should not get request') 170 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 171 | client.on('request-error', function (err) { 172 | t.ok(err instanceof Error) 173 | let expectedErrorMessage = 'self signed certificate' 174 | if (semver.gte(process.version, 'v17.0.0')) { 175 | expectedErrorMessage = 'self-signed certificate' 176 | } 177 | t.equal(err.message, expectedErrorMessage) 178 | t.equal(err.code, 'DEPTH_ZERO_SELF_SIGNED_CERT') 179 | server.close() 180 | t.end() 181 | }) 182 | client.sendSpan({ foo: 42 }) 183 | client.end() 184 | }) 185 | }) 186 | 187 | test('allow unauthorized TLS if asked', function (t) { 188 | t.plan(1) 189 | let client 190 | const server = APMServer({ secure: true }, function (req, res) { 191 | t.pass('should let request through') 192 | res.end() 193 | client.destroy() 194 | server.close() 195 | t.end() 196 | }).client({ rejectUnauthorized: false, apmServerVersion: '8.0.0' }, function (client_) { 197 | client = client_ 198 | client.sendSpan({ foo: 42 }) 199 | client.end() 200 | }) 201 | }) 202 | 203 | test('allow self-signed TLS certificate by specifying the CA', function (t) { 204 | t.plan(1) 
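// Unlike the previous test, TLS verification stays enabled here: the test
// APM server's self-signed certificate is passed as `serverCaCert` below, so
// the handshake can be verified against it instead of being rejected.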
205 | let client 206 | const server = APMServer({ secure: true }, function (req, res) { 207 | t.pass('should let request through') 208 | res.end() 209 | client.destroy() 210 | server.close() 211 | t.end() 212 | }) 213 | server.client({ serverCaCert: server.cert, apmServerVersion: '8.0.0' }, function (client_) { 214 | client = client_ 215 | client.sendSpan({ foo: 42 }) 216 | client.end() 217 | }) 218 | }) 219 | 220 | test('metadata', function (t) { 221 | t.plan(11) 222 | let client 223 | const opts = { 224 | agentName: 'custom-agentName', 225 | agentVersion: 'custom-agentVersion', 226 | agentActivationMethod: 'custom-agentActivationMethod', 227 | serviceName: 'custom-serviceName', 228 | serviceNodeName: 'custom-serviceNodeName', 229 | serviceVersion: 'custom-serviceVersion', 230 | frameworkName: 'custom-frameworkName', 231 | frameworkVersion: 'custom-frameworkVersion', 232 | configuredHostname: 'custom-hostname', 233 | environment: 'production', 234 | globalLabels: { 235 | foo: 'bar', 236 | doesNotNest: { 237 | nope: 'this should be [object Object]' 238 | } 239 | }, 240 | apmServerVersion: '8.7.1' // avoid the APM server version fetch request 241 | } 242 | const server = APMServer(function (req, res) { 243 | req = processIntakeReq(req) 244 | req.once('data', function (obj) { 245 | const expects = { 246 | metadata: { 247 | service: { 248 | name: 'custom-serviceName', 249 | environment: 'production', 250 | runtime: { 251 | name: 'node', 252 | version: process.versions.node 253 | }, 254 | language: { 255 | name: 'javascript' 256 | }, 257 | agent: { 258 | name: 'custom-agentName', 259 | version: 'custom-agentVersion', 260 | activation_method: 'custom-agentActivationMethod' 261 | }, 262 | framework: { 263 | name: 'custom-frameworkName', 264 | version: 'custom-frameworkVersion' 265 | }, 266 | version: 'custom-serviceVersion', 267 | node: { 268 | configured_name: 'custom-serviceNodeName' 269 | } 270 | }, 271 | process: { 272 | pid: process.pid, 273 | title: process.title, 274 | argv: process.argv 275 | }, 276 | system: { 277 | architecture: process.arch, 278 | platform: process.platform, 279 | detected_hostname: detectedHostname, 280 | configured_hostname: 'custom-hostname' 281 | }, 282 | labels: { 283 | foo: 'bar', 284 | doesNotNest: '[object Object]' 285 | } 286 | } 287 | } 288 | 289 | if (semver.gte(process.version, '8.10.0')) { 290 | expects.metadata.process.ppid = process.ppid 291 | } 292 | 293 | t.deepEqual(obj, expects) 294 | 295 | t.ok(semver.valid(obj.metadata.service.runtime.version)) 296 | t.ok(obj.metadata.process.pid > 0, `pid should be > 0, was ${obj.metadata.process.pid}`) 297 | if (semver.gte(process.version, '8.10.0')) { 298 | t.ok(obj.metadata.process.ppid > 0, `ppid should be > 0, was ${obj.metadata.process.ppid}`) 299 | } else { 300 | t.equal(obj.metadata.process.ppid, undefined) 301 | } 302 | t.ok(Array.isArray(obj.metadata.process.argv)) 303 | t.ok(obj.metadata.process.argv.every(arg => typeof arg === 'string')) 304 | t.ok(obj.metadata.process.argv.every(arg => arg.length > 0)) 305 | t.equal(typeof obj.metadata.system.architecture, 'string') 306 | t.ok(obj.metadata.system.architecture.length > 0) 307 | t.equal(typeof obj.metadata.system.platform, 'string') 308 | t.ok(obj.metadata.system.platform.length > 0) 309 | }) 310 | req.on('end', function () { 311 | res.end() 312 | client.destroy() 313 | server.close() 314 | t.end() 315 | }) 316 | }).client(opts, function (client_) { 317 | client = client_ 318 | client.sendSpan({ foo: 42 }) 319 | client.end() 320 | }) 321 | }) 322 | 323 
| test('metadata - default values', function (t) { 324 | t.plan(1) 325 | let client 326 | const opts = { 327 | agentName: 'custom-agentName', 328 | agentVersion: 'custom-agentVersion', 329 | serviceName: 'custom-serviceName', 330 | apmServerVersion: '8.0.0' // avoid the APM server version fetch request 331 | } 332 | const server = APMServer(function (req, res) { 333 | req = processIntakeReq(req) 334 | req.once('data', function (obj) { 335 | const expects = { 336 | metadata: { 337 | service: { 338 | name: 'custom-serviceName', 339 | environment: 'development', 340 | runtime: { 341 | name: 'node', 342 | version: process.versions.node 343 | }, 344 | language: { 345 | name: 'javascript' 346 | }, 347 | agent: { 348 | name: 'custom-agentName', 349 | version: 'custom-agentVersion' 350 | } 351 | }, 352 | process: { 353 | pid: process.pid, 354 | title: process.title, 355 | argv: process.argv 356 | }, 357 | system: { 358 | architecture: process.arch, 359 | platform: process.platform, 360 | detected_hostname: detectedHostname 361 | } 362 | } 363 | } 364 | 365 | if (semver.gte(process.version, '8.10.0')) { 366 | expects.metadata.process.ppid = process.ppid 367 | } 368 | 369 | t.deepEqual(obj, expects) 370 | }) 371 | 372 | req.on('end', function () { 373 | res.end() 374 | client.destroy() 375 | server.close() 376 | t.end() 377 | }) 378 | }).client(opts, function (client_) { 379 | client = client_ 380 | client.sendSpan({ foo: 42 }) 381 | client.end() 382 | }) 383 | }) 384 | 385 | test('metadata - container info', function (t) { 386 | // Clear Client and APMServer from require cache 387 | delete require.cache[require.resolve('../')] 388 | delete require.cache[require.resolve('./lib/utils')] 389 | const sync = getContainerInfo.sync 390 | getContainerInfo.sync = function sync () { 391 | return { 392 | containerId: 'container-id', 393 | podId: 'pod-id' 394 | } 395 | } 396 | t.on('end', () => { 397 | getContainerInfo.sync = sync 398 | }) 399 | 400 | const APMServer = require('./lib/utils').APMServer 401 | 402 | let client 403 | const server = APMServer(function (req, res) { 404 | req = processIntakeReq(req) 405 | req.once('data', function (obj) { 406 | t.ok(obj.metadata) 407 | t.ok(obj.metadata.system) 408 | t.deepEqual(obj.metadata.system.container, { 409 | id: 'container-id' 410 | }) 411 | t.deepEqual(obj.metadata.system.kubernetes, { 412 | pod: { 413 | name: detectedHostname.split('.')[0], 414 | uid: 'pod-id' 415 | } 416 | }) 417 | }) 418 | req.on('end', function () { 419 | res.end() 420 | client.destroy() 421 | server.close() 422 | t.end() 423 | }) 424 | }).client({ apmServerVersion: '8.0.0' }, function (client_) { 425 | client = client_ 426 | client.sendSpan({ foo: 42 }) 427 | client.end() 428 | }) 429 | }) 430 | 431 | test('agentName', function (t) { 432 | t.plan(1) 433 | let client 434 | const server = APMServer(function (req, res) { 435 | req = processIntakeReq(req) 436 | req.once('data', function (obj) { 437 | t.equal(obj.metadata.service.name, 'custom') 438 | }) 439 | req.on('end', function () { 440 | res.end() 441 | client.destroy() 442 | server.close() 443 | t.end() 444 | }) 445 | }).client({ serviceName: 'custom', apmServerVersion: '8.0.0' }, function (client_) { 446 | client = client_ 447 | client.sendSpan({ foo: 42 }) 448 | client.end() 449 | }) 450 | }) 451 | 452 | test('payloadLogFile', function (t) { 453 | t.plan(6) 454 | 455 | const receivedObjects = [] 456 | const filename = path.join(os.tmpdir(), Date.now() + '.ndjson') 457 | let requests = 0 458 | 459 | let client 460 | const server = 
APMServer(function (req, res) { 461 | const request = ++requests 462 | 463 | req = processIntakeReq(req) 464 | 465 | req.on('data', function (obj) { 466 | receivedObjects.push(obj) 467 | }) 468 | 469 | req.on('end', function () { 470 | res.end() 471 | 472 | if (request === 2) { 473 | client.destroy() 474 | server.close() 475 | t.equal(receivedObjects.length, 5, 'should have received 5 objects') 476 | 477 | const file = fs.createReadStream(filename).pipe(ndjson.parse()) 478 | 479 | file.on('data', function (obj) { 480 | const expected = receivedObjects.shift() 481 | const n = 5 - receivedObjects.length 482 | t.deepEqual(obj, expected, `expected line ${n} in the log file to match item no ${n} received by the server`) 483 | }) 484 | 485 | file.on('end', function () { 486 | t.end() 487 | }) 488 | } 489 | }) 490 | }).client({ payloadLogFile: filename, apmServerVersion: '8.0.0' }, function (client_) { 491 | client = client_ 492 | client.sendTransaction({ req: 1 }) 493 | client.sendSpan({ req: 2 }) 494 | client.flush() // force the client to make a 2nd request so that we test reusing the file across requests 495 | client.sendError({ req: 3 }) 496 | client.end() 497 | }) 498 | }) 499 | 500 | test('update conf', function (t) { 501 | t.plan(1) 502 | let client 503 | const server = APMServer(function (req, res) { 504 | req = processIntakeReq(req) 505 | req.once('data', function (obj) { 506 | t.equal(obj.metadata.service.name, 'bar') 507 | }) 508 | req.on('end', function () { 509 | res.end() 510 | client.destroy() 511 | server.close() 512 | t.end() 513 | }) 514 | }).client({ serviceName: 'foo', apmServerVersion: '8.0.0' }, function (client_) { 515 | client = client_ 516 | client.config({ serviceName: 'bar' }) 517 | client.sendSpan({ foo: 42 }) 518 | client.end() 519 | }) 520 | }) 521 | 522 | // There was a case (https://github.com/elastic/apm-agent-nodejs/issues/1749) 523 | // where a non-200 response from apm-server would crash the agent. 524 | test('503 response from apm-server for central config should not crash', function (t) { 525 | let client 526 | 527 | // If this test goes wrong, it can hang. Clean up after a 30s timeout. 528 | const abortTimeout = setTimeout(function () { 529 | t.fail('test hung, aborting after a timeout') 530 | cleanUpAndEnd() 531 | }, 30000) 532 | 533 | function cleanUpAndEnd () { 534 | if (abortTimeout) { 535 | clearTimeout(abortTimeout) 536 | } 537 | client.destroy() 538 | mockApmServer.close(function () { 539 | t.end() 540 | }) 541 | } 542 | 543 | // 1. Start a mock apm-server that returns 503 for central config queries. 544 | const mockApmServer = http.createServer(function (req, res) { 545 | const parsedUrl = new URL(req.url, 'http://localhost:0') 546 | let resBody = '{}' 547 | if (parsedUrl.pathname === '/config/v1/agents') { 548 | resBody = '{"ok":false,"message":"The requested resource is currently unavailable."}\n' 549 | res.writeHead(503) 550 | } 551 | res.end(resBody) 552 | }) 553 | 554 | mockApmServer.listen(function () { 555 | client = new Client(validOpts({ 556 | serverUrl: 'http://localhost:' + mockApmServer.address().port, 557 | // Turn centralConfig *off*. We'll manually trigger a poll for central 558 | // config via internal methods, so that we don't need to muck with 559 | // internal `setTimeout` intervals. 560 | centralConfig: false, 561 | apmServerVersion: '8.0.0' 562 | })) 563 | 564 | // 2. Ensure the client conditions for the crash. 
565 | // One of the crash conditions at the time was a second `client.config` 566 | // to ensure the request options were using the keep-alive agent. 567 | client.config() 568 | t.ok(client._conf.requestConfig.agent, 569 | 'agent for central config requests is defined') 570 | 571 | client.on('config', function (config) { 572 | t.fail('do not expect to get a successful central config response') 573 | }) 574 | client.on('request-error', function (err) { 575 | t.ok(err, 'got request-error on _pollConfig') 576 | t.ok(err.message.indexOf('Unexpected APM Server response when polling config') !== -1, 577 | 'request-error from _pollConfig includes expected error message') 578 | cleanUpAndEnd() 579 | }) 580 | 581 | // 3. Make a poll for central config. 582 | client._pollConfig() 583 | }) 584 | }) 585 | -------------------------------------------------------------------------------- /test/edge-cases.test.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Elasticsearch B.V. and other contributors where applicable. 3 | * Licensed under the BSD 2-Clause License; you may not use this file except in 4 | * compliance with the BSD 2-Clause License. 5 | */ 6 | 7 | 'use strict' 8 | 9 | const { exec } = require('child_process') 10 | const http = require('http') 11 | const path = require('path') 12 | const test = require('tape') 13 | const utils = require('./lib/utils') 14 | 15 | const Client = require('../') 16 | 17 | const APMServer = utils.APMServer 18 | const processIntakeReq = utils.processIntakeReq 19 | const assertIntakeReq = utils.assertIntakeReq 20 | const assertMetadata = utils.assertMetadata 21 | const assertEvent = utils.assertEvent 22 | const validOpts = utils.validOpts 23 | 24 | test('Event: close - if chopper ends', function (t) { 25 | t.plan(1) 26 | let client 27 | const server = APMServer(function (req, res) { 28 | client._chopper.end() 29 | setTimeout(function () { 30 | // wait a little to allow close to be emitted 31 | t.end() 32 | server.close() 33 | }, 10) 34 | }).listen(function () { 35 | client = new Client(validOpts({ 36 | serverUrl: 'http://localhost:' + server.address().port, 37 | apmServerVersion: '8.0.0' 38 | })) 39 | 40 | client.on('finish', function () { 41 | t.fail('should not emit finish event') 42 | }) 43 | client.on('close', function () { 44 | t.pass('should emit close event') 45 | }) 46 | 47 | client.sendSpan({ req: 1 }) 48 | }) 49 | }) 50 | 51 | test('Event: close - if chopper is destroyed', function (t) { 52 | t.plan(1) 53 | let client 54 | const server = APMServer(function (req, res) { 55 | client._chopper.destroy() 56 | setTimeout(function () { 57 | // wait a little to allow close to be emitted 58 | t.end() 59 | server.close() 60 | }, 10) 61 | }).listen(function () { 62 | client = new Client(validOpts({ 63 | serverUrl: 'http://localhost:' + server.address().port, 64 | apmServerVersion: '8.0.0' 65 | })) 66 | 67 | client.on('finish', function () { 68 | t.fail('should not emit finish event') 69 | }) 70 | client.on('close', function () { 71 | t.pass('should emit close event') 72 | }) 73 | 74 | client.sendSpan({ req: 1 }) 75 | }) 76 | }) 77 | 78 | test('write after end', function (t) { 79 | t.plan(2) 80 | const server = APMServer(function (req, res) { 81 | t.fail('should never get any request') 82 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 83 | client.on('error', function (err) { 84 | t.ok(err instanceof Error) 85 | t.equal(err.message, 'write after end') 86 | server.close() 87 | t.end() 88 | }) 89 | 
client.end() 90 | client.sendSpan({ foo: 42 }) 91 | }) 92 | }) 93 | 94 | test('request with error - no body', function (t) { 95 | const server = APMServer(function (req, res) { 96 | res.statusCode = 418 97 | res.end() 98 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 99 | client.on('request-error', function (err) { 100 | t.ok(err instanceof Error) 101 | t.equal(err.message, 'Unexpected APM Server response') 102 | t.equal(err.code, 418) 103 | t.equal(err.accepted, undefined) 104 | t.equal(err.errors, undefined) 105 | t.equal(err.response, undefined) 106 | client.destroy() 107 | server.close() 108 | t.end() 109 | }) 110 | client.sendSpan({ foo: 42 }) 111 | client.flush() 112 | }) 113 | }) 114 | 115 | test('request with error - non json body', function (t) { 116 | const server = APMServer(function (req, res) { 117 | res.statusCode = 418 118 | res.end('boom!') 119 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 120 | client.on('request-error', function (err) { 121 | t.ok(err instanceof Error) 122 | t.equal(err.message, 'Unexpected APM Server response') 123 | t.equal(err.code, 418) 124 | t.equal(err.accepted, undefined) 125 | t.equal(err.errors, undefined) 126 | t.equal(err.response, 'boom!') 127 | client.destroy() 128 | server.close() 129 | t.end() 130 | }) 131 | client.sendSpan({ foo: 42 }) 132 | client.flush() 133 | }) 134 | }) 135 | 136 | test('request with error - invalid json body', function (t) { 137 | const server = APMServer(function (req, res) { 138 | res.statusCode = 418 139 | res.setHeader('Content-Type', 'application/json') 140 | res.end('boom!') 141 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 142 | client.on('request-error', function (err) { 143 | t.ok(err instanceof Error) 144 | t.equal(err.message, 'Unexpected APM Server response') 145 | t.equal(err.code, 418) 146 | t.equal(err.accepted, undefined) 147 | t.equal(err.errors, undefined) 148 | t.equal(err.response, 'boom!') 149 | client.destroy() 150 | server.close() 151 | t.end() 152 | }) 153 | client.sendSpan({ foo: 42 }) 154 | client.flush() 155 | }) 156 | }) 157 | 158 | test('request with error - json body without accepted or errors properties', function (t) { 159 | const body = JSON.stringify({ foo: 'bar' }) 160 | const server = APMServer(function (req, res) { 161 | res.statusCode = 418 162 | res.setHeader('Content-Type', 'application/json') 163 | res.end(body) 164 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 165 | client.on('request-error', function (err) { 166 | t.ok(err instanceof Error) 167 | t.equal(err.message, 'Unexpected APM Server response') 168 | t.equal(err.code, 418) 169 | t.equal(err.accepted, undefined) 170 | t.equal(err.errors, undefined) 171 | t.equal(err.response, body) 172 | client.destroy() 173 | server.close() 174 | t.end() 175 | }) 176 | client.sendSpan({ foo: 42 }) 177 | client.flush() 178 | }) 179 | }) 180 | 181 | test('request with error - json body with accepted and errors properties', function (t) { 182 | const server = APMServer(function (req, res) { 183 | res.statusCode = 418 184 | res.setHeader('Content-Type', 'application/json') 185 | res.end(JSON.stringify({ accepted: 42, errors: [{ message: 'bar' }] })) 186 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 187 | client.on('request-error', function (err) { 188 | t.ok(err instanceof Error) 189 | t.equal(err.message, 'Unexpected APM Server response') 190 | t.equal(err.code, 418) 191 | t.equal(err.accepted, 42) 192 | t.deepEqual(err.errors, [{ message: 'bar' }]) 193 | 
t.equal(err.response, undefined) 194 | client.destroy() 195 | server.close() 196 | t.end() 197 | }) 198 | client.sendSpan({ foo: 42 }) 199 | client.flush() 200 | }) 201 | }) 202 | 203 | test('request with error - json body where Content-Type contains charset', function (t) { 204 | const server = APMServer(function (req, res) { 205 | res.statusCode = 418 206 | res.setHeader('Content-Type', 'application/json; charset=utf-8') 207 | res.end(JSON.stringify({ accepted: 42, errors: [{ message: 'bar' }] })) 208 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 209 | client.on('request-error', function (err) { 210 | t.ok(err instanceof Error) 211 | t.equal(err.message, 'Unexpected APM Server response') 212 | t.equal(err.code, 418) 213 | t.equal(err.accepted, 42) 214 | t.deepEqual(err.errors, [{ message: 'bar' }]) 215 | t.equal(err.response, undefined) 216 | client.destroy() 217 | server.close() 218 | t.end() 219 | }) 220 | client.sendSpan({ foo: 42 }) 221 | client.flush() 222 | }) 223 | }) 224 | 225 | test('socket hang up', function (t) { 226 | const server = APMServer(function (req, res) { 227 | req.socket.destroy() 228 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 229 | let closed = false 230 | client.on('request-error', function (err) { 231 | t.equal(err.message, 'socket hang up') 232 | t.equal(err.code, 'ECONNRESET') 233 | // wait a little in case 'close' is emitted async 234 | setTimeout(function () { 235 | t.equal(closed, false, 'client should not emit close') 236 | t.end() 237 | server.close() 238 | client.destroy() 239 | }, 50) 240 | }) 241 | client.on('close', function () { 242 | closed = true 243 | }) 244 | client.on('finish', function () { 245 | t.fail('should not emit finish') 246 | }) 247 | client.sendSpan({ foo: 42 }) 248 | }) 249 | }) 250 | 251 | test('socket hang up - continue with new request', function (t) { 252 | t.plan(4 + assertIntakeReq.asserts * 2 + assertMetadata.asserts + assertEvent.asserts) 253 | let reqs = 0 254 | let client 255 | const datas = [ 256 | assertMetadata, 257 | assertEvent({ span: { req: 2 } }) 258 | ] 259 | const server = APMServer(function (req, res) { 260 | assertIntakeReq(t, req) 261 | 262 | if (++reqs === 1) return req.socket.destroy() 263 | 264 | // We have to attach the listener directly to the HTTP request stream as it 265 | // will receive the gzip header once the write has been made on the 266 | // client.
If we were to attach it to the gunzip+ndjson, it would not fire 267 | req.on('data', function () { 268 | client.flush() 269 | }) 270 | 271 | req = processIntakeReq(req) 272 | req.on('data', function (obj) { 273 | datas.shift()(t, obj) 274 | }) 275 | req.on('end', function () { 276 | t.pass('should end request') 277 | res.end() 278 | client.end() // cleanup 1: end the client stream so it can 'finish' 279 | }) 280 | }).client({ apmServerVersion: '8.0.0' }, function (_client) { 281 | client = _client 282 | client.on('request-error', function (err) { 283 | t.equal(err.message, 'socket hang up', 'got "socket hang up" request-error') 284 | t.equal(err.code, 'ECONNRESET', 'request-error code is "ECONNRESET"') 285 | client.sendSpan({ req: 2 }) 286 | }) 287 | client.on('finish', function () { 288 | t.equal(reqs, 2, 'should emit finish after last request') 289 | client.end() 290 | server.close() 291 | t.end() 292 | }) 293 | client.sendSpan({ req: 1 }) 294 | }) 295 | }) 296 | 297 | test('intakeResTimeoutOnEnd', function (t) { 298 | const server = APMServer(function (req, res) { 299 | req.resume() 300 | }).client({ 301 | intakeResTimeoutOnEnd: 500, 302 | apmServerVersion: '8.0.0' 303 | }, function (client) { 304 | const start = Date.now() 305 | client.on('request-error', function (err) { 306 | t.ok(err, 'got a request-error from the client') 307 | const end = Date.now() 308 | const delta = end - start 309 | t.ok(delta > 400 && delta < 600, `timeout should be about 500ms, got ${delta}ms`) 310 | t.equal(err.message, 'intake response timeout: APM server did not respond within 0.5s of gzip stream finish') 311 | server.close() 312 | t.end() 313 | }) 314 | client.sendSpan({ foo: 42 }) 315 | client.end() 316 | }) 317 | }) 318 | 319 | test('intakeResTimeout', function (t) { 320 | const server = APMServer(function (req, res) { 321 | req.resume() 322 | }).client({ 323 | intakeResTimeout: 400, 324 | apmServerVersion: '8.0.0' 325 | }, function (client) { 326 | const start = Date.now() 327 | client.on('request-error', function (err) { 328 | t.ok(err, 'got a request-error from the client') 329 | const end = Date.now() 330 | const delta = end - start 331 | t.ok(delta > 300 && delta < 500, `timeout should be about 400ms, got ${delta}ms`) 332 | t.equal(err.message, 'intake response timeout: APM server did not respond within 0.4s of gzip stream finish') 333 | server.close() 334 | t.end() 335 | }) 336 | client.sendSpan({ foo: 42 }) 337 | // Do *not* `client.end()` else we are testing intakeResTimeoutOnEnd. 338 | client.flush() 339 | }) 340 | }) 341 | 342 | test('socket timeout - server response too slow', function (t) { 343 | const server = APMServer(function (req, res) { 344 | req.resume() 345 | }).client({ 346 | serverTimeout: 1000, 347 | // Set the intake res timeout higher to be able to test serverTimeout. 
348 | intakeResTimeoutOnEnd: 5000, 349 | apmServerVersion: '8.0.0' 350 | }, function (client) { 351 | const start = Date.now() 352 | client.on('request-error', function (err) { 353 | t.ok(err, 'got a request-error from the client') 354 | const end = Date.now() 355 | const delta = end - start 356 | t.ok(delta > 1000 && delta < 2000, `timeout should occur between 1-2 seconds: delta=${delta}ms`) 357 | t.equal(err.message, 'APM Server response timeout (1000ms)') 358 | server.close() 359 | t.end() 360 | }) 361 | client.sendSpan({ foo: 42 }) 362 | client.end() 363 | }) 364 | }) 365 | 366 | test('socket timeout - client request too slow', function (t) { 367 | const server = APMServer(function (req, res) { 368 | req.resume() 369 | req.on('end', function () { 370 | res.end() 371 | }) 372 | }).client({ serverTimeout: 1000, apmServerVersion: '8.0.0' }, function (client) { 373 | const start = Date.now() 374 | client.on('request-error', function (err) { 375 | const end = Date.now() 376 | const delta = end - start 377 | t.ok(delta > 1000 && delta < 2000, 'timeout should occur between 1-2 seconds') 378 | t.equal(err.message, 'APM Server response timeout (1000ms)') 379 | server.close() 380 | t.end() 381 | }) 382 | client.sendSpan({ foo: 42 }) 383 | }) 384 | }) 385 | 386 | test('client.destroy() - on fresh client', function (t) { 387 | t.plan(1) 388 | const client = new Client(validOpts()) 389 | client.on('finish', function () { 390 | t.fail('should not emit finish') 391 | }) 392 | client.on('close', function () { 393 | t.pass('should emit close') 394 | }) 395 | client.destroy() 396 | process.nextTick(function () { 397 | // wait a little to allow close to be emitted 398 | t.end() 399 | }) 400 | }) 401 | 402 | test('client.destroy() - on ended client', function (t) { 403 | t.plan(2) 404 | let client 405 | 406 | // create a server that doesn't unref incoming sockets to see if 407 | // `client.destroy()` will make the server close without hanging 408 | const server = http.createServer(function (req, res) { 409 | req.resume() 410 | req.on('end', function () { 411 | res.end() 412 | client.destroy() 413 | server.close() 414 | process.nextTick(function () { 415 | // wait a little to allow close to be emitted 416 | t.end() 417 | }) 418 | }) 419 | }) 420 | 421 | server.listen(function () { 422 | client = new Client(validOpts({ 423 | serverUrl: 'http://localhost:' + server.address().port, 424 | apmServerVersion: '8.0.0' 425 | })) 426 | client.on('finish', function () { 427 | t.pass('should emit finish only once') 428 | }) 429 | client.on('close', function () { 430 | t.pass('should emit close event') 431 | }) 432 | client.sendSpan({ foo: 42 }) 433 | client.end() 434 | }) 435 | }) 436 | 437 | test('client.destroy() - on client with request in progress', function (t) { 438 | t.plan(1) 439 | let client 440 | 441 | // create a server that doesn't unref incoming sockets to see if 442 | // `client.destroy()` will make the server close without hanging 443 | const server = http.createServer(function (req, res) { 444 | server.close() 445 | client.destroy() 446 | process.nextTick(function () { 447 | // wait a little to allow close to be emitted 448 | t.end() 449 | }) 450 | }) 451 | 452 | server.listen(function () { 453 | client = new Client(validOpts({ 454 | serverUrl: 'http://localhost:' + server.address().port 455 | // TODO: the _fetchApmServerVersion() here *is* hanging. 
456 | })) 457 | client.on('finish', function () { 458 | t.fail('should not emit finish') 459 | }) 460 | client.on('close', function () { 461 | t.pass('should emit close event') 462 | }) 463 | client.sendSpan({ foo: 42 }) 464 | }) 465 | }) 466 | 467 | // If the client is destroyed while waiting for cloud metadata to be fetched, 468 | // there should not be an error: 469 | // Error: Cannot call write after a stream was destroyed 470 | // when cloud metadata *has* returned. 471 | test('getCloudMetadata after client.destroy() should not result in error', function (t) { 472 | const server = http.createServer(function (req, res) { 473 | res.end('bye') 474 | }) 475 | 476 | server.listen(function () { 477 | // 1. Create a client with a slow cloudMetadataFetcher. 478 | const client = new Client(validOpts({ 479 | serverUrl: 'http://localhost:' + server.address().port, 480 | cloudMetadataFetcher: { 481 | getCloudMetadata: function (cb) { 482 | setTimeout(function () { 483 | t.comment('calling back with cloud metadata') 484 | cb(null, { fake: 'cloud metadata' }) 485 | }, 1000) 486 | } 487 | } 488 | })) 489 | client.on('close', function () { 490 | t.pass('should emit close event') 491 | }) 492 | client.on('finish', function () { 493 | t.fail('should not emit finish') 494 | }) 495 | client.on('error', function (err) { 496 | t.ifError(err, 'should not get a client "error" event') 497 | }) 498 | client.on('cloud-metadata', function () { 499 | t.end() 500 | }) 501 | 502 | // 2. Start sending something to the (mock) APM server. 503 | client.sendSpan({ foo: 42 }) 504 | 505 | // 3. Then destroy the client soon after, but before the `getCloudMetadata` 506 | // above finishes. 507 | setImmediate(function () { 508 | t.comment('destroy client') 509 | client.destroy() 510 | server.close() 511 | }) 512 | }) 513 | }) 514 | 515 | // FWIW, the current apm-agent-nodejs will happily call 516 | // `client.sendTransaction()` after it has called `client.destroy()`. 517 | test('client.send*() after client.destroy() should not result in error', function (t) { 518 | const mockApmServer = http.createServer(function (req, res) { 519 | res.end('bye') 520 | }) 521 | 522 | mockApmServer.listen(function () { 523 | const UNCORK_TIMER_MS = 100 524 | const client = new Client(validOpts({ 525 | serverUrl: 'http://localhost:' + mockApmServer.address().port, 526 | bufferWindowTime: UNCORK_TIMER_MS 527 | })) 528 | 529 | // 2. We should *not* receive: 530 | // Error: Cannot call write after a stream was destroyed 531 | client.on('error', function (err) { 532 | t.ifErr(err, 'should *not* receive a "Cannot call write after a stream was destroyed" error') 533 | }) 534 | 535 | // 1. Destroy the client, and then call one of its `.send*()` methods. 536 | client.destroy() 537 | client.sendSpan({ a: 'fake span' }) 538 | 539 | // 3. Give it until after `conf.bufferWindowTime` time (the setTimeout 540 | // length used for `_corkTimer`) -- which is the error code path we 541 | // are testing. 
542 | setTimeout(function () { 543 | t.ok('waited 2 * UNCORK_TIMER_MS') 544 | mockApmServer.close(function () { 545 | t.end() 546 | }) 547 | }, 2 * UNCORK_TIMER_MS) 548 | }) 549 | }) 550 | 551 | const dataTypes = ['span', 'transaction', 'error'] 552 | dataTypes.forEach(function (dataType) { 553 | const sendFn = 'send' + dataType.charAt(0).toUpperCase() + dataType.substr(1) 554 | 555 | test(`client.${sendFn}(): handle circular references`, function (t) { 556 | t.plan(assertIntakeReq.asserts + assertMetadata.asserts + assertEvent.asserts) 557 | const datas = [ 558 | assertMetadata, 559 | assertEvent({ [dataType]: { foo: 42, bar: '[Circular]' } }) 560 | ] 561 | const server = APMServer(function (req, res) { 562 | assertIntakeReq(t, req) 563 | req = processIntakeReq(req) 564 | req.on('data', function (obj) { 565 | datas.shift()(t, obj) 566 | }) 567 | req.on('end', function () { 568 | res.end() 569 | server.close() 570 | t.end() 571 | }) 572 | }).client({ apmServerVersion: '8.0.0' }, function (client) { 573 | const obj = { foo: 42 } 574 | obj.bar = obj 575 | client[sendFn](obj) 576 | client.flush(() => { client.destroy() }) 577 | }) 578 | }) 579 | }) 580 | 581 | // Ensure that the client.flush(cb) callback is called even if there are no 582 | // active handles -- i.e. the process is exiting. We test this out of process 583 | // to ensure no conflict with other tests or the test framework. 584 | test('client.flush callbacks must be called, even if no active handles', function (t) { 585 | let theError 586 | 587 | const server = APMServer(function (req, res) { 588 | const objStream = processIntakeReq(req) 589 | let n = 0 590 | objStream.on('data', function (obj) { 591 | if (++n === 2) { 592 | theError = obj.error 593 | } 594 | }) 595 | objStream.on('end', function () { 596 | res.statusCode = 202 597 | res.end() 598 | server.close() 599 | }) 600 | }) 601 | 602 | server.listen(function () { 603 | const url = 'http://localhost:' + server.address().port 604 | const script = path.resolve(__dirname, 'lib', 'call-me-back-maybe.js') 605 | const start = Date.now() 606 | exec(`"${process.execPath}" ${script} ${url}`, function (err, stdout, stderr) { 607 | if (stderr.trim()) { 608 | t.comment(`stderr from ${script}:\n${stderr}`) 609 | } 610 | if (err) { 611 | throw err 612 | } 613 | t.equal(stdout, 'sendCb called\nflushCb called\n', 614 | 'stdout shows both callbacks were called') 615 | const duration = Date.now() - start 616 | t.ok(duration < 1000, `should complete quickly, ie. not timeout (was: ${duration}ms)`) 617 | 618 | t.ok(theError, `APM server got an error object from ${script}`) 619 | if (theError) { 620 | t.equal(theError.exception.message, 'boom', 'error message is "boom"') 621 | } 622 | t.end() 623 | }) 624 | }) 625 | }) 626 | --------------------------------------------------------------------------------