├── .clang-format ├── .editorconfig ├── .eslintignore ├── .eslintrc.json ├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── build.yml │ ├── lint.yml │ ├── package-size.yml │ ├── pr-labels.yml │ └── release.yml ├── .gitignore ├── .gitlab-ci.yml ├── .gitlab └── benchmarks.yml ├── .nycrc ├── .prettierrc.js ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── appveyor.yml ├── benchmark └── sirun │ ├── .eslintrc.json │ ├── run-all-variants.js │ ├── runall.sh │ └── wall-profiler │ ├── index.js │ └── meta.json ├── binding.gyp ├── bindings ├── binding.cc ├── contexts.hh ├── defer.hh ├── per-isolate-data.cc ├── per-isolate-data.hh ├── profile-translator.hh ├── profilers │ ├── heap.cc │ ├── heap.hh │ ├── wall.cc │ └── wall.hh ├── test │ ├── binding.cc │ └── tap.h ├── thread-cpu-clock.cc ├── thread-cpu-clock.hh ├── translate-heap-profile.cc ├── translate-heap-profile.hh ├── translate-time-profile.cc ├── translate-time-profile.hh └── wrap.hh ├── codecov.yaml ├── package-lock.json ├── package.json ├── renovate.json ├── scripts ├── .eslintrc.json └── cctest.js ├── suppressions └── lsan_suppr.txt ├── system-test ├── Dockerfile.linux ├── Dockerfile.node10-alpine ├── Dockerfile.node12-alpine ├── Dockerfile.node14-alpine ├── Dockerfile.node15-alpine ├── Dockerfile.node16-alpine ├── busybench-js │ ├── package.json │ └── src │ │ └── busybench.js ├── busybench │ ├── package.json │ ├── src │ │ └── busybench.ts │ └── tsconfig.json ├── system_test.sh └── test.sh ├── tools ├── build │ ├── Dockerfile.alpine │ ├── Dockerfile.linux │ ├── build.sh │ └── linux_build_and_test.sh ├── kokoro │ ├── release │ │ ├── common.cfg │ │ ├── linux.cfg │ │ └── publish.cfg │ └── system-test │ │ ├── continuous │ │ ├── linux-prebuild.cfg │ │ ├── linux-v8-canary.cfg │ │ └── linux.cfg │ │ └── presubmit │ │ ├── linux-prebuild.cfg │ │ └── linux.cfg ├── publish.sh └── retry.sh ├── ts ├── src │ ├── heap-profiler-bindings.ts │ ├── heap-profiler.ts │ ├── index.ts │ ├── logger.ts │ ├── profile-encoder.ts │ ├── profile-serializer.ts │ ├── sourcemapper │ │ └── sourcemapper.ts │ ├── time-profiler-bindings.ts │ ├── time-profiler.ts │ └── v8-types.ts └── test │ ├── check_profile.ts │ ├── oom.ts │ ├── profiles-for-tests.ts │ ├── test-heap-profiler.ts │ ├── test-profile-encoder.ts │ ├── test-profile-serializer.ts │ ├── test-time-profiler.ts │ ├── test-worker-threads.ts │ └── worker.ts └── tsconfig.json /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | # BasedOnStyle: Google 4 | AccessModifierOffset: -1 5 | AlignAfterOpenBracket: Align 6 | AlignConsecutiveAssignments: false 7 | AlignConsecutiveDeclarations: false 8 | AlignEscapedNewlines: Right 9 | AlignOperands: true 10 | AlignTrailingComments: true 11 | AllowAllParametersOfDeclarationOnNextLine: true 12 | AllowShortBlocksOnASingleLine: false 13 | AllowShortCaseLabelsOnASingleLine: false 14 | AllowShortFunctionsOnASingleLine: Inline 15 | AllowShortIfStatementsOnASingleLine: true 16 | AllowShortLoopsOnASingleLine: true 17 | AlwaysBreakAfterDefinitionReturnType: None 18 | AlwaysBreakAfterReturnType: None 19 | AlwaysBreakBeforeMultilineStrings: false 20 | AlwaysBreakTemplateDeclarations: true 21 | BinPackArguments: false 22 | BinPackParameters: false 23 | BraceWrapping: 24 | AfterClass: false 25 | AfterControlStatement: false 26 | AfterEnum: false 27 | AfterFunction: false 28 | AfterNamespace: false 29 | AfterObjCDeclaration: false 30 | AfterStruct: false 31 | AfterUnion: false 32 | AfterExternBlock: false 
33 | BeforeCatch: false 34 | BeforeElse: false 35 | IndentBraces: false 36 | SplitEmptyFunction: true 37 | SplitEmptyRecord: true 38 | SplitEmptyNamespace: true 39 | BreakBeforeBinaryOperators: None 40 | BreakBeforeBraces: Attach 41 | BreakBeforeInheritanceComma: false 42 | BreakBeforeTernaryOperators: true 43 | BreakConstructorInitializersBeforeComma: false 44 | BreakConstructorInitializers: BeforeColon 45 | BreakAfterJavaFieldAnnotations: false 46 | BreakStringLiterals: true 47 | ColumnLimit: 80 48 | CommentPragmas: '^ IWYU pragma:' 49 | CompactNamespaces: false 50 | ConstructorInitializerAllOnOneLineOrOnePerLine: true 51 | ConstructorInitializerIndentWidth: 4 52 | ContinuationIndentWidth: 4 53 | Cpp11BracedListStyle: true 54 | DerivePointerAlignment: false 55 | DisableFormat: false 56 | ExperimentalAutoDetectBinPacking: false 57 | FixNamespaceComments: true 58 | ForEachMacros: 59 | - foreach 60 | - Q_FOREACH 61 | - BOOST_FOREACH 62 | IncludeBlocks: Preserve 63 | IncludeCategories: 64 | - Regex: '^' 65 | Priority: 2 66 | - Regex: '^<.*\.h>' 67 | Priority: 1 68 | - Regex: '^<.*' 69 | Priority: 2 70 | - Regex: '.*' 71 | Priority: 3 72 | IncludeIsMainRegex: '([-_](test|unittest))?$' 73 | IndentCaseLabels: true 74 | IndentPPDirectives: None 75 | IndentWidth: 2 76 | IndentWrappedFunctionNames: false 77 | JavaScriptQuotes: Leave 78 | JavaScriptWrapImports: true 79 | KeepEmptyLinesAtTheStartOfBlocks: false 80 | MacroBlockBegin: '' 81 | MacroBlockEnd: '' 82 | MaxEmptyLinesToKeep: 1 83 | NamespaceIndentation: None 84 | ObjCBlockIndentWidth: 2 85 | ObjCSpaceAfterProperty: false 86 | ObjCSpaceBeforeProtocolList: false 87 | PenaltyBreakAssignment: 2 88 | PenaltyBreakBeforeFirstCallParameter: 1 89 | PenaltyBreakComment: 300 90 | PenaltyBreakFirstLessLess: 120 91 | PenaltyBreakString: 1000 92 | PenaltyExcessCharacter: 1000000 93 | PenaltyReturnTypeOnItsOwnLine: 200 94 | PointerAlignment: Left 95 | ReflowComments: true 96 | SortIncludes: true 97 | SortUsingDeclarations: true 98 | SpaceAfterCStyleCast: false 99 | SpaceAfterTemplateKeyword: true 100 | SpaceBeforeAssignmentOperators: true 101 | SpaceBeforeParens: ControlStatements 102 | SpaceInEmptyParentheses: false 103 | SpacesBeforeTrailingComments: 2 104 | SpacesInAngles: false 105 | SpacesInContainerLiterals: true 106 | SpacesInCStyleCastParentheses: false 107 | SpacesInParentheses: false 108 | SpacesInSquareBrackets: false 109 | Standard: Auto 110 | TabWidth: 8 111 | UseTab: Never 112 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | ; http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 2 8 | end_of_line = lf 9 | charset = utf-8 10 | trim_trailing_whitespace = true 11 | insert_final_newline = true 12 | 13 | [*.md] 14 | indent_size = 4 15 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | **/node_modules 2 | **/coverage 3 | build/ 4 | proto/ 5 | out/ 6 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./node_modules/gts" 3 | } 4 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: 
-------------------------------------------------------------------------------- 1 | * @szegedi @nsavoire @Qard 2 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **What does this PR do?**: 2 | 3 | 4 | **Motivation**: 5 | 6 | 7 | **Additional Notes**: 8 | 9 | 10 | **How to test the change?**: 11 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | asan: 11 | strategy: 12 | matrix: 13 | version: [18, 20, 22, 24] 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v3 17 | - uses: actions/setup-node@v3 18 | with: 19 | node-version: ${{ matrix.version }} 20 | - run: npm install 21 | - run: npm run test:js-asan 22 | 23 | valgrind: 24 | strategy: 25 | matrix: 26 | version: [18, 20, 22, 24] 27 | runs-on: ubuntu-latest 28 | steps: 29 | - uses: actions/checkout@v3 30 | - uses: actions/setup-node@v3 31 | with: 32 | node-version: ${{ matrix.version }} 33 | - run: sudo apt-get update && sudo apt-get install valgrind 34 | - run: npm install 35 | - run: npm run test:js-valgrind 36 | 37 | build: 38 | uses: Datadog/action-prebuildify/.github/workflows/build.yml@main 39 | with: 40 | target-name: 'dd_pprof' # target name in binding.gyp 41 | package-manager: 'npm' # npm or yarn 42 | cache: true # enable caching of dependencies based on lockfile 43 | min-node-version: 16 44 | skip: 'linux-arm,linux-ia32' # skip building for these platforms 45 | 46 | dev_publish: 47 | needs: build 48 | runs-on: ubuntu-latest 49 | if: github.ref == 'refs/heads/main' 50 | environment: npm 51 | env: 52 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} 53 | steps: 54 | - uses: actions/checkout@v2 55 | - uses: actions/download-artifact@v4 56 | - uses: actions/setup-node@v3 57 | with: 58 | registry-url: 'https://registry.npmjs.org' 59 | - run: npm install 60 | - id: pkg 61 | run: | 62 | content=`cat ./package.json | tr '\n' ' '` 63 | echo "::set-output name=json::$content" 64 | - run: npm version --no-git-tag-version ${{ fromJson(steps.pkg.outputs.json).version }}-$(git rev-parse --short HEAD)+${{ github.run_id }}.${{ github.run_attempt }} 65 | - run: npm publish --tag dev 66 | 67 | build-successful: 68 | if: always() 69 | needs: [build] 70 | runs-on: ubuntu-latest 71 | steps: 72 | - name: Determine if everything is passing 73 | run: exit 1 74 | if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} 75 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | jobs: 8 | lint: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v2 12 | - uses: actions/setup-node@v2 13 | - run: yarn 14 | - run: yarn lint 15 | -------------------------------------------------------------------------------- /.github/workflows/package-size.yml: -------------------------------------------------------------------------------- 1 | name: Package Size 2 | 3 | on: 4 | pull_request: 5 | schedule: 6 | - cron: '0 4 * * *' 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.ref 
|| github.run_id }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | package-size-report: 14 | runs-on: ubuntu-latest 15 | permissions: 16 | pull-requests: write 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Setup Node.js 20 | uses: actions/setup-node@v2 21 | with: 22 | node-version: '16' 23 | - run: yarn 24 | - name: Compute module size tree and report 25 | uses: qard/heaviest-objects-in-the-universe@v1 26 | with: 27 | github-token: ${{ secrets.GITHUB_TOKEN }} 28 | -------------------------------------------------------------------------------- /.github/workflows/pr-labels.yml: -------------------------------------------------------------------------------- 1 | name: Pull Request Labels 2 | on: 3 | pull_request: 4 | types: [opened, labeled, unlabeled, synchronize] 5 | branches: 6 | - 'main' 7 | jobs: 8 | label: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: mheap/github-action-required-labels@v1 12 | with: 13 | mode: exactly 14 | count: 1 15 | labels: "semver-patch, semver-minor, semver-major" 16 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - v[0-9]+.x 7 | 8 | jobs: 9 | build: 10 | uses: Datadog/action-prebuildify/.github/workflows/build.yml@main 11 | with: 12 | target-name: 'dd_pprof' # target name in binding.gyp 13 | package-manager: 'npm' # npm or yarn 14 | cache: true # enable caching of dependencies based on lockfile 15 | min-node-version: 16 16 | skip: 'linux-arm,linux-ia32' # skip building for these platforms 17 | 18 | publish: 19 | needs: build 20 | runs-on: ubuntu-latest 21 | environment: npm 22 | env: 23 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} 24 | permissions: 25 | contents: write 26 | steps: 27 | - uses: actions/checkout@v2 28 | - uses: actions/download-artifact@v4 29 | - uses: actions/setup-node@v3 30 | with: 31 | registry-url: 'https://registry.npmjs.org' 32 | - run: npm install 33 | - run: npm publish 34 | - id: pkg 35 | run: | 36 | content=`cat ./package.json | tr '\n' ' '` 37 | echo "::set-output name=json::$content" 38 | - run: | 39 | git tag v${{ fromJson(steps.pkg.outputs.json).version }} 40 | git push origin v${{ fromJson(steps.pkg.outputs.json).version }} 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .nyc_output 2 | .coverage 3 | .vscode 4 | /*.build 5 | /build 6 | out 7 | node_modules 8 | system-test/busybench/package-lock.json 9 | system-test/busybench-js/package-lock.json 10 | prebuilds 11 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | stages: 2 | - benchmarks 3 | 4 | include: ".gitlab/benchmarks.yml" 5 | -------------------------------------------------------------------------------- /.gitlab/benchmarks.yml: -------------------------------------------------------------------------------- 1 | variables: 2 | BASE_CI_IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/benchmarking-platform:pprof-nodejs 3 | 4 | .benchmark_base: 5 | tags: ["runner:apm-k8s-tweaked-metal"] 6 | image: $BASE_CI_IMAGE 7 | stage: benchmarks 8 | rules: 9 | - if: $CI_COMMIT_TAG 10 | when: never 11 | - when: on_success 12 | variables: 13 | UPSTREAM_PROJECT_ID: $CI_PROJECT_ID 14 | 
UPSTREAM_PROJECT_NAME: $CI_PROJECT_NAME 15 | UPSTREAM_BRANCH: $CI_COMMIT_REF_NAME 16 | UPSTREAM_COMMIT_SHA: $CI_COMMIT_SHA 17 | 18 | KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: pprof-nodejs 19 | FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY: "true" 20 | before_script: 21 | - git config --global url."https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/".insteadOf "https://github.com/DataDog/" 22 | - git clone --branch pprof-nodejs https://github.com/DataDog/benchmarking-platform /platform 23 | 24 | benchmarks: 25 | extends: .benchmark_base 26 | interruptible: true 27 | timeout: 1h 28 | script: 29 | - export ARTIFACTS_DIR="$(pwd)/reports/${CI_JOB_ID}" && (mkdir -p "${ARTIFACTS_DIR}" || :) 30 | - cd /platform 31 | - ./steps/capture-hardware-software-info.sh 32 | - ./steps/run-benchmarks.sh 33 | parallel: 34 | matrix: 35 | - MAJOR_NODE_VERSION: 18 36 | - MAJOR_NODE_VERSION: 20 37 | - MAJOR_NODE_VERSION: 22 38 | - MAJOR_NODE_VERSION: 24 39 | artifacts: 40 | name: "reports" 41 | paths: 42 | - reports/ 43 | expire_in: 3 months 44 | 45 | benchmarks-pr-comment: 46 | extends: .benchmark_base 47 | needs: 48 | - job: benchmarks 49 | artifacts: true 50 | script: 51 | - export ARTIFACTS_DIR="$(pwd)/reports" 52 | - cd /platform 53 | - find "$ARTIFACTS_DIR" 54 | - ./steps/aggregate-results.sh 55 | - find "$ARTIFACTS_DIR" 56 | - source "$ARTIFACTS_DIR/.env" 57 | - ./steps/analyze-results.sh 58 | - "./steps/upload-results-to-s3.sh || :" 59 | - "./steps/post-pr-comment.sh || :" 60 | artifacts: 61 | name: "reports" 62 | paths: 63 | - reports/ 64 | expire_in: 3 months 65 | -------------------------------------------------------------------------------- /.nycrc: -------------------------------------------------------------------------------- 1 | { 2 | "report-dir": "./.coverage", 3 | "reporter": "lcov", 4 | "exclude": [ 5 | "src/*{/*,/**/*}.js", 6 | "src/*/v*/*.js", 7 | "test/**/*.js", 8 | "build/test" 9 | ], 10 | "watermarks": { 11 | "branches": [ 12 | 95, 13 | 100 14 | ], 15 | "functions": [ 16 | 95, 17 | 100 18 | ], 19 | "lines": [ 20 | 95, 21 | 100 22 | ], 23 | "statements": [ 24 | 95, 25 | 100 26 | ] 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /.prettierrc.js: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // https://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | module.exports = { 16 | endOfLine:"auto", 17 | ...require('gts/.prettierrc.json') 18 | } 19 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to become a contributor and submit your own code 2 | 3 | **Table of contents** 4 | 5 | * [Contributor License Agreements](#contributor-license-agreements) 6 | * [Contributing a patch](#contributing-a-patch) 7 | * [Running the tests](#running-the-tests) 8 | * [Releasing the library](#releasing-the-library) 9 | 10 | ## Contributor License Agreements 11 | 12 | We'd love to accept your sample apps and patches! Before we can take them, we 13 | have to jump a couple of legal hurdles. 14 | 15 | Please fill out either the individual or corporate Contributor License Agreement 16 | (CLA). 17 | 18 | * If you are an individual writing original source code and you're sure you 19 | own the intellectual property, then you'll need to sign an [individual CLA](https://developers.google.com/open-source/cla/individual). 20 | * If you work for a company that wants to allow you to contribute your work, 21 | then you'll need to sign a [corporate CLA](https://developers.google.com/open-source/cla/corporate). 22 | 23 | Follow either of the two links above to access the appropriate CLA and 24 | instructions for how to sign and return it. Once we receive it, we'll be able to 25 | accept your pull requests. 26 | 27 | ## Contributing A Patch 28 | 29 | 1. Submit an issue describing your proposed change to the repo in question. 30 | 1. The repo owner will respond to your issue promptly. 31 | 1. If your proposed change is accepted, and you haven't already done so, sign a 32 | Contributor License Agreement (see details above). 33 | 1. Fork the desired repo, develop and test your code changes. 34 | 1. Ensure that your code adheres to the existing style in the code to which 35 | you are contributing. 36 | 1. Ensure that your code has an appropriate set of tests which all pass. 37 | 1. Submit a pull request. 38 | 39 | ## Running the tests 40 | 41 | 1. [Prepare your environment for Node.js setup][setup]. 42 | 43 | 1. Install dependencies: 44 | ```sh 45 | npm install 46 | ``` 47 | 48 | 1. Run the tests: 49 | ```sh 50 | npm test 51 | ``` 52 | 53 | 1. Lint (and maybe fix) any changes: 54 | ```sh 55 | npm run fix 56 | ``` 57 | 58 | [setup]: https://cloud.google.com/nodejs/docs/setup 59 | 60 | # Running the system test 61 | The system test starts a simple benchmark, uses this module to collect a time 62 | and a heap profile, and verifies that the profiles contain functions from 63 | within the benchmark. 64 | 65 | To run the system test, [golang](https://golang.org/) must be installed. 66 | 67 | The following command can be used to run the system test with all supported 68 | versions of Node.JS: 69 | ```sh 70 | sh system-test/system_test.sh 71 | ``` 72 | 73 | To run the system test with the v8 canary build, use: 74 | ```sh 75 | RUN_ONLY_V8_CANARY_TEST=true sh system-test/system_test.sh 76 | ``` -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 
9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 
180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pprof support for Node.js 2 | 3 | [![NPM Version][npm-image]][npm-url] 4 | [![Build Status][build-image]][build-url] 5 | [![Known Vulnerabilities][snyk-image]][snyk-url] 6 | 7 | [pprof][pprof-url] support for Node.js. 8 | 9 | ## Prerequisites 10 | 1. Your application will need to be using Node.js 16 or greater. 11 | 12 | 2. The `pprof` module has a native component that is used to collect profiles 13 | with v8's CPU and Heap profilers. You may need to install additional 14 | dependencies to build this module. 15 | * `pprof` has prebuilt binaries available for Linux arm64/x64, 16 | Alpine Linux x64, macOS arm64/x64, and Windows x64 for Node 16/18/20/22/23. 17 | No additional dependencies are required. 18 | * For other environments: on environments that `pprof` does not have 19 | prebuilt binaries for, the module 20 | [`node-gyp`](https://www.npmjs.com/package/node-gyp) will be used to 21 | build binaries. See `node-gyp`'s 22 | [documentation](https://github.com/nodejs/node-gyp#installation) 23 | for information on dependencies required to build binaries with `node-gyp`. 24 | 25 | 3. The [`pprof`][pprof-url] CLI can be used to view profiles collected with 26 | this module. Instructions for installing the `pprof` CLI can be found 27 | [here][pprof-install-url]. 28 | 29 | ## Basic Set-up 30 | 31 | Install [`pprof`][npm-url] with `npm` or add to your `package.json`. 32 | ```sh 33 | # Install through npm while saving to the local 'package.json' 34 | npm install --save @datadog/pprof 35 | ``` 36 | 37 | ## Using the Profiler 38 | 39 | ### Collect a Wall Time Profile 40 | 41 | #### In code: 42 | 1. Update code to collect and save a profile: 43 | ```javascript 44 | const profile = await pprof.time.profile({ 45 | durationMillis: 10000, // time in milliseconds for which to 46 | // collect profile. 47 | }); 48 | const buf = await pprof.encode(profile); 49 | fs.writeFile('wall.pb.gz', buf, (err) => { 50 | if (err) throw err; 51 | }); 52 | ``` 53 | 54 | 2. View the profile with command line [`pprof`][pprof-url]: 55 | ```sh 56 | pprof -http=: wall.pb.gz 57 | ``` 58 | 59 | #### Requiring from the command line 60 | 61 | 1. 
Start program from the command line: 62 | ```sh 63 | node --require @datadog/pprof app.js 64 | ``` 65 | 66 | 2. A wall time profile for the job will be saved in 67 | `pprof-profile-${process.pid}.pb.gz`. View the profile with command line 68 | [`pprof`][pprof-url]: 69 | ```sh 70 | pprof -http=: pprof-profile-${process.pid}.pb.gz 71 | ``` 72 | 73 | ### Collect a Heap Profile 74 | 1. Enable heap profiling at the start of the application: 75 | ```javascript 76 | // The average number of bytes between samples. 77 | const intervalBytes = 512 * 1024; 78 | 79 | // The maximum stack depth for samples collected. 80 | const stackDepth = 64; 81 | 82 | heap.start(intervalBytes, stackDepth); 83 | ``` 84 | 2. Collect heap profiles: 85 | 86 | * Collecting and saving a profile in profile.proto format: 87 | ```javascript 88 | const profile = await pprof.heap.profile(); 89 | const buf = await pprof.encode(profile); 90 | fs.writeFile('heap.pb.gz', buf, (err) => { 91 | if (err) throw err; 92 | }) 93 | ``` 94 | 95 | * View the profile with command line [`pprof`][pprof-url]. 96 | ```sh 97 | pprof -http=: heap.pb.gz 98 | ``` 99 | 100 | * Collecting a heap profile with V8 allocation profile format: 101 | ```javascript 102 | const profile = await pprof.heap.v8Profile(); 103 | ``` 104 | 105 | [build-image]: https://github.com/Datadog/pprof-nodejs/actions/workflows/build.yml/badge.svg?branch=main 106 | [build-url]: https://github.com/Datadog/pprof-nodejs/actions/workflows/build.yml 107 | [coveralls-image]: https://coveralls.io/repos/google/pprof-nodejs/badge.svg?branch=main&service=github 108 | [npm-image]: https://badge.fury.io/js/pprof.svg 109 | [npm-url]: https://npmjs.org/package/pprof 110 | [pprof-url]: https://github.com/google/pprof 111 | [pprof-install-url]: https://github.com/google/pprof#building-pprof 112 | [snyk-image]: https://snyk.io/test/github/google/pprof-nodejs/badge.svg 113 | [snyk-url]: https://snyk.io/test/github/google/pprof-nodejs 114 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | environment: 2 | matrix: 3 | - nodejs_version: "6" 4 | - nodejs_version: "8" 5 | - nodejs_version: "10" 6 | - nodejs_version: "11" 7 | 8 | install: 9 | - ps: Install-Product node $env:nodejs_version 10 | - npm install 11 | 12 | test_script: 13 | - node --version 14 | - npm --version 15 | - npm test 16 | 17 | build: off 18 | -------------------------------------------------------------------------------- /benchmark/sirun/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "parserOptions": { 4 | "ecmaVersion": 2020 5 | }, 6 | "extends": [ 7 | "eslint:recommended", 8 | "standard" 9 | ], 10 | "env": { 11 | "node" : true 12 | }, 13 | "rules": { 14 | "max-len": [2, 120, 2], 15 | "no-var": 2, 16 | "no-console": 2, 17 | "prefer-const": 2, 18 | "object-curly-spacing": [2, "always"], 19 | "import/no-extraneous-dependencies": 2, 20 | "standard/no-callback-literal": 0, 21 | "no-prototype-builtins": 0 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /benchmark/sirun/run-all-variants.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const childProcess = require('child_process') 4 | const path = require('path') 5 | const readline = require('readline') 6 | 7 | process.env.DD_TRACE_TELEMETRY_ENABLED = 'false' 8 | 9 | 
function exec (...args) { 10 | return new Promise((resolve, reject) => { 11 | const proc = childProcess.spawn(...args) 12 | streamAddVersion(proc.stdout) 13 | proc.on('error', reject) 14 | proc.on('exit', (code) => { 15 | if (code === 0) { 16 | resolve() 17 | } else { 18 | reject(new Error('Process exited with non-zero code.')) 19 | } 20 | }) 21 | }) 22 | } 23 | 24 | const metaJson = require(path.join(process.cwd(), 'meta.json')) 25 | 26 | const env = Object.assign({}, process.env, { DD_TRACE_STARTUP_LOGS: 'false' }) 27 | 28 | function streamAddVersion (input) { 29 | input.rl = readline.createInterface({ input }) 30 | input.rl.on('line', function (line) { 31 | try { 32 | const json = JSON.parse(line.toString()) 33 | json.nodeVersion = process.versions.node 34 | // eslint-disable-next-line no-console 35 | console.log(JSON.stringify(json)) 36 | } catch (e) { 37 | // eslint-disable-next-line no-console 38 | console.log(line) 39 | } 40 | }) 41 | } 42 | 43 | function getStdio () { 44 | return ['inherit', 'pipe', 'inherit'] 45 | } 46 | 47 | (async () => { 48 | try { 49 | if (metaJson.variants) { 50 | const variants = metaJson.variants 51 | for (const variant in variants) { 52 | const variantEnv = Object.assign({}, env, { SIRUN_VARIANT: variant }) 53 | await exec('sirun', ['meta.json'], { env: variantEnv, stdio: getStdio() }) 54 | } 55 | } else { 56 | await exec('sirun', ['meta.json'], { env, stdio: getStdio() }) 57 | } 58 | } catch (e) { 59 | setImmediate(() => { 60 | throw e // Older Node versions don't fail on uncaught promise rejections. 61 | }) 62 | } 63 | })() 64 | -------------------------------------------------------------------------------- /benchmark/sirun/runall.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | if [ -n "${MAJOR_NODE_VERSION:-}" ]; then 6 | if test -f ~/.nvm/nvm.sh; then 7 | source ~/.nvm/nvm.sh 8 | else 9 | source "${NVM_DIR:-usr/local/nvm}/nvm.sh" 10 | fi 11 | 12 | nvm use "${MAJOR_NODE_VERSION}" 13 | 14 | pushd ../../ 15 | npm install 16 | popd 17 | fi 18 | 19 | VERSION=$(node -v) 20 | echo "using Node.js ${VERSION}" 21 | 22 | for d in *; do 23 | if [ -d "${d}" ]; then 24 | pushd "$d" 25 | time node ../run-all-variants.js >> ../results.ndjson 26 | popd 27 | fi 28 | done 29 | 30 | if [ "${DEBUG_RESULTS:-false}" == "true" ]; then 31 | echo "Benchmark Results:" 32 | cat ./results.ndjson 33 | fi 34 | 35 | echo "all tests for ${VERSION} have now completed." 
36 | -------------------------------------------------------------------------------- /benchmark/sirun/wall-profiler/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const profiler = require('../../../out/src/time-profiler') 4 | const { createServer, request } = require('http') 5 | 6 | const concurrency = Number(process.env.CONCURRENCY || '10') 7 | const requestFrequency = Number(process.env.REQUEST_FREQUENCY || '15') 8 | const sampleFrequency = Number(process.env.SAMPLE_FREQUENCY || '999') 9 | 10 | const server = createServer((req, res) => { 11 | setImmediate(() => { 12 | res.end('hello') 13 | }) 14 | }) 15 | 16 | function get (options) { 17 | return new Promise((resolve, reject) => { 18 | const req = request(options, (res) => { 19 | const chunks = [] 20 | res.on('error', reject) 21 | res.on('data', chunks.push.bind(chunks)) 22 | res.on('end', () => { 23 | resolve(Buffer.concat(chunks)) 24 | }) 25 | }) 26 | req.on('error', reject) 27 | req.end() 28 | }) 29 | } 30 | 31 | function delay (ms) { 32 | return new Promise(resolve => setTimeout(resolve, ms)) 33 | } 34 | 35 | async function storm (requestFrequency, task) { 36 | const gap = (1 / requestFrequency) * 1e9 37 | while (server.listening) { 38 | const start = process.hrtime.bigint() 39 | try { 40 | await task() 41 | } catch (e) { 42 | // Ignore ECONNRESET if server is shutting down 43 | if (e.code !== 'ECONNRESET' || server.listening) { 44 | throw e 45 | } 46 | } 47 | const end = process.hrtime.bigint() 48 | const remainder = gap - Number(end - start) 49 | await delay(Math.max(0, remainder / 1e6)) 50 | } 51 | } 52 | 53 | server.listen(8080, '0.0.0.0', async () => { 54 | if (!concurrency) return 55 | const { address, port } = server.address() 56 | const getter = get.bind(null, { 57 | hostname: address, 58 | path: '/', 59 | port 60 | }) 61 | const task = storm.bind(null, requestFrequency, getter) 62 | const tasks = Array.from({ length: concurrency }, task) 63 | await Promise.all(tasks) 64 | }) 65 | 66 | if (sampleFrequency !== 0) { 67 | profiler.start({ intervalMicros: 1e6 / sampleFrequency }) 68 | } 69 | 70 | setTimeout(() => { 71 | if (profiler.isStarted()) { 72 | profiler.stop() 73 | } 74 | server.close() 75 | }, 1000) 76 | -------------------------------------------------------------------------------- /benchmark/sirun/wall-profiler/meta.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "profiler", 3 | "run": "node index.js", 4 | "cachegrind": true, 5 | "iterations": 10, 6 | "variants": { 7 | "idle-no-wall-profiler": { 8 | "env": { 9 | "CONCURRENCY": "0", 10 | "REQUEST_FREQUENCY": "0", 11 | "SAMPLE_FREQUENCY": "0" 12 | } 13 | }, 14 | "idle-with-wall-profiler": { 15 | "env": { 16 | "CONCURRENCY": "0", 17 | "REQUEST_FREQUENCY": "0", 18 | "SAMPLE_FREQUENCY": "999" 19 | } 20 | }, 21 | "light-load-no-wall-profiler": { 22 | "env": { 23 | "CONCURRENCY": "5", 24 | "REQUEST_FREQUENCY": "5", 25 | "SAMPLE_FREQUENCY": "0" 26 | } 27 | }, 28 | "light-load-with-wall-profiler": { 29 | "env": { 30 | "CONCURRENCY": "5", 31 | "REQUEST_FREQUENCY": "5", 32 | "SAMPLE_FREQUENCY": "999" 33 | } 34 | }, 35 | "heavy-load-no-wall-profiler": { 36 | "env": { 37 | "CONCURRENCY": "15", 38 | "REQUEST_FREQUENCY": "50", 39 | "SAMPLE_FREQUENCY": "0" 40 | } 41 | }, 42 | "heavy-load-with-wall-profiler": { 43 | "env": { 44 | "CONCURRENCY": "15", 45 | "REQUEST_FREQUENCY": "50", 46 | "SAMPLE_FREQUENCY": "999" 47 | } 48 | } 49 | } 50 | } 51 | 
-------------------------------------------------------------------------------- /binding.gyp: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "address_sanitizer%": 0, # enable address + undefined behaviour sanitizer 4 | "thread_sanitizer%": 0, # enable thread sanitizer, 5 | "build_tests%": 0 6 | }, 7 | "conditions": [ 8 | [ 9 | "build_tests != 'true'", 10 | { 11 | "targets": [ 12 | { 13 | "target_name": "dd_pprof", 14 | "sources": [ 15 | "bindings/profilers/heap.cc", 16 | "bindings/profilers/wall.cc", 17 | "bindings/per-isolate-data.cc", 18 | "bindings/thread-cpu-clock.cc", 19 | "bindings/translate-heap-profile.cc", 20 | "bindings/translate-time-profile.cc", 21 | "bindings/binding.cc" 22 | ], 23 | "include_dirs": [ 24 | "bindings", 25 | " 18 | #include 19 | #include 20 | 21 | #include "profilers/heap.hh" 22 | #include "profilers/wall.hh" 23 | 24 | #ifdef __linux__ 25 | #include 26 | #include 27 | #endif 28 | 29 | static NAN_METHOD(GetNativeThreadId) { 30 | #ifdef __APPLE__ 31 | uint64_t native_id; 32 | (void)pthread_threadid_np(NULL, &native_id); 33 | #elif defined(__linux__) 34 | pid_t native_id = syscall(SYS_gettid); 35 | #elif defined(_MSC_VER) 36 | DWORD native_id = GetCurrentThreadId(); 37 | #endif 38 | info.GetReturnValue().Set(v8::Integer::New(info.GetIsolate(), native_id)); 39 | } 40 | 41 | #if defined(__GNUC__) && !defined(__clang__) 42 | #pragma GCC diagnostic push 43 | #pragma GCC diagnostic ignored "-Wcast-function-type" 44 | #endif 45 | NODE_MODULE_INIT(/* exports, module, context */) { 46 | #if defined(__GNUC__) && !defined(__clang__) 47 | #pragma GCC diagnostic pop 48 | #endif 49 | 50 | dd::HeapProfiler::Init(exports); 51 | dd::WallProfiler::Init(exports); 52 | Nan::SetMethod(exports, "getNativeThreadId", GetNativeThreadId); 53 | } 54 | -------------------------------------------------------------------------------- /bindings/contexts.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | #include 21 | 22 | namespace dd { 23 | 24 | struct NodeInfo { 25 | v8::Local contexts; 26 | uint32_t hitcount; 27 | }; 28 | 29 | using ContextsByNode = std::unordered_map; 30 | } // namespace dd 31 | -------------------------------------------------------------------------------- /bindings/defer.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | 21 | namespace details { 22 | 23 | struct DeferDummy {}; 24 | 25 | template 26 | class DeferHolder { 27 | public: 28 | DeferHolder(DeferHolder&&) = default; 29 | DeferHolder(const DeferHolder&) = delete; 30 | DeferHolder& operator=(DeferHolder&&) = delete; 31 | DeferHolder& operator=(const DeferHolder&) = delete; 32 | 33 | template 34 | explicit DeferHolder(T&& f) : _func(std::forward(f)) {} 35 | 36 | ~DeferHolder() { reset(); } 37 | 38 | void reset() { 39 | if (_active) { 40 | _func(); 41 | _active = false; 42 | } 43 | } 44 | 45 | void release() { _active = false; } 46 | 47 | private: 48 | F _func; 49 | bool _active = true; 50 | }; 51 | 52 | template 53 | DeferHolder operator*(DeferDummy, F&& f) { 54 | return DeferHolder{std::forward(f)}; 55 | } 56 | 57 | } // namespace details 58 | 59 | template 60 | details::DeferHolder make_defer(F&& f) { 61 | return details::DeferHolder{std::forward(f)}; 62 | } 63 | 64 | #define DEFER_(LINE) zz_defer##LINE 65 | #define DEFER(LINE) DEFER_(LINE) 66 | #define defer \ 67 | [[maybe_unused]] const auto& DEFER(__COUNTER__) = details::DeferDummy{}* [&]() 68 | -------------------------------------------------------------------------------- /bindings/per-isolate-data.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include "per-isolate-data.hh" 22 | 23 | namespace dd { 24 | 25 | static std::unordered_map per_isolate_data_; 26 | static std::mutex mutex; 27 | 28 | PerIsolateData* PerIsolateData::For(v8::Isolate* isolate) { 29 | const std::lock_guard lock(mutex); 30 | auto maybe = per_isolate_data_.find(isolate); 31 | if (maybe != per_isolate_data_.end()) { 32 | return &maybe->second; 33 | } 34 | 35 | per_isolate_data_.emplace(std::make_pair(isolate, PerIsolateData())); 36 | 37 | auto pair = per_isolate_data_.find(isolate); 38 | auto perIsolateData = &pair->second; 39 | 40 | node::AddEnvironmentCleanupHook( 41 | isolate, 42 | [](void* data) { 43 | const std::lock_guard lock(mutex); 44 | per_isolate_data_.erase(static_cast(data)); 45 | }, 46 | isolate); 47 | 48 | return perIsolateData; 49 | } 50 | 51 | Nan::Global& PerIsolateData::WallProfilerConstructor() { 52 | return wall_profiler_constructor; 53 | } 54 | 55 | std::shared_ptr& PerIsolateData::GetHeapProfilerState() { 56 | return heap_profiler_state; 57 | } 58 | 59 | } // namespace dd 60 | -------------------------------------------------------------------------------- /bindings/per-isolate-data.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | namespace dd { 25 | 26 | struct HeapProfilerState; 27 | 28 | class PerIsolateData { 29 | private: 30 | Nan::Global wall_profiler_constructor; 31 | std::shared_ptr heap_profiler_state; 32 | 33 | PerIsolateData() {} 34 | 35 | public: 36 | static PerIsolateData* For(v8::Isolate* isolate); 37 | 38 | Nan::Global& WallProfilerConstructor(); 39 | std::shared_ptr& GetHeapProfilerState(); 40 | }; 41 | 42 | } // namespace dd 43 | -------------------------------------------------------------------------------- /bindings/profile-translator.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | #include 18 | 19 | namespace dd { 20 | class ProfileTranslator { 21 | v8::Isolate* isolate = v8::Isolate::GetCurrent(); 22 | v8::Local context = isolate->GetCurrentContext(); 23 | v8::Local emptyArray = v8::Array::New(isolate, 0); 24 | 25 | protected: 26 | v8::Local NewObject() { return v8::Object::New(isolate); } 27 | 28 | v8::Local NewInteger(int x) { 29 | return v8::Integer::New(isolate, x); 30 | } 31 | 32 | v8::Local NewBoolean(bool x) { 33 | return v8::Boolean::New(isolate, x); 34 | } 35 | 36 | template 37 | v8::Local NewNumber(T x) { 38 | return v8::Number::New(isolate, x); 39 | } 40 | 41 | v8::Local NewArray(int length) { 42 | return length == 0 ? emptyArray : v8::Array::New(isolate, length); 43 | } 44 | 45 | v8::Local NewString(const char* str) { 46 | return v8::String::NewFromUtf8(isolate, str).ToLocalChecked(); 47 | } 48 | 49 | v8::MaybeLocal Get(v8::Local arr, uint32_t index) { 50 | return arr->Get(context, index); 51 | } 52 | 53 | v8::Maybe Set(v8::Local arr, 54 | uint32_t index, 55 | v8::Local value) { 56 | return arr->Set(context, index, value); 57 | } 58 | 59 | v8::Maybe Set(v8::Local obj, 60 | v8::Local key, 61 | v8::Local value) { 62 | return obj->Set(context, key, value); 63 | } 64 | 65 | ProfileTranslator() = default; 66 | }; 67 | }; // namespace dd 68 | -------------------------------------------------------------------------------- /bindings/profilers/heap.hh: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2018 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | 21 | namespace dd { 22 | 23 | class HeapProfiler { 24 | public: 25 | // Signature: 26 | // startSamplingHeapProfiler() 27 | static NAN_METHOD(StartSamplingHeapProfiler); 28 | 29 | // Signature: 30 | // stopSamplingHeapProfiler() 31 | static NAN_METHOD(StopSamplingHeapProfiler); 32 | 33 | // Signature: 34 | // getAllocationProfile(): AllocationProfileNode 35 | static NAN_METHOD(GetAllocationProfile); 36 | 37 | static NAN_METHOD(MonitorOutOfMemory); 38 | 39 | static NAN_MODULE_INIT(Init); 40 | }; 41 | 42 | } // namespace dd 43 | -------------------------------------------------------------------------------- /bindings/profilers/wall.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #pragma once 18 | 19 | #include "contexts.hh" 20 | #include "thread-cpu-clock.hh" 21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | namespace dd { 30 | 31 | struct Result { 32 | Result() = default; 33 | explicit Result(const char* msg) : success{false}, msg{msg} {}; 34 | 35 | bool success = true; 36 | std::string msg; 37 | }; 38 | 39 | class WallProfiler : public Nan::ObjectWrap { 40 | public: 41 | enum class CollectionMode { kNoCollect, kPassThrough, kCollectContexts }; 42 | 43 | private: 44 | enum Fields { kSampleCount, kFieldCount }; 45 | 46 | using ContextPtr = std::shared_ptr>; 47 | 48 | std::chrono::microseconds samplingPeriod_{0}; 49 | v8::CpuProfiler* cpuProfiler_ = nullptr; 50 | // TODO: Investigate use of v8::Persistent instead of shared_ptr to 51 | // avoid heap allocation. Need to figure out the right move/copy semantics in 52 | // and out of the ring buffer. 53 | 54 | // We're using a pair of shared pointers and an atomic pointer-to-current as 55 | // a way to ensure signal safety on update. 56 | ContextPtr context1_; 57 | ContextPtr context2_; 58 | std::atomic curContext_; 59 | 60 | std::atomic gcCount = 0; 61 | double gcAsyncId; 62 | 63 | std::atomic collectionMode_; 64 | std::atomic noCollectCallCount_; 65 | std::string profileId_; 66 | uint64_t profileIdx_ = 0; 67 | bool includeLines_ = false; 68 | bool withContexts_ = false; 69 | bool started_ = false; 70 | bool workaroundV8Bug_; 71 | static inline constexpr bool detectV8Bug_ = true; 72 | bool collectCpuTime_; 73 | bool collectAsyncId_; 74 | bool isMainThread_; 75 | int v8ProfilerStuckEventLoopDetected_ = 0; 76 | ProcessCpuClock::time_point startProcessCpuTime_{}; 77 | int64_t startThreadCpuTime_ = 0; 78 | /* threadCpuStopWatch_ is used to measure CPU consumed by JS thread owning the 79 | * WallProfiler object during profiling period of main worker thread. */ 80 | ThreadCpuStopWatch threadCpuStopWatch_; 81 | uint32_t* fields_; 82 | v8::Global jsArray_; 83 | 84 | struct SampleContext { 85 | ContextPtr context; 86 | int64_t time_from; 87 | int64_t time_to; 88 | int64_t cpu_time; 89 | double async_id; 90 | }; 91 | 92 | using ContextBuffer = std::vector; 93 | ContextBuffer contexts_; 94 | 95 | ~WallProfiler(); 96 | void Dispose(v8::Isolate* isolate); 97 | 98 | // A new CPU profiler object will be created each time profiling is started 99 | // to work around https://bugs.chromium.org/p/v8/issues/detail?id=11051. 100 | v8::CpuProfiler* CreateV8CpuProfiler(); 101 | 102 | ContextsByNode GetContextsByNode(v8::CpuProfile* profile, 103 | ContextBuffer& contexts, 104 | int64_t startCpuTime); 105 | 106 | bool waitForSignal(uint64_t targetCallCount = 0); 107 | 108 | public: 109 | /** 110 | * @param samplingPeriodMicros sampling interval, in microseconds 111 | * @param durationMicros the duration of sampling, in microseconds. This 112 | * parameter is informative; it is up to the caller to call the Stop method 113 | * every period. The parameter is used to preallocate data structures that 114 | * should not be reallocated in async signal safe code. 
115 | */ 116 | explicit WallProfiler(std::chrono::microseconds samplingPeriod, 117 | std::chrono::microseconds duration, 118 | bool includeLines, 119 | bool withContexts, 120 | bool workaroundV8bug, 121 | bool collectCpuTime, 122 | bool collectAsyncId, 123 | bool isMainThread); 124 | 125 | v8::Local GetContext(v8::Isolate*); 126 | void SetContext(v8::Isolate*, v8::Local); 127 | void PushContext(int64_t time_from, 128 | int64_t time_to, 129 | int64_t cpu_time, 130 | double async_id); 131 | Result StartImpl(); 132 | std::string StartInternal(); 133 | Result StopImpl(bool restart, v8::Local& profile); 134 | 135 | CollectionMode collectionMode() { 136 | auto res = collectionMode_.load(std::memory_order_relaxed); 137 | if (res == CollectionMode::kNoCollect) { 138 | noCollectCallCount_.fetch_add(1, std::memory_order_relaxed); 139 | } 140 | std::atomic_signal_fence(std::memory_order_acquire); 141 | return res; 142 | } 143 | 144 | bool collectCpuTime() const { return collectCpuTime_; } 145 | 146 | int v8ProfilerStuckEventLoopDetected() const { 147 | return v8ProfilerStuckEventLoopDetected_; 148 | } 149 | 150 | ThreadCpuClock::duration GetAndResetThreadCpu() { 151 | return threadCpuStopWatch_.GetAndReset(); 152 | } 153 | 154 | double GetAsyncId(v8::Isolate* isolate); 155 | void OnGCStart(v8::Isolate* isolate); 156 | void OnGCEnd(); 157 | 158 | static NAN_METHOD(New); 159 | static NAN_METHOD(Start); 160 | static NAN_METHOD(Stop); 161 | static NAN_METHOD(V8ProfilerStuckEventLoopDetected); 162 | static NAN_METHOD(Dispose); 163 | static NAN_MODULE_INIT(Init); 164 | static NAN_GETTER(GetContext); 165 | static NAN_SETTER(SetContext); 166 | static NAN_GETTER(SharedArrayGetter); 167 | }; 168 | 169 | } // namespace dd 170 | -------------------------------------------------------------------------------- /bindings/test/binding.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include "nan.h" 22 | #include "node.h" 23 | #include "tap.h" 24 | #include "v8.h" 25 | 26 | #if defined(__GNUC__) && !defined(__clang__) 27 | #pragma GCC diagnostic push 28 | #pragma GCC diagnostic ignored "-Wcast-function-type" 29 | #endif 30 | NODE_MODULE_INIT(/* exports, module, context */) { 31 | #if defined(__GNUC__) && !defined(__clang__) 32 | #pragma GCC diagnostic pop 33 | #endif 34 | 35 | Tap t; 36 | const char* env_var = std::getenv("TEST"); 37 | std::string name(env_var == nullptr ? 
"" : env_var); 38 | 39 | std::unordered_map> tests = {}; 40 | 41 | if (name.empty()) { 42 | t.plan(tests.size()); 43 | for (auto test : tests) { 44 | t.test(test.first, test.second); 45 | } 46 | } else { 47 | t.plan(1); 48 | if (tests.count(name)) { 49 | t.test(name, tests[name]); 50 | } else { 51 | std::ostringstream s; 52 | s << "Unknown test: " << name; 53 | t.fail(s.str()); 54 | } 55 | } 56 | 57 | // End test and set `process.exitCode` 58 | int exitCode = t.end(); 59 | auto processKey = Nan::New("process").ToLocalChecked(); 60 | auto process = Nan::Get(context->Global(), processKey).ToLocalChecked(); 61 | Nan::Set(process.As(), 62 | Nan::New("exitCode").ToLocalChecked(), 63 | Nan::New(exitCode)); 64 | } 65 | -------------------------------------------------------------------------------- /bindings/thread-cpu-clock.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #include "thread-cpu-clock.hh" 18 | 19 | #ifdef __linux__ 20 | #include 21 | #include 22 | #include 23 | #elif __APPLE__ 24 | #define _DARWIN_C_SOURCE 25 | #include 26 | #include 27 | #include 28 | #elif _WIN32 29 | #include 30 | #endif 31 | 32 | namespace dd { 33 | 34 | namespace { 35 | constexpr std::chrono::nanoseconds timespec_to_duration(timespec ts) { 36 | return std::chrono::seconds{ts.tv_sec} + std::chrono::nanoseconds{ts.tv_nsec}; 37 | } 38 | 39 | #ifdef _WIN32 40 | constexpr std::chrono::nanoseconds filetime_to_nanos(FILETIME t) { 41 | return std::chrono::nanoseconds{ 42 | ((static_cast(t.dwHighDateTime) << 32) | 43 | static_cast(t.dwLowDateTime)) * 44 | 100}; 45 | } 46 | #endif 47 | } // namespace 48 | 49 | CurrentThreadCpuClock::time_point CurrentThreadCpuClock::now() noexcept { 50 | #ifndef _WIN32 51 | timespec ts; 52 | clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts); 53 | return time_point{timespec_to_duration(ts)}; 54 | #else 55 | FILETIME creationTime, exitTime, kernelTime, userTime; 56 | if (!GetThreadTimes(GetCurrentThread(), 57 | &creationTime, 58 | &exitTime, 59 | &kernelTime, 60 | &userTime)) { 61 | return {}; 62 | } 63 | return time_point{filetime_to_nanos(kernelTime) + 64 | filetime_to_nanos(userTime)}; 65 | #endif 66 | } 67 | 68 | ProcessCpuClock::time_point ProcessCpuClock::now() noexcept { 69 | #ifndef _WIN32 70 | timespec ts; 71 | clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts); 72 | return time_point{timespec_to_duration(ts)}; 73 | #else 74 | FILETIME creationTime, exitTime, kernelTime, userTime; 75 | if (!GetProcessTimes(GetCurrentProcess(), 76 | &creationTime, 77 | &exitTime, 78 | &kernelTime, 79 | &userTime)) { 80 | return {}; 81 | } 82 | return time_point{filetime_to_nanos(kernelTime) + 83 | filetime_to_nanos(userTime)}; 84 | #endif 85 | } 86 | 87 | ThreadCpuClock::ThreadCpuClock() { 88 | #ifdef __linux__ 89 | pthread_getcpuclockid(pthread_self(), &clockid_); 90 | #elif __APPLE__ 91 | thread_ = mach_thread_self(); 92 | #elif 
_WIN32 93 | thread_ = GetCurrentThread(); 94 | #endif 95 | } 96 | 97 | ThreadCpuClock::time_point ThreadCpuClock::now() const noexcept { 98 | #ifdef __linux__ 99 | timespec ts; 100 | if (clock_gettime(clockid_, &ts)) { 101 | return {}; 102 | } 103 | return time_point{timespec_to_duration(ts)}; 104 | #elif __APPLE__ 105 | mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; 106 | thread_basic_info_data_t info; 107 | kern_return_t kr = 108 | thread_info(thread_, THREAD_BASIC_INFO, (thread_info_t)&info, &count); 109 | 110 | if (kr != KERN_SUCCESS) { 111 | return {}; 112 | } 113 | 114 | return time_point{ 115 | std::chrono::seconds{info.user_time.seconds + info.system_time.seconds} + 116 | std::chrono::microseconds{info.user_time.microseconds + 117 | info.system_time.microseconds}}; 118 | #elif _WIN32 119 | FILETIME creationTime, exitTime, kernelTime, userTime; 120 | if (!GetThreadTimes( 121 | thread_, &creationTime, &exitTime, &kernelTime, &userTime)) { 122 | return {}; 123 | } 124 | return time_point{filetime_to_nanos(kernelTime) + 125 | filetime_to_nanos(userTime)}; 126 | #endif 127 | 128 | return {}; 129 | } 130 | 131 | } // namespace dd 132 | -------------------------------------------------------------------------------- /bindings/thread-cpu-clock.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #ifdef __linux__ 24 | #include 25 | #elif __APPLE__ 26 | #include 27 | #elif _WIN32 28 | #include 29 | #endif 30 | 31 | namespace dd { 32 | 33 | struct CurrentThreadCpuClock { 34 | using duration = std::chrono::nanoseconds; 35 | using rep = duration::rep; 36 | using period = duration::period; 37 | using time_point = std::chrono::time_point; 38 | 39 | static constexpr bool is_steady = true; 40 | 41 | static time_point now() noexcept; 42 | }; 43 | 44 | struct ProcessCpuClock { 45 | using duration = std::chrono::nanoseconds; 46 | using rep = duration::rep; 47 | using period = duration::period; 48 | using time_point = std::chrono::time_point; 49 | 50 | static constexpr bool is_steady = true; 51 | 52 | static time_point now() noexcept; 53 | }; 54 | 55 | class ThreadCpuClock { 56 | public: 57 | using duration = std::chrono::nanoseconds; 58 | using rep = duration::rep; 59 | using period = duration::period; 60 | using time_point = std::chrono::time_point; 61 | 62 | static constexpr bool is_steady = true; 63 | 64 | ThreadCpuClock(); 65 | time_point now() const noexcept; 66 | 67 | private: 68 | #ifdef __linux__ 69 | clockid_t clockid_; 70 | #elif __APPLE__ 71 | mach_port_t thread_; 72 | #elif _WIN32 73 | HANDLE thread_; 74 | #endif 75 | }; 76 | 77 | class ThreadCpuStopWatch { 78 | public: 79 | ThreadCpuStopWatch() { last_ = clock_.now(); } 80 | 81 | ThreadCpuClock::duration GetAndReset() { 82 | auto now = clock_.now(); 83 | auto d = now - last_; 84 | last_ = now; 85 | return d; 86 | } 87 | 88 | private: 89 | ThreadCpuClock clock_; 90 | ThreadCpuClock::time_point last_; 91 | }; 92 | 93 | } // namespace dd 94 | -------------------------------------------------------------------------------- /bindings/translate-heap-profile.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | #include "translate-heap-profile.hh" 18 | #include "profile-translator.hh" 19 | 20 | namespace dd { 21 | 22 | namespace { 23 | class HeapProfileTranslator : ProfileTranslator { 24 | #define NODE_FIELDS \ 25 | X(name) \ 26 | X(scriptName) \ 27 | X(scriptId) \ 28 | X(lineNumber) \ 29 | X(columnNumber) \ 30 | X(children) \ 31 | X(allocations) 32 | 33 | #define ALLOCATION_FIELDS \ 34 | X(sizeBytes) \ 35 | X(count) 36 | 37 | #define X(name) v8::Local str_##name = NewString(#name); 38 | NODE_FIELDS 39 | ALLOCATION_FIELDS 40 | #undef X 41 | 42 | public: 43 | v8::Local TranslateAllocationProfile( 44 | v8::AllocationProfile::Node* node) { 45 | v8::Local children = NewArray(node->children.size()); 46 | for (size_t i = 0; i < node->children.size(); i++) { 47 | Set(children, i, TranslateAllocationProfile(node->children[i])); 48 | } 49 | 50 | v8::Local allocations = NewArray(node->allocations.size()); 51 | for (size_t i = 0; i < node->allocations.size(); i++) { 52 | auto alloc = node->allocations[i]; 53 | Set(allocations, 54 | i, 55 | CreateAllocation(NewNumber(alloc.size), NewNumber(alloc.count))); 56 | } 57 | 58 | return CreateNode(node->name, 59 | node->script_name, 60 | NewInteger(node->script_id), 61 | NewInteger(node->line_number), 62 | NewInteger(node->column_number), 63 | children, 64 | allocations); 65 | } 66 | 67 | private: 68 | v8::Local CreateNode(v8::Local name, 69 | v8::Local scriptName, 70 | v8::Local scriptId, 71 | v8::Local lineNumber, 72 | v8::Local columnNumber, 73 | v8::Local children, 74 | v8::Local allocations) { 75 | v8::Local js_node = NewObject(); 76 | #define X(name) Set(js_node, str_##name, name); 77 | NODE_FIELDS 78 | #undef X 79 | #undef NODE_FIELDS 80 | return js_node; 81 | } 82 | 83 | v8::Local CreateAllocation(v8::Local count, 84 | v8::Local sizeBytes) { 85 | v8::Local js_alloc = NewObject(); 86 | #define X(name) Set(js_alloc, str_##name, name); 87 | ALLOCATION_FIELDS 88 | #undef X 89 | #undef ALLOCATION_FIELDS 90 | return js_alloc; 91 | } 92 | 93 | public: 94 | explicit HeapProfileTranslator() {} 95 | }; 96 | } // namespace 97 | 98 | v8::Local TranslateAllocationProfile( 99 | v8::AllocationProfile::Node* node) { 100 | return HeapProfileTranslator().TranslateAllocationProfile(node); 101 | } 102 | 103 | } // namespace dd 104 | -------------------------------------------------------------------------------- /bindings/translate-heap-profile.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | 21 | namespace dd { 22 | 23 | v8::Local TranslateAllocationProfile( 24 | v8::AllocationProfile::Node* node); 25 | 26 | } // namespace dd 27 | -------------------------------------------------------------------------------- /bindings/translate-time-profile.cc: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2018 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #include "translate-time-profile.hh" 18 | #include "profile-translator.hh" 19 | 20 | namespace dd { 21 | 22 | namespace { 23 | class TimeProfileTranslator : ProfileTranslator { 24 | private: 25 | ContextsByNode* contextsByNode; 26 | v8::Local emptyArray = NewArray(0); 27 | v8::Local zero = NewInteger(0); 28 | 29 | #define FIELDS \ 30 | X(name) \ 31 | X(scriptName) \ 32 | X(scriptId) \ 33 | X(lineNumber) \ 34 | X(columnNumber) \ 35 | X(hitCount) \ 36 | X(children) \ 37 | X(contexts) 38 | 39 | #define X(name) v8::Local str_##name = NewString(#name); 40 | FIELDS 41 | #undef X 42 | 43 | v8::Local getContextsForNode(const v8::CpuProfileNode* node, 44 | uint32_t& hitcount) { 45 | hitcount = node->GetHitCount(); 46 | if (!contextsByNode) { 47 | // custom contexts are not enabled, keep the node hitcount and return 48 | // empty array 49 | return emptyArray; 50 | } 51 | 52 | auto it = contextsByNode->find(node); 53 | auto contexts = emptyArray; 54 | if (it != contextsByNode->end()) { 55 | hitcount = it->second.hitcount; 56 | contexts = it->second.contexts; 57 | } else { 58 | // no context found for node, discard it since every sample taken from 59 | // signal handler should have a matching context if it does not, it means 60 | // sample was captured by a deopt event 61 | hitcount = 0; 62 | } 63 | return contexts; 64 | } 65 | 66 | v8::Local CreateTimeNode(v8::Local name, 67 | v8::Local scriptName, 68 | v8::Local scriptId, 69 | v8::Local lineNumber, 70 | v8::Local columnNumber, 71 | v8::Local hitCount, 72 | v8::Local children, 73 | v8::Local contexts) { 74 | v8::Local js_node = NewObject(); 75 | #define X(name) Set(js_node, str_##name, name); 76 | FIELDS 77 | #undef X 78 | #undef FIELDS 79 | return js_node; 80 | } 81 | 82 | v8::Local GetLineNumberTimeProfileChildren( 83 | const v8::CpuProfileNode* node) { 84 | unsigned int index = 0; 85 | v8::Local children; 86 | int32_t count = node->GetChildrenCount(); 87 | 88 | unsigned int hitLineCount = node->GetHitLineCount(); 89 | unsigned int hitCount = node->GetHitCount(); 90 | auto scriptId = NewInteger(node->GetScriptId()); 91 | if (hitLineCount > 0) { 92 | std::vector entries(hitLineCount); 93 | node->GetLineTicks(&entries[0], hitLineCount); 94 | children = NewArray(count + hitLineCount); 95 | for (const v8::CpuProfileNode::LineTick entry : entries) { 96 | Set(children, 97 | index++, 98 | CreateTimeNode(node->GetFunctionName(), 99 | node->GetScriptResourceName(), 100 | scriptId, 101 | 
NewInteger(entry.line), 102 | zero, 103 | NewInteger(entry.hit_count), 104 | emptyArray, 105 | emptyArray)); 106 | } 107 | } else if (hitCount > 0) { 108 | // Handle nodes for pseudo-functions like "process" and "garbage 109 | // collection" which do not have hit line counts. 110 | children = NewArray(count + 1); 111 | Set(children, 112 | index++, 113 | CreateTimeNode(node->GetFunctionName(), 114 | node->GetScriptResourceName(), 115 | scriptId, 116 | NewInteger(node->GetLineNumber()), 117 | NewInteger(node->GetColumnNumber()), 118 | NewInteger(hitCount), 119 | emptyArray, 120 | emptyArray)); 121 | } else { 122 | children = NewArray(count); 123 | } 124 | 125 | for (int32_t i = 0; i < count; i++) { 126 | Set(children, 127 | index++, 128 | TranslateLineNumbersTimeProfileNode(node, node->GetChild(i))); 129 | }; 130 | 131 | return children; 132 | } 133 | 134 | v8::Local TranslateLineNumbersTimeProfileNode( 135 | const v8::CpuProfileNode* parent, const v8::CpuProfileNode* node) { 136 | return CreateTimeNode(parent->GetFunctionName(), 137 | parent->GetScriptResourceName(), 138 | NewInteger(parent->GetScriptId()), 139 | NewInteger(node->GetLineNumber()), 140 | NewInteger(node->GetColumnNumber()), 141 | zero, 142 | GetLineNumberTimeProfileChildren(node), 143 | emptyArray); 144 | } 145 | 146 | // In profiles with line level accurate line numbers, a node's line number 147 | // and column number refer to the line/column from which the function was 148 | // called. 149 | v8::Local TranslateLineNumbersTimeProfileRoot( 150 | const v8::CpuProfileNode* node) { 151 | int32_t count = node->GetChildrenCount(); 152 | std::vector> childrenArrs(count); 153 | int32_t childCount = 0; 154 | for (int32_t i = 0; i < count; i++) { 155 | v8::Local c = 156 | GetLineNumberTimeProfileChildren(node->GetChild(i)); 157 | childCount = childCount + c->Length(); 158 | childrenArrs[i] = c; 159 | } 160 | 161 | v8::Local children = NewArray(childCount); 162 | int32_t idx = 0; 163 | for (int32_t i = 0; i < count; i++) { 164 | v8::Local arr = childrenArrs[i]; 165 | for (uint32_t j = 0; j < arr->Length(); j++) { 166 | Set(children, idx, Get(arr, j).ToLocalChecked()); 167 | idx++; 168 | } 169 | } 170 | 171 | return CreateTimeNode(node->GetFunctionName(), 172 | node->GetScriptResourceName(), 173 | NewInteger(node->GetScriptId()), 174 | NewInteger(node->GetLineNumber()), 175 | NewInteger(node->GetColumnNumber()), 176 | zero, 177 | children, 178 | emptyArray); 179 | } 180 | 181 | v8::Local TranslateTimeProfileNode( 182 | const v8::CpuProfileNode* node) { 183 | int32_t count = node->GetChildrenCount(); 184 | v8::Local children = NewArray(count); 185 | for (int32_t i = 0; i < count; i++) { 186 | Set(children, i, TranslateTimeProfileNode(node->GetChild(i))); 187 | } 188 | 189 | uint32_t hitcount = 0; 190 | auto contexts = getContextsForNode(node, hitcount); 191 | 192 | return CreateTimeNode(node->GetFunctionName(), 193 | node->GetScriptResourceName(), 194 | NewInteger(node->GetScriptId()), 195 | NewInteger(node->GetLineNumber()), 196 | NewInteger(node->GetColumnNumber()), 197 | NewInteger(hitcount), 198 | children, 199 | contexts); 200 | } 201 | 202 | public: 203 | explicit TimeProfileTranslator(ContextsByNode* nls = nullptr) 204 | : contextsByNode(nls) {} 205 | 206 | v8::Local TranslateTimeProfile(const v8::CpuProfile* profile, 207 | bool includeLineInfo, 208 | bool hasCpuTime, 209 | int64_t nonJSThreadsCpuTime) { 210 | v8::Local js_profile = NewObject(); 211 | 212 | if (includeLineInfo) { 213 | Set(js_profile, 214 | 
NewString("topDownRoot"), 215 | TranslateLineNumbersTimeProfileRoot(profile->GetTopDownRoot())); 216 | } else { 217 | Set(js_profile, 218 | NewString("topDownRoot"), 219 | TranslateTimeProfileNode(profile->GetTopDownRoot())); 220 | } 221 | Set(js_profile, NewString("startTime"), NewNumber(profile->GetStartTime())); 222 | Set(js_profile, NewString("endTime"), NewNumber(profile->GetEndTime())); 223 | Set(js_profile, NewString("hasCpuTime"), NewBoolean(hasCpuTime)); 224 | 225 | Set(js_profile, 226 | NewString("nonJSThreadsCpuTime"), 227 | NewNumber(nonJSThreadsCpuTime)); 228 | return js_profile; 229 | } 230 | }; 231 | } // namespace 232 | 233 | v8::Local TranslateTimeProfile(const v8::CpuProfile* profile, 234 | bool includeLineInfo, 235 | ContextsByNode* contextsByNode, 236 | bool hasCpuTime, 237 | int64_t nonJSThreadsCpuTime) { 238 | return TimeProfileTranslator(contextsByNode) 239 | .TranslateTimeProfile( 240 | profile, includeLineInfo, hasCpuTime, nonJSThreadsCpuTime); 241 | } 242 | 243 | } // namespace dd 244 | -------------------------------------------------------------------------------- /bindings/translate-time-profile.hh: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2018 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #pragma once 18 | 19 | #include 20 | #include "contexts.hh" 21 | 22 | namespace dd { 23 | 24 | v8::Local TranslateTimeProfile( 25 | const v8::CpuProfile* profile, 26 | bool includeLineInfo, 27 | ContextsByNode* contextsByNode = nullptr, 28 | bool hasCpuTime = false, 29 | int64_t nonJSThreadsCpuTime = 0); 30 | 31 | } // namespace dd 32 | -------------------------------------------------------------------------------- /bindings/wrap.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Datadog, Inc 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | #pragma once 18 | 19 | #include <v8.h> // cppcheck-suppress missingIncludeSystem 20 | 21 | namespace dd { 22 | 23 | class LabelWrap { 24 | protected: 25 | v8::Global<v8::Object> handle_; 26 | 27 | public: 28 | LabelWrap(v8::Local<v8::Object> object) 29 | : handle_(v8::Isolate::GetCurrent(), object) {} 30 | 31 | v8::Local<v8::Object> handle() { 32 | return handle_.Get(v8::Isolate::GetCurrent()); 33 | } 34 | }; 35 | 36 | }; // namespace dd 37 | -------------------------------------------------------------------------------- /codecov.yaml: -------------------------------------------------------------------------------- 1 | ignore: 2 | proto 3 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@datadog/pprof", 3 | "version": "6.0.0-pre", 4 | "description": "pprof support for Node.js", 5 | "repository": "datadog/pprof-nodejs", 6 | "main": "out/src/index.js", 7 | "types": "out/src/index.d.ts", 8 | "scripts": { 9 | "build:asan": "node-gyp configure build --jobs=max --address_sanitizer", 10 | "build:tsan": "node-gyp configure build --jobs=max --thread_sanitizer", 11 | "build": "node-gyp configure build --jobs=max", 12 | "codecov": "nyc report --reporter=json && codecov -f coverage/*.json", 13 | "compile": "tsc -p .", 14 | "fix": "gts fix", 15 | "format": "clang-format --style file -i --glob='bindings/**/*.{h,hh,cpp,cc}'", 16 | "install": "exit 0", 17 | "lint": "jsgl --local . && gts check && clang-format --style file -n -Werror --glob='bindings/**/*.{h,hh,cpp,cc}'", 18 | "prepare": "npm run compile && npm run rebuild", 19 | "pretest:js-asan": "npm run compile && npm run build:asan", 20 | "pretest:js-tsan": "npm run compile && npm run build:tsan", 21 | "pretest:js-valgrind": "npm run pretest", 22 | "pretest": "npm run compile", 23 | "rebuild": "node-gyp rebuild --jobs=max", 24 | "test:cpp": "node scripts/cctest.js", 25 | "test:js-asan": "LSAN_OPTIONS='suppressions=./suppressions/lsan_suppr.txt' LD_PRELOAD=`gcc -print-file-name=libasan.so` mocha out/test/test-*.js", 26 | "test:js-tsan": "LD_PRELOAD=`gcc -print-file-name=libtsan.so` mocha out/test/test-*.js", 27 | "test:js-valgrind": "valgrind --leak-check=full mocha out/test/test-*.js", 28 | "test:js": "nyc mocha -r source-map-support/register out/test/test-*.js", 29 | "test": "npm run test:js" 30 | }, 31 | "author": { 32 | "name": "Google Inc."
33 | }, 34 | "license": "Apache-2.0", 35 | "dependencies": { 36 | "delay": "^5.0.0", 37 | "node-gyp-build": "<4.0", 38 | "p-limit": "^3.1.0", 39 | "pprof-format": "^2.1.0", 40 | "source-map": "^0.7.4" 41 | }, 42 | "devDependencies": { 43 | "@types/mocha": "^10.0.1", 44 | "@types/node": ">=16", 45 | "@types/sinon": "^10.0.15", 46 | "@types/tmp": "^0.2.3", 47 | "@typescript-eslint/eslint-plugin": "^5.60.1", 48 | "clang-format": "^1.8.0", 49 | "codecov": "^3.8.2", 50 | "deep-copy": "^1.4.2", 51 | "eslint-config-standard": "^17.1.0", 52 | "eslint-plugin-import": "^2.26.0", 53 | "eslint-plugin-n": "^16.0.1", 54 | "eslint-plugin-promise": "^6.1.1", 55 | "gts": "^4.0.1", 56 | "js-green-licenses": "^4.0.0", 57 | "mocha": "^10.2.0", 58 | "nan": "^2.22.2", 59 | "nyc": "^15.1.0", 60 | "sinon": "^15.2.0", 61 | "source-map-support": "^0.5.21", 62 | "tmp": "0.2.1", 63 | "typescript": "<5.1" 64 | }, 65 | "files": [ 66 | "out/src", 67 | "out/third_party/cloud-debug-nodejs", 68 | "proto", 69 | "package-lock.json", 70 | "package.json", 71 | "README.md", 72 | "scripts/preinstall.js", 73 | "scripts/require-package-json.js", 74 | "scripts/should_rebuild.js", 75 | "prebuilds" 76 | ], 77 | "nyc": { 78 | "exclude": [ 79 | "proto", 80 | "out/test", 81 | "out/system-test" 82 | ] 83 | }, 84 | "engines": { 85 | "node": ">=16" 86 | }, 87 | "//": "Temporary fix to make nan@2.22.2 work with Node 24", 88 | "postinstall": "sed -i '' 's/^.* Holder() const.*//' ./node_modules/nan/nan_callbacks_12_inl.h" 89 | } 90 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:base", 4 | ":preserveSemverRanges", 5 | ":pinDigestsDisabled" 6 | ], 7 | "packageRules": [ 8 | { 9 | "extends": "packages:linters", 10 | "groupName": "linters" 11 | } 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /scripts/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "parserOptions": { 4 | "ecmaVersion": 2020 5 | }, 6 | "extends": [ 7 | "eslint:recommended", 8 | "standard" 9 | ], 10 | "env": { 11 | "node" : true 12 | }, 13 | "rules": { 14 | "max-len": [2, 120, 2], 15 | "no-var": 2, 16 | "no-console": 2, 17 | "prefer-const": 2, 18 | "object-curly-spacing": [2, "always"], 19 | "import/no-extraneous-dependencies": 2, 20 | "standard/no-callback-literal": 0, 21 | "no-prototype-builtins": 0 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /scripts/cctest.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { execSync } = require('child_process') 4 | const { existsSync } = require('fs') 5 | const { join } = require('path') 6 | 7 | const name = process.argv[2] || 'test_dd_pprof' 8 | 9 | const cmd = [ 10 | 'node-gyp', 11 | 'configure', 12 | 'build', 13 | '--build_tests' 14 | ].join(' ') 15 | 16 | execSync(cmd, { stdio: [0, 1, 2] }) 17 | 18 | function findBuild (mode) { 19 | const path = join(__dirname, '..', 'build', mode, name) + '.node' 20 | if (!existsSync(path)) { 21 | // eslint-disable-next-line no-console 22 | console.warn(`No ${mode} binary found for ${name} at: ${path}`) 23 | return 24 | } 25 | return path 26 | } 27 | 28 | const path = findBuild('Release') || findBuild('Debug') 29 | if (!path) { 30 | // eslint-disable-next-line no-console 31 | console.error(`No 
${name} build found`) 32 | process.exitCode = 1 33 | } else { 34 | execSync(`node ${path}`, { stdio: [0, 1, 2] }) 35 | } 36 | -------------------------------------------------------------------------------- /suppressions/lsan_suppr.txt: -------------------------------------------------------------------------------- 1 | leak:CRYPTO_zalloc -------------------------------------------------------------------------------- /system-test/Dockerfile.linux: -------------------------------------------------------------------------------- 1 | FROM golang:1.16-stretch as builder 2 | RUN apt-get update && apt-get install -y \ 3 | git \ 4 | && rm -rf /var/lib/apt/lists/* 5 | WORKDIR /root/ 6 | RUN go get github.com/google/pprof 7 | 8 | FROM debian:stretch 9 | 10 | ARG NODE_VERSION 11 | ARG NVM_NODEJS_ORG_MIRROR 12 | ARG ADDITIONAL_PACKAGES 13 | ARG VERIFY_TIME_LINE_NUMBERS 14 | 15 | RUN apt-get update && apt-get install -y curl $ADDITIONAL_PACKAGES \ 16 | && rm -rf /var/lib/apt/lists/* 17 | 18 | ENV NVM_DIR /bin/.nvm 19 | RUN mkdir -p $NVM_DIR 20 | 21 | 22 | # Install nvm with node and npm 23 | RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.37.2/install.sh | bash \ 24 | && . $NVM_DIR/nvm.sh \ 25 | && nvm install $NODE_VERSION 26 | 27 | ENV BASH_ENV /root/.bashrc 28 | 29 | WORKDIR /root/ 30 | COPY --from=builder /go/bin/pprof /bin 31 | -------------------------------------------------------------------------------- /system-test/Dockerfile.node10-alpine: -------------------------------------------------------------------------------- 1 | FROM golang:1.16-alpine as builder 2 | RUN apk add --no-cache git 3 | WORKDIR /root/ 4 | RUN go get github.com/google/pprof 5 | 6 | 7 | FROM node:10-alpine 8 | 9 | ARG ADDITIONAL_PACKAGES 10 | 11 | RUN apk add --no-cache bash $ADDITIONAL_PACKAGES 12 | WORKDIR /root/ 13 | COPY --from=builder /go/bin/pprof /bin 14 | RUN chmod a+x /bin/pprof 15 | -------------------------------------------------------------------------------- /system-test/Dockerfile.node12-alpine: -------------------------------------------------------------------------------- 1 | FROM golang:1.16-alpine as builder 2 | RUN apk add --no-cache git 3 | WORKDIR /root/ 4 | RUN go get github.com/google/pprof 5 | 6 | 7 | FROM node:12-alpine 8 | 9 | ARG ADDITIONAL_PACKAGES 10 | 11 | RUN apk add --no-cache bash $ADDITIONAL_PACKAGES 12 | WORKDIR /root/ 13 | COPY --from=builder /go/bin/pprof /bin 14 | RUN chmod a+x /bin/pprof 15 | -------------------------------------------------------------------------------- /system-test/Dockerfile.node14-alpine: -------------------------------------------------------------------------------- 1 | FROM golang:1.16-alpine as builder 2 | RUN apk add --no-cache git 3 | WORKDIR /root/ 4 | RUN go get github.com/google/pprof 5 | 6 | 7 | FROM node:14-alpine 8 | 9 | ARG ADDITIONAL_PACKAGES 10 | 11 | RUN apk add --no-cache bash $ADDITIONAL_PACKAGES 12 | WORKDIR /root/ 13 | COPY --from=builder /go/bin/pprof /bin 14 | RUN chmod a+x /bin/pprof 15 | -------------------------------------------------------------------------------- /system-test/Dockerfile.node15-alpine: -------------------------------------------------------------------------------- 1 | FROM golang:1.16-alpine as builder 2 | RUN apk add --no-cache git 3 | WORKDIR /root/ 4 | RUN go get github.com/google/pprof 5 | 6 | 7 | FROM node:15-alpine 8 | 9 | ARG ADDITIONAL_PACKAGES 10 | 11 | RUN apk add --no-cache bash $ADDITIONAL_PACKAGES 12 | WORKDIR /root/ 13 | COPY --from=builder /go/bin/pprof /bin 14 | RUN chmod a+x 
/bin/pprof 15 | -------------------------------------------------------------------------------- /system-test/Dockerfile.node16-alpine: -------------------------------------------------------------------------------- 1 | FROM golang:1.16-alpine as builder 2 | RUN apk add --no-cache git 3 | WORKDIR /root/ 4 | RUN go get github.com/google/pprof 5 | 6 | 7 | FROM node:16-alpine 8 | 9 | ARG ADDITIONAL_PACKAGES 10 | 11 | RUN apk add --no-cache bash $ADDITIONAL_PACKAGES 12 | WORKDIR /root/ 13 | COPY --from=builder /go/bin/pprof /bin 14 | RUN chmod a+x /bin/pprof 15 | -------------------------------------------------------------------------------- /system-test/busybench-js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "busybench", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "dependencies": {}, 7 | "devDependencies": {}, 8 | "scripts": { 9 | "test": "echo \"Error: no test specified\" && exit 1" 10 | }, 11 | "author": "", 12 | "license": "ISC", 13 | "engines": { 14 | "node": ">=12" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /system-test/busybench-js/src/busybench.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2019 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | const fs = require('fs'); 18 | // eslint-disable-next-line node/no-missing-require 19 | const pprof = require('pprof'); 20 | 21 | const writeFilePromise = fs.promises.writeFile; 22 | 23 | const startTime = Date.now(); 24 | const testArr = []; 25 | 26 | /** 27 | * Fills several arrays, then calls itself with setTimeout. 28 | * It continues to do this until durationSeconds after the startTime. 29 | */ 30 | function busyLoop(durationSeconds) { 31 | for (let i = 0; i < testArr.length; i++) { 32 | for (let j = 0; j < testArr[i].length; j++) { 33 | testArr[i][j] = Math.sqrt(j * testArr[i][j]); 34 | } 35 | } 36 | if (Date.now() - startTime < 1000 * durationSeconds) { 37 | setTimeout(() => busyLoop(durationSeconds), 5); 38 | } 39 | } 40 | 41 | function benchmark(durationSeconds) { 42 | // Allocate 16 MiB in 64 KiB chunks. 
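  // (Clarifying the arithmetic: 16 * 16 = 256 arrays of 64 * 1024 slots each;
  // counting one byte per slot, that is the 16 MiB the comment above refers to.
  // Actual heap usage depends on how the JS engine stores the numbers.)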
43 | for (let i = 0; i < 16 * 16; i++) { 44 | testArr[i] = new Array(64 * 1024); 45 | } 46 | busyLoop(durationSeconds); 47 | } 48 | 49 | async function collectAndSaveTimeProfile( 50 | durationSeconds, 51 | sourceMapper, 52 | lineNumbers 53 | ) { 54 | const profile = await pprof.time.profile({ 55 | durationMillis: 1000 * durationSeconds, 56 | lineNumbers: lineNumbers, 57 | sourceMapper: sourceMapper, 58 | }); 59 | const buf = await pprof.encode(profile); 60 | await writeFilePromise('time.pb.gz', buf); 61 | } 62 | 63 | async function collectAndSaveHeapProfile(sourceMapper) { 64 | const profile = pprof.heap.profile(undefined, sourceMapper); 65 | const buf = await pprof.encode(profile); 66 | await writeFilePromise('heap.pb.gz', buf); 67 | } 68 | 69 | async function collectAndSaveProfiles(collectLineNumberTimeProfile) { 70 | const sourceMapper = await pprof.SourceMapper.create([process.cwd()]); 71 | collectAndSaveHeapProfile(sourceMapper); 72 | collectAndSaveTimeProfile( 73 | durationSeconds / 2, 74 | sourceMapper, 75 | collectLineNumberTimeProfile 76 | ); 77 | } 78 | 79 | const durationSeconds = Number(process.argv.length > 2 ? process.argv[2] : 30); 80 | const collectLineNumberTimeProfile = Boolean( 81 | process.argv.length > 3 ? process.argv[3] : false 82 | ); 83 | 84 | pprof.heap.start(512 * 1024, 64); 85 | benchmark(durationSeconds); 86 | 87 | collectAndSaveProfiles(collectLineNumberTimeProfile); 88 | -------------------------------------------------------------------------------- /system-test/busybench/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "busybench", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "build/src/busybench.js", 6 | "types": "build/src/busybench.d.ts", 7 | "files": [ 8 | "build/src" 9 | ], 10 | "license": "Apache-2.0", 11 | "keywords": [], 12 | "scripts": { 13 | "test": "echo \"Error: no test specified\" && exit 1", 14 | "check": "gts check", 15 | "clean": "gts clean", 16 | "compile": "tsc -p .", 17 | "fix": "gts fix", 18 | "prepare": "npm run compile", 19 | "pretest": "npm run compile", 20 | "posttest": "npm run check" 21 | }, 22 | "devDependencies": {}, 23 | "dependencies": {}, 24 | "engines": { 25 | "node": ">=12" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /system-test/busybench/src/busybench.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2019 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import {writeFile as writeFilePromise} from 'fs/promises'; 18 | // eslint-disable-next-line node/no-extraneous-import 19 | import {encode, heap, SourceMapper, time} from 'pprof'; 20 | 21 | const startTime: number = Date.now(); 22 | const testArr: number[][] = []; 23 | 24 | /** 25 | * Fills several arrays, then calls itself with setTimeout. 
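 * (The square-root pass over the arrays keeps the CPU busy between timer
 * ticks, giving the profiler frames to sample.)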
26 | * It continues to do this until durationSeconds after the startTime. 27 | */ 28 | function busyLoop(durationSeconds: number) { 29 | for (let i = 0; i < testArr.length; i++) { 30 | for (let j = 0; j < testArr[i].length; j++) { 31 | testArr[i][j] = Math.sqrt(j * testArr[i][j]); 32 | } 33 | } 34 | if (Date.now() - startTime < 1000 * durationSeconds) { 35 | setTimeout(() => busyLoop(durationSeconds), 5); 36 | } 37 | } 38 | 39 | function benchmark(durationSeconds: number) { 40 | // Allocate 16 MiB in 64 KiB chunks. 41 | for (let i = 0; i < 16 * 16; i++) { 42 | testArr[i] = new Array(64 * 1024); 43 | } 44 | busyLoop(durationSeconds); 45 | } 46 | 47 | async function collectAndSaveTimeProfile( 48 | durationSeconds: number, 49 | sourceMapper: SourceMapper 50 | ): Promise { 51 | const profile = await time.profile({ 52 | durationMillis: 1000 * durationSeconds, 53 | sourceMapper, 54 | }); 55 | const buf = await encode(profile); 56 | await writeFilePromise('time.pb.gz', buf); 57 | } 58 | 59 | async function collectAndSaveHeapProfile( 60 | sourceMapper: SourceMapper 61 | ): Promise { 62 | const profile = await heap.profile(undefined, sourceMapper); 63 | const buf = await encode(profile); 64 | await writeFilePromise('heap.pb.gz', buf); 65 | } 66 | 67 | async function collectAndSaveProfiles(): Promise { 68 | const sourceMapper = await SourceMapper.create([process.cwd()]); 69 | collectAndSaveTimeProfile(durationSeconds, sourceMapper); 70 | collectAndSaveHeapProfile(sourceMapper); 71 | } 72 | 73 | const durationSeconds = Number(process.argv.length > 2 ? process.argv[2] : 30); 74 | heap.start(512 * 1024, 64); 75 | benchmark(durationSeconds); 76 | 77 | collectAndSaveProfiles(); 78 | -------------------------------------------------------------------------------- /system-test/busybench/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./node_modules/gts/tsconfig-google.json", 3 | "compilerOptions": { 4 | "rootDir": ".", 5 | "outDir": "build", 6 | "lib": [ "es2015" ], 7 | "target": "es2015" 8 | }, 9 | "include": [ 10 | "src/*.ts", 11 | "src/**/*.ts", 12 | "test/*.ts", 13 | "test/**/*.ts" 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /system-test/system_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Trap all errors. 4 | trap "echo '** AT LEAST ONE OF TESTS FAILED **'" ERR 5 | 6 | # Fail on any error, show commands run. 7 | set -eox pipefail 8 | 9 | . $(dirname $0)/../tools/retry.sh 10 | 11 | cd $(dirname $0) 12 | 13 | if [[ -z "$BINARY_HOST" ]]; then 14 | ADDITIONAL_PACKAGES="python3 g++ make" 15 | fi 16 | 17 | if [[ "$RUN_ONLY_V8_CANARY_TEST" == "true" ]]; then 18 | NVM_NODEJS_ORG_MIRROR="https://nodejs.org/download/v8-canary" 19 | NODE_VERSIONS=(node) 20 | else 21 | NODE_VERSIONS=(10 12 14 15 16) 22 | fi 23 | 24 | for i in ${NODE_VERSIONS[@]}; do 25 | # Test Linux support for the given node version. 26 | retry docker build -f Dockerfile.linux --build-arg NODE_VERSION=$i \ 27 | --build-arg ADDITIONAL_PACKAGES="$ADDITIONAL_PACKAGES" \ 28 | --build-arg NVM_NODEJS_ORG_MIRROR="$NVM_NODEJS_ORG_MIRROR" \ 29 | -t node$i-linux . 30 | 31 | docker run -v $PWD/..:/src -e BINARY_HOST="$BINARY_HOST" node$i-linux \ 32 | /src/system-test/test.sh 33 | 34 | # Test support for accurate line numbers with node versions supporting this 35 | # feature. 
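  # (Node 10 is the one version in this matrix without that support, hence
  # the version check below.)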
36 | if [ "$i" != "10" ]; then 37 | docker run -v $PWD/..:/src -e BINARY_HOST="$BINARY_HOST" \ 38 | -e VERIFY_TIME_LINE_NUMBERS="true" node$i-linux \ 39 | /src/system-test/test.sh 40 | fi 41 | 42 | # Skip running on alpine if NVM_NODEJS_ORG_MIRROR is specified. 43 | if [[ ! -z "$NVM_NODEJS_ORG_MIRROR" ]]; then 44 | continue 45 | fi 46 | 47 | # Test Alpine support for the given node version. 48 | retry docker build -f Dockerfile.node$i-alpine \ 49 | --build-arg ADDITIONAL_PACKAGES="$ADDITIONAL_PACKAGES" -t node$i-alpine . 50 | 51 | docker run -v $PWD/..:/src -e BINARY_HOST="$BINARY_HOST" node$i-alpine \ 52 | /src/system-test/test.sh 53 | done 54 | 55 | echo '** ALL TESTS PASSED **' 56 | -------------------------------------------------------------------------------- /system-test/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | trap "cd $(dirname $0)/.. && npm run clean" EXIT 4 | trap "echo '** TEST FAILED **'" ERR 5 | 6 | . $(dirname $0)/../tools/retry.sh 7 | 8 | function timeout_after() { 9 | # timeout on Node 11 alpine image requires -t to specify time. 10 | if [[ -f /bin/busybox ]] && [[ $(node -v) =~ ^v11.* ]]; then 11 | timeout -t "${@}" 12 | else 13 | timeout "${@}" 14 | fi 15 | } 16 | 17 | npm_install() { 18 | timeout_after 60 npm install --quiet "${@}" 19 | } 20 | 21 | set -eox pipefail 22 | cd $(dirname $0)/.. 23 | 24 | NODEDIR=$(dirname $(dirname $(which node))) 25 | 26 | # TODO: Remove when a new version of nan (current version 2.12.1) is released. 27 | # For v8-canary tests, we need to use the version of NAN on github, which 28 | # contains unreleased fixes that allow the native component to be compiled 29 | # with Node's V8 canary build. 30 | [ -z $NVM_NODEJS_ORG_MIRROR ] \ 31 | || retry npm_install https://github.com/nodejs/nan.git 32 | 33 | retry npm_install --nodedir="$NODEDIR" \ 34 | ${BINARY_HOST:+--pprof_binary_host_mirror=$BINARY_HOST} >/dev/null 35 | 36 | npm run compile 37 | npm pack --quiet 38 | VERSION=$(node -e "console.log(require('./package.json').version);") 39 | PROFILER="$PWD/pprof-$VERSION.tgz" 40 | 41 | if [[ "$VERIFY_TIME_LINE_NUMBERS" == "true" ]]; then 42 | BENCHDIR="$PWD/system-test/busybench-js" 43 | BENCHPATH="src/busybench.js" 44 | else 45 | BENCHDIR="$PWD/system-test/busybench" 46 | BENCHPATH="build/src/busybench.js" 47 | fi 48 | 49 | TESTDIR=$(mktemp -d) 50 | cp -r "$BENCHDIR" "$TESTDIR/busybench" 51 | cd "$TESTDIR/busybench" 52 | 53 | retry npm_install typescript gts @types/node >/dev/null 54 | retry npm_install --nodedir="$NODEDIR" \ 55 | $([ -z "$BINARY_HOST" ] && echo "--build-from-source=pprof" \ 56 | || echo "--pprof_binary_host_mirror=$BINARY_HOST")\ 57 | "$PROFILER">/dev/null 58 | 59 | if [[ "$VERIFY_TIME_LINE_NUMBERS" != "true" ]]; then 60 | npm run compile 61 | fi 62 | 63 | node -v 64 | node --trace-warnings "$BENCHPATH" 10 $VERIFY_TIME_LINE_NUMBERS 65 | 66 | if [[ "$VERIFY_TIME_LINE_NUMBERS" == "true" ]]; then 67 | pprof -lines -top -nodecount=2 time.pb.gz 68 | pprof -lines -top -nodecount=2 time.pb.gz | \ 69 | grep "busyLoop.*src/busybench.js:3[3-5]" 70 | pprof -filefunctions -top -nodecount=2 heap.pb.gz 71 | pprof -filefunctions -top -nodecount=2 heap.pb.gz | \ 72 | grep "busyLoop.*src/busybench.js" 73 | else 74 | pprof -filefunctions -top -nodecount=2 time.pb.gz 75 | pprof -filefunctions -top -nodecount=2 time.pb.gz | \ 76 | grep "busyLoop.*src/busybench.ts" 77 | pprof -filefunctions -top -nodecount=2 heap.pb.gz 78 | pprof -filefunctions -top -nodecount=2 heap.pb.gz | \ 79 
| grep "busyLoop.*src/busybench.ts" 80 | fi 81 | 82 | 83 | echo '** TEST PASSED **' 84 | -------------------------------------------------------------------------------- /tools/build/Dockerfile.alpine: -------------------------------------------------------------------------------- 1 | FROM node:14-alpine 2 | RUN apk add --no-cache python curl bash python g++ make 3 | -------------------------------------------------------------------------------- /tools/build/Dockerfile.linux: -------------------------------------------------------------------------------- 1 | # Docker image on which pre-compiled binaries for non-alpine linux are built. 2 | # An older Docker image is used intentionally, because the resulting binaries 3 | # are dynamically linked to certain C++ libraries, like libstdc++. Using an 4 | # older docker image allows for some backwards compatibility. 5 | 6 | # node:14-stretch images has dependencies required to build pre-built binaries 7 | # already installed. 8 | FROM node:14-stretch 9 | -------------------------------------------------------------------------------- /tools/build/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 Google Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Fail on any error. 18 | set -e pipefail 19 | 20 | # Display commands 21 | set -x 22 | 23 | cd $(dirname $0)/../.. 24 | BASE_DIR=$PWD 25 | 26 | ARTIFACTS_OUT="${BASE_DIR}/artifacts" 27 | mkdir -p "$ARTIFACTS_OUT" 28 | 29 | npm install --quiet 30 | 31 | for version in 10.0.0 12.0.0 14.0.0 15.0.0 16.0.0 32 | do 33 | ./node_modules/.bin/node-pre-gyp configure rebuild package \ 34 | --target=$version --target_arch="x64" 35 | cp -r build/stage/* "${ARTIFACTS_OUT}/" 36 | rm -rf build 37 | done 38 | 39 | # Remove node_modules directory. When this script is run in a docker container 40 | # with user root, then a system test running after this script cannot run npm 41 | # install. 42 | rm -r node_modules 43 | -------------------------------------------------------------------------------- /tools/build/linux_build_and_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 Google Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | . $(dirname $0)/../retry.sh 18 | 19 | # Fail on any error. 
20 | set -e pipefail 21 | 22 | # Display commands 23 | set -x 24 | 25 | if [[ -z "$BUILD_TYPE" ]]; then 26 | case $KOKORO_JOB_TYPE in 27 | CONTINUOUS_INTEGRATION) 28 | BUILD_TYPE=continuous 29 | ;; 30 | PRESUBMIT_GITHUB) 31 | BUILD_TYPE=presubmit 32 | ;; 33 | RELEASE) 34 | BUILD_TYPE=release 35 | ;; 36 | *) 37 | echo "Unknown build type: ${KOKORO_JOB_TYPE}" 38 | exit 1 39 | ;; 40 | esac 41 | fi 42 | 43 | cd $(dirname $0)/../.. 44 | BASE_DIR=$PWD 45 | 46 | retry docker build -t build-linux -f tools/build/Dockerfile.linux tools/build 47 | retry docker run -v "${BASE_DIR}":"${BASE_DIR}" build-linux \ 48 | "${BASE_DIR}/tools/build/build.sh" 49 | 50 | retry docker build -t build-alpine -f tools/build/Dockerfile.alpine tools/build 51 | retry docker run -v "${BASE_DIR}":"${BASE_DIR}" build-alpine \ 52 | "${BASE_DIR}/tools/build/build.sh" 53 | 54 | GCS_LOCATION="cprof-e2e-nodejs-artifacts/pprof-nodejs/kokoro/${BUILD_TYPE}/${KOKORO_BUILD_NUMBER}" 55 | retry gcloud auth activate-service-account \ 56 | --key-file="${KOKORO_KEYSTORE_DIR}/72935_cloud-profiler-e2e-service-account-key" 57 | 58 | retry gsutil cp -r "${BASE_DIR}/artifacts/." "gs://${GCS_LOCATION}/" 59 | 60 | # Test the agent 61 | export BINARY_HOST="https://storage.googleapis.com/${GCS_LOCATION}" 62 | "${BASE_DIR}/system-test/system_test.sh" 63 | 64 | if [ "$BUILD_TYPE" == "release" ]; then 65 | retry gsutil cp -r "${BASE_DIR}/artifacts/." "gs://cloud-profiler/pprof-nodejs/release" 66 | fi 67 | -------------------------------------------------------------------------------- /tools/kokoro/release/common.cfg: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | before_action { 16 | fetch_keystore { 17 | keystore_resource { 18 | keystore_config_id: 72935 19 | keyname: "cloud-profiler-e2e-service-account-key" 20 | } 21 | } 22 | } 23 | 24 | env_vars { 25 | key: "BUILD_TYPE" 26 | value: "release" 27 | } 28 | -------------------------------------------------------------------------------- /tools/kokoro/release/linux.cfg: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Kokoro config for job in release workflow which builds non-alpine Linux 16 | # binaries. 
17 | 18 | # Location of the build script in this repository. 19 | build_file: "pprof-nodejs/tools/build/linux_build_and_test.sh" 20 | -------------------------------------------------------------------------------- /tools/kokoro/release/publish.cfg: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Kokoro config for job in release workflow which publishes module to npm. 16 | 17 | # Get npm token from Keystore 18 | before_action { 19 | fetch_keystore { 20 | keystore_resource { 21 | keystore_config_id: 72935 22 | keyname: "pprof-npm-token" 23 | } 24 | } 25 | } 26 | 27 | build_file: "pprof-nodejs/tools/publish.sh" 28 | -------------------------------------------------------------------------------- /tools/kokoro/system-test/continuous/linux-prebuild.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | before_action { 4 | fetch_keystore { 5 | keystore_resource { 6 | keystore_config_id: 72935 7 | keyname: "cloud-profiler-e2e-service-account-key" 8 | } 9 | } 10 | } 11 | 12 | build_file: "pprof-nodejs/tools/build/linux_build_and_test.sh" 13 | -------------------------------------------------------------------------------- /tools/kokoro/system-test/continuous/linux-v8-canary.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | build_file: "pprof-nodejs/system-test/system_test.sh" 4 | 5 | env_vars { 6 | key: "RUN_ONLY_V8_CANARY_TEST" 7 | value: "true" 8 | } 9 | -------------------------------------------------------------------------------- /tools/kokoro/system-test/continuous/linux.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | build_file: "pprof-nodejs/system-test/system_test.sh" 4 | -------------------------------------------------------------------------------- /tools/kokoro/system-test/presubmit/linux-prebuild.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | before_action { 4 | fetch_keystore { 5 | keystore_resource { 6 | keystore_config_id: 72935 7 | keyname: "cloud-profiler-e2e-service-account-key" 8 | } 9 | } 10 | } 11 | 12 | build_file: "pprof-nodejs/tools/build/linux_build_and_test.sh" 13 | -------------------------------------------------------------------------------- /tools/kokoro/system-test/presubmit/linux.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | build_file: "pprof-nodejs/system-test/system_test.sh" 4 | -------------------------------------------------------------------------------- /tools/publish.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | . $(dirname $0)/retry.sh 18 | 19 | set -eo pipefail 20 | 21 | # Install desired version of Node.js 22 | retry curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.37.2/install.sh | bash >/dev/null 23 | export NVM_DIR="$HOME/.nvm" >/dev/null 24 | [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" >/dev/null 25 | 26 | retry nvm install 10 &>/dev/null 27 | 28 | cd $(dirname $0)/.. 29 | 30 | NPM_TOKEN=$(cat $KOKORO_KEYSTORE_DIR/72935_pprof-npm-token) 31 | echo "//wombat-dressing-room.appspot.com/:_authToken=${NPM_TOKEN}" > ~/.npmrc 32 | 33 | retry npm install --quiet 34 | npm publish --access=public \ 35 | --registry=https://wombat-dressing-room.appspot.com 36 | -------------------------------------------------------------------------------- /tools/retry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | retry() { 17 | for attempt in {1..3}; do 18 | [ $attempt == 1 ] || sleep 10 # Backing off after a failed attempt. 19 | "${@}" && return 0 20 | done 21 | return 1 22 | } 23 | -------------------------------------------------------------------------------- /ts/src/heap-profiler-bindings.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2018 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | import * as path from 'path'; 18 | 19 | import {AllocationProfileNode} from './v8-types'; 20 | 21 | const findBinding = require('node-gyp-build'); 22 | const profiler = findBinding(path.join(__dirname, '..', '..')); 23 | 24 | // Wrappers around native heap profiler functions. 25 | 26 | export function startSamplingHeapProfiler( 27 | heapIntervalBytes: number, 28 | heapStackDepth: number 29 | ) { 30 | profiler.heapProfiler.startSamplingHeapProfiler( 31 | heapIntervalBytes, 32 | heapStackDepth 33 | ); 34 | } 35 | 36 | export function stopSamplingHeapProfiler() { 37 | profiler.heapProfiler.stopSamplingHeapProfiler(); 38 | } 39 | 40 | export function getAllocationProfile(): AllocationProfileNode { 41 | return profiler.heapProfiler.getAllocationProfile(); 42 | } 43 | 44 | export type NearHeapLimitCallback = (profile: AllocationProfileNode) => void; 45 | 46 | export function monitorOutOfMemory( 47 | heapLimitExtensionSize: number, 48 | maxHeapLimitExtensionCount: number, 49 | dumpHeapProfileOnSdterr: boolean, 50 | exportCommand: Array | undefined, 51 | callback: NearHeapLimitCallback | undefined, 52 | callbackMode: number, 53 | isMainThread: boolean 54 | ) { 55 | profiler.heapProfiler.monitorOutOfMemory( 56 | heapLimitExtensionSize, 57 | maxHeapLimitExtensionCount, 58 | dumpHeapProfileOnSdterr, 59 | exportCommand, 60 | callback, 61 | callbackMode, 62 | isMainThread 63 | ); 64 | } 65 | -------------------------------------------------------------------------------- /ts/src/heap-profiler.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import {Profile} from 'pprof-format'; 18 | 19 | import { 20 | getAllocationProfile, 21 | startSamplingHeapProfiler, 22 | stopSamplingHeapProfiler, 23 | monitorOutOfMemory as monitorOutOfMemoryImported, 24 | } from './heap-profiler-bindings'; 25 | import {serializeHeapProfile} from './profile-serializer'; 26 | import {SourceMapper} from './sourcemapper/sourcemapper'; 27 | import { 28 | AllocationProfileNode, 29 | GenerateAllocationLabelsFunction, 30 | } from './v8-types'; 31 | import {isMainThread} from 'worker_threads'; 32 | 33 | let enabled = false; 34 | let heapIntervalBytes = 0; 35 | let heapStackDepth = 0; 36 | 37 | /* 38 | * Collects a heap profile when heapProfiler is enabled. Otherwise throws 39 | * an error. 40 | * 41 | * Data is returned in V8 allocation profile format. 42 | */ 43 | export function v8Profile(): AllocationProfileNode { 44 | if (!enabled) { 45 | throw new Error('Heap profiler is not enabled.'); 46 | } 47 | return getAllocationProfile(); 48 | } 49 | 50 | /** 51 | * Collects a profile and returns it serialized in pprof format. 52 | * Throws if heap profiler is not enabled. 
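 *
 * A minimal usage sketch (illustrative only, not from the original docs; it
 * assumes the profiler has already been started in this process):
 *
 *   start(512 * 1024, 64);   // sample roughly every 512 KiB, 64 frames deep
 *   const p = profile();     // pprof-format Profile
 *   // p can then be gzip-serialized with encode()/encodeSync() from profile-encoder.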
53 | * 54 | * @param ignoreSamplePath 55 | * @param sourceMapper 56 | */ 57 | export function profile( 58 | ignoreSamplePath?: string, 59 | sourceMapper?: SourceMapper, 60 | generateLabels?: GenerateAllocationLabelsFunction 61 | ): Profile { 62 | return convertProfile( 63 | v8Profile(), 64 | ignoreSamplePath, 65 | sourceMapper, 66 | generateLabels 67 | ); 68 | } 69 | 70 | export function convertProfile( 71 | rootNode: AllocationProfileNode, 72 | ignoreSamplePath?: string, 73 | sourceMapper?: SourceMapper, 74 | generateLabels?: GenerateAllocationLabelsFunction 75 | ): Profile { 76 | const startTimeNanos = Date.now() * 1000 * 1000; 77 | // Add node for external memory usage. 78 | // Current type definitions do not have external. 79 | // TODO: remove any once type definition is updated to include external. 80 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 81 | const {external}: {external: number} = process.memoryUsage() as any; 82 | if (external > 0) { 83 | const externalNode: AllocationProfileNode = { 84 | name: '(external)', 85 | scriptName: '', 86 | children: [], 87 | allocations: [{sizeBytes: external, count: 1}], 88 | }; 89 | rootNode.children.push(externalNode); 90 | } 91 | return serializeHeapProfile( 92 | rootNode, 93 | startTimeNanos, 94 | heapIntervalBytes, 95 | ignoreSamplePath, 96 | sourceMapper, 97 | generateLabels 98 | ); 99 | } 100 | 101 | /** 102 | * Starts heap profiling. If heap profiling has already been started with 103 | * the same parameters, this is a noop. If heap profiler has already been 104 | * started with different parameters, this throws an error. 105 | * 106 | * @param intervalBytes - average number of bytes between samples. 107 | * @param stackDepth - maximum stack depth for samples collected. 108 | */ 109 | export function start(intervalBytes: number, stackDepth: number) { 110 | if (enabled) { 111 | throw new Error( 112 | `Heap profiler is already started with intervalBytes ${heapIntervalBytes} and stackDepth ${stackDepth}` 113 | ); 114 | } 115 | heapIntervalBytes = intervalBytes; 116 | heapStackDepth = stackDepth; 117 | startSamplingHeapProfiler(heapIntervalBytes, heapStackDepth); 118 | enabled = true; 119 | } 120 | 121 | // Stops heap profiling. If heap profiling has not been started, does nothing. 122 | export function stop() { 123 | if (enabled) { 124 | enabled = false; 125 | stopSamplingHeapProfiler(); 126 | } 127 | } 128 | 129 | export type NearHeapLimitCallback = (profile: Profile) => void; 130 | 131 | export const CallbackMode = { 132 | Async: 1, 133 | Interrupt: 2, 134 | Both: 3, 135 | }; 136 | 137 | /** 138 | * Add monitoring for v8 heap, heap profiler must already be started. 139 | * When an out of heap memory event occurs: 140 | * - an extension of heap memory of |heapLimitExtensionSize| bytes is 141 | * requested to v8. This extension can occur |maxHeapLimitExtensionCount| 142 | * number of times. If the extension amount is not enough to satisfy 143 | * memory allocation that triggers GC and OOM, process will abort. 144 | * - heap profile is dumped as folded stacks on stderr if 145 | * |dumpHeapProfileOnSdterr| is true 146 | * - heap profile is dumped in temporary file and a new process is spawned 147 | * with |exportCommand| arguments and profile path appended at the end. 148 | * - |callback| is called. Callback can be invoked only if 149 | * heapLimitExtensionSize is enough for the process to continue. 
Invocation 150 | * will be done by a RequestInterrupt if |callbackMode| is Interrupt or Both, 151 | * this might be unsafe since Isolate should not be reentered 152 | * from RequestInterrupt, but this allows to interrupt synchronous code. 153 | * Otherwise the callback is scheduled to be called asynchronously. 154 | * @param heapLimitExtensionSize - amount of bytes heap should be expanded 155 | * with upon OOM 156 | * @param maxHeapLimitExtensionCount - maximum number of times heap size 157 | * extension can occur 158 | * @param dumpHeapProfileOnSdterr - dump heap profile on stderr upon OOM 159 | * @param exportCommand - command to execute upon OOM, filepath of a 160 | * temporary file containing heap profile will be appended 161 | * @param callback - callback to call when OOM occurs 162 | * @param callbackMode 163 | */ 164 | export function monitorOutOfMemory( 165 | heapLimitExtensionSize: number, 166 | maxHeapLimitExtensionCount: number, 167 | dumpHeapProfileOnSdterr: boolean, 168 | exportCommand?: Array, 169 | callback?: NearHeapLimitCallback, 170 | callbackMode?: number 171 | ) { 172 | if (!enabled) { 173 | throw new Error( 174 | 'Heap profiler must already be started to call monitorOutOfMemory' 175 | ); 176 | } 177 | let newCallback; 178 | if (typeof callback !== 'undefined') { 179 | newCallback = (profile: AllocationProfileNode) => { 180 | callback(convertProfile(profile)); 181 | }; 182 | } 183 | monitorOutOfMemoryImported( 184 | heapLimitExtensionSize, 185 | maxHeapLimitExtensionCount, 186 | dumpHeapProfileOnSdterr, 187 | exportCommand || [], 188 | newCallback, 189 | typeof callbackMode !== 'undefined' ? callbackMode : CallbackMode.Async, 190 | isMainThread 191 | ); 192 | } 193 | -------------------------------------------------------------------------------- /ts/src/index.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2019 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | import {writeFileSync} from 'fs'; 17 | 18 | import * as heapProfiler from './heap-profiler'; 19 | import {encodeSync} from './profile-encoder'; 20 | import * as timeProfiler from './time-profiler'; 21 | export { 22 | AllocationProfileNode, 23 | TimeProfileNode, 24 | ProfileNode, 25 | LabelSet, 26 | } from './v8-types'; 27 | 28 | export {encode, encodeSync} from './profile-encoder'; 29 | export {SourceMapper} from './sourcemapper/sourcemapper'; 30 | export {setLogger} from './logger'; 31 | export {getNativeThreadId} from './time-profiler'; 32 | 33 | export const time = { 34 | profile: timeProfiler.profile, 35 | start: timeProfiler.start, 36 | stop: timeProfiler.stop, 37 | getContext: timeProfiler.getContext, 38 | setContext: timeProfiler.setContext, 39 | isStarted: timeProfiler.isStarted, 40 | v8ProfilerStuckEventLoopDetected: 41 | timeProfiler.v8ProfilerStuckEventLoopDetected, 42 | getState: timeProfiler.getState, 43 | constants: timeProfiler.constants, 44 | }; 45 | 46 | export const heap = { 47 | start: heapProfiler.start, 48 | stop: heapProfiler.stop, 49 | profile: heapProfiler.profile, 50 | convertProfile: heapProfiler.convertProfile, 51 | v8Profile: heapProfiler.v8Profile, 52 | monitorOutOfMemory: heapProfiler.monitorOutOfMemory, 53 | CallbackMode: heapProfiler.CallbackMode, 54 | }; 55 | 56 | // If loaded with --require, start profiling. 57 | if (module.parent && module.parent.id === 'internal/preload') { 58 | time.start({}); 59 | process.on('exit', () => { 60 | // The process is going to terminate imminently. All work here needs to 61 | // be synchronous. 62 | const profile = time.stop(); 63 | const buffer = encodeSync(profile); 64 | writeFileSync(`pprof-profile-${process.pid}.pb.gz`, buffer); 65 | }); 66 | } 67 | -------------------------------------------------------------------------------- /ts/src/logger.ts: -------------------------------------------------------------------------------- 1 | export interface Logger { 2 | error(...args: Array<{}>): void; 3 | trace(...args: Array<{}>): void; 4 | debug(...args: Array<{}>): void; 5 | info(...args: Array<{}>): void; 6 | warn(...args: Array<{}>): void; 7 | fatal(...args: Array<{}>): void; 8 | } 9 | 10 | export class NullLogger implements Logger { 11 | // eslint-disable-next-line @typescript-eslint/no-unused-vars 12 | info(...args: Array<{}>): void { 13 | return; 14 | } 15 | // eslint-disable-next-line @typescript-eslint/no-unused-vars 16 | error(...args: Array<{}>): void { 17 | return; 18 | } 19 | // eslint-disable-next-line @typescript-eslint/no-unused-vars 20 | trace(...args: Array<{}>): void { 21 | return; 22 | } 23 | // eslint-disable-next-line @typescript-eslint/no-unused-vars 24 | warn(...args: Array<{}>): void { 25 | return; 26 | } 27 | // eslint-disable-next-line @typescript-eslint/no-unused-vars 28 | fatal(...args: Array<{}>): void { 29 | return; 30 | } 31 | // eslint-disable-next-line @typescript-eslint/no-unused-vars 32 | debug(...args: Array<{}>): void { 33 | return; 34 | } 35 | } 36 | 37 | export let logger = new NullLogger(); 38 | 39 | export function setLogger(newLogger: Logger) { 40 | logger = newLogger; 41 | } 42 | -------------------------------------------------------------------------------- /ts/src/profile-encoder.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2019 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import {promisify} from 'util'; 18 | import {gzip, gzipSync} from 'zlib'; 19 | 20 | import {Profile} from 'pprof-format'; 21 | 22 | const gzipPromise = promisify(gzip); 23 | 24 | export function encode(profile: Profile): Promise<Buffer> { 25 | return profile.encodeAsync().then(gzipPromise); 26 | } 27 | 28 | export function encodeSync(profile: Profile): Buffer { 29 | return gzipSync(profile.encode()); 30 | } 31 | -------------------------------------------------------------------------------- /ts/src/profile-serializer.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import { 18 | Function, 19 | Label, 20 | LabelInput, 21 | Line, 22 | Location, 23 | Profile, 24 | Sample, 25 | ValueType, 26 | StringTable, 27 | ProfileInput, 28 | } from 'pprof-format'; 29 | import { 30 | GeneratedLocation, 31 | SourceLocation, 32 | SourceMapper, 33 | } from './sourcemapper/sourcemapper'; 34 | import { 35 | AllocationProfileNode, 36 | GenerateAllocationLabelsFunction, 37 | GenerateTimeLabelsFunction, 38 | ProfileNode, 39 | TimeProfile, 40 | TimeProfileNode, 41 | } from './v8-types'; 42 | 43 | export const NON_JS_THREADS_FUNCTION_NAME = 'Non JS threads activity'; 44 | export const GARBAGE_COLLECTION_FUNCTION_NAME = 'Garbage Collection'; 45 | 46 | /** 47 | * A stack of function IDs. 48 | */ 49 | type Stack = number[]; 50 | 51 | /** 52 | * A function which converts entry into one or more samples, then 53 | * appends those sample(s) to samples. 54 | */ 55 | type AppendEntryToSamples<T extends ProfileNode> = ( 56 | entry: Entry<T>, 57 | samples: Sample[] 58 | ) => void; 59 | 60 | /** 61 | * Profile node and stack trace to that node. 62 | */ 63 | interface Entry<T> { 64 | node: T; 65 | stack: Stack; 66 | } 67 | 68 | function isGeneratedLocation( 69 | location: SourceLocation 70 | ): location is GeneratedLocation { 71 | return ( 72 | location.column !== undefined && 73 | location.line !== undefined && 74 | location.line > 0 75 | ); 76 | } 77 | 78 | /** 79 | * Takes v8 profile and populates sample, location, and function fields of 80 | * profile.proto. 81 | * 82 | * @param profile - profile.proto with empty sample, location, and function 83 | * fields. 84 | * @param root - root of v8 profile tree describing samples to be appended 85 | * to profile. 86 | * @param appendToSamples - function which converts entry to sample(s) and 87 | * appends these to end of an array of samples.
88 | * @param stringTable - string table for the existing profile. 89 | */ 90 | function serialize<T extends ProfileNode>( 91 | profile: ProfileInput, 92 | root: T, 93 | appendToSamples: AppendEntryToSamples<T>, 94 | stringTable: StringTable, 95 | ignoreSamplesPath?: string, 96 | sourceMapper?: SourceMapper 97 | ) { 98 | const samples: Sample[] = []; 99 | const locations: Location[] = []; 100 | const functions: Function[] = []; 101 | const functionIdMap = new Map<string, number>(); 102 | const locationIdMap = new Map<string, number>(); 103 | 104 | const entries: Array<Entry<T>> = (root.children as T[]).map((n: T) => ({ 105 | node: n, 106 | stack: [], 107 | })); 108 | while (entries.length > 0) { 109 | const entry = entries.pop()!; 110 | const node = entry.node; 111 | 112 | // mjs files have a `file://` prefix in the scriptName -> remove it 113 | if (node.scriptName.startsWith('file://')) { 114 | node.scriptName = node.scriptName.slice(7); 115 | } 116 | 117 | if (ignoreSamplesPath && node.scriptName.indexOf(ignoreSamplesPath) > -1) { 118 | continue; 119 | } 120 | const stack = entry.stack; 121 | const location = getLocation(node, sourceMapper); 122 | stack.unshift(location.id as number); 123 | appendToSamples(entry, samples); 124 | for (const child of node.children as T[]) { 125 | entries.push({node: child, stack: stack.slice()}); 126 | } 127 | } 128 | 129 | profile.sample = samples; 130 | profile.location = locations; 131 | profile.function = functions; 132 | profile.stringTable = stringTable; 133 | 134 | function getLocation( 135 | node: ProfileNode, 136 | sourceMapper?: SourceMapper 137 | ): Location { 138 | let profLoc: SourceLocation = { 139 | file: node.scriptName || '', 140 | line: node.lineNumber, 141 | column: node.columnNumber, 142 | name: node.name, 143 | }; 144 | 145 | if (profLoc.line) { 146 | if (sourceMapper && isGeneratedLocation(profLoc)) { 147 | profLoc = sourceMapper.mappingInfo(profLoc); 148 | } 149 | } 150 | const keyStr = `${node.scriptId}:${profLoc.line}:${profLoc.column}:${profLoc.name}`; 151 | let id = locationIdMap.get(keyStr); 152 | if (id !== undefined) { 153 | // id is index+1, since 0 is not valid id. 154 | return locations[id - 1]; 155 | } 156 | id = locations.length + 1; 157 | locationIdMap.set(keyStr, id); 158 | const line = getLine(profLoc, node.scriptId); 159 | const location = new Location({id, line: [line]}); 160 | locations.push(location); 161 | return location; 162 | } 163 | 164 | function getLine(loc: SourceLocation, scriptId?: number): Line { 165 | return new Line({ 166 | functionId: getFunction(loc, scriptId).id, 167 | line: loc.line, 168 | }); 169 | } 170 | 171 | function getFunction(loc: SourceLocation, scriptId?: number): Function { 172 | let name = loc.name; 173 | const keyStr = name 174 | ? `${scriptId}:${name}` 175 | : `${scriptId}:${loc.line}:${loc.column}`; 176 | let id = functionIdMap.get(keyStr); 177 | if (id !== undefined) { 178 | // id is index+1, since 0 is not valid id.
179 | return functions[id - 1]; 180 | } 181 | id = functions.length + 1; 182 | functionIdMap.set(keyStr, id); 183 | if (!name) { 184 | if (loc.line) { 185 | if (loc.column) { 186 | name = `(anonymous:L#${loc.line}:C#${loc.column})`; 187 | } else { 188 | name = `(anonymous:L#${loc.line})`; 189 | } 190 | } else { 191 | name = '(anonymous)'; 192 | } 193 | } 194 | const nameId = stringTable.dedup(name); 195 | const f = new Function({ 196 | id, 197 | name: nameId, 198 | systemName: nameId, 199 | filename: stringTable.dedup(loc.file || ''), 200 | }); 201 | functions.push(f); 202 | return f; 203 | } 204 | } 205 | 206 | /** 207 | * @return value type for sample counts (type:sample, units:count), and 208 | * adds strings used in this value type to the table. 209 | */ 210 | function createSampleCountValueType(table: StringTable): ValueType { 211 | return new ValueType({ 212 | type: table.dedup('sample'), 213 | unit: table.dedup('count'), 214 | }); 215 | } 216 | 217 | /** 218 | * @return value type for time samples (type:wall, units:nanoseconds), and 219 | * adds strings used in this value type to the table. 220 | */ 221 | function createTimeValueType(table: StringTable): ValueType { 222 | return new ValueType({ 223 | type: table.dedup('wall'), 224 | unit: table.dedup('nanoseconds'), 225 | }); 226 | } 227 | 228 | /** 229 | * @return value type for cpu samples (type:cpu, units:nanoseconds), and 230 | * adds strings used in this value type to the table. 231 | */ 232 | function createCpuValueType(table: StringTable): ValueType { 233 | return new ValueType({ 234 | type: table.dedup('cpu'), 235 | unit: table.dedup('nanoseconds'), 236 | }); 237 | } 238 | 239 | /** 240 | * @return value type for object counts (type:objects, units:count), and 241 | * adds strings used in this value type to the table. 242 | */ 243 | function createObjectCountValueType(table: StringTable): ValueType { 244 | return new ValueType({ 245 | type: table.dedup('objects'), 246 | unit: table.dedup('count'), 247 | }); 248 | } 249 | 250 | /** 251 | * @return value type for memory allocations (type:space, units:bytes), and 252 | * adds strings used in this value type to the table. 253 | */ 254 | function createAllocationValueType(table: StringTable): ValueType { 255 | return new ValueType({ 256 | type: table.dedup('space'), 257 | unit: table.dedup('bytes'), 258 | }); 259 | } 260 | 261 | function computeTotalHitCount(root: TimeProfileNode): number { 262 | return ( 263 | root.hitCount + 264 | (root.children as TimeProfileNode[]).reduce( 265 | (sum, node) => sum + computeTotalHitCount(node), 266 | 0 267 | ) 268 | ); 269 | } 270 | 271 | /** Perform some modifications on time profile: 272 | * - Add non-JS thread activity node if available 273 | * - remove `(program)` nodes 274 | * - remove `(idle)` nodes with no context 275 | * - set `(idle)` nodes' wall time to zero when they have a context 276 | * - Convert `(garbage collector)` node to `Garbage Collection` 277 | * - Put `non-JS thread activity` node and `Garbage Collection` under a top level `Node.js` node 278 | * This function does not change the input profile. 
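 *
 * Illustrative sketch of the reshaping (hypothetical top-level nodes):
 *   before: (program), (idle), (garbage collector), someUserFunction
 *   after:  (idle) [kept only when it has contexts], someUserFunction,
 *           Node.js -> [Garbage Collection, Non JS threads activity (when CPU time is collected)]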
279 | */ 280 | function updateTimeProfile(prof: TimeProfile): TimeProfile { 281 | const newTopLevelChildren: TimeProfileNode[] = []; 282 | 283 | let runtimeNode: TimeProfileNode | undefined; 284 | 285 | function getRuntimeNode(): TimeProfileNode { 286 | if (!runtimeNode) { 287 | runtimeNode = { 288 | name: 'Node.js', 289 | scriptName: '', 290 | scriptId: 0, 291 | lineNumber: 0, 292 | columnNumber: 0, 293 | children: [], 294 | hitCount: 0, 295 | }; 296 | newTopLevelChildren.push(runtimeNode); 297 | } 298 | return runtimeNode; 299 | } 300 | 301 | for (const child of prof.topDownRoot.children as TimeProfileNode[]) { 302 | if (child.name === '(program)') { 303 | continue; 304 | } 305 | if (child.name === '(idle)' && child.contexts?.length === 0) { 306 | continue; 307 | } 308 | if (child.name === '(garbage collector)') { 309 | // Create a new node to avoid modifying the input one 310 | const newChild: TimeProfileNode = { 311 | ...child, 312 | name: GARBAGE_COLLECTION_FUNCTION_NAME, 313 | }; 314 | getRuntimeNode().children.push(newChild); 315 | } else { 316 | newTopLevelChildren.push(child); 317 | } 318 | } 319 | 320 | if (prof.hasCpuTime && prof.nonJSThreadsCpuTime) { 321 | const node: TimeProfileNode = { 322 | name: NON_JS_THREADS_FUNCTION_NAME, 323 | scriptName: '', 324 | scriptId: 0, 325 | lineNumber: 0, 326 | columnNumber: 0, 327 | children: [], 328 | hitCount: 0, // 0 because this should not be accounted for wall time 329 | contexts: [ 330 | { 331 | context: {}, 332 | timestamp: BigInt(0), 333 | cpuTime: prof.nonJSThreadsCpuTime, 334 | asyncId: -1, 335 | }, 336 | ], 337 | }; 338 | getRuntimeNode().children.push(node); 339 | } 340 | return { 341 | ...prof, 342 | topDownRoot: {...prof.topDownRoot, children: newTopLevelChildren}, 343 | }; 344 | } 345 | 346 | /** 347 | * Converts v8 time profile into into a profile proto. 348 | * (https://github.com/google/pprof/blob/master/proto/profile.proto) 349 | * 350 | * @param prof - profile to be converted. 351 | * @param intervalMicros - average time (microseconds) between samples. 352 | */ 353 | export function serializeTimeProfile( 354 | prof: TimeProfile, 355 | intervalMicros: number, 356 | sourceMapper?: SourceMapper, 357 | recomputeSamplingInterval = false, 358 | generateLabels?: GenerateTimeLabelsFunction 359 | ): Profile { 360 | // If requested, recompute sampling interval from profile duration and total number of hits, 361 | // since profile duration should be #hits x interval. 362 | // Recomputing an average interval is more accurate, since in practice intervals between 363 | // samples are larger than the requested sampling interval (eg. 12.5ms vs 10ms requested). 364 | // For very short durations, computation becomes meaningless (eg. if there is only one hit), 365 | // therefore keep intervalMicros as a lower bound and 2 * intervalMicros as upper bound. 
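  // Worked example (hypothetical numbers): with a requested interval of
  // 10,000us and 1,000 total hits over a 12,500,000us profile, the recomputed
  // average is 12,500us, which lies inside [10,000us, 20,000us] and is used
  // as-is; with a single hit over the same window the raw average
  // (12,500,000us) would be clamped down to 20,000us.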
366 | if (recomputeSamplingInterval) { 367 | const totalHitCount = computeTotalHitCount(prof.topDownRoot); 368 | if (totalHitCount > 0) { 369 | intervalMicros = Math.min( 370 | Math.max( 371 | Math.floor((prof.endTime - prof.startTime) / totalHitCount), 372 | intervalMicros 373 | ), 374 | 2 * intervalMicros 375 | ); 376 | } 377 | } 378 | const intervalNanos = intervalMicros * 1000; 379 | const appendTimeEntryToSamples: AppendEntryToSamples = ( 380 | entry: Entry, 381 | samples: Sample[] 382 | ) => { 383 | let unlabelledHits = entry.node.hitCount; 384 | let unlabelledCpuTime = 0; 385 | const isIdle = entry.node.name === '(idle)'; 386 | for (const context of entry.node.contexts || []) { 387 | const labels = generateLabels 388 | ? generateLabels({node: entry.node, context}) 389 | : context.context ?? {}; 390 | if (Object.keys(labels).length > 0) { 391 | // Only assign wall time if there are hits, some special nodes such as `(Non-JS threads)` 392 | // have zero hit count (since they do not count as wall time) and should not be assigned any 393 | // wall time. Also, `(idle)` nodes should be assigned zero wall time. 394 | const values = 395 | unlabelledHits > 0 ? [1, isIdle ? 0 : intervalNanos] : [0, 0]; 396 | if (prof.hasCpuTime) { 397 | values.push(context.cpuTime ?? 0); 398 | } 399 | const sample = new Sample({ 400 | locationId: entry.stack, 401 | value: values, 402 | label: buildLabels(labels, stringTable), 403 | }); 404 | samples.push(sample); 405 | unlabelledHits--; 406 | } else if (prof.hasCpuTime) { 407 | unlabelledCpuTime += context.cpuTime ?? 0; 408 | } 409 | } 410 | if ((!isIdle && unlabelledHits > 0) || unlabelledCpuTime > 0) { 411 | const labels = generateLabels ? generateLabels({node: entry.node}) : {}; 412 | const values = 413 | unlabelledHits > 0 414 | ? [unlabelledHits, isIdle ? 
0 : unlabelledHits * intervalNanos] 415 | : [0, 0]; 416 | if (prof.hasCpuTime) { 417 | values.push(unlabelledCpuTime); 418 | } 419 | const sample = new Sample({ 420 | locationId: entry.stack, 421 | value: values, 422 | label: buildLabels(labels, stringTable), 423 | }); 424 | samples.push(sample); 425 | } 426 | }; 427 | 428 | const stringTable = new StringTable(); 429 | const sampleValueType = createSampleCountValueType(stringTable); 430 | const timeValueType = createTimeValueType(stringTable); 431 | 432 | const sampleTypes = [sampleValueType, timeValueType]; 433 | if (prof.hasCpuTime) { 434 | const cpuValueType = createCpuValueType(stringTable); 435 | sampleTypes.push(cpuValueType); 436 | } 437 | 438 | const profile = { 439 | sampleType: sampleTypes, 440 | timeNanos: Date.now() * 1000 * 1000, 441 | durationNanos: (prof.endTime - prof.startTime) * 1000, 442 | periodType: timeValueType, 443 | period: intervalNanos, 444 | }; 445 | 446 | const updatedProf = updateTimeProfile(prof); 447 | 448 | serialize( 449 | profile, 450 | updatedProf.topDownRoot, 451 | appendTimeEntryToSamples, 452 | stringTable, 453 | undefined, 454 | sourceMapper 455 | ); 456 | 457 | return new Profile(profile); 458 | } 459 | 460 | function buildLabels(labelSet: object, stringTable: StringTable): Label[] { 461 | const labels: Label[] = []; 462 | 463 | for (const [key, value] of Object.entries(labelSet)) { 464 | const labelInput: LabelInput = { 465 | key: stringTable.dedup(key), 466 | }; 467 | switch (typeof value) { 468 | case 'string': 469 | labelInput.str = stringTable.dedup(value); 470 | break; 471 | case 'number': 472 | case 'bigint': 473 | labelInput.num = value; 474 | break; 475 | default: 476 | continue; 477 | } 478 | labels.push(new Label(labelInput)); 479 | } 480 | 481 | return labels; 482 | } 483 | 484 | /** 485 | * Converts v8 heap profile into into a profile proto. 486 | * (https://github.com/google/pprof/blob/master/proto/profile.proto) 487 | * 488 | * @param prof - profile to be converted. 489 | * @param startTimeNanos - start time of profile, in nanoseconds (POSIX time). 490 | * @param durationsNanos - duration of the profile (wall clock time) in 491 | * nanoseconds. 492 | * @param intervalBytes - bytes allocated between samples. 493 | */ 494 | export function serializeHeapProfile( 495 | prof: AllocationProfileNode, 496 | startTimeNanos: number, 497 | intervalBytes: number, 498 | ignoreSamplesPath?: string, 499 | sourceMapper?: SourceMapper, 500 | generateLabels?: GenerateAllocationLabelsFunction 501 | ): Profile { 502 | const appendHeapEntryToSamples: AppendEntryToSamples< 503 | AllocationProfileNode 504 | > = (entry: Entry, samples: Sample[]) => { 505 | if (entry.node.allocations.length > 0) { 506 | const labels = generateLabels 507 | ? 
buildLabels(generateLabels({node: entry.node}), stringTable) 508 | : []; 509 | for (const alloc of entry.node.allocations) { 510 | const sample = new Sample({ 511 | locationId: entry.stack, 512 | value: [alloc.count, alloc.sizeBytes * alloc.count], 513 | label: labels, 514 | // TODO: add tag for allocation size 515 | }); 516 | samples.push(sample); 517 | } 518 | } 519 | }; 520 | 521 | const stringTable = new StringTable(); 522 | const sampleValueType = createObjectCountValueType(stringTable); 523 | const allocationValueType = createAllocationValueType(stringTable); 524 | 525 | const profile = { 526 | sampleType: [sampleValueType, allocationValueType], 527 | timeNanos: startTimeNanos, 528 | periodType: allocationValueType, 529 | period: intervalBytes, 530 | }; 531 | 532 | serialize( 533 | profile, 534 | prof, 535 | appendHeapEntryToSamples, 536 | stringTable, 537 | ignoreSamplesPath, 538 | sourceMapper 539 | ); 540 | 541 | return new Profile(profile); 542 | } 543 | -------------------------------------------------------------------------------- /ts/src/sourcemapper/sourcemapper.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2016 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | // Originally copied from cloud-debug-nodejs's sourcemapper.ts from 18 | // https://github.com/googleapis/cloud-debug-nodejs/blob/7bdc2f1f62a3b45b7b53ea79f9444c8ed50e138b/src/agent/io/sourcemapper.ts 19 | // Modified to map from generated code to source code, rather than from source 20 | // code to generated code. 21 | 22 | import * as fs from 'fs'; 23 | import * as path from 'path'; 24 | import * as sourceMap from 'source-map'; 25 | import {logger} from '../logger'; 26 | import pLimit from 'p-limit'; 27 | 28 | const readFile = fs.promises.readFile; 29 | 30 | const CONCURRENCY = 10; 31 | const MAP_EXT = '.map'; 32 | 33 | function error(msg: string) { 34 | logger.debug(`Error: ${msg}`); 35 | return new Error(msg); 36 | } 37 | 38 | export interface MapInfoCompiled { 39 | mapFileDir: string; 40 | mapConsumer: sourceMap.RawSourceMap; 41 | } 42 | 43 | export interface GeneratedLocation { 44 | file: string; 45 | name?: string; 46 | line: number; 47 | column: number; 48 | } 49 | 50 | export interface SourceLocation { 51 | file?: string; 52 | name?: string; 53 | line?: number; 54 | column?: number; 55 | } 56 | 57 | /** 58 | * @param {!Map} infoMap The map that maps input source files to 59 | * SourceMapConsumer objects that are used to calculate mapping information 60 | * @param {string} mapPath The path to the source map file to process. 
The 61 | * path should be relative to the process's current working directory 62 | * @private 63 | */ 64 | async function processSourceMap( 65 | infoMap: Map, 66 | mapPath: string, 67 | debug: boolean 68 | ): Promise { 69 | // this handles the case when the path is undefined, null, or 70 | // the empty string 71 | if (!mapPath || !mapPath.endsWith(MAP_EXT)) { 72 | throw error(`The path "${mapPath}" does not specify a source map file`); 73 | } 74 | mapPath = path.normalize(mapPath); 75 | 76 | let contents; 77 | try { 78 | contents = await readFile(mapPath, 'utf8'); 79 | } catch (e) { 80 | throw error('Could not read source map file ' + mapPath + ': ' + e); 81 | } 82 | 83 | let consumer: sourceMap.RawSourceMap; 84 | try { 85 | // TODO: Determine how to reconsile the type conflict where `consumer` 86 | // is constructed as a SourceMapConsumer but is used as a 87 | // RawSourceMap. 88 | // TODO: Resolve the cast of `contents as any` (This is needed because the 89 | // type is expected to be of `RawSourceMap` but the existing 90 | // working code uses a string.) 91 | consumer = (await new sourceMap.SourceMapConsumer( 92 | contents as {} as sourceMap.RawSourceMap 93 | )) as {} as sourceMap.RawSourceMap; 94 | } catch (e) { 95 | throw error( 96 | 'An error occurred while reading the ' + 97 | 'sourceMap file ' + 98 | mapPath + 99 | ': ' + 100 | e 101 | ); 102 | } 103 | 104 | /* If the source map file defines a "file" attribute, use it as 105 | * the output file where the path is relative to the directory 106 | * containing the map file. Otherwise, use the name of the output 107 | * file (with the .map extension removed) as the output file. 108 | 109 | * With nextjs/webpack, when there are subdirectories in `pages` directory, 110 | * the generated source maps do not reference correctly the generated files 111 | * in their `file` property. 112 | * For example if the generated file / source maps have paths: 113 | * /pages/sub/foo.js(.map) 114 | * foo.js.map will have ../pages/sub/foo.js as `file` property instead of 115 | * ../../pages/sub/foo.js 116 | * To workaround this, check first if file referenced in `file` property 117 | * exists and if it does not, check if generated file exists alongside the 118 | * source map file. 
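 *
 * Concretely (hypothetical paths): for build/pages/sub/foo.js.map whose
 * `file` property is "../pages/sub/foo.js", the candidates tried in order
 * are path.resolve(dir, consumer.file) and then the map path with ".map"
 * stripped (build/pages/sub/foo.js); the first candidate that exists on
 * disk is the generated file registered in infoMap.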
119 | */ 120 | const dir = path.dirname(mapPath); 121 | const generatedPathCandidates = []; 122 | if (consumer.file) { 123 | generatedPathCandidates.push(path.resolve(dir, consumer.file)); 124 | } 125 | const samePath = path.resolve(dir, path.basename(mapPath, MAP_EXT)); 126 | if ( 127 | generatedPathCandidates.length === 0 || 128 | generatedPathCandidates[0] !== samePath 129 | ) { 130 | generatedPathCandidates.push(samePath); 131 | } 132 | 133 | for (const generatedPath of generatedPathCandidates) { 134 | try { 135 | await fs.promises.access(generatedPath, fs.constants.F_OK); 136 | infoMap.set(generatedPath, {mapFileDir: dir, mapConsumer: consumer}); 137 | if (debug) { 138 | logger.debug(`Loaded source map for ${generatedPath} => ${mapPath}`); 139 | } 140 | return; 141 | } catch (err) { 142 | if (debug) { 143 | logger.debug(`Generated path ${generatedPath} does not exist`); 144 | } 145 | } 146 | } 147 | if (debug) { 148 | logger.debug(`Unable to find generated file for ${mapPath}`); 149 | } 150 | } 151 | 152 | export class SourceMapper { 153 | infoMap: Map; 154 | debug: boolean; 155 | 156 | static async create( 157 | searchDirs: string[], 158 | debug = false 159 | ): Promise { 160 | if (debug) { 161 | logger.debug( 162 | `Looking for source map files in dirs: [${searchDirs.join(', ')}]` 163 | ); 164 | } 165 | const mapFiles: string[] = []; 166 | for (const dir of searchDirs) { 167 | try { 168 | const mf = await getMapFiles(dir); 169 | mf.forEach(mapFile => { 170 | mapFiles.push(path.resolve(dir, mapFile)); 171 | }); 172 | } catch (e) { 173 | throw error(`failed to get source maps from ${dir}: ${e}`); 174 | } 175 | } 176 | if (debug) { 177 | logger.debug(`Found source map files: [${mapFiles.join(', ')}]`); 178 | } 179 | return createFromMapFiles(mapFiles, debug); 180 | } 181 | 182 | /** 183 | * @param {Array.} sourceMapPaths An array of paths to .map source map 184 | * files that should be processed. The paths should be relative to the 185 | * current process's current working directory 186 | * @param {Logger} logger A logger that reports errors that occurred while 187 | * processing the given source map files 188 | * @constructor 189 | */ 190 | constructor(debug = false) { 191 | this.infoMap = new Map(); 192 | this.debug = debug; 193 | } 194 | 195 | /** 196 | * Used to get the information about the transpiled file from a given input 197 | * source file provided there isn't any ambiguity with associating the input 198 | * path to exactly one output transpiled file. 199 | * 200 | * @param inputPath The (possibly relative) path to the original source file. 201 | * @return The `MapInfoCompiled` object that describes the transpiled file 202 | * associated with the specified input path. `null` is returned if either 203 | * zero files are associated with the input path or if more than one file 204 | * could possibly be associated with the given input path. 205 | */ 206 | private getMappingInfo(inputPath: string): MapInfoCompiled | null { 207 | if (this.infoMap.has(path.normalize(inputPath))) { 208 | return this.infoMap.get(inputPath) as MapInfoCompiled; 209 | } 210 | return null; 211 | } 212 | 213 | /** 214 | * Used to determine if the source file specified by the given path has 215 | * a .map file and an output file associated with it. 
216 | * 217 | * If there is no such mapping, it could be because the input file is not 218 | * the input to a transpilation process or it is the input to a transpilation 219 | * process but its corresponding .map file was not given to the constructor 220 | * of this mapper. 221 | * 222 | * @param {string} inputPath The path to an input file that could 223 | * possibly be the input to a transpilation process. The path should be 224 | * relative to the process's current working directory. 225 | */ 226 | hasMappingInfo(inputPath: string): boolean { 227 | return this.getMappingInfo(inputPath) !== null; 228 | } 229 | 230 | /** 231 | * @param {string} inputPath The path to an input file that could possibly 232 | * be the input to a transpilation process. The path should be relative to 233 | * the process's current working directory 234 | * @param {number} The line number in the input file where the line number is 235 | * zero-based. 236 | * @param {number} (Optional) The column number in the line of the file 237 | * specified where the column number is zero-based. 238 | * @return {Object} The object returned has a "file" attribute for the 239 | * path of the output file associated with the given input file (where the 240 | * path is relative to the process's current working directory), 241 | * a "line" attribute of the line number in the output file associated with 242 | * the given line number for the input file, and an optional "column" number 243 | * of the column number of the output file associated with the given file 244 | * and line information. 245 | * 246 | * If the given input file does not have mapping information associated 247 | * with it then the input location is returned. 248 | */ 249 | mappingInfo(location: GeneratedLocation): SourceLocation { 250 | const inputPath = path.normalize(location.file); 251 | const entry = this.getMappingInfo(inputPath); 252 | if (entry === null) { 253 | if (this.debug) { 254 | logger.debug( 255 | `Source map lookup failed: no map found for ${location.file} (normalized: ${inputPath})` 256 | ); 257 | } 258 | return location; 259 | } 260 | 261 | const generatedPos = { 262 | line: location.line, 263 | column: location.column > 0 ? location.column - 1 : 0, // SourceMapConsumer expects column to be 0-based 264 | }; 265 | 266 | // TODO: Determine how to remove the explicit cast here. 267 | const consumer: sourceMap.SourceMapConsumer = 268 | entry.mapConsumer as {} as sourceMap.SourceMapConsumer; 269 | 270 | const pos = consumer.originalPositionFor(generatedPos); 271 | if (pos.source === null) { 272 | if (this.debug) { 273 | logger.debug( 274 | `Source map lookup failed for ${location.name}(${location.file}:${location.line}:${location.column})` 275 | ); 276 | } 277 | return location; 278 | } 279 | 280 | const loc = { 281 | file: path.resolve(entry.mapFileDir, pos.source), 282 | line: pos.line || undefined, 283 | name: pos.name || location.name, 284 | column: pos.column === null ? 
undefined : pos.column + 1, // convert column back to 1-based 285 | }; 286 | 287 | if (this.debug) { 288 | logger.debug( 289 | `Source map lookup succeeded for ${location.name}(${location.file}:${location.line}:${location.column}) => ${loc.name}(${loc.file}:${loc.line}:${loc.column})` 290 | ); 291 | } 292 | return loc; 293 | } 294 | } 295 | 296 | async function createFromMapFiles( 297 | mapFiles: string[], 298 | debug: boolean 299 | ): Promise { 300 | const limit = pLimit(CONCURRENCY); 301 | const mapper = new SourceMapper(debug); 302 | const promises: Array> = mapFiles.map(mapPath => 303 | limit(() => processSourceMap(mapper.infoMap, mapPath, debug)) 304 | ); 305 | try { 306 | await Promise.all(promises); 307 | } catch (err) { 308 | throw error( 309 | 'An error occurred while processing the source map files' + err 310 | ); 311 | } 312 | return mapper; 313 | } 314 | 315 | function isErrnoException(e: unknown): e is NodeJS.ErrnoException { 316 | return e instanceof Error && 'code' in e; 317 | } 318 | 319 | function isNonFatalError(error: unknown) { 320 | const nonFatalErrors = ['ENOENT', 'EPERM', 'EACCES', 'ELOOP']; 321 | 322 | return ( 323 | isErrnoException(error) && error.code && nonFatalErrors.includes(error.code) 324 | ); 325 | } 326 | 327 | async function* walk( 328 | dir: string, 329 | // eslint-disable-next-line @typescript-eslint/no-unused-vars 330 | fileFilter = (filename: string) => true, 331 | // eslint-disable-next-line @typescript-eslint/no-unused-vars 332 | directoryFilter = (root: string, dirname: string) => true 333 | ): AsyncIterable { 334 | async function* walkRecursive(dir: string): AsyncIterable { 335 | try { 336 | for await (const d of await fs.promises.opendir(dir)) { 337 | const entry = path.join(dir, d.name); 338 | if (d.isDirectory() && directoryFilter(dir, d.name)) { 339 | yield* walkRecursive(entry); 340 | } else if (d.isFile() && fileFilter(d.name)) { 341 | // check that the file is readable 342 | await fs.promises.access(entry, fs.constants.R_OK); 343 | yield entry; 344 | } 345 | } 346 | } catch (error) { 347 | if (!isNonFatalError(error)) { 348 | throw error; 349 | } else { 350 | logger.debug(() => `Non fatal error: ${error}`); 351 | } 352 | } 353 | } 354 | 355 | yield* walkRecursive(dir); 356 | } 357 | 358 | async function getMapFiles(baseDir: string): Promise { 359 | const mapFiles: string[] = []; 360 | for await (const entry of walk( 361 | baseDir, 362 | filename => /\.[cm]?js\.map$/.test(filename), 363 | (root, dirname) => 364 | root !== '/proc' && dirname !== '.git' && dirname !== 'node_modules' 365 | )) { 366 | mapFiles.push(path.relative(baseDir, entry)); 367 | } 368 | return mapFiles; 369 | } 370 | -------------------------------------------------------------------------------- /ts/src/time-profiler-bindings.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2018 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | import {join} from 'path'; 17 | 18 | const findBinding = require('node-gyp-build'); 19 | const profiler = findBinding(join(__dirname, '..', '..')); 20 | 21 | export const TimeProfiler = profiler.TimeProfiler; 22 | export const constants = profiler.constants; 23 | export const getNativeThreadId = profiler.getNativeThreadId; 24 | -------------------------------------------------------------------------------- /ts/src/time-profiler.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import delay from 'delay'; 18 | 19 | import { 20 | serializeTimeProfile, 21 | GARBAGE_COLLECTION_FUNCTION_NAME, 22 | NON_JS_THREADS_FUNCTION_NAME, 23 | } from './profile-serializer'; 24 | import {SourceMapper} from './sourcemapper/sourcemapper'; 25 | import { 26 | TimeProfiler, 27 | getNativeThreadId, 28 | constants as profilerConstants, 29 | } from './time-profiler-bindings'; 30 | import {GenerateTimeLabelsFunction} from './v8-types'; 31 | import {isMainThread} from 'worker_threads'; 32 | 33 | const {kSampleCount} = profilerConstants; 34 | 35 | const DEFAULT_INTERVAL_MICROS: Microseconds = 1000; 36 | const DEFAULT_DURATION_MILLIS: Milliseconds = 60000; 37 | 38 | type Microseconds = number; 39 | type Milliseconds = number; 40 | 41 | let gProfiler: InstanceType | undefined; 42 | let gSourceMapper: SourceMapper | undefined; 43 | let gIntervalMicros: Microseconds; 44 | let gV8ProfilerStuckEventLoopDetected = 0; 45 | 46 | /** Make sure to stop profiler before node shuts down, otherwise profiling 47 | * signal might cause a crash if it occurs during shutdown */ 48 | process.once('exit', () => { 49 | if (isStarted()) stop(); 50 | }); 51 | 52 | export interface TimeProfilerOptions { 53 | /** time in milliseconds for which to collect profile. */ 54 | durationMillis?: Milliseconds; 55 | /** average time in microseconds between samples */ 56 | intervalMicros?: Microseconds; 57 | sourceMapper?: SourceMapper; 58 | 59 | /** 60 | * This configuration option is experimental. 61 | * When set to true, functions will be aggregated at the line level, rather 62 | * than at the function level. 63 | * This defaults to false. 
64 | */ 65 | lineNumbers?: boolean; 66 | withContexts?: boolean; 67 | workaroundV8Bug?: boolean; 68 | collectCpuTime?: boolean; 69 | collectAsyncId?: boolean; 70 | } 71 | 72 | const DEFAULT_OPTIONS: TimeProfilerOptions = { 73 | durationMillis: DEFAULT_DURATION_MILLIS, 74 | intervalMicros: DEFAULT_INTERVAL_MICROS, 75 | lineNumbers: false, 76 | withContexts: false, 77 | workaroundV8Bug: true, 78 | collectCpuTime: false, 79 | collectAsyncId: false, 80 | }; 81 | 82 | export async function profile(options: TimeProfilerOptions = {}) { 83 | options = {...DEFAULT_OPTIONS, ...options}; 84 | start(options); 85 | await delay(options.durationMillis!); 86 | return stop(); 87 | } 88 | 89 | // Temporarily retained for backwards compatibility with older tracer 90 | export function start(options: TimeProfilerOptions = {}) { 91 | options = {...DEFAULT_OPTIONS, ...options}; 92 | if (gProfiler) { 93 | throw new Error('Wall profiler is already started'); 94 | } 95 | 96 | gProfiler = new TimeProfiler({...options, isMainThread}); 97 | gSourceMapper = options.sourceMapper; 98 | gIntervalMicros = options.intervalMicros!; 99 | gV8ProfilerStuckEventLoopDetected = 0; 100 | 101 | gProfiler.start(); 102 | 103 | // If contexts are enabled, set an initial empty context 104 | if (options.withContexts) { 105 | setContext({}); 106 | } 107 | } 108 | 109 | export function stop( 110 | restart = false, 111 | generateLabels?: GenerateTimeLabelsFunction 112 | ) { 113 | if (!gProfiler) { 114 | throw new Error('Wall profiler is not started'); 115 | } 116 | 117 | const profile = gProfiler.stop(restart); 118 | if (restart) { 119 | gV8ProfilerStuckEventLoopDetected = 120 | gProfiler.v8ProfilerStuckEventLoopDetected(); 121 | // Workaround for v8 bug, where profiler event processor thread is stuck in 122 | // a loop eating 100% CPU, leading to empty profiles. 123 | // Fully stop and restart the profiler to reset the profile to a valid state. 
124 | if (gV8ProfilerStuckEventLoopDetected > 0) { 125 | gProfiler.stop(false); 126 | gProfiler.start(); 127 | } 128 | } else { 129 | gV8ProfilerStuckEventLoopDetected = 0; 130 | } 131 | 132 | const serialized_profile = serializeTimeProfile( 133 | profile, 134 | gIntervalMicros, 135 | gSourceMapper, 136 | true, 137 | generateLabels 138 | ); 139 | if (!restart) { 140 | gProfiler.dispose(); 141 | gProfiler = undefined; 142 | gSourceMapper = undefined; 143 | } 144 | return serialized_profile; 145 | } 146 | 147 | export function getState() { 148 | if (!gProfiler) { 149 | throw new Error('Wall profiler is not started'); 150 | } 151 | return gProfiler.state; 152 | } 153 | 154 | export function setContext(context?: object) { 155 | if (!gProfiler) { 156 | throw new Error('Wall profiler is not started'); 157 | } 158 | gProfiler.context = context; 159 | } 160 | 161 | export function getContext() { 162 | if (!gProfiler) { 163 | throw new Error('Wall profiler is not started'); 164 | } 165 | return gProfiler.context; 166 | } 167 | 168 | export function isStarted() { 169 | return !!gProfiler; 170 | } 171 | 172 | // Return 0 if no issue detected, 1 if possible issue, 2 if issue detected for certain 173 | export function v8ProfilerStuckEventLoopDetected() { 174 | return gV8ProfilerStuckEventLoopDetected; 175 | } 176 | 177 | export const constants = { 178 | kSampleCount, 179 | GARBAGE_COLLECTION_FUNCTION_NAME, 180 | NON_JS_THREADS_FUNCTION_NAME, 181 | }; 182 | export {getNativeThreadId}; 183 | -------------------------------------------------------------------------------- /ts/src/v8-types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | // Type Definitions based on implementation in bindings/ 18 | 19 | export interface TimeProfile { 20 | /** Time in nanoseconds at which profile was stopped. */ 21 | endTime: number; 22 | topDownRoot: TimeProfileNode; 23 | /** Time in nanoseconds at which profile was started. */ 24 | startTime: number; 25 | hasCpuTime?: boolean; 26 | /** CPU time of non-JS threads, only reported for the main worker thread */ 27 | nonJSThreadsCpuTime?: number; 28 | } 29 | 30 | export interface ProfileNode { 31 | // name is the function name. 
32 | name?: string; 33 | scriptName: string; 34 | scriptId?: number; 35 | lineNumber?: number; 36 | columnNumber?: number; 37 | children: ProfileNode[]; 38 | } 39 | 40 | export interface TimeProfileNodeContext { 41 | context?: object; 42 | timestamp: bigint; // end of sample taking; in microseconds since epoch 43 | cpuTime?: number; // cpu time in nanoseconds 44 | asyncId?: number; // async_hooks.executionAsyncId() at the time of sample taking 45 | } 46 | 47 | export interface TimeProfileNode extends ProfileNode { 48 | hitCount: number; 49 | contexts?: TimeProfileNodeContext[]; 50 | } 51 | 52 | export interface AllocationProfileNode extends ProfileNode { 53 | allocations: Allocation[]; 54 | } 55 | 56 | export interface Allocation { 57 | sizeBytes: number; 58 | count: number; 59 | } 60 | export interface LabelSet { 61 | [key: string]: string | number; 62 | } 63 | 64 | export interface GenerateAllocationLabelsFunction { 65 | ({node}: {node: AllocationProfileNode}): LabelSet; 66 | } 67 | 68 | export interface GenerateTimeLabelsArgs { 69 | node: TimeProfileNode; 70 | context?: TimeProfileNodeContext; 71 | } 72 | 73 | export interface GenerateTimeLabelsFunction { 74 | (args: GenerateTimeLabelsArgs): LabelSet; 75 | } 76 | -------------------------------------------------------------------------------- /ts/test/check_profile.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs'; 2 | 3 | if (fs.existsSync(process.argv[1])) { 4 | fs.writeFileSync('oom_check.log', 'ok'); 5 | } else { 6 | fs.writeFileSync('oom_check.log', 'ko'); 7 | } 8 | -------------------------------------------------------------------------------- /ts/test/oom.ts: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /* eslint-disable no-console */ 4 | import {Worker, isMainThread, threadId} from 'worker_threads'; 5 | import {heap} from '../src/index'; 6 | import path from 'path'; 7 | 8 | const nworkers = Number(process.argv[2] || 0); 9 | const workerMaxOldGenerationSizeMb = process.argv[3]; 10 | const maxCount = Number(process.argv[4] || 12); 11 | const sleepMs = Number(process.argv[5] || 50); 12 | const sizeQuantum = Number(process.argv[6] || 5 * 1024 * 1024); 13 | 14 | console.log(`${isMainThread ? 'Main thread' : `Worker ${threadId}`}: \ 15 | nworkers=${nworkers} workerMaxOldGenerationSizeMb=${workerMaxOldGenerationSizeMb} \ 16 | maxCount=${maxCount} sleepMs=${sleepMs} sizeQuantum=${sizeQuantum}`); 17 | 18 | heap.start(1024 * 1024, 64); 19 | heap.monitorOutOfMemory(0, 0, false, [ 20 | process.execPath, 21 | path.join(__dirname, 'check_profile.js'), 22 | ]); 23 | 24 | if (isMainThread) { 25 | for (let i = 0; i < nworkers; i++) { 26 | const worker = new Worker(__filename, { 27 | argv: [0, ...process.argv.slice(3)], 28 | ...(workerMaxOldGenerationSizeMb 29 | ? 
{resourceLimits: {maxOldGenerationSizeMb: Number(workerMaxOldGenerationSizeMb)}} 30 | : {}), 31 | }); 32 | const threadId = worker.threadId; 33 | worker 34 | .on('error', err => { 35 | console.log(`Worker ${threadId} error: ${err}`); 36 | }) 37 | .on('exit', code => { 38 | console.log(`Worker ${threadId} exit: ${code}`); 39 | }); 40 | } 41 | } 42 | 43 | const leak: number[][] = []; 44 | let count = 0; 45 | 46 | function foo(size: number) { 47 | count += 1; 48 | const n = size / 8; 49 | const x: number[] = []; 50 | x.length = n; 51 | for (let i = 0; i < n; i++) { 52 | x[i] = Math.random(); 53 | } 54 | leak.push(x); 55 | 56 | if (count < maxCount) { 57 | setTimeout(() => foo(size), sleepMs); 58 | } 59 | } 60 | 61 | setTimeout(() => foo(sizeQuantum), sleepMs); 62 | -------------------------------------------------------------------------------- /ts/test/test-heap-profiler.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import * as sinon from 'sinon'; 18 | 19 | import * as heapProfiler from '../src/heap-profiler'; 20 | import * as v8HeapProfiler from '../src/heap-profiler-bindings'; 21 | import {AllocationProfileNode, LabelSet} from '../src/v8-types'; 22 | import {fork} from 'child_process'; 23 | import path from 'path'; 24 | import fs from 'fs'; 25 | 26 | import { 27 | heapProfileExcludePath, 28 | heapProfileIncludePath, 29 | heapProfileWithExternal, 30 | v8HeapProfile, 31 | v8HeapWithPathProfile, 32 | heapProfileIncludePathWithLabels, 33 | } from './profiles-for-tests'; 34 | 35 | const copy = require('deep-copy'); 36 | const assert = require('assert'); 37 | 38 | describe('HeapProfiler', () => { 39 | let startStub: sinon.SinonStub<[number, number], void>; 40 | let stopStub: sinon.SinonStub<[], void>; 41 | let profileStub: sinon.SinonStub<[], AllocationProfileNode>; 42 | let dateStub: sinon.SinonStub<[], number>; 43 | let memoryUsageStub: sinon.SinonStub<[], NodeJS.MemoryUsage>; 44 | beforeEach(() => { 45 | startStub = sinon.stub(v8HeapProfiler, 'startSamplingHeapProfiler'); 46 | stopStub = sinon.stub(v8HeapProfiler, 'stopSamplingHeapProfiler'); 47 | dateStub = sinon.stub(Date, 'now').returns(0); 48 | }); 49 | 50 | afterEach(() => { 51 | heapProfiler.stop(); 52 | startStub.restore(); 53 | stopStub.restore(); 54 | profileStub.restore(); 55 | dateStub.restore(); 56 | memoryUsageStub.restore(); 57 | }); 58 | describe('profile', () => { 59 | it('should return a profile equal to the expected profile when external memory is allocated', async () => { 60 | profileStub = sinon 61 | .stub(v8HeapProfiler, 'getAllocationProfile') 62 | .returns(copy(v8HeapProfile)); 63 | memoryUsageStub = sinon.stub(process, 'memoryUsage').returns({ 64 | external: 1024, 65 | rss: 2048, 66 | heapTotal: 4096, 67 | heapUsed: 2048, 68 | arrayBuffers: 512, 69 | }); 70 | const intervalBytes = 1024 * 512; 71 | const stackDepth = 32; 72 |
heapProfiler.start(intervalBytes, stackDepth); 73 | const profile = heapProfiler.profile(); 74 | assert.deepEqual(heapProfileWithExternal, profile); 75 | }); 76 | 77 | it('should return a profile equal to the expected profile when including all samples', async () => { 78 | profileStub = sinon 79 | .stub(v8HeapProfiler, 'getAllocationProfile') 80 | .returns(copy(v8HeapWithPathProfile)); 81 | memoryUsageStub = sinon.stub(process, 'memoryUsage').returns({ 82 | external: 0, 83 | rss: 2048, 84 | heapTotal: 4096, 85 | heapUsed: 2048, 86 | arrayBuffers: 512, 87 | }); 88 | const intervalBytes = 1024 * 512; 89 | const stackDepth = 32; 90 | heapProfiler.start(intervalBytes, stackDepth); 91 | const profile = heapProfiler.profile(); 92 | assert.deepEqual(heapProfileIncludePath, profile); 93 | }); 94 | 95 | it('should return a profile equal to the expected profile when excluding profiler samples', async () => { 96 | profileStub = sinon 97 | .stub(v8HeapProfiler, 'getAllocationProfile') 98 | .returns(copy(v8HeapWithPathProfile)); 99 | memoryUsageStub = sinon.stub(process, 'memoryUsage').returns({ 100 | external: 0, 101 | rss: 2048, 102 | heapTotal: 4096, 103 | heapUsed: 2048, 104 | arrayBuffers: 512, 105 | }); 106 | const intervalBytes = 1024 * 512; 107 | const stackDepth = 32; 108 | heapProfiler.start(intervalBytes, stackDepth); 109 | const profile = heapProfiler.profile('@google-cloud/profiler'); 110 | assert.deepEqual(heapProfileExcludePath, profile); 111 | }); 112 | 113 | it('should return a profile equal to the expected profile when adding labels', async () => { 114 | profileStub = sinon 115 | .stub(v8HeapProfiler, 'getAllocationProfile') 116 | .returns(copy(v8HeapWithPathProfile)); 117 | memoryUsageStub = sinon.stub(process, 'memoryUsage').returns({ 118 | external: 0, 119 | rss: 2048, 120 | heapTotal: 4096, 121 | heapUsed: 2048, 122 | arrayBuffers: 512, 123 | }); 124 | const intervalBytes = 1024 * 512; 125 | const stackDepth = 32; 126 | heapProfiler.start(intervalBytes, stackDepth); 127 | const labels: LabelSet = {baz: 'bar'}; 128 | const profile = heapProfiler.profile(undefined, undefined, () => { 129 | return labels; 130 | }); 131 | assert.deepEqual(heapProfileIncludePathWithLabels, profile); 132 | }); 133 | 134 | it('should throw error when not started', () => { 135 | assert.throws( 136 | () => { 137 | heapProfiler.profile(); 138 | }, 139 | (err: Error) => { 140 | return err.message === 'Heap profiler is not enabled.'; 141 | } 142 | ); 143 | }); 144 | 145 | it('should throw error when started then stopped', () => { 146 | const intervalBytes = 1024 * 512; 147 | const stackDepth = 32; 148 | heapProfiler.start(intervalBytes, stackDepth); 149 | heapProfiler.stop(); 150 | assert.throws( 151 | () => { 152 | heapProfiler.profile(); 153 | }, 154 | (err: Error) => { 155 | return err.message === 'Heap profiler is not enabled.'; 156 | } 157 | ); 158 | }); 159 | }); 160 | 161 | describe('start', () => { 162 | it('should call startSamplingHeapProfiler', () => { 163 | const intervalBytes1 = 1024 * 512; 164 | const stackDepth1 = 32; 165 | heapProfiler.start(intervalBytes1, stackDepth1); 166 | assert.ok( 167 | startStub.calledWith(intervalBytes1, stackDepth1), 168 | 'expected startSamplingHeapProfiler to be called' 169 | ); 170 | }); 171 | it('should throw error when enabled and started with different parameters', () => { 172 | const intervalBytes1 = 1024 * 512; 173 | const stackDepth1 = 32; 174 | heapProfiler.start(intervalBytes1, stackDepth1); 175 | assert.ok( 176 | startStub.calledWith(intervalBytes1, 
stackDepth1), 177 | 'expected startSamplingHeapProfiler to be called' 178 | ); 179 | startStub.resetHistory(); 180 | const intervalBytes2 = 1024 * 128; 181 | const stackDepth2 = 64; 182 | try { 183 | heapProfiler.start(intervalBytes2, stackDepth2); 184 | } catch (e) { 185 | assert.strictEqual( 186 | (e as Error).message, 187 | 'Heap profiler is already started with intervalBytes 524288 and' + 188 | ' stackDepth 32' 189 | ); 190 | } 191 | assert.ok( 192 | !startStub.called, 193 | 'expected startSamplingHeapProfiler not to be called second time' 194 | ); 195 | }); 196 | }); 197 | 198 | describe('stop', () => { 199 | it('should not call stopSamplingHeapProfiler if profiler not started', () => { 200 | heapProfiler.stop(); 201 | assert.ok(!stopStub.called, 'stop() should have been no-op.'); 202 | }); 203 | it('should call stopSamplingHeapProfiler if profiler started', () => { 204 | heapProfiler.start(1024 * 512, 32); 205 | heapProfiler.stop(); 206 | assert.ok( 207 | stopStub.called, 208 | 'expected stopSamplingHeapProfiler to be called' 209 | ); 210 | }); 211 | }); 212 | }); 213 | 214 | describe('OOMMonitoring', () => { 215 | it('should call external process upon OOM', async function () { 216 | // this test is very slow on some configs (asan/valgrind) 217 | this.timeout(20000); 218 | const proc = fork(path.join(__dirname, 'oom.js'), { 219 | execArgv: ['--max-old-space-size=50'], 220 | }); 221 | const checkFilePath = 'oom_check.log'; 222 | if (fs.existsSync(checkFilePath)) { 223 | fs.unlinkSync(checkFilePath); 224 | } 225 | // wait for proc to exit 226 | await new Promise((resolve, reject) => { 227 | proc.on('exit', code => { 228 | if (code === 0) { 229 | reject(); 230 | } else { 231 | resolve(); 232 | } 233 | }); 234 | }); 235 | assert.equal(fs.readFileSync(checkFilePath), 'ok'); 236 | fs.unlinkSync(checkFilePath); 237 | }); 238 | }); 239 | -------------------------------------------------------------------------------- /ts/test/test-profile-encoder.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2019 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | 17 | import {promisify} from 'util'; 18 | import {gunzip as gunzipCallback, gunzipSync} from 'zlib'; 19 | 20 | import {Profile} from 'pprof-format'; 21 | import {encode, encodeSync} from '../src/profile-encoder'; 22 | 23 | import {decodedTimeProfile, timeProfile} from './profiles-for-tests'; 24 | 25 | const assert = require('assert'); 26 | const gunzip = promisify(gunzipCallback); 27 | 28 | describe('profile-encoder', () => { 29 | describe('encode', () => { 30 | it('should encode profile such that the encoded profile can be decoded', async () => { 31 | const encoded = await encode(timeProfile); 32 | const unzipped = await gunzip(encoded); 33 | const decoded = Profile.decode(unzipped); 34 | assert.deepEqual(decoded, decodedTimeProfile); 35 | }); 36 | }); 37 | describe('encodeSync', () => { 38 | it('should encode profile such that the encoded profile can be decoded', () => { 39 | const encoded = encodeSync(timeProfile); 40 | const unzipped = gunzipSync(encoded); 41 | const decoded = Profile.decode(unzipped); 42 | assert.deepEqual(decoded, decodedTimeProfile); 43 | }); 44 | }); 45 | }); 46 | -------------------------------------------------------------------------------- /ts/test/test-profile-serializer.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017 Google Inc. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | import * as sinon from 'sinon'; 17 | import * as tmp from 'tmp'; 18 | 19 | import { 20 | NON_JS_THREADS_FUNCTION_NAME, 21 | serializeHeapProfile, 22 | serializeTimeProfile, 23 | } from '../src/profile-serializer'; 24 | import {SourceMapper} from '../src/sourcemapper/sourcemapper'; 25 | import {Label, Profile} from 'pprof-format'; 26 | import {TimeProfile} from '../src/v8-types'; 27 | import { 28 | anonymousFunctionHeapProfile, 29 | getAndVerifyPresence, 30 | heapProfile, 31 | heapSourceProfile, 32 | labelEncodingProfile, 33 | mapDirPath, 34 | timeProfile, 35 | timeSourceProfile, 36 | v8AnonymousFunctionHeapProfile, 37 | v8HeapGeneratedProfile, 38 | v8HeapProfile, 39 | v8TimeGeneratedProfile, 40 | v8TimeProfile, 41 | } from './profiles-for-tests'; 42 | 43 | const assert = require('assert'); 44 | 45 | function getNonJSThreadsSample(profile: Profile): Number[] | null { 46 | for (const sample of profile.sample!)
{ 47 | const locationId = sample.locationId[0]; 48 | const location = getAndVerifyPresence( 49 | profile.location!, 50 | locationId as number 51 | ); 52 | const functionId = location.line![0].functionId; 53 | const fn = getAndVerifyPresence(profile.function!, functionId as number); 54 | const fn_name = profile.stringTable.strings[fn.name as number]; 55 | if (fn_name === NON_JS_THREADS_FUNCTION_NAME) { 56 | return sample.value as Number[]; 57 | } 58 | } 59 | 60 | return null; 61 | } 62 | 63 | describe('profile-serializer', () => { 64 | let dateStub: sinon.SinonStub<[], number>; 65 | 66 | before(() => { 67 | dateStub = sinon.stub(Date, 'now').returns(0); 68 | }); 69 | after(() => { 70 | dateStub.restore(); 71 | }); 72 | 73 | describe('serializeTimeProfile', () => { 74 | it('should produce expected profile', () => { 75 | const timeProfileOut = serializeTimeProfile(v8TimeProfile, 1000); 76 | assert.deepEqual(timeProfileOut, timeProfile); 77 | }); 78 | 79 | it('should omit non-JS threads CPU time when profile has no CPU time', () => { 80 | const timeProfile: TimeProfile = { 81 | startTime: 0, 82 | endTime: 10 * 1000 * 1000, 83 | hasCpuTime: false, 84 | nonJSThreadsCpuTime: 1000, 85 | topDownRoot: { 86 | name: '(root)', 87 | scriptName: 'root', 88 | scriptId: 0, 89 | lineNumber: 0, 90 | columnNumber: 0, 91 | hitCount: 0, 92 | children: [], 93 | }, 94 | }; 95 | const timeProfileOut = serializeTimeProfile(timeProfile, 1000); 96 | assert.equal(getNonJSThreadsSample(timeProfileOut), null); 97 | const timeProfileOutWithLabels = serializeTimeProfile( 98 | timeProfile, 99 | 1000, 100 | undefined, 101 | false, 102 | () => { 103 | return {foo: 'bar'}; 104 | } 105 | ); 106 | assert.equal(getNonJSThreadsSample(timeProfileOutWithLabels), null); 107 | }); 108 | 109 | it('should omit non-JS threads CPU time when it is zero', () => { 110 | const timeProfile: TimeProfile = { 111 | startTime: 0, 112 | endTime: 10 * 1000 * 1000, 113 | hasCpuTime: true, 114 | nonJSThreadsCpuTime: 0, 115 | topDownRoot: { 116 | name: '(root)', 117 | scriptName: 'root', 118 | scriptId: 0, 119 | lineNumber: 0, 120 | columnNumber: 0, 121 | hitCount: 0, 122 | children: [], 123 | }, 124 | }; 125 | const timeProfileOut = serializeTimeProfile(timeProfile, 1000); 126 | assert.equal(getNonJSThreadsSample(timeProfileOut), null); 127 | const timeProfileOutWithLabels = serializeTimeProfile( 128 | timeProfile, 129 | 1000, 130 | undefined, 131 | false, 132 | () => { 133 | return {foo: 'bar'}; 134 | } 135 | ); 136 | assert.equal(getNonJSThreadsSample(timeProfileOutWithLabels), null); 137 | }); 138 | 139 | it('should produce Non-JS thread sample with zero wall time', () => { 140 | const timeProfile: TimeProfile = { 141 | startTime: 0, 142 | endTime: 10 * 1000 * 1000, 143 | hasCpuTime: true, 144 | nonJSThreadsCpuTime: 1000, 145 | topDownRoot: { 146 | name: '(root)', 147 | scriptName: 'root', 148 | scriptId: 0, 149 | lineNumber: 0, 150 | columnNumber: 0, 151 | hitCount: 0, 152 | children: [], 153 | }, 154 | }; 155 | const timeProfileOut = serializeTimeProfile(timeProfile, 1000); 156 | const values = getNonJSThreadsSample(timeProfileOut); 157 | assert.notEqual(values, null); 158 | assert.equal(values![0], 0); 159 | assert.equal(values![1], 0); 160 | assert.equal(values![2], 1000); 161 | const timeProfileOutWithLabels = serializeTimeProfile( 162 | timeProfile, 163 | 1000, 164 | undefined, 165 | false, 166 | () => { 167 | return {foo: 'bar'}; 168 | } 169 | ); 170 | const valuesWithLabels = getNonJSThreadsSample(timeProfileOutWithLabels); 171 |
assert.notEqual(valuesWithLabels, null); 172 | assert.equal(valuesWithLabels![0], 0); 173 | assert.equal(valuesWithLabels![1], 0); 174 | assert.equal(valuesWithLabels![2], 1000); 175 | }); 176 | }); 177 | 178 | describe('label builder', () => { 179 | it('should accept strings, numbers, and bigints', () => { 180 | const profileOut = serializeTimeProfile(labelEncodingProfile, 1000); 181 | const st = profileOut.stringTable; 182 | assert.deepEqual(profileOut.sample[0].label, [ 183 | new Label({key: st.dedup('someStr'), str: st.dedup('foo')}), 184 | new Label({key: st.dedup('someNum'), num: 42}), 185 | new Label({key: st.dedup('someBigint'), num: 18446744073709551557n}), 186 | ]); 187 | }); 188 | }); 189 | 190 | describe('serializeHeapProfile', () => { 191 | it('should produce expected profile', () => { 192 | const heapProfileOut = serializeHeapProfile(v8HeapProfile, 0, 512 * 1024); 193 | assert.deepEqual(heapProfileOut, heapProfile); 194 | }); 195 | it('should produce expected profile when there is anonymous function', () => { 196 | const heapProfileOut = serializeHeapProfile( 197 | v8AnonymousFunctionHeapProfile, 198 | 0, 199 | 512 * 1024 200 | ); 201 | assert.deepEqual(heapProfileOut, anonymousFunctionHeapProfile); 202 | }); 203 | }); 204 | 205 | describe('source map specified', () => { 206 | let sourceMapper: SourceMapper; 207 | before(async () => { 208 | const sourceMapFiles = [mapDirPath]; 209 | sourceMapper = await SourceMapper.create(sourceMapFiles); 210 | }); 211 | 212 | describe('serializeHeapProfile', () => { 213 | it('should produce expected profile', () => { 214 | const heapProfileOut = serializeHeapProfile( 215 | v8HeapGeneratedProfile, 216 | 0, 217 | 512 * 1024, 218 | undefined, 219 | sourceMapper 220 | ); 221 | assert.deepEqual(heapProfileOut, heapSourceProfile); 222 | }); 223 | }); 224 | 225 | describe('serializeTimeProfile', () => { 226 | it('should produce expected profile', () => { 227 | const timeProfileOut = serializeTimeProfile( 228 | v8TimeGeneratedProfile, 229 | 1000, 230 | sourceMapper 231 | ); 232 | assert.deepEqual(timeProfileOut, timeSourceProfile); 233 | }); 234 | }); 235 | 236 | after(() => { 237 | tmp.setGracefulCleanup(); 238 | }); 239 | }); 240 | }); 241 | -------------------------------------------------------------------------------- /ts/test/test-worker-threads.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line node/no-unsupported-features/node-builtins 2 | import {execFile} from 'child_process'; 3 | import {promisify} from 'util'; 4 | 5 | const exec = promisify(execFile); 6 | 7 | describe('Worker Threads', () => { 8 | // eslint-disable-next-line prefer-arrow-callback 9 | it('should work', function () { 10 | this.timeout(20000); 11 | const nbWorkers = 2; 12 | return exec('node', ['./out/test/worker.js', String(nbWorkers)]); 13 | }); 14 | }); 15 | -------------------------------------------------------------------------------- /ts/test/worker.ts: -------------------------------------------------------------------------------- 1 | import {Worker, isMainThread, workerData, parentPort} from 'worker_threads'; 2 | import {pbkdf2} from 'crypto'; 3 | import {time} from '../src/index'; 4 | import {Profile, ValueType} from 'pprof-format'; 5 | import {getAndVerifyPresence, getAndVerifyString} from './profiles-for-tests'; 6 | 7 | import assert from 'assert'; 8 | 9 | const DURATION_MILLIS = 1000; 10 | const intervalMicros = 10000; 11 | const withContexts = 12 | process.platform === 'darwin' ||
process.platform === 'linux'; 13 | 14 | function createWorker(durationMs: number): Promise<Profile[]> { 15 | return new Promise((resolve, reject) => { 16 | const profiles: Profile[] = []; 17 | new Worker(__filename, {workerData: {durationMs}}) 18 | .on('exit', exitCode => { 19 | if (exitCode !== 0) reject(); 20 | setTimeout(() => { 21 | // Run a second worker after the first one exited to test for proper 22 | // cleanup after first worker. This used to segfault. 23 | new Worker(__filename, {workerData: {durationMs}}) 24 | .on('exit', exitCode => { 25 | if (exitCode !== 0) reject(); 26 | resolve(profiles); 27 | }) 28 | .on('error', reject) 29 | .on('message', profile => { 30 | profiles.push(profile); 31 | }); 32 | }, Math.floor(Math.random() * durationMs)); 33 | }) 34 | .on('error', reject) 35 | .on('message', profile => { 36 | profiles.push(profile); 37 | }); 38 | }); 39 | } 40 | 41 | async function executeWorkers(nbWorkers: number, durationMs: number) { 42 | const workers = []; 43 | for (let i = 0; i < nbWorkers; i++) { 44 | workers.push(createWorker(durationMs)); 45 | } 46 | return Promise.all(workers).then(profiles => profiles.flat()); 47 | } 48 | 49 | function getCpuUsage() { 50 | const cpu = process.cpuUsage(); 51 | return cpu.user + cpu.system; 52 | } 53 | 54 | async function main(durationMs: number) { 55 | time.start({ 56 | durationMillis: durationMs * 3, 57 | intervalMicros, 58 | withContexts, 59 | collectCpuTime: withContexts, 60 | }); 61 | 62 | const cpu0 = getCpuUsage(); 63 | const nbWorkers = Number(process.argv[2] || 2); 64 | 65 | // start workers 66 | const workers = executeWorkers(nbWorkers, durationMs); 67 | 68 | const deadline = Date.now() + durationMs; 69 | // wait for all work to finish 70 | await Promise.all([bar(deadline), foo(deadline)]); 71 | const workerProfiles = await workers; 72 | 73 | // restart and check profile 74 | const profile1 = time.stop(true); 75 | const cpu1 = getCpuUsage(); 76 | 77 | workerProfiles.forEach(checkProfile); 78 | checkProfile(profile1); 79 | if (withContexts) { 80 | checkCpuTime(profile1, cpu1 - cpu0, workerProfiles); 81 | } 82 | const newDeadline = Date.now() + durationMs; 83 | await Promise.all([bar(newDeadline), foo(newDeadline)]); 84 | 85 | const profile2 = time.stop(); 86 | const cpu2 = getCpuUsage(); 87 | checkProfile(profile2); 88 | if (withContexts) { 89 | checkCpuTime(profile2, cpu2 - cpu1); 90 | } 91 | } 92 | 93 | async function worker(durationMs: number) { 94 | time.start({ 95 | durationMillis: durationMs, 96 | intervalMicros, 97 | withContexts, 98 | collectCpuTime: withContexts, 99 | }); 100 | 101 | const deadline = Date.now() + durationMs; 102 | await Promise.all([bar(deadline), foo(deadline)]); 103 | 104 | const profile = time.stop(); 105 | parentPort?.postMessage(profile); 106 | } 107 | 108 | if (isMainThread) { 109 | main(DURATION_MILLIS); 110 | } else { 111 | worker(workerData.durationMs); 112 | } 113 | 114 | function valueName(profile: Profile, vt: ValueType) { 115 | const type = getAndVerifyString(profile.stringTable!, vt, 'type'); 116 | const unit = getAndVerifyString(profile.stringTable!, vt, 'unit'); 117 | return `${type}/${unit}`; 118 | } 119 | 120 | function sampleName(profile: Profile, sampleType: ValueType[]) { 121 | return sampleType.map(valueName.bind(null, profile)); 122 | } 123 | 124 | function getCpuTime(profile: Profile) { 125 | let jsCpuTime = 0; 126 | let nonJsCpuTime = 0; 127 | if (!withContexts) return {jsCpuTime, nonJsCpuTime}; 128 | for (const sample of profile.sample!)
{ 129 | const locationId = sample.locationId[0]; 130 | const location = getAndVerifyPresence( 131 | profile.location!, 132 | locationId as number 133 | ); 134 | const functionId = location.line![0].functionId; 135 | const fn = getAndVerifyPresence(profile.function!, functionId as number); 136 | const fn_name = profile.stringTable.strings[fn.name as number]; 137 | if (fn_name === time.constants.NON_JS_THREADS_FUNCTION_NAME) { 138 | nonJsCpuTime += sample.value![2] as number; 139 | assert.strictEqual(sample.value![0], 0); 140 | assert.strictEqual(sample.value![1], 0); 141 | } else { 142 | jsCpuTime += sample.value![2] as number; 143 | } 144 | } 145 | 146 | return {jsCpuTime, nonJsCpuTime}; 147 | } 148 | 149 | function checkCpuTime( 150 | profile: Profile, 151 | processCpuTimeMicros: number, 152 | workerProfiles: Profile[] = [], 153 | maxRelativeError = 0.1 154 | ) { 155 | let workersJsCpuTime = 0; 156 | let workersNonJsCpuTime = 0; 157 | 158 | for (const workerProfile of workerProfiles) { 159 | const {jsCpuTime, nonJsCpuTime} = getCpuTime(workerProfile); 160 | workersJsCpuTime += jsCpuTime; 161 | workersNonJsCpuTime += nonJsCpuTime; 162 | } 163 | 164 | const {jsCpuTime: mainJsCpuTime, nonJsCpuTime: mainNonJsCpuTime} = 165 | getCpuTime(profile); 166 | 167 | // workers should not report non-JS CPU time 168 | assert.strictEqual( 169 | workersNonJsCpuTime, 170 | 0, 171 | 'worker non-JS CPU time should be null' 172 | ); 173 | 174 | const totalCpuTimeMicros = 175 | (mainJsCpuTime + mainNonJsCpuTime + workersJsCpuTime) / 1000; 176 | const err = 177 | Math.abs(totalCpuTimeMicros - processCpuTimeMicros) / processCpuTimeMicros; 178 | const msg = `process cpu time: ${ 179 | processCpuTimeMicros / 1000 180 | }ms\ntotal profile cpu time: ${ 181 | totalCpuTimeMicros / 1000 182 | }ms\nmain JS cpu time: ${mainJsCpuTime / 1000000}ms\nworker JS cpu time: ${ 183 | workersJsCpuTime / 1000000 184 | }\nnon-JS cpu time: ${mainNonJsCpuTime / 1000000}ms\nerror: ${err}`; 185 | assert.ok( 186 | err <= maxRelativeError, 187 | `total profile CPU time should be close to process cpu time:\n${msg}` 188 | ); 189 | } 190 | 191 | function checkProfile(profile: Profile) { 192 | assert.deepStrictEqual(sampleName(profile, profile.sampleType!), [ 193 | 'sample/count', 194 | 'wall/nanoseconds', 195 | ...(withContexts ? ['cpu/nanoseconds'] : []), 196 | ]); 197 | assert.strictEqual(typeof profile.timeNanos, 'number'); 198 | assert.strictEqual(typeof profile.durationNanos, 'number'); 199 | assert.strictEqual(typeof profile.period, 'number'); 200 | assert.strictEqual( 201 | valueName(profile, profile.periodType!), 202 | 'wall/nanoseconds' 203 | ); 204 | 205 | assert.ok(profile.sample.length > 0, 'No samples'); 206 | 207 | for (const sample of profile.sample!) { 208 | assert.deepStrictEqual(sample.label, []); 209 | 210 | for (const value of sample.value!) { 211 | assert.strictEqual(typeof value, 'number'); 212 | } 213 | 214 | for (const locationId of sample.locationId!) { 215 | const location = getAndVerifyPresence( 216 | profile.location!, 217 | locationId as number 218 | ); 219 | 220 | for (const {functionId, line} of location.line!) 
{ 221 | const fn = getAndVerifyPresence( 222 | profile.function!, 223 | functionId as number 224 | ); 225 | 226 | getAndVerifyString(profile.stringTable!, fn, 'name'); 227 | getAndVerifyString(profile.stringTable!, fn, 'systemName'); 228 | getAndVerifyString(profile.stringTable!, fn, 'filename'); 229 | assert.strictEqual(typeof line, 'number'); 230 | } 231 | } 232 | } 233 | } 234 | 235 | async function bar(deadline: number) { 236 | let done = false; 237 | setTimeout(() => { 238 | done = true; 239 | }, deadline - Date.now()); 240 | while (!done) { 241 | await new Promise(resolve => { 242 | pbkdf2('secret', 'salt', 100000, 64, 'sha512', () => { 243 | resolve(); 244 | }); 245 | }); 246 | } 247 | } 248 | 249 | function fooWork() { 250 | let sum = 0; 251 | for (let i = 0; i < 1e7; i++) { 252 | sum += sum; 253 | } 254 | return sum; 255 | } 256 | 257 | async function foo(deadline: number) { 258 | let done = false; 259 | setTimeout(() => { 260 | done = true; 261 | }, deadline - Date.now()); 262 | 263 | while (!done) { 264 | await new Promise(resolve => { 265 | fooWork(); 266 | setImmediate(() => resolve()); 267 | }); 268 | } 269 | } 270 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./node_modules/gts/tsconfig-google.json", 3 | "compilerOptions": { 4 | "rootDir": "ts", 5 | "outDir": "out", 6 | "target": "es2020", 7 | "esModuleInterop": true, 8 | }, 9 | "include": [ 10 | "ts/**/*.ts" 11 | ], 12 | "exclude": [ 13 | "node_modules" 14 | ] 15 | } 16 | --------------------------------------------------------------------------------
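For orientation, the wall-profiler flow the tests above exercise is: time.start() begins sampling, time.stop() returns a pprof-format Profile, and encode() gzips its serialized bytes (the encoder tests verify that round-trip by gunzipping and Profile.decode()-ing the result). The sketch below strings those calls together for a single main-thread capture. It is a minimal illustration only, not part of the package: it assumes it would sit alongside the test files so the relative imports mirror worker.ts and test-profile-encoder.ts, and the busy-work loop plus the wall.pb.gz output path are invented for the example.

import {writeFileSync} from 'fs';
import {time} from '../src/index';
import {encode} from '../src/profile-encoder';

async function captureWallProfile(durationMillis: number) {
  // Same option names the worker-thread test passes to time.start().
  time.start({
    durationMillis,
    intervalMicros: 10000,
    withContexts: false,
    collectCpuTime: false,
  });

  // Invented busy work so the sampler has frames to attribute.
  const deadline = Date.now() + durationMillis;
  let acc = 0;
  while (Date.now() < deadline) {
    for (let i = 0; i < 1e5; i++) acc += i;
    await new Promise<void>(resolve => setImmediate(resolve));
  }

  // stop() yields a pprof-format Profile; encode() resolves to its gzipped
  // serialized bytes, as the encoder tests check via gunzip + Profile.decode.
  const profile = time.stop();
  writeFileSync('wall.pb.gz', await encode(profile)); // illustrative output path
  return acc;
}

captureWallProfile(1000).catch(err => {
  // eslint-disable-next-line no-console
  console.error(err);
  process.exit(1);
});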