├── .devcontainer ├── compose.yml └── devcontainer.json ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml └── workflows │ ├── check-dist.yml │ ├── ci.yml │ ├── close-inactive-issues.yml │ ├── codeql-analysis.yml │ ├── example-run-action.yml │ ├── example-workflow-01.yml │ ├── example-workflow-02.yml │ ├── example-workflow-03.yml │ └── linter.yml ├── .gitignore ├── .node-version ├── .prettierignore ├── .prettierrc.json ├── LICENSE ├── README.md ├── action.yml ├── badges └── coverage.svg ├── dist ├── index.js ├── index.js.map ├── licenses.txt ├── package.json └── sourcemap-register.cjs ├── eslint.config.mjs ├── examples └── google-cloud │ ├── Dockerfile │ ├── README.md │ └── collector-config.yaml ├── img ├── actions-tab.png ├── choose-metrics.png ├── enable-workflows.png ├── filter-run-id.png ├── fork-repository.png ├── github-workflow-duration-seconds-gauge.png ├── metrics-graph.png ├── metrics-prom.png ├── repository-secret.png ├── search-trace-run-id.png ├── trace-attributes.png ├── trace-detail.png ├── traces-jager.png └── verify-completed-job.png ├── package-lock.json ├── package.json ├── script ├── release └── update ├── src ├── github │ ├── check-completed.test.ts │ ├── check-completed.ts │ ├── github.test.ts │ ├── github.ts │ ├── index.ts │ └── types.ts ├── index.test.ts ├── index.ts ├── instrumentation │ ├── index.ts │ ├── instrumentation.test.ts │ └── instrumentation.ts ├── main.test.ts ├── main.ts ├── metrics │ ├── constants.ts │ ├── create-gauges.test.ts │ ├── create-gauges.ts │ ├── create-metrics.test.ts │ ├── create-metrics.ts │ └── index.ts ├── settings.test.ts ├── settings.ts ├── traces │ ├── create-spans.ts │ ├── create-trace.test.ts │ ├── create-trace.ts │ └── index.ts └── utils │ ├── calc-diff-sec.test.ts │ ├── calc-diff-sec.ts │ └── opentelemetry-all-disable.ts ├── tsconfig.json └── vitest.config.ts /.devcontainer/compose.yml: -------------------------------------------------------------------------------- 1 | # FYI: https://containers.dev/guide/dockerfile#docker-compose 2 | version: '3' 3 | services: 4 | dev-container: 5 | image: mcr.microsoft.com/devcontainers/typescript-node:20 6 | container_name: dev_container 7 | command: sleep infinity 8 | volumes: 9 | - ../..:/workspaces:cached 10 | networks: 11 | - default 12 | prometheus: 13 | image: prom/prometheus:v2.54.1 14 | ports: 15 | - '9090:9090' 16 | command: 17 | - '--enable-feature=otlp-write-receiver' 18 | - '--config.file=/etc/prometheus/prometheus.yml' 19 | networks: 20 | - default 21 | jaeger: 22 | image: jaegertracing/all-in-one:1.61.0 23 | ports: 24 | - '16686:16686' 25 | - '4317:4317' 26 | - '4318:4318' 27 | networks: 28 | - default 29 | networks: 30 | default: 31 | driver: bridge 32 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "GitHub Actions (TypeScript)", 3 | "workspaceFolder": "/workspaces/github-actions-opentelemetry", 4 | "postAttachCommand": { 5 | "npm": "npm install", 6 | "add-safe-directory": "git config --global --add safe.directory ${containerWorkspaceFolder}" 7 | }, 8 | "dockerComposeFile": ["./compose.yml"], 9 | "service": "dev-container", 10 | "customizations": { 11 | "codespaces": { 12 | "openFiles": ["README.md"] 13 | }, 14 | "vscode": { 15 | "extensions": [ 16 | "bierner.markdown-preview-github-styles", 17 | "davidanson.vscode-markdownlint", 18 | 
"dbaeumer.vscode-eslint", 19 | "esbenp.prettier-vscode", 20 | "github.copilot", 21 | "github.copilot-chat", 22 | "github.vscode-github-actions", 23 | "github.vscode-pull-request-github", 24 | "me-dutour-mathieu.vscode-github-actions", 25 | "redhat.vscode-yaml", 26 | "rvest.vs-code-prettier-eslint", 27 | "yzhang.markdown-all-in-one", 28 | "streetsidesoftware.code-spell-checker", 29 | "vitest.explorer" 30 | ], 31 | "settings": { 32 | "editor.defaultFormatter": "esbenp.prettier-vscode", 33 | "editor.tabSize": 2, 34 | "editor.formatOnSave": true, 35 | "markdown.extension.list.indentationSize": "adaptive", 36 | "markdown.extension.italic.indicator": "_", 37 | "markdown.extension.orderedList.marker": "one" 38 | } 39 | } 40 | }, 41 | "remoteEnv": { 42 | "GITHUB_TOKEN": "${localEnv:GITHUB_TOKEN}" 43 | }, 44 | "features": { 45 | "ghcr.io/devcontainers/features/github-cli:1": {}, 46 | "ghcr.io/devcontainers-contrib/features/prettier:1": {} 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | 3 | dist/** -diff linguist-generated=true 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | --- 8 | 9 | Thank you for reporting on this issue! Please fit under the template :) 10 | 11 | **Describe the bug** 12 | A clear and concise description of what the bug is. 13 | 14 | **To Reproduce** 15 | Steps to reproduce the behavior. 16 | 17 | **Expected behavior** 18 | A clear and concise description of what you expected to happen. 19 | 20 | **Logs or Screenshots** 21 | If applicable, add logs of github-actions-opentelemetry or screenshots to help 22 | explain your problem. 23 | 24 | **Additional context** 25 | Add any other context about the problem here. 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | --- 8 | 9 | **Is your feature request related to a problem? Please describe.** 10 | A clear and concise description of what the problem is. Ex. I'm always 11 | frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've 18 | considered. 19 | 20 | **Additional context** 21 | Add any other context or screenshots about the feature request here. 
22 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: monthly 7 | groups: 8 | actions-minor: 9 | update-types: 10 | - minor 11 | - patch 12 | - major 13 | 14 | - package-ecosystem: npm 15 | directory: / 16 | schedule: 17 | interval: monthly 18 | groups: 19 | npm: 20 | update-types: 21 | - minor 22 | - patch 23 | -------------------------------------------------------------------------------- /.github/workflows/check-dist.yml: -------------------------------------------------------------------------------- 1 | # In TypeScript actions, `dist/` is a special directory. When you reference 2 | # an action with the `uses:` property, `dist/index.js` is the code that will be 3 | # run. For this project, the `dist/index.js` file is transpiled from other 4 | # source files. This workflow ensures the `dist/` directory contains the 5 | # expected transpiled code. 6 | # 7 | # If this workflow is run from a feature branch, it will act as an additional CI 8 | # check and fail if the checked-in `dist/` directory does not match what is 9 | # expected from the build. 10 | name: Check Transpiled JavaScript 11 | 12 | on: 13 | pull_request: 14 | push: 15 | 16 | permissions: 17 | contents: read 18 | 19 | jobs: 20 | check-dist: 21 | name: Check dist/ 22 | runs-on: ubuntu-latest 23 | 24 | steps: 25 | - name: Checkout 26 | id: checkout 27 | uses: actions/checkout@v4 28 | 29 | - name: Setup Node.js 30 | id: setup-node 31 | uses: actions/setup-node@v4 32 | with: 33 | node-version-file: .node-version 34 | cache: npm 35 | 36 | - name: Install Dependencies 37 | id: install 38 | run: npm ci 39 | 40 | - name: Build dist/ Directory 41 | id: build 42 | run: npm run bundle 43 | 44 | # This will fail the workflow if the `dist/` directory is different than 45 | # expected. 46 | - name: Compare Directories 47 | id: diff 48 | run: | 49 | if [ ! -d dist/ ]; then 50 | echo "Expected dist/ directory does not exist. See status below:" 51 | ls -la ./ 52 | exit 1 53 | fi 54 | if [ "$(git diff --ignore-space-at-eol --text dist/ | wc -l)" -gt "0" ]; then 55 | echo "Detected uncommitted changes after build. See status below:" 56 | git diff --ignore-space-at-eol --text dist/ 57 | exit 1 58 | fi 59 | 60 | # If `dist/` was different than expected, upload the expected version as a 61 | # workflow artifact. 62 | - if: ${{ failure() && steps.diff.outcome == 'failure' }} 63 | name: Upload Artifact 64 | id: upload 65 | uses: actions/upload-artifact@v4 66 | with: 67 | name: dist 68 | path: dist/ 69 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | 3 | on: 4 | pull_request: 5 | push: 6 | 7 | permissions: 8 | contents: read 9 | 10 | jobs: 11 | test-typescript: 12 | name: TypeScript Tests 13 | runs-on: ubuntu-latest 14 | services: 15 | # Anything is fine as long as it accepts metrics and traces with OTLP because the tests do not check stored data. 
16 | collector: 17 | image: otel/opentelemetry-collector-contrib:0.115.1 18 | ports: 19 | - 4318:4318 20 | 21 | steps: 22 | - name: Checkout 23 | id: checkout 24 | uses: actions/checkout@v4 25 | 26 | - name: Setup Node.js 27 | id: setup-node 28 | uses: actions/setup-node@v4 29 | with: 30 | node-version-file: .node-version 31 | cache: npm 32 | 33 | - name: Install Dependencies 34 | id: npm-ci 35 | run: npm ci 36 | 37 | - name: Check Format 38 | id: npm-format-check 39 | run: npm run format:check 40 | 41 | - name: Lint 42 | id: npm-lint 43 | run: npm run lint 44 | 45 | - name: Test 46 | id: npm-test 47 | env: 48 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 49 | run: npm run test 50 | 51 | test-action: 52 | name: GitHub Actions Test 53 | runs-on: ubuntu-latest 54 | services: 55 | # Anything is fine as long as it accepts metrics and traces with OTLP because the tests do not check stored data. 56 | collector: 57 | image: otel/opentelemetry-collector-contrib:0.115.1 58 | ports: 59 | - 4318:4318 60 | steps: 61 | - name: Checkout 62 | id: checkout 63 | uses: actions/checkout@v4 64 | 65 | - name: Test Local Action 66 | id: test-action 67 | uses: ./ 68 | env: 69 | OTEL_SERVICE_NAME: github-actions-opentelemetry 70 | # If this job is removed, update it. 71 | # https://github.com/paper2/github-actions-opentelemetry/actions/runs/10856659171 72 | OWNER: paper2 73 | REPOSITORY: github-actions-opentelemetry 74 | WORKFLOW_RUN_ID: 10856659171 75 | 76 | with: 77 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 78 | -------------------------------------------------------------------------------- /.github/workflows/close-inactive-issues.yml: -------------------------------------------------------------------------------- 1 | name: Close inactive issues 2 | on: 3 | schedule: 4 | - cron: '33 1 * * *' 5 | 6 | jobs: 7 | close-issues: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | steps: 13 | - uses: actions/stale@v9 14 | with: 15 | days-before-issue-stale: 30 16 | days-before-issue-close: 14 17 | stale-issue-label: 'stale' 18 | stale-issue-message: 19 | 'This issue is stale because it has been open for 30 days with no 20 | activity.' 21 | close-issue-message: 22 | 'This issue was closed because it has been inactive for 14 days 23 | since being marked as stale.' 
24 | days-before-pr-stale: -1 25 | days-before-pr-close: -1 26 | repo-token: ${{ secrets.GITHUB_TOKEN }} 27 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: CodeQL 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | schedule: 11 | - cron: '31 7 * * 3' 12 | 13 | permissions: 14 | actions: read 15 | checks: write 16 | contents: read 17 | security-events: write 18 | 19 | jobs: 20 | analyze: 21 | name: Analyze 22 | runs-on: ubuntu-latest 23 | 24 | strategy: 25 | fail-fast: false 26 | matrix: 27 | language: 28 | - TypeScript 29 | 30 | steps: 31 | - name: Checkout 32 | id: checkout 33 | uses: actions/checkout@v4 34 | 35 | - name: Initialize CodeQL 36 | id: initialize 37 | uses: github/codeql-action/init@v3 38 | with: 39 | languages: ${{ matrix.language }} 40 | source-root: src 41 | 42 | - name: Autobuild 43 | id: autobuild 44 | uses: github/codeql-action/autobuild@v3 45 | 46 | - name: Perform CodeQL Analysis 47 | id: analyze 48 | uses: github/codeql-action/analyze@v3 49 | -------------------------------------------------------------------------------- /.github/workflows/example-run-action.yml: -------------------------------------------------------------------------------- 1 | name: Send Telemetry after Other Workflow Example 2 | 3 | on: 4 | workflow_run: 5 | # Specify the workflows you want to collect telemetry. 6 | workflows: 7 | - Example Workflow 01 8 | - Example Workflow 02 9 | - Example Workflow 03 10 | types: 11 | - completed 12 | 13 | permissions: 14 | # Required for private repositories 15 | actions: read 16 | 17 | jobs: 18 | send-telemetry: 19 | name: Send CI Telemetry 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Run 23 | id: run 24 | uses: paper2/github-actions-opentelemetry@main 25 | env: 26 | OTEL_EXPORTER_OTLP_ENDPOINT: 27 | ${{ secrets.OTEL_EXPORTER_OTLP_ENDPOINT }} 28 | OTEL_SERVICE_NAME: github-actions-opentelemetry 29 | with: 30 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 31 | -------------------------------------------------------------------------------- /.github/workflows/example-workflow-01.yml: -------------------------------------------------------------------------------- 1 | name: Example Workflow 01 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - getting-started 8 | 9 | env: 10 | FIXED_SLEEP: 1 11 | RANDOM_SLEEP: 3 12 | 13 | jobs: 14 | test: 15 | name: Test Application 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | id: checkout 20 | run: sleep 2 21 | - name: Setup 22 | id: setup 23 | run: sleep 2 24 | - name: Install Dependencies 25 | id: install 26 | run: sleep 3 27 | - name: Lint 28 | id: lint 29 | run: sleep 2 30 | - name: Test 31 | id: test 32 | # add a random delay for changing metrics and traces 33 | run: 34 | sleep $(($RANDOM % ${{ env.RANDOM_SLEEP }} + ${{ env.FIXED_SLEEP }})) 35 | build: 36 | name: Build Application 37 | runs-on: ubuntu-latest 38 | steps: 39 | - name: Checkout 40 | id: checkout 41 | run: sleep 2 42 | - name: Setup 43 | id: setup 44 | run: sleep 2 45 | - name: Install Dependencies 46 | id: install 47 | run: sleep 3 48 | - name: Build 49 | id: build 50 | # add a random delay for changing metrics and traces 51 | run: 52 | sleep $(($RANDOM % ${{ env.RANDOM_SLEEP }} + ${{ env.FIXED_SLEEP }})) 53 | deploy: 54 | name: Deploy Application 55 | needs: [test, build] 56 | runs-on: ubuntu-latest 57 | steps: 58 | - name: 
Deploy 59 | id: deploy 60 | run: sleep 2 61 | -------------------------------------------------------------------------------- /.github/workflows/example-workflow-02.yml: -------------------------------------------------------------------------------- 1 | name: Example Workflow 02 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - getting-started 8 | 9 | env: 10 | FIXED_SLEEP: 2 11 | RANDOM_SLEEP: 3 12 | 13 | jobs: 14 | test: 15 | name: Test Application 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | id: checkout 20 | run: sleep 2 21 | - name: Setup 22 | id: setup 23 | run: sleep 2 24 | - name: Install Dependencies 25 | id: install 26 | run: sleep 3 27 | - name: Lint 28 | id: lint 29 | run: sleep 2 30 | - name: Test 31 | id: test 32 | # add a random delay for changing metrics and traces 33 | run: 34 | sleep $(($RANDOM % ${{ env.RANDOM_SLEEP }} + ${{ env.FIXED_SLEEP }})) 35 | build: 36 | name: Build Application 37 | runs-on: ubuntu-latest 38 | steps: 39 | - name: Checkout 40 | id: checkout 41 | run: sleep 2 42 | - name: Setup 43 | id: setup 44 | run: sleep 2 45 | - name: Install Dependencies 46 | id: install 47 | run: sleep 3 48 | - name: Build 49 | id: build 50 | # add a random delay for changing metrics and traces 51 | run: 52 | sleep $(($RANDOM % ${{ env.RANDOM_SLEEP }} + ${{ env.FIXED_SLEEP }})) 53 | deploy: 54 | name: Deploy Application 55 | needs: [test, build] 56 | runs-on: ubuntu-latest 57 | steps: 58 | - name: Deploy 59 | id: deploy 60 | run: sleep 2 61 | -------------------------------------------------------------------------------- /.github/workflows/example-workflow-03.yml: -------------------------------------------------------------------------------- 1 | name: Example Workflow 03 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - getting-started 8 | 9 | env: 10 | FIXED_SLEEP: 3 11 | RANDOM_SLEEP: 3 12 | 13 | jobs: 14 | test: 15 | name: Test Application 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | id: checkout 20 | run: sleep 2 21 | - name: Setup 22 | id: setup 23 | run: sleep 2 24 | - name: Install Dependencies 25 | id: install 26 | run: sleep 3 27 | - name: Lint 28 | id: lint 29 | run: sleep 2 30 | - name: Test 31 | id: test 32 | # add a random delay for changing metrics and traces 33 | run: 34 | sleep $(($RANDOM % ${{ env.RANDOM_SLEEP }} + ${{ env.FIXED_SLEEP }})) 35 | build: 36 | name: Build Application 37 | runs-on: ubuntu-latest 38 | steps: 39 | - name: Checkout 40 | id: checkout 41 | run: sleep 2 42 | - name: Setup 43 | id: setup 44 | run: sleep 2 45 | - name: Install Dependencies 46 | id: install 47 | run: sleep 3 48 | - name: Build 49 | id: build 50 | # add a random delay for changing metrics and traces 51 | run: 52 | sleep $(($RANDOM % ${{ env.RANDOM_SLEEP }} + ${{ env.FIXED_SLEEP }})) 53 | deploy: 54 | name: Deploy Application 55 | needs: [test, build] 56 | runs-on: ubuntu-latest 57 | steps: 58 | - name: Deploy 59 | id: deploy 60 | run: sleep 2 61 | -------------------------------------------------------------------------------- /.github/workflows/linter.yml: -------------------------------------------------------------------------------- 1 | name: Lint Codebase 2 | 3 | on: 4 | pull_request: 5 | 6 | permissions: 7 | contents: read 8 | packages: read 9 | statuses: write 10 | 11 | jobs: 12 | lint: 13 | name: Lint Codebase 14 | runs-on: ubuntu-latest 15 | 16 | permissions: 17 | # To write linting fixes 18 | contents: write 19 | packages: read 20 | 21 | steps: 22 | - name: Checkout code 23 | uses: actions/checkout@v4 24 | 25 
| - name: Setup Node.js 26 | id: setup-node 27 | uses: actions/setup-node@v4 28 | with: 29 | node-version-file: .node-version 30 | cache: npm 31 | 32 | - name: Install Dependencies 33 | id: install 34 | run: npm ci 35 | 36 | - name: lint 37 | id: lint 38 | run: npm run lint 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependency directory 2 | node_modules 3 | 4 | # Rest pulled from https://github.com/github/gitignore/blob/master/Node.gitignore 5 | # Logs 6 | logs 7 | *.log 8 | npm-debug.log* 9 | yarn-debug.log* 10 | yarn-error.log* 11 | lerna-debug.log* 12 | 13 | # Diagnostic reports (https://nodejs.org/api/report.html) 14 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 15 | 16 | # Runtime data 17 | pids 18 | *.pid 19 | *.seed 20 | *.pid.lock 21 | 22 | # Directory for instrumented libs generated by jscoverage/JSCover 23 | lib-cov 24 | 25 | # Coverage directory used by tools like istanbul 26 | coverage 27 | *.lcov 28 | 29 | # nyc test coverage 30 | .nyc_output 31 | 32 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 33 | .grunt 34 | 35 | # Bower dependency directory (https://bower.io/) 36 | bower_components 37 | 38 | # node-waf configuration 39 | .lock-wscript 40 | 41 | # Compiled binary addons (https://nodejs.org/api/addons.html) 42 | build/Release 43 | 44 | # Dependency directories 45 | jspm_packages/ 46 | 47 | # TypeScript v1 declaration files 48 | typings/ 49 | 50 | # TypeScript cache 51 | *.tsbuildinfo 52 | 53 | # Optional npm cache directory 54 | .npm 55 | 56 | # Optional eslint cache 57 | .eslintcache 58 | 59 | # Optional REPL history 60 | .node_repl_history 61 | 62 | # Output of 'npm pack' 63 | *.tgz 64 | 65 | # Yarn Integrity file 66 | .yarn-integrity 67 | 68 | # dotenv environment variables file 69 | .env 70 | .env.test 71 | 72 | # parcel-bundler cache (https://parceljs.org/) 73 | .cache 74 | 75 | # next.js build output 76 | .next 77 | 78 | # nuxt.js build output 79 | .nuxt 80 | 81 | # vuepress build output 82 | .vuepress/dist 83 | 84 | # Serverless directories 85 | .serverless/ 86 | 87 | # FuseBox cache 88 | .fusebox/ 89 | 90 | # DynamoDB Local files 91 | .dynamodb/ 92 | 93 | # OS metadata 94 | .DS_Store 95 | Thumbs.db 96 | 97 | # Ignore built ts files 98 | __tests__/runner/* 99 | 100 | # IDE files 101 | .idea 102 | .vscode 103 | *.code-workspace 104 | 105 | # Super Linter Output 106 | github_conf/ -------------------------------------------------------------------------------- /.node-version: -------------------------------------------------------------------------------- 1 | 20.16.0 2 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | dist/ 2 | node_modules/ 3 | coverage/ 4 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 80, 3 | "tabWidth": 2, 4 | "useTabs": false, 5 | "semi": false, 6 | "singleQuote": true, 7 | "quoteProps": "as-needed", 8 | "jsxSingleQuote": false, 9 | "trailingComma": "none", 10 | "bracketSpacing": true, 11 | "bracketSameLine": true, 12 | "arrowParens": "avoid", 13 | "proseWrap": "always", 14 | "htmlWhitespaceSensitivity": "css", 15 | "endOfLine": "lf" 16 | } 17 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright Yohei Kamitsukasa & GitHub 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GitHub Actions OpenTelemetry 2 | 3 | ![CI](https://github.com/actions/typescript-action/actions/workflows/ci.yml/badge.svg) 4 | [![Check dist/](https://github.com/actions/typescript-action/actions/workflows/check-dist.yml/badge.svg)](https://github.com/actions/typescript-action/actions/workflows/check-dist.yml) 5 | [![CodeQL](https://github.com/actions/typescript-action/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/actions/typescript-action/actions/workflows/codeql-analysis.yml) 6 | [![Coverage](./badges/coverage.svg)](./badges/coverage.svg) 7 | 8 | This action sends metrics and traces of GitHub Actions to an OpenTelemetry 9 | Protocol (OTLP) endpoint. It helps you monitor and analyze GitHub Actions. 10 | 11 | ## Features Summary 12 | 13 | - 📊 Collects Metrics of GitHub Actions workflows and job execution times 14 | - 🔍 Collects Traces of GitHub Actions workflows, jobs, and steps 15 | - 📦 Sends data to any OTLP-compatible backend for monitoring and observability 16 | - 🚀 Collects telemetry without modifying existing workflows 17 | 18 | ## Metrics 19 | 20 | | Descriptor Name              | Description            | 21 | | ---------------------------- | ---------------------- | 22 | | `github.workflow.duration`   | Duration of workflow   | 23 | | `github.job.duration`        | Duration of job        | 24 | | `github.job.queued_duration` | Duration of queued job | 25 | 26 | Each metric has associated attributes. 27 | 28 | ![Prometheus Example Screen Shot](./img/metrics-prom.png) 29 | 30 | ## Traces 31 | 32 | ![Jaeger Example Screen Shot](./img/traces-jager.png) 33 | 34 | ![Attributes Sample](./img/trace-attributes.png) 35 | 36 | You can find a trace by the `run_id` attribute attached to the root span. 37 | `run_id` is visible in the workflow results URL. For example, if the URL is: 38 | 39 | ```txt 40 | https://github.com/paper2/github-actions-opentelemetry/actions/runs/12246387114 41 | ``` 42 | 43 | Then the `run_id` is `12246387114`. 
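For convenience, here is a tiny TypeScript helper (illustrative only, not part of this action; the function name is an assumption) that pulls the `run_id` out of such a URL before you search for the trace:

```typescript
// Illustrative helper (not part of this action): extract the run_id from a
// GitHub Actions run URL so it can be pasted into a trace search by attribute.
const extractRunId = (url: string): string | undefined =>
  /\/actions\/runs\/(\d+)/.exec(url)?.[1]

// Prints '12246387114'
console.log(
  extractRunId(
    'https://github.com/paper2/github-actions-opentelemetry/actions/runs/12246387114'
  )
)
```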
44 | 45 | ![search-trace-run-id](./img/search-trace-run-id.png) 46 | 47 | ## How it works 48 | 49 | This action creates metrics and traces of GitHub Actions workflows and sends 50 | them to an OTLP endpoint. It uses the GitHub API to collect data about completed 51 | workflows and jobs. The action then sends this data to the OTLP endpoint for 52 | monitoring and observability. (A minimal code sketch of this flow appears near the end of this README.) 53 | 54 | ```mermaid 55 | sequenceDiagram 56 | participant TW as Target Workflow 57 | participant GAOW as GitHub Actions OpenTelemetry Workflow 58 | participant GA as GitHub API 59 | participant OE as OTLP Endpoint 60 | 61 | TW ->> TW: Complete Workflow 62 | TW ->> GAOW: Trigger by workflow_run 63 | GAOW ->> GA: Get Target Workflow Data 64 | GAOW ->> OE: Send Metrics and Traces 65 | ``` 66 | 67 | ## Setup Instructions 68 | 69 | 1. **Create OTLP Endpoint**: Set up an OTLP backend to receive telemetry data 70 | (e.g., Jaeger, Prometheus, or other monitoring tools). 71 | 1. **Add a Workflow**: Create a new workflow file and use this action triggered 72 | by 73 | [workflow_run](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#workflow_run) 74 | because this action collects telemetry of completed workflows. 75 | 76 | ### GitHub Actions Example 77 | 78 | Here's an example of how to set up this action in a GitHub Actions workflow: 79 | 80 | ```yaml 81 | name: Send Telemetry after Other Workflow 82 | 83 | on: 84 | workflow_run: 85 | # Specify the workflows you want to collect telemetry. 86 | workflows: 87 | - Check Transpiled JavaScript 88 | - Continuous Integration 89 | - CodeQL 90 | - Lint Codebase 91 | # This action uses completed workflows to create traces and metrics. 92 | types: 93 | - completed 94 | 95 | permissions: 96 | # Required for private repositories 97 | actions: read 98 | 99 | jobs: 100 | send-telemetry: 101 | name: Send CI Telemetry 102 | runs-on: ubuntu-latest 103 | steps: 104 | - name: Run 105 | id: run 106 | uses: paper2/github-actions-opentelemetry@main 107 | env: 108 | OTEL_SERVICE_NAME: github-actions-opentelemetry 109 | OTEL_EXPORTER_OTLP_ENDPOINT: https://collector-example.com 110 | # Additional OTLP headers. Useful for OTLP authentication. 111 | # e.g. 112 | # New Relic: api-key=YOUR_NEWRELIC_API_KEY 113 | # Google Cloud Run: Authorization=Bearer 114 | # Basic Authentication: Authorization=Basic 115 | OTEL_EXPORTER_OTLP_HEADERS: 116 | api-key=${{ secrets.API_KEY }},other-config-value=value 117 | with: 118 | # Required for collecting workflow data 119 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 120 | ``` 121 | 122 | ### Configuration 123 | 124 | To configure the action, you need to set the following environment variables: 125 | 126 | | Environment Variable                  | Required | Default Value | Description | 127 | | ------------------------------------- | -------- | ------------- | ------------------------------------------------------------------------------------------------ | 128 | | `OTEL_SERVICE_NAME`                   | Yes      | -             | Service name. | 129 | | `OTEL_EXPORTER_OTLP_ENDPOINT`         | Yes      | -             | OTLP Endpoint for Traces and Metrics. e.g., <https://collector-example.com> | 130 | | `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` | No       | -             | OTLP Endpoint for Metrics instead of OTEL_EXPORTER_OTLP_ENDPOINT. | 131 | | `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`  | No       | -             | OTLP Endpoint for Traces instead of OTEL_EXPORTER_OTLP_ENDPOINT. | 132 | | `OTEL_EXPORTER_OTLP_HEADERS`          | No       | -             | Additional OTLP headers. Useful for authentication. 
e.g., "api-key=key,other-config-value=value" | 133 | | `FEATURE_TRACE`                       | No       | `true`        | Enable the trace feature. | 134 | | `FEATURE_METRICS`                     | No       | `true`        | Enable the metrics feature. | 135 | | `OTEL_LOG_LEVEL`                      | No       | `info`        | Log level. | 136 | 137 | ### Getting Started 138 | 139 | We prepared a [Getting Started](./examples/google-cloud/README.md) guide for 140 | creating an OpenTelemetry backend and running this action on Google Cloud. 141 | 142 | ## Limitations 143 | 144 | There are some limitations that come from the GitHub Actions specification. See the 145 | [Specification](https://github.com/paper2/github-actions-opentelemetry/wiki/Specification) 146 | page for details. 147 | 148 | ## Development 149 | 150 | ### Dev Container 151 | 152 | You can run the development containers with a 153 | [devcontainer](https://code.visualstudio.com/docs/devcontainers/containers). 154 | 155 | - Jaeger and Prometheus run for local testing. 156 | - Jaeger: <http://localhost:16686> 157 | - Prometheus: <http://localhost:9090> 158 | 159 | ### Local test 160 | 161 | You can run all tests with the command below. 162 | 163 | ```sh 164 | npm run test 165 | ``` 166 | 167 | You can also run a simple test, which is useful for checking output while developing. 168 | 169 | ```sh 170 | npm run test-local 171 | ``` 172 | 173 | ### Compile 174 | 175 | TypeScript code must be compiled by ncc. If you have changed the code, run the 176 | command below. 177 | 178 | ```sh 179 | npm run all 180 | ``` 181 | 182 | > [!NOTE] 183 | > This command creates `index.js` and other files in the `/dist` directory. You must 184 | > include these artifacts in your commit because GitHub Actions uses these files. 185 | 186 | ### Recommended: Install GitHub CLI (gh) 187 | 188 | Tests invoke the real GitHub API. Unauthenticated users are subject to strict API 189 | rate limits. If the `gh` command is installed and you are logged in, a token is 190 | automatically set for the tests by `vitest.config.ts`. 191 | 192 | The login command is below. 193 | 194 | ```sh 195 | gh auth login 196 | ``` 197 | 198 | If you face the error below, we recommend installing the GitHub CLI and logging in. 199 | 200 | ```text 201 | message: "API rate limit exceeded for xx.xx.xx.xx. (But here's the good news: Authenticated requests get a higher rate limit. Check out the documentation for more details.)", 202 | documentation_url: 'https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting' 203 | ``` 204 | 205 | ### Default Environment Variables for Testing 206 | 207 | Some environment variables are set in `vitest.config.ts`. 208 | 209 | ## License 210 | 211 | This project is licensed under the MIT License. See the [LICENSE](./LICENSE) 212 | file for details. 213 | 214 | ## Contributing 215 | 216 | Contributions are welcome! Please fork the repository and submit a pull request. 217 | Before contributing, ensure that your changes are well-documented and tested. 218 | 219 | ## Support 220 | 221 | If you encounter any issues or have questions, feel free to open an issue in the 222 | repository. We will do our best to assist you promptly. 
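## Appendix: Telemetry Collection Sketch

To make the "How it works" flow above concrete, here is a minimal, self-contained TypeScript sketch. It is not this action's real implementation: the function name `collectWorkflowTelemetry` and the attribute keys are illustrative assumptions, only the `github.job.duration` metric from the table above is shown, and an OpenTelemetry SDK MeterProvider with an OTLP exporter must be registered separately for anything to actually be sent.

```typescript
// Minimal sketch, not the action's real code: read a completed run from the
// GitHub API, derive durations, and record them via the OpenTelemetry API.
// Without an SDK MeterProvider + OTLP exporter, the record() calls are no-ops.
import { Octokit } from '@octokit/rest'
import { metrics } from '@opentelemetry/api'

const secondsBetween = (start: string, end: string): number =>
  (new Date(end).getTime() - new Date(start).getTime()) / 1000

async function collectWorkflowTelemetry(
  owner: string,
  repo: string,
  runId: number
): Promise<void> {
  const octokit = new Octokit({ auth: process.env.GITHUB_TOKEN })
  const meter = metrics.getMeter('github-actions-opentelemetry-sketch')
  const jobDuration = meter.createHistogram('github.job.duration', {
    unit: 's',
    description: 'Duration of job'
  })

  // 1. Read the completed workflow run and its jobs from the GitHub API.
  const { data: run } = await octokit.rest.actions.getWorkflowRun({
    owner,
    repo,
    run_id: runId
  })
  const { data: jobList } = await octokit.rest.actions.listJobsForWorkflowRun({
    owner,
    repo,
    run_id: runId
  })

  // 2. Derive durations and record them through the OpenTelemetry metrics API.
  for (const job of jobList.jobs) {
    if (!job.completed_at) continue // skip jobs that have not finished
    jobDuration.record(secondsBetween(job.started_at, job.completed_at), {
      repository: `${owner}/${repo}`,
      'workflow.name': run.name ?? '',
      'job.name': job.name
    })
  }
}

// Example: the workflow run referenced by this repository's own CI test.
collectWorkflowTelemetry('paper2', 'github-actions-opentelemetry', 10856659171)
  .catch(console.error)
```

The real action additionally builds traces for the workflow, its jobs, and steps, and records the `github.workflow.duration` and `github.job.queued_duration` metrics listed above.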
223 | -------------------------------------------------------------------------------- /action.yml: -------------------------------------------------------------------------------- 1 | name: 'github-actions-opentelemetry' 2 | description: 'A GitHub Action that sends telemetry data by OpenTelemetry' 3 | author: '35333687+paper2@users.noreply.github.com' 4 | 5 | branding: 6 | icon: 'heart' 7 | color: 'red' 8 | 9 | inputs: 10 | GITHUB_TOKEN: 11 | description: 'GitHub token for client authentication' 12 | required: true 13 | 14 | runs: 15 | using: node20 16 | main: dist/index.js 17 | -------------------------------------------------------------------------------- /badges/coverage.svg: -------------------------------------------------------------------------------- 1 | Coverage: 96.2%Coverage96.2% -------------------------------------------------------------------------------- /dist/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "module" 3 | } 4 | -------------------------------------------------------------------------------- /dist/sourcemap-register.cjs: -------------------------------------------------------------------------------- 1 | (()=>{var e={296:e=>{var r=Object.prototype.toString;var n=typeof Buffer!=="undefined"&&typeof Buffer.alloc==="function"&&typeof Buffer.allocUnsafe==="function"&&typeof Buffer.from==="function";function isArrayBuffer(e){return r.call(e).slice(8,-1)==="ArrayBuffer"}function fromArrayBuffer(e,r,t){r>>>=0;var o=e.byteLength-r;if(o<0){throw new RangeError("'offset' is out of bounds")}if(t===undefined){t=o}else{t>>>=0;if(t>o){throw new RangeError("'length' is out of bounds")}}return n?Buffer.from(e.slice(r,r+t)):new Buffer(new Uint8Array(e.slice(r,r+t)))}function fromString(e,r){if(typeof r!=="string"||r===""){r="utf8"}if(!Buffer.isEncoding(r)){throw new TypeError('"encoding" must be a valid string encoding')}return n?Buffer.from(e,r):new Buffer(e,r)}function bufferFrom(e,r,t){if(typeof e==="number"){throw new TypeError('"value" argument must not be a number')}if(isArrayBuffer(e)){return fromArrayBuffer(e,r,t)}if(typeof e==="string"){return fromString(e,r)}return n?Buffer.from(e):new Buffer(e)}e.exports=bufferFrom},599:(e,r,n)=>{e=n.nmd(e);var t=n(927).SourceMapConsumer;var o=n(928);var i;try{i=n(896);if(!i.existsSync||!i.readFileSync){i=null}}catch(e){}var a=n(296);function dynamicRequire(e,r){return e.require(r)}var u=false;var s=false;var l=false;var c="auto";var p={};var f={};var g=/^data:application\/json[^,]+base64,/;var d=[];var h=[];function isInBrowser(){if(c==="browser")return true;if(c==="node")return false;return typeof window!=="undefined"&&typeof XMLHttpRequest==="function"&&!(window.require&&window.module&&window.process&&window.process.type==="renderer")}function hasGlobalProcessEventEmitter(){return typeof process==="object"&&process!==null&&typeof process.on==="function"}function globalProcessVersion(){if(typeof process==="object"&&process!==null){return process.version}else{return""}}function globalProcessStderr(){if(typeof process==="object"&&process!==null){return process.stderr}}function globalProcessExit(e){if(typeof process==="object"&&process!==null&&typeof process.exit==="function"){return process.exit(e)}}function handlerExec(e){return function(r){for(var n=0;n"}var n=this.getLineNumber();if(n!=null){r+=":"+n;var t=this.getColumnNumber();if(t){r+=":"+t}}}var o="";var i=this.getFunctionName();var a=true;var u=this.isConstructor();var s=!(this.isToplevel()||u);if(s){var 
l=this.getTypeName();if(l==="[object Object]"){l="null"}var c=this.getMethodName();if(i){if(l&&i.indexOf(l)!=0){o+=l+"."}o+=i;if(c&&i.indexOf("."+c)!=i.length-c.length-1){o+=" [as "+c+"]"}}else{o+=l+"."+(c||"")}}else if(u){o+="new "+(i||"")}else if(i){o+=i}else{o+=r;a=false}if(a){o+=" ("+r+")"}return o}function cloneCallSite(e){var r={};Object.getOwnPropertyNames(Object.getPrototypeOf(e)).forEach((function(n){r[n]=/^(?:is|get)/.test(n)?function(){return e[n].call(e)}:e[n]}));r.toString=CallSiteToString;return r}function wrapCallSite(e,r){if(r===undefined){r={nextPosition:null,curPosition:null}}if(e.isNative()){r.curPosition=null;return e}var n=e.getFileName()||e.getScriptNameOrSourceURL();if(n){var t=e.getLineNumber();var o=e.getColumnNumber()-1;var i=/^v(10\.1[6-9]|10\.[2-9][0-9]|10\.[0-9]{3,}|1[2-9]\d*|[2-9]\d|\d{3,}|11\.11)/;var a=i.test(globalProcessVersion())?0:62;if(t===1&&o>a&&!isInBrowser()&&!e.isEval()){o-=a}var u=mapSourcePosition({source:n,line:t,column:o});r.curPosition=u;e=cloneCallSite(e);var s=e.getFunctionName;e.getFunctionName=function(){if(r.nextPosition==null){return s()}return r.nextPosition.name||s()};e.getFileName=function(){return u.source};e.getLineNumber=function(){return u.line};e.getColumnNumber=function(){return u.column+1};e.getScriptNameOrSourceURL=function(){return u.source};return e}var l=e.isEval()&&e.getEvalOrigin();if(l){l=mapEvalOrigin(l);e=cloneCallSite(e);e.getEvalOrigin=function(){return l};return e}return e}function prepareStackTrace(e,r){if(l){p={};f={}}var n=e.name||"Error";var t=e.message||"";var o=n+": "+t;var i={nextPosition:null,curPosition:null};var a=[];for(var u=r.length-1;u>=0;u--){a.push("\n at "+wrapCallSite(r[u],i));i.nextPosition=i.curPosition}i.curPosition=i.nextPosition=null;return o+a.reverse().join("")}function getErrorSource(e){var r=/\n at [^(]+ \((.*):(\d+):(\d+)\)/.exec(e.stack);if(r){var n=r[1];var t=+r[2];var o=+r[3];var a=p[n];if(!a&&i&&i.existsSync(n)){try{a=i.readFileSync(n,"utf8")}catch(e){a=""}}if(a){var u=a.split(/(?:\r\n|\r|\n)/)[t-1];if(u){return n+":"+t+"\n"+u+"\n"+new Array(o).join(" ")+"^"}}}return null}function printErrorAndExit(e){var r=getErrorSource(e);var n=globalProcessStderr();if(n&&n._handle&&n._handle.setBlocking){n._handle.setBlocking(true)}if(r){console.error();console.error(r)}console.error(e.stack);globalProcessExit(1)}function shimEmitUncaughtException(){var e=process.emit;process.emit=function(r){if(r==="uncaughtException"){var n=arguments[1]&&arguments[1].stack;var t=this.listeners(r).length>0;if(n&&!t){return printErrorAndExit(arguments[1])}}return e.apply(this,arguments)}}var S=d.slice(0);var _=h.slice(0);r.wrapCallSite=wrapCallSite;r.getErrorSource=getErrorSource;r.mapSourcePosition=mapSourcePosition;r.retrieveSourceMap=v;r.install=function(r){r=r||{};if(r.environment){c=r.environment;if(["node","browser","auto"].indexOf(c)===-1){throw new Error("environment "+c+" was unknown. 
Available options are {auto, browser, node}")}}if(r.retrieveFile){if(r.overrideRetrieveFile){d.length=0}d.unshift(r.retrieveFile)}if(r.retrieveSourceMap){if(r.overrideRetrieveSourceMap){h.length=0}h.unshift(r.retrieveSourceMap)}if(r.hookRequire&&!isInBrowser()){var n=dynamicRequire(e,"module");var t=n.prototype._compile;if(!t.__sourceMapSupport){n.prototype._compile=function(e,r){p[r]=e;f[r]=undefined;return t.call(this,e,r)};n.prototype._compile.__sourceMapSupport=true}}if(!l){l="emptyCacheBetweenOperations"in r?r.emptyCacheBetweenOperations:false}if(!u){u=true;Error.prepareStackTrace=prepareStackTrace}if(!s){var o="handleUncaughtExceptions"in r?r.handleUncaughtExceptions:true;try{var i=dynamicRequire(e,"worker_threads");if(i.isMainThread===false){o=false}}catch(e){}if(o&&hasGlobalProcessEventEmitter()){s=true;shimEmitUncaughtException()}}};r.resetRetrieveHandlers=function(){d.length=0;h.length=0;d=S.slice(0);h=_.slice(0);v=handlerExec(h);m=handlerExec(d)}},517:(e,r,n)=>{var t=n(297);var o=Object.prototype.hasOwnProperty;var i=typeof Map!=="undefined";function ArraySet(){this._array=[];this._set=i?new Map:Object.create(null)}ArraySet.fromArray=function ArraySet_fromArray(e,r){var n=new ArraySet;for(var t=0,o=e.length;t=0){return r}}else{var n=t.toSetString(e);if(o.call(this._set,n)){return this._set[n]}}throw new Error('"'+e+'" is not in the set.')};ArraySet.prototype.at=function ArraySet_at(e){if(e>=0&&e{var t=n(158);var o=5;var i=1<>1;return r?-n:n}r.encode=function base64VLQ_encode(e){var r="";var n;var i=toVLQSigned(e);do{n=i&a;i>>>=o;if(i>0){n|=u}r+=t.encode(n)}while(i>0);return r};r.decode=function base64VLQ_decode(e,r,n){var i=e.length;var s=0;var l=0;var c,p;do{if(r>=i){throw new Error("Expected more digits in base 64 VLQ value.")}p=t.decode(e.charCodeAt(r++));if(p===-1){throw new Error("Invalid base64 digit: "+e.charAt(r-1))}c=!!(p&u);p&=a;s=s+(p<{var n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".split("");r.encode=function(e){if(0<=e&&e{r.GREATEST_LOWER_BOUND=1;r.LEAST_UPPER_BOUND=2;function recursiveSearch(e,n,t,o,i,a){var u=Math.floor((n-e)/2)+e;var s=i(t,o[u],true);if(s===0){return u}else if(s>0){if(n-u>1){return recursiveSearch(u,n,t,o,i,a)}if(a==r.LEAST_UPPER_BOUND){return n1){return recursiveSearch(e,u,t,o,i,a)}if(a==r.LEAST_UPPER_BOUND){return u}else{return e<0?-1:e}}}r.search=function search(e,n,t,o){if(n.length===0){return-1}var i=recursiveSearch(-1,n.length,e,n,t,o||r.GREATEST_LOWER_BOUND);if(i<0){return-1}while(i-1>=0){if(t(n[i],n[i-1],true)!==0){break}--i}return i}},24:(e,r,n)=>{var t=n(297);function generatedPositionAfter(e,r){var n=e.generatedLine;var o=r.generatedLine;var i=e.generatedColumn;var a=r.generatedColumn;return o>n||o==n&&a>=i||t.compareByGeneratedPositionsInflated(e,r)<=0}function MappingList(){this._array=[];this._sorted=true;this._last={generatedLine:-1,generatedColumn:0}}MappingList.prototype.unsortedForEach=function MappingList_forEach(e,r){this._array.forEach(e,r)};MappingList.prototype.add=function MappingList_add(e){if(generatedPositionAfter(this._last,e)){this._last=e;this._array.push(e)}else{this._sorted=false;this._array.push(e)}};MappingList.prototype.toArray=function MappingList_toArray(){if(!this._sorted){this._array.sort(t.compareByGeneratedPositionsInflated);this._sorted=true}return this._array};r.P=MappingList},299:(e,r)=>{function swap(e,r,n){var t=e[r];e[r]=e[n];e[n]=t}function randomIntInRange(e,r){return Math.round(e+Math.random()*(r-e))}function doQuickSort(e,r,n,t){if(n{var t;var o=n(297);var i=n(197);var 
a=n(517).C;var u=n(818);var s=n(299).g;function SourceMapConsumer(e,r){var n=e;if(typeof e==="string"){n=o.parseSourceMapInput(e)}return n.sections!=null?new IndexedSourceMapConsumer(n,r):new BasicSourceMapConsumer(n,r)}SourceMapConsumer.fromSourceMap=function(e,r){return BasicSourceMapConsumer.fromSourceMap(e,r)};SourceMapConsumer.prototype._version=3;SourceMapConsumer.prototype.__generatedMappings=null;Object.defineProperty(SourceMapConsumer.prototype,"_generatedMappings",{configurable:true,enumerable:true,get:function(){if(!this.__generatedMappings){this._parseMappings(this._mappings,this.sourceRoot)}return this.__generatedMappings}});SourceMapConsumer.prototype.__originalMappings=null;Object.defineProperty(SourceMapConsumer.prototype,"_originalMappings",{configurable:true,enumerable:true,get:function(){if(!this.__originalMappings){this._parseMappings(this._mappings,this.sourceRoot)}return this.__originalMappings}});SourceMapConsumer.prototype._charIsMappingSeparator=function SourceMapConsumer_charIsMappingSeparator(e,r){var n=e.charAt(r);return n===";"||n===","};SourceMapConsumer.prototype._parseMappings=function SourceMapConsumer_parseMappings(e,r){throw new Error("Subclasses must implement _parseMappings")};SourceMapConsumer.GENERATED_ORDER=1;SourceMapConsumer.ORIGINAL_ORDER=2;SourceMapConsumer.GREATEST_LOWER_BOUND=1;SourceMapConsumer.LEAST_UPPER_BOUND=2;SourceMapConsumer.prototype.eachMapping=function SourceMapConsumer_eachMapping(e,r,n){var t=r||null;var i=n||SourceMapConsumer.GENERATED_ORDER;var a;switch(i){case SourceMapConsumer.GENERATED_ORDER:a=this._generatedMappings;break;case SourceMapConsumer.ORIGINAL_ORDER:a=this._originalMappings;break;default:throw new Error("Unknown order of iteration.")}var u=this.sourceRoot;a.map((function(e){var r=e.source===null?null:this._sources.at(e.source);r=o.computeSourceURL(u,r,this._sourceMapURL);return{source:r,generatedLine:e.generatedLine,generatedColumn:e.generatedColumn,originalLine:e.originalLine,originalColumn:e.originalColumn,name:e.name===null?null:this._names.at(e.name)}}),this).forEach(e,t)};SourceMapConsumer.prototype.allGeneratedPositionsFor=function SourceMapConsumer_allGeneratedPositionsFor(e){var r=o.getArg(e,"line");var n={source:o.getArg(e,"source"),originalLine:r,originalColumn:o.getArg(e,"column",0)};n.source=this._findSourceIndex(n.source);if(n.source<0){return[]}var t=[];var a=this._findMapping(n,this._originalMappings,"originalLine","originalColumn",o.compareByOriginalPositions,i.LEAST_UPPER_BOUND);if(a>=0){var u=this._originalMappings[a];if(e.column===undefined){var s=u.originalLine;while(u&&u.originalLine===s){t.push({line:o.getArg(u,"generatedLine",null),column:o.getArg(u,"generatedColumn",null),lastColumn:o.getArg(u,"lastGeneratedColumn",null)});u=this._originalMappings[++a]}}else{var l=u.originalColumn;while(u&&u.originalLine===r&&u.originalColumn==l){t.push({line:o.getArg(u,"generatedLine",null),column:o.getArg(u,"generatedColumn",null),lastColumn:o.getArg(u,"lastGeneratedColumn",null)});u=this._originalMappings[++a]}}}return t};r.SourceMapConsumer=SourceMapConsumer;function BasicSourceMapConsumer(e,r){var n=e;if(typeof e==="string"){n=o.parseSourceMapInput(e)}var t=o.getArg(n,"version");var i=o.getArg(n,"sources");var u=o.getArg(n,"names",[]);var s=o.getArg(n,"sourceRoot",null);var l=o.getArg(n,"sourcesContent",null);var c=o.getArg(n,"mappings");var p=o.getArg(n,"file",null);if(t!=this._version){throw new Error("Unsupported version: 
"+t)}if(s){s=o.normalize(s)}i=i.map(String).map(o.normalize).map((function(e){return s&&o.isAbsolute(s)&&o.isAbsolute(e)?o.relative(s,e):e}));this._names=a.fromArray(u.map(String),true);this._sources=a.fromArray(i,true);this._absoluteSources=this._sources.toArray().map((function(e){return o.computeSourceURL(s,e,r)}));this.sourceRoot=s;this.sourcesContent=l;this._mappings=c;this._sourceMapURL=r;this.file=p}BasicSourceMapConsumer.prototype=Object.create(SourceMapConsumer.prototype);BasicSourceMapConsumer.prototype.consumer=SourceMapConsumer;BasicSourceMapConsumer.prototype._findSourceIndex=function(e){var r=e;if(this.sourceRoot!=null){r=o.relative(this.sourceRoot,r)}if(this._sources.has(r)){return this._sources.indexOf(r)}var n;for(n=0;n1){v.source=l+_[1];l+=_[1];v.originalLine=i+_[2];i=v.originalLine;v.originalLine+=1;v.originalColumn=a+_[3];a=v.originalColumn;if(_.length>4){v.name=c+_[4];c+=_[4]}}m.push(v);if(typeof v.originalLine==="number"){h.push(v)}}}s(m,o.compareByGeneratedPositionsDeflated);this.__generatedMappings=m;s(h,o.compareByOriginalPositions);this.__originalMappings=h};BasicSourceMapConsumer.prototype._findMapping=function SourceMapConsumer_findMapping(e,r,n,t,o,a){if(e[n]<=0){throw new TypeError("Line must be greater than or equal to 1, got "+e[n])}if(e[t]<0){throw new TypeError("Column must be greater than or equal to 0, got "+e[t])}return i.search(e,r,o,a)};BasicSourceMapConsumer.prototype.computeColumnSpans=function SourceMapConsumer_computeColumnSpans(){for(var e=0;e=0){var t=this._generatedMappings[n];if(t.generatedLine===r.generatedLine){var i=o.getArg(t,"source",null);if(i!==null){i=this._sources.at(i);i=o.computeSourceURL(this.sourceRoot,i,this._sourceMapURL)}var a=o.getArg(t,"name",null);if(a!==null){a=this._names.at(a)}return{source:i,line:o.getArg(t,"originalLine",null),column:o.getArg(t,"originalColumn",null),name:a}}}return{source:null,line:null,column:null,name:null}};BasicSourceMapConsumer.prototype.hasContentsOfAllSources=function BasicSourceMapConsumer_hasContentsOfAllSources(){if(!this.sourcesContent){return false}return this.sourcesContent.length>=this._sources.size()&&!this.sourcesContent.some((function(e){return e==null}))};BasicSourceMapConsumer.prototype.sourceContentFor=function SourceMapConsumer_sourceContentFor(e,r){if(!this.sourcesContent){return null}var n=this._findSourceIndex(e);if(n>=0){return this.sourcesContent[n]}var t=e;if(this.sourceRoot!=null){t=o.relative(this.sourceRoot,t)}var i;if(this.sourceRoot!=null&&(i=o.urlParse(this.sourceRoot))){var a=t.replace(/^file:\/\//,"");if(i.scheme=="file"&&this._sources.has(a)){return this.sourcesContent[this._sources.indexOf(a)]}if((!i.path||i.path=="/")&&this._sources.has("/"+t)){return this.sourcesContent[this._sources.indexOf("/"+t)]}}if(r){return null}else{throw new Error('"'+t+'" is not in the SourceMap.')}};BasicSourceMapConsumer.prototype.generatedPositionFor=function SourceMapConsumer_generatedPositionFor(e){var r=o.getArg(e,"source");r=this._findSourceIndex(r);if(r<0){return{line:null,column:null,lastColumn:null}}var n={source:r,originalLine:o.getArg(e,"line"),originalColumn:o.getArg(e,"column")};var t=this._findMapping(n,this._originalMappings,"originalLine","originalColumn",o.compareByOriginalPositions,o.getArg(e,"bias",SourceMapConsumer.GREATEST_LOWER_BOUND));if(t>=0){var 
i=this._originalMappings[t];if(i.source===n.source){return{line:o.getArg(i,"generatedLine",null),column:o.getArg(i,"generatedColumn",null),lastColumn:o.getArg(i,"lastGeneratedColumn",null)}}}return{line:null,column:null,lastColumn:null}};t=BasicSourceMapConsumer;function IndexedSourceMapConsumer(e,r){var n=e;if(typeof e==="string"){n=o.parseSourceMapInput(e)}var t=o.getArg(n,"version");var i=o.getArg(n,"sections");if(t!=this._version){throw new Error("Unsupported version: "+t)}this._sources=new a;this._names=new a;var u={line:-1,column:0};this._sections=i.map((function(e){if(e.url){throw new Error("Support for url field in sections not implemented.")}var n=o.getArg(e,"offset");var t=o.getArg(n,"line");var i=o.getArg(n,"column");if(t{var t=n(818);var o=n(297);var i=n(517).C;var a=n(24).P;function SourceMapGenerator(e){if(!e){e={}}this._file=o.getArg(e,"file",null);this._sourceRoot=o.getArg(e,"sourceRoot",null);this._skipValidation=o.getArg(e,"skipValidation",false);this._sources=new i;this._names=new i;this._mappings=new a;this._sourcesContents=null}SourceMapGenerator.prototype._version=3;SourceMapGenerator.fromSourceMap=function SourceMapGenerator_fromSourceMap(e){var r=e.sourceRoot;var n=new SourceMapGenerator({file:e.file,sourceRoot:r});e.eachMapping((function(e){var t={generated:{line:e.generatedLine,column:e.generatedColumn}};if(e.source!=null){t.source=e.source;if(r!=null){t.source=o.relative(r,t.source)}t.original={line:e.originalLine,column:e.originalColumn};if(e.name!=null){t.name=e.name}}n.addMapping(t)}));e.sources.forEach((function(t){var i=t;if(r!==null){i=o.relative(r,t)}if(!n._sources.has(i)){n._sources.add(i)}var a=e.sourceContentFor(t);if(a!=null){n.setSourceContent(t,a)}}));return n};SourceMapGenerator.prototype.addMapping=function SourceMapGenerator_addMapping(e){var r=o.getArg(e,"generated");var n=o.getArg(e,"original",null);var t=o.getArg(e,"source",null);var i=o.getArg(e,"name",null);if(!this._skipValidation){this._validateMapping(r,n,t,i)}if(t!=null){t=String(t);if(!this._sources.has(t)){this._sources.add(t)}}if(i!=null){i=String(i);if(!this._names.has(i)){this._names.add(i)}}this._mappings.add({generatedLine:r.line,generatedColumn:r.column,originalLine:n!=null&&n.line,originalColumn:n!=null&&n.column,source:t,name:i})};SourceMapGenerator.prototype.setSourceContent=function SourceMapGenerator_setSourceContent(e,r){var n=e;if(this._sourceRoot!=null){n=o.relative(this._sourceRoot,n)}if(r!=null){if(!this._sourcesContents){this._sourcesContents=Object.create(null)}this._sourcesContents[o.toSetString(n)]=r}else if(this._sourcesContents){delete this._sourcesContents[o.toSetString(n)];if(Object.keys(this._sourcesContents).length===0){this._sourcesContents=null}}};SourceMapGenerator.prototype.applySourceMap=function SourceMapGenerator_applySourceMap(e,r,n){var t=r;if(r==null){if(e.file==null){throw new Error("SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, "+'or the source map\'s "file" property. 
Both were omitted.')}t=e.file}var a=this._sourceRoot;if(a!=null){t=o.relative(a,t)}var u=new i;var s=new i;this._mappings.unsortedForEach((function(r){if(r.source===t&&r.originalLine!=null){var i=e.originalPositionFor({line:r.originalLine,column:r.originalColumn});if(i.source!=null){r.source=i.source;if(n!=null){r.source=o.join(n,r.source)}if(a!=null){r.source=o.relative(a,r.source)}r.originalLine=i.line;r.originalColumn=i.column;if(i.name!=null){r.name=i.name}}}var l=r.source;if(l!=null&&!u.has(l)){u.add(l)}var c=r.name;if(c!=null&&!s.has(c)){s.add(c)}}),this);this._sources=u;this._names=s;e.sources.forEach((function(r){var t=e.sourceContentFor(r);if(t!=null){if(n!=null){r=o.join(n,r)}if(a!=null){r=o.relative(a,r)}this.setSourceContent(r,t)}}),this)};SourceMapGenerator.prototype._validateMapping=function SourceMapGenerator_validateMapping(e,r,n,t){if(r&&typeof r.line!=="number"&&typeof r.column!=="number"){throw new Error("original.line and original.column are not numbers -- you probably meant to omit "+"the original mapping entirely and only map the generated position. If so, pass "+"null for the original mapping instead of an object with empty or null values.")}if(e&&"line"in e&&"column"in e&&e.line>0&&e.column>=0&&!r&&!n&&!t){return}else if(e&&"line"in e&&"column"in e&&r&&"line"in r&&"column"in r&&e.line>0&&e.column>=0&&r.line>0&&r.column>=0&&n){return}else{throw new Error("Invalid mapping: "+JSON.stringify({generated:e,source:n,original:r,name:t}))}};SourceMapGenerator.prototype._serializeMappings=function SourceMapGenerator_serializeMappings(){var e=0;var r=1;var n=0;var i=0;var a=0;var u=0;var s="";var l;var c;var p;var f;var g=this._mappings.toArray();for(var d=0,h=g.length;d0){if(!o.compareByGeneratedPositionsInflated(c,g[d-1])){continue}l+=","}}l+=t.encode(c.generatedColumn-e);e=c.generatedColumn;if(c.source!=null){f=this._sources.indexOf(c.source);l+=t.encode(f-u);u=f;l+=t.encode(c.originalLine-1-i);i=c.originalLine-1;l+=t.encode(c.originalColumn-n);n=c.originalColumn;if(c.name!=null){p=this._names.indexOf(c.name);l+=t.encode(p-a);a=p}}s+=l}return s};SourceMapGenerator.prototype._generateSourcesContent=function SourceMapGenerator_generateSourcesContent(e,r){return e.map((function(e){if(!this._sourcesContents){return null}if(r!=null){e=o.relative(r,e)}var n=o.toSetString(e);return Object.prototype.hasOwnProperty.call(this._sourcesContents,n)?this._sourcesContents[n]:null}),this)};SourceMapGenerator.prototype.toJSON=function SourceMapGenerator_toJSON(){var e={version:this._version,sources:this._sources.toArray(),names:this._names.toArray(),mappings:this._serializeMappings()};if(this._file!=null){e.file=this._file}if(this._sourceRoot!=null){e.sourceRoot=this._sourceRoot}if(this._sourcesContents){e.sourcesContent=this._generateSourcesContent(e.sources,e.sourceRoot)}return e};SourceMapGenerator.prototype.toString=function SourceMapGenerator_toString(){return JSON.stringify(this.toJSON())};r.x=SourceMapGenerator},565:(e,r,n)=>{var t;var o=n(163).x;var i=n(297);var a=/(\r?\n)/;var u=10;var s="$$$isSourceNode$$$";function SourceNode(e,r,n,t,o){this.children=[];this.sourceContents={};this.line=e==null?null:e;this.column=r==null?null:r;this.source=n==null?null:n;this.name=o==null?null:o;this[s]=true;if(t!=null)this.add(t)}SourceNode.fromStringWithSourceMap=function SourceNode_fromStringWithSourceMap(e,r,n){var t=new SourceNode;var o=e.split(a);var u=0;var shiftNextLine=function(){var e=getNextLine();var r=getNextLine()||"";return e+r;function getNextLine(){return 
u=0;r--){this.prepend(e[r])}}else if(e[s]||typeof e==="string"){this.children.unshift(e)}else{throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got "+e)}return this};SourceNode.prototype.walk=function SourceNode_walk(e){var r;for(var n=0,t=this.children.length;n0){r=[];for(n=0;n{function getArg(e,r,n){if(r in e){return e[r]}else if(arguments.length===3){return n}else{throw new Error('"'+r+'" is a required argument.')}}r.getArg=getArg;var n=/^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/;var t=/^data:.+\,.+$/;function urlParse(e){var r=e.match(n);if(!r){return null}return{scheme:r[1],auth:r[2],host:r[3],port:r[4],path:r[5]}}r.urlParse=urlParse;function urlGenerate(e){var r="";if(e.scheme){r+=e.scheme+":"}r+="//";if(e.auth){r+=e.auth+"@"}if(e.host){r+=e.host}if(e.port){r+=":"+e.port}if(e.path){r+=e.path}return r}r.urlGenerate=urlGenerate;function normalize(e){var n=e;var t=urlParse(e);if(t){if(!t.path){return e}n=t.path}var o=r.isAbsolute(n);var i=n.split(/\/+/);for(var a,u=0,s=i.length-1;s>=0;s--){a=i[s];if(a==="."){i.splice(s,1)}else if(a===".."){u++}else if(u>0){if(a===""){i.splice(s+1,u);u=0}else{i.splice(s,2);u--}}}n=i.join("/");if(n===""){n=o?"/":"."}if(t){t.path=n;return urlGenerate(t)}return n}r.normalize=normalize;function join(e,r){if(e===""){e="."}if(r===""){r="."}var n=urlParse(r);var o=urlParse(e);if(o){e=o.path||"/"}if(n&&!n.scheme){if(o){n.scheme=o.scheme}return urlGenerate(n)}if(n||r.match(t)){return r}if(o&&!o.host&&!o.path){o.host=r;return urlGenerate(o)}var i=r.charAt(0)==="/"?r:normalize(e.replace(/\/+$/,"")+"/"+r);if(o){o.path=i;return urlGenerate(o)}return i}r.join=join;r.isAbsolute=function(e){return e.charAt(0)==="/"||n.test(e)};function relative(e,r){if(e===""){e="."}e=e.replace(/\/$/,"");var n=0;while(r.indexOf(e+"/")!==0){var t=e.lastIndexOf("/");if(t<0){return r}e=e.slice(0,t);if(e.match(/^([^\/]+:\/)?\/*$/)){return r}++n}return Array(n+1).join("../")+r.substr(e.length+1)}r.relative=relative;var o=function(){var e=Object.create(null);return!("__proto__"in e)}();function identity(e){return e}function toSetString(e){if(isProtoString(e)){return"$"+e}return e}r.toSetString=o?identity:toSetString;function fromSetString(e){if(isProtoString(e)){return e.slice(1)}return e}r.fromSetString=o?identity:fromSetString;function isProtoString(e){if(!e){return false}var r=e.length;if(r<9){return false}if(e.charCodeAt(r-1)!==95||e.charCodeAt(r-2)!==95||e.charCodeAt(r-3)!==111||e.charCodeAt(r-4)!==116||e.charCodeAt(r-5)!==111||e.charCodeAt(r-6)!==114||e.charCodeAt(r-7)!==112||e.charCodeAt(r-8)!==95||e.charCodeAt(r-9)!==95){return false}for(var n=r-10;n>=0;n--){if(e.charCodeAt(n)!==36){return false}}return true}function compareByOriginalPositions(e,r,n){var t=strcmp(e.source,r.source);if(t!==0){return t}t=e.originalLine-r.originalLine;if(t!==0){return t}t=e.originalColumn-r.originalColumn;if(t!==0||n){return t}t=e.generatedColumn-r.generatedColumn;if(t!==0){return t}t=e.generatedLine-r.generatedLine;if(t!==0){return t}return strcmp(e.name,r.name)}r.compareByOriginalPositions=compareByOriginalPositions;function compareByGeneratedPositionsDeflated(e,r,n){var t=e.generatedLine-r.generatedLine;if(t!==0){return t}t=e.generatedColumn-r.generatedColumn;if(t!==0||n){return t}t=strcmp(e.source,r.source);if(t!==0){return t}t=e.originalLine-r.originalLine;if(t!==0){return t}t=e.originalColumn-r.originalColumn;if(t!==0){return t}return 
strcmp(e.name,r.name)}r.compareByGeneratedPositionsDeflated=compareByGeneratedPositionsDeflated;function strcmp(e,r){if(e===r){return 0}if(e===null){return 1}if(r===null){return-1}if(e>r){return 1}return-1}function compareByGeneratedPositionsInflated(e,r){var n=e.generatedLine-r.generatedLine;if(n!==0){return n}n=e.generatedColumn-r.generatedColumn;if(n!==0){return n}n=strcmp(e.source,r.source);if(n!==0){return n}n=e.originalLine-r.originalLine;if(n!==0){return n}n=e.originalColumn-r.originalColumn;if(n!==0){return n}return strcmp(e.name,r.name)}r.compareByGeneratedPositionsInflated=compareByGeneratedPositionsInflated;function parseSourceMapInput(e){return JSON.parse(e.replace(/^\)]}'[^\n]*\n/,""))}r.parseSourceMapInput=parseSourceMapInput;function computeSourceURL(e,r,n){r=r||"";if(e){if(e[e.length-1]!=="/"&&r[0]!=="/"){e+="/"}r=e+r}if(n){var t=urlParse(n);if(!t){throw new Error("sourceMapURL could not be parsed")}if(t.path){var o=t.path.lastIndexOf("/");if(o>=0){t.path=t.path.substring(0,o+1)}}r=join(urlGenerate(t),r)}return normalize(r)}r.computeSourceURL=computeSourceURL},927:(e,r,n)=>{n(163).x;r.SourceMapConsumer=n(684).SourceMapConsumer;n(565)},896:e=>{"use strict";e.exports=require("fs")},928:e=>{"use strict";e.exports=require("path")}};var r={};function __webpack_require__(n){var t=r[n];if(t!==undefined){return t.exports}var o=r[n]={id:n,loaded:false,exports:{}};var i=true;try{e[n](o,o.exports,__webpack_require__);i=false}finally{if(i)delete r[n]}o.loaded=true;return o.exports}(()=>{__webpack_require__.nmd=e=>{e.paths=[];if(!e.children)e.children=[];return e}})();if(typeof __webpack_require__!=="undefined")__webpack_require__.ab=__dirname+"/";var n={};__webpack_require__(599).install();module.exports=n})(); -------------------------------------------------------------------------------- /eslint.config.mjs: -------------------------------------------------------------------------------- 1 | import vitest from '@vitest/eslint-plugin' 2 | import typescriptEslint from '@typescript-eslint/eslint-plugin' 3 | import globals from 'globals' 4 | import tsParser from '@typescript-eslint/parser' 5 | import path from 'node:path' 6 | import { fileURLToPath } from 'node:url' 7 | import js from '@eslint/js' 8 | import { FlatCompat } from '@eslint/eslintrc' 9 | import eslintPluginPrettierRecommended from 'eslint-plugin-prettier/recommended' 10 | 11 | const __filename = fileURLToPath(import.meta.url) 12 | const __dirname = path.dirname(__filename) 13 | const compat = new FlatCompat({ 14 | baseDirectory: __dirname, 15 | recommendedConfig: js.configs.recommended, 16 | allConfig: js.configs.all 17 | }) 18 | 19 | export default [ 20 | { 21 | ignores: [ 22 | '**/node_modules/**', 23 | '**/dist/**', 24 | '**/coverage/**', 25 | '**/*.json', 26 | 'vitest.config.ts', 27 | 'eslint.config.mjs' 28 | ] 29 | }, 30 | ...compat.extends( 31 | 'eslint:recommended', 32 | 'plugin:@typescript-eslint/eslint-recommended', 33 | 'plugin:@typescript-eslint/recommended' 34 | ), 35 | 36 | { 37 | plugins: { 38 | vitest, 39 | '@typescript-eslint': typescriptEslint 40 | }, 41 | 42 | languageOptions: { 43 | globals: { 44 | ...globals.node, 45 | Atomics: 'readonly', 46 | SharedArrayBuffer: 'readonly' 47 | }, 48 | 49 | parser: tsParser, 50 | ecmaVersion: 2023, 51 | sourceType: 'module', 52 | 53 | parserOptions: { 54 | project: ['./tsconfig.json'] 55 | } 56 | }, 57 | 58 | settings: { 59 | 'import/resolver': { 60 | typescript: {} 61 | } 62 | }, 63 | 64 | rules: { 65 | ...vitest.configs.recommended.rules, 66 | camelcase: 'off', 67 | 
'eslint-comments/no-use': 'off', 68 | 'eslint-comments/no-unused-disable': 'off', 69 | 'i18n-text/no-en': 'off', 70 | 'import/no-namespace': 'off', 71 | 'no-console': 'off', 72 | 'no-unused-vars': 'off', 73 | 'prettier/prettier': 'error', 74 | semi: 'off', 75 | '@typescript-eslint/array-type': 'error', 76 | '@typescript-eslint/await-thenable': 'error', 77 | '@typescript-eslint/ban-ts-comment': 'error', 78 | '@typescript-eslint/consistent-type-assertions': 'error', 79 | 80 | '@typescript-eslint/explicit-member-accessibility': [ 81 | 'error', 82 | { 83 | accessibility: 'no-public' 84 | } 85 | ], 86 | 87 | '@typescript-eslint/explicit-function-return-type': [ 88 | 'error', 89 | { 90 | allowExpressions: true 91 | } 92 | ], 93 | '@typescript-eslint/no-array-constructor': 'error', 94 | '@typescript-eslint/no-empty-interface': 'error', 95 | '@typescript-eslint/no-explicit-any': 'error', 96 | '@typescript-eslint/no-extraneous-class': 'error', 97 | '@typescript-eslint/no-for-in-array': 'error', 98 | '@typescript-eslint/no-inferrable-types': 'error', 99 | '@typescript-eslint/no-misused-new': 'error', 100 | '@typescript-eslint/no-namespace': 'error', 101 | '@typescript-eslint/no-non-null-assertion': 'warn', 102 | '@typescript-eslint/no-require-imports': 'error', 103 | '@typescript-eslint/no-unnecessary-qualifier': 'error', 104 | '@typescript-eslint/no-unnecessary-type-assertion': 'error', 105 | '@typescript-eslint/no-unused-vars': 'error', 106 | '@typescript-eslint/no-useless-constructor': 'error', 107 | '@typescript-eslint/no-var-requires': 'error', 108 | '@typescript-eslint/prefer-for-of': 'warn', 109 | '@typescript-eslint/prefer-function-type': 'warn', 110 | '@typescript-eslint/prefer-includes': 'error', 111 | '@typescript-eslint/prefer-string-starts-ends-with': 'error', 112 | '@typescript-eslint/promise-function-async': 'error', 113 | '@typescript-eslint/require-array-sort-compare': 'error', 114 | '@typescript-eslint/restrict-plus-operands': 'error', 115 | '@typescript-eslint/space-before-function-paren': 'off', 116 | '@typescript-eslint/unbound-method': 'error', 117 | '@typescript-eslint/no-floating-promises': 'error' 118 | } 119 | }, 120 | // Any other config imports go at the top. See: https://github.com/prettier/eslint-plugin-prettier?tab=readme-ov-file#configuration-new-eslintconfigjs 121 | eslintPluginPrettierRecommended 122 | ] 123 | -------------------------------------------------------------------------------- /examples/google-cloud/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM otel/opentelemetry-collector-contrib:0.101.0 2 | COPY collector-config.yaml /etc/otelcol-contrib/config.yaml -------------------------------------------------------------------------------- /examples/google-cloud/README.md: -------------------------------------------------------------------------------- 1 | # Getting Started on Google Cloud 2 | 3 | ## Overview 4 | 5 | This guide explains how to deploy an OpenTelemetry Collector on Google Cloud 6 | Cloud Run and how to use `github-actions-opentelemetry` to send traces and 7 | metrics from GitHub Actions workflows to Google Cloud via the OpenTelemetry 8 | Protocol (OTLP). 9 | 10 | > [!IMPORTANT] 11 | > `github-actions-opentelemetry` works with any OTLP endpoint. It can also be 12 | > used Other than that Google Cloud. 13 | 14 | ## Prerequisites 15 | 16 | - A Google Cloud project 17 | - The `gcloud` CLI installed 18 | - A GitHub account 19 | 20 | ## Step 1: Configure Default gcloud Settings 21 | 22 | 1. 
**Set the default project**: 23 | 24 | ```sh 25 | gcloud config set project 26 | ``` 27 | 28 | Replace `` with your Google Cloud project ID. 29 | 30 | 2. **Set the default region**: 31 | 32 | ```sh 33 | gcloud config set run/region 34 | ``` 35 | 36 | Replace `` with your desired Cloud Run region (for example, 37 | `us-west1`). 38 | 39 | ## Step 2: Fork the GitHub Actions OpenTelemetry Repository 40 | 41 | To run the sample GitHub Actions workflow, fork the 42 | [`github-actions-opentelemetry`](https://github.com/paper2/github-actions-opentelemetry) 43 | repository. 44 | 45 | ![fork repository](../../img/fork-repository.png) 46 | 47 | ## Step 3: Clone the Sample Code 48 | 49 | Clone your forked repository to your local machine and navigate to the 50 | `google-cloud` example directory: 51 | 52 | ```sh 53 | git clone https:// 54 | cd github-actions-opentelemetry/examples/google-cloud 55 | ``` 56 | 57 | ## Step 4: Deploy the OpenTelemetry Collector to Cloud Run 58 | 59 | Deploy the collector using the following command: 60 | 61 | ```sh 62 | gcloud run deploy collector \ 63 | --source . \ 64 | --allow-unauthenticated \ 65 | --port=4318 \ 66 | --max-instances=3 67 | ``` 68 | 69 | > [!NOTE] 70 | > In a production environment, it is recommended that you do not allow 71 | > unauthenticated access to Cloud Run. 72 | 73 | This command uses the [Dockerfile](./Dockerfile) to build a container and deploy 74 | the OpenTelemetry Collector to Cloud Run. The container is based on the 75 | [OpenTelemetry Collector Contrib repository](https://github.com/open-telemetry/opentelemetry-collector-contrib) 76 | and uses [collector-config.yaml](./collector-config.yaml) as its configuration 77 | file. 78 | 79 | The configuration file is set up to receive telemetry via OTLP and forward it to 80 | Cloud Trace and Cloud Monitoring. This means you now have a collector that can 81 | accept OTLP data. 82 | 83 | ## Step 5: Set the OTLP Endpoint 84 | 85 | Retrieve the Cloud Run endpoint for the OpenTelemetry Collector: 86 | 87 | ```sh 88 | gcloud run services describe collector --format 'value(status.url)' 89 | ``` 90 | 91 | Copy the resulting URL and add it as a 92 | [repository secret](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository) 93 | named `OTEL_EXPORTER_OTLP_ENDPOINT`. 94 | 95 | ![repository-secret](../../img/repository-secret.png) 96 | 97 | ## Step 6: Enable Workflow Runs 98 | 99 | Open the **Actions** tab in your forked repository. You should see a message 100 | asking if you want to enable workflows. Review them and enable this feature. 101 | 102 | ![enable workflows](../../img/enable-workflows.png) 103 | 104 | ## Step 7: Run the Workflow 105 | 106 | Create and push a new branch named `getting-started`: 107 | 108 | ```sh 109 | git switch -c getting-started 110 | git commit --allow-empty -m "empty commit" 111 | git push --set-upstream origin getting-started 112 | ``` 113 | 114 | In the **Actions** tab, verify that the 115 | [Example Workflow](../../.github/workflows/example-workflow-01.yml) completes 116 | successfully. Afterward, the 117 | [Send Telemetry after Other Workflow Example](../../.github/workflows/example-run-action.yml) 118 | runs and uses `github-actions-opentelemetry` to send traces and metrics to the 119 | OTLP endpoint. 
120 | 121 | ![verify completed job](../../img/verify-completed-job.png) 122 | 123 | Once you confirm that the workflow completes, make another commit to observe 124 | changes in the metrics: 125 | 126 | ```sh 127 | git commit --allow-empty -m "empty commit" 128 | git push 129 | ``` 130 | 131 | The 132 | [Send Telemetry after Other Workflow Example](../../.github/workflows/example-run-action.yml) 133 | is below. 134 | 135 | ```yaml 136 | name: Send Telemetry after Other Workflow Example 137 | 138 | on: 139 | workflow_run: 140 | # Specify the workflows you want to collect telemetry from. 141 | workflows: 142 | - Example Workflow 01 143 | - Example Workflow 02 144 | - Example Workflow 03 145 | types: 146 | - completed 147 | 148 | permissions: 149 | # Required for private repositories 150 | actions: read 151 | 152 | jobs: 153 | send-telemetry: 154 | name: Send CI Telemetry 155 | runs-on: ubuntu-latest 156 | steps: 157 | - name: Run 158 | id: run 159 | uses: paper2/github-actions-opentelemetry@main 160 | env: 161 | OTEL_EXPORTER_OTLP_ENDPOINT: 162 | ${{ secrets.OTEL_EXPORTER_OTLP_ENDPOINT }} 163 | OTEL_SERVICE_NAME: github-actions-opentelemetry 164 | with: 165 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 166 | ``` 167 | 168 | Whenever any of the specified workflows complete, the `workflow_run` event 169 | triggers this job. `github-actions-opentelemetry` then gathers workflow details 170 | and sends traces and metrics to the specified OTLP endpoint. 171 | 172 | ## Step 8: Check Cloud Trace for Traces 173 | 174 | To find traces, identify the `run_id` from the Example Workflow. The `run_id` is 175 | visible in the workflow results URL. For example, if the URL is: 176 | 177 | ```txt 178 | https://github.com/paper2/github-actions-opentelemetry/actions/runs/13388380812 179 | ``` 180 | 181 | Then the `run_id` is `13388380812`. 182 | 183 | Open the [Trace Explorer](https://console.cloud.google.com/traces/explorer) and 184 | filter by `run_id`. 185 | 186 | ![filter run id](../../img/filter-run-id.png) 187 | 188 | Select a **Span ID** link to view detailed trace information. 189 | 190 | ![trace detail](../../img/trace-detail.png) 191 | 192 | ## Step 9: Check Cloud Monitoring for Metrics 193 | 194 | Open the 195 | [Metrics Explorer](https://console.cloud.google.com/monitoring/metrics-explorer) 196 | and select the metric: 197 | 198 | ```txt 199 | prometheus/github_job_duration_seconds/gauge 200 | ``` 201 | 202 | ![choose metrics](../../img/choose-metrics.png) 203 | 204 | Configure the **Aggregation** settings to group by `workflow_name` and 205 | `job_name` to see execution times for each workflow and job. 206 | 207 | ![metrics graph](../../img/metrics-graph.png) 208 | 209 | ## Step 10: Clean Up 210 | 211 | To remove the Cloud Run service: 212 | 213 | ```sh 214 | gcloud run services delete collector 215 | ``` 216 | 217 | Then reset your gcloud configuration: 218 | 219 | ```sh 220 | gcloud config unset project 221 | gcloud config unset run/region 222 | ``` 223 | 224 | Finally, delete your forked repository if you no longer need it. 
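## Appendix: Smoke-Test the Collector (Optional)

If you want to confirm that the Cloud Run collector accepts OTLP data before wiring it into a workflow, a small standalone script can send a single test span. This is a sketch rather than part of the repository: the file name, the Cloud Run URL, and the span names are placeholders, and it assumes the OpenTelemetry packages already listed in this project's `package.json`.

```ts
// smoke-test-collector.ts — hypothetical helper, not part of this repository.
// Sends one test span to the deployed collector so you can confirm the OTLP
// endpoint accepts data before configuring the GitHub Action.
import {
  BasicTracerProvider,
  BatchSpanProcessor
} from '@opentelemetry/sdk-trace-base'
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-proto'

const main = async (): Promise<void> => {
  const provider = new BasicTracerProvider({
    spanProcessors: [
      new BatchSpanProcessor(
        new OTLPTraceExporter({
          // Replace with the URL returned by `gcloud run services describe collector`,
          // plus the OTLP/HTTP traces path.
          url: 'https://collector-xxxxxxxxxx-uw.a.run.app/v1/traces'
        })
      )
    ]
  })

  // Emit one short-lived span and flush it to the collector.
  provider.getTracer('collector-smoke-test').startSpan('smoke-test').end()

  await provider.forceFlush()
  await provider.shutdown()
  console.log('Test span sent. It should appear in Cloud Trace shortly.')
}

main().catch(err => {
  console.error(err)
  process.exit(1)
})
```

If you run it from the repository root (for example with `npx tsx smoke-test-collector.ts`, since `tsx` is already a dev dependency), the required OpenTelemetry packages are already installed.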
225 | -------------------------------------------------------------------------------- /examples/google-cloud/collector-config.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | http: 5 | 6 | processors: 7 | batch: 8 | send_batch_max_size: 200 9 | send_batch_size: 200 10 | timeout: 5s 11 | 12 | resource: 13 | attributes: 14 | # Not using gcp detector to avoid increasing time series due to changing instance IDs in Cloud Run. 15 | - key: 'location' 16 | value: 'us-east1' # Set the location to your Google Cloud region 17 | action: upsert 18 | 19 | exporters: 20 | googlecloud: 21 | googlemanagedprometheus: 22 | 23 | service: 24 | pipelines: 25 | metrics: 26 | receivers: [otlp] 27 | processors: [batch, resource] 28 | exporters: [googlemanagedprometheus] 29 | traces: 30 | receivers: [otlp] 31 | processors: [batch, resource] 32 | exporters: [googlecloud] 33 | -------------------------------------------------------------------------------- /img/actions-tab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/actions-tab.png -------------------------------------------------------------------------------- /img/choose-metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/choose-metrics.png -------------------------------------------------------------------------------- /img/enable-workflows.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/enable-workflows.png -------------------------------------------------------------------------------- /img/filter-run-id.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/filter-run-id.png -------------------------------------------------------------------------------- /img/fork-repository.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/fork-repository.png -------------------------------------------------------------------------------- /img/github-workflow-duration-seconds-gauge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/github-workflow-duration-seconds-gauge.png -------------------------------------------------------------------------------- /img/metrics-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/metrics-graph.png -------------------------------------------------------------------------------- /img/metrics-prom.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/metrics-prom.png 
-------------------------------------------------------------------------------- /img/repository-secret.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/repository-secret.png -------------------------------------------------------------------------------- /img/search-trace-run-id.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/search-trace-run-id.png -------------------------------------------------------------------------------- /img/trace-attributes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/trace-attributes.png -------------------------------------------------------------------------------- /img/trace-detail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/trace-detail.png -------------------------------------------------------------------------------- /img/traces-jager.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/traces-jager.png -------------------------------------------------------------------------------- /img/verify-completed-job.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paper2/github-actions-opentelemetry/a505841fe94af9400e7110e67c15036d71f70620/img/verify-completed-job.png -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "typescript-action", 3 | "description": "GitHub Actions TypeScript template", 4 | "version": "v0.5.0", 5 | "author": "", 6 | "private": true, 7 | "homepage": "https://github.com/actions/typescript-action", 8 | "repository": { 9 | "type": "git", 10 | "url": "git+https://github.com/actions/typescript-action.git" 11 | }, 12 | "type": "module", 13 | "bugs": { 14 | "url": "https://github.com/actions/typescript-action/issues" 15 | }, 16 | "keywords": [ 17 | "actions", 18 | "node", 19 | "setup" 20 | ], 21 | "exports": { 22 | ".": "./dist/index.js" 23 | }, 24 | "engines": { 25 | "node": ">=20" 26 | }, 27 | "scripts": { 28 | "bundle": "npm run format:write && npm run package", 29 | "coverage": "npx make-coverage-badge --output-path ./badges/coverage.svg", 30 | "format:write": "npx prettier --write .", 31 | "format:check": "npx prettier --check .", 32 | "lint": "npx eslint . 
-c eslint.config.mjs", 33 | "package": "npx ncc build src/index.ts -o dist --source-map --license licenses.txt", 34 | "package:watch": "npm run package -- --watch", 35 | "test": "vitest --coverage", 36 | "test-local": "vitest src/main.test.ts -t 'should run successfully' --hideSkippedTests --reporter=dot", 37 | "all": "npm run format:write && npm run lint && npm run test -- --run && npm run coverage && npm run package" 38 | }, 39 | "license": "MIT", 40 | "dependencies": { 41 | "@actions/core": "^1.11.1", 42 | "@actions/github": "^6.0.0", 43 | "@octokit/rest": "^21.1.0", 44 | "@octokit/webhooks-types": "^7.6.1", 45 | "@opentelemetry/api": "^1.9.0", 46 | "@opentelemetry/exporter-metrics-otlp-proto": "^0.57.1", 47 | "@opentelemetry/exporter-trace-otlp-proto": "^0.57.1", 48 | "@opentelemetry/resources": "^1.30.1", 49 | "@opentelemetry/sdk-trace-base": "^1.28.0", 50 | "ts-retry": "^5.0.1" 51 | }, 52 | "devDependencies": { 53 | "@eslint/eslintrc": "^3.2.0", 54 | "@eslint/js": "^9.19.0", 55 | "@octokit/types": "^13.5.0", 56 | "@opentelemetry/sdk-node": "0.56.0", 57 | "@opentelemetry/semantic-conventions": "^1.27.0", 58 | "@types/node": "^22.13.1", 59 | "@typescript-eslint/eslint-plugin": "^8.23.0", 60 | "@typescript-eslint/parser": "^8.23.0", 61 | "@vercel/ncc": "^0.38.3", 62 | "@vitest/coverage-v8": "^3.0.5", 63 | "@vitest/eslint-plugin": "^1.1.25", 64 | "eslint": "^9.19.0", 65 | "eslint-config-prettier": "^10.0.1", 66 | "eslint-import-resolver-typescript": "^3.7.0", 67 | "eslint-plugin-github": "^5.1.7", 68 | "eslint-plugin-import": "^2.30.0", 69 | "eslint-plugin-jsonc": "^2.19.1", 70 | "eslint-plugin-prettier": "^5.2.3", 71 | "eslint-plugin-vitest": "^0.5.4", 72 | "globals": "^15.14.0", 73 | "make-coverage-badge": "^1.2.0", 74 | "prettier": "^3.4.2", 75 | "prettier-eslint": "^16.3.0", 76 | "tsx": "^4.19.2", 77 | "typescript": "~5.7.3", 78 | "vitest": "^3.0.5" 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /script/release: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit early 4 | # See: https://www.gnu.org/savannah-checkouts/gnu/bash/manual/bash.html#The-Set-Builtin 5 | set -e 6 | 7 | # About: 8 | # 9 | # This is a helper script to tag and push a new release. GitHub Actions use 10 | # release tags to allow users to select a specific version of the action to use. 11 | # 12 | # See: https://github.com/actions/typescript-action#publishing-a-new-release 13 | # See: https://github.com/actions/toolkit/blob/master/docs/action-versioning.md#recommendations 14 | # 15 | # This script will do the following: 16 | # 17 | # 1. Retrieve the latest release tag 18 | # 2. Display the latest release tag 19 | # 3. Prompt the user for a new release tag 20 | # 4. Validate the new release tag 21 | # 5. Remind user to update the version field in package.json 22 | # 6. Tag a new release 23 | # 7. Set 'is_major_release' variable 24 | # 8. Point separate major release tag (e.g. v1, v2) to the new release 25 | # 9. Push the new tags (with commits, if any) to remote 26 | # 10. 
If this is a major release, create a 'releases/v#' branch and push 27 | # 28 | # Usage: 29 | # 30 | # script/release 31 | 32 | # Variables 33 | semver_tag_regex='v[0-9]+\.[0-9]+\.[0-9]+$' 34 | semver_tag_glob='v[0-9].[0-9].[0-9]*' 35 | git_remote='origin' 36 | major_semver_tag_regex='\(v[0-9]*\)' 37 | 38 | # Terminal colors 39 | OFF='\033[0m' 40 | BOLD_RED='\033[1;31m' 41 | BOLD_GREEN='\033[1;32m' 42 | BOLD_BLUE='\033[1;34m' 43 | BOLD_PURPLE='\033[1;35m' 44 | BOLD_UNDERLINED='\033[1;4m' 45 | BOLD='\033[1m' 46 | 47 | # 1. Retrieve the latest release tag 48 | if ! latest_tag=$(git describe --abbrev=0 --match="$semver_tag_glob"); then 49 | # There are no existing release tags 50 | echo -e "No tags found (yet) - Continue to create and push your first tag" 51 | latest_tag="[unknown]" 52 | fi 53 | 54 | # 2. Display the latest release tag 55 | echo -e "The latest release tag is: ${BOLD_BLUE}${latest_tag}${OFF}" 56 | 57 | # 3. Prompt the user for a new release tag 58 | read -r -p 'Enter a new release tag (vX.X.X format): ' new_tag 59 | 60 | # 4. Validate the new release tag 61 | if echo "$new_tag" | grep -q -E "$semver_tag_regex"; then 62 | # Release tag is valid 63 | echo -e "Tag: ${BOLD_BLUE}$new_tag${OFF} is valid syntax" 64 | else 65 | # Release tag is not in `vX.X.X` format 66 | echo -e "Tag: ${BOLD_BLUE}$new_tag${OFF} is ${BOLD_RED}not valid${OFF} (must be in ${BOLD}vX.X.X${OFF} format)" 67 | exit 1 68 | fi 69 | 70 | # 5. Remind user to update the version field in package.json 71 | echo -e -n "Make sure the version field in package.json is ${BOLD_BLUE}$new_tag${OFF}. Yes? [Y/${BOLD_UNDERLINED}n${OFF}] " 72 | read -r YN 73 | 74 | if [[ ! ($YN == "y" || $YN == "Y") ]]; then 75 | # Package.json version field is not up to date 76 | echo -e "Please update the package.json version to ${BOLD_PURPLE}$new_tag${OFF} and commit your changes" 77 | exit 1 78 | fi 79 | 80 | # 6. Tag a new release 81 | git tag "$new_tag" --annotate --message "$new_tag Release" 82 | echo -e "Tagged: ${BOLD_GREEN}$new_tag${OFF}" 83 | 84 | # 7. Set 'is_major_release' variable 85 | latest_major_release_tag=$(expr "$latest_tag" : "$major_semver_tag_regex") 86 | new_major_release_tag=$(expr "$new_tag" : "$major_semver_tag_regex") 87 | 88 | if ! [[ "$new_major_release_tag" = "$latest_major_release_tag" ]]; then 89 | is_major_release='yes' 90 | else 91 | is_major_release='no' 92 | fi 93 | 94 | # 8. Point separate major release tag (e.g. v1, v2) to the new release 95 | if [ $is_major_release = 'yes' ]; then 96 | # Create a new major verison tag and point it to this release 97 | git tag "$new_major_release_tag" --annotate --message "$new_major_release_tag Release" 98 | echo -e "New major version tag: ${BOLD_GREEN}$new_major_release_tag${OFF}" 99 | else 100 | # Update the major verison tag to point it to this release 101 | git tag "$latest_major_release_tag" --force --annotate --message "Sync $latest_major_release_tag tag with $new_tag" 102 | echo -e "Synced ${BOLD_GREEN}$latest_major_release_tag${OFF} with ${BOLD_GREEN}$new_tag${OFF}" 103 | fi 104 | 105 | # 9. 
Push the new tags (with commits, if any) to remote 106 | git push --follow-tags 107 | 108 | if [ $is_major_release = 'yes' ]; then 109 | # New major version tag is pushed with the '--follow-tags' flags 110 | echo -e "Tags: ${BOLD_GREEN}$new_major_release_tag${OFF} and ${BOLD_GREEN}$new_tag${OFF} pushed to remote" 111 | else 112 | # Force push the updated major version tag 113 | git push $git_remote "$latest_major_release_tag" --force 114 | echo -e "Tags: ${BOLD_GREEN}$latest_major_release_tag${OFF} and ${BOLD_GREEN}$new_tag${OFF} pushed to remote" 115 | fi 116 | 117 | # 10. If this is a major release, create a 'releases/v#' branch and push 118 | if [ $is_major_release = 'yes' ]; then 119 | git branch "releases/$latest_major_release_tag" "$latest_major_release_tag" 120 | echo -e "Branch: ${BOLD_BLUE}releases/$latest_major_release_tag${OFF} created from ${BOLD_BLUE}$latest_major_release_tag${OFF} tag" 121 | git push --set-upstream $git_remote "releases/$latest_major_release_tag" 122 | echo -e "Branch: ${BOLD_GREEN}releases/$latest_major_release_tag${OFF} pushed to remote" 123 | fi 124 | 125 | # Completed 126 | echo -e "${BOLD_GREEN}Done!${OFF}" 127 | -------------------------------------------------------------------------------- /script/update: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Exit early 3 | # See: https://www.gnu.org/savannah-checkouts/gnu/bash/manual/bash.html#The-Set-Builtin 4 | set -eu 5 | 6 | git switch "$1" 7 | git pull origin main 8 | npm install 9 | npm run all 10 | git add -A 11 | git commit -m 'npm run all for update lib' 12 | git push 13 | -------------------------------------------------------------------------------- /src/github/check-completed.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, expect, test } from 'vitest' 2 | import { WorkflowResults, WorkflowRun, WorkflowRunJob } from './types.js' 3 | import { checkCompleted } from './check-completed.js' 4 | 5 | describe('checkCompleted', () => { 6 | const createMockWorkflowResults = ({ 7 | workflowRun = {}, 8 | workflowRunJob = {}, 9 | step = {} 10 | }): WorkflowResults => ({ 11 | workflowRun: { 12 | id: 111111111, 13 | status: 'completed', 14 | name: 'Test Workflow', 15 | ...workflowRun 16 | } as WorkflowRun, 17 | workflowRunJobs: [ 18 | { 19 | id: 22222222, 20 | status: 'completed', 21 | conclusion: 'success', 22 | completed_at: '2024-11-30T12:00:00Z', 23 | steps: [ 24 | { 25 | name: 'step-1', 26 | status: 'completed', 27 | started_at: '2024-11-30T11:00:00Z', 28 | completed_at: '2024-11-30T12:00:00Z' 29 | }, 30 | { 31 | name: 'step-2', 32 | status: 'completed', 33 | started_at: '2024-11-30T11:00:00Z', 34 | completed_at: '2024-11-30T12:00:00Z' 35 | } 36 | ] 37 | } as WorkflowRunJob, 38 | { 39 | id: 33333333, 40 | status: 'completed', 41 | completed_at: '2024-11-30T12:00:00Z', 42 | conclusion: 'success', 43 | steps: [ 44 | { 45 | name: 'step-1', 46 | status: 'completed', 47 | started_at: '2024-11-30T11:00:00Z', 48 | completed_at: '2024-11-30T12:00:00Z', 49 | ...step 50 | } 51 | ], 52 | ...workflowRunJob 53 | } as WorkflowRunJob 54 | ] 55 | }) 56 | 57 | test('returns true when workflow, jobs, and steps are completed', () => { 58 | const mockData = createMockWorkflowResults({}) 59 | expect(checkCompleted(mockData)).toBe(true) 60 | }) 61 | 62 | describe('check workflow', () => { 63 | test('returns false when workflowRun.status is not "completed"', () => { 64 | const mockData = 
createMockWorkflowResults({ 65 | workflowRun: { status: 'in_progress' } 66 | }) 67 | expect(checkCompleted(mockData)).toBe(false) 68 | }) 69 | 70 | test('returns false when workflowRun.name is not defined', () => { 71 | const mockData = createMockWorkflowResults({ 72 | workflowRun: { name: undefined } 73 | }) 74 | expect(checkCompleted(mockData)).toBe(false) 75 | }) 76 | }) 77 | 78 | describe('check jobs', () => { 79 | test('returns false when a job is not completed', () => { 80 | const mockData = createMockWorkflowResults({ 81 | workflowRunJob: { status: 'in_progress' } 82 | }) 83 | expect(checkCompleted(mockData)).toBe(false) 84 | }) 85 | 86 | test('returns false when a job has no completed_at', () => { 87 | const mockData = createMockWorkflowResults({ 88 | workflowRunJob: { completed_at: undefined } 89 | }) 90 | expect(checkCompleted(mockData)).toBe(false) 91 | }) 92 | }) 93 | 94 | describe('check steps', () => { 95 | test('returns false when a job has no steps', () => { 96 | const mockData = createMockWorkflowResults({ 97 | workflowRunJob: { steps: undefined } 98 | }) 99 | expect(checkCompleted(mockData)).toBe(false) 100 | }) 101 | 102 | test('returns false when a step is not completed', () => { 103 | const mockData = createMockWorkflowResults({ 104 | step: { 105 | status: 'in_progress' 106 | } 107 | }) 108 | expect(checkCompleted(mockData)).toBe(false) 109 | }) 110 | 111 | test('returns false when a step does not have started_at', () => { 112 | const mockData = createMockWorkflowResults({ 113 | step: { 114 | started_at: undefined 115 | } 116 | }) 117 | expect(checkCompleted(mockData)).toBe(false) 118 | }) 119 | 120 | test('returns false when a step does not have completed_at', () => { 121 | const mockData = createMockWorkflowResults({ 122 | step: { 123 | completed_at: undefined 124 | } 125 | }) 126 | expect(checkCompleted(mockData)).toBe(false) 127 | }) 128 | 129 | test('returns false when a conclusion is not defined', () => { 130 | const mockData = createMockWorkflowResults({ 131 | workflowRunJob: { conclusion: null } 132 | }) 133 | expect(checkCompleted(mockData)).toBe(false) 134 | }) 135 | }) 136 | }) 137 | -------------------------------------------------------------------------------- /src/github/check-completed.ts: -------------------------------------------------------------------------------- 1 | import { WorkflowResults } from './types.js' 2 | import * as core from '@actions/core' 3 | 4 | // GitHub Actions may be eventual consistency. 5 | export const checkCompleted = (workflowResult: WorkflowResults): boolean => { 6 | const { workflowRun, workflowRunJobs } = workflowResult 7 | 8 | let status = true 9 | 10 | // check workflow 11 | if (workflowRun.status !== 'completed') { 12 | core.warning(`This workflow is not completed. id: ${workflowRun.id}`) 13 | status = false 14 | } 15 | if (!workflowRun.name) { 16 | core.warning('workflowRun.name should be defined.') 17 | status = false 18 | } 19 | 20 | // check jobs 21 | for (const job of workflowRunJobs) { 22 | if (job.status !== 'completed') { 23 | core.warning( 24 | `A job is not completed. workflowRun.id: ${workflowRun.id}, job.id: ${job.id} ` 25 | ) 26 | status = false 27 | } 28 | if (!job.completed_at) { 29 | // TODO: should exit immediately and not failed because it is not recoverable empirically. 30 | core.warning('job.completed_at should be defined.') 31 | status = false 32 | } 33 | if (!job.conclusion) { 34 | core.warning( 35 | `job.conclusion should be defined. 
workflowRun.id: ${workflowRun.id}, job.id: ${job.id}` 36 | ) 37 | status = false 38 | } 39 | } 40 | 41 | // check steps 42 | for (const job of workflowRunJobs) { 43 | if (!job.steps) { 44 | core.warning( 45 | `A job has no steps. workflowRun.id: ${workflowRun.id}, job.id: ${job.id}` 46 | ) 47 | status = false 48 | continue 49 | } 50 | for (const step of job.steps) { 51 | const stepLoggedProperties = `workflowRun.id: ${workflowRun.id}, job.id: ${job.id}, step.name: ${step.name}` 52 | if (step.status !== 'completed') { 53 | core.warning(`A step is not completed. ${stepLoggedProperties}`) 54 | status = false 55 | } 56 | if (!step.started_at) { 57 | core.warning( 58 | `step.started_at should be defined. ${stepLoggedProperties}` 59 | ) 60 | status = false 61 | } 62 | if (!step.completed_at) { 63 | core.warning( 64 | `step.completed_at should be defined. ${stepLoggedProperties}` 65 | ) 66 | status = false 67 | } 68 | } 69 | } 70 | 71 | return status 72 | } 73 | -------------------------------------------------------------------------------- /src/github/github.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, test, expect } from 'vitest' 2 | import { fetchWorkflowResults } from './github.js' 3 | 4 | describe('fetchWorkflowResults', () => { 5 | // Tips: If API limit exceed, authenticate by using below command 6 | // $ export GITHUB_TOKEN=`gh auth token` 7 | test('should fetch results using real api', async () => { 8 | // not test retry because it needs mock of checkCompleted but it affects correct test case. 9 | await expect(fetchWorkflowResults(0, 1)).resolves.not.toThrow() 10 | }) 11 | }) 12 | -------------------------------------------------------------------------------- /src/github/github.ts: -------------------------------------------------------------------------------- 1 | import { Octokit } from '@octokit/rest' 2 | import * as github from '@actions/github' 3 | import { EventPayloadMap } from '@octokit/webhooks-types' 4 | import settings from '../settings.js' 5 | import { 6 | WorkflowRun, 7 | WorkflowRunJobs, 8 | WorkflowRunContext, 9 | WorkflowResults, 10 | GitHubContext 11 | } from './types.js' 12 | import * as core from '@actions/core' 13 | import { fail } from 'assert' 14 | import { isTooManyTries, retryAsync } from 'ts-retry' 15 | import { checkCompleted } from './check-completed.js' 16 | 17 | export const fetchWorkflowResults = async ( 18 | delayMs = 1000, 19 | maxTry = 10 20 | ): Promise => { 21 | const token = core.getInput('GITHUB_TOKEN') || process.env.GITHUB_TOKEN // read environment variable for testing 22 | const octokit = new Octokit({ 23 | baseUrl: process.env.GITHUB_API_URL || 'https://api.github.com', 24 | auth: token 25 | }) 26 | const workflowRunContext = getWorkflowRunContext(github.context) 27 | try { 28 | // A workflow sometime has not completed in spite of trigger of workflow completed event. 
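    // The run and job APIs can briefly return stale or partially populated data after the
    // `workflow_run` completed event fires, so the retryAsync call below re-fetches until
    // checkCompleted() reports that the run, every job, and every step are completed.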
29 | // FYI: https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#workflow_run 30 | const results = await retryAsync( 31 | async () => ({ 32 | workflowRun: await fetchWorkflowRun(octokit, workflowRunContext), 33 | workflowRunJobs: await fetchWorkflowRunJobs(octokit, workflowRunContext) 34 | }), 35 | { 36 | delay: delayMs, 37 | maxTry, 38 | onError: (err, currentTry) => 39 | console.error(`current try: ${currentTry}`, err), 40 | until: lastResult => checkCompleted(lastResult) 41 | } 42 | ) 43 | return results 44 | } catch (err) { 45 | core.error('failed to get results of workflow run') 46 | if (isTooManyTries(err)) { 47 | console.error('retry count exceeded maxTry') 48 | } 49 | console.error(err) 50 | throw err 51 | } 52 | } 53 | 54 | const fetchWorkflowRun = async ( 55 | octokit: Octokit, 56 | workflowContext: WorkflowRunContext 57 | ): Promise => { 58 | const res = await octokit.rest.actions.getWorkflowRunAttempt({ 59 | owner: workflowContext.owner, 60 | repo: workflowContext.repo, 61 | run_id: workflowContext.runId, 62 | attempt_number: workflowContext.attempt_number 63 | }) 64 | return { 65 | ...res.data 66 | } 67 | } 68 | 69 | const fetchWorkflowRunJobs = async ( 70 | octokit: Octokit, 71 | workflowContext: WorkflowRunContext 72 | ): Promise => { 73 | const res = await octokit.rest.actions.listJobsForWorkflowRun({ 74 | owner: workflowContext.owner, 75 | repo: workflowContext.repo, 76 | run_id: workflowContext.runId, 77 | per_page: 100 78 | }) 79 | return res.data.jobs 80 | } 81 | 82 | export const getWorkflowRunContext = ( 83 | context: GitHubContext 84 | ): WorkflowRunContext => { 85 | // If this workflow is trigged on `workflow_run`, set runId it's id. 86 | // Detail of `workflow_run` event: https://docs.github.com/ja/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#workflow_run 87 | const workflowRunEvent = context.payload as 88 | | EventPayloadMap['workflow_run'] 89 | | undefined 90 | 91 | const runId = settings.workflowRunId ?? workflowRunEvent?.workflow_run?.id 92 | if (!runId) fail('Workflow run id should be defined.') 93 | 94 | return { 95 | owner: settings.owner ?? context.repo.owner, 96 | repo: settings.repository ?? 
context.repo.repo, 97 | attempt_number: workflowRunEvent?.workflow_run?.run_attempt || 1, 98 | runId 99 | } 100 | } 101 | 102 | export const getLatestCompletedAt = (jobs: WorkflowRunJobs): string => { 103 | const jobCompletedAtDates = jobs.map(job => { 104 | if (job.completed_at === null) fail('Jobs should be completed.') 105 | return new Date(job.completed_at) 106 | }) 107 | const maxDateNumber = Math.max(...jobCompletedAtDates.map(Number)) 108 | return new Date(maxDateNumber).toISOString() 109 | } 110 | -------------------------------------------------------------------------------- /src/github/index.ts: -------------------------------------------------------------------------------- 1 | export * from './github.js' 2 | export * from './types.js' 3 | -------------------------------------------------------------------------------- /src/github/types.ts: -------------------------------------------------------------------------------- 1 | import { Endpoints } from '@octokit/types' 2 | import { context } from '@actions/github' 3 | 4 | export type WorkflowRun = 5 | Endpoints['GET /repos/{owner}/{repo}/actions/runs/{run_id}']['response']['data'] 6 | export type WorkflowRunJobs = 7 | Endpoints['GET /repos/{owner}/{repo}/actions/runs/{run_id}/jobs']['response']['data']['jobs'] 8 | export type WorkflowRunJob = WorkflowRunJobs[number] 9 | 10 | export interface WorkflowRunContext { 11 | readonly owner: string 12 | readonly repo: string 13 | readonly runId: number 14 | readonly attempt_number: number 15 | } 16 | export interface WorkflowResults { 17 | workflowRun: WorkflowRun 18 | workflowRunJobs: WorkflowRunJobs 19 | } 20 | 21 | export type GitHubContext = typeof context 22 | -------------------------------------------------------------------------------- /src/index.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, describe, vi, test } from 'vitest' 2 | import * as main from './main.js' 3 | 4 | // Mock the action's entrypoint 5 | const runMock = vi.spyOn(main, 'run').mockImplementation(async () => {}) 6 | 7 | describe('index', () => { 8 | test('calls run when imported', async () => { 9 | await import('./index.js') 10 | expect(runMock).toHaveBeenCalled() 11 | }) 12 | }) 13 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * The entrypoint for the action. 
3 | */ 4 | import { run } from './main.js' 5 | 6 | // eslint-disable-next-line @typescript-eslint/no-floating-promises 7 | run() 8 | -------------------------------------------------------------------------------- /src/instrumentation/index.ts: -------------------------------------------------------------------------------- 1 | export * from './instrumentation.js' 2 | -------------------------------------------------------------------------------- /src/instrumentation/instrumentation.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, test, expect, beforeEach, afterEach } from 'vitest' 2 | import { initialize, forceFlush, shutdown } from './instrumentation.js' 3 | import { opentelemetryAllDisable } from '../utils/opentelemetry-all-disable.js' 4 | import * as opentelemetry from '@opentelemetry/api' 5 | import settings from '../settings.js' 6 | import { InMemorySpanExporter } from '@opentelemetry/sdk-trace-base' 7 | import { 8 | InMemoryMetricExporter, 9 | AggregationTemporality 10 | } from '@opentelemetry/sdk-metrics' 11 | 12 | settings.logeLevel = 'debug' // For testing 13 | 14 | describe('initialize', () => { 15 | beforeEach(() => { 16 | opentelemetryAllDisable() 17 | }) 18 | 19 | test('should initialize successfully', () => { 20 | expect(() => initialize()).not.toThrow() 21 | }) 22 | test('should throw error when multiple initialize', () => { 23 | expect(() => initialize()).not.toThrow() 24 | expect(() => initialize()).toThrow( 25 | 'setGlobalMeterProvider failed. please check settings or duplicate registration.' 26 | ) 27 | opentelemetry.metrics.disable() 28 | expect(() => initialize()).toThrow( 29 | 'setGlobalTracerProvider failed. please check settings or duplicate registration.' 30 | ) 31 | }) 32 | 33 | describe('initializeMeter', () => { 34 | const metricsExporter = new InMemoryMetricExporter( 35 | AggregationTemporality.DELTA 36 | ) 37 | afterEach(() => { 38 | settings.FeatureFlagMetrics = true 39 | metricsExporter.reset() 40 | }) 41 | test('should export metrics', async () => { 42 | expect(() => initialize(metricsExporter, undefined)).not.toThrow() 43 | const meter = opentelemetry.metrics.getMeter('test') 44 | meter.createCounter('test') 45 | await expect(forceFlush()).resolves.not.toThrow() 46 | expect(metricsExporter.getMetrics()).toHaveLength(1) 47 | }) 48 | test('should not export metrics when disable FeatureFlagMetrics', async () => { 49 | settings.FeatureFlagMetrics = false 50 | expect(() => initialize(metricsExporter, undefined)).not.toThrow() 51 | const meter = opentelemetry.metrics.getMeter('test') 52 | meter.createCounter('test') 53 | await expect(forceFlush()).resolves.not.toThrow() 54 | expect(metricsExporter.getMetrics()).toHaveLength(0) 55 | }) 56 | }) 57 | 58 | describe('initializeTracer', () => { 59 | const spanExporter = new InMemorySpanExporter() 60 | afterEach(() => { 61 | settings.FeatureFlagTrace = true 62 | spanExporter.reset() 63 | }) 64 | 65 | test('should export trace', async () => { 66 | expect(() => initialize(undefined, spanExporter)).not.toThrow() 67 | const tracer = opentelemetry.trace.getTracer('test') 68 | tracer.startSpan('test').end() 69 | await expect(forceFlush()).resolves.not.toThrow() 70 | expect(spanExporter.getFinishedSpans()).toHaveLength(1) 71 | }) 72 | test('should not export trace when disabled FeatureFlagTrace', async () => { 73 | settings.FeatureFlagTrace = false 74 | expect(() => initialize(undefined, spanExporter)).not.toThrow() 75 | const tracer = 
opentelemetry.trace.getTracer('test') 76 | tracer.startSpan('test').end() 77 | await expect(forceFlush()).resolves.not.toThrow() 78 | expect(spanExporter.getFinishedSpans()).toHaveLength(0) 79 | }) 80 | }) 81 | }) 82 | 83 | describe('shutdown', () => { 84 | beforeEach(() => { 85 | opentelemetryAllDisable() 86 | }) 87 | test('forceFlush and shutdown should be success', async () => { 88 | initialize() 89 | await expect(forceFlush()).resolves.not.toThrow() 90 | await expect(shutdown()).resolves.not.toThrow() 91 | }) 92 | }) 93 | -------------------------------------------------------------------------------- /src/instrumentation/instrumentation.ts: -------------------------------------------------------------------------------- 1 | import { detectResourcesSync, envDetector } from '@opentelemetry/resources' 2 | import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-proto' 3 | import { 4 | MeterProvider, 5 | PeriodicExportingMetricReader, 6 | PushMetricExporter 7 | } from '@opentelemetry/sdk-metrics' 8 | import * as opentelemetry from '@opentelemetry/api' 9 | import { 10 | SpanExporter, 11 | BasicTracerProvider, 12 | BatchSpanProcessor 13 | } from '@opentelemetry/sdk-trace-base' 14 | import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-proto' 15 | import settings from '../settings.js' 16 | 17 | let traceProvider: BasicTracerProvider 18 | let meterProvider: MeterProvider 19 | 20 | export const initialize = ( 21 | meterExporter?: PushMetricExporter, 22 | spanExporter?: SpanExporter 23 | ): void => { 24 | if (settings.logeLevel === 'debug') 25 | opentelemetry.diag.setLogger( 26 | new opentelemetry.DiagConsoleLogger(), 27 | opentelemetry.DiagLogLevel.DEBUG 28 | ) 29 | initializeMeter(meterExporter) 30 | initializeTracer(spanExporter) 31 | } 32 | 33 | const initializeMeter = (exporter?: PushMetricExporter): void => { 34 | if (settings.FeatureFlagMetrics) { 35 | meterProvider = new MeterProvider({ 36 | readers: [ 37 | new PeriodicExportingMetricReader({ 38 | exporter: exporter ?? new OTLPMetricExporter(), 39 | // Exporter has not implemented the manual flush method yet. 40 | // High interval prevents from generating duplicate metrics. 41 | exportIntervalMillis: 24 * 60 * 60 * 1000 // 24 hours 42 | }) 43 | ], 44 | resource: detectResourcesSync({ detectors: [envDetector] }) 45 | }) 46 | } else { 47 | meterProvider = new MeterProvider() 48 | } 49 | const result = opentelemetry.metrics.setGlobalMeterProvider(meterProvider) 50 | if (!result) { 51 | throw new Error( 52 | 'setGlobalMeterProvider failed. please check settings or duplicate registration.' 53 | ) 54 | } 55 | } 56 | 57 | const initializeTracer = (exporter?: SpanExporter): void => { 58 | if (settings.FeatureFlagTrace) { 59 | traceProvider = new BasicTracerProvider({ 60 | resource: detectResourcesSync({ detectors: [envDetector] }), 61 | spanProcessors: [ 62 | new BatchSpanProcessor(exporter || new OTLPTraceExporter({})) 63 | ] 64 | }) 65 | } else { 66 | traceProvider = new BasicTracerProvider() 67 | } 68 | const result = opentelemetry.trace.setGlobalTracerProvider(traceProvider) 69 | if (!result) { 70 | throw new Error( 71 | 'setGlobalTracerProvider failed. please check settings or duplicate registration.' 
72 | ) 73 | } 74 | } 75 | 76 | export const forceFlush = async (): Promise => { 77 | await meterProvider.forceFlush() 78 | await traceProvider.forceFlush() 79 | } 80 | 81 | export const shutdown = async (): Promise => { 82 | await meterProvider.shutdown() 83 | await traceProvider.shutdown() 84 | } 85 | -------------------------------------------------------------------------------- /src/main.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, test, expect, vi, beforeEach } from 'vitest' 2 | import * as github from './github/index.js' 3 | import * as instrumentation from './instrumentation/index.js' 4 | import { run } from './main.js' 5 | import { opentelemetryAllDisable } from './utils/opentelemetry-all-disable.js' 6 | 7 | // For test error handle 8 | const fetchWorkflowResultsSpy = vi.spyOn(github, 'fetchWorkflowResults') 9 | const forceFlushSpy = vi.spyOn(instrumentation, 'forceFlush') 10 | 11 | describe('run function', () => { 12 | beforeEach(() => { 13 | opentelemetryAllDisable() 14 | }) 15 | describe('should exit with expected code', () => { 16 | test('should run successfully', async () => { 17 | await expect(run()).rejects.toThrowError( 18 | 'process.exit unexpectedly called with "0"' 19 | ) 20 | }) 21 | test('should exit with 1 when fetching workflow results failed', async () => { 22 | fetchWorkflowResultsSpy.mockRejectedValueOnce(new Error('test')) 23 | await expect(run()).rejects.toThrowError( 24 | 'process.exit unexpectedly called with "1"' 25 | ) 26 | }) 27 | test('should exit with 1 when forceFlush failed', async () => { 28 | forceFlushSpy.mockRejectedValueOnce(new Error('test')) 29 | await expect(run()).rejects.toThrowError( 30 | 'process.exit unexpectedly called with "1"' 31 | ) 32 | }) 33 | }) 34 | }) 35 | -------------------------------------------------------------------------------- /src/main.ts: -------------------------------------------------------------------------------- 1 | import * as core from '@actions/core' 2 | import { fetchWorkflowResults } from './github/index.js' 3 | import { createMetrics } from './metrics/index.js' 4 | import { createTrace } from './traces/index.js' 5 | import { forceFlush, initialize, shutdown } from './instrumentation/index.js' 6 | 7 | /** 8 | * The main function for the action. 9 | * @returns {Promise} Resolves when the action is complete. 10 | */ 11 | export async function run(): Promise { 12 | // required: run initialize() first. 13 | // usually use --required runtime option for first reading. 14 | // for simple use this action, this is satisfied on here. 
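  // Note: initialize() registers the global meter and tracer providers. It must run before
  // createMetrics() and createTrace() request meters and tracers; otherwise those calls
  // would return no-op instruments and nothing would be exported.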
15 | initialize() 16 | 17 | let exitCode = 0 18 | 19 | try { 20 | const results = await fetchWorkflowResults() 21 | await createMetrics(results) 22 | await createTrace(results) 23 | } catch (error) { 24 | if (error instanceof Error) core.error(error) 25 | console.error(error) 26 | exitCode = 1 27 | } 28 | 29 | try { 30 | await forceFlush() 31 | console.log('Providers force flush successfully.') 32 | await shutdown() 33 | console.log('Providers shutdown successfully.') 34 | } catch (error) { 35 | if (error instanceof Error) core.error(error) 36 | console.error(error) 37 | exitCode = 1 38 | } 39 | 40 | process.exit(exitCode) 41 | } 42 | -------------------------------------------------------------------------------- /src/metrics/constants.ts: -------------------------------------------------------------------------------- 1 | export const descriptorNames = { 2 | JOB_DURATION: 'github.job.duration', 3 | JOB_QUEUED_DURATION: 'github.job.queued_duration', 4 | WORKFLOW_DURATION: 'github.workflow.duration' 5 | } as const satisfies Record 6 | 7 | export const attributeKeys = { 8 | REPOSITORY: 'repository', 9 | WORKFLOW_NAME: 'workflow.name', 10 | JOB_NAME: 'job.name', 11 | JOB_CONCLUSION: 'job.conclusion' 12 | } as const satisfies Record 13 | -------------------------------------------------------------------------------- /src/metrics/create-gauges.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, test, expect, vi } from 'vitest' 2 | import * as opentelemetry from '@opentelemetry/api' 3 | import { createGauge } from './create-gauges.js' 4 | import { 5 | InMemoryMetricExporter, 6 | AggregationTemporality, 7 | MeterProvider, 8 | PeriodicExportingMetricReader 9 | } from '@opentelemetry/sdk-metrics' 10 | 11 | interface InMemoryProvider { 12 | provider: MeterProvider 13 | exporter: InMemoryMetricExporter 14 | } 15 | 16 | const createMeterProvider = (): InMemoryProvider => { 17 | const exporter = new InMemoryMetricExporter(AggregationTemporality.DELTA) 18 | const provider = new MeterProvider({ 19 | readers: [ 20 | new PeriodicExportingMetricReader({ 21 | exporter 22 | }) 23 | ] 24 | }) 25 | return { provider, exporter } 26 | } 27 | 28 | describe('createGauge', () => { 29 | test('should create a gauge and observe the value', async () => { 30 | const { provider, exporter } = createMeterProvider() 31 | const mockGetMeter = vi 32 | .spyOn(opentelemetry.metrics, 'getMeter') 33 | .mockImplementation(name => provider.getMeter(name)) 34 | const attributes = { test1: 'test1', test2: 'test2' } 35 | 36 | createGauge('testGauge', 42, attributes) 37 | 38 | await provider.forceFlush() 39 | await provider.shutdown() 40 | 41 | expect(mockGetMeter).toHaveBeenCalledOnce() 42 | expect(exporter.getMetrics()).toMatchObject([ 43 | { 44 | scopeMetrics: [ 45 | { 46 | scope: { 47 | version: '', 48 | schemaUrl: undefined 49 | }, 50 | metrics: [ 51 | { 52 | descriptor: { 53 | name: 'testGauge', 54 | type: 'GAUGE', 55 | description: '', 56 | unit: '', 57 | valueType: 1, 58 | advice: {} 59 | }, 60 | aggregationTemporality: 0, 61 | dataPointType: 2, 62 | dataPoints: [ 63 | { 64 | attributes: { test1: 'test1', test2: 'test2' }, 65 | value: 42 66 | } 67 | ] 68 | } 69 | ] 70 | } 71 | ] 72 | } 73 | ]) 74 | }) 75 | }) 76 | -------------------------------------------------------------------------------- /src/metrics/create-gauges.ts: -------------------------------------------------------------------------------- 1 | import * as opentelemetry from '@opentelemetry/api' 2 | 
import { 3 | getLatestCompletedAt, 4 | WorkflowRun, 5 | WorkflowRunJob, 6 | WorkflowRunJobs 7 | } from '../github/index.js' 8 | import { calcDiffSec } from '../utils/calc-diff-sec.js' 9 | import { descriptorNames as dn, attributeKeys as ak } from './constants.js' 10 | import * as core from '@actions/core' 11 | 12 | export const createGauge = ( 13 | name: string, 14 | value: number, 15 | attributes: opentelemetry.Attributes, 16 | option?: opentelemetry.MetricOptions 17 | ): void => { 18 | const meter = opentelemetry.metrics.getMeter('github-actions-metrics') 19 | 20 | const gauge = meter.createGauge(name, option) 21 | gauge.record(value, attributes) 22 | } 23 | 24 | const createMetricsAttributes = ( 25 | workflow: WorkflowRun, 26 | job?: WorkflowRunJob 27 | ): opentelemetry.Attributes => ({ 28 | [ak.WORKFLOW_NAME]: workflow.name || undefined, 29 | [ak.REPOSITORY]: workflow.repository.full_name, 30 | ...(job && { [ak.JOB_NAME]: job.name }), 31 | ...(job && job.conclusion && { [ak.JOB_CONCLUSION]: job.conclusion }) // conclusion specification: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/about-status-checks#check-statuses-and-conclusions 32 | }) 33 | 34 | export const createWorkflowGauges = ( 35 | workflow: WorkflowRun, 36 | workflowRunJobs: WorkflowRunJobs 37 | ): void => { 38 | const workflowMetricsAttributes = createMetricsAttributes(workflow) 39 | const jobCompletedAtMax = new Date(getLatestCompletedAt(workflowRunJobs)) 40 | createGauge( 41 | dn.WORKFLOW_DURATION, 42 | calcDiffSec(new Date(workflow.created_at), jobCompletedAtMax), 43 | workflowMetricsAttributes, 44 | { unit: 's' } 45 | ) 46 | } 47 | 48 | export const createJobGauges = ( 49 | workflow: WorkflowRun, 50 | workflowRunJobs: WorkflowRunJobs 51 | ): void => { 52 | for (const job of workflowRunJobs) { 53 | if (!job.completed_at) { 54 | continue 55 | } 56 | 57 | const jobMetricsAttributes = createMetricsAttributes(workflow, job) 58 | createGauge( 59 | dn.JOB_DURATION, 60 | calcDiffSec(new Date(job.started_at), new Date(job.completed_at)), 61 | jobMetricsAttributes, 62 | { unit: 's' } 63 | ) 64 | 65 | // The calculation method for GitHub's queue times has not been disclosed. 66 | // Since it is displayed in the job column, it is assumed to be calculated based on job information. 67 | // See. https://docs.github.com/en/actions/administering-github-actions/viewing-github-actions-metrics 68 | const jobQueuedDuration = calcDiffSec( 69 | new Date(job.created_at), 70 | new Date(job.started_at) 71 | ) 72 | if (jobQueuedDuration < 0) { 73 | core.notice( 74 | `${job.name}: Skip to create ${dn.JOB_QUEUED_DURATION} metrics. 
This is a GitHub specification issue that occasionally occurs, so it can't be recover.` 75 | ) 76 | continue 77 | } 78 | createGauge( 79 | dn.JOB_QUEUED_DURATION, 80 | jobQueuedDuration, 81 | jobMetricsAttributes, 82 | { unit: 's' } 83 | ) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/metrics/create-metrics.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, test, expect, afterEach, beforeEach } from 'vitest' 2 | import { WorkflowResults } from '../github/index.js' 3 | import { 4 | InMemoryMetricExporter, 5 | AggregationTemporality, 6 | MetricData 7 | } from '@opentelemetry/sdk-metrics' 8 | import { initialize, forceFlush } from '../instrumentation/index.js' 9 | import { calcDiffSec } from '../utils/calc-diff-sec.js' 10 | import { createMetrics } from './create-metrics.js' 11 | import { opentelemetryAllDisable } from '../utils/opentelemetry-all-disable.js' 12 | import { descriptorNames as dn, attributeKeys as ak } from './constants.js' 13 | import { fail } from 'assert' 14 | import settings from '../settings.js' 15 | 16 | const workflowRunResults = { 17 | workflowRun: { 18 | created_at: '2024-09-01T00:00:00Z', 19 | status: 'completed', 20 | id: 10000000000, 21 | name: 'Test Run', 22 | run_number: 14, 23 | repository: { 24 | full_name: 'paper2/github-actions-opentelemetry' 25 | } 26 | }, 27 | workflowRunJobs: [ 28 | { 29 | created_at: '2024-09-01T00:02:00Z', 30 | started_at: '2024-09-01T00:05:00Z', 31 | completed_at: '2024-09-01T00:10:00Z', 32 | conclusion: 'success', 33 | id: 30000000000, 34 | name: 'job1', 35 | run_id: 10000000000, 36 | workflow_name: 'Test Run', 37 | status: 'completed', 38 | steps: [ 39 | { 40 | name: 'step1_1', 41 | started_at: '2024-09-01T00:05:10Z', 42 | completed_at: '2024-09-01T00:05:20Z', 43 | conclusion: 'success' 44 | }, 45 | { 46 | name: 'step1_2', 47 | started_at: '2024-09-01T00:05:30Z', 48 | completed_at: '2024-09-01T00:05:35', 49 | conclusion: 'success' 50 | }, 51 | { 52 | name: 'step1_3', 53 | started_at: '2024-09-01T00:05:40', 54 | completed_at: '2024-09-01T00:05:50', 55 | conclusion: 'success' 56 | } 57 | ] 58 | }, 59 | { 60 | created_at: '2024-09-01T00:12:00Z', 61 | started_at: '2024-09-01T00:15:00Z', 62 | completed_at: '2024-09-01T00:20:00Z', 63 | conclusion: 'failure', 64 | id: 30000000001, 65 | name: 'job2', 66 | run_id: 10000000000, 67 | workflow_name: 'Test Run', 68 | status: 'completed', 69 | steps: [ 70 | { 71 | name: 'step2_1', 72 | started_at: '2024-09-01T00:15:10Z', 73 | completed_at: '2024-09-01T00:15:20Z', 74 | conclusion: 'success' 75 | }, 76 | { 77 | name: 'step2_2', 78 | started_at: '2024-09-01T00:15:30Z', 79 | completed_at: '2024-09-01T00:15:35', 80 | conclusion: 'success' 81 | }, 82 | { 83 | name: 'step2_3', 84 | started_at: '2024-09-01T00:15:40', 85 | completed_at: '2024-09-01T00:15:50', 86 | conclusion: 'failure' 87 | } 88 | ] 89 | } 90 | ] 91 | } as WorkflowResults 92 | const { workflowRun, workflowRunJobs } = workflowRunResults 93 | 94 | describe('should export expected metrics', () => { 95 | const exporter = new InMemoryMetricExporter(AggregationTemporality.DELTA) 96 | 97 | beforeEach(() => { 98 | exporter.reset() 99 | initialize(exporter) 100 | }) 101 | 102 | afterEach(async () => { 103 | opentelemetryAllDisable() 104 | }) 105 | 106 | test(`should verify ${dn.JOB_DURATION}`, async () => { 107 | await createMetrics(workflowRunResults) 108 | await forceFlush() 109 | const metric = 
findMetricByDescriptorName(exporter, dn.JOB_DURATION) 110 | const dataPoints = metric.dataPoints.map(dataPoint => ({ 111 | taskName: dataPoint.attributes[ak.JOB_NAME], 112 | value: dataPoint.value 113 | })) 114 | 115 | expect(dataPoints).toHaveLength(workflowRunJobs.length) 116 | for (const job of workflowRunJobs) { 117 | if (!job.completed_at) fail() 118 | expect(dataPoints).toContainEqual({ 119 | taskName: job.name, 120 | value: calcDiffSec(new Date(job.started_at), new Date(job.completed_at)) 121 | }) 122 | } 123 | }) 124 | 125 | test(`should verify ${dn.WORKFLOW_DURATION}`, async () => { 126 | await createMetrics(workflowRunResults) 127 | await forceFlush() 128 | const metric = findMetricByDescriptorName(exporter, dn.WORKFLOW_DURATION) 129 | 130 | expect(metric.dataPoints).toHaveLength(1) 131 | if (!workflowRunJobs[1].completed_at) fail() 132 | expect(metric.dataPoints[0].value).toEqual( 133 | calcDiffSec( 134 | new Date(workflowRun.created_at), 135 | new Date(workflowRunJobs[1].completed_at) // last job's complete_at 136 | ) 137 | ) 138 | }) 139 | 140 | test('should not export metrics when disable FeatureFlagMetrics', async () => { 141 | settings.FeatureFlagMetrics = false 142 | await createMetrics(workflowRunResults) 143 | await forceFlush() 144 | expect(exporter.getMetrics()).toHaveLength(1) 145 | expect(exporter.getMetrics()[0].scopeMetrics).toHaveLength(0) 146 | settings.FeatureFlagMetrics = true 147 | }) 148 | 149 | test(`should throw error when createMetrics fails`, async () => { 150 | const brokenResults = {} as WorkflowResults 151 | await expect(createMetrics(brokenResults)).rejects.toThrow() 152 | }) 153 | }) 154 | 155 | describe('should export expected attributes', () => { 156 | const exporter = new InMemoryMetricExporter(AggregationTemporality.DELTA) 157 | 158 | beforeEach(() => { 159 | exporter.reset() 160 | initialize(exporter) 161 | }) 162 | 163 | afterEach(async () => { 164 | opentelemetryAllDisable() 165 | }) 166 | 167 | test('should export workflow name', async () => { 168 | await createMetrics(workflowRunResults) 169 | await forceFlush() 170 | const metricWorkflow = findMetricByDescriptorName( 171 | exporter, 172 | dn.WORKFLOW_DURATION 173 | ) 174 | const metricJob = findMetricByDescriptorName(exporter, dn.JOB_DURATION) 175 | 176 | expect(metricWorkflow.dataPoints).toHaveLength(1) 177 | expect(metricWorkflow.dataPoints[0].attributes[ak.WORKFLOW_NAME]).toEqual( 178 | workflowRun.name 179 | ) 180 | expect(metricJob.dataPoints).toHaveLength(2) 181 | for (const point of metricJob.dataPoints) { 182 | expect(point.attributes[ak.WORKFLOW_NAME]).toEqual(workflowRun.name) 183 | } 184 | }) 185 | 186 | test('should export repository name', async () => { 187 | await createMetrics(workflowRunResults) 188 | await forceFlush() 189 | const metricWorkflow = findMetricByDescriptorName( 190 | exporter, 191 | dn.WORKFLOW_DURATION 192 | ) 193 | const metricJob = findMetricByDescriptorName(exporter, dn.JOB_DURATION) 194 | 195 | expect(metricWorkflow.dataPoints).toHaveLength(1) 196 | expect(metricWorkflow.dataPoints[0].attributes[ak.REPOSITORY]).toEqual( 197 | workflowRun.repository.full_name 198 | ) 199 | expect(metricJob.dataPoints).toHaveLength(2) 200 | for (const point of metricJob.dataPoints) { 201 | expect(point.attributes[ak.REPOSITORY]).toEqual( 202 | workflowRun.repository.full_name 203 | ) 204 | } 205 | }) 206 | 207 | test('should export job name in job metrics', async () => { 208 | await createMetrics(workflowRunResults) 209 | await forceFlush() 210 | const metric = 
findMetricByDescriptorName(exporter, dn.JOB_DURATION) 211 | 212 | expect(metric.dataPoints).toHaveLength(2) 213 | for (const point of metric.dataPoints) { 214 | expect(point.attributes[ak.JOB_NAME]).toMatch(/job[1-2]/) 215 | } 216 | }) 217 | 218 | test('should not export job name in workflow metrics', async () => { 219 | await createMetrics(workflowRunResults) 220 | await forceFlush() 221 | const metric = findMetricByDescriptorName(exporter, dn.WORKFLOW_DURATION) 222 | 223 | expect(metric.dataPoints).toHaveLength(1) 224 | expect(metric.dataPoints[0].attributes[ak.JOB_NAME]).toBeUndefined() 225 | }) 226 | 227 | test('should export job conclusion in job metrics', async () => { 228 | await createMetrics(workflowRunResults) 229 | await forceFlush() 230 | const metric = findMetricByDescriptorName(exporter, dn.JOB_DURATION) 231 | 232 | expect(metric.dataPoints).toHaveLength(2) 233 | expect(metric.dataPoints[0].attributes[ak.JOB_CONCLUSION]).toEqual( 234 | 'success' 235 | ) 236 | expect(metric.dataPoints[1].attributes[ak.JOB_CONCLUSION]).toEqual( 237 | 'failure' 238 | ) 239 | }) 240 | 241 | test('should not export job conclusion in workflow metrics', async () => { 242 | await createMetrics(workflowRunResults) 243 | await forceFlush() 244 | const metric = findMetricByDescriptorName(exporter, dn.WORKFLOW_DURATION) 245 | 246 | expect(metric.dataPoints).toHaveLength(1) 247 | expect(metric.dataPoints[0].attributes[ak.JOB_CONCLUSION]).toBeUndefined() 248 | }) 249 | 250 | test('should not export job conclusion when job conclusion is null', async () => { 251 | const modifiedWorkflowRunResults = { 252 | workflowRun: workflowRunResults.workflowRun, 253 | workflowRunJobs: [ 254 | { 255 | ...workflowRunResults.workflowRunJobs[0], 256 | conclusion: null 257 | } 258 | ] 259 | } 260 | await createMetrics(modifiedWorkflowRunResults) 261 | await forceFlush() 262 | const metric = findMetricByDescriptorName(exporter, dn.JOB_DURATION) 263 | 264 | expect(metric.dataPoints).toHaveLength(1) 265 | expect(metric.dataPoints[0].attributes[ak.JOB_CONCLUSION]).toBeUndefined() 266 | }) 267 | }) 268 | 269 | const findMetricByDescriptorName = ( 270 | exporter: InMemoryMetricExporter, 271 | name: string 272 | ): MetricData => { 273 | expect(exporter.getMetrics()).toHaveLength(1) 274 | expect(exporter.getMetrics()[0].scopeMetrics).toHaveLength(1) 275 | const metric = exporter 276 | .getMetrics()[0] 277 | .scopeMetrics[0].metrics.find(v => v.descriptor.name === name) 278 | if (metric === undefined) { 279 | throw new Error(`${name} descriptor is not found`) 280 | } 281 | return metric 282 | } 283 | -------------------------------------------------------------------------------- /src/metrics/create-metrics.ts: -------------------------------------------------------------------------------- 1 | import settings from '../settings.js' 2 | import { createWorkflowGauges, createJobGauges } from './create-gauges.js' 3 | import { WorkflowResults } from 'src/github/types.js' 4 | 5 | export const createMetrics = async ( 6 | results: WorkflowResults 7 | ): Promise => { 8 | const { workflowRun, workflowRunJobs } = results 9 | if (!settings.FeatureFlagMetrics) { 10 | console.log('metrics feature is disabled.') 11 | return 12 | } 13 | try { 14 | createWorkflowGauges(workflowRun, workflowRunJobs) 15 | createJobGauges(workflowRun, workflowRunJobs) 16 | } catch (error) { 17 | console.error('failed to create metrics') 18 | throw error 19 | } 20 | } 21 | -------------------------------------------------------------------------------- 
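Before moving on to the barrel export in src/metrics/index.ts, a short worked example may help make the gauge calculations in create-gauges.ts above concrete. The snippet below is an illustrative sketch, not a file in this repository; the import path and the literal timestamps are assumptions chosen to mirror the fixtures in create-metrics.test.ts, and the names in the comments refer to the dn.* descriptor constants used above.

import { calcDiffSec } from '../utils/calc-diff-sec.js' // path is illustrative

// Timestamps in the shape the GitHub API returns for a run with a single job.
const workflowCreatedAt = new Date('2024-09-01T00:00:00Z') // workflow.created_at
const jobCreatedAt = new Date('2024-09-01T00:02:00Z') // job accepted, waiting for a runner
const jobStartedAt = new Date('2024-09-01T00:05:00Z') // runner picked the job up
const jobCompletedAt = new Date('2024-09-01T00:10:00Z') // job finished (also the latest completed_at)

// dn.WORKFLOW_DURATION: workflow created_at -> latest job completed_at = 600 s
const workflowDuration = calcDiffSec(workflowCreatedAt, jobCompletedAt)

// dn.JOB_DURATION: job started_at -> completed_at = 300 s
const jobDuration = calcDiffSec(jobStartedAt, jobCompletedAt)

// dn.JOB_QUEUED_DURATION: job created_at -> started_at = 180 s
// (createJobGauges skips this gauge when the difference is negative.)
const jobQueuedDuration = calcDiffSec(jobCreatedAt, jobStartedAt)

console.log(workflowDuration, jobDuration, jobQueuedDuration) // 600 300 180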
/src/metrics/index.ts: -------------------------------------------------------------------------------- 1 | export { createMetrics } from './create-metrics.js' 2 | -------------------------------------------------------------------------------- /src/settings.test.ts: -------------------------------------------------------------------------------- 1 | import { test, describe, expect } from 'vitest' 2 | import { createSettings } from './settings.js' 3 | 4 | describe('settings', () => { 5 | test('should parse WORKFLOW_RUN_ID correctly', async () => { 6 | process.env.WORKFLOW_RUN_ID = '123' 7 | const settings = createSettings(process.env) 8 | expect(settings.workflowRunId).toBe(123) 9 | }) 10 | 11 | test('should set workflowRunId to undefined when WORKFLOW_RUN_ID is not set', async () => { 12 | delete process.env.WORKFLOW_RUN_ID 13 | const settings = createSettings(process.env) 14 | expect(settings.workflowRunId).toBeUndefined() 15 | }) 16 | 17 | test('should set owner and repository from environment variables', async () => { 18 | process.env.OWNER = 'owner-name' 19 | process.env.REPOSITORY = 'repo-name' 20 | const settings = createSettings(process.env) 21 | expect(settings.owner).toBe('owner-name') 22 | expect(settings.repository).toBe('repo-name') 23 | }) 24 | 25 | test('should set FeatureFlagTrace to false when FEATURE_TRACE is "false"', async () => { 26 | process.env.FEATURE_TRACE = 'false' 27 | const settings = createSettings(process.env) 28 | expect(settings.FeatureFlagTrace).toBe(false) 29 | }) 30 | 31 | test('should set FeatureFlagTrace to true by default when FEATURE_TRACE is not set', async () => { 32 | delete process.env.FEATURE_TRACE 33 | const settings = createSettings(process.env) 34 | expect(settings.FeatureFlagTrace).toBe(true) 35 | }) 36 | 37 | test('should set FeatureFlagMetrics to true when FEATURE_METRICS is "false"', async () => { 38 | process.env.FEATURE_METRICS = 'false' 39 | const settings = createSettings(process.env) 40 | expect(settings.FeatureFlagMetrics).toBe(false) 41 | }) 42 | 43 | test('should set FeatureFlagMetrics to true by default when FEATURE_METRICS is not set', async () => { 44 | delete process.env.FEATURE_METRICS 45 | const settings = createSettings(process.env) 46 | expect(settings.FeatureFlagMetrics).toBe(true) 47 | }) 48 | 49 | test('should set logeLevel to "debug" when RUNNER_DEBUG is "1"', async () => { 50 | process.env.RUNNER_DEBUG = '1' 51 | const settings = createSettings(process.env) 52 | expect(settings.logeLevel).toBe('debug') 53 | }) 54 | 55 | test('should set logeLevel to OTEL_LOG_LEVEL when set', async () => { 56 | process.env.RUNNER_DEBUG = '0' 57 | process.env.OTEL_LOG_LEVEL = 'warn' 58 | const settings = createSettings(process.env) 59 | expect(settings.logeLevel).toBe('warn') 60 | }) 61 | 62 | test('should set logeLevel to "info" by default when no related env variables are set', async () => { 63 | delete process.env.RUNNER_DEBUG 64 | delete process.env.OTEL_LOG_LEVEL 65 | const settings = createSettings(process.env) 66 | expect(settings.logeLevel).toBe('info') 67 | }) 68 | }) 69 | -------------------------------------------------------------------------------- /src/settings.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line @typescript-eslint/explicit-function-return-type 2 | export const createSettings = (env: typeof process.env) => ({ 3 | workflowRunId: env.WORKFLOW_RUN_ID 4 | ? 
parseInt(env.WORKFLOW_RUN_ID) 5 | : undefined, 6 | owner: env.OWNER, 7 | repository: env.REPOSITORY, 8 | FeatureFlagTrace: env.FEATURE_TRACE 9 | ? env.FEATURE_TRACE.toLowerCase() === 'true' 10 | : true, 11 | FeatureFlagMetrics: env.FEATURE_METRICS 12 | ? env.FEATURE_METRICS.toLowerCase() === 'true' 13 | : true, 14 | logeLevel: 15 | env.RUNNER_DEBUG === '1' 16 | ? 'debug' // https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/store-information-in-variables#default-environment-variables 17 | : env.OTEL_LOG_LEVEL || 'info' // https://opentelemetry.io/docs/zero-code/js/#troubleshooting 18 | }) 19 | 20 | export const settings = createSettings(process.env) 21 | 22 | export default settings 23 | -------------------------------------------------------------------------------- /src/traces/create-spans.ts: -------------------------------------------------------------------------------- 1 | import { Context, ROOT_CONTEXT } from '@opentelemetry/api' 2 | import { 3 | WorkflowRun, 4 | WorkflowRunJobs, 5 | WorkflowRunJob, 6 | getLatestCompletedAt 7 | } from '../github/index.js' 8 | import * as opentelemetry from '@opentelemetry/api' 9 | import { fail } from 'assert' 10 | import { calcDiffSec } from '../utils/calc-diff-sec.js' 11 | import * as core from '@actions/core' 12 | 13 | export const createWorkflowRunTrace = ( 14 | workflowRun: WorkflowRun, 15 | workflowRunJobs: WorkflowRunJobs 16 | ): Context => { 17 | if (!workflowRun.name) fail() 18 | const span = createSpan( 19 | ROOT_CONTEXT, 20 | workflowRun.name, 21 | workflowRun.created_at, 22 | getLatestCompletedAt(workflowRunJobs), 23 | workflowRun.conclusion, 24 | { ...buildWorkflowRunAttributes(workflowRun) } 25 | ) 26 | 27 | return opentelemetry.trace.setSpan(ROOT_CONTEXT, span) 28 | } 29 | 30 | export const createWorkflowRunJobSpan = ( 31 | ctx: Context, 32 | job: WorkflowRunJob 33 | ): Context => { 34 | if (!job.completed_at || job.steps === undefined) fail() 35 | const spanWithWaiting = createSpan( 36 | ctx, 37 | `${job.name} with time of waiting runner`, 38 | job.created_at, 39 | job.completed_at, 40 | job.conclusion, 41 | { ...buildWorkflowRunJobAttributes(job) } 42 | ) 43 | const ctxWithWaiting = opentelemetry.trace.setSpan(ctx, spanWithWaiting) 44 | 45 | const waitingSpanName = `waiting runner for ${job.name}` 46 | const jobQueuedDuration = calcDiffSec( 47 | new Date(job.created_at), 48 | new Date(job.started_at) 49 | ) 50 | if (jobQueuedDuration >= 0) { 51 | createSpan( 52 | ctxWithWaiting, 53 | waitingSpanName, 54 | job.created_at, 55 | job.started_at, 56 | 'success', // waiting runner is not a error. 57 | { ...buildWorkflowRunJobAttributes(job) } 58 | ) 59 | } else { 60 | core.notice( 61 | `${job.name}: Skip to create "${waitingSpanName}" span. 
This is a GitHub specification issue that occasionally occurs, so it can't be recovered.` 62 | ) 63 | } 64 | 65 | const jobSpan = createSpan( 66 | ctxWithWaiting, 67 | job.name, 68 | job.started_at, 69 | job.completed_at, 70 | job.conclusion, 71 | { ...buildWorkflowRunJobAttributes(job) } 72 | ) 73 | 74 | return opentelemetry.trace.setSpan(ctxWithWaiting, jobSpan) 75 | } 76 | 77 | export const createWorkflowRunStepSpan = ( 78 | ctx: Context, 79 | job: WorkflowRunJob 80 | ): void => { 81 | if (job.steps === undefined) fail() 82 | job.steps.map(step => { 83 | if (step.started_at == null || step.completed_at == null) fail() 84 | createSpan( 85 | ctx, 86 | step.name, 87 | step.started_at, 88 | step.completed_at, 89 | step.conclusion, 90 | {} 91 | ) 92 | }) 93 | } 94 | 95 | const createSpan = ( 96 | ctx: Context, 97 | name: string, 98 | startAt: string, 99 | endAt: string, 100 | conclusion: string | null, 101 | attributes: opentelemetry.Attributes 102 | ): opentelemetry.Span => { 103 | const tracer = opentelemetry.trace.getTracer('github-actions-opentelemetry') 104 | const startTime = new Date(startAt) 105 | const endTime = new Date(endAt) 106 | const span = tracer.startSpan(name, { startTime, attributes }, ctx) 107 | span.setStatus(getSpanStatusFromConclusion(conclusion)) 108 | span.end(endTime) 109 | return span 110 | } 111 | 112 | // In reality, the values of `conclusion` for step, job, and workflow might differ. 113 | // However, I couldn't find a complete definition in the official documentation. 114 | // The type of `conclusion` for a job is defined, but for step and workflow, it is just a string. 115 | // At the very least, we know that `conclusion` for step, job, and workflow can take the values `success` and `failure`, 116 | // so I have summarized the definitions accordingly.
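// For example, with the mapping below a 'success' conclusion becomes SpanStatusCode.OK,
// 'failure' and 'timed_out' become SpanStatusCode.ERROR, and any other value GitHub may
// report (e.g. 'cancelled', 'skipped', or null) falls through to SpanStatusCode.UNSET.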
117 | const getSpanStatusFromConclusion = ( 118 | status: string | null 119 | ): opentelemetry.SpanStatus => { 120 | switch (status) { 121 | case 'success': 122 | return { code: opentelemetry.SpanStatusCode.OK } 123 | case 'failure': 124 | case 'timed_out': 125 | return { code: opentelemetry.SpanStatusCode.ERROR } 126 | default: 127 | return { code: opentelemetry.SpanStatusCode.UNSET } 128 | } 129 | } 130 | 131 | const buildWorkflowRunAttributes = ( 132 | workflowRun: WorkflowRun 133 | ): opentelemetry.Attributes => ({ 134 | repository: workflowRun.repository.full_name, 135 | run_id: workflowRun.id, 136 | run_attempt: workflowRun.run_attempt, 137 | url: workflowRun.html_url 138 | }) 139 | 140 | const buildWorkflowRunJobAttributes = ( 141 | job: WorkflowRunJob 142 | ): opentelemetry.Attributes => ({ 143 | 'job.conclusion': job.conclusion || undefined, 144 | 'runner.name': job.runner_name || undefined, 145 | 'runner.group': job.runner_group_name || undefined 146 | }) 147 | -------------------------------------------------------------------------------- /src/traces/create-trace.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, test, expect, afterEach, beforeEach } from 'vitest' 2 | import { WorkflowResults } from '../github/index.js' 3 | import { 4 | InMemorySpanExporter, 5 | ReadableSpan 6 | } from '@opentelemetry/sdk-trace-base' 7 | import { opentelemetryAllDisable } from '../utils/opentelemetry-all-disable.js' 8 | import { initialize, forceFlush } from '../instrumentation/index.js' 9 | import { createTrace } from './create-trace.js' 10 | import { fail } from 'assert' 11 | import settings from '../settings.js' 12 | import { SpanStatusCode } from '@opentelemetry/api' 13 | 14 | const workflowRunResults = { 15 | workflowRun: { 16 | created_at: '2024-09-01T00:00:00Z', 17 | status: 'completed', 18 | id: 10000000000, 19 | name: 'Test Run', 20 | run_number: 14, 21 | repository: { 22 | full_name: 'paper2/github-actions-opentelemetry' 23 | }, 24 | conclusion: 'failure' 25 | }, 26 | workflowRunJobs: [ 27 | { 28 | created_at: '2024-09-01T00:02:00Z', 29 | started_at: '2024-09-01T00:05:00Z', 30 | completed_at: '2024-09-01T00:10:00Z', 31 | id: 30000000000, 32 | name: 'job1', 33 | run_id: 10000000000, 34 | workflow_name: 'Test Run', 35 | status: 'completed', 36 | conclusion: 'success', 37 | steps: [ 38 | { 39 | name: 'step1_1', 40 | started_at: '2024-09-01T00:05:10Z', 41 | completed_at: '2024-09-01T00:05:20Z', 42 | conclusion: 'success' 43 | }, 44 | { 45 | name: 'step1_2', 46 | started_at: '2024-09-01T00:05:30Z', 47 | completed_at: '2024-09-01T00:05:35', 48 | conclusion: 'success' 49 | }, 50 | { 51 | name: 'step1_3', 52 | started_at: '2024-09-01T00:05:40', 53 | completed_at: '2024-09-01T00:05:50', 54 | conclusion: 'success' 55 | } 56 | ] 57 | }, 58 | { 59 | created_at: '2024-09-01T00:12:00Z', 60 | started_at: '2024-09-01T00:15:00Z', 61 | completed_at: '2024-09-01T00:20:00Z', 62 | id: 30000000001, 63 | name: 'job2', 64 | run_id: 10000000000, 65 | workflow_name: 'Test Run', 66 | status: 'completed', 67 | conclusion: 'failure', 68 | steps: [ 69 | { 70 | name: 'step2_1', 71 | started_at: '2024-09-01T00:15:10Z', 72 | completed_at: '2024-09-01T00:15:20Z', 73 | conclusion: 'success' 74 | }, 75 | { 76 | name: 'step2_2', 77 | started_at: '2024-09-01T00:15:30Z', 78 | completed_at: '2024-09-01T00:15:35', 79 | conclusion: 'success' 80 | }, 81 | { 82 | name: 'step2_3', 83 | started_at: '2024-09-01T00:15:40', 84 | completed_at: '2024-09-01T00:15:50', 85 | 
conclusion: 'failure' 86 | } 87 | ] 88 | } 89 | ] 90 | } as WorkflowResults 91 | const { workflowRun, workflowRunJobs } = workflowRunResults 92 | 93 | describe('should export expected spans', () => { 94 | const exporter = new InMemorySpanExporter() 95 | 96 | beforeEach(() => { 97 | exporter.reset() 98 | initialize(undefined, exporter) 99 | }) 100 | 101 | afterEach(() => { 102 | opentelemetryAllDisable() 103 | }) 104 | 105 | test('should verify startTime and endTime', async () => { 106 | await createTrace(workflowRunResults) 107 | await forceFlush() 108 | const spans = exporter.getFinishedSpans().map(span => ({ 109 | name: span.name, 110 | startTime: span.startTime, 111 | endTime: span.endTime 112 | })) 113 | 114 | let testedSpanCount = 0 115 | 116 | if (!workflowRunJobs[1].completed_at) fail() 117 | // workflow span 118 | expect(spans).toContainEqual({ 119 | name: workflowRun.name, 120 | startTime: [toEpochSec(workflowRun.created_at), 0], 121 | endTime: [ 122 | toEpochSec(workflowRunJobs[1].completed_at), // last job's completed_at 123 | 0 124 | ] 125 | }) 126 | testedSpanCount++ 127 | 128 | // jobs span 129 | for (const job of workflowRunJobs) { 130 | if (!job.completed_at) fail() 131 | expect(spans).toContainEqual({ 132 | name: job.name, 133 | startTime: [toEpochSec(job.started_at), 0], 134 | endTime: [toEpochSec(job.completed_at), 0] 135 | }) 136 | testedSpanCount++ 137 | } 138 | 139 | // with waiting for a job span 140 | for (const job of workflowRunJobs) { 141 | if (!job.completed_at) fail() 142 | expect(spans).toContainEqual({ 143 | name: `${job.name} with time of waiting runner`, 144 | startTime: [toEpochSec(job.created_at), 0], 145 | endTime: [toEpochSec(job.completed_at), 0] 146 | }) 147 | testedSpanCount++ 148 | } 149 | 150 | // waiting for a job span 151 | for (const job of workflowRunJobs) { 152 | expect(spans).toContainEqual({ 153 | name: `waiting runner for ${job.name}`, 154 | startTime: [toEpochSec(job.created_at), 0], 155 | endTime: [toEpochSec(job.started_at), 0] 156 | }) 157 | testedSpanCount++ 158 | } 159 | 160 | // steps span 161 | for (const job of workflowRunJobs) { 162 | job.steps?.map(step => { 163 | if (!step.started_at || !step.completed_at) fail() 164 | expect(spans).toContainEqual({ 165 | name: step.name, 166 | startTime: [toEpochSec(step.started_at), 0], 167 | endTime: [toEpochSec(step.completed_at), 0] 168 | }) 169 | testedSpanCount++ 170 | }) 171 | } 172 | 173 | expect(spans).toHaveLength(testedSpanCount) 174 | }) 175 | 176 | test('should export only one root span', async () => { 177 | await createTrace(workflowRunResults) 178 | await forceFlush() 179 | 180 | const spans = exporter.getFinishedSpans().map(span => ({ 181 | parentSpanId: span.parentSpanId 182 | })) 183 | 184 | const rootSpanCount = spans.filter(v => { 185 | return v.parentSpanId === undefined 186 | }) 187 | 188 | expect(rootSpanCount).toHaveLength(1) 189 | }) 190 | 191 | test('should verify resource attributes', async () => { 192 | await createTrace(workflowRunResults) 193 | await forceFlush() 194 | 195 | const spans = exporter.getFinishedSpans().map(span => ({ 196 | resourceAttributes: span.resource.attributes 197 | })) 198 | 199 | spans.map(span => { 200 | expect(span.resourceAttributes).toMatchObject({ 201 | 'service.name': 'github-actions-opentelemetry' 202 | }) 203 | }) 204 | }) 205 | 206 | test('should not export when disable FeatureFlagTrace', async () => { 207 | settings.FeatureFlagTrace = false 208 | await createTrace(workflowRunResults) 209 | await forceFlush() 210 | 211 | 
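// createTrace returns early when settings.FeatureFlagTrace is false (see create-trace.ts), so no spans should be exported.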
expect(exporter.getFinishedSpans()).toHaveLength(0) 212 | settings.FeatureFlagTrace = true 213 | }) 214 | 215 | test('should verify span status', async () => { 216 | await createTrace(workflowRunResults) 217 | await forceFlush() 218 | 219 | const spans = exporter.getFinishedSpans() 220 | if (!workflowRun.name) fail() 221 | const rootSpan = findSpanByName(spans, workflowRun.name) 222 | const job1 = findSpanByName(spans, 'job1') 223 | const job2 = findSpanByName(spans, 'job2') 224 | const job2WithWaitingTime = findSpanByName( 225 | spans, 226 | 'job2 with time of waiting runner' 227 | ) 228 | const job2WaitingTime = findSpanByName(spans, 'waiting runner for job2') 229 | const step2_1 = findSpanByName(spans, 'step2_1') 230 | const step2_2 = findSpanByName(spans, 'step2_2') 231 | const step2_3 = findSpanByName(spans, 'step2_3') 232 | expect(rootSpan.status.code).toBe(SpanStatusCode.ERROR) 233 | expect(job1.status.code).toBe(SpanStatusCode.OK) 234 | expect(job2.status.code).toBe(SpanStatusCode.ERROR) 235 | expect(job2WithWaitingTime.status.code).toBe(SpanStatusCode.ERROR) 236 | expect(job2WaitingTime.status.code).toBe(SpanStatusCode.OK) 237 | expect(step2_1.status.code).toBe(SpanStatusCode.OK) 238 | expect(step2_2.status.code).toBe(SpanStatusCode.OK) 239 | expect(step2_3.status.code).toBe(SpanStatusCode.ERROR) 240 | }) 241 | 242 | test('should verify span hierarchy', async () => { 243 | await createTrace(workflowRunResults) 244 | await forceFlush() 245 | 246 | const spans = exporter.getFinishedSpans() 247 | 248 | if (!workflowRun.name) fail() 249 | const rootSpan = findSpanByName(spans, workflowRun.name) 250 | const child1 = findSpanByName( 251 | spans, 252 | `${workflowRunJobs[0].name} with time of waiting runner` 253 | ) 254 | const child1_1 = findSpanByName( 255 | spans, 256 | `waiting runner for ${workflowRunJobs[0].name}` 257 | ) 258 | const child1_2 = findSpanByName(spans, 'job1') 259 | const child1_2_1 = findSpanByName(spans, 'step1_1') 260 | const child1_2_2 = findSpanByName(spans, 'step1_2') 261 | const child1_2_3 = findSpanByName(spans, 'step1_3') 262 | const child2 = findSpanByName(spans, 'job2 with time of waiting runner') 263 | const child2_1 = findSpanByName(spans, 'waiting runner for job2') 264 | const child2_2 = findSpanByName(spans, 'job2') 265 | const child2_2_1 = findSpanByName(spans, 'step2_1') 266 | const child2_2_2 = findSpanByName(spans, 'step2_2') 267 | const child2_2_3 = findSpanByName(spans, 'step2_3') 268 | 269 | const assertionCount = 270 | [ 271 | assertParentChildRelationship(rootSpan, child1), 272 | assertParentChildRelationship(child1, child1_1), 273 | assertParentChildRelationship(child1, child1_2), 274 | assertParentChildRelationship(child1_2, child1_2_1), 275 | assertParentChildRelationship(child1_2, child1_2_2), 276 | assertParentChildRelationship(child1_2, child1_2_3), 277 | assertParentChildRelationship(rootSpan, child2), 278 | assertParentChildRelationship(child2, child2_1), 279 | assertParentChildRelationship(child2, child2_2), 280 | assertParentChildRelationship(child2_2, child2_2_1), 281 | assertParentChildRelationship(child2_2, child2_2_2), 282 | assertParentChildRelationship(child2_2, child2_2_3) 283 | ].length + 1 // add 1 because rootSpan assertion is not existed. 
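// 12 parent-child assertions above plus the root span itself = 13 spans expected in total.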
284 | 285 | expect(spans).toHaveLength(assertionCount) 286 | }) 287 | }) 288 | 289 | const toEpochSec = (date: string): number => { 290 | return Math.floor(new Date(date).getTime() / 1000) 291 | } 292 | 293 | const findSpanByName = (spans: ReadableSpan[], name: string): ReadableSpan => { 294 | const span = spans.find(s => s.name === name) 295 | if (!span) throw new Error(`${name} is not found in spans.`) 296 | return span 297 | } 298 | 299 | const assertParentChildRelationship = ( 300 | parent: ReadableSpan, 301 | child: ReadableSpan 302 | ): void => { 303 | expect(child.parentSpanId).toBe(parent.spanContext().spanId) 304 | expect(child.spanContext().traceId).toBe(parent.spanContext().traceId) 305 | } 306 | -------------------------------------------------------------------------------- /src/traces/create-trace.ts: -------------------------------------------------------------------------------- 1 | import settings from '../settings.js' 2 | import { WorkflowResults } from 'src/github/types.js' 3 | import { 4 | createWorkflowRunTrace, 5 | createWorkflowRunJobSpan, 6 | createWorkflowRunStepSpan 7 | } from './create-spans.js' 8 | import * as opentelemetry from '@opentelemetry/api' 9 | 10 | export const createTrace = async ( 11 | results: WorkflowResults 12 | ): Promise<string | undefined> => { 13 | if (!settings.FeatureFlagTrace) { 14 | console.log('trace feature is disabled.') 15 | return 16 | } 17 | const { workflowRun, workflowRunJobs } = results 18 | const rootCtx = createWorkflowRunTrace(workflowRun, workflowRunJobs) 19 | for (const job of workflowRunJobs) { 20 | const jobCtx = createWorkflowRunJobSpan(rootCtx, job) 21 | createWorkflowRunStepSpan(jobCtx, job) 22 | } 23 | 24 | const traceId = opentelemetry.trace.getSpanContext(rootCtx)?.traceId 25 | // TODO: output the trace ID as an action output.
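// One possible approach (not implemented here): core.setOutput('traceId', traceId) via @actions/core,
// so that later workflow steps can look up the trace; the output name is illustrative.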
26 | console.log(`TraceID: ${traceId}`) 27 | return traceId 28 | } 29 | -------------------------------------------------------------------------------- /src/traces/index.ts: -------------------------------------------------------------------------------- 1 | export * from './create-trace.js' 2 | -------------------------------------------------------------------------------- /src/utils/calc-diff-sec.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, test, expect } from 'vitest' 2 | import { calcDiffSec } from './calc-diff-sec.js' 3 | 4 | describe('calcDiffSec', () => { 5 | test('should calculate the difference in seconds between two dates', () => { 6 | const date1 = new Date('2023-01-01T00:00:00Z') 7 | const date2 = new Date('2023-01-01T00:00:10Z') 8 | 9 | const diff = calcDiffSec(date1, date2) 10 | 11 | expect(diff).toBe(10) 12 | }) 13 | 14 | test('should return a negative value if the first date is earlier', () => { 15 | const date1 = new Date('2023-01-01T00:00:20Z') 16 | const date2 = new Date('2023-01-01T00:00:10Z') 17 | 18 | const diff = calcDiffSec(date1, date2) 19 | 20 | expect(diff).toBe(-10) 21 | }) 22 | }) 23 | -------------------------------------------------------------------------------- /src/utils/calc-diff-sec.ts: -------------------------------------------------------------------------------- 1 | export const calcDiffSec = (startDate: Date, endDate: Date): number => { 2 | const diffMs = endDate.getTime() - startDate.getTime() 3 | return Math.floor(diffMs / 1000) 4 | } 5 | -------------------------------------------------------------------------------- /src/utils/opentelemetry-all-disable.ts: -------------------------------------------------------------------------------- 1 | import * as opentelemetry from '@opentelemetry/api' 2 | 3 | // Disable opentelemetry global components for test initialization. 4 | export const opentelemetryAllDisable = (): void => { 5 | opentelemetry.metrics.disable() 6 | opentelemetry.trace.disable() 7 | opentelemetry.diag.disable() 8 | opentelemetry.context.disable() 9 | opentelemetry.propagation.disable() 10 | } 11 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "compilerOptions": { 4 | "target": "ES2022", 5 | "module": "NodeNext", 6 | "rootDir": "./src", 7 | "moduleResolution": "NodeNext", 8 | "baseUrl": "./", 9 | "sourceMap": true, 10 | "outDir": "./dist", 11 | "noImplicitAny": true, 12 | "esModuleInterop": true, 13 | "forceConsistentCasingInFileNames": true, 14 | "strict": true, 15 | "skipLibCheck": true, 16 | "newLine": "lf" 17 | }, 18 | "exclude": ["./dist", "./node_modules", "./coverage", "vitest.config.ts"] 19 | } 20 | -------------------------------------------------------------------------------- /vitest.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vitest/config' 2 | import { execSync } from 'child_process' 3 | 4 | const isCI = process.env.CI === 'true' 5 | 6 | if (!isCI) { 7 | // Set up GitHub token on local. 
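// In CI the workflow is expected to provide GITHUB_TOKEN directly, so this setup only runs locally.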
8 | setGitHubTokenEnv() 9 | } 10 | 11 | const defaultEnv = { 12 | FEATURE_METRICS: 'true', 13 | FEATURE_TRACE: 'true', 14 | OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: 15 | 'http://prometheus:9090/api/v1/otlp/v1/metrics', 16 | OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: 'http://jaeger:4318/v1/traces', 17 | OTEL_SERVICE_NAME: 'github-actions-opentelemetry', 18 | OWNER: 'paper2', 19 | REPOSITORY: 'github-actions-opentelemetry', 20 | WORKFLOW_RUN_ID: '12246387114' 21 | } 22 | 23 | const CIEnv = { 24 | ...defaultEnv, 25 | OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: undefined, 26 | OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: undefined 27 | } 28 | 29 | export default defineConfig({ 30 | test: { 31 | coverage: { 32 | provider: 'v8', 33 | reporter: ['json-summary', 'text', 'lcov'], 34 | include: ['src'] 35 | }, 36 | env: isCI ? CIEnv : defaultEnv 37 | } 38 | }) 39 | 40 | function setGitHubTokenEnv(): void { 41 | try { 42 | // check gh command is installed 43 | execSync('gh --version', { stdio: 'ignore' }) 44 | 45 | console.log('gh command found. Attempting to retrieve the GitHub token...') 46 | 47 | const token = execSync('gh auth token', { encoding: 'utf-8' }).trim() 48 | if (!token) { 49 | console.warn('Failed to retrieve GitHub token using `gh auth token`.') 50 | return 51 | } 52 | process.env.GITHUB_TOKEN = token 53 | 54 | console.log( 55 | 'GITHUB_TOKEN has been successfully set as an environment variable.' 56 | ) 57 | } catch (error) { 58 | console.warn( 59 | 'The gh command is either unavailable or failed to retrieve the GitHub token.', 60 | 'Please ensure that the GitHub CLI is installed and authenticated ($ gh auth login) because this test interacts with the real GitHub API.', 61 | 'Note that unauthenticated users are subject to strict API rate limits.' 62 | ) 63 | console.warn(error) 64 | } 65 | } 66 | --------------------------------------------------------------------------------
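To close, here is a small sketch (not a file in this repository) of how the environment variables wired up in vitest.config.ts above are consumed by createSettings from src/settings.ts. The import path and the cast are assumptions made so the example is standalone; the expected values follow from the defaults exercised in settings.test.ts.

import { createSettings } from './src/settings.js' // path is illustrative

// Mirrors the defaultEnv block above. The OTLP endpoints are omitted here because they are
// read by the OpenTelemetry exporters, not by createSettings.
const env = {
  FEATURE_METRICS: 'true',
  FEATURE_TRACE: 'true',
  OWNER: 'paper2',
  REPOSITORY: 'github-actions-opentelemetry',
  WORKFLOW_RUN_ID: '12246387114'
} as typeof process.env

const settings = createSettings(env)

console.log(settings.workflowRunId) // 12246387114 (parsed with parseInt)
console.log(settings.owner, settings.repository) // paper2 github-actions-opentelemetry
console.log(settings.FeatureFlagTrace, settings.FeatureFlagMetrics) // true true
console.log(settings.logeLevel) // 'info' because RUNNER_DEBUG and OTEL_LOG_LEVEL are unset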