├── .gitattributes ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── baggage.md │ ├── context.md │ ├── logs.md │ ├── metrics.md │ ├── miscellaneous.md │ ├── profiling.md │ ├── protocol.md │ ├── resource.md │ └── trace.md ├── PULL_REQUEST_TEMPLATE.md ├── renovate.json5 ├── scripts │ └── triage-helper │ │ ├── Pipfile │ │ ├── Pipfile.lock │ │ └── app.py └── workflows │ ├── checks.yaml │ ├── fossa.yml │ ├── ossf-scorecard.yml │ ├── stale-pr.yaml │ └── triage-followup.yml ├── .gitignore ├── .lychee.toml ├── .markdownlint.yaml ├── .nvmrc ├── .vscode └── settings.json ├── .yamllint ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── development ├── README.md ├── metrics │ └── config-service.md └── trace │ └── zpages.md ├── internal ├── img │ ├── api-lifecycle.png │ ├── architecture.png │ ├── dynamic-config-service.png │ ├── issue-triage-workflow.png │ ├── library-design.png │ ├── library-full.png │ ├── library-minimal.png │ └── long-term-support.png └── tools │ ├── go.mod │ ├── go.sum │ └── tools.go ├── issue-management.md ├── oteps ├── 0000-template.md ├── 0001-telemetry-without-manual-instrumentation.md ├── 0005-global-init.md ├── 0007-no-out-of-band-reporting.md ├── 0016-named-tracers.md ├── 0035-opentelemetry-protocol.md ├── 0038-version-semantic-attribute.md ├── 0066-separate-context-propagation.md ├── 0083-component.md ├── 0099-otlp-http.md ├── 0110-z-pages.md ├── 0111-auto-resource-detection.md ├── 0119-standard-system-metrics.md ├── 0122-otlp-http-json.md ├── 0143-versioning-and-stability.md ├── 0147-upgrade-procedures.md ├── 0149-exponential-histogram.md ├── 0152-telemetry-schemas.md ├── 0155-external-modules.md ├── 0156-columnar-encoding.md ├── 0178-mapping-to-otlp-anyvalue.md ├── 0182-otlp-remote-parent.md ├── 0199-support-elastic-common-schema-in-opentelemetry.md ├── 0201-scope-attributes.md ├── 0202-events-and-logs-api.md ├── 0225-configuration.md ├── 0227-separate-semantic-conventions.md ├── 0232-maturity-of-otel.md ├── 0243-app-telemetry-schema-vision-roadmap.md ├── 0258-env-context-baggage-carriers.md ├── 0265-event-vision.md ├── 0266-move-oteps-to-spec.md ├── 4430-span-event-api-deprecation-plan.md ├── README.md ├── assets │ ├── 0225-config.yaml │ └── 0225-schema.json ├── entities │ ├── 0256-entities-data-model.md │ └── 0264-resource-and-entities.md ├── experimental │ └── 0121-config-service.md ├── images │ ├── otlp-client-server.png │ ├── otlp-concurrent.png │ ├── otlp-multi-destination.png │ ├── otlp-request-response.png │ └── otlp-sequential.png ├── img │ ├── 0066_context_propagation_details.png │ ├── 0066_context_propagation_overview.png │ ├── 0143_api_lifecycle.png │ ├── 0143_cross_cutting.png │ ├── 0143_long_term.png │ ├── 0152-collector.png │ ├── 0152-otel-schema.png │ ├── 0152-query-translate.png │ ├── 0152-source-and-backend.png │ ├── 0156-arrow-ecosystem.svg │ ├── 0156-resource-events.svg │ ├── 0156_All trials.png │ ├── 0156_Best trials.png │ ├── 0156_OTEL - Arrow IPC.png │ ├── 0156_OTEL - HowToUseArrow.png │ ├── 0156_OTEL - ProtocolSeqDiagram.png │ ├── 0156_OTEL - Row vs Column.png │ ├── 0156_OTEL-Metric-Model.png │ ├── 0156_RecordBatch.png │ ├── 0156_collector_internal_overview.png │ ├── 0156_collector_phase_2.png │ ├── 0156_compression_ratio_summary_multivariate_metrics.png │ ├── 0156_compression_ratio_summary_std_metrics.png │ ├── 0156_logs_bytes.png │ ├── 0156_logs_schema.png │ ├── 0156_logs_step_times.png │ ├── 0156_logs_step_times_phase1.png │ ├── 0156_metrics_schema.png │ ├── 0156_metrics_small_batches.png │ ├── 
0156_metrics_step_times.png │ ├── 0156_metrics_step_times_phase1.png │ ├── 0156_multivariate_metrics_bytes.png │ ├── 0156_summary.png │ ├── 0156_summary_time_spent.png │ ├── 0156_traces_schema.png │ ├── 0156_traces_step_times_phase1.png │ ├── 0156_traffic_reduction_use_case.png │ ├── 0156_univariate_metrics_bytes.png │ ├── 0201-scope-multiplexing.png │ ├── 0235-sampling-threshold-calculation.png │ ├── 0243-otel-weaver-component-schema.svg │ ├── 0243-otel-weaver-concepts.svg │ ├── 0243-otel-weaver-dev-strategies.svg │ ├── 0243-otel-weaver-hierarchy.svg │ ├── 0243-otel-weaver-overview.svg │ ├── 0243-otel-weaver-resolved-schema.svg │ ├── 0243-otel-weaver-responsibilities-properties.svg │ ├── 0243-otel-weaver-use-cases.svg │ ├── 0258-env-context-opentofu-trace.png │ ├── 0258-env-context-opentofu-tracing.png │ └── 0258-env-context-parent-child-process.png ├── logs │ ├── 0091-logs-vocabulary.md │ ├── 0092-logs-vision.md │ ├── 0097-log-data-model.md │ ├── 0130-logs-1.0ga-definition.md │ ├── 0150-logging-library-sdk.md │ └── images │ │ └── otep0150 │ │ ├── appender.png │ │ ├── custom-exporter.png │ │ ├── custom-processor.png │ │ └── otlp-file.png ├── metrics │ ├── 0003-measure-metric-type.md │ ├── 0008-metric-observer.md │ ├── 0009-metric-handles.md │ ├── 0010-cumulative-to-counter.md │ ├── 0049-metric-label-set.md │ ├── 0070-metric-bound-instrument.md │ ├── 0072-metric-observer.md │ ├── 0080-remove-metric-gauge.md │ ├── 0088-metric-instrument-optional-refinements.md │ ├── 0090-remove-labelset-from-metrics-api.md │ ├── 0098-metric-instruments-explained.md │ ├── 0108-naming-guidelines.md │ ├── 0113-exemplars.md │ ├── 0126-Configurable-Metric-Aggregations.md │ ├── 0131-otlp-export-behavior.md │ └── 0146-metrics-prototype-scenarios.md ├── profiles │ ├── 0212-profiling-vision.md │ ├── 0239-profiles-data-model.md │ └── images │ │ └── otep0239 │ │ └── profiles-data-model.png └── trace │ ├── 0002-remove-spandata.md │ ├── 0006-sampling.md │ ├── 0059-otlp-trace-data-format.md │ ├── 0136-error_flagging.md │ ├── 0168-sampling-propagation.md │ ├── 0170-sampling-probability.md │ ├── 0173-messaging-semantic-conventions.md │ ├── 0174-http-semantic-conventions.md │ ├── 0205-messaging-semantic-conventions-context-propagation.md │ ├── 0220-messaging-semantic-conventions-span-structure.md │ ├── 0235-sampling-threshold-in-trace-state.md │ └── 0250-Composite_Samplers.md ├── package.json ├── schemas ├── 1.10.0 ├── 1.11.0 ├── 1.12.0 ├── 1.13.0 ├── 1.14.0 ├── 1.15.0 ├── 1.16.0 ├── 1.17.0 ├── 1.18.0 ├── 1.19.0 ├── 1.20.0 ├── 1.21.0 ├── 1.4.0 ├── 1.5.0 ├── 1.6.1 ├── 1.7.0 ├── 1.8.0 └── 1.9.0 ├── spec-compliance-matrix.md ├── specification ├── README.md ├── baggage │ ├── README.md │ └── api.md ├── common │ ├── README.md │ ├── attribute-naming.md │ ├── attribute-requirement-level.md │ ├── attribute-type-mapping.md │ └── mapping-to-non-otlp.md ├── compatibility │ ├── README.md │ ├── logging_trace_context.md │ ├── opencensus.md │ ├── opentracing.md │ └── prometheus_and_openmetrics.md ├── configuration │ ├── README.md │ ├── api.md │ ├── data-model.md │ ├── sdk-environment-variables.md │ └── sdk.md ├── context │ ├── README.md │ ├── api-propagators.md │ └── env-carriers.md ├── document-status.md ├── entities │ ├── README.md │ └── data-model.md ├── error-handling.md ├── glossary.md ├── library-guidelines.md ├── library-layout.md ├── logs │ ├── README.md │ ├── api.md │ ├── data-model-appendix.md │ ├── data-model.md │ ├── img │ │ ├── app-to-file-logs-fb.png │ │ ├── app-to-file-logs-otelcol.png │ │ ├── app-to-otelcol.png │ │ ├── 
appender.png │ │ ├── application-api-sdk.png │ │ ├── separate-collection.png │ │ └── unified-collection.png │ ├── noop.md │ ├── sdk.md │ ├── sdk_exporters │ │ ├── README.md │ │ └── stdout.md │ └── supplementary-guidelines.md ├── metrics │ ├── README.md │ ├── api.md │ ├── data-model.md │ ├── img │ │ ├── accumulator-detail.png │ │ ├── metrics-sdk.png │ │ ├── model-cumulative-sum.png │ │ ├── model-delta-histogram.png │ │ ├── model-delta-sum.png │ │ ├── model-event-layer.png │ │ ├── model-gauge.png │ │ ├── model-layers-stream.png │ │ └── model-layers.png │ ├── metric-requirement-level.md │ ├── noop.md │ ├── sdk.md │ ├── sdk_exporters │ │ ├── README.md │ │ ├── in-memory.md │ │ ├── otlp.md │ │ ├── prometheus.md │ │ └── stdout.md │ └── supplementary-guidelines.md ├── overview.md ├── performance-benchmark.md ├── performance.md ├── profiles │ ├── README.md │ └── mappings.md ├── protocol │ ├── README.md │ ├── design-goals.md │ ├── exporter.md │ ├── file-exporter.md │ ├── img │ │ ├── otlp-client-server.png │ │ ├── otlp-concurrent.png │ │ ├── otlp-multi-destination.png │ │ ├── otlp-request-response.png │ │ └── otlp-sequential.png │ ├── otlp.md │ └── requirements.md ├── resource │ ├── README.md │ ├── data-model.md │ └── sdk.md ├── schemas │ ├── README.md │ ├── file_format_v1.0.0.md │ ├── file_format_v1.1.0.md │ └── img │ │ ├── 0152-collector.png │ │ ├── 0152-otel-schema.png │ │ ├── 0152-query-translate.png │ │ └── 0152-source-and-backend.png ├── semantic-conventions.md ├── specification-principles.md ├── telemetry-stability.md ├── trace │ ├── README.md │ ├── api.md │ ├── exceptions.md │ ├── sdk.md │ ├── sdk_exporters │ │ ├── README.md │ │ ├── stdout.md │ │ └── zipkin.md │ ├── tracestate-handling.md │ ├── tracestate-probability-sampling-experimental.md │ └── tracestate-probability-sampling.md ├── upgrading.md ├── vendors.md └── versioning-and-stability.md └── supplementary-guidelines └── compatibility └── aws.md /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | 3 | *.cmd text eol=crlf 4 | *.bat text eol=crlf 5 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # 3 | # List of approvers for OpenTelemetry Specifications repository 4 | # 5 | ##################################################### 6 | # 7 | # Learn about membership in OpenTelemetry community: 8 | # https://github.com/open-telemetry/community/blob/main/community-membership.md 9 | # 10 | # 11 | # Learn about CODEOWNERS file format: 12 | # https://help.github.com/en/articles/about-code-owners 13 | # 14 | 15 | # Global owners, will be the owners for everything in the repo. Membership is tracked via https://github.com/open-telemetry/community/blob/main/community-members.md 16 | * @open-telemetry/technical-committee @open-telemetry/spec-sponsors 17 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/baggage.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Baggage 3 | about: Issues related to the specification/baggage directory 4 | labels: spec:baggage 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. 
If you followed an existing documentation, please share the link to it. 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/context.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Context 3 | about: Issues related to the specification/context directory 4 | labels: spec:context 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. If you followed an existing documentation, please share the link to it. 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/logs.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Logs 3 | about: Issues related to the specification/logs directory 4 | labels: spec:logs 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. If you followed an existing documentation, please share the link to it. 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/metrics.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Metrics 3 | about: Issues related to the specification/metrics directory 4 | labels: spec:metrics 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. If you followed an existing documentation, please share the link to it. 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/miscellaneous.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Miscellaneous (anything else) 3 | about: Issues that do not fit into other categories 4 | labels: spec:miscellaneous 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. If you followed an existing documentation, please share the link to it. 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/profiling.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Profiling 3 | about: Issues related to the specification/profiling directory 4 | labels: spec:profiling 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. If you followed an existing documentation, please share the link to it. 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/protocol.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Protocol 3 | about: Issues related to the specification/protocol directory 4 | labels: spec:protocol 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. If you followed an existing documentation, please share the link to it. 
14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/resource.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Resource 3 | about: Issues related to the specification/resource directory 4 | labels: spec:resource 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. If you followed an existing documentation, please share the link to it. 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/trace.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Trace 3 | about: Issues related to the specification/trace directory 4 | labels: spec:trace 5 | --- 6 | 7 | **What are you trying to achieve?** 8 | 9 | What did you expect to see? 10 | 11 | **Additional context.** 12 | 13 | Add any other context about the problem here. If you followed an existing documentation, please share the link to it. 14 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Fixes # 2 | 3 | ## Changes 4 | 5 | Please provide a brief description of the changes here. 6 | 7 | For non-trivial changes, follow the [change proposal process](https://github.com/open-telemetry/opentelemetry-specification/blob/main/CONTRIBUTING.md#proposing-a-change). 8 | 9 | * [ ] Related issues # 10 | * [ ] Related [OTEP(s)](https://github.com/open-telemetry/oteps) # 11 | * [ ] Links to the prototypes (when adding or changing features) 12 | * [ ] [`CHANGELOG.md`](https://github.com/open-telemetry/opentelemetry-specification/blob/main/CHANGELOG.md) file updated for non-trivial changes 13 | * [ ] [`spec-compliance-matrix.md`](https://github.com/open-telemetry/opentelemetry-specification/blob/main/spec-compliance-matrix.md) updated if necessary 14 | -------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:best-practices", 5 | "helpers:pinGitHubActionDigestsToSemver" 6 | ], 7 | "packageRules": [ 8 | { 9 | "groupName": "all patch versions", 10 | "matchUpdateTypes": ["patch"], 11 | "schedule": ["before 8am every weekday"] 12 | }, 13 | { 14 | "matchUpdateTypes": ["minor", "major"], 15 | "schedule": ["before 8am on Monday"] 16 | }, 17 | { 18 | "matchManagers": [ 19 | "github-actions" 20 | ], 21 | "groupName": "github-actions deps" 22 | } 23 | ], 24 | "labels": [ 25 | "dependencies" 26 | ] 27 | } 28 | -------------------------------------------------------------------------------- /.github/scripts/triage-helper/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | pygithub = "*" 8 | pytz = "*" 9 | 10 | [dev-packages] 11 | 12 | [requires] 13 | python_version = "3.11" 14 | -------------------------------------------------------------------------------- /.github/workflows/checks.yaml: -------------------------------------------------------------------------------- 1 | name: Checks 2 | 3 | on: 4 | push: 5 | 
branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | merge_group: 9 | 10 | jobs: 11 | markdownlint: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: check out code 15 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 16 | 17 | - name: install dependencies 18 | run: npm install 19 | 20 | - name: run markdownlint 21 | run: make markdownlint 22 | 23 | yamllint: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - name: check out code 27 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 28 | 29 | - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 30 | 31 | - name: install yamllint 32 | run: make install-yamllint 33 | 34 | - name: run yamllint 35 | run: yamllint . -f github 36 | 37 | markdown-link-check: 38 | runs-on: ubuntu-latest 39 | steps: 40 | - name: check out code 41 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 42 | 43 | - name: install dependencies 44 | run: npm install 45 | 46 | - name: run markdown-link-check 47 | run: make markdown-link-check 48 | 49 | markdown-toc-check: 50 | runs-on: ubuntu-latest 51 | steps: 52 | - name: check out code 53 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 54 | 55 | - name: install dependencies 56 | run: npm install 57 | 58 | - name: run markdown-toc 59 | run: make markdown-toc 60 | 61 | - name: validate markdown-toc 62 | run: git diff --exit-code ':*.md' || (echo 'Generated markdown Table of Contents is out of date, please run "make markdown-toc" and commit the changes in this PR.' && exit 1) 63 | 64 | misspell: 65 | runs-on: ubuntu-latest 66 | steps: 67 | - name: check out code 68 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 69 | 70 | - name: run misspell 71 | run: make misspell 72 | -------------------------------------------------------------------------------- /.github/workflows/fossa.yml: -------------------------------------------------------------------------------- 1 | name: FOSSA scanning 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | fossa: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 16 | 17 | - uses: fossas/fossa-action@93a52ecf7c3ac7eb40f5de77fd69b1a19524de94 # v1.5.0 18 | with: 19 | api-key: ${{secrets.FOSSA_API_KEY}} 20 | team: OpenTelemetry 21 | -------------------------------------------------------------------------------- /.github/workflows/ossf-scorecard.yml: -------------------------------------------------------------------------------- 1 | name: OSSF Scorecard 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | schedule: 8 | - cron: "2 22 * * 3" # once a week 9 | workflow_dispatch: 10 | 11 | permissions: read-all 12 | 13 | jobs: 14 | analysis: 15 | runs-on: ubuntu-latest 16 | permissions: 17 | # Needed for Code scanning upload 18 | security-events: write 19 | # Needed for GitHub OIDC token if publish_results is true 20 | id-token: write 21 | steps: 22 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 23 | with: 24 | persist-credentials: false 25 | 26 | - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 27 | with: 28 | results_file: results.sarif 29 | results_format: sarif 30 | publish_results: true 31 | 32 | # Upload the results as artifacts (optional). Commenting out will disable 33 | # uploads of run results in SARIF format to the repository Actions tab. 
34 | # https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts 35 | - name: "Upload artifact" 36 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 37 | with: 38 | name: SARIF file 39 | path: results.sarif 40 | retention-days: 5 41 | 42 | # Upload the results to GitHub's code scanning dashboard (optional). 43 | # Commenting out will disable upload of results to your repo's Code Scanning dashboard 44 | - name: "Upload to code-scanning" 45 | uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 46 | with: 47 | sarif_file: results.sarif 48 | -------------------------------------------------------------------------------- /.github/workflows/stale-pr.yaml: -------------------------------------------------------------------------------- 1 | name: "Close stale pull requests" 2 | on: 3 | schedule: 4 | - cron: "12 3 * * *" # arbitrary time not to DDOS GitHub 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 11 | with: 12 | repo-token: ${{ secrets.GITHUB_TOKEN }} 13 | stale-pr-message: 'This PR was marked stale due to lack of activity. It will be closed in 7 days.' 14 | close-pr-message: 'Closed as inactive. Feel free to reopen if this PR is still being worked on.' 15 | exempt-pr-labels: 'release:after-ga' 16 | days-before-stale: 7 17 | days-before-close: 7 18 | -------------------------------------------------------------------------------- /.github/workflows/triage-followup.yml: -------------------------------------------------------------------------------- 1 | name: "Mark issues for followup" 2 | on: 3 | schedule: 4 | - cron: "12 4 * * *" 5 | workflow_dispatch: 6 | 7 | jobs: 8 | followup: 9 | runs-on: ubuntu-latest 10 | defaults: 11 | run: 12 | working-directory: ./.github/scripts/triage-helper 13 | steps: 14 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 15 | - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 16 | with: 17 | python-version: 3.11 18 | - name: Install pipenv 19 | run: | 20 | python -m pip install --upgrade pipenv wheel 21 | - name: Install dependencies 22 | run: | 23 | pipenv install --deploy --dev 24 | - name: Run script 25 | run: | 26 | pipenv run python app.py open-telemetry/opentelemetry-specification 27 | env: 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IntelliJ IDEA 2 | .idea 3 | *.iml 4 | 5 | # Eclipse 6 | .classpath 7 | .project 8 | .settings 9 | bin 10 | 11 | # NetBeans 12 | /.nb-gradle 13 | /.nb-gradle-properties 14 | 15 | # OS X 16 | .DS_Store 17 | 18 | # Emacs 19 | *~ 20 | \#*\# 21 | 22 | # Vim 23 | .swp 24 | 25 | # Misspell binary 26 | internal/tools/bin 27 | 28 | # Node.js files for tools (e.g. 
markdown-toc) 29 | node_modules/ 30 | package-lock.json 31 | 32 | # Visual Studio Code 33 | .vscode 34 | 35 | # Visual Studio 36 | .vs -------------------------------------------------------------------------------- /.lychee.toml: -------------------------------------------------------------------------------- 1 | include_fragments = true 2 | 3 | # accepting 429s for now until we implement a better way to deal with GitHub rate limiting 4 | # see https://github.com/open-telemetry/semantic-conventions/issues/2214 5 | accept = ["200..=299", "403", "429"] 6 | 7 | exclude = [ 8 | # excluding links to pull requests and issues is done for performance 9 | "^https://github.com/open-telemetry/opentelemetry-specification/(issues|pull)/\\d+$", 10 | # TODO (trask) look into this 11 | "^https://docs.google.com/document/d/1d0afxe3J6bQT-I6UbRXeIYNcTIyBQv4axfjKF4yvAPA/edit" 12 | ] 13 | 14 | # better to be safe and avoid failures 15 | max_retries = 6 16 | -------------------------------------------------------------------------------- /.markdownlint.yaml: -------------------------------------------------------------------------------- 1 | # See https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md 2 | # and https://github.com/DavidAnson/markdownlint/blob/main/README.md 3 | 4 | # Default state for all rules 5 | default: true 6 | 7 | ul-style: false 8 | line-length: false 9 | no-duplicate-header: 10 | siblings_only: true 11 | ol-prefix: 12 | style: ordered 13 | no-inline-html: false 14 | fenced-code-language: false 15 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | lts/* 2 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rewrap.wrappingColumn": 80, 3 | "editor.rulers": [80], 4 | "markdownlint.config": { 5 | "MD004": false, 6 | "MD013": false, 7 | "MD024": {"allow_different_nesting": true}, 8 | "MD029": {"style": "ordered"}, 9 | "MD033": false, 10 | "MD040": false, 11 | }, 12 | "yaml.schemas": { 13 | "https://raw.githubusercontent.com/open-telemetry/build-tools/v0.17.0/semantic-conventions/semconv.schema.json": [ 14 | "semantic_conventions/**/*.yaml" 15 | ] 16 | }, 17 | "json.schemaDownload.enable": true 18 | } 19 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | rules: 4 | document-start: disable 5 | octal-values: enable 6 | truthy: 7 | allowed-values: ['true', 'false', 'on'] # 'on' for GH action trigger 8 | line-length: 9 | max: 200 10 | indentation: 11 | check-multi-line-strings: false 12 | indent-sequences: consistent 13 | brackets: 14 | max-spaces-inside: 1 15 | max-spaces-inside-empty: 0 16 | comments: 17 | min-spaces-from-content: 1 18 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # All documents to be used in spell check. 2 | ALL_DOCS := $(shell find . 
-type f -name '*.md' -not -path './.github/*' -not -path './node_modules/*' -not -path '*semantic_conventions*' | sort) 3 | PWD := $(shell pwd) 4 | 5 | TOOLS_DIR := ./internal/tools 6 | MISSPELL_BINARY=bin/misspell 7 | MISSPELL = $(TOOLS_DIR)/$(MISSPELL_BINARY) 8 | 9 | # see https://github.com/open-telemetry/build-tools/releases for semconvgen updates 10 | # Keep links in semantic_conventions/README.md and .vscode/settings.json in sync! 11 | SEMCONVGEN_VERSION=0.17.0 12 | 13 | # TODO: add `yamllint` step to `all` after making sure it works on Mac. 14 | .PHONY: all 15 | all: install-tools markdownlint markdown-link-check misspell 16 | 17 | $(MISSPELL): 18 | cd $(TOOLS_DIR) && go build -o $(MISSPELL_BINARY) github.com/client9/misspell/cmd/misspell 19 | 20 | .PHONY: misspell 21 | misspell: $(MISSPELL) 22 | $(MISSPELL) -error $(ALL_DOCS) 23 | 24 | .PHONY: misspell-correction 25 | misspell-correction: $(MISSPELL) 26 | $(MISSPELL) -w $(ALL_DOCS) 27 | 28 | .PHONY: markdown-link-check 29 | markdown-link-check: 30 | docker run --rm \ 31 | --mount 'type=bind,source=$(PWD),target=/home/repo' \ 32 | lycheeverse/lychee:sha-2aa22f8@sha256:2e3786630482c41f9f2dd081e06d7da1c36d66996e8cf6573409b8bc418d48c4 \ 33 | --config home/repo/.lychee.toml \ 34 | --root-dir /home/repo \ 35 | -v \ 36 | home/repo 37 | 38 | # This target runs markdown-toc on all files that contain 39 | # a comment <!-- toc -->. 40 | # 41 | # The recommended way to prepare a .md file for markdown-toc is 42 | # to add these comments: 43 | # 44 | # <!-- toc --> 45 | # <!-- tocstop --> 46 | .PHONY: markdown-toc 47 | markdown-toc: 48 | @if ! npm ls markdown-toc; then npm install; fi 49 | @for f in $(ALL_DOCS); do \ 50 | if grep -q '<!-- toc -->' $$f; then \ 51 | echo markdown-toc: processing $$f; \ 52 | npx --no -- markdown-toc --no-first-h1 --no-stripHeadingTags -i $$f || exit 1; \ 53 | else \ 54 | echo markdown-toc: no TOC markers, skipping $$f; \ 55 | fi; \ 56 | done 57 | 58 | .PHONY: markdownlint 59 | markdownlint: 60 | @if ! npm ls markdownlint; then npm install; fi 61 | @for f in $(ALL_DOCS); do \ 62 | echo $$f; \ 63 | npx --no -p markdownlint-cli markdownlint -c .markdownlint.yaml $$f \ 64 | || exit 1; \ 65 | done 66 | 67 | .PHONY: install-yamllint 68 | install-yamllint: 69 | # Using a venv is recommended 70 | pip install -U yamllint~=1.26.1 71 | 72 | .PHONY: yamllint 73 | yamllint: 74 | yamllint . 75 | 76 | # Run all checks in order of speed / likely failure. 77 | .PHONY: check 78 | check: misspell markdownlint markdown-link-check 79 | @echo "All checks complete" 80 | 81 | # Attempt to fix issues / regenerate tables.
82 | .PHONY: fix 83 | fix: misspell-correction 84 | @echo "All autofixes complete" 85 | 86 | .PHONY: install-tools 87 | install-tools: $(MISSPELL) 88 | npm install 89 | @echo "All tools installed" 90 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenTelemetry Specification 2 | 3 | [![Slack](https://img.shields.io/badge/slack-@cncf/otel--specification-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01N7PP1THC) 4 | [![Release](https://img.shields.io/github/v/release/open-telemetry/opentelemetry-specification)](https://github.com/open-telemetry/opentelemetry-specification/releases/) 5 | 6 | ![OpenTelemetry Logo](https://opentelemetry.io/img/logos/opentelemetry-horizontal-color.png) 7 | 8 | The **[OpenTelemetry][] specification** describes the cross-language 9 | requirements and expectations for all OpenTelemetry implementations. 10 | 11 | - The [latest release][] is hosted at 12 | [opentelemetry.io/docs/specs/otel][] 13 | - Markdown sources are under [specification](./specification/README.md) 14 | 15 | ## Change / contribution process 16 | 17 | For details, see [CONTRIBUTING.md](CONTRIBUTING.md), in particular read 18 | [Proposing a change](CONTRIBUTING.md#proposing-a-change) before submitting a PR. 19 | 20 | ## Questions 21 | 22 | Questions that need additional attention can be brought to the regular 23 | specifications meeting. EU and US timezone friendly meeting is held every 24 | Tuesday at 8 AM Pacific time. Meeting notes are held in the [Google 25 | doc](https://docs.google.com/document/d/1pdvPeKjA8v8w_fGKAN68JjWBmVJtPCpqdi9IZrd6eEo). 26 | APAC timezone friendly meetings are held on request. See 27 | [OpenTelemetry calendar](https://github.com/open-telemetry/community#calendar). 28 | 29 | Escalations to technical committee may be made over the 30 | [e-mail](https://github.com/open-telemetry/community#tc-technical-committee). 31 | Technical committee holds regular meetings, notes are held 32 | [here](https://docs.google.com/document/d/1hOHPCu5TGenqTeWPB9qQB_qd33uITZBcvK1FnWxYJAw/edit?usp=sharing). 33 | 34 | ## Specification compliance matrix by language 35 | 36 | See [Compliance of Implementations with 37 | Specification](./spec-compliance-matrix.md). 38 | 39 | ## Project Timeline 40 | 41 | The current project status as well as information on notable past releases is found at 42 | [the OpenTelemetry project page](https://opentelemetry.io/status/). 43 | 44 | Information about current work and future development plans is found at the 45 | [specification development milestones](https://github.com/open-telemetry/opentelemetry-specification/milestones). 46 | 47 | ## Versioning the Specification 48 | 49 | Changes to the [specification](./specification/overview.md) are versioned according to [Semantic Versioning 2.0](https://semver.org/spec/v2.0.0.html) and described in [CHANGELOG.md](CHANGELOG.md). Layout changes are not versioned. Specific implementations of the specification should specify which version they implement. 50 | 51 | Changes to the change process itself are not currently versioned but may be independently versioned in the future. 52 | 53 | ## License 54 | 55 | By contributing to OpenTelemetry Specification repository, you agree that your contributions will be licensed under its [Apache 2.0 License](LICENSE). 
56 | 57 | [OpenTelemetry]: https://opentelemetry.io 58 | [latest release]: https://github.com/open-telemetry/opentelemetry-specification/releases/latest 59 | [opentelemetry.io/docs/specs/otel]: https://opentelemetry.io/docs/specs/otel/ 60 | -------------------------------------------------------------------------------- /development/README.md: -------------------------------------------------------------------------------- 1 | # Features in Development 2 | 3 | This folder is used for features in 4 | [Development](../specification/document-status.md). `Development` status is for components 5 | that are on track to become part of the specification, but that require a faster cadence 6 | of merges and collaboration. 7 | 8 | Features in Development must be: 9 | 10 | - Implementable as a plugin to OpenTelemetry components (API, SDK, collector, etc.). 11 | - Be in active development or testing. 12 | - Approved as a general direction via OTEP process. 13 | 14 | To avoid any confusion, all files in this directory must have a note about its Development status. 15 | 16 | Development status precedes the alpha version (see 17 | [OTEP 0232](../oteps/0232-maturity-of-otel.md#explanation)). 18 | All changes in the `development` folder go through the regular review process. Changes are allowed to be merged faster as completeness of a solution is not a requirement. Approval means that proposed changes are OK for experimentation. 19 | 20 | When the feature or parts of it are developed far enough to declare them as an alpha version of a main project and move out of the Development status, it must go through a **new** OTEP PR and it must be expected that design and APIs will be changed. In fact, the same people who approved the experiment may likely be the most critical reviewers. It demonstrates an interest and involvement, not critique. 
21 | -------------------------------------------------------------------------------- /internal/img/api-lifecycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/internal/img/api-lifecycle.png -------------------------------------------------------------------------------- /internal/img/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/internal/img/architecture.png -------------------------------------------------------------------------------- /internal/img/dynamic-config-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/internal/img/dynamic-config-service.png -------------------------------------------------------------------------------- /internal/img/issue-triage-workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/internal/img/issue-triage-workflow.png -------------------------------------------------------------------------------- /internal/img/library-design.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/internal/img/library-design.png -------------------------------------------------------------------------------- /internal/img/library-full.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/internal/img/library-full.png -------------------------------------------------------------------------------- /internal/img/library-minimal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/internal/img/library-minimal.png -------------------------------------------------------------------------------- /internal/img/long-term-support.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/internal/img/long-term-support.png -------------------------------------------------------------------------------- /internal/tools/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/open-telemetry/opentelemetry-specification/internal/tools 2 | 3 | go 1.12 4 | 5 | require github.com/client9/misspell v0.3.4 6 | -------------------------------------------------------------------------------- /internal/tools/go.sum: -------------------------------------------------------------------------------- 1 | github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= 2 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 3 | 
-------------------------------------------------------------------------------- /internal/tools/tools.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019, OpenTelemetry Authors 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | // 15 | 16 | // +build tools 17 | 18 | package tools 19 | 20 | // This file follows the recommendation at 21 | // https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module 22 | // on how to pin tooling dependencies to a go.mod file. 23 | // This ensures that all systems use the same version of tools in addition to regular dependencies. 24 | 25 | import ( 26 | _ "github.com/client9/misspell/cmd/misspell" 27 | ) 28 | -------------------------------------------------------------------------------- /oteps/0000-template.md: -------------------------------------------------------------------------------- 1 | # Replace this with your awesome OTEP title 2 | 3 | Short (one sentence) summary, e.g., something that would be appropriate for a [CHANGELOG](https://keepachangelog.com/) or release notes. 4 | 5 | ## Motivation 6 | 7 | Why should we make this change? What new value would it bring? What use cases does it enable? 8 | 9 | ## Explanation 10 | 11 | Explain the proposed change as though it was already implemented and you were explaining it to a user. Depending on which layer the proposal addresses, the "user" may vary, or there may even be multiple. 12 | 13 | We encourage you to use examples, diagrams, or whatever else makes the most sense! 14 | 15 | ## Internal details 16 | 17 | From a technical perspective, how do you propose accomplishing the proposal? In particular, please explain: 18 | 19 | * How the change would impact and interact with existing functionality 20 | * Likely error modes (and how to handle them) 21 | * Corner cases (and how to handle them) 22 | 23 | While you do not need to prescribe a particular implementation - indeed, OTEPs should be about **behaviour**, not implementation! - it may be useful to provide at least one suggestion as to how the proposal *could* be implemented. This helps reassure reviewers that implementation is at least possible, and often helps inspire them to think more deeply about trade-offs, alternatives, etc. 24 | 25 | ## Trade-offs and mitigations 26 | 27 | What are some (known!) drawbacks? What are some ways that they might be mitigated? 28 | 29 | Note that mitigations do not need to be complete *solutions*, and that they do not need to be accomplished directly through your proposal. A suggested mitigation may even warrant its own OTEP! 30 | 31 | ## Prior art and alternatives 32 | 33 | What are some prior and/or alternative approaches? For instance, is there a corresponding feature in OpenTracing or OpenCensus? What are some ideas that you have rejected? 34 | 35 | ## Open questions 36 | 37 | What are some questions that you know aren't resolved yet by the OTEP?
These may be questions that could be answered through further discussion, implementation experiments, or anything else that the future may bring. 38 | 39 | ## Prototypes 40 | 41 | Link to any prototypes or proof-of-concept implementations that you have created. 42 | This may include code, design documents, or anything else that demonstrates the 43 | feasibility of your proposal. 44 | 45 | Depending on the scope of the change, prototyping in multiple programming 46 | languages might be required. 47 | 48 | ## Future possibilities 49 | 50 | What are some future changes that this proposal would enable? 51 | -------------------------------------------------------------------------------- /oteps/0005-global-init.md: -------------------------------------------------------------------------------- 1 | # Global SDK initialization 2 | 3 | **Status**: proposed 4 | 5 | Specify the behavior of OpenTelemetry APIs and implementations at startup. 6 | 7 | ## Motivation 8 | 9 | OpenTelemetry is designed with a separation between the API and the 10 | SDK which implements it, allowing an application to configure and bind 11 | any compatible SDK at runtime. OpenTelemetry is designed to support 12 | "zero touch" instrumentation for third party libraries through the use 13 | of a global instance. 14 | 15 | In many programming environments, it is possible for libraries of code 16 | to auto-initialize, allowing them to begin operation concurrently with 17 | the main program, e.g., while initializing static program state. This 18 | presents a set of opposing requirements: (1) the API supports a 19 | configurable SDK; (2) third party libraries may use OpenTelemetry 20 | without configuration. 21 | 22 | ## Explanation 23 | 24 | There are several acceptable ways to address this situation. The 25 | feasibility of each approach varies by language. The implementation 26 | must select one of the following strategies: 27 | 28 | ### Service provider mechanism 29 | 30 | Where the language provides a commonly accepted way to inject SDK 31 | components, it should be preferred. The Java SPI supports loading and 32 | configuring the global SDK before it is first used, and because of 33 | this property the service provider mechanism case leaves little else 34 | to specify. 35 | 36 | ### Explicit initializer 37 | 38 | When it is not possible to ensure the SDK is installed and configured 39 | before the API is first used, loading the SDK is handed off to the 40 | user "at the right time", as stated in [Ruby issue 41 | 19](https://github.com/open-telemetry/opentelemetry-ruby/issues/19). 42 | In this case, a number of requirements must be specified, as discussed 43 | next. 44 | 45 | ## Requirements: Explicit initializer 46 | 47 | OpenTelemetry specifies that the default implementation is 48 | non-operational (i.e., a "no-op"), requiring that API method calls 49 | result in effectively zero instrumentation overhead. We expect third 50 | party libraries to use the global SDK before it is installed, which is 51 | addressed in a requirement stated below. 52 | 53 | The explicit initializer method should take independent `Tracer` and 54 | `Meter` objects (e.g., `opentelemetry.Init(Tracer, Meter)`). The SDK 55 | may be installed no more than once. After the first SDK is installed, 56 | subsequent calls to the explicit initializer shall log console 57 | warnings.
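To make these requirements concrete, here is a minimal Go sketch of what an explicit, install-once initializer could look like. The package, type, and function names are illustrative only and are not the actual OpenTelemetry API; the sketch only demonstrates the no-op defaults, the single installation, and the console warning on repeated calls.

```go
// Package otelglobal is a hypothetical illustration of the explicit
// initializer described above; it is not part of any OpenTelemetry SDK.
package otelglobal

import (
	"log"
	"sync"
)

// Tracer and Meter stand in for the real API interfaces.
type (
	Tracer interface{}
	Meter  interface{}
)

// noopTracer and noopMeter are the zero-overhead defaults that are used
// until an SDK is explicitly installed.
type (
	noopTracer struct{}
	noopMeter  struct{}
)

var (
	mu           sync.Mutex
	installed    bool
	globalTracer Tracer = noopTracer{}
	globalMeter  Meter  = noopMeter{}
)

// Init installs the SDK-provided Tracer and Meter at most once.
// Subsequent calls only log a console warning, as required above.
func Init(t Tracer, m Meter) {
	mu.Lock()
	defer mu.Unlock()
	if installed {
		log.Println("warning: OpenTelemetry SDK already installed; ignoring repeated Init call")
		return
	}
	globalTracer, globalMeter = t, m
	installed = true
}

// GlobalTracer returns the installed Tracer, or the no-op default.
func GlobalTracer() Tracer {
	mu.Lock()
	defer mu.Unlock()
	return globalTracer
}

// GlobalMeter returns the installed Meter, or the no-op default.
func GlobalMeter() Meter {
	mu.Lock()
	defer mu.Unlock()
	return globalMeter
}
```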
58 | 59 | In common language, uses of the global SDK instance (i.e., the Tracer 60 | and Meter) must "begin working" once the SDK is installed, with the 61 | following stipulations: 62 | 63 | ### Tracer 64 | 65 | There may be loss of spans at startup. 66 | 67 | Spans that are started before the SDK is installed are not recovered; 68 | they continue as No-op spans. 69 | 70 | ### Meter 71 | 72 | There may be loss of metrics at startup. 73 | 74 | Metric SubMeasure objects (i.e., metrics w/ predefined labels) 75 | initialized before the SDK is installed will redirect to the global 76 | SDK after it is installed. 77 | 78 | ### Concrete types 79 | 80 | Keys, tags, attributes, labels, resources, span context, and 81 | distributed context are specified as pure API objects, and therefore do 82 | not depend on the SDK being installed. 83 | 84 | ## Trade-offs and mitigations 85 | 86 | ### Testing support 87 | 88 | Testing should be performed without depending on the global SDK. 89 | 90 | ### Synchronization 91 | 92 | Since the global Tracer and Meter objects are required to begin 93 | working once the SDK is installed, there is some implied 94 | synchronization overhead at startup, overhead we expect to fall after 95 | the SDK is installed. We recommend explicitly installing a No-op SDK 96 | to fully disable instrumentation, as this approach will have a lower 97 | overhead than leaving the OpenTelemetry library uninitialized. 98 | 99 | ## Prior art and alternatives 100 | 101 | As an example that does not qualify as "commonly accepted", see [Go 102 | issue 52](https://github.com/open-telemetry/opentelemetry-go/issues/52), 103 | which demonstrates using the Go `plugin` package to load a 104 | configurable SDK prior to first use. 105 | 106 | ## Open questions 107 | 108 | What other options should be passed to the explicit global initializer? 109 | 110 | Is there a public test for "is the SDK installed; is it a no-op"? 111 | -------------------------------------------------------------------------------- /oteps/0007-no-out-of-band-reporting.md: -------------------------------------------------------------------------------- 1 | # Remove support for reporting out-of-band telemetry from the API 2 | 3 | ## TL;DR 4 | 5 | This section tries to summarize all the changes proposed in this RFC: 6 | 7 | 1. Remove the API requirement to support reporting out-of-band telemetry. 2 | 2. Move Resource to the SDK; the API will always report telemetry for the current application, so there is no need to 9 | allow configuring the Resource in any instrumentation. 10 | 3. New APIs should be designed without this requirement. 11 | 12 | ## Motivation 13 | 14 | Currently the API package is designed with a goal to support reporting out-of-band telemetry, but 15 | this requirement forces a lot of trade-offs and unnecessarily complicated APIs (e.g. `Resource` must 16 | be exposed in the API package to allow telemetry to be associated with the source of the telemetry). 17 | 18 | Reporting out-of-band telemetry is required for the OpenTelemetry ecosystem, but this can be done 19 | via a few other options that do not require using the API package: 20 | 21 | * The OpenTelemetry Service: users can write a simple [receiver][otelsvc-receiver] that parses and 22 | produces the OpenTelemetry data. 23 | * Using the SDK's exporter framework, users can write OpenTelemetry data directly. 24 | 25 | ## Internal details 26 | 27 | Here is a list of decisions and trade-offs related to supporting out-of-band reporting: 28 | 29 | 1. 
Add the `Resource` concept into the API. 30 | * For example, in the create metric API we need to allow users to specify the resource, see 31 | [here][create-metric]. The developer that writes the instrumentation has no knowledge about where 32 | the monitored resource is deployed, so there is no way to configure the right resource. 33 | 2. [RFC](./trace/0002-remove-spandata.md) removes support for reporting SpanData. 34 | * This will require the trace API to support configuring all possible fields 35 | via the API, for example allowing users to set a pre-generated `SpanId`, which can be avoided 36 | if we do not support out-of-band reporting. 37 | 3. Sampling logic for out-of-band spans will get very complicated because it will be incorrect to 38 | sample this data. 39 | 4. Associating the source of the telemetry with the telemetry data gets very simple. All data 40 | produced by one instance of the API implementation belongs to only one Application. 41 | 42 | This can be rephrased as: "one API implementation instance" can report telemetry only about the 43 | current Application. 44 | 45 | ### Resource changes 46 | 47 | This RFC does not suggest removing the `Resource` concept or modifying any API in this interface; 48 | it only suggests moving this concept to the SDK level. 49 | 50 | Every instance of the API implementation (the SDK in OpenTelemetry's case) will have one `Resource` that 51 | describes the running Application. There may be cases where multiple 52 | Applications run in the same binary (e.g. a Java application server); every application will have its own SDK 53 | instance configured with its own `Resource`. 54 | 55 | ## Related Issues 56 | 57 | * [opentelemetry-specification/62](https://github.com/open-telemetry/opentelemetry-specification/issues/62) 58 | * [opentelemetry-specification/61](https://github.com/open-telemetry/opentelemetry-specification/issues/61) 59 | 60 | [otelsvc-receiver]: https://github.com/open-telemetry/opentelemetry-service#config-receivers 61 | [create-metric]: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/metrics/api.md#create-metric 62 | -------------------------------------------------------------------------------- /oteps/0038-version-semantic-attribute.md: -------------------------------------------------------------------------------- 1 | # Version Semantic Attribute 2 | 3 | Add a standard `version` semantic attribute. 4 | 5 | ## Motivation 6 | 7 | When creating trace data or metrics, it can be extremely useful to know the specific version that 8 | emitted the iota of span or measurement being viewed. However, versions can mean different things 9 | to different systems and users. In addition, downstream analysis systems may wish to expose 10 | functionality related to the type of a version (such as detecting when versions are newer or older). 11 | To support this, we should standardize a `version` attribute with optional hints as to the type of the 12 | version. 13 | 14 | ## Explanation 15 | 16 | A `version` is a semantic attribute that can be applied to other resources, such as `Service`, 17 | `Component`, `Library`, `Device`, `Platform`, etc. A `version` attribute is optional, but recommended.
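Purely as an illustration, a hedged Go sketch of the kind of helper an SDK might offer for constructing schema-appropriate values (the schema hints used here are defined in the next paragraph); the helper function and attribute map below are hypothetical, not part of any SDK:

```go
package main

import "fmt"

// semverVersion builds a schema-hinted version value; it is a hypothetical
// helper of the kind an SDK might provide for schema-appropriate values.
func semverVersion(v string) string {
	return "semver:" + v
}

func main() {
	// A version is just another string-valued attribute on a resource,
	// so no special handling is required beyond constructing the value.
	attributes := map[string]string{
		"service.name": "checkout",
		"version":      semverVersion("1.2.3"), // yields version=semver:1.2.3
	}
	fmt.Println(attributes)
}
```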
18 | The definition of a `version` is a key-value attribute pair of `string` to `string`, with naming schemas 19 | available to hint at the type of a version, such as the following: 20 | 21 | `version=semver:1.2.3` (a semantic version) 22 | `version=git:8ae73a` (a git SHA hash) 23 | `version=0.0.4.2.20190921` (an untyped version) 24 | 25 | ## Internal details 26 | 27 | Since this is just an attribute pair, no special handling is required, although SDKs may provide helper methods 28 | to construct schema-appropriate values. 29 | 30 | ## Prior art and alternatives 31 | 32 | Tagging service resources with their version is generally suggested by analysis tools -- see [JAEGER_TAGS](https://www.jaegertracing.io/docs/1.8/client-features/) for an example -- but lacks standardization. 33 | -------------------------------------------------------------------------------- /oteps/0110-z-pages.md: -------------------------------------------------------------------------------- 1 | # zPages: general direction (#110) 2 | 3 | Make zPages a standard OpenTelemetry component. 4 | 5 | ## Motivation 6 | 7 | Self-introspection debug pages, or zPages, are in-process web pages that display collected data from the process they are attached to. They are used to provide in-process diagnostics without the need for any backend to examine traces or metrics. Various implementations of zPages are widely used in many environments. A standard, extensible implementation of zPages in OpenTelemetry will benefit everybody. 8 | 9 | ## Explanation 10 | 11 | This OTEP is a request for general approval of zPages development as an experimental feature [open-telemetry/opentelemetry-specification#62](https://github.com/open-telemetry/opentelemetry-specification/pull/632). See [opencensus.io/zpages](https://opencensus.io/zpages/) for an overview of zPages. 12 | 13 | ## Internal details 14 | 15 | The implementation of zPages includes multiple components - data collection (sampling, filtering, configuration), storage and aggregation, and a framework to expose this data. 16 | 17 | This is a request for approval of a general direction. There are a few principles for the development: 18 | 19 | 1. zPages MUST NOT be hardcoded into the OpenTelemetry SDK. 20 | 2. The OpenTelemetry implementation of zPages MUST be split into two separate components - one for data, another for rendering - so that, for example, data providers could also be integrated into other rendering frameworks. 21 | 3. zPages SHOULD be built as a framework that provides a way to extend the information exposed from the process, ideally all the way to replacing the OpenTelemetry SDK with an alternative source of information. 22 | 23 | ## Trade-offs and mitigations 24 | 25 | We may discover that implementing zPages as vendor-specific or user-specific plugins is preferable. Based on initial investigation, a standard, extensible implementation will benefit everybody. 26 | 27 | ## Prior art and alternatives 28 | 29 | [opencensus.io/zpages](https://opencensus.io/zpages/) 30 | 31 | ## Open questions 32 | 33 | N/A 34 | 35 | ## Future possibilities 36 | 37 | N/A 38 | -------------------------------------------------------------------------------- /oteps/0265-event-vision.md: -------------------------------------------------------------------------------- 1 | # Event Basics 2 | 3 | ## Motivation 4 | 5 | The introduction of Events has been contentious, so we want to document and agree on a few basics. 6 | 7 | ### What are OpenTelemetry Events? 
8 | 9 | OpenTelemetry Events are a type of OpenTelemetry Log that requires an event name and follows a specific structure implied by that event name. 10 | 11 | They are a core concept in OpenTelemetry Semantic Conventions. 12 | 13 | ### OTLP 14 | 15 | Since OpenTelemetry Events are a type of OpenTelemetry Log, they share the same OTLP log data structure and pipeline. 16 | 17 | ### API 18 | 19 | OpenTelemetry SHOULD provide a (user-facing) Logs API that includes the capability to emit OpenTelemetry Events. 20 | 21 | ### Interoperability with other logging libraries 22 | 23 | OpenTelemetry SHOULD provide a way to send OpenTelemetry Logs from the OpenTelemetry Logs API to other logging libraries (e.g., Log4j). 24 | This allows users to integrate OpenTelemetry Logs into an existing (non-OpenTelemetry) log stream. 25 | 26 | OpenTelemetry SHOULD provide a way to bypass the OpenTelemetry Logs API entirely and emit OpenTelemetry Logs (including Events) 27 | directly via existing language-specific logging libraries, if that library has the capability to do so. 28 | 29 | OpenTelemetry will recommend that 30 | [instrumentation libraries](../specification/glossary.md#instrumentation-library) 31 | use the OpenTelemetry Logs API to emit OpenTelemetry Events rather than using other logging libraries to emit OpenTelemetry Events. This recommendation aims to provide users with a simple and consistent 32 | onboarding experience that avoids mixing approaches. 33 | 34 | OpenTelemetry will also recommend that application developers use the OpenTelemetry Logs API to emit OpenTelemetry Events instead of using another 35 | logging library, as this helps prevent accidentally emitting logs that lack an event name or are unstructured. 36 | 37 | Recommending the OpenTelemetry Logs API for emitting OpenTelemetry Events, rather than using other logging libraries, contributes to a clearer overall 38 | OpenTelemetry API story. This ensures a unified approach with first-class user-facing APIs for traces, metrics, and events, 39 | all suitable for direct use in native instrumentation. 40 | 41 | ### Relationship to Span Events 42 | 43 | Events are intended to replace Span Events in the long term. 44 | Span Events will be deprecated to signal that users should prefer Events. 45 | 46 | See [OTEP 4430: Span Event API deprecation plan](4430-span-event-api-deprecation-plan.md) 47 | for more details. 48 | 49 | ### SDK 50 | 51 | This refers to the existing OpenTelemetry Logs SDK. 52 | 53 | ## Alternatives 54 | 55 | Many alternatives were considered over the past 2+ years. 56 | 57 | These alternatives primarily boil down to differences in naming (e.g. whether to even use the word Event) 58 | and organization (e.g. whether Event API should be something separate from Logs API). 59 | 60 | The state of this OTEP represents the option that we think will be the least confusing to the most number of users across the wide range of different language ecosystems that are supported. 61 | 62 | ## Open questions 63 | 64 | * How to support routing logs from the Logs API to a language-specific logging library 65 | while simultaneously routing logs from the language-specific logging library to an OpenTelemetry Logging Exporter? 66 | * How do log bodies interoperate with other logging libraries? 67 | OpenTelemetry Logs have two places to put structure (attributes and body), while often logging libraries only have one layer of structure, 68 | which makes it non-obvious how to do a two-way mapping between them in this case. 
69 | * How do event bodies interoperate with Span Events? 70 | * Should the Logs API have an `Enabled` function based on severity level and event name? 71 | * What kind of capabilities should the OpenTelemetry Logs API have now that it is user-facing? 72 | (Keeping in mind the bundle size constraints of browsers and possibly other client environments.) 73 | * What kind of ergonomic improvements make sense now that the OpenTelemetry Logs API is user-facing? 74 | (Keeping in mind the bundle size constraints of browsers and possibly other client environments.) 75 | * How do OpenTelemetry Events relate to raw metric events? 76 | (e.g. [opentelemetry-specification/617](https://github.com/open-telemetry/opentelemetry-specification/issues/617)). 77 | * How do OpenTelemetry Events relate to raw span events? 78 | (e.g. a streaming SDK). 79 | * Should event name be captured as an attribute or as a top-level field? 80 | * How will Event / Span Event interoperability work in the presence of sampling (e.g. since Span Events are sampled along with Spans)? 81 | -------------------------------------------------------------------------------- /oteps/0266-move-oteps-to-spec.md: -------------------------------------------------------------------------------- 1 | # Move OTEPs to the Specification repository 2 | 3 | Let's move OTEP documentation and PRs back into the [Specification](https://github.com/open-telemetry/opentelemetry-specification) repository. 4 | 5 | ## Motivation 6 | 7 | Moving OTEPs back into the specification solves two main issues: 8 | 9 | - Maintaining the OTEP repository's tooling infrastructure (currently woefully out of date) 10 | - Bringing OTEPs into the existing triage and voting process currently used within the 11 | Specification. 12 | 13 | ## Explanation 14 | 15 | Originally, OTEPs were kept in a separate repository to keep disjoint or disruptive designs apart from the main specification. There are a few differences between a normal PR and an OTEP: 16 | 17 | - OTEPs are expected to be directional and subject to change when actually entered into the specification. 18 | - OTEPs require more approvals than specification PRs. 19 | - OTEPs have different PR workflows (whether due to accidental omission or conscious decision), e.g. staleness checks, linting. 20 | 21 | As OpenTelemetry is stabilizing, the need for OTEPs to live outside the specification is diminishing, and we face challenges like: 22 | 23 | - Keeping OTEP tooling up to date 24 | - Advertising the repository's existence 25 | - New contributors to OpenTelemetry often can't find recorded decisions that exist in OTEPs. 26 | - Getting reviews from folks used to checking the Specification repository, but not the less-frequently-worked-on OTEP repository. 27 | 28 | To solve these, let's move OTEPs into a directory within the [specification repository](https://github.com/open-telemetry/opentelemetry-specification). 29 | We would also update all tooling and expected reviews to match existing standards for OTEPs. Given that the maintainers of OTEPs are the same as the 30 | maintainers of the specification, this should not change the bar for acceptance. 31 | 32 | ## Internal details 33 | 34 | The following changes would occur: 35 | 36 | - The following files would be moved to the specification repo: 37 | - `text/` directory -> `oteps/text/` 38 | - `0000-template.md` -> `oteps/0000-template.md` 39 | - Update the specification `Makefile` to include linting, spell checking, link checking and TOC-ing the oteps directory.
40 | - A one-time cleanup of OTEP markdown upon import to the specification repository. 41 | - Close existing OTEP PRs and ask folks to reopen against the specification repository. 42 | - New labels within the specification repository to tag OTEPs, including automation to set these on PR open. 43 | - Updating contributing guidelines to include a section about OTEPs. 44 | - Add an `oteps/README.md` file outlining that OTEPs are not normative and are part of the enhancement proposal process. 45 | - Add a disclaimer to the header of every OTEP that the contents are not normative and are part of the enhancement proposal process. 46 | 47 | ## Trade-offs and mitigations 48 | 49 | Moving into the specification repository DOES mean that we would have a directory with a different quality bar and, to some extent, a different process than the rest of the repository. 50 | This can be mitigated through the use of clear, vibrant labels for OTEPs, and updating process guidelines for the specification repository to retain the important 51 | aspects of the current OTEP status. 52 | 53 | ## Prior art and alternatives 54 | 55 | OTEPs were originally based on common enhancement proposal processes in other ecosystems, where enhancements live outside core repositories and follow more rigorous criteria and evaluation. We are finding this 56 | problematic for OpenTelemetry for reasons discussed above. Additionally, unlike many other ecosystems where enhancement/design is kept separate from core code, OpenTelemetry *already* keeps its design separate 57 | from core code via the Specification vs. implementation repositories. Unlike these other OSS projects, our Specification generally requires rigorous discussion, design and prototyping prior to acceptance. Even 58 | after acceptance into the specification, work is still required for improvements to roll out to the ecosystem. Effectively, the OpenTelemetry specification has no such thing as a "small" change: there are only medium changes that appear small but would be enhancements in other projects, and large changes that require an OTEP. 59 | 60 | ## Open questions 61 | 62 | What are the important portions of the OTEP process to bring over? Have we missed anything in this description? 63 | 64 | ## Future possibilities 65 | 66 | In the future, we could figure out how to make OTEPs more searchable, discoverable and highlighted within the opentelemetry.io website. 67 | 68 | Additionally, we can look at extending staleness deadlines for OTEP-labeled PRs.
69 | -------------------------------------------------------------------------------- /oteps/images/otlp-client-server.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/images/otlp-client-server.png -------------------------------------------------------------------------------- /oteps/images/otlp-concurrent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/images/otlp-concurrent.png -------------------------------------------------------------------------------- /oteps/images/otlp-multi-destination.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/images/otlp-multi-destination.png -------------------------------------------------------------------------------- /oteps/images/otlp-request-response.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/images/otlp-request-response.png -------------------------------------------------------------------------------- /oteps/images/otlp-sequential.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/images/otlp-sequential.png -------------------------------------------------------------------------------- /oteps/img/0066_context_propagation_details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0066_context_propagation_details.png -------------------------------------------------------------------------------- /oteps/img/0066_context_propagation_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0066_context_propagation_overview.png -------------------------------------------------------------------------------- /oteps/img/0143_api_lifecycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0143_api_lifecycle.png -------------------------------------------------------------------------------- /oteps/img/0143_cross_cutting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0143_cross_cutting.png -------------------------------------------------------------------------------- /oteps/img/0143_long_term.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0143_long_term.png -------------------------------------------------------------------------------- /oteps/img/0152-collector.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0152-collector.png -------------------------------------------------------------------------------- /oteps/img/0152-otel-schema.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0152-otel-schema.png -------------------------------------------------------------------------------- /oteps/img/0152-query-translate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0152-query-translate.png -------------------------------------------------------------------------------- /oteps/img/0152-source-and-backend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0152-source-and-backend.png -------------------------------------------------------------------------------- /oteps/img/0156_All trials.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_All trials.png -------------------------------------------------------------------------------- /oteps/img/0156_Best trials.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_Best trials.png -------------------------------------------------------------------------------- /oteps/img/0156_OTEL - Arrow IPC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_OTEL - Arrow IPC.png -------------------------------------------------------------------------------- /oteps/img/0156_OTEL - HowToUseArrow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_OTEL - HowToUseArrow.png -------------------------------------------------------------------------------- /oteps/img/0156_OTEL - ProtocolSeqDiagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_OTEL - ProtocolSeqDiagram.png -------------------------------------------------------------------------------- /oteps/img/0156_OTEL - Row vs Column.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_OTEL - Row vs Column.png -------------------------------------------------------------------------------- /oteps/img/0156_OTEL-Metric-Model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_OTEL-Metric-Model.png -------------------------------------------------------------------------------- /oteps/img/0156_RecordBatch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_RecordBatch.png -------------------------------------------------------------------------------- /oteps/img/0156_collector_internal_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_collector_internal_overview.png -------------------------------------------------------------------------------- /oteps/img/0156_collector_phase_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_collector_phase_2.png -------------------------------------------------------------------------------- /oteps/img/0156_compression_ratio_summary_multivariate_metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_compression_ratio_summary_multivariate_metrics.png -------------------------------------------------------------------------------- /oteps/img/0156_compression_ratio_summary_std_metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_compression_ratio_summary_std_metrics.png -------------------------------------------------------------------------------- /oteps/img/0156_logs_bytes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_logs_bytes.png -------------------------------------------------------------------------------- /oteps/img/0156_logs_schema.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_logs_schema.png -------------------------------------------------------------------------------- /oteps/img/0156_logs_step_times.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_logs_step_times.png -------------------------------------------------------------------------------- 
/oteps/img/0156_logs_step_times_phase1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_logs_step_times_phase1.png -------------------------------------------------------------------------------- /oteps/img/0156_metrics_schema.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_metrics_schema.png -------------------------------------------------------------------------------- /oteps/img/0156_metrics_small_batches.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_metrics_small_batches.png -------------------------------------------------------------------------------- /oteps/img/0156_metrics_step_times.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_metrics_step_times.png -------------------------------------------------------------------------------- /oteps/img/0156_metrics_step_times_phase1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_metrics_step_times_phase1.png -------------------------------------------------------------------------------- /oteps/img/0156_multivariate_metrics_bytes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_multivariate_metrics_bytes.png -------------------------------------------------------------------------------- /oteps/img/0156_summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_summary.png -------------------------------------------------------------------------------- /oteps/img/0156_summary_time_spent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_summary_time_spent.png -------------------------------------------------------------------------------- /oteps/img/0156_traces_schema.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_traces_schema.png -------------------------------------------------------------------------------- /oteps/img/0156_traces_step_times_phase1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_traces_step_times_phase1.png 
-------------------------------------------------------------------------------- /oteps/img/0156_traffic_reduction_use_case.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_traffic_reduction_use_case.png -------------------------------------------------------------------------------- /oteps/img/0156_univariate_metrics_bytes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0156_univariate_metrics_bytes.png -------------------------------------------------------------------------------- /oteps/img/0201-scope-multiplexing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0201-scope-multiplexing.png -------------------------------------------------------------------------------- /oteps/img/0235-sampling-threshold-calculation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0235-sampling-threshold-calculation.png -------------------------------------------------------------------------------- /oteps/img/0258-env-context-opentofu-trace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0258-env-context-opentofu-trace.png -------------------------------------------------------------------------------- /oteps/img/0258-env-context-opentofu-tracing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0258-env-context-opentofu-tracing.png -------------------------------------------------------------------------------- /oteps/img/0258-env-context-parent-child-process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/img/0258-env-context-parent-child-process.png -------------------------------------------------------------------------------- /oteps/logs/0091-logs-vocabulary.md: -------------------------------------------------------------------------------- 1 | # Logs: Vocabulary 2 | 3 | This documents defines the vocabulary for logs to be used across OpenTelemetry project. 4 | 5 | ## Motivation 6 | 7 | We need a common language and common understanding of terms that we use to 8 | avoid the chaos experienced by the builders of the Tower of Babel. 9 | 10 | ## Proposal 11 | 12 | OpenTelemetry specification already contains a [vocabulary](../../specification/overview.md) 13 | for Traces, Metrics and other relevant concepts. 14 | 15 | This proposal is to add the following concepts to the vocabulary. 16 | 17 | ### Log Record 18 | 19 | A recording of an event. 
Typically the record includes a timestamp indicating 20 | when the event happened as well as other data that describes what happened, 21 | where it happened, etc. 22 | 23 | Also known as Log Entry. 24 | 25 | ### Log 26 | 27 | Sometimes used to refer to a collection of Log Records. May be ambiguous, since 28 | people also sometimes use `Log` to refer to a single `Log Record`, thus this 29 | term should be used carefully and in the context where ambiguity is possible 30 | additional qualifiers should be used (e.g. `Log Record`). 31 | 32 | ### Embedded Log 33 | 34 | `Log Records` embedded inside a [Span](../../specification/trace/api.md#span) 35 | object, in the [Events](../../specification/trace/api.md#add-events) list. 36 | 37 | ### Standalone Log 38 | 39 | `Log Records` that are not embedded inside a `Span` and are recorded elsewhere. 40 | 41 | ### Log Attributes 42 | 43 | Key/value pairs contained in a `Log Record`. 44 | 45 | ### Structured Logs 46 | 47 | Logs that are recorded in a format which has a well-defined structure that allows 48 | to differentiate between different elements of a Log Record (e.g. the Timestamp, 49 | the Attributes, etc). The _Syslog protocol_ ([RFC 5425](https://tools.ietf.org/html/rfc5424)), 50 | for example, defines a `structured-data` format. 51 | 52 | ### Flat File Logs 53 | 54 | Logs recorded in text files, often one line per log record (although multiline 55 | records are possible too). There is no common industry agreement whether 56 | logs written to text files in more structured formats (e.g. JSON files) 57 | are considered Flat File Logs or not. Where such distinction is important it is 58 | recommended to call it out specifically. 59 | -------------------------------------------------------------------------------- /oteps/logs/0130-logs-1.0ga-definition.md: -------------------------------------------------------------------------------- 1 | # Logs GA Scope 2 | 3 | This document defines what's in scope for OpenTelemetry Logs General 4 | Availability release. 5 | 6 | ## Motivation 7 | 8 | Clearly defined scope is important to align all logs contributors and to make 9 | sure we know what target we are working towards. Note that some of the listed 10 | items are already fully or partially implemented but are still listed for 11 | completeness. 12 | 13 | General Availability of OpenTelemetry Logs is expected after OpenTelemetry 1.0 14 | GA (which will only include traces and metrics). 15 | 16 | ## Logs Roadmap Items 17 | 18 | ### Specification and SDK 19 | 20 | - Write guidelines and specification for logging libraries to support 21 | OpenTelemetry-compliant logs. 22 | 23 | - Implement OpenTelemetry logs SDK for Java that support trace context 24 | extraction from the current execution context. 25 | 26 | - Show full OpenTelemetry-compliant implementation of an "addon" to one of the 27 | popular logging libraries for Java (e.g. Log4J, SLF4J, etc). Use OpenTelemetry 28 | SDK to automatically include extracted trace context in the logs and support 29 | exporting via OTLP. 30 | 31 | - Implement an example Java application that shows how to emit correlated traces 32 | and logs. Use the supported popular logging library together with 33 | OpenTelemetry SDK, and export logs using OTLP. 34 | 35 | - Add Logs support to OTLP specification. 36 | 37 | ### Collector 38 | 39 | - Implement receiver and exporter for OTLP logs in Collector. 
40 | 41 | - Implement "array" value type for in-memory data structures (it is already part 42 | of OTLP but is not supported by the Collector yet). 43 | 44 | - Implement log exporters in Collector for a few vendor formats from 45 | participating vendors. 46 | 47 | - Implement Fluent Forward protocol receiver to receive logs from 48 | FluentBit/FluentD. 49 | 50 | - Add support for log data type to the following processors: `resource`, 51 | `batch`, `attributes`, `k8s_tagger`, `resourcedetection`. 52 | 53 | - Add end-to-end performance tests for log forwarding (similar to existing trace 54 | and metric tests) at least for OTLP and Fluent Forward protocols. 55 | 56 | - Test operation of the Collector together with at least one other logging agent 57 | (e.g. FluentBit), allowing it to read file logs as described here. Publish test 58 | results (including performance). 59 | 60 | - Implement an example that shows how to use the OpenTelemetry Collector to collect 61 | correlated logs, traces and metrics from a distributed microservices 62 | application (preferably running in a cloud-native control plane like 63 | Kubernetes). 64 | -------------------------------------------------------------------------------- /oteps/logs/images/otep0150/appender.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/logs/images/otep0150/appender.png -------------------------------------------------------------------------------- /oteps/logs/images/otep0150/custom-exporter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/logs/images/otep0150/custom-exporter.png -------------------------------------------------------------------------------- /oteps/logs/images/otep0150/custom-processor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/logs/images/otep0150/custom-processor.png -------------------------------------------------------------------------------- /oteps/logs/images/otep0150/otlp-file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/logs/images/otep0150/otlp-file.png -------------------------------------------------------------------------------- /oteps/metrics/0008-metric-observer.md: -------------------------------------------------------------------------------- 1 | # Metrics observer specification 2 | 3 | **Status:** Superseded entirely by [0072-metric-observer](0072-metric-observer.md) 4 | 5 | Propose metric `Observer` callbacks for context-free access to current Gauge instrument values on demand. 6 | 7 | ## Motivation 8 | 9 | The current specification describes metric callbacks as an alternate means of generating metrics for the SDK, allowing the application to generate metrics only as often as desired by the monitoring infrastructure. This proposal limits callback metrics to supporting only gauge `Observer` callbacks, arguably the only important case.
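To make the on-demand pattern above concrete, here is a minimal, dependency-free Go sketch; the `observerGauge` type and the `readCPUSeconds` function are illustrative stand-ins invented for this example, not part of any OpenTelemetry API:

```golang
package main

import "fmt"

// observerGauge is a toy stand-in for the gauge Observer described in this
// OTEP: rather than the application calling Set() on every change, the SDK
// invokes a callback only when a collection actually happens.
type observerGauge struct {
	name     string
	callback func() map[string]float64 // stringified label set -> current value
}

// collect simulates the SDK/exporter pulling current values on demand.
func (g observerGauge) collect() {
	for labels, value := range g.callback() {
		fmt.Printf("%s{%s} = %v\n", g.name, labels, value)
	}
}

func main() {
	// Stand-in for an expensive read, e.g. a system call for CPU seconds.
	readCPUSeconds := func() float64 { return 12.5 }

	cpu := observerGauge{
		name: "process.cpu.seconds",
		callback: func() map[string]float64 {
			// The cost is paid once per collection interval, not per event.
			return map[string]float64{`state="user"`: readCPUSeconds()}
		},
	}

	cpu.collect() // exporter-driven: one callback invocation per collection
}
```

The shape mirrors the details that follow: the callback returns a map from label set to gauge value, and the collection schedule, not application traffic, determines how often the expensive read happens.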
10 | 11 | ## Explanation 12 | 13 | Gauge metric instruments are typically used to reflect properties that are pre-computed by a system, where the measurement interval is arbitrary. When selecting a gauge, as opposed to the cumulative or measure kind of metric instrument, there could be significant computational cost in computing the current value. When this is the case, it is understandable that we are interested in computing them on demand to minimize cost. 14 | 15 | Why are gauges different than cumulative and measure instruments? Measure instruments, by definition, carry information in the individual event, so the callback cannot optimize any better than the SDK can in this case. Cumulative instruments are more commonly used to record amounts that are readily available, such as the number of bytes read or written, and while this may not always be true, recall the special case of `NonDescending` gauges. 16 | 17 | `NonDescending` gauges owe their existence to this case, that we support non-negative cumulative metrics which, being expensive to compute, are recommended for use with `Observer` callbacks. For example, if it requires a system call or more to compute a non-descending sum, such as the _cpu seconds_ consumed by the process, we should declare a non-descending gauge `Observer` for the instrument, instead of a cumulative. This allows the cost of the metric to be reduced according to the desired monitoring frequency. 18 | 19 | One significant difference between gauges that are explicitly `Set()`, as compared with observer callbacks, is that `Set()` happens inside a context, whereas the observer callback does not. 20 | 21 | ## Details 22 | 23 | Observer callbacks are only supported for gauge metric instruments. Use the language-specific constructor for an Observer gauge (e.g., `metric.NewFloat64Observer()`). Observer gauges support the `NonDescending` option. 24 | 25 | Callbacks return a map from _label set_ to gauge value. Gauges declared with observer callbacks cannot also be `Set`. 26 | 27 | Callbacks should avoid blocking. The implementation may be required to cancel computation if the callback blocks for too long. 28 | 29 | Callbacks must not be called synchronously with application code via any OpenTelemetry API. Implementations that cannot provide this guarantee should prefer not to implement observer callbacks. 30 | 31 | Callbacks may be called synchronously in the SDK on behalf of an exporter. 32 | 33 | Callbacks should avoid calling OpenTelemetry APIs, but we recognize this may be impossible to enforce. 34 | 35 | ## Trade-offs and mitigations 36 | 37 | Callbacks are a relatively dangerous programming pattern, which may require care to avoid deadlocks between the application and the API or the SDK. Implementations may consider preventing deadlocks through runtime callstack introspection, to make these interfaces absolutely safe. 38 | -------------------------------------------------------------------------------- /oteps/metrics/0009-metric-handles.md: -------------------------------------------------------------------------------- 1 | # Metric Handle API specification 2 | 3 | Specify the behavior of the Metrics API "Handle" type, for efficient repeated-use of metric instruments. 4 | 5 | ## Motivation 6 | 7 | The specification currently names this concept "TimeSeries", the type returned by `GetOrCreateTimeseries`, which supports binding a metric to a pre-defined set of labels for repeated use. 
This proposal renames these "Handle" and `GetHandle`, respectively, and adds further detail to the API specification for handles. 8 | 9 | ## Explanation 10 | 11 | The `TimeSeries` is referred to as a "Handle", as the former name suggests an implementation, not an API concept. "Handle", we feel, is more descriptive of the intended use. Likewise with `GetOrCreateTimeSeries` to `GetHandle` and `GetDefaultTimeSeries` to `GetDefaultHandle`, these names suggest an implementation and not the intended use. 12 | 13 | Applications are encouraged to re-use metric handles for efficiency. 14 | 15 | Handles are useful to reduce the cost of repeatedly recording a metric instrument (cumulative, gauge, or measure) with a pre-defined set of label values. 16 | 17 | `GetHandle` gets a new handle given a [`LabelSet`](./0049-metric-label-set.md). 18 | 19 | As a language-optional feature, the API may provide an _ordered_ form of the API for supplying labels in known order. The ordered label-value API is provided as a (language-optional) potential optimization that facilitates a simple lookup for the SDK. In this ordered-value form, the API is permitted to throw an exception or return an error when there is a mismatch in the arguments to `GetHandle`, although languages without strong type-checking may wish to omit this feature. When label values are accepted in any order, SDKs may be forced to canonicalize the labels in order to find an existing metrics handle, but they must not throw exceptions. 20 | 21 | `GetHandle` supports arbitrary label sets. There is no requirement that the LabelSet used to construct a handle covers the recommended aggregation keys of a metric instrument. 22 | 23 | ## Internal details 24 | 25 | Because each of the metric kinds supports a different operation (`Add()`, `Set()`, and `Record()`), there are logically distinct kinds of handle. The names of the distinct handle types should reflect their instrument kind. 26 | 27 | The names (`Handle`, `GetHandle`, ...) are just language-neutral recommendations. Language APIs should feel free to choose type and method names with attention to the language's style. 28 | 29 | ### Metric `Attachment` support 30 | 31 | OpenCensus has the notion of a metric attachment, allowing the application to include additional information associated with the event, for sampling purposes. Any label value not used for aggregation may be used as a sample "attachment", including the OpenTelemetry span context, to associate sample trace context with exported metrics. 32 | 33 | ## Issues addressed 34 | 35 | [Agreements reached on handles and naming in the working group convened on 8/21/2019](https://docs.google.com/document/d/1d0afxe3J6bQT-I6UbRXeIYNcTIyBQv4axfjKF4yvAPA/edit#). 36 | 37 | [`record` should take a generic `Attachment` class instead of having tracing dependency](https://github.com/open-telemetry/opentelemetry-specification/issues/144) 38 | -------------------------------------------------------------------------------- /oteps/metrics/0010-cumulative-to-counter.md: -------------------------------------------------------------------------------- 1 | # Rename "Cumulative" to "Counter" in the metrics API 2 | 3 | Prefer the name "Counter" as opposed to "Cumulative". 4 | 5 | ## Motivation 6 | 7 | Informally speaking, it seems that OpenTelemetry community members would prefer to call Cumulative metric instruments "Counters". During conversation (e.g., in the 8/21 working session), this has become clear. 
8 | 9 | Counter is a noun, like the other instrument kinds, Gauge and Measure. Cumulative is an adjective, so while "Cumulative instrument" makes sense, it describes a "Counter". 10 | 11 | ## Explanation 12 | 13 | This will eliminate the cognitive cost of mapping "cumulative" to "counter" when speaking about these APIs. 14 | 15 | This is the term used for a cumulative metric instrument, for example, in [Statsd](https://github.com/statsd/statsd/blob/master/docs/metric_types.md) and [Prometheus](https://prometheus.io/docs/concepts/metric_types/#counter). 16 | 17 | However, we have identified important sub-cases of Counter that are treated as follows. Counters have an option: 18 | 19 | - True-cumulative Counter: By default, `Add()` arguments must be >= 0. 20 | - Bi-directional Counter: As an option, `Add()` arguments may be +/- 0. 21 | 22 | Gauges are sometimes used to monitor non-descending quantities (e.g., cpu usage), and have a similar option: 23 | 24 | - Bi-directional Gauge: By default, `Set()` arguments may be +/- 0. 25 | - Uni-directional Gauge: As an option, `Set()` arguments must change by >= 0. 26 | 27 | Uni-directional Gauge instruments are typically used in metric `Observer` callbacks where the observed value is cumulative. 28 | 29 | ## Trade-offs and mitigations 30 | 31 | Other ways to describe the distinction between true-cumulative and bi-directional Counters are: 32 | 33 | - Additive (vs. Cumulative) 34 | - GaugeDelta (vs. Gauge) 35 | 36 | It is possible that reducing all of these cases into the broad term "Counter" creates more confusion than it addresses. 37 | 38 | ## Internal details 39 | 40 | Simply replace every "Cumulative" with "Counter", then edit for grammar. 41 | 42 | ## Prior art and alternatives 43 | 44 | In a survey of existing metrics libraries, Counter is far more common. 45 | -------------------------------------------------------------------------------- /oteps/metrics/0049-metric-label-set.md: -------------------------------------------------------------------------------- 1 | # Metric `LabelSet` specification 2 | 3 | Introduce a first-class `LabelSet` API type as a handle on a pre-defined set of labels for the Metrics API. 4 | 5 | ## Motivation 6 | 7 | "Labels" is the term for the key-value pairs used in the OpenTelemetry Metrics API. Treatment of labels in the Metrics API is especially important for performance across a variety of export strategies. 8 | 9 | Label serialization is often one of the most expensive tasks when processing metric events. Creating a `LabelSet` once and re-using it many times can greatly reduce the overall cost of processing many events. 10 | 11 | The Metrics API supports three calling conventions: the Handle convention, the Direct convention, and the Batch convention. Each of these conventions stands to benefit when a `LabelSet` is re-used, as it allows the SDK to process the label set once instead of once per call. Whenever more than one handle will be created with the same labels, more than one instrument will be called directly with the same labels, or more than one batch of metric events will be recorded with the same labels, re-using a `LabelSet` makes it possible for the SDK to improve performance. 12 | 13 | ## Explanation 14 | 15 | Metric instrument APIs which presently take labels in the form `{ Key: Value, ... }` will be updated to take an explicit `LabelSet`. The `Meter.Labels()` API method supports getting a `LabelSet` from the API, allowing the programmer to acquire a pre-defined label set. Here are several examples of `LabelSet` re-use.
Assume we have two instruments: 16 | 17 | ```golang 18 | var ( 19 | cumulative = metric.NewFloat64Cumulative("my_counter") 20 | gauge = metric.NewFloat64Gauge("my_gauge") 21 | ) 22 | ``` 23 | 24 | Use a `LabelSet` to construct multiple Handles: 25 | 26 | ```golang 27 | var ( 28 | labels = meter.Labels({ "required_key1": value1, "required_key2": value2 }) 29 | chandle = cumulative.GetHandle(labels) 30 | ghandle = gauge.GetHandle(labels) 31 | ) 32 | for ... { 33 | // ... 34 | chandle.Add(...) 35 | ghandle.Set(...) 36 | } 37 | ``` 38 | 39 | Use a `LabelSet` to make multiple Direct calls: 40 | 41 | ```golang 42 | labels := meter.Labels({ "required_key1": value1, "required_key2": value2 }) 43 | cumulative.Add(quantity, labels) 44 | gauge.Set(quantity, labels) 45 | ``` 46 | 47 | Of course, repeated calls to `Meter.RecordBatch()` could re-use a `LabelSet` as well. 48 | 49 | ### Ordered `LabelSet` option 50 | 51 | As a language-level decision, APIs may support _ordered_ LabelSet 52 | construction, in which a pre-defined set of ordered label keys is 53 | defined such that values can be supplied in order. This allows a 54 | faster code path to construct the `LabelSet`. For example, 55 | 56 | ```golang 57 | 58 | var rpcLabelKeys = meter.OrderedLabelKeys("a", "b", "c") 59 | 60 | for _, input := range stream { 61 | labels := rpcLabelKeys.Values(1, 2, 3) // a=1, b=2, c=3 62 | 63 | // ... 64 | } 65 | ``` 66 | 67 | This is specified as a language-optional feature because its safety, 68 | and therefore its value as an input for monitoring, depends on the 69 | availability of type-checking in the source language. Passing 70 | unordered labels (i.e., a list of bound keys and values) to 71 | `Meter.Labels(...)` is considered the safer alternative. 72 | 73 | ### Interaction with "Named" Meters 74 | 75 | LabelSet values may be used with any named Meter originating from the 76 | same Meter provider. That is, LabelSets acquired through a named 77 | Meter may be used by any Meter from the same Meter provider. 78 | 79 | ## Internal details 80 | 81 | Metric SDKs that do not or cannot take advantage of the LabelSet optimizations are not especially burdened by having to support these APIs. It is trivial to supply an implementation of `LabelSet` that simply stores a list of labels. This may not be acceptable in performance-critical applications, but this is the common case in many metrics and diagnostics APIs today. 82 | 83 | ## Trade-offs and mitigations 84 | 85 | In languages where overloading is a standard convenience, the metrics API may elect to offer alternate forms that elide the call to `Meter.Labels()`, for example: 86 | 87 | ``` 88 | instrument.GetHandle({ Key: Value, ... }) 89 | ``` 90 | 91 | as opposed to this: 92 | 93 | ``` 94 | instrument.GetHandle(meter.Labels({ Key: Value, ... })) 95 | ``` 96 | 97 | A key distinction between `LabelSet` and similar concepts in existing metrics libraries is that it is a _write-only_ structure. `LabelSet` allows the developer to input metric labels without being able to read them back. This avoids forcing the SDK to retain a reference to memory that is not required. 98 | 99 | ## Prior art and alternatives 100 | 101 | Some existing metrics APIs support this concept. For example, see `Scope` in the [Tally metric API for Go](https://godoc.org/github.com/uber-go/tally#Scope). 102 | 103 | Some libraries take `LabelSet` one step further. In the future, we may add to the the `LabelSet` API a method to extend the label set with additional labels. 
For example: 104 | 105 | ``` 106 | serviceLabels := meter.Labels({ "k1": "v1", "k2": "v2" }) 107 | // ... 108 | requestLabels := serviceLabels.With({ "k3": "v3", "k4": "v4" }) 109 | ``` 110 | -------------------------------------------------------------------------------- /oteps/metrics/0070-metric-bound-instrument.md: -------------------------------------------------------------------------------- 1 | # Rename metric instrument Handles to "Bound Instruments" 2 | 3 | The OpenTelemetry metrics API specification refers to a concept known 4 | as ["metric handles"](0009-metric-handles.md), which is a metric 5 | instrument bound to a `LabelSet`. This OTEP proposes to change that 6 | term to "bound instruments" to avoid the more-generic term "handle". 7 | 8 | The corresponding method to create a bound instrument will be renamed 9 | "Bind" as opposed to "GetHandle". 10 | 11 | ## Motivation 12 | 13 | The term "Handle" is widely seen as too general for its purpose in the 14 | metrics API. Rather than re-use a widely-used noun for this concept, 15 | we instead will re-use the metric "instrument" noun and apply an 16 | adjective, "bound", to convey that it has been bound to a `LabelSet`. 17 | 18 | ## Explanation 19 | 20 | "Handle" has been confusing from the start. However, it was preceded by 21 | other potentially confusing terms (e.g., "TimeSeries", "Entry"). The 22 | term "Bound instrument" was initially suggested 23 | [here](https://github.com/open-telemetry/opentelemetry-specification/pull/299#discussion_r334211154) 24 | and widely accepted. 25 | 26 | ## Internal details 27 | 28 | This is a simple renaming. All uses of "handle" will be replaced by 29 | "bound instrument" in the specification. All uses of the `GetHandle` 30 | method become `Bind`. 31 | 32 | Note that the phrase "bound instrument" may not appear directly in the 33 | user-facing API, nor is it required to, whereas the method `GetHandle` 34 | is a specified method on metric instruments. 35 | 36 | The newly-named `Bind()` method returns a bound instrument type. The 37 | name of the returned type may simply take the name of its instrument 38 | with the prefix `Bound`. For example, an `Int64Counter` instrument's 39 | `Bind()` method should return a `BoundInt64Counter` type. 40 | 41 | As usual, the spelling and capitalization of these names are just 42 | recommendations; individual language committees should select names 43 | that are well suited to their language and existing API style. 44 | 45 | ## Trade-offs and mitigations 46 | 47 | This is widely seen as an improvement, based on informal discussions. 48 | 49 | ## Prior art and alternatives 50 | 51 | The OpenCensus libraries named this concept "Entries", with a 52 | `GetEntry` method, as they are entries in some kind of map. 53 | 54 | The earliest appearance in OpenTelemetry renamed these "TimeSeries", 55 | hoping to improve matters, but "TimeSeries" more commonly refers to 56 | the output of the bound instruments, after aggregation. "Handle" was 57 | decided upon in an August 2019 working group on metrics. 58 | 59 | The Prometheus library refers to unbound instruments as "Vectors" and 60 | supports a variety of "With" methods to bind labels with the vector to 61 | yield a bound instrument.
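To make the rename concrete, here is a small, self-contained Go sketch using toy types; the names follow the recommendations above (`Bind`, `BoundInt64Counter`) but are otherwise hypothetical and not an OpenTelemetry implementation:

```golang
package main

import "fmt"

// Toy types that mirror the naming recommended above; illustrative only.
type LabelSet struct{ encoded string }

type Int64Counter struct{ name string }

// BoundInt64Counter is an instrument bound to one LabelSet ("bound
// instrument"), the concept formerly called a "handle".
type BoundInt64Counter struct {
	instrument Int64Counter
	labels     LabelSet
}

// Bind replaces the old GetHandle method name.
func (c Int64Counter) Bind(labels LabelSet) BoundInt64Counter {
	return BoundInt64Counter{instrument: c, labels: labels}
}

func (b BoundInt64Counter) Add(value int64) {
	fmt.Printf("%s%s += %d\n", b.instrument.name, b.labels.encoded, value)
}

func main() {
	requests := Int64Counter{name: "http.requests"}
	bound := requests.Bind(LabelSet{encoded: `{path="/users"}`}) // was: GetHandle(...)
	bound.Add(1)
}
```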
62 | -------------------------------------------------------------------------------- /oteps/metrics/0090-remove-labelset-from-metrics-api.md: -------------------------------------------------------------------------------- 1 | # Remove the LabelSet object from the metrics API 2 | 3 | The proposal is to remove the current [`LabelSet`](./0049-metric-label-set.md) 4 | API and change all the current APIs that accept LabelSet to accept the 5 | labels directly (a list of key-values, or a map of key-values, based on the language 6 | capabilities). 7 | 8 | ## Motivation 9 | 10 | The [`LabelSet`](./0049-metric-label-set.md) API type was added to serve as a 11 | handle on a pre-defined set of labels for the Metrics API. 12 | 13 | This API represents an optimization for the current metrics API that allows 14 | implementations to avoid encoding and checking label restrictions multiple 15 | times for the same set of labels. Usages and implementations of the metrics API 16 | have shown that LabelSet adds extra unnecessary complexity with little benefit. 17 | 18 | Some users prefer to avoid this performance optimization for the benefit of 19 | cleaner code, and OpenTelemetry needs to address them as well, so this means that 20 | it is important for OpenTelemetry to support record APIs where users can pass 21 | the labels directly. 22 | 23 | OpenTelemetry can always add this optimization later (a backwards-compatible 24 | change) if we determine that it is very important to have. 25 | 26 | ## Trade-offs and mitigations 27 | 28 | In cases where performance matters, here are the ways to achieve almost the same performance: 29 | 30 | - In the current API, if a `LabelSet` is reused across multiple individual 31 | records across different instruments (one record to every instrument), then the user 32 | can use the batch recording mechanism, so internally the SDK can do the label 33 | encoding once. 34 | - In the current API, if a `LabelSet` is used multiple times to record to the 35 | same instrument, then the user can use instrument bindings. 36 | - In the current API, if a `LabelSet` is used across multiple batch recordings, 37 | and this pattern becomes very important, then OpenTelemetry can add support for 38 | batches to accept bindings. 39 | 40 | To ensure that the current batch recording can help in scenarios where there are 41 | some local conditions that control which measurements are to be recorded, the 42 | recommendation is to have `newBatchRecorder` return an interface called 43 | `BatchRecorder` that can be used to add a `measurement` and, when all entries are 44 | added, call `record` to record all the `measurements`. 45 | 46 | ## Prior art and alternatives 47 | 48 | Almost all existing metric libraries do not require users to create 49 | something like a LabelSet when recording a value. 50 | -------------------------------------------------------------------------------- /oteps/metrics/0108-naming-guidelines.md: -------------------------------------------------------------------------------- 1 | # Metric instrument naming guidelines 2 | 3 | ## Purpose 4 | 5 | Names and labels for metric instruments are primarily how humans interact with metric data -- users rely on these names to build dashboards and perform analysis. The names and hierarchical structure need to be understandable and discoverable during routine exploration -- and this becomes critical during incidents. 6 | 7 | To ensure these goals and consistency in future metric naming standards, this document outlines a meta-standard for these names.
8 | 9 | ## Guidelines 10 | 11 | Metric names and labels exist within a single universe and a single hierarchy. Metric names and labels MUST be considered within the universe of all existing metric names. When defining new metric names and labels, consider the prior art of existing standard metrics and metrics from frameworks/libraries. 12 | 13 | Associated metrics SHOULD be nested together in a hierarchy based on their usage. Define a top-level hierarchy for common metric categories: for OS metrics, like CPU and network; for app runtimes, like GC internals. Libraries and frameworks should nest their metrics into a hierarchy as well. This aids in discovery and ad hoc comparison. This allows a user to find similar metrics given a certain metric. 14 | 15 | The hierarchical structure of metrics defines the namespacing. Supporting OpenTelemetry artifacts define the metric structures and hierarchies for some categories of metrics, and these can assist decisions when creating future metrics. 16 | 17 | Common labels SHOULD be consistently named. This aids in discoverability and disambiguates similar labels to metric names. 18 | 19 | ["As a rule of thumb, **aggregations** over all the dimensions of a given metric **SHOULD** be meaningful,"](https://prometheus.io/docs/practices/naming/#metric-names) as Prometheus recommends. 20 | 21 | Semantic ambiguity SHOULD be avoided. Use prefixed metric names in cases where similar metrics have significantly different implementations across the breadth of all existing metrics. For example, every garbage collected runtime has slightly different strategies and measures. Using a single set of metric names for GC, not divided by the runtime, could create dissimilar comparisons and confusion for end users. (For example, prefer `runtime.java.gc*` over `runtime.gc.*`.) Measures of many operating system metrics are similar. 22 | 23 | Conventional metrics, or metrics that have their units included in OpenTelemetry metadata (e.g. `metric.WithUnit` in Go), SHOULD NOT include the units in the metric name. Units may be included when they provide additional meaning to the metric name. Metrics MUST, above all, be understandable and usable. 24 | -------------------------------------------------------------------------------- /oteps/metrics/0131-otlp-export-behavior.md: -------------------------------------------------------------------------------- 1 | # OTLP Exporters Configurable Export Behavior 2 | 3 | Add support for configurable export behavior in OTLP exporters. 4 | 5 | The required behaviors are 1) exporting cumulative values since start time by default, and 2) exporting delta values per collection interval when configured. 6 | 7 | ## Motivation 8 | 9 | 1. **Export behavior should be configurable**: Metric backends such as Prometheus, Cortex, and other backends supporting Prometheus time-series that ingest data from the Prometheus remote write API require cumulative values for cumulative metrics and additive metrics, per collection interval. In order to export metrics generated by the SDK using the Collector, incoming values from the SDK should be cumulative values. Note that, in comparison, backends like Statsd expect delta values for each collection interval. To support different backend requirements, OTLP metric export behavior needs to be configurable, with cumulative values exported as a default. See discussion in [#731](https://github.com/open-telemetry/opentelemetry-specification/issues/731). 10 | 2.
**Cumulative export should be the default behavior since it is more reliable**: Cumulative export also addresses the problem of missing delta values for an UpDownCounter. The final consumer of the UpDownCounter metrics is almost always interested in the cumulative value. If the Metrics SDK exports deltas and allows the consumer to aggregate cumulative values, then any deltas lost in transit will lead to inaccurate final values. This loss may impact whether an alert is fired or not. On the other hand, exporting cumulative values guarantees that only resolution is lost, but the value received by the final consumer will be correct eventually. 11 | 1. *Note:* The [Metrics SIG](https://docs.google.com/document/d/1LfDVyBJlIewwm3a0JtDtEjkusZjzQE3IAix8b0Fxy3Y/edit#heading=h.fxqkpi2ya3br) *July 23 and July 30 meetings concluded that cumulative export behavior is more reliable.* For example, Bogdan Drutu in [#725](https://github.com/open-telemetry/opentelemetry-specification/issues/725) notes "When exporting delta values of an UpdownCounter instrument, the export pipeline becomes a single point of failure for the alerts, any dropped "delta" will influence the "current" value of the metric in an undefined way." 12 | 13 | ## Explanation 14 | 15 | In order to support Prometheus backends using cumulative values as well as other backends that use delta values, the SDK needs to be configurable and support an OTLP exporter which exports cumulative values by default and delta values when configured. The implication is that the OTLP metric protocol should support both cumulative and delta reporting strategies. 16 | 17 | Users should be allowed to declare an environment variable or configuration field that determines this setting for OTLP exporters. 18 | 19 | ## Internal details 20 | 21 | An OTLP exporter can report the export behavior it needs to the Metrics SDK. The SDK can merge the previous state of metrics with the current value and return the appropriate values to the exporter. 22 | 23 | Configurable export behavior is already coded in the Metrics Processor component in the [Go SDK](https://github.com/open-telemetry/opentelemetry-go/pull/840). However, this functionality is hardcoded today and would need to be rewritten to handle user-defined configuration. See the OTLP metrics definition in [PR #193](https://github.com/open-telemetry/opentelemetry-proto/pull/193), which supports both export behaviors. 24 | 25 | ## Trade-offs and mitigations 26 | 27 | High memory usage: To support cumulative exports, the SDK needs to maintain state for each cumulative metric. This means users with high-cardinality metrics can experience high memory usage. 28 | 29 | The high-cardinality metrics use case could be addressed by adding a metrics aggregation processor to the Collector. This would enable the Collector, when configured as an Agent, to support converting delta OTLP to cumulative OTLP. This functionality requires a single agent for each metric-generating client so that all delta values of a metric are converted by the same Collector instance. 30 | 31 | ## Prior art and alternatives 32 | 33 | A discussed solution is to convert deltas to cumulative in the Collector, both as an agent and as a standalone service. However, supporting conversion in the Collector when it is a standalone service requires implementation of a routing mechanism across all Collector instances to ensure delta values of the same cumulative metric are aggregated by the same Collector instance.
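To make the state merging described under "Internal details" more concrete, here is a minimal, dependency-free Go sketch; the types and names are hypothetical and do not reflect the actual Go SDK processor:

```golang
package main

import "fmt"

// streamKey identifies one metric stream: an instrument name plus a
// canonicalized label set.
type streamKey struct {
	metric string
	labels string
}

// cumulativeState keeps a running sum per stream so that cumulative values
// can be produced even though instrumentation records deltas.
type cumulativeState struct {
	sums map[streamKey]float64
}

func newCumulativeState() *cumulativeState {
	return &cumulativeState{sums: make(map[streamKey]float64)}
}

// record merges one delta into the running total and returns the value a
// cumulative-configured exporter would send for this collection interval.
func (c *cumulativeState) record(metric, labels string, delta float64) float64 {
	k := streamKey{metric: metric, labels: labels}
	c.sums[k] += delta
	return c.sums[k]
}

func main() {
	state := newCumulativeState()
	for _, delta := range []float64{3, 5, 2} {
		cumulative := state.record("http.requests", `path="/x"`, delta)
		fmt.Printf("delta=%v cumulative=%v\n", delta, cumulative)
	}
}
```

With this kind of state, a delta-configured exporter would send 3, 5, 2 while a cumulative-configured exporter would send 3, 8, 10; keeping a running sum per label set is also exactly where the high-cardinality memory cost discussed above comes from.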
34 | 35 | ## Open questions 36 | 37 | As stated in the previous section, delta to cumulative conversion in the Collector is needed to support Prometheus type backends. This may be necessary in the Collector in the future because the Collector may also accept metrics from other sources that report delta values. On the other hand, if sources are reporting cumulative values, cumulative to delta conversion is needed to support Statsd type backends. 38 | 39 | The future implementation for conversions in the Collector is still under discussion. There is a proposal to add a [Metric Aggregation Processor](https://github.com/open-telemetry/opentelemetry-collector/issues/1422) in the Collector which recommends a solution for delta to cumulative conversion. 40 | 41 | ## Future possibilities 42 | 43 | A future improvement that could be considered is to support dynamic configuration from a configuration server that determines the appropriate export strategy of OTLP clients at startup. 44 | -------------------------------------------------------------------------------- /oteps/profiles/images/otep0239/profiles-data-model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/oteps/profiles/images/otep0239/profiles-data-model.png -------------------------------------------------------------------------------- /oteps/trace/0002-remove-spandata.md: -------------------------------------------------------------------------------- 1 | # Remove SpanData 2 | 3 | Remove and replace SpanData by adding span start and end options. 4 | 5 | ## Motivation 6 | 7 | SpanData represents an immutable span object, creating a fairly large API for all of its fields (12 to be exact). It exposes what feels like an SDK concern and implementation detail to the API surface. As a user, this is another API I need to learn how to use, and ID generation might also need to be exposed. As an implementer, it is a new data type that needs to be supported. The primary motivation for removing SpanData revolves around the desire to reduce the size of the tracing API. 8 | 9 | ## Explanation 10 | 11 | SpanData has a couple of use cases. 12 | 13 | The first use case revolves around creating a span synchronously but needing to change the start time to a more accurate timestamp. For example, in an HTTP server, you might record the time the first byte was received, parse the headers, determine the span name, and then create the span. The moment the span was created isn't representative of when the request actually began, so the time the first byte was received would become the span's start time. Since the current API doesn't allow start timestamps, you'd need to create a SpanData object. The big downside is that you don't end up with an active span object. 14 | 15 | The second use case comes from the need to construct and report out of band spans, meaning that you're creating "custom" spans for an operation you don't own. One good example of this is a span sink that takes in structured logs that contain correlation IDs and a duration (e.g. from Splunk) and converts them to spans for your tracing system. Another example is running a sidecar on an HAProxy machine, tailing the request logs, and creating spans. SpanData supports this out-of-band reporting case, whereas the current Span API does not, because you cannot set the start and end timestamps.
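As a rough illustration of this second use case (all type and field names below are hypothetical, not part of any OpenTelemetry API), a span sink turning a structured log entry into an out-of-band span has to set explicit start and end timestamps, which is what SpanData permits and the plain Span API does not:

```go
package main

import (
	"fmt"
	"time"
)

// logEntry is a simplified structured log record, e.g. parsed from a proxy access log.
type logEntry struct {
	CorrelationID string
	Operation     string
	Start         time.Time
	Duration      time.Duration
}

// spanData is a stand-in for the immutable span type discussed in this OTEP.
type spanData struct {
	TraceID   string
	Name      string
	StartTime time.Time
	EndTime   time.Time
}

// toSpanData builds an out-of-band span for an operation this process did not own.
func toSpanData(e logEntry) spanData {
	return spanData{
		TraceID:   e.CorrelationID,
		Name:      e.Operation,
		StartTime: e.Start,
		EndTime:   e.Start.Add(e.Duration),
	}
}

func main() {
	entry := logEntry{
		CorrelationID: "abc123",
		Operation:     "GET /health",
		Start:         time.Now().Add(-200 * time.Millisecond),
		Duration:      200 * time.Millisecond,
	}
	fmt.Printf("%+v\n", toSpanData(entry))
}
```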
16 | 17 | I'd like to propose getting rid of SpanData and `tracer.recordSpanData()` and replacing them by allowing `tracer.startSpan()` to accept a start timestamp option and `span.end()` to accept an end timestamp option. This reduces the API surface, consolidating on a single span type. Options would meet the requirements for out of band reporting. 18 | 19 | ## Internal details 20 | 21 | `startSpan()` would change so you can include an optional start timestamp, span ID, and resource. When you have a span sink, out of band spans may have different resources than the tracer they are being reported to, so you want to pass an explicit resource. For `span.end()` you would have an optional end timestamp. The exact implementation would be language specific, so some would use an options pattern with function overloading or variadic parameters, or add these options to the span builder. 22 | 23 | ## Trade-offs and mitigations 24 | 25 | From : If the underlying SDK automatically adds tags to spans such as thread-id, stacktrace, and cpu-usage when a span is started, they would be incorrect for out of band spans as the tracer would not know the difference between in and out of band spans. This can be mitigated by indicating that the span is out of band to prevent attaching incorrect information, possibly with an `isOutOfBand()` option on `startSpan()`. 26 | 27 | ## Prior art and alternatives 28 | 29 | The OpenTracing specification for `tracer.startSpan()` includes an optional start timestamp and zero or more tags. It also calls out an optional end timestamp and bulk logging for `span.end()`. 30 | 31 | ## Open questions 32 | 33 | There also seems to be some hidden dependency between SpanData and the sampler API. For example, given a complete SpanData object with a start and end timestamp, I imagine there's a use case where the sampler can look at that and decide "this took a long time" and sample it. Is this a real use case? Is there a requirement to be able to provide complete span objects to the sampler? 34 | 35 | ## Future Work 36 | 37 | We might want to include attributes as a start option to give the underlying sampler more information to sample with. We also might want to include optional events, which would be for bulk adding events with explicit timestamps. 38 | 39 | We will also want to ensure, assuming the span or subtrace is being created in the same process, that the timestamps use the same precision and are monotonic. 40 | 41 | ## Related Issues 42 | 43 | Removing SpanData would resolve [open-telemetry/opentelemetry-specification#71](https://github.com/open-telemetry/opentelemetry-specification/issues/71). 44 | 45 | Options would solve [open-telemetry/opentelemetry-specification#139](https://github.com/open-telemetry/opentelemetry-specification/issues/139). 46 | 47 | By removing SpanData, [open-telemetry/opentelemetry-specification#92](https://github.com/open-telemetry/opentelemetry-specification/issues/92) can be resolved and closed. 48 | 49 | [open-telemetry/opentelemetry-specification#68](https://github.com/open-telemetry/opentelemetry-specification/issues/68) can be closed. An optional resource can provide a different resource for out of band spans; otherwise the tracer can provide the default resource. 50 | 51 | [open-telemetry/opentelemetry-specification#60](https://github.com/open-telemetry/opentelemetry-specification/issues/60) can be closed due to removal of SpanData.
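For illustration, a hypothetical Go sketch of the options-based replacement proposed above follows; the option names (`WithStartTime`, `WithEndTime`) are invented for this example and are not an actual OpenTelemetry API.

```go
package main

import (
	"fmt"
	"time"
)

type span struct {
	name      string
	startTime time.Time
	endTime   time.Time
}

// StartOption mutates a span at creation time (functional options pattern).
type StartOption func(*span)

// WithStartTime overrides the start timestamp, e.g. with the time the first
// byte of an HTTP request was received.
func WithStartTime(t time.Time) StartOption {
	return func(s *span) { s.startTime = t }
}

// StartSpan creates a span, defaulting the start time to "now".
func StartSpan(name string, opts ...StartOption) *span {
	s := &span{name: name, startTime: time.Now()}
	for _, opt := range opts {
		opt(s)
	}
	return s
}

// EndOption mutates a span when it is ended.
type EndOption func(*span)

// WithEndTime sets an explicit end timestamp, as needed for out-of-band spans.
func WithEndTime(t time.Time) EndOption {
	return func(s *span) { s.endTime = t }
}

// End finishes the span, defaulting the end time to "now".
func (s *span) End(opts ...EndOption) {
	s.endTime = time.Now()
	for _, opt := range opts {
		opt(s)
	}
}

func main() {
	firstByte := time.Now().Add(-50 * time.Millisecond)
	sp := StartSpan("GET /users", WithStartTime(firstByte))
	sp.End(WithEndTime(time.Now()))
	fmt.Printf("%s: %v -> %v\n", sp.name, sp.startTime, sp.endTime)
}
```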
52 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "devDependencies": { 3 | "markdown-link-check": "3.13.7", 4 | "markdown-toc": "1.2.0", 5 | "markdownlint-cli": "0.31.0" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /schemas/1.10.0: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: https://opentelemetry.io/schemas/1.10.0 3 | versions: 4 | 1.10.0: 5 | 1.9.0: 6 | 1.8.0: 7 | spans: 8 | changes: 9 | - rename_attributes: 10 | attribute_map: 11 | db.cassandra.keyspace: db.name 12 | db.hbase.namespace: db.name 13 | 1.7.0: 14 | 1.6.1: 15 | 1.5.0: 16 | 1.4.0: 17 | -------------------------------------------------------------------------------- /schemas/1.11.0: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: https://opentelemetry.io/schemas/1.11.0 3 | versions: 4 | 1.11.0: 5 | 1.10.0: 6 | 1.9.0: 7 | 1.8.0: 8 | spans: 9 | changes: 10 | - rename_attributes: 11 | attribute_map: 12 | db.cassandra.keyspace: db.name 13 | db.hbase.namespace: db.name 14 | 1.7.0: 15 | 1.6.1: 16 | 1.5.0: 17 | 1.4.0: 18 | -------------------------------------------------------------------------------- /schemas/1.12.0: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: https://opentelemetry.io/schemas/1.12.0 3 | versions: 4 | 1.12.0: 5 | 1.11.0: 6 | 1.10.0: 7 | 1.9.0: 8 | 1.8.0: 9 | spans: 10 | changes: 11 | - rename_attributes: 12 | attribute_map: 13 | db.cassandra.keyspace: db.name 14 | db.hbase.namespace: db.name 15 | 1.7.0: 16 | 1.6.1: 17 | 1.5.0: 18 | 1.4.0: 19 | -------------------------------------------------------------------------------- /schemas/1.13.0: -------------------------------------------------------------------------------- 1 | file_format: 1.1.0 2 | schema_url: https://opentelemetry.io/schemas/1.13.0 3 | versions: 4 | 1.13.0: 5 | spans: 6 | changes: 7 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2614 8 | - rename_attributes: 9 | attribute_map: 10 | net.peer.ip: net.sock.peer.addr 11 | net.host.ip: net.sock.host.addr 12 | 1.12.0: 13 | 1.11.0: 14 | 1.10.0: 15 | 1.9.0: 16 | 1.8.0: 17 | spans: 18 | changes: 19 | - rename_attributes: 20 | attribute_map: 21 | db.cassandra.keyspace: db.name 22 | db.hbase.namespace: db.name 23 | 1.7.0: 24 | 1.6.1: 25 | 1.5.0: 26 | 1.4.0: 27 | -------------------------------------------------------------------------------- /schemas/1.14.0: -------------------------------------------------------------------------------- 1 | file_format: 1.1.0 2 | schema_url: https://opentelemetry.io/schemas/1.14.0 3 | versions: 4 | 1.14.0: 5 | 1.13.0: 6 | spans: 7 | changes: 8 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2614 9 | - rename_attributes: 10 | attribute_map: 11 | net.peer.ip: net.sock.peer.addr 12 | net.host.ip: net.sock.host.addr 13 | 1.12.0: 14 | 1.11.0: 15 | 1.10.0: 16 | 1.9.0: 17 | 1.8.0: 18 | spans: 19 | changes: 20 | - rename_attributes: 21 | attribute_map: 22 | db.cassandra.keyspace: db.name 23 | db.hbase.namespace: db.name 24 | 1.7.0: 25 | 1.6.1: 26 | 1.5.0: 27 | 1.4.0: 28 | -------------------------------------------------------------------------------- /schemas/1.15.0: 
-------------------------------------------------------------------------------- 1 | file_format: 1.1.0 2 | schema_url: https://opentelemetry.io/schemas/1.15.0 3 | versions: 4 | 1.15.0: 5 | spans: 6 | changes: 7 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2743 8 | - rename_attributes: 9 | attribute_map: 10 | http.retry_count: http.resend_count 11 | 1.14.0: 12 | 1.13.0: 13 | spans: 14 | changes: 15 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2614 16 | - rename_attributes: 17 | attribute_map: 18 | net.peer.ip: net.sock.peer.addr 19 | net.host.ip: net.sock.host.addr 20 | 1.12.0: 21 | 1.11.0: 22 | 1.10.0: 23 | 1.9.0: 24 | 1.8.0: 25 | spans: 26 | changes: 27 | - rename_attributes: 28 | attribute_map: 29 | db.cassandra.keyspace: db.name 30 | db.hbase.namespace: db.name 31 | 1.7.0: 32 | 1.6.1: 33 | 1.5.0: 34 | 1.4.0: 35 | -------------------------------------------------------------------------------- /schemas/1.16.0: -------------------------------------------------------------------------------- 1 | file_format: 1.1.0 2 | schema_url: https://opentelemetry.io/schemas/1.16.0 3 | versions: 4 | 1.16.0: 5 | 1.15.0: 6 | spans: 7 | changes: 8 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2743 9 | - rename_attributes: 10 | attribute_map: 11 | http.retry_count: http.resend_count 12 | 1.14.0: 13 | 1.13.0: 14 | spans: 15 | changes: 16 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2614 17 | - rename_attributes: 18 | attribute_map: 19 | net.peer.ip: net.sock.peer.addr 20 | net.host.ip: net.sock.host.addr 21 | 1.12.0: 22 | 1.11.0: 23 | 1.10.0: 24 | 1.9.0: 25 | 1.8.0: 26 | spans: 27 | changes: 28 | - rename_attributes: 29 | attribute_map: 30 | db.cassandra.keyspace: db.name 31 | db.hbase.namespace: db.name 32 | 1.7.0: 33 | 1.6.1: 34 | 1.5.0: 35 | 1.4.0: 36 | -------------------------------------------------------------------------------- /schemas/1.17.0: -------------------------------------------------------------------------------- 1 | file_format: 1.1.0 2 | schema_url: https://opentelemetry.io/schemas/1.17.0 3 | versions: 4 | 1.17.0: 5 | spans: 6 | changes: 7 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2957 8 | - rename_attributes: 9 | attribute_map: 10 | messaging.consumer_id: messaging.consumer.id 11 | messaging.protocol: net.app.protocol.name 12 | messaging.protocol_version: net.app.protocol.version 13 | messaging.destination: messaging.destination.name 14 | messaging.temp_destination: messaging.destination.temporary 15 | messaging.destination_kind: messaging.destination.kind 16 | messaging.message_id: messaging.message.id 17 | messaging.conversation_id: messaging.message.conversation_id 18 | messaging.message_payload_size_bytes: messaging.message.payload_size_bytes 19 | messaging.message_payload_compressed_size_bytes: messaging.message.payload_compressed_size_bytes 20 | messaging.rabbitmq.routing_key: messaging.rabbitmq.destination.routing_key 21 | messaging.kafka.message_key: messaging.kafka.message.key 22 | messaging.kafka.partition: messaging.kafka.destination.partition 23 | messaging.kafka.tombstone: messaging.kafka.message.tombstone 24 | messaging.rocketmq.message_type: messaging.rocketmq.message.type 25 | messaging.rocketmq.message_tag: messaging.rocketmq.message.tag 26 | messaging.rocketmq.message_keys: messaging.rocketmq.message.keys 27 | messaging.kafka.consumer_group: messaging.kafka.consumer.group 28 | 1.16.0: 29 | 1.15.0: 30 | spans: 31 | changes: 32 | # 
https://github.com/open-telemetry/opentelemetry-specification/pull/2743 33 | - rename_attributes: 34 | attribute_map: 35 | http.retry_count: http.resend_count 36 | 1.14.0: 37 | 1.13.0: 38 | spans: 39 | changes: 40 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2614 41 | - rename_attributes: 42 | attribute_map: 43 | net.peer.ip: net.sock.peer.addr 44 | net.host.ip: net.sock.host.addr 45 | 1.12.0: 46 | 1.11.0: 47 | 1.10.0: 48 | 1.9.0: 49 | 1.8.0: 50 | spans: 51 | changes: 52 | - rename_attributes: 53 | attribute_map: 54 | db.cassandra.keyspace: db.name 55 | db.hbase.namespace: db.name 56 | 1.7.0: 57 | 1.6.1: 58 | 1.5.0: 59 | 1.4.0: 60 | -------------------------------------------------------------------------------- /schemas/1.18.0: -------------------------------------------------------------------------------- 1 | file_format: 1.1.0 2 | schema_url: https://opentelemetry.io/schemas/1.18.0 3 | versions: 4 | 1.18.0: 5 | 1.17.0: 6 | spans: 7 | changes: 8 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2957 9 | - rename_attributes: 10 | attribute_map: 11 | messaging.consumer_id: messaging.consumer.id 12 | messaging.protocol: net.app.protocol.name 13 | messaging.protocol_version: net.app.protocol.version 14 | messaging.destination: messaging.destination.name 15 | messaging.temp_destination: messaging.destination.temporary 16 | messaging.destination_kind: messaging.destination.kind 17 | messaging.message_id: messaging.message.id 18 | messaging.conversation_id: messaging.message.conversation_id 19 | messaging.message_payload_size_bytes: messaging.message.payload_size_bytes 20 | messaging.message_payload_compressed_size_bytes: messaging.message.payload_compressed_size_bytes 21 | messaging.rabbitmq.routing_key: messaging.rabbitmq.destination.routing_key 22 | messaging.kafka.message_key: messaging.kafka.message.key 23 | messaging.kafka.partition: messaging.kafka.destination.partition 24 | messaging.kafka.tombstone: messaging.kafka.message.tombstone 25 | messaging.rocketmq.message_type: messaging.rocketmq.message.type 26 | messaging.rocketmq.message_tag: messaging.rocketmq.message.tag 27 | messaging.rocketmq.message_keys: messaging.rocketmq.message.keys 28 | messaging.kafka.consumer_group: messaging.kafka.consumer.group 29 | 1.16.0: 30 | 1.15.0: 31 | spans: 32 | changes: 33 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2743 34 | - rename_attributes: 35 | attribute_map: 36 | http.retry_count: http.resend_count 37 | 1.14.0: 38 | 1.13.0: 39 | spans: 40 | changes: 41 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2614 42 | - rename_attributes: 43 | attribute_map: 44 | net.peer.ip: net.sock.peer.addr 45 | net.host.ip: net.sock.host.addr 46 | 1.12.0: 47 | 1.11.0: 48 | 1.10.0: 49 | 1.9.0: 50 | 1.8.0: 51 | spans: 52 | changes: 53 | - rename_attributes: 54 | attribute_map: 55 | db.cassandra.keyspace: db.name 56 | db.hbase.namespace: db.name 57 | 1.7.0: 58 | 1.6.1: 59 | 1.5.0: 60 | 1.4.0: 61 | -------------------------------------------------------------------------------- /schemas/1.19.0: -------------------------------------------------------------------------------- 1 | file_format: 1.1.0 2 | schema_url: https://opentelemetry.io/schemas/1.19.0 3 | versions: 4 | 1.19.0: 5 | spans: 6 | changes: 7 | # https://github.com/open-telemetry/opentelemetry-specification/pull/3209 8 | - rename_attributes: 9 | attribute_map: 10 | faas.execution: faas.invocation_id 11 | # 
https://github.com/open-telemetry/opentelemetry-specification/pull/3188 12 | - rename_attributes: 13 | attribute_map: 14 | faas.id: cloud.resource_id 15 | # https://github.com/open-telemetry/opentelemetry-specification/pull/3190 16 | - rename_attributes: 17 | attribute_map: 18 | http.user_agent: user_agent.original 19 | resources: 20 | changes: 21 | # https://github.com/open-telemetry/opentelemetry-specification/pull/3190 22 | - rename_attributes: 23 | attribute_map: 24 | browser.user_agent: user_agent.original 25 | 1.18.0: 26 | 1.17.0: 27 | spans: 28 | changes: 29 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2957 30 | - rename_attributes: 31 | attribute_map: 32 | messaging.consumer_id: messaging.consumer.id 33 | messaging.protocol: net.app.protocol.name 34 | messaging.protocol_version: net.app.protocol.version 35 | messaging.destination: messaging.destination.name 36 | messaging.temp_destination: messaging.destination.temporary 37 | messaging.destination_kind: messaging.destination.kind 38 | messaging.message_id: messaging.message.id 39 | messaging.conversation_id: messaging.message.conversation_id 40 | messaging.message_payload_size_bytes: messaging.message.payload_size_bytes 41 | messaging.message_payload_compressed_size_bytes: messaging.message.payload_compressed_size_bytes 42 | messaging.rabbitmq.routing_key: messaging.rabbitmq.destination.routing_key 43 | messaging.kafka.message_key: messaging.kafka.message.key 44 | messaging.kafka.partition: messaging.kafka.destination.partition 45 | messaging.kafka.tombstone: messaging.kafka.message.tombstone 46 | messaging.rocketmq.message_type: messaging.rocketmq.message.type 47 | messaging.rocketmq.message_tag: messaging.rocketmq.message.tag 48 | messaging.rocketmq.message_keys: messaging.rocketmq.message.keys 49 | messaging.kafka.consumer_group: messaging.kafka.consumer.group 50 | 1.16.0: 51 | 1.15.0: 52 | spans: 53 | changes: 54 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2743 55 | - rename_attributes: 56 | attribute_map: 57 | http.retry_count: http.resend_count 58 | 1.14.0: 59 | 1.13.0: 60 | spans: 61 | changes: 62 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2614 63 | - rename_attributes: 64 | attribute_map: 65 | net.peer.ip: net.sock.peer.addr 66 | net.host.ip: net.sock.host.addr 67 | 1.12.0: 68 | 1.11.0: 69 | 1.10.0: 70 | 1.9.0: 71 | 1.8.0: 72 | spans: 73 | changes: 74 | - rename_attributes: 75 | attribute_map: 76 | db.cassandra.keyspace: db.name 77 | db.hbase.namespace: db.name 78 | 1.7.0: 79 | 1.6.1: 80 | 1.5.0: 81 | 1.4.0: 82 | -------------------------------------------------------------------------------- /schemas/1.20.0: -------------------------------------------------------------------------------- 1 | file_format: 1.1.0 2 | schema_url: https://opentelemetry.io/schemas/1.20.0 3 | versions: 4 | 1.20.0: 5 | spans: 6 | changes: 7 | # https://github.com/open-telemetry/opentelemetry-specification/pull/3272 8 | - rename_attributes: 9 | attribute_map: 10 | net.app.protocol.name: net.protocol.name 11 | net.app.protocol.version: net.protocol.version 12 | 1.19.0: 13 | spans: 14 | changes: 15 | # https://github.com/open-telemetry/opentelemetry-specification/pull/3209 16 | - rename_attributes: 17 | attribute_map: 18 | faas.execution: faas.invocation_id 19 | # https://github.com/open-telemetry/opentelemetry-specification/pull/3188 20 | - rename_attributes: 21 | attribute_map: 22 | faas.id: cloud.resource_id 23 | # 
https://github.com/open-telemetry/opentelemetry-specification/pull/3190 24 | - rename_attributes: 25 | attribute_map: 26 | http.user_agent: user_agent.original 27 | resources: 28 | changes: 29 | # https://github.com/open-telemetry/opentelemetry-specification/pull/3190 30 | - rename_attributes: 31 | attribute_map: 32 | browser.user_agent: user_agent.original 33 | 1.18.0: 34 | 1.17.0: 35 | spans: 36 | changes: 37 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2957 38 | - rename_attributes: 39 | attribute_map: 40 | messaging.consumer_id: messaging.consumer.id 41 | messaging.protocol: net.app.protocol.name 42 | messaging.protocol_version: net.app.protocol.version 43 | messaging.destination: messaging.destination.name 44 | messaging.temp_destination: messaging.destination.temporary 45 | messaging.destination_kind: messaging.destination.kind 46 | messaging.message_id: messaging.message.id 47 | messaging.conversation_id: messaging.message.conversation_id 48 | messaging.message_payload_size_bytes: messaging.message.payload_size_bytes 49 | messaging.message_payload_compressed_size_bytes: messaging.message.payload_compressed_size_bytes 50 | messaging.rabbitmq.routing_key: messaging.rabbitmq.destination.routing_key 51 | messaging.kafka.message_key: messaging.kafka.message.key 52 | messaging.kafka.partition: messaging.kafka.destination.partition 53 | messaging.kafka.tombstone: messaging.kafka.message.tombstone 54 | messaging.rocketmq.message_type: messaging.rocketmq.message.type 55 | messaging.rocketmq.message_tag: messaging.rocketmq.message.tag 56 | messaging.rocketmq.message_keys: messaging.rocketmq.message.keys 57 | messaging.kafka.consumer_group: messaging.kafka.consumer.group 58 | 1.16.0: 59 | 1.15.0: 60 | spans: 61 | changes: 62 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2743 63 | - rename_attributes: 64 | attribute_map: 65 | http.retry_count: http.resend_count 66 | 1.14.0: 67 | 1.13.0: 68 | spans: 69 | changes: 70 | # https://github.com/open-telemetry/opentelemetry-specification/pull/2614 71 | - rename_attributes: 72 | attribute_map: 73 | net.peer.ip: net.sock.peer.addr 74 | net.host.ip: net.sock.host.addr 75 | 1.12.0: 76 | 1.11.0: 77 | 1.10.0: 78 | 1.9.0: 79 | 1.8.0: 80 | spans: 81 | changes: 82 | - rename_attributes: 83 | attribute_map: 84 | db.cassandra.keyspace: db.name 85 | db.hbase.namespace: db.name 86 | 1.7.0: 87 | 1.6.1: 88 | 1.5.0: 89 | 1.4.0: 90 | -------------------------------------------------------------------------------- /schemas/1.4.0: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: https://opentelemetry.io/schemas/1.4.0 3 | versions: 4 | 1.4.0: 5 | -------------------------------------------------------------------------------- /schemas/1.5.0: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: https://opentelemetry.io/schemas/1.5.0 3 | versions: 4 | 1.5.0: 5 | 1.4.0: 6 | -------------------------------------------------------------------------------- /schemas/1.6.1: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: https://opentelemetry.io/schemas/1.6.1 3 | versions: 4 | 1.6.1: 5 | 1.5.0: 6 | 1.4.0: 7 | -------------------------------------------------------------------------------- /schemas/1.7.0: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: 
https://opentelemetry.io/schemas/1.7.0 3 | versions: 4 | 1.7.0: 5 | 1.6.1: 6 | 1.5.0: 7 | 1.4.0: 8 | -------------------------------------------------------------------------------- /schemas/1.8.0: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: https://opentelemetry.io/schemas/1.8.0 3 | versions: 4 | 1.8.0: 5 | spans: 6 | changes: 7 | - rename_attributes: 8 | attribute_map: 9 | db.cassandra.keyspace: db.name 10 | db.hbase.namespace: db.name 11 | 1.7.0: 12 | 1.6.1: 13 | 1.5.0: 14 | 1.4.0: 15 | -------------------------------------------------------------------------------- /schemas/1.9.0: -------------------------------------------------------------------------------- 1 | file_format: 1.0.0 2 | schema_url: https://opentelemetry.io/schemas/1.9.0 3 | versions: 4 | 1.9.0: 5 | 1.8.0: 6 | spans: 7 | changes: 8 | - rename_attributes: 9 | attribute_map: 10 | db.cassandra.keyspace: db.name 11 | db.hbase.namespace: db.name 12 | 1.7.0: 13 | 1.6.1: 14 | 1.5.0: 15 | 1.4.0: 16 | -------------------------------------------------------------------------------- /specification/README.md: -------------------------------------------------------------------------------- 1 | 14 | 15 | # OpenTelemetry Specification 16 | 17 | ## Contents 18 | 19 | - [Overview](overview.md) 20 | - [Glossary](glossary.md) 21 | - Principles and Guidelines 22 | - [Core Principles](specification-principles.md) 23 | - [Versioning and stability for OpenTelemetry clients](versioning-and-stability.md) 24 | - [Library Guidelines](library-guidelines.md) 25 | - [Package/Library Layout](library-layout.md) 26 | - [General error handling guidelines](error-handling.md) 27 | - [Performance](performance.md) 28 | - API Specification 29 | - [Context](context/README.md) 30 | - [Propagators](context/api-propagators.md) 31 | - [Environment Variable Carriers](context/env-carriers.md) 32 | - [Baggage](baggage/api.md) 33 | - [Tracing](trace/api.md) 34 | - [Metrics](metrics/api.md) 35 | - [Logs](logs/README.md) 36 | - [API](logs/api.md) 37 | - SDK Specification 38 | - [Tracing](trace/sdk.md) 39 | - [Metrics](metrics/sdk.md) 40 | - [Logs](logs/sdk.md) 41 | - [Resource](resource/sdk.md) 42 | - [Configuration](configuration/README.md) 43 | - Data Specification 44 | - [Semantic Conventions](overview.md#semantic-conventions) 45 | - [Protocol](protocol/README.md) 46 | - [Metrics](metrics/data-model.md) 47 | - [Logs](logs/data-model.md) 48 | - [Profiles](profiles/mappings.md) 49 | - Compatibility 50 | - [OpenCensus](compatibility/opencensus.md) 51 | - [OpenTracing](compatibility/opentracing.md) 52 | - [Prometheus and OpenMetrics](compatibility/prometheus_and_openmetrics.md) 53 | - [Trace Context in non-OTLP Log Formats](compatibility/logging_trace_context.md) 54 | 55 | ## Notation Conventions and Compliance 56 | 57 | The keywords "MUST", "MUST NOT", "REQUIRED", "SHOULD", 58 | "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" in the 59 | [specification][] are to be interpreted as described in [BCP 60 | 14](https://tools.ietf.org/html/bcp14) 61 | [[RFC2119](https://tools.ietf.org/html/rfc2119)] 62 | [[RFC8174](https://tools.ietf.org/html/rfc8174)] when, and only when, they 63 | appear in all capitals, as shown here. 64 | 65 | An implementation of the [specification][] is not compliant if it fails to 66 | satisfy one or more of the "MUST", "MUST NOT", "REQUIRED", 67 | requirements defined in the [specification][]. 
Conversely, an 68 | implementation of the [specification][] is compliant if it satisfies all the 69 | "MUST", "MUST NOT", "REQUIRED", requirements defined in the [specification][]. 70 | 71 | ## Project Naming 72 | 73 | - The official project name is "OpenTelemetry" (with no space between "Open" and 74 | "Telemetry"). 75 | - The official acronym used by the OpenTelemetry project is "OTel". "OT" MAY be 76 | used only as a part of a longer acronym, such as OTCA (OpenTelemetry Certified Associate). 77 | - The official names for sub-projects, like language specific implementations, 78 | follow the pattern of "OpenTelemetry {the name of the programming language, 79 | runtime or component}", for example, "OpenTelemetry Python", "OpenTelemetry 80 | .NET" or "OpenTelemetry Collector". 81 | 82 | ## About the project 83 | 84 | See the [project repository][] for information about the following, and more: 85 | 86 | - [Change / contribution process](../README.md#change--contribution-process) 87 | - [Project timeline](../README.md#project-timeline) 88 | - [Versioning the specification](../README.md#versioning-the-specification) 89 | - [License](../README.md#license) 90 | 91 | [project repository]: https://github.com/open-telemetry/opentelemetry-specification 92 | [specification]: overview.md 93 | -------------------------------------------------------------------------------- /specification/baggage/README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # Baggage 8 | -------------------------------------------------------------------------------- /specification/common/attribute-naming.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Attribute Naming 6 | 7 | This page has moved to 8 | [Naming](https://opentelemetry.io/docs/specs/semconv/general/naming/). 9 | -------------------------------------------------------------------------------- /specification/common/attribute-requirement-level.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | # Attribute Requirement Levels for Semantic Conventions 7 | 8 | This page has moved to 9 | [Attribute Requirement Levels](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/). 10 | -------------------------------------------------------------------------------- /specification/common/mapping-to-non-otlp.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # OpenTelemetry Transformation to non-OTLP Formats 6 | 7 | **Status**: [Stable](../document-status.md) 8 | 9 | All OpenTelemetry concepts and data recorded using OpenTelemetry API can be 10 | directly and precisely represented using corresponding messages and fields of 11 | OTLP format. However, for other formats this is not always the case. Sometimes a 12 | format will not have a native way to represent a particular OpenTelemetry 13 | concept or a field of a concept. 14 | 15 | This document defines the transformation between OpenTelemetry and formats other 16 | than OTLP, for OpenTelemetry fields and concepts that have no direct semantic 17 | equivalent in those other formats. 18 | 19 | Note: when a format has a direct semantic equivalent for a particular field or 20 | concept then the recommendation in this document MUST be ignored. 21 | 22 | See also additional specific transformation rules for 23 | [Prometheus](../compatibility/prometheus_and_openmetrics.md) and [Zipkin](../trace/sdk_exporters/zipkin.md). 
24 | The specific rules for Prometheus and Zipkin take precedence over the generic rules defined 25 | in this document. 26 | 27 | ## Mappings 28 | 29 | ### InstrumentationScope 30 | 31 | OpenTelemetry `InstrumentationScope`'s fields MUST be reported as key-value 32 | pairs associated with the Span, Metric Data Point or LogRecord using the following mapping: 33 | 34 | 35 | | Attribute | Type | Description | Examples | Requirement Level | 36 | |---|---|---|---|---| 37 | | `otel.scope.name` | string | The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). | `io.opentelemetry.contrib.mongodb` | Recommended | 38 | | `otel.scope.version` | string | The version of the instrumentation scope - (`InstrumentationScope.Version` in OTLP). | `1.0.0` | Recommended | 39 | 40 | 41 | The following deprecated aliases MUST also be reported with exact same values for 42 | backward compatibility reasons: 43 | 44 | 45 | | Attribute | Type | Description | Examples | Requirement Level | 46 | |---|---|---|---|---| 47 | | `otel.library.name` | string | Deprecated, use the `otel.scope.name` attribute. | `io.opentelemetry.contrib.mongodb` | Recommended | 48 | | `otel.library.version` | string | Deprecated, use the `otel.scope.version` attribute. | `1.0.0` | Recommended | 49 | 50 | 51 | ### Span Status 52 | 53 | Span `Status` MUST be reported as key-value pairs associated with the Span, 54 | unless the `Status` is `UNSET`. In the latter case it MUST NOT be reported. 55 | 56 | The following table defines the OpenTelemetry `Status`'s mapping to Span's 57 | key-value pairs: 58 | 59 | 60 | | Attribute | Type | Description | Examples | Requirement Level | 61 | |---|---|---|---|---| 62 | | `otel.status_code` | string | Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET. | `OK` | Recommended | 63 | | `otel.status_description` | string | Description of the Status if it has a value, otherwise not set. | `resource not found` | Recommended | 64 | 65 | `otel.status_code` MUST be one of the following: 66 | 67 | | Value | Description | 68 | |---|---| 69 | | `OK` | The operation has been validated by an Application developer or Operator to have completed successfully. | 70 | | `ERROR` | The operation contains an error. | 71 | 72 | 73 | ### Dropped Attributes Count 74 | 75 | OpenTelemetry dropped attributes count MUST be reported as a key-value 76 | pair associated with the corresponding data entity (e.g. Span, Span Link, Span Event, 77 | Metric data point, LogRecord, etc). The key name MUST be `otel.dropped_attributes_count`. 78 | 79 | This key-value pair should only be recorded when it contains a non-zero value. 80 | 81 | ### Dropped Events Count 82 | 83 | OpenTelemetry Span's dropped events count MUST be reported as a key-value pair 84 | associated with the Span. The key name MUST be `otel.dropped_events_count`. 85 | 86 | This key-value pair should only be recorded when it contains a non-zero value. 87 | 88 | ### Dropped Links Count 89 | 90 | OpenTelemetry Span's dropped links count MUST be reported as a key-value pair 91 | associated with the Span. The key name MUST be `otel.dropped_links_count`. 92 | 93 | This key-value pair should only be recorded when it contains a non-zero value. 94 | 95 | ### Instrumentation Scope Attributes 96 | 97 | Exporters to formats that don't have a concept that is equivalent to the Scope 98 | SHOULD record the attributes at the most suitable place in their corresponding format, 99 | typically at the Span, Metric or LogRecord equivalent. 
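As a rough sketch of how an exporter might apply the mappings above when targeting a format that only supports flat key-value pairs (function and type names are illustrative; only the attribute keys and the omit-when-unset/non-zero rules come from this document):

```go
package main

import "fmt"

type scope struct{ name, version string }

// flatten produces the key-value pairs a non-OTLP exporter would attach to the
// exported span, metric data point, or log record.
func flatten(sc scope, statusCode, statusDesc string, droppedAttrs int) map[string]string {
	out := map[string]string{
		"otel.scope.name":    sc.name,
		"otel.scope.version": sc.version,
		// Deprecated aliases, reported with the same values for backward compatibility.
		"otel.library.name":    sc.name,
		"otel.library.version": sc.version,
	}
	if statusCode != "" { // an UNSET status MUST NOT be reported
		out["otel.status_code"] = statusCode
		if statusDesc != "" {
			out["otel.status_description"] = statusDesc
		}
	}
	if droppedAttrs > 0 { // only record non-zero dropped counts
		out["otel.dropped_attributes_count"] = fmt.Sprint(droppedAttrs)
	}
	return out
}

func main() {
	kv := flatten(scope{"io.opentelemetry.contrib.mongodb", "1.0.0"}, "ERROR", "resource not found", 2)
	fmt.Println(kv)
}
```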
100 | -------------------------------------------------------------------------------- /specification/compatibility/README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # Compatibility 8 | -------------------------------------------------------------------------------- /specification/compatibility/logging_trace_context.md: -------------------------------------------------------------------------------- 1 | # Trace Context in non-OTLP Log Formats 2 | 3 | **Status**: [Stable](../document-status.md) 4 | 5 |
6 | Table of Contents 7 | 8 | 9 | 10 | - [Overview](#overview) 11 | * [Syslog RFC5424](#syslog-rfc5424) 12 | * [Plain Text Formats](#plain-text-formats) 13 | * [JSON Formats](#json-formats) 14 | * [Other Structured Formats](#other-structured-formats) 15 | 16 | 17 | 18 |
19 | 20 | ## Overview 21 | 22 | OTLP Log Records have top-level fields 23 | representing [trace context](../logs/data-model.md#trace-context-fields). This 24 | document defines how trace context should be recorded in non-OTLP Log Formats. 25 | To summarize, the following field names should be used in legacy formats: 26 | 27 | - "trace_id" for [TraceId](../logs/data-model.md#field-traceid), lowercase and hex-encoded. 28 | - "span_id" for [SpanId](../logs/data-model.md#field-spanid), lowercase and hex-encoded. 29 | - "trace_flags" for [trace flags](../logs/data-model.md#field-traceflags), formatted 30 | according to W3C traceflags format. 31 | 32 | All 3 fields are optional (see the [data model](../logs/data-model.md) for details of 33 | which combination of fields is considered valid). 34 | 35 | ### Syslog RFC5424 36 | 37 | Trace id, span id and trace flags SHOULD be recorded via SD-ID "opentelemetry". 38 | 39 | For example: 40 | 41 | ``` 42 | [opentelemetry trace_id="102981abcd2901" span_id="abcdef1010" trace_flags="01"] 43 | ``` 44 | 45 | ### Plain Text Formats 46 | 47 | The fields SHOULD be recorded according to the customary approach used for a 48 | particular format (e.g. the tab-separated field:value format of LTSV). For example: 49 | 50 | ``` 51 | host:192.168.0.1	trace_id:102981abcd2901	span_id:abcdef1010	time:[01/Jan/2010:10:11:23 -0400]	req:GET /health HTTP/1.0	status:200 52 | ``` 53 | 54 | ### JSON Formats 55 | 56 | The fields SHOULD be recorded as top-level fields in the JSON structure. For example: 57 | 58 | ```json 59 | { 60 | "timestamp":1581385157.14429, 61 | "body":"Incoming request", 62 | "trace_id":"102981abcd2901", 63 | "span_id":"abcdef1010" 64 | } 65 | ``` 66 | 67 | ### Other Structured Formats 68 | 69 | The fields SHOULD be recorded as top-level structured attributes of the log 70 | record, as is customary for the particular format. 71 | -------------------------------------------------------------------------------- /specification/configuration/README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # Configuration 8 | 9 | OpenTelemetry SDK components are highly configurable. This specification 10 | outlines the mechanisms by which OpenTelemetry components can be configured. It 11 | does not attempt to specify the details of what can be configured. 12 | 13 | ## Configuration Interfaces 14 | 15 | ### Programmatic 16 | 17 | The SDK MUST provide a programmatic interface for all configuration. 18 | This interface SHOULD be written in the language of the SDK itself. 19 | All other configuration mechanisms SHOULD be built on top of this interface. 20 | 21 | An example of this programmatic interface is accepting a well-defined 22 | struct on an SDK builder class. From that, one could build a CLI that accepts a 23 | file (YAML, JSON, TOML, ...) and then transforms it into that well-defined struct 24 | consumable by the programmatic interface ( 25 | see [declarative configuration](#declarative-configuration)). 26 | 27 | ### Environment variables 28 | 29 | Environment variable configuration defines a set of language agnostic 30 | environment variables for common configuration goals. 31 | 32 | See [OpenTelemetry Environment Variable Specification](./sdk-environment-variables.md).
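For instance, a minimal Go sketch of how an SDK might resolve one such language-agnostic variable, `OTEL_LOGS_EXPORTER`, falling back to a built-in default when it is unset (the resolution logic and the `otlp` default shown here are illustrative; the normative values live in the environment variable specification linked above):

```go
package main

import (
	"fmt"
	"os"
)

// logsExporterName returns the exporter selected via OTEL_LOGS_EXPORTER,
// or a default when the variable is not set.
func logsExporterName() string {
	if v := os.Getenv("OTEL_LOGS_EXPORTER"); v != "" {
		return v
	}
	return "otlp" // assumed default for this sketch
}

func main() {
	fmt.Println("logs exporter:", logsExporterName())
}
```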
33 | 34 | ### Declarative configuration 35 | 36 | Declarative configuration provides a mechanism for configuring OpenTelemetry 37 | which is more expressive and full-featured than 38 | the [environment variable](#environment-variables) based scheme, and language 39 | agnostic in a way not possible with [programmatic configuration](#programmatic). 40 | Notably, declarative configuration defines tooling allowing users to load 41 | OpenTelemetry components according to a file-based representation of a 42 | standardized configuration data model. 43 | 44 | Declarative configuration consists of the following main components: 45 | 46 | * [Data model](./data-model.md) defines data structures which allow users to 47 | specify an intended configuration of OpenTelemetry SDK components and 48 | instrumentation. The data model includes a file-based representation. 49 | * [Instrumentation configuration API](./api.md) allows 50 | instrumentation libraries to consume configuration by reading relevant 51 | configuration options during initialization. 52 | * [Configuration SDK](./sdk.md) defines SDK capabilities around file 53 | configuration, including an In-Memory configuration model, support for 54 | referencing custom extension plugin interfaces in configuration files, and 55 | operations to parse configuration files and interpret the configuration data 56 | model. 57 | 58 | ### Other Mechanisms 59 | 60 | Additional configuration mechanisms SHOULD be provided in whatever 61 | language/format/style is idiomatic for the language of the SDK. The 62 | SDK can include as many configuration mechanisms as appropriate. 63 | -------------------------------------------------------------------------------- /specification/configuration/api.md: -------------------------------------------------------------------------------- 1 | # Instrumentation Configuration API 2 | 3 | **Status**: [Development](../document-status.md) 4 | 5 | 6 | 7 | - [Overview](#overview) 8 | * [ConfigProvider](#configprovider) 9 | + [ConfigProvider operations](#configprovider-operations) 10 | - [Get instrumentation config](#get-instrumentation-config) 11 | * [ConfigProperties](#configproperties) 12 | 13 | 14 | 15 | ## Overview 16 | 17 | The instrumentation configuration API is part of 18 | the [declarative configuration interface](./README.md#declarative-configuration). 19 | 20 | The API allows [instrumentation libraries](../glossary.md#instrumentation-library) 21 | to consume configuration by reading relevant configuration during 22 | initialization. For example, an instrumentation library for an HTTP client can 23 | read the set of HTTP request and response headers to capture. 24 | 25 | It consists of the following main components: 26 | 27 | * [ConfigProvider](#configprovider) is the entry point of the API. 28 | * [ConfigProperties](#configproperties) is a programmatic representation of a 29 | configuration mapping node. 30 | 31 | ### ConfigProvider 32 | 33 | `ConfigProvider` provides access to configuration properties relevant to 34 | instrumentation. 35 | 36 | Instrumentation libraries access `ConfigProvider` during 37 | initialization. `ConfigProvider` may be passed as an argument to the 38 | instrumentation library, or the instrumentation library may access it from a 39 | central place. Thus, the API SHOULD provide a way to access a global 40 | default `ConfigProvider`, and set/register it. 
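A minimal Go sketch of what this API surface could look like follows; interface and method names are illustrative, not normative. `ConfigProperties` mirrors a configuration mapping node, and the provider exposes the instrumentation-relevant subtree (or an empty value when it is not configured), as described in the operations below.

```go
// Package config is an illustrative sketch of the instrumentation
// configuration API, not an actual OpenTelemetry package.
package config

// ConfigProperties is a programmatic view of a single configuration mapping node.
type ConfigProperties interface {
	// Scalar accessors return the value and whether the key was present.
	GetString(key string) (string, bool)
	GetBool(key string) (bool, bool)
	GetInt64(key string) (int64, bool)
	GetDouble(key string) (float64, bool)
	// Nested mappings and sequences of mappings are exposed as ConfigProperties.
	GetMapping(key string) (ConfigProperties, bool)
	GetMappingSlice(key string) ([]ConfigProperties, bool)
	GetStringSlice(key string) ([]string, bool)
	// Keys lists the property keys present on this node.
	Keys() []string
}

// ConfigProvider is the entry point instrumentation libraries use during
// initialization, passed as an argument or obtained from a global default.
type ConfigProvider interface {
	// InstrumentationConfig returns the configuration mapping node relevant
	// to instrumentation, or nil when no such node is configured.
	InstrumentationConfig() ConfigProperties
}
```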
41 | 42 | #### ConfigProvider operations 43 | 44 | The `ConfigProvider` MUST provide the following functions: 45 | 46 | * [Get instrumentation config](#get-instrumentation-config) 47 | 48 | TODO: decide if additional operations are needed to improve API ergonomics 49 | 50 | ##### Get instrumentation config 51 | 52 | Obtain configuration relevant to instrumentation libraries. 53 | 54 | **Returns:** [`ConfigProperties`](#configproperties) representing 55 | the [`.instrumentation`](https://github.com/open-telemetry/opentelemetry-configuration/blob/670901762dd5cce1eecee423b8660e69f71ef4be/examples/kitchen-sink.yaml#L438-L439) 56 | configuration mapping node. 57 | 58 | If the `.instrumentation` node is not set, get instrumentation config MUST 59 | return nil, null, undefined or another language-specific idiomatic pattern 60 | denoting empty. 61 | 62 | ### ConfigProperties 63 | 64 | `ConfigProperties` is a programmatic representation of a configuration mapping 65 | node (i.e. a YAML mapping node). 66 | 67 | `ConfigProperties` MUST provide accessors for reading all properties from the 68 | mapping node it represents, including: 69 | 70 | * scalars (string, boolean, double precision floating point, 64-bit integer) 71 | * mappings, which SHOULD be represented as `ConfigProperties` 72 | * sequences of scalars 73 | * sequences of mappings, which SHOULD be represented as `ConfigProperties` 74 | * the set of property keys present 75 | 76 | `ConfigProperties` SHOULD provide access to properties in a type safe manner, 77 | based on what is idiomatic in the language. 78 | 79 | `ConfigProperties` SHOULD allow a caller to determine if a property is present 80 | with a null value, versus not set. 81 | -------------------------------------------------------------------------------- /specification/context/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | # Context 9 | 10 | **Status**: [Stable, Feature-freeze](../document-status.md). 11 | 12 |
13 | Table of Contents 14 | 15 | 16 | 17 | - [Overview](#overview) 18 | - [Create a key](#create-a-key) 19 | - [Get value](#get-value) 20 | - [Set value](#set-value) 21 | - [Optional Global operations](#optional-global-operations) 22 | * [Get current Context](#get-current-context) 23 | * [Attach Context](#attach-context) 24 | * [Detach Context](#detach-context) 25 | 26 | 27 | 28 |
29 | 30 | ## Overview 31 | 32 | A `Context` is a propagation mechanism which carries execution-scoped values 33 | across API boundaries and between logically associated [execution units](../glossary.md#execution-unit). 34 | Cross-cutting concerns access their data in-process using the same shared 35 | `Context` object. 36 | 37 | A `Context` MUST be immutable, and its write operations MUST 38 | result in the creation of a new `Context` containing the original 39 | values and the specified values updated. 40 | 41 | Languages are expected to use the single, widely used `Context` implementation 42 | if one exists for them. In the cases where an extremely clear, pre-existing 43 | option is not available, OpenTelemetry MUST provide its own `Context` 44 | implementation. Depending on the language, its usage may be either explicit 45 | or implicit. 46 | 47 | Users writing instrumentation in languages that use `Context` implicitly are 48 | discouraged from using the `Context` API directly. In those cases, users will 49 | manipulate `Context` through cross-cutting concerns APIs instead, in order to 50 | perform operations such as setting trace or baggage entries for a specified 51 | `Context`. 52 | 53 | A `Context` is expected to have the following operations, with their 54 | respective language differences: 55 | 56 | ## Create a key 57 | 58 | Keys are used to allow cross-cutting concerns to control access to their local state. 59 | They are unique such that other libraries which may use the same context 60 | cannot accidentally use the same key. It is recommended that concerns mediate 61 | data access via an API, rather than provide direct public access to their keys. 62 | 63 | The API MUST accept the following parameter: 64 | 65 | - The key name. The key name exists for debugging purposes and does not uniquely identify the key. Multiple calls to `CreateKey` with the same name SHOULD NOT return the same value unless language constraints dictate otherwise. Different languages may impose different restrictions on the expected types, so this parameter remains an implementation detail. 66 | 67 | The API MUST return an opaque object representing the newly created key. 68 | 69 | ## Get value 70 | 71 | Concerns can access their local state in the current execution state 72 | represented by a `Context`. 73 | 74 | The API MUST accept the following parameters: 75 | 76 | - The `Context`. 77 | - The key. 78 | 79 | The API MUST return the value in the `Context` for the specified key. 80 | 81 | ## Set value 82 | 83 | Concerns can record their local state in the current execution state 84 | represented by a `Context`. 85 | 86 | The API MUST accept the following parameters: 87 | 88 | - The `Context`. 89 | - The key. 90 | - The value to be set. 91 | 92 | The API MUST return a new `Context` containing the new value. 93 | 94 | ## Optional Global operations 95 | 96 | These operations are expected to only be implemented by languages 97 | using `Context` implicitly, and thus are optional. These operations 98 | SHOULD only be used to implement automatic scope switching and define 99 | higher level APIs by SDK components and OpenTelemetry instrumentation libraries. 100 | 101 | ### Get current Context 102 | 103 | The API MUST return the `Context` associated with the caller's current execution unit. 104 | 105 | ### Attach Context 106 | 107 | Associates a `Context` with the caller's current execution unit. 108 | 109 | The API MUST accept the following parameters: 110 | 111 | - The `Context`. 
112 | 113 | The API MUST return a value that can be used as a `Token` to restore the previous 114 | `Context`. 115 | 116 | Note that every call to this operation should result in a corresponding call to 117 | [Detach Context](#detach-context). 118 | 119 | ### Detach Context 120 | 121 | Resets the `Context` associated with the caller's current execution unit 122 | to the value it had before attaching a specified `Context`. 123 | 124 | This operation is intended to help make sure the correct `Context` 125 | is associated with the caller's current execution unit. Users can 126 | rely on it to identify a wrong call order, i.e. trying to detach 127 | a `Context` that is not the current instance. In this case, the operation 128 | can emit a signal to warn users of the wrong call order, such as logging 129 | an error or returning an error value. 130 | 131 | The API MUST accept the following parameters: 132 | 133 | - A `Token` that was returned by a previous call to attach a `Context`. 134 | 135 | The API MAY return a value used to check whether the operation 136 | was successful or not. 137 | -------------------------------------------------------------------------------- /specification/document-status.md: -------------------------------------------------------------------------------- 1 | # Definitions of Document Statuses 2 | 3 | Specification documents (files) may explicitly define a "Status", typically 4 | shown immediately after the document title. When present, the "Status" applies 5 | to the individual document only and not to the entire specification or any other 6 | documents. The following table describes what the statuses mean. 7 | 8 | ## Lifecycle status 9 | 10 | The support guarantees and allowed changes are governed by the lifecycle of the document. Lifecycle stages are defined in the [Versioning and Stability](versioning-and-stability.md) document. 11 | 12 | |Status |Explanation| 13 | |--------------------|-----------| 14 | |No explicit "Status"|Equivalent to Development.| 15 | |Development |Breaking changes are allowed.| 16 | |Stable |Breaking changes are no longer allowed. See [stability guarantees](versioning-and-stability.md#stable) for details.| 17 | |Deprecated |Changes are no longer allowed, except for editorial changes.| 18 | 19 | The specification follows 20 | [OTEP 0232](../oteps/0232-maturity-of-otel.md#explanation) 21 | maturity level definitions. 22 | 23 | ## Feature freeze 24 | 25 | In addition to the statuses above, documents may be marked as `Feature-freeze`. These documents are not currently accepting new feature requests, to allow the Technical Committee time to focus on other areas of the specification. Editorial changes are still accepted. Changes that address production issues with existing features are still accepted. 26 | 27 | Feature freeze is separate from a lifecycle status. The lifecycle represents the support requirements for the document; feature freeze only indicates the current focus of the specification community. The feature freeze label may be applied to a document at any lifecycle stage. By definition, deprecated documents have a feature freeze in place. 28 | 29 | ## Mixed 30 | 31 | Some documents have individual sections with different statuses. These documents are marked with the status `Mixed` at the top, for clarity.
32 | -------------------------------------------------------------------------------- /specification/entities/README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # Entities 8 | 9 |
10 | Table of Contents 11 | 12 | 13 | 14 | - [Overview](#overview) 15 | - [Specifications](#specifications) 16 | 17 | 18 | 19 |
20 | 21 | ## Overview 22 | 23 | Entity represents an object of interest associated with produced telemetry: 24 | traces, metrics, logs, profiles, etc. 25 | 26 | ## Specifications 27 | 28 | - [Data Model](./data-model.md) 29 | -------------------------------------------------------------------------------- /specification/library-layout.md: -------------------------------------------------------------------------------- 1 | # OpenTelemetry Project Package Layout 2 | 3 | This document describes the "look and feel" of a basic layout for OpenTelemetry 4 | projects. This package layout is intentionally generic and it doesn't try to impose a language 5 | specific package structure. 6 | 7 | ## API Package 8 | 9 | Here is a proposed generic package structure for the OpenTelemetry API package. 10 | 11 | A typical top-level directory layout: 12 | 13 | ``` 14 | api 15 | ├── context 16 | │ └── propagation 17 | ├── metrics 18 | ├── trace 19 | │ └── propagation 20 | ├── baggage 21 | │ └── propagation 22 | ├── internal 23 | └── logs 24 | ``` 25 | 26 | > Use of lowercase, CamelCase or Snake Case (stylized as snake_case) names depends on the language. 27 | 28 | ### `/api/context` 29 | 30 | This directory describes the API that provides in-process context propagation. 31 | 32 | ### `/api/metrics` 33 | 34 | This directory describes the [Metrics API](./metrics/api.md) that can be used to 35 | record application metrics. 36 | 37 | ### `/api/baggage` 38 | 39 | This directory describes the [Baggage API](baggage/api.md) that can be used to 40 | manage context propagation and metric event attributes. 41 | 42 | ### `/api/trace` 43 | 44 | The [Trace API](trace/api.md) consists of a few main classes: 45 | 46 | - `Tracer` is used for all operations. See the [Tracer](trace/api.md#tracer) section. 47 | - `Span` is a mutable object storing information about the current operation 48 | execution. See the [Span](trace/api.md#span) section. 49 | 50 | ### `/api/internal` (_Optional_) 51 | 52 | Library components and implementations that shouldn't be exposed to the users. 53 | If a language has an idiomatic layout for internal components, please follow 54 | the language idiomatic style. 55 | 56 | ### `/api/logs` (_In the future_) 57 | 58 | > TODO: logs operations 59 | 60 | ## SDK Package 61 | 62 | Here is a proposed generic package structure for the OpenTelemetry SDK package. 63 | 64 | A typical top-level directory layout: 65 | 66 | ``` 67 | sdk 68 | ├── context 69 | ├── metrics 70 | ├── resource 71 | ├── trace 72 | ├── baggage 73 | ├── internal 74 | └── logs 75 | ``` 76 | 77 | > Use of lowercase, CamelCase or Snake Case (stylized as snake_case) names depends on the language. 78 | 79 | ### `/sdk/context` 80 | 81 | This directory describes the SDK implementation for api/context. 82 | 83 | ### `/sdk/metrics` 84 | 85 | This directory describes the SDK implementation for api/metrics. 86 | 87 | ### `/sdk/resource` 88 | 89 | The [resource directory](resource/sdk.md) primarily defines a type 90 | [Resource](overview.md#resources) that captures information about the entity for 91 | which stats or traces are recorded. For example, metrics exposed by a Kubernetes 92 | container can be linked to a resource that specifies the cluster, namespace, 93 | pod, and container name. 94 | 95 | ### `/sdk/baggage` 96 | 97 | > TODO 98 | 99 | ### `/sdk/trace` 100 | 101 | This directory describes the [Tracing SDK](trace/sdk.md) implementation.
102 | 103 | ### `/sdk/internal` (_Optional_) 104 | 105 | Library components and implementations that shouldn't be exposed to the users. 106 | If a language has an idiomatic layout for internal components, please follow 107 | the language idiomatic style. 108 | 109 | ### `/sdk/logs` (_In the future_) 110 | 111 | > TODO: logs operations 112 | -------------------------------------------------------------------------------- /specification/logs/img/app-to-file-logs-fb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/logs/img/app-to-file-logs-fb.png -------------------------------------------------------------------------------- /specification/logs/img/app-to-file-logs-otelcol.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/logs/img/app-to-file-logs-otelcol.png -------------------------------------------------------------------------------- /specification/logs/img/app-to-otelcol.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/logs/img/app-to-otelcol.png -------------------------------------------------------------------------------- /specification/logs/img/appender.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/logs/img/appender.png -------------------------------------------------------------------------------- /specification/logs/img/application-api-sdk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/logs/img/application-api-sdk.png -------------------------------------------------------------------------------- /specification/logs/img/separate-collection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/logs/img/separate-collection.png -------------------------------------------------------------------------------- /specification/logs/img/unified-collection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/logs/img/unified-collection.png -------------------------------------------------------------------------------- /specification/logs/noop.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Logs API No-Op Implementation 6 | 7 | **Status**: [Stable](../document-status.md), except where otherwise specified 8 | 9 |
10 | Table of Contents 11 | 12 | 13 | 14 | - [LoggerProvider](#loggerprovider) 15 | * [Logger Creation](#logger-creation) 16 | - [Logger](#logger) 17 | * [Emit LogRecord](#emit-logrecord) 18 | * [Enabled](#enabled) 19 | 20 | 21 | 22 |
23 | 24 | Users of OpenTelemetry need a way to disable the API from actually 25 | performing any operations. The No-Op OpenTelemetry API implementation 26 | (henceforth referred to as the No-Op) provides users with this 27 | functionality. It implements the [OpenTelemetry Logs API](./api.md) 28 | so that no telemetry is produced and computation resources are minimized. 29 | 30 | All language implementations of OpenTelemetry MUST provide a No-Op. 31 | 32 | The [Logs API](./api.md) defines components with various operations. 33 | All No-Op components MUST NOT hold configuration or operational state. All No-Op 34 | operations MUST accept all defined parameters, MUST NOT validate any arguments 35 | received, and MUST NOT return any non-empty error or log any message. 36 | 37 | ## LoggerProvider 38 | 39 | The No-Op MUST allow the creation of multiple `LoggerProvider`s. 40 | 41 | Since all `LoggerProvider`s hold the same empty state, a No-Op MAY 42 | provide the same `LoggerProvider` instances to all creation requests. 43 | 44 | ### Logger Creation 45 | 46 | New `Logger` instances are always created with a [LoggerProvider](./api.md#loggerprovider). 47 | Therefore, `LoggerProvider` MUST allow for the creation of `Logger`s. 48 | All `Logger`s created MUST be an instance of the [No-Op Logger](#logger). 49 | 50 | Since all `Logger`s will hold the same empty state, a `LoggerProvider` MAY 51 | return the same `Logger` instances to all creation requests. 52 | 53 | ## Logger 54 | 55 | ### Emit LogRecord 56 | 57 | The No-Op `Logger` MUST allow 58 | for [emitting LogRecords](./api.md#emit-a-logrecord). 59 | 60 | ### Enabled 61 | 62 | **Status**: [Development](../document-status.md) 63 | 64 | MUST always return `false`. 65 | -------------------------------------------------------------------------------- /specification/logs/sdk_exporters/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | # Logs Exporters 9 | -------------------------------------------------------------------------------- /specification/logs/sdk_exporters/stdout.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Logs Exporter - Standard output 6 | 7 | **Status**: [Stable](../../document-status.md) 8 | 9 | "Standard output" LogRecord Exporter is a [LogRecord 10 | Exporter](../sdk.md#logrecordexporter) which outputs the logs to stdout/console. 11 | 12 | The exporter's output format is unspecified and can vary between 13 | implementations. Documentation SHOULD warn users about this. The following 14 | wording is recommended (modify as needed): 15 | 16 | > This exporter is intended for debugging and learning purposes. It is not 17 | > recommended for production use. The output format is not standardized and can 18 | > change at any time. 19 | > 20 | > If a standardized format for exporting logs to stdout is desired, consider 21 | > using the [File Exporter](../../protocol/file-exporter.md), if available. 22 | > However, please review the status of the File Exporter and verify if it is 23 | > stable and production-ready. 24 | 25 | [OpenTelemetry SDK](../../overview.md#sdk) authors MAY choose the best idiomatic 26 | name for their language. For example, ConsoleExporter, StdoutExporter, 27 | StreamExporter, etc.
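For illustration only, an SDK that picks the name `ConsoleLogRecordExporter` might be wired up roughly as follows (pseudo-Java; the class and builder names are hypothetical and not required by this specification), pairing the exporter with the simple processor recommended below:

```java
// Hypothetical names: the exporter writes each LogRecord to stdout.
LogRecordExporter exporter = new ConsoleLogRecordExporter();

// Paired with a simple (synchronous) LogRecordProcessor, as recommended below
// for automatic configuration via OTEL_LOGS_EXPORTER.
SdkLoggerProvider loggerProvider = SdkLoggerProvider.builder()
    .addLogRecordProcessor(SimpleLogRecordProcessor.create(exporter))
    .build();
```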
28 | 29 | If a language provides a mechanism to automatically configure a 30 | [LogRecordProcessor](../sdk.md#logrecordprocessor) to pair with the associated 31 | exporter (e.g., using the [`OTEL_LOGS_EXPORTER` environment 32 | variable](../../configuration/sdk-environment-variables.md#exporter-selection)), by 33 | default the standard output exporter SHOULD be paired with a [simple 34 | processor](../sdk.md#simple-processor). 35 | -------------------------------------------------------------------------------- /specification/metrics/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | # OpenTelemetry Metrics 9 | 10 |
11 | Table of Contents 12 | 13 | 14 | 15 | - [Overview](#overview) 16 | * [Design Goals](#design-goals) 17 | * [Concepts](#concepts) 18 | + [API](#api) 19 | + [SDK](#sdk) 20 | + [Programming Model](#programming-model) 21 | - [Specifications](#specifications) 22 | - [References](#references) 23 | 24 | 25 | 26 |
27 | 28 | ## Overview 29 | 30 | ### Design Goals 31 | 32 | Given there are many well-established metrics solutions that exist today, it is 33 | important to understand the goals of OpenTelemetry’s metrics effort: 34 | 35 | * **Being able to connect metrics to other signals**. For example, metrics and 36 | traces can be correlated via [exemplars](data-model.md#exemplars), and metrics attributes can be enriched 37 | via [Baggage](../baggage/api.md) and [Context](../context/README.md). 38 | Additionally, [Resource](../resource/sdk.md) can be applied to 39 | [logs](../overview.md#log-signal)/[metrics](../overview.md#metric-signal)/[traces](../overview.md#tracing-signal) 40 | in a consistent way. 41 | 42 | * **Providing a path for [OpenCensus](https://opencensus.io/) customers to 43 | migrate to OpenTelemetry**. This was the original goal of OpenTelemetry - 44 | converging OpenCensus and OpenTracing. We will focus on providing the 45 | semantics and capability, instead of doing a 1-1 mapping of the APIs. 46 | 47 | * **Working with existing metrics instrumentation protocols and standards**. 48 | Here is the minimum set of goals: 49 | * Providing full support for [Prometheus](https://prometheus.io/) - users 50 | should be able to use OpenTelemetry clients and 51 | [Collector](../overview.md#collector) to collect and export metrics, with 52 | the ability to achieve the same functionality as the native Prometheus 53 | clients. 54 | * Providing the ability to collect [StatsD](https://github.com/statsd/statsd) 55 | metrics using the [OpenTelemetry Collector](../overview.md#collector). 56 | 57 | ### Concepts 58 | 59 | #### API 60 | 61 | The **OpenTelemetry Metrics API** ("the API" hereafter) serves two purposes: 62 | 63 | * Capturing raw measurements efficiently and simultaneously. 64 | * Decoupling the instrumentation from the [SDK](#sdk), allowing the SDK to be 65 | specified/included in the application. 66 | 67 | When no [SDK](#sdk) is explicitly included/enabled in the application, no 68 | telemetry data will be collected. Please refer to the overall [OpenTelemetry 69 | API](../overview.md#api) concept and [API and Minimal 70 | Implementation](../library-guidelines.md#api-and-minimal-implementation) for 71 | more information. 72 | 73 | #### SDK 74 | 75 | The **OpenTelemetry Metrics SDK** ("the SDK" hereafter) implements the API, 76 | providing functionality and extensibility such as configuration, aggregation, 77 | processors and exporters. 78 | 79 | OpenTelemetry requires a [separation of the API from the 80 | SDK](../library-guidelines.md#requirements), so that different SDKs can be 81 | configured at run time. Please refer to the overall [OpenTelemetry 82 | SDK](../overview.md#sdk) concept for more information. 83 | 84 | #### Programming Model 85 | 86 | ```text 87 | +------------------+ 88 | | MeterProvider | +-----------------+ +--------------+ 89 | | Meter A | Measurements... | | Metrics... | | 90 | | Instrument X +-----------------> In-memory state +-------------> MetricReader | 91 | | Instrument Y | | | | | 92 | | Meter B | +-----------------+ +--------------+ 93 | | Instrument Z | 94 | | ... | +-----------------+ +--------------+ 95 | | ... | Measurements... | | Metrics... | | 96 | | ... +-----------------> In-memory state +-------------> MetricReader | 97 | | ... | | | | | 98 | | ... 
| +-----------------+ +--------------+ 99 | +------------------+ 100 | ``` 101 | 102 | ## Specifications 103 | 104 | * [Metrics API](./api.md) 105 | * [Metrics SDK](./sdk.md) 106 | * [Metrics Data Model and Protocol](./data-model.md) 107 | * [Metrics Requirement Levels](./metric-requirement-level.md) 108 | 109 | ## References 110 | 111 | * Scenarios for Metrics API/SDK Prototyping ([OTEP 112 | 146](../../oteps/metrics/0146-metrics-prototype-scenarios.md)) 113 | -------------------------------------------------------------------------------- /specification/metrics/img/accumulator-detail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/accumulator-detail.png -------------------------------------------------------------------------------- /specification/metrics/img/metrics-sdk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/metrics-sdk.png -------------------------------------------------------------------------------- /specification/metrics/img/model-cumulative-sum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/model-cumulative-sum.png -------------------------------------------------------------------------------- /specification/metrics/img/model-delta-histogram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/model-delta-histogram.png -------------------------------------------------------------------------------- /specification/metrics/img/model-delta-sum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/model-delta-sum.png -------------------------------------------------------------------------------- /specification/metrics/img/model-event-layer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/model-event-layer.png -------------------------------------------------------------------------------- /specification/metrics/img/model-gauge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/model-gauge.png -------------------------------------------------------------------------------- /specification/metrics/img/model-layers-stream.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/model-layers-stream.png 
-------------------------------------------------------------------------------- /specification/metrics/img/model-layers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/metrics/img/model-layers.png -------------------------------------------------------------------------------- /specification/metrics/metric-requirement-level.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | # Metric Requirement Levels for Semantic Conventions 7 | 8 | This page has moved to 9 | [Metric Requirement Levels](https://opentelemetry.io/docs/specs/semconv/general/metric-requirement-level/). 10 | -------------------------------------------------------------------------------- /specification/metrics/sdk_exporters/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | # Metrics Exporters 9 | -------------------------------------------------------------------------------- /specification/metrics/sdk_exporters/in-memory.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Metrics Exporter - In-memory 6 | 7 | **Status**: [Stable](../../document-status.md) 8 | 9 | In-memory Metrics Exporter is a [Push Metric 10 | Exporter](../sdk.md#push-metric-exporter) which accumulates metrics data in the 11 | local memory and allows to inspect it (useful for e.g. unit tests). 12 | 13 | In-memory Metrics Exporter MUST provide configuration to set 14 | the [MetricReader](../sdk.md#metricreader) output `temporality` as a function of 15 | instrument kind. This option MAY be named `temporality`, and MUST set 16 | temporality to Cumulative for all instrument kinds by default. 17 | 18 | In-memory Metrics Exporter MAY provide configuration to set 19 | the [MetricReader](../sdk.md#metricreader) default `aggregation` as a function 20 | of instrument kind. This option MAY be named `default_aggregation`, and MUST use 21 | the [default aggregation](../sdk.md#default-aggregation) by default. 22 | 23 | If a language provides a mechanism to automatically configure a 24 | [MetricReader](../sdk.md#metricreader) to pair with the associated 25 | exporter (e.g., using the [`OTEL_METRICS_EXPORTER` environment 26 | variable](../../configuration/sdk-environment-variables.md#exporter-selection)), by 27 | default the exporter MUST be paired with a [periodic exporting 28 | MetricReader](../sdk.md#periodic-exporting-metricreader). 29 | -------------------------------------------------------------------------------- /specification/metrics/sdk_exporters/otlp.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Metrics Exporter - OTLP 6 | 7 | **Status**: [Stable](../../document-status.md) 8 | 9 | ## General 10 | 11 | OTLP Metrics Exporter is a [Push Metric 12 | Exporter](../sdk.md#push-metric-exporter) which sends metrics via the 13 | [OpenTelemetry Protocol](../../protocol/README.md). 14 | 15 | OTLP Metrics Exporter MUST provide configuration to influence 16 | the [MetricReader](../sdk.md#metricreader) output `temporality` as a function of 17 | instrument kind. This option MAY be named `temporality`, and MUST set 18 | temporality preference to Cumulative for all instrument kinds by default. 
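To make "temporality as a function of instrument kind" concrete, the default cumulative preference can be pictured as the following function (pseudo-Java; the type names are illustrative, not a mandated API):

```java
// Default "cumulative" preference: every instrument kind maps to CUMULATIVE.
AggregationTemporality temporality(InstrumentKind kind) {
  return AggregationTemporality.CUMULATIVE;
}

// A "delta" preference (see OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
// below) would instead return DELTA for Counter, Asynchronous Counter and
// Histogram, and CUMULATIVE for UpDownCounter and Asynchronous UpDownCounter.
```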
19 | 20 | OTLP Metrics Exporter MUST provide configuration to influence 21 | the [MetricReader](../sdk.md#metricreader) default `aggregation` as a function 22 | of instrument kind. This option MAY be named `default_aggregation`, and MUST set 23 | the [default aggregation](../sdk.md#default-aggregation) for all instrument kinds by default. 24 | 25 | The exporter MUST provide configuration according to the [OpenTelemetry Protocol 26 | Exporter](../../protocol/exporter.md) specification. 27 | 28 | If a language provides a mechanism to automatically configure a 29 | [MetricReader](../sdk.md#metricreader) to pair with the associated 30 | Exporter (e.g., using the [`OTEL_METRICS_EXPORTER` environment 31 | variable](../../configuration/sdk-environment-variables.md#exporter-selection)), 32 | then by default: 33 | 34 | * The exporter MUST be paired with a [periodic exporting 35 | MetricReader](../sdk.md#periodic-exporting-metricreader). 36 | * The exporter MUST configure the default aggregation temporality on the 37 | basis of instrument kind using the 38 | `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` variable as described 39 | below. 40 | * The exporter MUST configure the default aggregation on the basis of instrument kind using 41 | the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` variable as described below if it is implemented. 42 | 43 | ## Additional Environment Variable Configuration 44 | 45 | | Name | Description | Default | Type | 46 | |------------------------------------------------------------|--------------------------------------------------------------------------------------------------------|-----------------------------|----------| 47 | | `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` | Configure the exporter's aggregation `temporality` option (see above) on the basis of instrument kind. | `cumulative` | [Enum][] | 48 | | `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` | Configure the exporter's `default_aggregation` option (see above) for Histogram instrument kind. | `explicit_bucket_histogram` | [Enum][] | 49 | 50 | The recognized (case-insensitive) values for `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` are: 51 | 52 | * `Cumulative`: Choose cumulative aggregation temporality for all instrument kinds. 53 | * `Delta`: Choose Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds, choose 54 | Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds. 55 | * `LowMemory`: This configuration uses Delta aggregation temporality for Synchronous Counter and Histogram and uses Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds. 56 | 57 | The "LowMemory" choice is so-named because the SDK can under certain 58 | conditions use less memory in this configuration than the others. 59 | Comparatively, the "cumulative" choice forces the SDK to maintain a 60 | delta-to-cumulative conversion for Synchronous Counter and Histogram 61 | instruments, while the "delta" choice forces the SDK to maintain a 62 | cumulative-to-delta conversion for Asynchronous Counter instruments. 63 | 64 | The recognized (case-insensitive) values for `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` are: 65 | 66 | * `explicit_bucket_histogram`: 67 | Use [Explicit Bucket Histogram Aggregation](../sdk.md#explicit-bucket-histogram-aggregation). 
68 | * `base2_exponential_bucket_histogram`: 69 | Use [Base2 Exponential Bucket Histogram Aggregation](../sdk.md#base2-exponential-bucket-histogram-aggregation). 70 | 71 | ## References 72 | 73 | - [OTEP0131 OTLP Exporters Configurable Export Behavior](../../../oteps/metrics/0131-otlp-export-behavior.md) 74 | 75 | [Enum]: ../../configuration/sdk-environment-variables.md#enum 76 | -------------------------------------------------------------------------------- /specification/metrics/sdk_exporters/prometheus.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Metrics Exporter - Prometheus 6 | 7 | **Status**: [Development](../../document-status.md) 8 | 9 | A Prometheus Exporter MUST be a [Pull Metric Exporter](../sdk.md#pull-metric-exporter) 10 | which responds to HTTP requests with Prometheus metrics in the appropriate format. 11 | 12 | OpenTelemetry metrics MUST be converted to Prometheus metrics according to the 13 | [Prometheus Compatibility specification](../../compatibility/prometheus_and_openmetrics.md). 14 | 15 | A Prometheus Exporter SHOULD use 16 | [Prometheus client libraries](https://prometheus.io/docs/instrumenting/clientlibs/) 17 | for serving Prometheus metrics. This allows the prometheus client to negotiate 18 | the [format](https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md) 19 | of the response using the `Content-Type` header. If a prometheus client library 20 | is used, the OpenTelemetry Prometheus Exporter SHOULD be modeled as a 21 | [custom Collector](https://prometheus.io/docs/instrumenting/writing_clientlibs/#overall-structure) 22 | so it can be used in conjunction with existing Prometheus instrumentation. 23 | 24 | Regardless of whether a Prometheus client library is used, the Prometheus 25 | Exporter MUST support version `0.0.4` of the 26 | [Text-based format](https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format). 27 | A Prometheus Exporter MAY support Exemplars and Exponential Histograms, 28 | which are [not currently supported by the Prometheus text format](../../compatibility/prometheus_and_openmetrics.md#differences-between-prometheus-formats), 29 | by supporting other Protocols, but is not required to implement them. 30 | 31 | A Prometheus Exporter for an OpenTelemetry metrics SDK MUST NOT use 32 | [Prometheus Remote Write format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto) 33 | or [OpenMetrics protobuf format](https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#protobuf-format). 34 | 35 | A Prometheus Exporter for an OpenTelemetry metrics SDK MUST NOT add 36 | [explicit timestamps on Metric points](https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#metric). 37 | 38 | There MUST be at most one `target` info metric exposed by an SDK 39 | Prometheus exporter. 40 | 41 | A Prometheus Exporter MUST set 42 | the [MetricReader](../sdk.md#metricreader) `temporality` as a function of 43 | instrument kind to be `cumulative` for all instrument kinds. 44 | 45 | ## Configuration 46 | 47 | A Prometheus Exporter SHOULD support a configuration option to set the host 48 | that metrics are served on. The option MAY be named `host`, and MUST be `localhost` 49 | by default. 50 | 51 | A Prometheus Exporter SHOULD support a configuration option to set the port 52 | that metrics are served on. 
The option MAY be named `port`, and MUST be `9464` by 53 | default. 54 | 55 | A Prometheus Exporter SHOULD support a configuration option to set 56 | the [MetricReader](../sdk.md#metricreader) default `aggregation` as a function 57 | of instrument kind. This option MAY be named `default_aggregation`, and MUST use 58 | the [default aggregation](../sdk.md#default-aggregation) by default. 59 | 60 | A Prometheus Exporter MAY offer configuration to add resource attributes as metric attributes. 61 | By default, it MUST NOT add any resource attributes as metric attributes. 62 | The configuration SHOULD allow the user to select which resource attributes to copy (e.g. 63 | include / exclude or regular expression based). Copied Resource attributes MUST NOT be 64 | excluded from the `target` info metric. The option MAY be named `with_resource_constant_labels`. 65 | 66 | A Prometheus Exporter MAY support a configuration option to produce metrics without a [unit suffix](../../compatibility/prometheus_and_openmetrics.md#metric-metadata) 67 | or UNIT metadata. The option MAY be named `without_units`, and MUST be `false` by default. 68 | 69 | A Prometheus Exporter MAY support a configuration option to produce metrics without a [type suffix](../../compatibility/prometheus_and_openmetrics.md#metric-metadata). 70 | The option MAY be named `without_type_suffix`, and MUST be `false` by default. 71 | 72 | A Prometheus Exporter MAY support a configuration option to produce metrics without a [scope info](../../compatibility/prometheus_and_openmetrics.md#instrumentation-scope-1) 73 | metric, or scope labels. The option MAY be named `without_scope_info`, and MUST be `false` by default. 74 | 75 | A Prometheus Exporter MAY support a configuration option to produce metrics without a [target info](../../compatibility/prometheus_and_openmetrics.md#resource-attributes-1) 76 | metric. The option MAY be named `without_target_info`, and MUST be `false` by default. 77 | -------------------------------------------------------------------------------- /specification/metrics/sdk_exporters/stdout.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | # Metrics Exporter - Standard output 6 | 7 | **Status**: [Stable](../../document-status.md) 8 | 9 | "Standard output" Metrics Exporter is a [Push Metric 10 | Exporter](../sdk.md#push-metric-exporter) which outputs the metrics to 11 | stdout/console. 12 | 13 | The exporter's output format is unspecified and can vary between 14 | implementations. Documentation SHOULD warn users about this. The following 15 | wording is recommended (modify as needed): 16 | 17 | > This exporter is intended for debugging and learning purposes. It is not 18 | > recommended for production use. The output format is not standardized and can 19 | > change at any time. 20 | > 21 | > If a standardized format for exporting metrics to stdout is desired, consider 22 | > using the [File Exporter](../../protocol/file-exporter.md), if available. 23 | > However, please review the status of the File Exporter and verify if it is 24 | > stable and production-ready. 25 | 26 | [OpenTelemetry SDK](../../overview.md#sdk) authors MAY choose the best idiomatic 27 | name for their language. For example, ConsoleExporter, StdoutExporter, 28 | StreamExporter, etc. 29 | 30 | "Standard output" Metrics Exporter MUST provide configuration to set 31 | the [MetricReader](../sdk.md#metricreader) output `temporality` as a function of 32 | instrument kind. 
This option MAY be named `temporality`, and MUST set 33 | temporality to Cumulative for all instrument kinds by default. 34 | 35 | "Standard output" Metrics Exporter MAY provide configuration to set 36 | the [MetricReader](../sdk.md#metricreader) default `aggregation` as a function 37 | of instrument kind. This option MAY be named `default_aggregation`, and MUST use 38 | the [default aggregation](../sdk.md#default-aggregation) by default. 39 | 40 | If a language provides a mechanism to automatically configure a 41 | [MetricReader](../sdk.md#metricreader) to pair with the associated 42 | exporter (e.g., using the [`OTEL_METRICS_EXPORTER` environment 43 | variable](../../configuration/sdk-environment-variables.md#exporter-selection)), by 44 | default the exporter MUST be paired with a [periodic exporting 45 | MetricReader](../sdk.md#periodic-exporting-metricreader) 46 | with a default `exportIntervalMilliseconds` of 10000. 47 | -------------------------------------------------------------------------------- /specification/performance-benchmark.md: -------------------------------------------------------------------------------- 1 | # Performance Benchmark of OpenTelemetry API 2 | 3 | This document describes common performance benchmark guidelines on how to 4 | measure and report the performance of OpenTelemetry SDKs. 5 | 6 | The goal of this benchmark is to provide a tool to get the basic performance 7 | overhead of the OpenTelemetry SDK for given events throughput on the target 8 | platform. 9 | 10 | ## Benchmark Configuration 11 | 12 | ### Span Configuration 13 | 14 | - No parent `Span` or parent `SpanContext`. 15 | - Default Span [Kind](./trace/api.md#spankind) and 16 | [Status](./trace/api.md#set-status). 17 | - Associated to a [resource](overview.md#resources) with attributes 18 | `service.name`, `service.version` and 10 characters string value for each 19 | attribute, and attribute `service.instance.id` with a unique UUID. See 20 | [Service](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/resource/README.md#service) for details. 21 | - 1 [attribute](./common/README.md#attribute) with a signed 64-bit integer 22 | value. 23 | - 1 [event](./trace/api.md#add-events) without any attributes. 24 | - The `AlwaysOn` sampler should be enabled. 25 | - Each `Span` is created and immediately ended. 26 | 27 | ### Measurement Configuration 28 | 29 | For the languages with bootstrap cost like JIT compilation, a warm-up phase is 30 | recommended to take place before the measurement, which runs under the same 31 | `Span` [configuration](#span-configuration). 32 | 33 | ## Throughput Measurement 34 | 35 | ### Create Spans 36 | 37 | Number of spans which could be created and exported via OTLP exporter in 1 38 | second per logical core and average number over all logical cores, with each 39 | span containing 10 attributes, and each attribute containing two 20 characters 40 | strings, one as attribute name the other as value. 41 | 42 | ## Instrumentation Cost 43 | 44 | ### CPU Usage Measurement 45 | 46 | With given number of span throughput specified by user, or 10,000 spans per 47 | second as default if user does not input the number, measure and report the CPU 48 | usage for SDK with both default configured simple and batching span processors 49 | together with OTLP exporter. 
The benchmark should create an out-of-process OTLP 50 | receiver which listens on the exporting target, or adopts an existing OTLP exporter 51 | which runs out-of-process, responds with a success status immediately, and drops 52 | the data. The collector should not add significant CPU overhead to the 53 | measurement. Because the benchmark does not include user processing logic, the 54 | total CPU consumption of the benchmark program can be considered an approximation 55 | of the SDK's CPU consumption. 56 | 57 | The total running time for one test iteration is suggested to be at least 15 58 | seconds. The average and peak CPU usage should be reported. 59 | 60 | ### Memory Usage Measurement 61 | 62 | Measure dynamic memory consumption, e.g. heap, for the same scenario as in the 63 | CPU Usage section above, with a duration of 15 seconds. 64 | 65 | ## Report 66 | 67 | ### Report Format 68 | 69 | All the numbers above should be measured multiple times (at least 10 times is 70 | suggested) and reported. 71 | -------------------------------------------------------------------------------- /specification/performance.md: -------------------------------------------------------------------------------- 1 | # Performance and Blocking of OpenTelemetry API 2 | 3 | This document defines common principles that will help designers create OpenTelemetry clients that are safe to use. 4 | 5 | ## Key principles 6 | 7 | Here are the key principles: 8 | 9 | - **Library should not block end-user application by default.** 10 | - **Library should not consume unbounded memory resource.** 11 | 12 | Although some overhead is inevitable to achieve monitoring, the API should degrade the end-user application as little as possible: it should not block the end-user application, nor consume excessive memory. 13 | 14 | See also [Concurrency and Thread-Safety](library-guidelines.md#concurrency-and-thread-safety) if the implementation supports concurrency. 15 | 16 | ### Tradeoff between non-blocking and memory consumption 17 | 18 | Incomplete asynchronous I/O tasks or background tasks may consume memory to preserve their state. In such a case, there is a tradeoff between dropping some tasks to prevent memory starvation and keeping all tasks to prevent information loss. 19 | 20 | If there is such a tradeoff in an OpenTelemetry client, it should provide the following options to the end user: 21 | 22 | - **Prevent information loss**: Preserve all information, at the cost of potentially consuming significant resources 23 | - **Prevent blocking**: Drop some information under overwhelming load and emit a warning log to indicate when information loss starts and when it recovers 24 | - Should provide an option to change the dropping threshold 25 | - Better to provide a metric that represents the effective sampling ratio 26 | - OpenTelemetry client might provide this option for Logging 27 | 28 | ### End-user application should be aware of the size of logs 29 | 30 | Logging could consume much memory by default if the end-user application emits too many logs. This default behavior is intended to preserve logs rather than dropping them. To make resource usage bounded, the end-user should consider reducing the logs that are passed to the exporters. 31 | 32 | Therefore, the OpenTelemetry client should provide a way to filter which logs are captured by OpenTelemetry. End-user applications may want to log extensively to a log file or stdout (or somewhere else) but not want to send all of those logs to OpenTelemetry exporters.
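One possible shape of such a filter - sketched here in pseudo-Java with hypothetical names, since this specification does not mandate a particular filtering API - is a processor that drops records below a configured severity before they reach the exporter:

```java
// Hypothetical filtering processor: only records at or above the configured
// severity are forwarded to the downstream processor (and thus the exporter).
class SeverityFilteringProcessor implements LogRecordProcessor {
  private final LogRecordProcessor downstream;
  private final int minSeverityNumber;

  SeverityFilteringProcessor(LogRecordProcessor downstream, int minSeverityNumber) {
    this.downstream = downstream;
    this.minSeverityNumber = minSeverityNumber;
  }

  @Override
  public void onEmit(Context context, ReadWriteLogRecord logRecord) {
    if (logRecord.toLogRecordData().getSeverity().getSeverityNumber() >= minSeverityNumber) {
      downstream.onEmit(context, logRecord);
    }
  }
}
```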
33 | 34 | In the documentation of the OpenTelemetry client, it is a good idea to point out that emitting too many logs consumes significant resources by default, and then guide users on how to filter logs. 35 | 36 | ### Shutdown and explicit flushing could block 37 | 38 | The OpenTelemetry client could block the end-user application when it shuts down. On shutdown, it has to flush data to prevent information loss. The OpenTelemetry client should support a user-configurable timeout if it blocks on shutdown. 39 | 40 | If the OpenTelemetry client supports an explicit flush operation, it could also block, but it should support a configurable timeout. 41 | 42 | ## Documentation 43 | 44 | If a language-specific implementation has special characteristics that are not described in this document, such characteristics should be documented. 45 | -------------------------------------------------------------------------------- /specification/profiles/README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # Profiles 8 | -------------------------------------------------------------------------------- /specification/profiles/mappings.md: -------------------------------------------------------------------------------- 1 | # Mappings 2 | 3 | **Status**: [Development](../document-status.md) 4 | 5 | This document defines the required attributes of [`Mapping`](../../oteps/profiles/0239-profiles-data-model.md#message-mapping) messages. 6 | 7 | 8 | 9 | - [Attributes](#attributes) 10 | * [Algorithm for `process.executable.build_id.htlhash`](#algorithm-for-processexecutablebuild_idhtlhash) 11 | 12 | 13 | 14 | ## Attributes 15 | 16 | A message representing a `Mapping` MUST have at least one of the following 17 | [process attributes](https://opentelemetry.io/docs/specs/semconv/attributes-registry/process/#process-attributes): 18 | 19 | - `process.executable.build_id.gnu` 20 | - `process.executable.build_id.go` 21 | - `process.executable.build_id.htlhash` 22 | 23 | If possible, all of the above listed attributes SHOULD be present in a `Mapping`. To promote interoperability, it is RECOMMENDED for `process.executable.build_id.htlhash` to be present in every `Mapping`. For the use and purpose of `process.executable.build_id.go` see [golang/go#68652](https://github.com/golang/go/issues/68652#issuecomment-2274452424). 24 | 25 | ### Algorithm for `process.executable.build_id.htlhash` 26 | 27 | In some environments GNU and/or Go build_id values are stripped or not usable - for example, Alpine 28 | Linux, which is often used as a base for Docker environments. For that reason, and to promote interoperability, a deterministic build_id generation algorithm that hashes the first and last page of a file together with its length is defined as: 29 | 30 | ``` 31 | Input ← Concat(File[:4096], File[-4096:], BigEndianUInt64(Len(File))) 32 | Digest ← SHA256(Input) 33 | BuildID ← Digest[:16] 34 | ``` 35 | 36 | where `Input` is the concatenation of the first and last 4096 bytes of the file (may overlap, not padded) and the 8 byte big-endian serialization of the file length. The resulting `BuildID` is the truncation of the hash digest to 16 bytes (128 bits), in hex string form.
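A non-normative sketch of this computation in Java (assuming the executable fits in memory; a streaming implementation would hash the same bytes) is:

```java
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.util.HexFormat;

class HtlHash {
  static String buildId(Path executable) throws Exception {
    byte[] file = Files.readAllBytes(executable);
    int head = Math.min(4096, file.length);          // File[:4096]
    int tailStart = Math.max(0, file.length - 4096); // File[-4096:] (may overlap, not padded)

    MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
    sha256.update(file, 0, head);
    sha256.update(file, tailStart, file.length - tailStart);
    sha256.update(ByteBuffer.allocate(8).putLong(file.length).array()); // BigEndianUInt64(Len(File))

    byte[] digest = sha256.digest();                 // Digest = SHA256(Input)
    return HexFormat.of().formatHex(digest, 0, 16);  // BuildID = Digest[:16], hex string form
  }
}
```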
37 | -------------------------------------------------------------------------------- /specification/protocol/README.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | # OpenTelemetry Protocol 9 | 10 | The OpenTelemetry protocol (OTLP) design goals, requirements, and 11 | [specification] have moved to 12 | [github.com/open-telemetry/opentelemetry-proto/docs](https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/). 13 | 14 | You can also view the specification from the OpenTelemetry website, see [OTLP][specification]. 15 | 16 | For additional OTLP implementation requirements in the OpenTelemetry SDKs, see 17 | [SDK Exporter](exporter.md). 18 | 19 | [specification]: https://opentelemetry.io/docs/specs/otlp/ 20 | -------------------------------------------------------------------------------- /specification/protocol/design-goals.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # Design Goals for OpenTelemetry Wire Protocol 8 | 9 | This page has moved to 10 | [github.com/open-telemetry/opentelemetry-proto/docs/design-goals.md](https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/design-goals.md). 11 | -------------------------------------------------------------------------------- /specification/protocol/img/otlp-client-server.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/protocol/img/otlp-client-server.png -------------------------------------------------------------------------------- /specification/protocol/img/otlp-concurrent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/protocol/img/otlp-concurrent.png -------------------------------------------------------------------------------- /specification/protocol/img/otlp-multi-destination.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/protocol/img/otlp-multi-destination.png -------------------------------------------------------------------------------- /specification/protocol/img/otlp-request-response.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/protocol/img/otlp-request-response.png -------------------------------------------------------------------------------- /specification/protocol/img/otlp-sequential.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/protocol/img/otlp-sequential.png -------------------------------------------------------------------------------- /specification/protocol/otlp.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | # OpenTelemetry Protocol Specification 7 | 8 | This page has moved to [OTLP](https://opentelemetry.io/docs/specs/otlp/). 
9 | -------------------------------------------------------------------------------- /specification/protocol/requirements.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # OpenTelemetry Protocol Requirements 8 | 9 | This page has moved to 10 | [github.com/open-telemetry/opentelemetry-proto/docs/requirements.md](https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/requirements.md). 11 | -------------------------------------------------------------------------------- /specification/resource/README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # Resource 8 | 9 |
10 | Table of Contents 11 | 12 | 13 | 14 | - [Overview](#overview) 15 | * [Identity](#identity) 16 | * [Navigation](#navigation) 17 | * [Telescoping](#telescoping) 18 | - [Specifications](#specifications) 19 | 20 | 21 | 22 |
23 | 24 | ## Overview 25 | 26 | A Resource is a representation of the entity producing telemetry. 27 | Within OpenTelemetry, all signals are associated with a Resource, enabling 28 | contextual correlation of data from the same source. For example, if I see 29 | high latency in a span, I need to check the metrics for the same entity that 30 | produced that Span during the time when the latency was observed. 31 | 32 | Resource provides two important aspects for observability: 33 | 34 | - It MUST identify an entity that is producing telemetry. 35 | - It SHOULD allow users to determine where that entity resides within their infrastructure. 36 | 37 | ### Identity 38 | 39 | Resource provides a natural way to understand "what" produced an effect and 40 | evaluate other signals of that same source. This is done through attaching the 41 | same set of identifying attributes on all telemetry produced in an 42 | OpenTelemetry SDK. 43 | 44 | Resource identity provides a natural pivot point for observability signals, a 45 | key type of correlation in OpenTelemetry. 46 | 47 | ### Navigation 48 | 49 | Implicit in the design of Resource and attributes is ensuring users are able to 50 | navigate their infrastructure, tools, UIs, etc. to find the *same* entity that 51 | telemetry is reporting against. For example, in practice we could see a Resource 52 | including more than one entity, like: 53 | 54 | - A process 55 | - A container 56 | - A Kubernetes pod name 57 | - A namespace 58 | - A deployment 59 | 60 | By including identifying attributes of each of these, we can help users navigate 61 | through their `kubectl` or Kubernetes UIs to find the specific process 62 | generating telemetry. This is as important as being able to uniquely identify 63 | one process from another. 64 | 65 | > Aside: Observability signals SHOULD be actionable. Knowing a process is 66 | > struggling is not as useful as being able to scale up a deployment to take 67 | > load off the struggling process. 68 | 69 | If the only thing important to Resource was identity, we could simply use UUIDs. 70 | However, this would rely on some other, easily accessible, system to provide 71 | human-friendly understanding for these UUIDs. OpenTelemetry provides a model 72 | where a full UUID-only solution could be chosen, but defaults to a *blended* 73 | approach, where resource provides both Identity and Navigation. 74 | 75 | This leads to the next concept: telescoping identity to the needs of a system. 76 | 77 | ### Telescoping 78 | 79 | Within OpenTelemetry, we want to give users the flexibility to decide what 80 | information needs to be sent *with* observability signals and what information 81 | can be joined later. We call this "telescoping identity": users can decide 82 | how *small* or *large* the size of an OpenTelemetry resource will be on the wire 83 | (and correspondingly, how large data points are when stored, depending on the 84 | storage solution). 85 | 86 | For example, in the extreme, OpenTelemetry could synthesize a UUID for every 87 | system which produces telemetry. All identifying attributes for Resource and 88 | Entity could be sent via a side channel with known relationships to this UUID. 89 | While this would optimize the runtime generation and sending of telemetry, it 90 | comes at the cost of downstream storage systems needing to join data back 91 | together either at ingestion time or query time. For high-performance use cases, 92 | e.g. alerting, these joins can be expensive.
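As an illustrative sketch of the two ends of this spectrum (pseudo-Java, loosely modeled on a typical SDK's Resource API; the attribute values are made up), a minimal "telescoped" identity might carry only `service.instance.id`, while a blended one also carries navigational attributes:

```java
// Minimal identity: a single identifying attribute.
Resource minimal = Resource.create(Attributes.of(
    AttributeKey.stringKey("service.instance.id"),
    "627cc493-f310-47de-96bd-71410b7dec09"));

// Blended identity + navigation: also record where the entity runs.
Resource blended = minimal.merge(Resource.create(Attributes.builder()
    .put("service.name", "checkout")
    .put("k8s.namespace.name", "shop")
    .put("k8s.pod.name", "checkout-6d8f5c7b9d-abcde")
    .build()));
```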
93 | 94 | In practice, users control Resource identity via the configuration of Resource 95 | Detection within SDKs and the collector. Users wishing for minimal identity will 96 | limit their resource detection just to a `service.instance.id`, for example. 97 | Some users highly customize resource detection with many concepts being appended. 98 | 99 | ## Specifications 100 | 101 | - [Data Model](./data-model.md) 102 | - [Resource SDK](./sdk.md) 103 | -------------------------------------------------------------------------------- /specification/resource/data-model.md: -------------------------------------------------------------------------------- 1 | # Resource Data Model 2 | 3 | **Status**: [Development](../document-status.md) 4 | 5 |
6 | Table of Contents 7 | 8 | 9 | 10 | - [Identity](#identity) 11 | 12 | 13 | 14 |
15 | 16 | A Resource is a representation of the entity producing telemetry as Attributes. 17 | For example, you could have a process producing telemetry that is 18 | running in a container on Kubernetes, which is associated with a Pod running on a 19 | Node that is a VM, but is also in a namespace and possibly is part of a 20 | Deployment. Resource could have attributes to denote information about the 21 | Container, the Pod, the Node, the VM or the Deployment. All of these help 22 | identify what produced the telemetry. Note that there are certain "standard 23 | attributes" that have prescribed meanings. 24 | 25 | A resource is composed of 0 or more [`Entities`](../entities/README.md) and 0 26 | or more attributes not associated with any entity. 27 | 28 | The data model below defines a logical model for a Resource (irrespective of the physical format and encoding of how resource data is recorded). 29 | 30 | | Field | Type | Description | 31 | |------------|----------|-----------------| 32 | | Entities | set\<Entity\> | Defines the set of Entities associated with this resource. <br/><br/> [Entity is defined here](../entities/data-model.md) | 33 | | Attributes | map\<string, any\> | Additional Attributes that identify the resource. <br/><br/> MUST NOT change during the lifetime of the resource. <br/><br/> Follows OpenTelemetry [Standard attribute definition](../common/README.md#standard-attribute). | 34 | 35 | ## Identity 36 | 37 | Most resources are a composition of [`Entity`](../entities/data-model.md). 38 | Entity includes its own notion of identity. The identity of a resource is 39 | the set of entities contained within it. Two resources are considered 40 | different if one contains an entity not found in the other. 41 | 42 | Some resources include raw attributes in addition to Entities. Raw attributes are 43 | considered identifying on a resource. That is, if the key-value pairs of 44 | raw attributes are different, then you can assume the resource is different. 45 | -------------------------------------------------------------------------------- /specification/schemas/img/0152-collector.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/schemas/img/0152-collector.png -------------------------------------------------------------------------------- /specification/schemas/img/0152-otel-schema.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/schemas/img/0152-otel-schema.png -------------------------------------------------------------------------------- /specification/schemas/img/0152-query-translate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/schemas/img/0152-query-translate.png -------------------------------------------------------------------------------- /specification/schemas/img/0152-source-and-backend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-telemetry/opentelemetry-specification/bdcf53d55285ae42339f37908a24ba6f3ac167cb/specification/schemas/img/0152-source-and-backend.png -------------------------------------------------------------------------------- /specification/semantic-conventions.md: -------------------------------------------------------------------------------- 1 | # Semantic Conventions 2 | 3 | **Status**: [Development](document-status.md) 4 | 5 | The **Semantic Conventions** define the keys and values which describe commonly observed concepts, protocols, and operations used by applications. 6 | 7 | OpenTelemetry defines its semantic conventions in a separate repository: 8 | [https://github.com/open-telemetry/semantic-conventions](https://github.com/open-telemetry/semantic-conventions).
9 | 10 | ## Reserved Attributes 11 | 12 | Semantic conventions MUST provide the following attributes: 13 | 14 | - [`error.type`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/error.md#error-type) 15 | - [`exception.message`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/exception.md#exception-message) 16 | - [`exception.stacktrace`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/exception.md#exception-stacktrace) 17 | - [`exception.type`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/exception.md#exception-type) 18 | - [`server.address`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/server.md#server-address) 19 | - [`server.port`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/server.md#server-port) 20 | - [`service.name`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/service.md#service-name) 21 | - [`telemetry.sdk.language`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/telemetry.md#telemetry-sdk-language) 22 | - [`telemetry.sdk.name`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/telemetry.md#telemetry-sdk-name) 23 | - [`telemetry.sdk.version`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/telemetry.md#telemetry-sdk-version) 24 | - [`url.scheme`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/url.md#url-scheme) 25 | 26 | Semantic conventions MUST provide the following events: 27 | 28 | - [`exception`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/exceptions/exceptions-spans.md) 29 | 30 | ## In-development Reserved Attributes 31 | 32 | Semantic conventions MUST provide the following attributes: 33 | 34 | - [`service.instance.id`](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/service.md#service-instance-id) 35 | 36 | ## Reserved Namespace 37 | 38 | The `otel.*` namespace is reserved for defining compatibility with 39 | non-OpenTelemetry technologies. 40 | -------------------------------------------------------------------------------- /specification/telemetry-stability.md: -------------------------------------------------------------------------------- 1 | # Telemetry Stability 2 | 3 | **Status**: [Development](document-status.md) 4 | 5 |

6 | Table of Contents 7 | 8 | 9 | 10 | - [Unstable Instrumentations](#unstable-instrumentations) 11 | - [Stable Instrumentations](#stable-instrumentations) 12 | * [Fixed Schema Telemetry Producers](#fixed-schema-telemetry-producers) 13 | * [Schema-File Driven Telemetry Producers](#schema-file-driven-telemetry-producers) 14 | 15 | 16 | 17 |
18 | 19 | This section defines stability requirements for telemetry produced by 20 | OpenTelemetry instrumentations. 21 | 22 | All OpenTelemetry-authored instrumentations are labeled to be either `Unstable` or `Stable` 23 | from the perspective of the telemetry they produce. 24 | 25 | Adding of new metrics, spans, span events or log records and adding of 26 | new attributes to spans, span events, log records or resources are considered 27 | additive, non-breaking changes and are always allowed for `Unstable` and `Stable` 28 | instrumentations. 29 | 30 | Other changes in the produced telemetry are regulated by the following rules. 31 | 32 | ## Unstable Instrumentations 33 | 34 | Unstable telemetry-producing instrumentations (unstable instrumentations for short) SHOULD 35 | be clearly labeled so by any means the instrumentations authors consider idiomatic for 36 | their language, e.g. via version numbers, artifact names, documentation, etc. 37 | 38 | Unstable instrumentations provide no guarantees about the shape of 39 | the telemetry they produce and how that shape changes over time, from version to version. 40 | Span or metric names, attributes of any telemetry items may change without any 41 | restrictions. The produced telemetry MAY specify a Schema URL if the telemetry data 42 | conforms to a particular Schema. 43 | 44 | Unstable instrumentations authored by OpenTelemetry MAY produce additional telemetry that 45 | is not described by OpenTelemetry semantic conventions. 46 | 47 | TODO: decide if it is necessary to indicate on the wire if the produced telemetry is 48 | coming from an unstable instrumentation. 49 | 50 | ## Stable Instrumentations 51 | 52 | > **Warning** 53 | > There is a moratorium on relying on schema transformations for telemetry stability. 54 | 55 | Stable telemetry-producing instrumentations (stable instrumentations for short) SHOULD 56 | be clearly labeled so by any means the instrumentations authors consider idiomatic for 57 | their language, e.g. via version numbers, artifact names, documentation, etc. 58 | 59 | Stable instrumentations fall into 2 categories: fixed-schema producers and schema-file 60 | driven producers. 61 | 62 | Stable instrumentations authored by OpenTelemetry SHOULD NOT produce telemetry that is 63 | not described by OpenTelemetry semantic conventions. If, however, this rule is broken the 64 | instrumentations MUST NOT change such telemetry, regardless of whether they 65 | are fixed-schema producers or schema-file driven producers. Once the produced telemetry 66 | is added to the semantic conventions, changes will be allowed as described below. 67 | 68 | ### Fixed Schema Telemetry Producers 69 | 70 | Instrumentations that are labeled `Stable` and do not include the Schema URL in the 71 | produced telemetry are called Fixed Schema Telemetry Producers. 72 | 73 | Such instrumentations are prohibited from changing any produced telemetry. If the 74 | specification changes over time and the semantic conventions are updated, the 75 | instrumentation is still prohibited from adopting the changes. If the instrumentation 76 | wishes to adopt the semantic convention changes it must first become a 77 | [Schema-File Driven Telemetry Producer](#schema-file-driven-telemetry-producers) by 78 | adding an appropriate Schema URL in the produced telemetry. 79 | 80 | ### Schema-File Driven Telemetry Producers 81 | 82 | Stable instrumentations that include the Schema URL in the produced telemetry are 83 | called Schema-File Driven Telemetry Producers. 
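For illustration, a schema-file driven instrumentation typically pins the Schema URL when it obtains its Tracer (or Meter/Logger); a hedged pseudo-Java sketch, with method names modeled on a typical API rather than mandated here, looks like:

```java
// Declaring the schema version of the semantic conventions this
// instrumentation emits, so consumers can apply schema transformations.
Tracer tracer = openTelemetry.tracerBuilder("io.example.http-client")
    .setInstrumentationVersion("2.1.0")
    .setSchemaUrl("https://opentelemetry.io/schemas/1.21.0")
    .build();
```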
84 | 85 | Such instrumentations are prohibited from changing the produced telemetry until 86 | the moratorium on relying on schema transformations for telemetry stability is lifted 87 | and until that date are subject to exactly the same restrictions as 88 | [Fixed Schema Telemetry Producers](#fixed-schema-telemetry-producers). 89 | 90 | After the moratorium is lifted, stable instrumentations are allowed to change the produced telemetry 91 | if all the following conditions are fulfilled: 92 | 93 | - The change is part of OpenTelemetry semantic conventions and is in a released 94 | version of the specification. 95 | - The change has a corresponding [published](schemas/README.md#opentelemetry-schema) 96 | OpenTelemetry Schema File that describes the change. 97 | - The produced telemetry correctly specifies the respective Schema URL. 98 | -------------------------------------------------------------------------------- /specification/trace/README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # Trace 8 | -------------------------------------------------------------------------------- /specification/trace/exceptions.md: -------------------------------------------------------------------------------- 1 | # Exceptions 2 | 3 | **Status**: [Stable](../document-status.md), Unless otherwise specified. 4 | 5 | This document defines how to record exceptions and their attributes. 6 | 7 | 8 | 9 | - [Recording an Exception](#recording-an-exception) 10 | - [Attributes](#attributes) 11 | 12 | 13 | 14 | ## Recording an Exception 15 | 16 | An exception SHOULD be recorded as an `Event` on the span during which it occurred 17 | if and only if it remains unhandled when the span ends and causes the span status 18 | to be set to ERROR. 19 | 20 | The name of the event MUST be `"exception"`. 21 | 22 | **Status**: [Development](../document-status.md) - Refer to the [Recording Errors](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/general/recording-errors.md) document for the details on how to report errors across signals. 23 | 24 | A typical template for an auto-instrumentation implementing this semantic convention 25 | using an [API-provided `recordException` method](api.md#record-exception) 26 | could look like this (pseudo-Java): 27 | 28 | ```java 29 | Span span = myTracer.startSpan(/*...*/); 30 | try { 31 | // Code that does the actual work which the Span represents 32 | } catch (Throwable e) { 33 | span.recordException(e); 34 | span.setAttribute(AttributeKey.stringKey("error.type"), e.getClass().getCanonicalName()) 35 | span.setStatus(StatusCode.ERROR, e.getMessage()); 36 | throw e; 37 | } finally { 38 | span.end(); 39 | } 40 | ``` 41 | 42 | ## Attributes 43 | 44 | An event representing an exception MUST have an 45 | event name `exception`. 46 | 47 | Additionally, the following attributes SHOULD be 48 | filled out: 49 | 50 | - `exception.message` 51 | - `exception.stacktrace` 52 | - `exception.type` 53 | 54 | The format and semantics of these attributes are 55 | defined in [semantic conventions](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/exceptions/exceptions-spans.md). 

--------------------------------------------------------------------------------
/specification/trace/sdk_exporters/README.md:
--------------------------------------------------------------------------------

# Trace Exporters

--------------------------------------------------------------------------------
/specification/trace/sdk_exporters/stdout.md:
--------------------------------------------------------------------------------

# Span Exporter - Standard output

**Status**: [Stable](../../document-status.md)

The "Standard output" Span Exporter is a [Span
Exporter](../sdk.md#span-exporter) that outputs spans to
stdout/console.

The exporter's output format is unspecified and can vary between
implementations. Documentation SHOULD warn users about this. The following
wording is recommended (modify as needed):

> This exporter is intended for debugging and learning purposes. It is not
> recommended for production use. The output format is not standardized and can
> change at any time.
>
> If a standardized format for exporting traces to stdout is desired, consider
> using the [File Exporter](../../protocol/file-exporter.md), if available.
> However, please review the status of the File Exporter and verify whether it is
> stable and production-ready.

[OpenTelemetry SDK](../../overview.md#sdk) authors MAY choose the best idiomatic
name for their language, for example ConsoleExporter, StdoutExporter,
StreamExporter, LoggingExporter, etc.

If a language provides a mechanism to automatically configure a
[Span processor](../sdk.md#span-processor) to pair with the associated
exporter (e.g., using the [`OTEL_TRACES_EXPORTER` environment
variable](../../configuration/sdk-environment-variables.md#exporter-selection)), by
default the standard output exporter SHOULD be paired with a [simple
processor](../sdk.md#simple-processor).

--------------------------------------------------------------------------------
/specification/vendors.md:
--------------------------------------------------------------------------------
# Vendors

Table of Contents

- [Abstract](#abstract)
- [Supports OpenTelemetry](#supports-opentelemetry)
- [Implements OpenTelemetry](#implements-opentelemetry)
- [Qualifications](#qualifications)

## Abstract

The OpenTelemetry project consists of both a
[specification](https://github.com/open-telemetry/opentelemetry-specification)
for the API, SDK, protocol, and semantic conventions, and an
implementation of each for a number of languages. The default SDK implementation
is [highly configurable](configuration/README.md) and extensible, for example
through [Span Processors](trace/sdk.md#span-processor), so that additional
logic needed by particular vendors can be added without having to implement a
custom SDK. Because a custom SDK is not required, for most languages a user will
already find an implementation to use, and if not, they will have a well-documented
specification to follow when implementing one in a new language.

The goal is for users to be able to easily switch between vendors while also
ensuring that any language with an OpenTelemetry SDK implementation is able to
work with any vendor who claims support for OpenTelemetry.

This document explains what is required for a vendor to be listed as
"Supports OpenTelemetry" or "Implements OpenTelemetry".

## Supports OpenTelemetry

"Supports OpenTelemetry" means the vendor must accept the output of the default
SDK through one of two mechanisms:

- By providing an exporter for the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector/) and/or the OpenTelemetry SDKs
- By building a receiver for the [OpenTelemetry protocol](https://github.com/open-telemetry/opentelemetry-proto)

## Implements OpenTelemetry

A vendor with a custom SDK implementation will be listed as "Implements
OpenTelemetry". If the custom SDK is optional, then the vendor can be listed as
"Supports OpenTelemetry".

## Qualifications

A vendor can qualify their support for OpenTelemetry with the type of telemetry
they support. For example, a vendor that accepts OpenTelemetry protocol
exports for metrics only will be listed as "Supports OpenTelemetry Metrics", while
one that implements a custom SDK only for tracing will be listed as "Implements
OpenTelemetry Tracing".

--------------------------------------------------------------------------------
/supplementary-guidelines/compatibility/aws.md:
--------------------------------------------------------------------------------
# Compatibility Considerations for AWS

This document highlights compatibility considerations for OpenTelemetry
instrumentations when interacting with AWS managed services using an AWS SDK,
a third-party library, or a direct HTTP request.

## Context Propagation

When making calls to AWS managed services using an AWS SDK, a third-party
library, or a direct HTTP request, an AWS service-supported propagation format should
be used to add context propagation to HTTP headers on the outgoing request in order
to propagate the context to services indirectly invoked by such a call.

Instrumentation may allow a different propagator to be explicitly configured for
the instrumentation (e.g., an explicitly provided propagator, or an option to use the
globally configured propagator for all or certain calls).
This will be useful for certain cases where the services allow transporting these
headers to a receiving side, for example SQS or SNS with message attributes.
Note that this also means that instrumentations providing this option cannot just
replace their call to the X-Ray propagator with a call to another propagator (as
that would only send HTTP headers in the REST API call that would be immediately
ignored by the receiving AWS service), but will need to introduce per-service-call
implementations where it makes sense (e.g., for SQS send and SQS receive).
This can allow for transporting additional context that may not be supported by X-Ray,
such as baggage or tracestate, or supporting certain legacy propagation formats.
Documentation should advise that doing so is subject to attribute limits and billing impacts.

Propagation headers must be added before the signature is calculated to prevent
errors on signed requests. If injecting into the request itself (not just adding
additional HTTP headers), additional considerations may apply (for example, the
.NET AWS SDK calculates a hash of the attributes it sends and compares it with
the `MD5OfMessageAttributes` that it receives).

The following formats are currently natively supported by AWS services for propagation:

* [AWS X-Ray](https://docs.aws.amazon.com/xray/latest/devguide/aws-xray.html)

AWS service-supported context propagation is necessary to allow context propagation
through AWS managed services, for example: `S3 -> SNS -> SQS -> Lambda`.

(See the [aws-lambda sqs-event semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/faas/aws-lambda.md#sqs-event)
doc for details on how this context propagation is consumed by Lambda instrumentation.)

--------------------------------------------------------------------------------
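To illustrate the "inject before signing" requirement from the AWS compatibility guidance above, here is a hedged pseudo-Java sketch using the OpenTelemetry Java global propagator API; `request` and `signAndSend` are hypothetical placeholders for an application's own HTTP and SigV4 plumbing, not real SDK calls:

```java
import java.util.HashMap;
import java.util.Map;

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.context.Context;

// Inject the configured propagation format (e.g. X-Ray) into the outgoing
// HTTP headers *before* the request is signed, so that the injected headers
// are covered by the signature and the signed request is not rejected.
Map<String, String> headers = new HashMap<>();
GlobalOpenTelemetry.getPropagators()
    .getTextMapPropagator()
    .inject(Context.current(), headers, Map::put);

// Hypothetical: compute the SigV4 signature over a request that already
// includes the propagation headers, then send it to the AWS service.
signAndSend(request, headers);
```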