├── .env ├── .gitattributes ├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ ├── ci.yml │ ├── docker.yml │ └── semantic.yml ├── .gitignore ├── LICENSE ├── README.md ├── common ├── common.go ├── common_test.go ├── error.go ├── go.mod ├── go.sum ├── logger.go └── metrics.go ├── demo ├── Dockerfile.hotrod ├── docker-compose-development.yml ├── docker-compose.yml ├── grafana │ ├── dashboards │ │ ├── Open Telemetry-1682672178148.json │ │ └── dashboards.yml │ └── datasources │ │ ├── flightsql.yml │ │ └── jaeger.yml ├── hotrod.patch ├── jaeger-ui-config.json ├── otelcol-config.yml └── telegraf │ └── telegraf.conf ├── docs ├── index.md ├── logs.md ├── metrics.md └── traces.md ├── influx2otel ├── README.md ├── go.mod ├── go.sum ├── metrics.go ├── metrics_helper_test.go ├── metrics_statsd_schema_test.go ├── metrics_telegraf_prometheus_v1.go ├── metrics_telegraf_prometheus_v1_test.go ├── metrics_telegraf_prometheus_v2.go ├── metrics_telegraf_prometheus_v2_test.go └── metrics_unknown_schema_test.go ├── jaeger-influxdb ├── Dockerfile ├── Dockerfile.all-in-one ├── README.md ├── all-in-one.sh ├── cmd │ └── jaeger-influxdb │ │ └── main.go ├── go.mod ├── go.sum └── internal │ ├── common.go │ ├── config.go │ ├── influxdb.go │ ├── influxdb_reader.go │ ├── influxdb_test.go │ ├── influxdb_writer.go │ ├── logctx.go │ └── queries.go ├── otel2influx ├── README.md ├── common.go ├── go.mod ├── go.sum ├── logs.go ├── metrics.go ├── metrics_otel_v1.go ├── metrics_telegraf_prometheus_v1.go ├── metrics_telegraf_prometheus_v1_test.go ├── metrics_telegraf_prometheus_v2.go ├── metrics_telegraf_prometheus_v2_test.go ├── traces.go ├── writer.go └── writer_test.go ├── otelcol-influxdb ├── .gitignore ├── Dockerfile ├── README.md └── build.yml ├── run-checks.sh ├── tests-integration ├── common_test.go ├── go.mod ├── go.sum ├── helper_otelcol_test.go ├── helper_telegraf_test.go ├── influx2otel_test.go ├── otel2influx_test.go └── test_fodder.go └── update-deps.sh /.env: -------------------------------------------------------------------------------- 1 | INFLUXDB_ADDR=eu-central-1-1.aws.cloud2.influxdata.com 2 | INFLUXDB_TOKEN=xxxxxxxx 3 | INFLUXDB_ORG=xxxxxxx 4 | INFLUXDB_BUCKET=otel 5 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | go.sum linguist-generated=true 2 | 3 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @influxdata/iox-write 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/common" 5 | schedule: 6 | day: "tuesday" 7 | interval: "weekly" 8 | open-pull-requests-limit: 0 9 | - package-ecosystem: "gomod" 10 | directory: "/influx2otel" 11 | schedule: 12 | day: "tuesday" 13 | interval: "weekly" 14 | open-pull-requests-limit: 0 15 | - package-ecosystem: "gomod" 16 | directory: "/jaeger-influxdb" 17 | schedule: 18 | day: "tuesday" 19 | interval: "weekly" 20 | open-pull-requests-limit: 0 21 | - package-ecosystem: "gomod" 22 | directory: "/otel2influx" 23 | schedule: 24 | day: "tuesday" 25 | interval: "weekly" 26 | open-pull-requests-limit: 0 27 | 
-------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | 11 | build: 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | go: [ "1.19", "1.20" ] 16 | package: [ "common", "influx2otel", "otel2influx", "jaeger-influxdb", "tests-integration" ] 17 | exclude: 18 | - go: 1.19 19 | package: jaeger-influxdb 20 | runs-on: ubuntu-latest 21 | steps: 22 | 23 | - uses: actions/checkout@v2 24 | with: 25 | fetch-depth: 1 26 | 27 | - uses: actions/setup-go@v2 28 | with: 29 | go-version: ${{ matrix.go }} 30 | 31 | - name: Test 32 | run: > 33 | cd ${{ matrix.package }} && 34 | go test ./... 35 | 36 | - name: Fmt 37 | run: > 38 | test -z $(gofmt -s -l ./${{ matrix.package }} | head -n 1) || ( gofmt -s -d ./${{ matrix.package }} ; exit 1 ) 39 | 40 | - name: Vet 41 | run: > 42 | cd ${{ matrix.package }} && 43 | go vet ./... 44 | 45 | - name: staticcheck 46 | run: > 47 | go install honnef.co/go/tools/cmd/staticcheck@2023.1.3 && 48 | cd ${{ matrix.package }} && 49 | staticcheck -f stylish ./... 50 | 51 | build-otelcol-influxdb: 52 | runs-on: ubuntu-latest 53 | steps: 54 | 55 | - uses: actions/checkout@v2 56 | with: 57 | fetch-depth: 1 58 | 59 | - uses: actions/setup-go@v2 60 | with: 61 | go-version: "1.20" 62 | 63 | - name: build 64 | run: > 65 | go install go.opentelemetry.io/collector/cmd/builder@v0.87.0 && 66 | cd otelcol-influxdb && 67 | builder --config build.yml 68 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: docker 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | workflow_dispatch: 8 | 9 | jobs: 10 | docker: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Set up QEMU 14 | uses: docker/setup-qemu-action@v2 15 | - name: Set up Docker Buildx 16 | uses: docker/setup-buildx-action@v2 17 | - name: Login to Docker Hub 18 | uses: docker/login-action@v2 19 | with: 20 | username: ${{ secrets.DOCKERHUB_USERNAME }} 21 | password: ${{ secrets.DOCKERHUB_TOKEN }} 22 | 23 | - name: Docker meta, jaeger-influxdb 24 | id: meta-jaeger-influxdb 25 | uses: docker/metadata-action@v4 26 | with: 27 | images: | 28 | jacobmarble/jaeger-influxdb 29 | tags: | 30 | type=semver,pattern={{version}} 31 | - name: Build and push jaeger-influxdb 32 | uses: docker/build-push-action@v4 33 | with: 34 | push: true 35 | file: jaeger-influxdb/Dockerfile 36 | tags: ${{ steps.meta-jaeger-influxdb.outputs.tags }} 37 | platforms: linux/amd64,linux/arm64 38 | 39 | - name: Docker meta, jaeger-influxdb-all-in-one 40 | id: meta-jaeger-influxdb-all-in-one 41 | uses: docker/metadata-action@v4 42 | with: 43 | images: | 44 | jacobmarble/jaeger-influxdb-all-in-one 45 | tags: | 46 | type=semver,pattern={{version}} 47 | - name: Build and push jaeger-influxdb-all-in-one 48 | uses: docker/build-push-action@v4 49 | with: 50 | push: true 51 | file: jaeger-influxdb/Dockerfile.all-in-one 52 | tags: ${{ steps.meta-jaeger-influxdb-all-in-one.outputs.tags }} 53 | platforms: linux/amd64,linux/arm64 54 | 55 | - name: Docker meta, otelcol-influxdb 56 | id: meta-otelcol-influxdb 57 | uses: docker/metadata-action@v4 58 | with: 59 | images: | 60 | jacobmarble/otelcol-influxdb 61 | tags: | 62 | type=semver,pattern={{version}} 63 | - name: Build and push 
otelcol-influxdb 64 | uses: docker/build-push-action@v4 65 | with: 66 | push: true 67 | file: otelcol-influxdb/Dockerfile 68 | tags: ${{ steps.meta-otelcol-influxdb.outputs.tags }} 69 | platforms: linux/amd64,linux/arm64 70 | -------------------------------------------------------------------------------- /.github/workflows/semantic.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Semantic PR and Commit Messages" 3 | 4 | on: 5 | pull_request: 6 | types: [opened, reopened, synchronize, edited] 7 | 8 | jobs: 9 | semantic: 10 | uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | local/ 2 | .env-old 3 | plugins 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 InfluxData 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # InfluxDB Observability 2 | 3 | This repository is a reference for converting observability signals (traces, metrics, logs) to/from a common InfluxDB schema. 4 | 5 | ## Demo 6 | 7 | Steps to run the full write/query demo follow. 8 | 9 | In an InfluxDB Cloud 2 account backed by IOx, create a bucket named `otel`. 10 | Then, create a token with permission to read and write to that bucket. 11 | 12 | In demo/docker-compose.yml, set values for these keys. 13 | The key `INFLUXDB_BUCKET_ARCHIVE` is optional; 14 | if set, it should point to an InfluxDB bucket with a longer retention policy than `INFLUXDB_BUCKET`, 15 | so that the "Archive Trace" button in Jaeger works properly. 16 | 17 | The community addition focuses on the usability of the demo with Grafana. 
To simplify demo setup, it introduces a `.env` file that lets you set the following variables: 18 | 19 | ```bash 20 | INFLUXDB_ADDR=eu-central-1-1.aws.cloud2.influxdata.com 21 | INFLUXDB_TOKEN=xxxxxxx 22 | INFLUXDB_ORG=xxxxxxxx 23 | INFLUXDB_BUCKET=otel 24 | INFLUXDB_BUCKET_ARCHIVE=otel-archive 25 | ``` 26 | 27 | In demo/otelcol-config.yml, set similar values for these keys: 28 | ```yaml 29 | endpoint: https://< region-specific URL, e.g. https://region.csp.cloud2.influxdata.com/ > 30 | bucket: otel 31 | token: 32 | ``` 33 | 34 | Run the demo with Docker Compose: 35 | ```console 36 | $ docker-compose --file demo/docker-compose.yml --project-directory . up --abort-on-container-exit --remove-orphans 37 | ``` 38 | 39 | Traces are generated by "HotRod", an application designed to demonstrate tracing. 40 | Browse to HotRod at http://localhost:8080 and click some buttons to trigger trace activity. 41 | 42 | Query those traces. 43 | Browse to Jaeger at http://localhost:16686 and click "Find Traces" near the bottom left. 44 | 45 | Click any trace. 46 | 47 | View the dependency graph. 48 | Click "System Architecture". 49 | 50 | Grafana is available at http://localhost:3000. The default username and password are both `admin`. The default FlightSQL datasource is already configured. 51 | 52 | **Note: You can find a dashboard to import under `demo/grafana/dashboards/Open Telemetry-1682672178148.json`** 53 | 54 | To view the trace node graph, enable it in the Jaeger datasource: go to Data Sources, click the Jaeger datasource, enable `Enable Node Graph`, then click save and test. 55 | 56 | 57 | The images `otelcol-influxdb` and `jaeger-influxdb` are automatically built and pushed to Docker Hub at https://hub.docker.com/r/jacobmarble/otelcol-influxdb and https://hub.docker.com/r/jacobmarble/jaeger-influxdb . 58 | 59 | ## Schema Reference 60 | 61 | [Schema reference with conversion tables](docs/index.md). 62 | 63 | ## Modules 64 | 65 | ### `common` 66 | 67 | The golang package `common` contains simple utilities and common string values, 68 | used in at least two of the other packages in this repository. 69 | 70 | ### `otel2influx` and `influx2otel` 71 | 72 | The golang package [`otel2influx`](otel2influx/README.md) converts OpenTelemetry protocol buffer objects to (measurement, tags, fields, timestamp) tuples. 73 | It is imported by [the OpenTelemetry Collector InfluxDB exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/influxdbexporter) 74 | and by [the Telegraf OpenTelemetry input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry). 75 | 76 | The golang package [`influx2otel`](influx2otel/README.md) converts (measurement, tags, fields, timestamp) tuples to OpenTelemetry protocol buffer objects. 77 | It is imported by [the OpenTelemetry Collector InfluxDB receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/influxdbreceiver) 78 | and by [the Telegraf OpenTelemetry output plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentelemetry). 79 | 80 | ### `jaeger-influxdb` 81 | 82 | The [Jaeger Query Plugin for InfluxDB](jaeger-influxdb) enables querying traces stored in InfluxDB/IOx via the Jaeger UI. 83 | 84 | ### `tests-integration` 85 | 86 | The golang package `tests-integration` contains integration tests. 
87 | These tests exercise the above packages against OpenTelemetry Collector Contrib and Telegraf. 88 | 89 | To run these tests: 90 | ```console 91 | $ cd tests-integration 92 | $ go test 93 | ``` 94 | 95 | ## Development 96 | 97 | The file `demo/docker-compose-development.yml` is similar to the demo above, 98 | but it uses local builds of the `otelcol-influxdb` and `jaeger-influxdb` images. 99 | 100 | Build the needed docker images: 101 | ```console 102 | $ docker compose --file demo/docker-compose-development.yml --project-directory . build 103 | ``` 104 | 105 | Start the development demo environment: 106 | ```console 107 | $ docker compose --file demo/docker-compose-development.yml --project-directory . up --abort-on-container-exit --remove-orphans 108 | ``` 109 | 110 | ## Contributing 111 | 112 | Changes can be tested on a local branch using the `run-checks.sh` tool. 113 | `run-checks.sh` verifies `go mod tidy` using `git diff`, 114 | so any changes must be staged for commit in order for `run-checks.sh` to pass. 115 | 116 | To update critical dependencies (OpenTelemetry, Jaeger, and intra-repo modules) in the various modules of this repository: 117 | - run `update-deps.sh` 118 | - stage the changed `go.mod` and `go.sum` files 119 | - run `run-checks.sh` 120 | 121 | ## TODO 122 | Fork this demo: 123 | https://github.com/open-telemetry/opentelemetry-demo 124 | 125 | -------------------------------------------------------------------------------- /common/common.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | 7 | semconv "go.opentelemetry.io/collector/semconv/v1.16.0" 8 | ) 9 | 10 | // https://github.com/open-telemetry/opentelemetry-specification/tree/v1.16.0/specification/resource/semantic_conventions 11 | var ResourceNamespace = regexp.MustCompile(generateResourceNamespaceRegexp()) 12 | 13 | func generateResourceNamespaceRegexp() string { 14 | semconvResourceAttributeNames := semconv.GetResourceSemanticConventionAttributeNames() 15 | components := make([]string, len(semconvResourceAttributeNames)) 16 | for i, attributeName := range semconvResourceAttributeNames { 17 | components[i] = strings.ReplaceAll(attributeName, `.`, `\.`) 18 | } 19 | return `^(?:` + strings.Join(components, `|`) + `)(?:\.[a-z0-9]+)*$` 20 | } 21 | 22 | const ( 23 | MeasurementSpans = "spans" 24 | MeasurementSpanLinks = "span-links" 25 | MeasurementLogs = "logs" 26 | MeasurementPrometheus = "prometheus" 27 | 28 | MetricGaugeFieldKey = "gauge" 29 | MetricCounterFieldKey = "counter" 30 | MetricHistogramCountFieldKey = "count" 31 | MetricHistogramSumFieldKey = "sum" 32 | MetricHistogramMinFieldKey = "min" 33 | MetricHistogramMaxFieldKey = "max" 34 | MetricHistogramInfFieldKey = "+Inf" 35 | MetricHistogramBoundKeyV2 = "le" 36 | MetricHistogramCountSuffix = "_count" 37 | MetricHistogramSumSuffix = "_sum" 38 | MetricHistogramBucketSuffix = "_bucket" 39 | MetricHistogramMinSuffix = "_min" 40 | MetricHistogramMaxSuffix = "_max" 41 | MetricSummaryCountFieldKey = "count" 42 | MetricSummarySumFieldKey = "sum" 43 | MetricSummaryQuantileKeyV2 = "quantile" 44 | MetricSummaryCountSuffix = "_count" 45 | MetricSummarySumSuffix = "_sum" 46 | MetricExemplarSuffix = "_exemplar" 47 | 48 | // These attribute key names are influenced by the proto message keys. 
49 | // https://github.com/open-telemetry/opentelemetry-proto/blob/abbf7b7b49a5342d0d6c0e86e91d713bbedb6580/opentelemetry/proto/trace/v1/trace.proto 50 | // https://github.com/open-telemetry/opentelemetry-proto/blob/abbf7b7b49a5342d0d6c0e86e91d713bbedb6580/opentelemetry/proto/metrics/v1/metrics.proto 51 | // https://github.com/open-telemetry/opentelemetry-proto/blob/abbf7b7b49a5342d0d6c0e86e91d713bbedb6580/opentelemetry/proto/logs/v1/logs.proto 52 | AttributeTime = "time" 53 | AttributeStartTimeUnixNano = "start_time_unix_nano" 54 | AttributeObservedTimeUnixNano = "observed_time_unix_nano" 55 | // string formatted RFC3339, used by the otel statsd input plugin 56 | AttributeStartTimeStatsd = "start_time" 57 | AttributeTraceID = "trace_id" 58 | AttributeSpanID = "span_id" 59 | AttributeTraceState = "trace_state" 60 | AttributeParentSpanID = "parent_span_id" 61 | AttributeSpanName = "span.name" // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.78.0/connector/spanmetricsconnector/connector.go#L30 62 | AttributeSpanKind = "span.kind" // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.78.0/connector/spanmetricsconnector/connector.go#L31 63 | AttributeEndTimeUnixNano = "end_time_unix_nano" 64 | AttributeDurationNano = "duration_nano" 65 | AttributeDroppedAttributesCount = "dropped_attributes_count" 66 | AttributeDroppedEventsCount = "dropped_events_count" 67 | AttributeDroppedLinksCount = "dropped_links_count" 68 | AttributeAttributes = "attributes" 69 | AttributeLinkedTraceID = "linked_trace_id" 70 | AttributeLinkedSpanID = "linked_span_id" 71 | AttributeSeverityNumber = "severity_number" 72 | AttributeSeverityText = "severity_text" 73 | AttributeBody = "body" 74 | AttributeFlags = "flags" 75 | ) 76 | -------------------------------------------------------------------------------- /common/common_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestResourceNamespace(t *testing.T) { 9 | assert.False(t, ResourceNamespace.MatchString("foo")) 10 | assert.False(t, ResourceNamespace.MatchString("foo.bar")) 11 | assert.True(t, ResourceNamespace.MatchString("service.name")) 12 | assert.False(t, ResourceNamespace.MatchString("service.foo")) 13 | assert.True(t, ResourceNamespace.MatchString("faas.instance")) 14 | assert.False(t, ResourceNamespace.MatchString("faas.execution")) 15 | } 16 | -------------------------------------------------------------------------------- /common/error.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | type RetryableError struct { 4 | error 5 | } 6 | -------------------------------------------------------------------------------- /common/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/influxdata/influxdb-observability/common 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/stretchr/testify v1.8.4 7 | go.opentelemetry.io/collector/semconv v0.87.0 8 | ) 9 | 10 | require ( 11 | github.com/davecgh/go-spew v1.1.1 // indirect 12 | github.com/kr/text v0.2.0 // indirect 13 | github.com/pmezard/go-difflib v1.0.0 // indirect 14 | gopkg.in/yaml.v3 v3.0.1 // indirect 15 | ) 16 | -------------------------------------------------------------------------------- /common/go.sum: -------------------------------------------------------------------------------- 1 | 
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 5 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 6 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 7 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 8 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 9 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 10 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 11 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 12 | go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= 13 | go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= 14 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 15 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 16 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 17 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 18 | -------------------------------------------------------------------------------- /common/logger.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | // Logger must be implemented by the user of this package. 4 | // Emitted logs indicate non-fatal conversion errors. 5 | type Logger interface { 6 | Debug(msg string, kv ...interface{}) 7 | } 8 | 9 | // NoopLogger is a no-op implementation of Logger. 10 | type NoopLogger struct{} 11 | 12 | func (NoopLogger) Debug(_ string, _ ...interface{}) {} 13 | 14 | // ErrorLogger intercepts log entries emitted by this package, 15 | // adding key "error" before any error type value. 16 | // 17 | // ErrorLogger panicks if the resulting kv slice length is odd. 18 | type ErrorLogger struct { 19 | Logger 20 | } 21 | 22 | func (e *ErrorLogger) Debug(msg string, kv ...interface{}) { 23 | for i := range kv { 24 | if _, isError := kv[i].(error); isError { 25 | kv = append(kv, nil) 26 | copy(kv[i+1:], kv[i:]) 27 | kv[i] = "error" 28 | } 29 | } 30 | if len(kv)%2 != 0 { 31 | panic("log entry kv count is odd") 32 | } 33 | e.Logger.Debug(msg, kv...) 
34 | } 35 | -------------------------------------------------------------------------------- /common/metrics.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | type InfluxMetricValueType uint8 4 | 5 | const ( 6 | InfluxMetricValueTypeUntyped InfluxMetricValueType = iota 7 | InfluxMetricValueTypeGauge 8 | InfluxMetricValueTypeSum 9 | InfluxMetricValueTypeHistogram 10 | InfluxMetricValueTypeSummary 11 | ) 12 | 13 | func (vType InfluxMetricValueType) String() string { 14 | switch vType { 15 | case InfluxMetricValueTypeUntyped: 16 | return "untyped" 17 | case InfluxMetricValueTypeGauge: 18 | return "gauge" 19 | case InfluxMetricValueTypeSum: 20 | return "sum" 21 | case InfluxMetricValueTypeHistogram: 22 | return "histogram" 23 | case InfluxMetricValueTypeSummary: 24 | return "summary" 25 | default: 26 | panic("invalid InfluxMetricValueType") 27 | } 28 | } 29 | 30 | type MetricsSchema uint8 31 | 32 | const ( 33 | _ MetricsSchema = iota 34 | MetricsSchemaTelegrafPrometheusV1 35 | MetricsSchemaTelegrafPrometheusV2 36 | MetricsSchemaOtelV1 37 | ) 38 | 39 | func (ms MetricsSchema) String() string { 40 | switch ms { 41 | case MetricsSchemaTelegrafPrometheusV1: 42 | return "telegraf-prometheus-v1" 43 | case MetricsSchemaTelegrafPrometheusV2: 44 | return "telegraf-prometheus-v2" 45 | case MetricsSchemaOtelV1: 46 | return "otel-v1" 47 | default: 48 | panic("invalid MetricsSchema") 49 | } 50 | } 51 | 52 | var MetricsSchemata = map[string]MetricsSchema{ 53 | MetricsSchemaTelegrafPrometheusV1.String(): MetricsSchemaTelegrafPrometheusV1, 54 | MetricsSchemaTelegrafPrometheusV2.String(): MetricsSchemaTelegrafPrometheusV2, 55 | MetricsSchemaOtelV1.String(): MetricsSchemaOtelV1, 56 | } 57 | -------------------------------------------------------------------------------- /demo/Dockerfile.hotrod: -------------------------------------------------------------------------------- 1 | #syntax=docker/dockerfile:1.2 2 | FROM golang:1.20-alpine3.16 AS builder 3 | RUN apk --update --no-cache add git 4 | ENV CGO_ENABLED 0 5 | 6 | RUN git clone --branch v1.41.0 --depth 1 https://github.com/jaegertracing/jaeger 7 | WORKDIR /go/jaeger 8 | COPY demo/hotrod.patch . 9 | RUN git apply hotrod.patch 10 | RUN go install ./examples/hotrod 11 | 12 | FROM scratch 13 | USER 10001 14 | COPY --from=builder --chmod=0755 /go/bin/hotrod / 15 | ENTRYPOINT ["/hotrod"] 16 | CMD ["all"] 17 | -------------------------------------------------------------------------------- /demo/docker-compose-development.yml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | jaeger-query: 4 | image: jaegertracing/jaeger-query:1.49 5 | stop_grace_period: 10s 6 | ports: 7 | - "16686:16686" # web UI 8 | depends_on: 9 | - jaeger-influxdb 10 | environment: 11 | #QUERY_BEARER_TOKEN_PROPAGATION: true 12 | LOG_LEVEL: warn 13 | SPAN_STORAGE_TYPE: grpc-plugin 14 | GRPC_STORAGE_SERVER: jaeger-influxdb:17271 15 | GRPC_STORAGE_CONNECTION_TIMEOUT: 30s 16 | QUERY_HTTP_SERVER_HOST_PORT: :16686 17 | ADMIN_HTTP_HOST_PORT: :16687 18 | QUERY_UI_CONFIG: /jaeger-ui-config.json 19 | QUERY_ENABLE_TRACING: false 20 | volumes: 21 | - ./demo/jaeger-ui-config.json:/jaeger-ui-config.json:ro 22 | 23 | jaeger-influxdb: 24 | build: 25 | context: . 
26 | dockerfile: jaeger-influxdb/Dockerfile 27 | image: jaeger-influxdb:local 28 | stop_grace_period: 10s 29 | environment: 30 | LOG_LEVEL: info 31 | LISTEN_ADDR: :17271 32 | INFLUXDB_TIMEOUT: 30s 33 | # required: hostname or hostname:port 34 | INFLUXDB_ADDR: 35 | # required: bucket name 36 | INFLUXDB_BUCKET: otel 37 | # optional: bucket name for archived traces 38 | INFLUXDB_BUCKET_ARCHIVE: 39 | # required 40 | INFLUXDB_TOKEN: 41 | 42 | hotrod: 43 | build: 44 | context: . 45 | dockerfile: demo/Dockerfile.hotrod 46 | image: hotrod:local 47 | stop_grace_period: 1s 48 | ports: 49 | - "8080:8080" # web UI 50 | depends_on: 51 | - otelcol-influxdb 52 | environment: 53 | JAEGER_AGENT_HOST: otelcol-influxdb 54 | JAEGER_AGENT_PORT: 6831 55 | 56 | otelcol-influxdb: 57 | build: 58 | context: . 59 | dockerfile: otelcol-influxdb/Dockerfile 60 | image: otelcol-influxdb:local 61 | command: [ "--config", "/config.yml" ] 62 | stop_grace_period: 10s 63 | volumes: 64 | - ./demo/otelcol-config.yml:/config.yml:ro 65 | -------------------------------------------------------------------------------- /demo/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | jaeger-query: 4 | image: jaegertracing/jaeger-query:1.49 5 | stop_grace_period: 10s 6 | ports: 7 | - "16686:16686" # web UI 8 | depends_on: 9 | - jaeger-influxdb 10 | environment: 11 | LOG_LEVEL: info 12 | SPAN_STORAGE_TYPE: grpc-plugin 13 | GRPC_STORAGE_SERVER: jaeger-influxdb:17271 14 | GRPC_STORAGE_CONNECTION_TIMEOUT: 30s 15 | QUERY_HTTP_SERVER_HOST_PORT: :16686 16 | ADMIN_HTTP_HOST_PORT: :16687 17 | QUERY_UI_CONFIG: /jaeger-ui-config.json 18 | volumes: 19 | - ./demo/jaeger-ui-config.json:/jaeger-ui-config.json:ro 20 | 21 | jaeger-influxdb: 22 | image: jacobmarble/jaeger-influxdb:0.5.9 23 | stop_grace_period: 10s 24 | environment: 25 | LOG_LEVEL: debug 26 | LISTEN_ADDR: :17271 27 | INFLUXDB_TIMEOUT: 30s 28 | env_file: 29 | - .env 30 | hotrod: 31 | image: jaegertracing/example-hotrod:1.41 32 | stop_grace_period: 1s 33 | ports: 34 | - "8080:8080" # web UI 35 | - "8083:8083" 36 | depends_on: 37 | - otelcol-influxdb 38 | environment: 39 | JAEGER_AGENT_HOST: otelcol-influxdb 40 | JAEGER_AGENT_PORT: 6831 41 | command: ["all", "-m", "prometheus"] 42 | 43 | otelcol-influxdb: 44 | image: otel/opentelemetry-collector-contrib:0.87.0 45 | command: [ "--config", "/config.yml" ] 46 | stop_grace_period: 10s 47 | volumes: 48 | - ./demo/otelcol-config.yml:/config.yml:ro 49 | env_file: 50 | - .env 51 | 52 | grafana: 53 | image: grafana/grafana:latest 54 | ports: 55 | - 3000:3000 56 | environment: 57 | - INFLUX_HOST=${INFLUXDB_ADDR} 58 | - INFLUX_TOKEN=${INFLUXDB_TOKEN} 59 | - INFLUX_ORG=${INFLUXDB_ORG} 60 | - INFLUX_BUCKET=${INFLUXDB_BUCKET} 61 | - GF_INSTALL_PLUGINS=influxdata-flightsql-datasource 62 | volumes: 63 | - ./demo/grafana/datasources:/etc/grafana/provisioning/datasources 64 | - ./demo/grafana/dashboards:/etc/grafana/provisioning/dashboards 65 | - grafana:/var/lib/grafana/ 66 | restart: always 67 | volumes: 68 | grafana: -------------------------------------------------------------------------------- /demo/grafana/dashboards/dashboards.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | # an unique provider name. Required 5 | - name: 'OpenTelemetry Demo' 6 | # Org id. Default to 1 7 | orgId: 1 8 | # name of the dashboard folder. 9 | folder: 'general' 10 | # folder UID. 
will be automatically generated if not specified 11 | folderUid: '' 12 | # provider type. Default to 'file' 13 | type: file 14 | # disable dashboard deletion 15 | disableDeletion: false 16 | # how often Grafana will scan for changed dashboards 17 | updateIntervalSeconds: 10 18 | # allow updating provisioned dashboards from the UI 19 | allowUiUpdates: true 20 | options: 21 | # path to dashboard files on disk. Required when using the 'file' type 22 | path: /etc/grafana/provisioning/dashboards 23 | # use folder names from filesystem to create folders in Grafana 24 | foldersFromFilesStructure: true -------------------------------------------------------------------------------- /demo/grafana/datasources/flightsql.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: FlightSQL 5 | type: influxdata-flightsql-datasource 6 | typeName: FlightSQL 7 | access: proxy 8 | url: '' 9 | user: '' 10 | database: '' 11 | basicAuth: false 12 | isDefault: true 13 | jsonData: 14 | host: ${INFLUX_HOST}:443 15 | metadata: 16 | - bucket-name: ${INFLUX_BUCKET} 17 | secure: true 18 | token: ${INFLUX_TOKEN} 19 | readOnly: false 20 | editable: true 21 | 22 | -------------------------------------------------------------------------------- /demo/grafana/datasources/jaeger.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | datasources: 3 | - name: Jaeger 4 | type: jaeger 5 | access: proxy 6 | url: http://jaeger-query:16686 7 | readOnly: false 8 | editable: true 9 | isDefault: false 10 | jsonData: 11 | tracesToLogs: 12 | # Field with internal link pointing to a logs data source in Grafana. 13 | # datasourceUid value must match the datasourceUid value of the logs data source. 
14 | datasourceUid: 'grafana' 15 | filterByTraceID: true 16 | filterBySpanID: false 17 | nodeGraph: 18 | enabled: true 19 | 20 | -------------------------------------------------------------------------------- /demo/hotrod.patch: -------------------------------------------------------------------------------- 1 | --- a/examples/hotrod/cmd/root.go 2 | +++ b/examples/hotrod/cmd/root.go 3 | @@ -81,7 +81,9 @@ func init() { 4 | RootCmd.PersistentFlags().StringVarP(&jaegerUI, "jaeger-ui", "j", "http://localhost:16686", "Address of Jaeger UI to create [find trace] links") 5 | 6 | rand.Seed(int64(time.Now().Nanosecond())) 7 | - logger, _ = zap.NewDevelopment( 8 | + loggerConfig := zap.NewProductionConfig() 9 | + loggerConfig.Level.SetLevel(zapcore.FatalLevel) 10 | + logger, _ = loggerConfig.Build( 11 | zap.AddStacktrace(zapcore.FatalLevel), 12 | zap.AddCallerSkip(1), 13 | ) 14 | -------------------------------------------------------------------------------- /demo/jaeger-ui-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "dagMaxNumServices": 200, 4 | "menuEnabled": true 5 | }, 6 | "monitor": { 7 | "menuEnabled": false 8 | }, 9 | "archiveEnabled": true, 10 | "menu": [ 11 | { 12 | "label": "About InfluxDB", 13 | "items": [ 14 | { 15 | "label": "Website", 16 | "url": "https://www.influxdata.com/" 17 | }, 18 | { 19 | "label": "Docs", 20 | "url": "https://docs.influxdata.com/" 21 | } 22 | ] 23 | } 24 | ], 25 | "search": { 26 | "maxLookback": { 27 | "label": "2 Days", 28 | "value": "2d" 29 | }, 30 | "maxLimit": 1000 31 | } 32 | } -------------------------------------------------------------------------------- /demo/otelcol-config.yml: -------------------------------------------------------------------------------- 1 | receivers: 2 | jaeger: 3 | protocols: 4 | thrift_compact: 5 | thrift_http: 6 | 7 | otlp: 8 | protocols: 9 | grpc: 10 | http: 11 | 12 | exporters: 13 | influxdb: 14 | endpoint: https://${INFLUXDB_ADDR}/ 15 | bucket: ${INFLUXDB_BUCKET} 16 | token: ${INFLUXDB_TOKEN} 17 | metrics_schema: otel-v1 18 | 19 | connectors: 20 | spanmetrics: 21 | aggregation_temporality: "AGGREGATION_TEMPORALITY_DELTA" 22 | servicegraph: 23 | 24 | service: 25 | telemetry: 26 | logs: 27 | level: debug 28 | encoding: json 29 | 30 | pipelines: 31 | traces: 32 | receivers: [otlp, jaeger] 33 | exporters: [influxdb, spanmetrics, servicegraph] 34 | 35 | metrics: 36 | receivers: [spanmetrics, servicegraph] 37 | exporters: [influxdb] 38 | -------------------------------------------------------------------------------- /demo/telegraf/telegraf.conf: -------------------------------------------------------------------------------- 1 | [global_tags] 2 | 3 | [agent] 4 | interval = "30s" 5 | round_interval = true 6 | metric_batch_size = 1000 7 | metric_buffer_limit = 10000 8 | collection_jitter = "0s" 9 | flush_interval = "10s" 10 | flush_jitter = "0s" 11 | precision = "" 12 | debug = false 13 | quiet = false 14 | omit_hostname = false 15 | 16 | [[outputs.influxdb_v2]] 17 | urls = ["http://${INFLUXDB_ADDR}:8086"] # Replace with your InfluxDB v2 instance URL 18 | token = "$[INFLUXDB_TOKEN}" # Replace with your InfluxDB v2 token 19 | organization = "${INFLUXDB_ORG}" # Replace with your InfluxDB v2 organization name 20 | bucket = "${INFLUXDB_BUCKET}" # Replace with your InfluxDB v2 bucket name 21 | 22 | [[inputs.prometheus]] 23 | name_override = "metrics" 24 | ## By specifying a "v2" string to the version, you can use the new v2 25 | ## metrics format. 
26 | metric_version = 2 27 | 28 | ## An array of urls to scrape metrics from. 29 | urls = ["http://localhost:8083/metrics"] # Replace with your Prometheus target URL 30 | 31 | ## An array of Kubernetes services to scrape metrics from. 32 | # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] 33 | 34 | ## Kubernetes config file to create client. 35 | # kube_config = "/path/to/kubernetes.config" 36 | 37 | ## Scrape interval 38 | # interval = "1m" 39 | 40 | ## Scrape metrics for kube-system services. 41 | ## If set to true, it is recommended to limit your urls to avoid too much 42 | ## data being scraped. 43 | # monitor_kubernetes_pods = false 44 | 45 | [[processors.regex]] 46 | namepass = ["metrics"] 47 | 48 | [[processors.regex.tags]] 49 | key = "_measurement" 50 | pattern = "(.*)_([^_]*)_([^_]*)_([^_]*)_([^_]*)" 51 | replacement = "${1}_${2}_${3}_${4}" 52 | result_key = "prefix" 53 | [[processors.regex.tags]] 54 | key = "_measurement" 55 | pattern = "(.*)_([^_]*)_([^_]*)_([^_]*)_([^_]*)" 56 | replacement = "${5}" 57 | result_key = "suffix" -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # InfluxDB/IOx Common Observability Schema 2 | 3 | *Perfect is the enemy of good.* 4 | 5 | Reference for InfluxDB/IOx schema, in terms of the OpenTelemetry data model. 6 | The goal of this schema is to be (1) a common reference for clients writing to and reading from InfluxDB/IOx and (2) a common reference for humans performing ad-hoc queries to troubleshoot observed systems. 7 | 8 | While OpenTelemetry is the primary reference, translation to/from some other common schemas are also provided. 9 | 10 | InfluxDB value types are expressed as tag and field. 11 | Tags and fields have non-empty string keys. 12 | Tags have string values, and fields have basic scalar values: string, int, uint, float, bool. 13 | 14 | Non-finite floating-point field values (+/- infinity and NaN from IEEE 754) are not currently supported by InfluxDB/IOx, but are part of the design spec. 15 | Therefore, no special consideration is given here. 16 | 17 | ## Signal Types 18 | 19 | - [Traces](traces.md) 20 | - [Metrics](metrics.md) 21 | - [Logs](logs.md) 22 | -------------------------------------------------------------------------------- /docs/logs.md: -------------------------------------------------------------------------------- 1 | # Logs 2 | 3 | A log record is a timestamped event record, containing textual structured and/or unstructured information. 
4 | Log records are composed of: 5 | 6 | - timestamp 7 | - specific attributes (mostly optional) 8 | - zero-to-many free-form attributes 9 | 10 | #### References 11 | 12 | - [OpenTelemetry Logs Specification](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.1.0/specification/logs) 13 | - [OpenTelemetry LogRecord protocol buffer message](https://github.com/open-telemetry/opentelemetry-proto/blob/v0.8.0/opentelemetry/proto/logs/v1/logs.proto#L86-L132) 14 | - [Fluentd Event structure](https://docs.fluentd.org/v/1.0/quickstart/life-of-a-fluentd-event#event-structure) 15 | - [Greylog GELF payload spec](https://docs.graylog.org/en/4.0/pages/gelf.html#gelf-payload-specification) 16 | - [Syslog message parts - RFC 3164 (obsoleted)](https://tools.ietf.org/html/rfc3164#section-4) 17 | - [Syslog message parts - RFC 5424](https://tools.ietf.org/html/rfc5424#section-6) 18 | - [Vector log event](https://vector.dev/docs/about/under-the-hood/architecture/data-model/log/) 19 | - [Logfmt description](https://brandur.org/logfmt) 20 | - [Elastic Common Schema: Log Fields](https://www.elastic.co/guide/en/ecs/current/ecs-log.html) 21 | 22 | ## Log Records 23 | 24 | Influx measurement/tag/field | OpenTelemetry LogRecord field | Fluentd | Greylog GELF | Syslog 3164 | Syslog 5424 25 | --- | --- | --- | --- | --- | --- 26 | measurement =`logs` | . 27 | timestamp | `time_unix_nano` fixed64 | `time` float | `timestamp` number | `HEADER timestamp` string | `TIMESTAMP` string 28 | `body` field string | `body` AnyValue | `record["message"]` string or
`record["log"]` string | `full_message` string or
`short_message` string | `MSG content` string | `MSG` string 29 | `name` tag | `name` string | | | `MSG tag` string | `APP-NAME` string 30 | `trace_id` tag | `trace_id` bytes 31 | `span_id` tag | `span_id` bytes 32 | . | `instrumentation_library` InstrumentationLibrary 33 | `otel.library.name` tag | `InstrumentationLibrary.name` string 34 | `otel.library.version` tag | `InstrumentationLibrary.version` string 35 | . | `resource` Resource 36 | (free-form fields)\* | `Resource.attributes` repeated KeyValue 37 | `otel.resource.dropped_attributes_count` field uint | `Resource.dropped_attributes_count` uint32 38 | (free-form fields)\* | `attributes` repeated KeyValue | `record` JSON map | `_[additional field]` string or number | | `STRUCTURED-DATA` string 39 | `otel.log.dropped_attributes_count` field uint | `dropped_attributes_count` uint32 40 | `severity_number` tag uint | `severity_number` enum SeverityNumber | | `level` number | `PRI severity` integer | `PRI severity` integer 41 | `severity_text` field string | `severity_text` string 42 | `otel.log.flags` field uint | `flags` fixed32 43 | . | `attributes["fluent.tag"]` string | `tag` string 44 | . | `Resource.attributes["net.host.name"]` string | | `host` string | `HEADER hostname` string | `HOSTNAME` string 45 | . | `Resource.attributes["net.host.ip"]` string | | | `HEADER IP address` string | `HOSTNAME` string 46 | . | `Resource.attributes["greylog.version"]` string | | `version` string =`1.1` 47 | . | `Resource.attributes["syslog.version"]` string | | | | `VERSION` integer =`1` 48 | . | TODO | | | | `PROCID` varying 49 | . | TODO | | | | `MSGID` string 50 | 51 | 52 | 53 | \* To convert from Influx to OTel, use common OTel attribute key prefixes to distinguish resource attributes from log record attributes. 54 | This regex matches resource attribute keys: 55 | 56 | ``` 57 | ^(service\.|telemetry\.|container\.|process\.|host\.|os\.|cloud\.|deployment\.|k8s\.|aws\.|gcp\.|azure\.|faas\.name|faas\.id|faas\.version|faas\.instance|faas\.max_memory) 58 | ``` 59 | -------------------------------------------------------------------------------- /influx2otel/README.md: -------------------------------------------------------------------------------- 1 | # InfluxDB Line Protocol to OpenTelemetry Converter 2 | 3 | [![Go Reference](https://pkg.go.dev/badge/github.com/influxdata/influxdb-observability/influx2otel.svg)](https://pkg.go.dev/github.com/influxdata/influxdb-observability/influx2otel) 4 | 5 | This package converts [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) metrics to OpenTelemetry proto messages. 
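
A minimal usage sketch follows, based on the calls exercised by this module's own tests (`NewLineProtocolToOtelMetrics`, `NewBatch`, `AddPoint`, `GetMetrics`); the measurement name, tag, and field values here are illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb-observability/common"
	"github.com/influxdata/influxdb-observability/influx2otel"
)

func main() {
	// Construct a converter; any common.Logger implementation can be passed
	// to receive non-fatal conversion warnings.
	c, err := influx2otel.NewLineProtocolToOtelMetrics(new(common.NoopLogger))
	if err != nil {
		panic(err)
	}

	// Accumulate decoded line protocol points into a batch.
	b := c.NewBatch()
	err = b.AddPoint("cpu",
		map[string]string{"host": "myhost"},       // tags (illustrative)
		map[string]interface{}{"usage_user": 0.5}, // fields (illustrative)
		time.Unix(0, 1395066363000000123),         // timestamp
		common.InfluxMetricValueTypeUntyped)       // value type hint
	if err != nil {
		panic(err)
	}

	// The batch yields pmetric.Metrics, ready to hand to OpenTelemetry consumers.
	metrics := b.GetMetrics()
	fmt.Println(metrics.MetricCount())
}
```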
6 | -------------------------------------------------------------------------------- /influx2otel/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/influxdata/influxdb-observability/influx2otel 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/influxdata/influxdb-observability/common v0.5.8 7 | github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0 8 | github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0 9 | github.com/stretchr/testify v1.8.4 10 | go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 11 | go.opentelemetry.io/collector/semconv v0.87.0 12 | ) 13 | 14 | require ( 15 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 16 | github.com/davecgh/go-spew v1.1.1 // indirect 17 | github.com/gogo/protobuf v1.3.2 // indirect 18 | github.com/golang/protobuf v1.5.3 // indirect 19 | github.com/json-iterator/go v1.1.12 // indirect 20 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 21 | github.com/modern-go/reflect2 v1.0.2 // indirect 22 | github.com/pmezard/go-difflib v1.0.0 // indirect 23 | go.uber.org/multierr v1.11.0 // indirect 24 | golang.org/x/net v0.17.0 // indirect 25 | golang.org/x/sys v0.13.0 // indirect 26 | golang.org/x/text v0.13.0 // indirect 27 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect 28 | google.golang.org/grpc v1.58.3 // indirect 29 | google.golang.org/protobuf v1.31.0 // indirect 30 | gopkg.in/yaml.v3 v3.0.1 // indirect 31 | ) 32 | 33 | replace github.com/influxdata/influxdb-observability/common => ../common 34 | -------------------------------------------------------------------------------- /influx2otel/go.sum: -------------------------------------------------------------------------------- 1 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 2 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 3 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 5 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 7 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 8 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 9 | github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 10 | github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 11 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 12 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 13 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 14 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 15 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 16 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 17 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 18 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 19 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 20 | 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 21 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 22 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 23 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 24 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 25 | github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.87.0 h1:ekT4/I9J484j4yR/0VHj5AGtgv8KmNd+e4oXxNJNR/o= 26 | github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0 h1:9RtkoPmUPRW1NrOawEiWsxOZ/dBlym5DzhLXjRpM9tM= 27 | github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0/go.mod h1:xvO0/6zTw6UBl7g4hZpvapfvANNSnj6sQcSnF6jqSSg= 28 | github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0 h1:zA50pvJziZjWQiN9MZIkT6Ii3hMSaCKa6jvs1vCYT5g= 29 | github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0/go.mod h1:IjdtiiTTNlAkspcNyAjHysWAZs5U48alWGUodTkkxhI= 30 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 31 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 32 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 33 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 34 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 35 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 36 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 37 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 38 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 39 | go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE= 40 | go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg= 41 | go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= 42 | go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= 43 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 44 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 45 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 46 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 47 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 48 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 49 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 50 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 51 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 52 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 53 | golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 54 | golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= 55 | golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= 56 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 57 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 58 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 59 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 60 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 61 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 62 | golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= 63 | golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 64 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 65 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 66 | golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= 67 | golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 68 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 69 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 70 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 71 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 72 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 73 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 74 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 75 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 76 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= 77 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= 78 | google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= 79 | google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= 80 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 81 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 82 | google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= 83 | google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 84 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 85 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 86 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 87 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 88 | 
-------------------------------------------------------------------------------- /influx2otel/metrics_helper_test.go: -------------------------------------------------------------------------------- 1 | package influx2otel_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" 7 | "github.com/stretchr/testify/assert" 8 | "go.opentelemetry.io/collector/pdata/pmetric" 9 | ) 10 | 11 | func assertMetricsEqual(t *testing.T, expect, got pmetric.Metrics) { 12 | t.Helper() 13 | 14 | assert.NoError(t, 15 | pmetrictest.CompareMetrics(expect, got, 16 | pmetrictest.IgnoreMetricDataPointsOrder(), 17 | pmetrictest.IgnoreMetricsOrder(), 18 | pmetrictest.IgnoreResourceMetricsOrder(), 19 | pmetrictest.IgnoreScopeMetricsOrder(), 20 | pmetrictest.IgnoreSummaryDataPointValueAtQuantileSliceOrder(), 21 | ), 22 | ) 23 | } 24 | -------------------------------------------------------------------------------- /influx2otel/metrics_statsd_schema_test.go: -------------------------------------------------------------------------------- 1 | package influx2otel_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "go.opentelemetry.io/collector/pdata/pcommon" 8 | "go.opentelemetry.io/collector/pdata/pmetric" 9 | 10 | "github.com/stretchr/testify/require" 11 | 12 | "github.com/influxdata/influxdb-observability/common" 13 | "github.com/influxdata/influxdb-observability/influx2otel" 14 | ) 15 | 16 | func TestStatsdTimingSchema(t *testing.T) { 17 | c, err := influx2otel.NewLineProtocolToOtelMetrics(new(common.NoopLogger)) 18 | require.NoError(t, err) 19 | 20 | b := c.NewBatch() 21 | err = b.AddPoint("test_service_stage_metrics_biz_success_v4", 22 | map[string]string{ 23 | "metric_type": "timing", 24 | "type": "app", 25 | }, 26 | map[string]interface{}{ 27 | "count": float64(10), 28 | "lower": float64(10), 29 | "mean": float64(10), 30 | "median": float64(10), 31 | "stddev": float64(10), 32 | "sum": float64(100), 33 | "upper": float64(20), 34 | }, 35 | time.Unix(0, 1395066363000000123), 36 | common.InfluxMetricValueTypeUntyped) 37 | require.NoError(t, err) 38 | 39 | expect := pmetric.NewMetrics() 40 | rm := expect.ResourceMetrics().AppendEmpty() 41 | isMetrics := rm.ScopeMetrics().AppendEmpty() 42 | m := isMetrics.Metrics().AppendEmpty() 43 | m.SetName("test_service_stage_metrics_biz_success_v4_count") 44 | m.SetEmptyGauge() 45 | dp := m.Gauge().DataPoints().AppendEmpty() 46 | dp.Attributes().PutStr("metric_type", "timing") 47 | dp.Attributes().PutStr("type", "app") 48 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 49 | dp.SetDoubleValue(10) 50 | 51 | m = isMetrics.Metrics().AppendEmpty() 52 | m.SetName("test_service_stage_metrics_biz_success_v4_lower") 53 | m.SetEmptyGauge() 54 | dp = m.Gauge().DataPoints().AppendEmpty() 55 | dp.Attributes().PutStr("metric_type", "timing") 56 | dp.Attributes().PutStr("type", "app") 57 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 58 | dp.SetDoubleValue(10) 59 | 60 | m = isMetrics.Metrics().AppendEmpty() 61 | m.SetName("test_service_stage_metrics_biz_success_v4_mean") 62 | m.SetEmptyGauge() 63 | dp = m.Gauge().DataPoints().AppendEmpty() 64 | dp.Attributes().PutStr("metric_type", "timing") 65 | dp.Attributes().PutStr("type", "app") 66 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 67 | dp.SetDoubleValue(10) 68 | 69 | m = isMetrics.Metrics().AppendEmpty() 70 | m.SetName("test_service_stage_metrics_biz_success_v4_median") 71 | m.SetEmptyGauge() 72 | dp = 
m.Gauge().DataPoints().AppendEmpty() 73 | dp.Attributes().PutStr("metric_type", "timing") 74 | dp.Attributes().PutStr("type", "app") 75 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 76 | dp.SetDoubleValue(10) 77 | 78 | m = isMetrics.Metrics().AppendEmpty() 79 | m.SetName("test_service_stage_metrics_biz_success_v4_stddev") 80 | m.SetEmptyGauge() 81 | dp = m.Gauge().DataPoints().AppendEmpty() 82 | dp.Attributes().PutStr("metric_type", "timing") 83 | dp.Attributes().PutStr("type", "app") 84 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 85 | dp.SetDoubleValue(10) 86 | 87 | m = isMetrics.Metrics().AppendEmpty() 88 | m.SetName("test_service_stage_metrics_biz_success_v4_sum") 89 | m.SetEmptyGauge() 90 | dp = m.Gauge().DataPoints().AppendEmpty() 91 | dp.Attributes().PutStr("metric_type", "timing") 92 | dp.Attributes().PutStr("type", "app") 93 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 94 | dp.SetDoubleValue(100) 95 | 96 | m = isMetrics.Metrics().AppendEmpty() 97 | m.SetName("test_service_stage_metrics_biz_success_v4_upper") 98 | m.SetEmptyGauge() 99 | dp = m.Gauge().DataPoints().AppendEmpty() 100 | dp.Attributes().PutStr("metric_type", "timing") 101 | dp.Attributes().PutStr("type", "app") 102 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 103 | dp.SetDoubleValue(20) 104 | 105 | assertMetricsEqual(t, expect, b.GetMetrics()) 106 | } 107 | 108 | func TestStatsCounter(t *testing.T) { 109 | c, err := influx2otel.NewLineProtocolToOtelMetrics(new(common.NoopLogger)) 110 | require.NoError(t, err) 111 | 112 | // statsd metric: 113 | // gorets:1|c 114 | b := c.NewBatch() 115 | err = b.AddPoint("gorets", 116 | map[string]string{ 117 | "metric_type": "counter", 118 | "type": "app", 119 | }, 120 | map[string]interface{}{ 121 | "value": int64(10), 122 | }, 123 | time.Unix(0, 1395066363000000123), 124 | common.InfluxMetricValueTypeSum) 125 | require.NoError(t, err) 126 | 127 | expect := pmetric.NewMetrics() 128 | rm := expect.ResourceMetrics().AppendEmpty() 129 | isMetrics := rm.ScopeMetrics().AppendEmpty() 130 | m := isMetrics.Metrics().AppendEmpty() 131 | m.SetName("gorets_value") 132 | m.SetEmptySum() 133 | m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) 134 | m.Sum().SetIsMonotonic(true) 135 | dp := m.Sum().DataPoints().AppendEmpty() 136 | dp.Attributes().PutStr("metric_type", "counter") 137 | dp.Attributes().PutStr("type", "app") 138 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 139 | dp.SetIntValue(10) 140 | 141 | assertMetricsEqual(t, expect, b.GetMetrics()) 142 | } 143 | 144 | func TestStatsDeltaCounter(t *testing.T) { 145 | c, err := influx2otel.NewLineProtocolToOtelMetrics(new(common.NoopLogger)) 146 | require.NoError(t, err) 147 | 148 | // statsd metric: 149 | // gorets:1|c 150 | b := c.NewBatch() 151 | err = b.AddPoint("gorets", 152 | map[string]string{ 153 | "metric_type": "counter", 154 | "type": "app", 155 | "temporality": "delta", 156 | }, 157 | map[string]interface{}{ 158 | "value": int64(10), 159 | }, 160 | time.Unix(0, 1395066363000000123), 161 | common.InfluxMetricValueTypeSum) 162 | require.NoError(t, err) 163 | 164 | expect := pmetric.NewMetrics() 165 | rm := expect.ResourceMetrics().AppendEmpty() 166 | isMetrics := rm.ScopeMetrics().AppendEmpty() 167 | m := isMetrics.Metrics().AppendEmpty() 168 | m.SetName("gorets_value") 169 | m.SetEmptySum() 170 | m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) 171 | m.Sum().SetIsMonotonic(true) 172 | dp := m.Sum().DataPoints().AppendEmpty() 
173 | dp.Attributes().PutStr("metric_type", "counter") 174 | dp.Attributes().PutStr("type", "app") 175 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 176 | dp.SetIntValue(10) 177 | 178 | assertMetricsEqual(t, expect, b.GetMetrics()) 179 | } 180 | 181 | func TestStatsGauge(t *testing.T) { 182 | c, err := influx2otel.NewLineProtocolToOtelMetrics(new(common.NoopLogger)) 183 | require.NoError(t, err) 184 | 185 | // statsd metric: 186 | // gaugor:333|g 187 | b := c.NewBatch() 188 | err = b.AddPoint("gaugor", 189 | map[string]string{ 190 | "metric_type": "gauge", 191 | "type": "app", 192 | }, 193 | map[string]interface{}{ 194 | "value": int64(333), 195 | }, 196 | time.Unix(0, 1395066363000000123), 197 | common.InfluxMetricValueTypeGauge) 198 | require.NoError(t, err) 199 | 200 | expect := pmetric.NewMetrics() 201 | rm := expect.ResourceMetrics().AppendEmpty() 202 | isMetrics := rm.ScopeMetrics().AppendEmpty() 203 | m := isMetrics.Metrics().AppendEmpty() 204 | m.SetName("gaugor_value") 205 | m.SetEmptyGauge() 206 | dp := m.Gauge().DataPoints().AppendEmpty() 207 | 208 | dp.Attributes().PutStr("metric_type", "gauge") 209 | dp.Attributes().PutStr("type", "app") 210 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 211 | dp.SetIntValue(333) 212 | 213 | assertMetricsEqual(t, expect, b.GetMetrics()) 214 | } 215 | 216 | func TestStatsdSetsSchema(t *testing.T) { 217 | c, err := influx2otel.NewLineProtocolToOtelMetrics(new(common.NoopLogger)) 218 | require.NoError(t, err) 219 | 220 | // statsd metric: 221 | // uniques:765|s 222 | b := c.NewBatch() 223 | err = b.AddPoint("uniques", 224 | map[string]string{ 225 | "metric_type": "sets", 226 | "type": "app", 227 | }, 228 | map[string]interface{}{ 229 | "value": int64(1), 230 | }, 231 | time.Unix(0, 1395066363000000123), 232 | common.InfluxMetricValueTypeUntyped) 233 | 234 | require.NoError(t, err) 235 | 236 | expect := pmetric.NewMetrics() 237 | rm := expect.ResourceMetrics().AppendEmpty() 238 | isMetrics := rm.ScopeMetrics().AppendEmpty() 239 | m := isMetrics.Metrics().AppendEmpty() 240 | m.SetName("uniques_value") 241 | m.SetEmptyGauge() 242 | dp := m.Gauge().DataPoints().AppendEmpty() 243 | dp.Attributes().PutStr("metric_type", "sets") 244 | dp.Attributes().PutStr("type", "app") 245 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 246 | dp.SetIntValue(1) 247 | 248 | assertMetricsEqual(t, expect, b.GetMetrics()) 249 | } 250 | 251 | func TestDeltaTemporalityStatsdCounter(t *testing.T) { 252 | c, err := influx2otel.NewLineProtocolToOtelMetrics(new(common.NoopLogger)) 253 | require.NoError(t, err) 254 | 255 | // statsd metric: 256 | // gorets:1|c 257 | b := c.NewBatch() 258 | err = b.AddPoint("gorets", 259 | map[string]string{ 260 | "metric_type": "counter", 261 | "type": "app", 262 | "temporality": "delta", 263 | }, 264 | map[string]interface{}{ 265 | "value": int64(10), 266 | common.AttributeStartTimeStatsd: "2023-04-13T22:34:00.000535129+03:00", 267 | }, 268 | time.Unix(0, 1395066363000000123), 269 | common.InfluxMetricValueTypeSum) 270 | require.NoError(t, err) 271 | 272 | expect := pmetric.NewMetrics() 273 | rm := expect.ResourceMetrics().AppendEmpty() 274 | isMetrics := rm.ScopeMetrics().AppendEmpty() 275 | m := isMetrics.Metrics().AppendEmpty() 276 | m.SetName("gorets_value") 277 | m.SetEmptySum() 278 | m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) 279 | m.Sum().SetIsMonotonic(true) 280 | dp := m.Sum().DataPoints().AppendEmpty() 281 | dp.Attributes().PutStr("metric_type", "counter") 282 | 
dp.Attributes().PutStr("type", "app") 283 | dp.SetStartTimestamp(pcommon.Timestamp(1681414440000535129)) 284 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 285 | dp.SetIntValue(10) 286 | 287 | assertMetricsEqual(t, expect, b.GetMetrics()) 288 | } 289 | -------------------------------------------------------------------------------- /influx2otel/metrics_unknown_schema_test.go: -------------------------------------------------------------------------------- 1 | package influx2otel_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "go.opentelemetry.io/collector/pdata/pcommon" 8 | "go.opentelemetry.io/collector/pdata/pmetric" 9 | 10 | "github.com/stretchr/testify/require" 11 | 12 | "github.com/influxdata/influxdb-observability/common" 13 | "github.com/influxdata/influxdb-observability/influx2otel" 14 | ) 15 | 16 | func TestUnknownSchema(t *testing.T) { 17 | c, err := influx2otel.NewLineProtocolToOtelMetrics(new(common.NoopLogger)) 18 | require.NoError(t, err) 19 | 20 | b := c.NewBatch() 21 | err = b.AddPoint("cpu", 22 | map[string]string{ 23 | "container.name": "42", 24 | "otel.library.name": "My Library", 25 | "otel.library.version": "latest", 26 | "cpu": "cpu4", 27 | "host": "777348dc6343", 28 | }, 29 | map[string]interface{}{ 30 | "usage_user": 0.10090817356207936, 31 | "usage_system": 0.3027245206862381, 32 | "some_int_key": int64(7), 33 | }, 34 | time.Unix(0, 1395066363000000123), 35 | common.InfluxMetricValueTypeUntyped) 36 | require.NoError(t, err) 37 | 38 | expect := pmetric.NewMetrics() 39 | rm := expect.ResourceMetrics().AppendEmpty() 40 | rm.Resource().Attributes().PutStr("container.name", "42") 41 | isMetrics := rm.ScopeMetrics().AppendEmpty() 42 | isMetrics.Scope().SetName("My Library") 43 | isMetrics.Scope().SetVersion("latest") 44 | m := isMetrics.Metrics().AppendEmpty() 45 | m.SetName("cpu_usage_user") 46 | m.SetEmptyGauge() 47 | dp := m.Gauge().DataPoints().AppendEmpty() 48 | dp.Attributes().PutStr("cpu", "cpu4") 49 | dp.Attributes().PutStr("host", "777348dc6343") 50 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 51 | dp.SetDoubleValue(0.10090817356207936) 52 | m = isMetrics.Metrics().AppendEmpty() 53 | m.SetName("cpu_usage_system") 54 | m.SetEmptyGauge() 55 | dp = m.Gauge().DataPoints().AppendEmpty() 56 | dp.Attributes().PutStr("cpu", "cpu4") 57 | dp.Attributes().PutStr("host", "777348dc6343") 58 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 59 | dp.SetDoubleValue(0.3027245206862381) 60 | m = isMetrics.Metrics().AppendEmpty() 61 | m.SetName("cpu_some_int_key") 62 | m.SetEmptyGauge() 63 | dp = m.Gauge().DataPoints().AppendEmpty() 64 | dp.Attributes().PutStr("cpu", "cpu4") 65 | dp.Attributes().PutStr("host", "777348dc6343") 66 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 67 | dp.SetIntValue(7) 68 | 69 | assertMetricsEqual(t, expect, b.GetMetrics()) 70 | } 71 | -------------------------------------------------------------------------------- /jaeger-influxdb/Dockerfile: -------------------------------------------------------------------------------- 1 | #syntax=docker/dockerfile:1.2 2 | FROM golang:1.20-alpine3.16 AS builder 3 | RUN apk --update --no-cache add ca-certificates 4 | ENV CGO_ENABLED 0 5 | 6 | COPY . 
/project 7 | WORKDIR /project/jaeger-influxdb 8 | 9 | RUN \ 10 | --mount=type=cache,id=influxdb-observability-gocache,sharing=locked,target=/root/.cache/go-build \ 11 | --mount=type=cache,id=influxdb-observability-gomodcache,sharing=locked,target=/go/pkg/mod \ 12 | du -cshx /root/.cache/go-build /go/pkg/mod && \ 13 | go install ./cmd/jaeger-influxdb && \ 14 | du -cshx /root/.cache/go-build /go/pkg/mod 15 | 16 | FROM scratch 17 | USER 10001 18 | COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 19 | COPY --from=builder --chmod=0755 /go/bin/jaeger-influxdb / 20 | ENTRYPOINT ["/jaeger-influxdb"] 21 | -------------------------------------------------------------------------------- /jaeger-influxdb/Dockerfile.all-in-one: -------------------------------------------------------------------------------- 1 | #syntax=docker/dockerfile:1.2 2 | FROM golang:1.20-alpine3.16 AS builder 3 | RUN apk --update --no-cache add ca-certificates 4 | ENV CGO_ENABLED 0 5 | 6 | COPY . /project 7 | WORKDIR /project/jaeger-influxdb 8 | 9 | RUN \ 10 | --mount=type=cache,id=influxdb-observability-gocache,sharing=locked,target=/root/.cache/go-build \ 11 | --mount=type=cache,id=influxdb-observability-gomodcache,sharing=locked,target=/go/pkg/mod \ 12 | du -cshx /root/.cache/go-build /go/pkg/mod && \ 13 | go install ./cmd/jaeger-influxdb && \ 14 | du -cshx /root/.cache/go-build /go/pkg/mod 15 | 16 | FROM alpine:3.16 17 | RUN apk --update --no-cache add bash ca-certificates 18 | USER 10001 19 | COPY --from=jaegertracing/jaeger-query:1.49 --chmod=0755 /go/bin/query-linux /jaeger-query 20 | COPY --chmod=0755 jaeger-influxdb/all-in-one.sh / 21 | COPY --from=builder --chmod=0755 /go/bin/jaeger-influxdb / 22 | ENTRYPOINT ["/all-in-one.sh"] 23 | -------------------------------------------------------------------------------- /jaeger-influxdb/README.md: -------------------------------------------------------------------------------- 1 | # Jaeger UI Storage backend, InfluxDB Service 2 | 3 | **This is experimental software** 4 | 5 | This service enables querying traces stored in InfluxDB, via the Jaeger UI. 6 | To write traces to InfluxDB, use the [OpenTelemetry Collector, InfluxDB Distribution](https://github.com/influxdata/influxdb-observability/tree/main/otelcol-influxdb). 7 | 8 | ## Docker 9 | Docker images exist at [jacobmarble/jaeger-influxdb](https://hub.docker.com/r/jacobmarble/jaeger-influxdb) and [jacobmarble/jaeger-influxdb-all-in-one](https://hub.docker.com/r/jacobmarble/jaeger-influxdb-all-in-one). 10 | In particular, the all-in-one image is great for testing, 11 | but for production use, consider running `jaegertracing/jaeger-query` and `jacobmarble/jaeger-influxdb` in separate containers. 12 | For an example configuration using separate containers, see [docker-compose.yml](../demo/docker-compose.yml). 
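The standalone `jacobmarble/jaeger-influxdb` container is configured via the flags (or matching environment variables) defined in `internal/config.go`. As a minimal sketch with placeholder address, bucket, and token values (not a canonical deployment), a local run might look like:

```console
$ docker run --rm -p 17271:17271 jacobmarble/jaeger-influxdb \
    --listen-addr=:17271 \
    --influxdb-addr=influxdb.example.com \
    --influxdb-bucket=otel \
    --influxdb-token=<token>
```

A separately running `jaegertracing/jaeger-query` instance can then be configured to use this address as its gRPC remote storage server.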
13 | 14 | ## Build 15 | Build the `jaeger-influxdb` service with `go install`: 16 | 17 | ```console 18 | $ cd jaeger-influxdb 19 | $ go install ./cmd/jaeger-influxdb/ 20 | ``` 21 | -------------------------------------------------------------------------------- /jaeger-influxdb/all-in-one.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | trap 'kill $(jobs -p)' SIGINT SIGTERM 4 | 5 | ./jaeger-influxdb & 6 | ./jaeger-query & 7 | 8 | wait -n 9 | kill -s SIGINT $(jobs -p) 10 | wait 11 | -------------------------------------------------------------------------------- /jaeger-influxdb/cmd/jaeger-influxdb/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "net" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | "time" 12 | 13 | "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared" 14 | "github.com/mattn/go-isatty" 15 | "github.com/spf13/cobra" 16 | "go.uber.org/multierr" 17 | "go.uber.org/zap" 18 | "google.golang.org/grpc" 19 | "google.golang.org/grpc/reflection" 20 | 21 | "github.com/influxdata/influxdb-observability/jaeger-influxdb/internal" 22 | ) 23 | 24 | const serviceName = "jaeger-influxdb" 25 | 26 | func main() { 27 | config := new(internal.Config) 28 | command := &cobra.Command{ 29 | Use: serviceName, 30 | Args: cobra.NoArgs, 31 | Short: serviceName + " is the Jaeger-InfluxDB gRPC remote storage service", 32 | RunE: func(cmd *cobra.Command, _ []string) error { 33 | return run(cmd.Context(), config) 34 | }, 35 | } 36 | 37 | if err := config.Init(command); err != nil { 38 | fmt.Printf("failed to get config: %s\n", err.Error()) 39 | os.Exit(1) 40 | } 41 | 42 | logger, err := initLogger(config) 43 | if err != nil { 44 | fmt.Printf("failed to start logger: %s\n", err.Error()) 45 | os.Exit(1) 46 | } 47 | 48 | ctx := contextWithStandardSignals(context.Background()) 49 | ctx = internal.LoggerWithContext(ctx, logger) 50 | if err := command.ExecuteContext(ctx); err != nil { 51 | if !errors.Is(err, context.Canceled) { 52 | fmt.Printf("%s\n", err.Error()) 53 | os.Exit(1) 54 | } 55 | } 56 | } 57 | 58 | func initLogger(config *internal.Config) (*zap.Logger, error) { 59 | var loggerConfig zap.Config 60 | if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) { 61 | loggerConfig = zap.NewDevelopmentConfig() 62 | } else { 63 | loggerConfig = zap.NewProductionConfig() 64 | } 65 | var err error 66 | loggerConfig.Level, err = zap.ParseAtomicLevel(config.LogLevel) 67 | if err != nil { 68 | return nil, err 69 | } 70 | return loggerConfig.Build(zap.AddStacktrace(zap.ErrorLevel)) 71 | } 72 | 73 | func contextWithStandardSignals(ctx context.Context) context.Context { 74 | sigCh := make(chan os.Signal, 1) 75 | signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) 76 | ctx, cancel := context.WithCancel(ctx) 77 | go func() { 78 | defer cancel() 79 | select { 80 | case <-ctx.Done(): 81 | return 82 | case <-sigCh: 83 | return 84 | } 85 | }() 86 | return ctx 87 | } 88 | 89 | type contextServerStream struct { 90 | grpc.ServerStream 91 | ctx context.Context 92 | } 93 | 94 | func (ss *contextServerStream) Context() context.Context { 95 | return ss.ctx 96 | } 97 | 98 | func run(ctx context.Context, config *internal.Config) error { 99 | backend, err := internal.NewInfluxdbStorage(ctx, config) 100 | if err != nil { 101 | return err 102 | } 103 | defer backend.Close() 104 | logger := internal.LoggerFromContext(ctx) 105 | 
grpcHandler := shared.NewGRPCHandlerWithPlugins(backend, backend, nil) 106 | grpcServer := grpc.NewServer( 107 | grpc.UnaryInterceptor(func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { 108 | res, err := handler(internal.LoggerWithContext(ctx, logger), req) 109 | if err != nil && err != context.Canceled { 110 | logger.Error("gRPC interceptor", zap.Error(err)) 111 | } 112 | return res, err 113 | }), 114 | grpc.StreamInterceptor(func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { 115 | ctx := internal.LoggerWithContext(stream.Context(), logger) 116 | stream = &contextServerStream{ 117 | ServerStream: stream, 118 | ctx: ctx, 119 | } 120 | err := handler(srv, stream) 121 | if err != nil && err != context.Canceled { 122 | logger.Error("gRPC interceptor", zap.Error(err)) 123 | } 124 | return err 125 | })) 126 | reflection.Register(grpcServer) 127 | if err = grpcHandler.Register(grpcServer); err != nil { 128 | return err 129 | } 130 | 131 | grpcListener, err := net.Listen("tcp", config.ListenAddr) 132 | if err != nil { 133 | return err 134 | } 135 | // grpcServer.Serve() closes this listener, so don't need to close it directly 136 | defer func() { _ = grpcListener.Close() }() 137 | 138 | errCh := make(chan error) 139 | go func() { 140 | defer close(errCh) 141 | errCh <- grpcServer.Serve(grpcListener) 142 | }() 143 | 144 | internal.LoggerFromContext(ctx).Info("ready") 145 | <-ctx.Done() 146 | internal.LoggerFromContext(ctx).Info("exiting") 147 | 148 | grpcServer.GracefulStop() 149 | select { 150 | case err = <-errCh: 151 | case <-time.After(5 * time.Second): 152 | internal.LoggerFromContext(ctx).Warn("the gRPC server is being stubborn, so forcing it to stop") 153 | grpcServer.Stop() 154 | select { 155 | case err = <-errCh: 156 | case <-time.After(3 * time.Second): 157 | err = errors.New("the gRPC server never stopped") 158 | } 159 | } 160 | 161 | err = multierr.Combine(err, backend.Close()) 162 | return err 163 | } 164 | -------------------------------------------------------------------------------- /jaeger-influxdb/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/influxdata/influxdb-observability/jaeger-influxdb 2 | 3 | go 1.20 4 | 5 | require ( 6 | github.com/apache/arrow-adbc/go/adbc v0.6.0 7 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da 8 | github.com/influxdata/influxdb-observability/common v0.5.8 9 | github.com/influxdata/line-protocol/v2 v2.2.1 10 | github.com/jaegertracing/jaeger v1.50.0 11 | github.com/mattn/go-isatty v0.0.18 12 | github.com/opentracing/opentracing-go v1.2.0 13 | github.com/spf13/cobra v1.7.0 14 | github.com/spf13/viper v1.16.0 15 | github.com/stretchr/testify v1.8.4 16 | go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 17 | go.opentelemetry.io/collector/semconv v0.87.0 18 | go.uber.org/multierr v1.11.0 19 | go.uber.org/zap v1.26.0 20 | google.golang.org/grpc v1.58.3 21 | ) 22 | 23 | require ( 24 | github.com/apache/arrow/go/v13 v13.0.0 // indirect 25 | github.com/bluele/gcache v0.0.2 // indirect 26 | github.com/davecgh/go-spew v1.1.1 // indirect 27 | github.com/fatih/color v1.13.0 // indirect 28 | github.com/fsnotify/fsnotify v1.6.0 // indirect 29 | github.com/goccy/go-json v0.10.2 // indirect 30 | github.com/gogo/protobuf v1.3.2 // indirect 31 | github.com/golang/protobuf v1.5.3 // indirect 32 | github.com/google/flatbuffers v23.5.26+incompatible // indirect 33 
| github.com/hashicorp/go-hclog v1.5.0 // indirect 34 | github.com/hashicorp/go-plugin v1.5.2 // indirect 35 | github.com/hashicorp/hcl v1.0.0 // indirect 36 | github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d // indirect 37 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 38 | github.com/json-iterator/go v1.1.12 // indirect 39 | github.com/klauspost/compress v1.17.0 // indirect 40 | github.com/klauspost/cpuid/v2 v2.2.5 // indirect 41 | github.com/magiconair/properties v1.8.7 // indirect 42 | github.com/mattn/go-colorable v0.1.13 // indirect 43 | github.com/mitchellh/go-testing-interface v1.0.0 // indirect 44 | github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect 45 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 46 | github.com/modern-go/reflect2 v1.0.2 // indirect 47 | github.com/oklog/run v1.1.0 // indirect 48 | github.com/pelletier/go-toml/v2 v2.0.8 // indirect 49 | github.com/pierrec/lz4/v4 v4.1.18 // indirect 50 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 51 | github.com/spf13/afero v1.9.5 // indirect 52 | github.com/spf13/cast v1.5.1 // indirect 53 | github.com/spf13/jwalterweatherman v1.1.0 // indirect 54 | github.com/spf13/pflag v1.0.5 // indirect 55 | github.com/subosito/gotenv v1.4.2 // indirect 56 | github.com/zeebo/xxh3 v1.0.2 // indirect 57 | go.opentelemetry.io/otel v1.19.0 // indirect 58 | go.opentelemetry.io/otel/trace v1.19.0 // indirect 59 | go.uber.org/atomic v1.11.0 // indirect 60 | golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect 61 | golang.org/x/mod v0.12.0 // indirect 62 | golang.org/x/net v0.17.0 // indirect 63 | golang.org/x/sync v0.3.0 // indirect 64 | golang.org/x/sys v0.13.0 // indirect 65 | golang.org/x/text v0.13.0 // indirect 66 | golang.org/x/tools v0.11.0 // indirect 67 | golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect 68 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect 69 | google.golang.org/protobuf v1.31.0 // indirect 70 | gopkg.in/ini.v1 v1.67.0 // indirect 71 | gopkg.in/yaml.v3 v3.0.1 // indirect 72 | ) 73 | 74 | replace github.com/influxdata/influxdb-observability/common => ../common 75 | -------------------------------------------------------------------------------- /jaeger-influxdb/internal/common.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "math" 8 | "regexp" 9 | "strings" 10 | "time" 11 | 12 | "github.com/apache/arrow-adbc/go/adbc" 13 | "github.com/jaegertracing/jaeger/model" 14 | "github.com/opentracing/opentracing-go/ext" 15 | "go.opentelemetry.io/collector/pdata/ptrace" 16 | semconv "go.opentelemetry.io/collector/semconv/v1.16.0" 17 | 18 | "github.com/influxdata/influxdb-observability/common" 19 | ) 20 | 21 | func recordToSpan(record map[string]interface{}) (*model.Span, error) { 22 | span := model.Span{ 23 | Process: &model.Process{ 24 | ServiceName: "", 25 | }, 26 | } 27 | parentSpanRef := model.SpanRef{ 28 | RefType: model.SpanRefType_CHILD_OF, 29 | } 30 | // TODO add more process attributes 31 | var err error 32 | for k, v := range record { 33 | if vv, ok := v.(string); (ok && vv == "NULL") || (!ok && v == nil) { 34 | continue 35 | } 36 | switch k { 37 | case common.AttributeTime: 38 | if vv, ok := v.(time.Time); !ok { 39 | return nil, fmt.Errorf("time is type %T", v) 40 | } else { 41 | span.StartTime = vv 42 | } 43 | 
case common.AttributeTraceID: 44 | if vv, ok := v.(string); !ok { 45 | return nil, fmt.Errorf("trace ID is type %T", v) 46 | } else if span.TraceID, err = model.TraceIDFromString(vv); err != nil { 47 | return nil, err 48 | } 49 | parentSpanRef.TraceID = span.TraceID 50 | case common.AttributeSpanID: 51 | if vv, ok := v.(string); !ok { 52 | return nil, fmt.Errorf("span ID is type %T", v) 53 | } else if span.SpanID, err = model.SpanIDFromString(vv); err != nil { 54 | return nil, err 55 | } 56 | case semconv.AttributeServiceName: 57 | if vv, ok := v.(string); !ok { 58 | return nil, fmt.Errorf("service name is type %T", v) 59 | } else { 60 | span.Process.ServiceName = vv 61 | } 62 | case common.AttributeSpanName: 63 | if vv, ok := v.(string); !ok { 64 | return nil, fmt.Errorf("operation name is type %T", v) 65 | } else { 66 | span.OperationName = vv 67 | } 68 | case common.AttributeSpanKind: 69 | if vv, ok := v.(string); !ok { 70 | return nil, fmt.Errorf("span kind is type %T", v) 71 | } else { 72 | switch vv { 73 | case ptrace.SpanKindServer.String(): 74 | span.Tags = append(span.Tags, model.String(string(ext.SpanKind), string(ext.SpanKindRPCServerEnum))) 75 | case ptrace.SpanKindClient.String(): 76 | span.Tags = append(span.Tags, model.String(string(ext.SpanKind), string(ext.SpanKindRPCClientEnum))) 77 | case ptrace.SpanKindProducer.String(): 78 | span.Tags = append(span.Tags, model.String(string(ext.SpanKind), string(ext.SpanKindProducerEnum))) 79 | case ptrace.SpanKindConsumer.String(): 80 | span.Tags = append(span.Tags, model.String(string(ext.SpanKind), string(ext.SpanKindConsumerEnum))) 81 | case ptrace.SpanKindInternal.String(): 82 | span.Tags = append(span.Tags, model.String(string(ext.SpanKind), "internal")) 83 | } 84 | } 85 | case common.AttributeDurationNano: 86 | if vv, ok := v.(int64); !ok { 87 | return nil, fmt.Errorf("duration nanoseconds is type %T", v) 88 | } else { 89 | span.Duration = time.Duration(vv) 90 | } 91 | case common.AttributeEndTimeUnixNano: 92 | // Jaeger likes duration ^^ 93 | continue 94 | case common.AttributeParentSpanID: 95 | if vv, ok := v.(string); !ok { 96 | return nil, fmt.Errorf("parent span ID is type %T", v) 97 | } else { 98 | parentSpanRef.SpanID, err = model.SpanIDFromString(vv) 99 | } 100 | if err != nil { 101 | return nil, err 102 | } 103 | case semconv.OtelStatusCode: 104 | if vv, ok := v.(string); !ok { 105 | return nil, fmt.Errorf("status code is type %T", v) 106 | } else { 107 | span.Tags = append(span.Tags, model.String(k, vv)) 108 | if v == ptrace.StatusCodeError { 109 | span.Tags = append(span.Tags, model.Bool("error", true)) 110 | } 111 | } 112 | case common.AttributeAttributes: 113 | if vv, ok := v.(string); !ok { 114 | return nil, fmt.Errorf("attribute is type %T", v) 115 | } else { 116 | m := make(map[string]interface{}) 117 | if err = json.Unmarshal([]byte(vv), &m); err != nil { 118 | return nil, fmt.Errorf("failed to unmarshal JSON-encoded attributes: %w", err) 119 | } 120 | for attributeKey, attributeValue := range m { 121 | span.Tags = append(span.Tags, kvToKeyValue(attributeKey, attributeValue)) 122 | } 123 | } 124 | default: 125 | if common.ResourceNamespace.MatchString(k) { 126 | span.Process.Tags = append(span.Process.Tags, kvToKeyValue(k, v)) 127 | } else { 128 | span.Tags = append(span.Tags, kvToKeyValue(k, v)) 129 | } 130 | } 131 | } 132 | 133 | if span.StartTime.IsZero() || (span.TraceID.High == 0 && span.TraceID.Low == 0) || span.SpanID == 0 { 134 | return nil, errors.New("incomplete span") 135 | } 136 | if 
parentSpanRef.SpanID != 0 { 137 | span.References = []model.SpanRef{parentSpanRef} 138 | } 139 | 140 | return &span, nil 141 | } 142 | 143 | func kvToKeyValue(k string, v interface{}) model.KeyValue { 144 | switch vv := v.(type) { 145 | case bool: 146 | return model.Bool(k, vv) 147 | case float64: 148 | return model.Float64(k, vv) 149 | case int64: 150 | return model.Int64(k, vv) 151 | case string: 152 | return model.String(k, vv) 153 | default: 154 | return model.String(k, fmt.Sprint(vv)) 155 | } 156 | } 157 | 158 | func recordToLog(record map[string]interface{}) (model.TraceID, model.SpanID, *model.Log, error) { 159 | log := new(model.Log) 160 | var traceID model.TraceID 161 | var spanID model.SpanID 162 | var err error 163 | for k, v := range record { 164 | if vv, ok := v.(string); ok && vv == "NULL" { 165 | continue 166 | } 167 | switch k { 168 | case common.AttributeTime: 169 | if vv, ok := v.(time.Time); !ok { 170 | return model.TraceID{}, 0, nil, fmt.Errorf("time is type %T", v) 171 | } else { 172 | log.Timestamp = vv 173 | } 174 | case common.AttributeTraceID: 175 | if vv, ok := v.(string); !ok { 176 | return model.TraceID{}, 0, nil, fmt.Errorf("trace ID is type %T", v) 177 | } else if traceID, err = model.TraceIDFromString(vv); err != nil { 178 | return model.TraceID{}, 0, nil, err 179 | } 180 | case common.AttributeSpanID: 181 | if vv, ok := v.(string); !ok { 182 | return model.TraceID{}, 0, nil, fmt.Errorf("span ID is type %T", v) 183 | } else if spanID, err = model.SpanIDFromString(vv); err != nil { 184 | return model.TraceID{}, 0, nil, err 185 | } 186 | case semconv.AttributeEventName: 187 | if vv, ok := v.(string); !ok { 188 | return model.TraceID{}, 0, nil, fmt.Errorf("log name is type %T", v) 189 | } else { 190 | log.Fields = append(log.Fields, model.String("event", vv)) 191 | } 192 | case common.AttributeBody: 193 | if vv, ok := v.(string); !ok { 194 | return model.TraceID{}, 0, nil, fmt.Errorf("log body is type %T", v) 195 | } else { 196 | log.Fields = append(log.Fields, model.String("message", vv)) 197 | } 198 | case common.AttributeAttributes: 199 | vv, ok := v.(string) 200 | if !ok { 201 | return model.TraceID{}, 0, nil, fmt.Errorf("log attributes attribute is type %T", v) 202 | } 203 | var m map[string]interface{} 204 | if err = json.Unmarshal([]byte(vv), &m); err != nil { 205 | return model.TraceID{}, 0, nil, fmt.Errorf("failed to unmarshal attributes from JSON: %w", err) 206 | } 207 | for mk, mv := range m { 208 | switch mvv := mv.(type) { 209 | case nil: 210 | log.Fields = append(log.Fields, model.String(mk, "")) 211 | case bool: 212 | log.Fields = append(log.Fields, model.Bool(mk, mvv)) 213 | case float64: 214 | if intPart, fracPart := math.Modf(mvv); fracPart == 0 { 215 | log.Fields = append(log.Fields, model.Int64(mk, int64(intPart))) 216 | } else { 217 | log.Fields = append(log.Fields, model.Float64(mk, mvv)) 218 | } 219 | case string: 220 | log.Fields = append(log.Fields, model.String(mk, mvv)) 221 | case []interface{}: 222 | s := make([]string, len(mvv)) 223 | for i := range mvv { 224 | if mvv[i] == nil { 225 | s[i] = "" 226 | } else { 227 | s[i] = fmt.Sprint(mvv[i]) 228 | } 229 | } 230 | log.Fields = append(log.Fields, model.String(mk, strings.Join(s, ","))) 231 | default: 232 | // ignore 233 | } 234 | } 235 | case semconv.AttributeServiceName: 236 | // The span has this information, no need to duplicate 237 | default: 238 | log.Fields = append(log.Fields, kvToKeyValue(k, v)) 239 | } 240 | } 241 | 242 | if log.Timestamp.IsZero() || (traceID.High == 0 && 
traceID.Low == 0) || spanID == 0 { 243 | return model.TraceID{}, 0, nil, errors.New("incomplete span event") 244 | } 245 | 246 | return traceID, spanID, log, nil 247 | } 248 | 249 | func recordToSpanRef(record map[string]interface{}) (model.TraceID, model.SpanID, *model.SpanRef, error) { 250 | spanRef := &model.SpanRef{ 251 | RefType: model.FollowsFrom, 252 | } 253 | var traceID model.TraceID 254 | var spanID model.SpanID 255 | var err error 256 | for k, v := range record { 257 | if vv, ok := v.(string); ok && vv == "NULL" { 258 | continue 259 | } 260 | switch k { 261 | case common.AttributeTraceID: 262 | if vv, ok := v.(string); !ok { 263 | return model.TraceID{}, 0, nil, fmt.Errorf("trace ID is type %T", v) 264 | } else if traceID, err = model.TraceIDFromString(vv); err != nil { 265 | return model.TraceID{}, 0, nil, err 266 | } 267 | case common.AttributeSpanID: 268 | if vv, ok := v.(string); !ok { 269 | return model.TraceID{}, 0, nil, fmt.Errorf("span ID is type %T", v) 270 | } else if spanID, err = model.SpanIDFromString(vv); err != nil { 271 | return model.TraceID{}, 0, nil, err 272 | } 273 | case common.AttributeLinkedTraceID: 274 | if vv, ok := v.(string); !ok { 275 | return model.TraceID{}, 0, nil, fmt.Errorf("linked trace ID is type %T", v) 276 | } else if spanRef.TraceID, err = model.TraceIDFromString(vv); err != nil { 277 | return model.TraceID{}, 0, nil, err 278 | } 279 | case common.AttributeLinkedSpanID: 280 | if vv, ok := v.(string); !ok { 281 | return model.TraceID{}, 0, nil, fmt.Errorf("linked span ID is type %T", v) 282 | } else if spanRef.SpanID, err = model.SpanIDFromString(vv); err != nil { 283 | return model.TraceID{}, 0, nil, err 284 | } 285 | default: 286 | // OpenTelemetry links do not have timestamps/attributes/fields/labels 287 | } 288 | } 289 | 290 | if (spanRef.TraceID.High == 0 && spanRef.TraceID.Low == 0) || spanRef.SpanID == 0 || (traceID.High == 0 && traceID.Low == 0) || spanID == 0 { 291 | return model.TraceID{}, 0, nil, errors.New("incomplete span link") 292 | } 293 | 294 | return traceID, spanID, spanRef, nil 295 | } 296 | 297 | var errTableNotFound = regexp.MustCompile(`table '\S+' not found`) 298 | 299 | func isTableNotFound(err error) bool { 300 | aerr, ok := err.(adbc.Error) 301 | return ok && errTableNotFound.MatchString(aerr.Msg) 302 | } 303 | -------------------------------------------------------------------------------- /jaeger-influxdb/internal/config.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/jaegertracing/jaeger/ports" 9 | "github.com/spf13/cobra" 10 | "github.com/spf13/viper" 11 | "go.uber.org/zap/zapcore" 12 | ) 13 | 14 | type Config struct { 15 | LogLevel string 16 | ListenAddr string 17 | InfluxdbAddr string 18 | InfluxdbTLSDisable bool 19 | InfluxdbTimeout time.Duration 20 | InfluxdbBucket string 21 | InfluxdbBucketArchive string 22 | InfluxdbToken string 23 | InfluxdbQueryMetadata map[string]string 24 | } 25 | 26 | func (c *Config) Init(command *cobra.Command) error { 27 | viper.AutomaticEnv() 28 | viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) 29 | for _, f := range []struct { 30 | pointer interface{} 31 | name string 32 | defaultValue interface{} 33 | usage string 34 | }{ 35 | { 36 | pointer: &c.LogLevel, 37 | name: "log-level", 38 | defaultValue: zapcore.InfoLevel.String(), 39 | usage: "log level (zap)", 40 | }, 41 | { 42 | pointer: &c.ListenAddr, 43 | name: "listen-addr", 44 | defaultValue: 
fmt.Sprintf(":%d", ports.RemoteStorageGRPC), 45 | usage: "Jaeger gRPC storage service (this process) host:port address", 46 | }, 47 | { 48 | pointer: &c.InfluxdbAddr, 49 | name: "influxdb-addr", 50 | usage: "InfluxDB service host:port", 51 | }, 52 | { 53 | pointer: &c.InfluxdbTLSDisable, 54 | name: "influxdb-tls-disable", 55 | usage: "Do not use TLS to connect to InfluxDB (mostly for development)", 56 | }, 57 | { 58 | pointer: &c.InfluxdbTimeout, 59 | name: "influxdb-timeout", 60 | defaultValue: 15 * time.Second, 61 | usage: "InfluxDB query timeout", 62 | }, 63 | { 64 | pointer: &c.InfluxdbBucket, 65 | name: "influxdb-bucket", 66 | usage: "InfluxDB bucket name, containing traces, logs, metrics (query only)", 67 | }, 68 | { 69 | pointer: &c.InfluxdbBucketArchive, 70 | name: "influxdb-bucket-archive", 71 | usage: "InfluxDB bucket name, for archiving traces (optional; write and query permissions required)", 72 | }, 73 | { 74 | pointer: &c.InfluxdbToken, 75 | name: "influxdb-token", 76 | usage: "InfluxDB API access token", 77 | }, 78 | { 79 | pointer: &c.InfluxdbQueryMetadata, 80 | name: "influxdb-query-metadata", 81 | usage: `gRPC metadata sent with SQL queries ("foo=bar") (optional; specify zero to many times)`, 82 | }, 83 | } { 84 | switch v := f.pointer.(type) { 85 | case *string: 86 | var defaultValue string 87 | if f.defaultValue != nil { 88 | defaultValue = f.defaultValue.(string) 89 | } 90 | command.Flags().StringVar(v, f.name, defaultValue, f.usage) 91 | if err := viper.BindPFlag(f.name, command.Flags().Lookup(f.name)); err != nil { 92 | return err 93 | } 94 | *v = viper.GetString(f.name) 95 | case *time.Duration: 96 | var defaultValue time.Duration 97 | if f.defaultValue != nil { 98 | defaultValue = f.defaultValue.(time.Duration) 99 | } 100 | command.Flags().DurationVar(v, f.name, defaultValue, f.usage) 101 | if err := viper.BindPFlag(f.name, command.Flags().Lookup(f.name)); err != nil { 102 | return err 103 | } 104 | *v = viper.GetDuration(f.name) 105 | case *bool: 106 | var defaultValue bool 107 | if f.defaultValue != nil { 108 | defaultValue = f.defaultValue.(bool) 109 | } 110 | command.Flags().BoolVar(v, f.name, defaultValue, f.usage) 111 | if err := viper.BindPFlag(f.name, command.Flags().Lookup(f.name)); err != nil { 112 | return err 113 | } 114 | *v = viper.GetBool(f.name) 115 | case *map[string]string: 116 | var defaultValue map[string]string 117 | if f.defaultValue != nil { 118 | defaultValue = f.defaultValue.(map[string]string) 119 | } 120 | command.Flags().StringToStringVar(v, f.name, defaultValue, f.usage) 121 | if err := viper.BindPFlag(f.name, command.Flags().Lookup(f.name)); err != nil { 122 | return err 123 | } 124 | *v = viper.GetStringMapString(f.name) 125 | default: 126 | return fmt.Errorf("flag type %T not implemented", f.pointer) 127 | } 128 | } 129 | return nil 130 | } 131 | -------------------------------------------------------------------------------- /jaeger-influxdb/internal/influxdb.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "errors" 7 | "fmt" 8 | "net" 9 | "net/http" 10 | "net/url" 11 | "regexp" 12 | "strings" 13 | "time" 14 | 15 | "github.com/apache/arrow-adbc/go/adbc" 16 | "github.com/apache/arrow-adbc/go/adbc/driver/flightsql" 17 | _ "github.com/apache/arrow-adbc/go/adbc/sqldriver/flightsql" 18 | "github.com/golang/groupcache/lru" 19 | "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared" 20 | 
"github.com/jaegertracing/jaeger/storage/dependencystore" 21 | "github.com/jaegertracing/jaeger/storage/spanstore" 22 | "go.uber.org/multierr" 23 | "go.uber.org/zap" 24 | ) 25 | 26 | var _ shared.StoragePlugin = (*InfluxdbStorage)(nil) 27 | var _ shared.ArchiveStoragePlugin = (*InfluxdbStorage)(nil) 28 | 29 | const ( 30 | tableSpans = "spans" 31 | tableLogs = "logs" 32 | tableSpanLinks = "span-links" 33 | 34 | tableSpanMetricsCalls = "calls__sum" // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.78.0/connector/spanmetricsconnector 35 | tableSpanMetricsDuration = "duration_ms_histogram" // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.78.0/connector/spanmetricsconnector 36 | tableServiceGraphRequestCount = "traces_service_graph_request_total__sum" // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.78.0/connector/servicegraphconnector 37 | tableServiceGraphRequestDuration = "traces_service_graph_request_duration_seconds__histogram" // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.78.0/connector/servicegraphconnector 38 | columnServiceGraphClient = "client" 39 | columnServiceGraphServer = "server" 40 | columnServiceGraphCount = "value_cumulative_monotonic_int" 41 | 42 | uriSchemeSecure = "grpc+tls" 43 | uriSchemeNotSecure = "grpc+tcp" 44 | ) 45 | 46 | type InfluxdbStorage struct { 47 | logger *zap.Logger 48 | 49 | queryTimeout time.Duration 50 | 51 | db *sql.DB 52 | reader spanstore.Reader 53 | readerDependency dependencystore.Reader 54 | writer spanstore.Writer 55 | 56 | dbArchive *sql.DB 57 | readerArchive spanstore.Reader 58 | writerArchive spanstore.Writer 59 | } 60 | 61 | func NewInfluxdbStorage(ctx context.Context, config *Config) (*InfluxdbStorage, error) { 62 | logger := LoggerFromContext(ctx) 63 | 64 | influxdbAddr, err := composeHostPortFromAddr(logger, config.InfluxdbAddr, config.InfluxdbTLSDisable) 65 | if err != nil { 66 | return nil, err 67 | } 68 | if config.InfluxdbBucket == "" { 69 | return nil, fmt.Errorf("influxdb-bucket not specified, either by flag or env var") 70 | } 71 | if config.InfluxdbBucket == config.InfluxdbBucketArchive { 72 | return nil, fmt.Errorf("primary bucket and archive bucket must be different, but both are set to '%s'", config.InfluxdbBucket) 73 | } 74 | if config.InfluxdbBucketArchive == "" { 75 | logger.Warn("influxdb-bucket-archive not specified, so trace archiving is disabled") 76 | } 77 | 78 | is := &InfluxdbStorage{ 79 | logger: logger, 80 | queryTimeout: config.InfluxdbTimeout, 81 | } 82 | 83 | uriScheme := uriSchemeSecure 84 | if config.InfluxdbTLSDisable { 85 | uriScheme = uriSchemeNotSecure 86 | } 87 | dsn := []string{ 88 | fmt.Sprintf("%s=%s://%s/", adbc.OptionKeyURI, uriScheme, influxdbAddr), 89 | fmt.Sprintf("%s=Bearer %s", flightsql.OptionAuthorizationHeader, config.InfluxdbToken), 90 | fmt.Sprintf("%s=%s", flightsql.OptionRPCCallHeaderPrefix+"bucket-name", config.InfluxdbBucket), 91 | } 92 | for k, v := range config.InfluxdbQueryMetadata { 93 | k, v = strings.TrimSpace(k), strings.TrimSpace(v) 94 | if len(k) == 0 || strings.Contains(k, ";") || strings.Contains(v, ";") { 95 | return nil, fmt.Errorf("invalid gRPC metadata: %s=%s", k, v) 96 | } 97 | dsn = append(dsn, fmt.Sprintf("%s=%s", flightsql.OptionRPCCallHeaderPrefix+k, v)) 98 | } 99 | 100 | db, err := sql.Open("flightsql", strings.Join(dsn, " ; ")) 101 | if err != nil { 102 | row := db.QueryRowContext(ctx, "SELECT 1") 103 | var v int 104 | err = multierr.Combine(row.Scan(&v)) 
105 | if err == nil && v != 1 { 106 | err = errors.New("failed to ping database") 107 | } 108 | } 109 | if err != nil { 110 | return nil, fmt.Errorf("failed to contact InfluxDB query service: %w", err) 111 | } 112 | 113 | reader := &influxdbReader{ 114 | logger: logger.With(zap.String("influxdb", "reader")), 115 | executeQuery: is.executeQuery, 116 | db: db, 117 | tableSpans: tableSpans, 118 | tableLogs: tableLogs, 119 | tableSpanLinks: tableSpanLinks, 120 | } 121 | readerDependency := &influxdbDependencyReader{ 122 | logger: logger.With(zap.String("influxdb", "reader-dependency")), 123 | ir: reader, 124 | } 125 | writer := &influxdbWriterNoop{ 126 | logger: logger.With(zap.String("influxdb", "writer")), 127 | } 128 | 129 | is.db = db 130 | is.reader = reader 131 | is.readerDependency = readerDependency 132 | is.writer = writer 133 | 134 | var readerArchive spanstore.Reader 135 | var writerArchive spanstore.Writer 136 | var dbArchive *sql.DB 137 | 138 | if config.InfluxdbBucketArchive != "" { 139 | dsnArchive := strings.Join([]string{ 140 | fmt.Sprintf("%s=%s://%s/", adbc.OptionKeyURI, uriScheme, influxdbAddr), 141 | fmt.Sprintf("%s=Bearer %s", flightsql.OptionAuthorizationHeader, config.InfluxdbToken), 142 | fmt.Sprintf("%s=%s", flightsql.OptionRPCCallHeaderPrefix+"bucket-name", config.InfluxdbBucketArchive), 143 | }, " ; ") 144 | 145 | dbArchive, err = sql.Open("flightsql", dsnArchive) 146 | if err != nil { 147 | return nil, err 148 | } 149 | 150 | readerArchive = &influxdbReader{ 151 | logger: logger.With(zap.String("influxdb", "reader-archive")), 152 | executeQuery: is.executeQuery, 153 | db: dbArchive, 154 | tableSpans: tableSpans, 155 | tableLogs: tableLogs, 156 | tableSpanLinks: tableSpanLinks, 157 | } 158 | writerArchive = &influxdbWriterArchive{ 159 | logger: logger.With(zap.String("influxdb", "writer-archive")), 160 | executeQuery: is.executeQuery, 161 | recentTraces: lru.New(100), 162 | httpClient: &http.Client{Timeout: config.InfluxdbTimeout}, 163 | authToken: config.InfluxdbToken, 164 | 165 | dbSrc: db, 166 | bucketNameSrc: config.InfluxdbBucket, 167 | tableSpansSrc: tableSpans, 168 | tableLogsSrc: tableLogs, 169 | tableSpanLinksSrc: tableSpanLinks, 170 | 171 | writeURLArchive: composeWriteURL(influxdbAddr, config.InfluxdbBucketArchive), 172 | bucketNameArchive: config.InfluxdbBucketArchive, 173 | tableSpansArchive: tableSpans, 174 | tableLogsArchive: tableLogs, 175 | tableSpanLinksArchive: tableSpanLinks, 176 | } 177 | 178 | is.dbArchive = dbArchive 179 | is.readerArchive = readerArchive 180 | is.writerArchive = writerArchive 181 | } 182 | 183 | return is, nil 184 | } 185 | 186 | func (is *InfluxdbStorage) Close() error { 187 | err := is.db.Close() 188 | if is.dbArchive != nil { 189 | err = multierr.Append(err, is.dbArchive.Close()) 190 | } 191 | return err 192 | } 193 | 194 | func (is *InfluxdbStorage) SpanReader() spanstore.Reader { 195 | return is.reader 196 | } 197 | 198 | func (is *InfluxdbStorage) DependencyReader() dependencystore.Reader { 199 | return is.readerDependency 200 | } 201 | 202 | func (is *InfluxdbStorage) SpanWriter() spanstore.Writer { 203 | return is.writer 204 | } 205 | 206 | func (is *InfluxdbStorage) ArchiveSpanReader() spanstore.Reader { 207 | return is.readerArchive 208 | } 209 | 210 | func (is *InfluxdbStorage) ArchiveSpanWriter() spanstore.Writer { 211 | return is.writerArchive 212 | } 213 | 214 | func (is *InfluxdbStorage) executeQuery(ctx context.Context, db *sql.DB, query string, f func(record map[string]interface{}) error) error { 215 | ctx, 
cancel := context.WithTimeout(ctx, is.queryTimeout) 216 | defer cancel() 217 | 218 | is.logger.Debug("executing query", zap.String("query", query)) 219 | 220 | rows, err := db.QueryContext(ctx, query) 221 | if err != nil { 222 | return err 223 | } 224 | defer func() { _ = rows.Close() }() 225 | 226 | columns, err := rows.Columns() 227 | if err != nil { 228 | return err 229 | } 230 | m := make(map[string]interface{}, len(columns)) 231 | 232 | rowValues := make([]interface{}, len(columns)) 233 | for i := range rowValues { 234 | rowValues[i] = new(interface{}) 235 | } 236 | 237 | for rows.Next() { 238 | if err = rows.Scan(rowValues[:]...); err != nil { 239 | return err 240 | } 241 | for i, columnName := range columns { 242 | v := rowValues[i].(*interface{}) 243 | if v == nil || *v == nil { 244 | delete(m, columnName) 245 | } else { 246 | m[columnName] = *v 247 | } 248 | } 249 | if err = f(m); err != nil { 250 | return err 251 | } 252 | } 253 | 254 | return multierr.Combine(rows.Err(), rows.Close()) 255 | } 256 | 257 | func composeWriteURL(influxdbClientHost, influxdbBucket string) string { 258 | writeURL := &url.URL{Scheme: "https", Host: influxdbClientHost, Path: "/api/v2/write"} 259 | 260 | queryValues := writeURL.Query() 261 | queryValues.Set("precision", "ns") 262 | queryValues.Set("bucket", influxdbBucket) 263 | writeURL.RawQuery = queryValues.Encode() 264 | 265 | return writeURL.String() 266 | } 267 | 268 | func composeHostPortFromAddr(logger *zap.Logger, influxdbAddr string, notSecureFlagHint bool) (string, error) { 269 | errInvalid := func(err error) error { 270 | if err == nil { 271 | return fmt.Errorf("influxdb-addr value is invalid '%s'", influxdbAddr) 272 | } 273 | return fmt.Errorf("influxdb-addr value is invalid '%s': %w", influxdbAddr, err) 274 | } 275 | hostPort := influxdbAddr 276 | 277 | if hostPort == "" { 278 | return "", errInvalid(nil) 279 | } 280 | 281 | reValidURL := regexp.MustCompile(`^(?:([\w+-]*):)?//([\w.-]*)(?::(\w*))?/?$`) 282 | 283 | if parts := reValidURL.FindStringSubmatch(hostPort); len(parts) == 4 { 284 | // Forgive format scheme://host:port, but not unconditionally 285 | scheme, host, port := parts[1], parts[2], parts[3] 286 | 287 | validURLSchemes := map[string]bool{ 288 | "http": true, 289 | "grpc": true, 290 | uriSchemeNotSecure: true, 291 | "https": false, 292 | uriSchemeSecure: false, 293 | } 294 | 295 | if notSecureURLScheme, found := validURLSchemes[scheme]; !found || notSecureURLScheme != notSecureFlagHint { 296 | return "", errInvalid(fmt.Errorf("URL scheme '%s' is not recognized", scheme)) 297 | } 298 | if host == "" { 299 | return "", errInvalid(errors.New("host is missing")) 300 | } 301 | if port == "" { 302 | hostPort = host 303 | } else { 304 | hostPort = net.JoinHostPort(host, port) 305 | } 306 | if scheme == "http" || scheme == "https" { 307 | logger.Warn(fmt.Sprintf("influxdb-addr value '%s' will be handled as '%s'", influxdbAddr, hostPort)) 308 | } 309 | } 310 | 311 | if !strings.Contains(hostPort, ":") { 312 | // If no port specified, assume default port 313 | hostPort += ":443" 314 | } 315 | 316 | if host, port, err := net.SplitHostPort(hostPort); err == nil { 317 | switch { 318 | case host == "": 319 | return "", errInvalid(errors.New("host is missing")) 320 | case port == "": 321 | return "", errInvalid(errors.New("port is missing")) 322 | default: 323 | return net.JoinHostPort(host, port), nil 324 | } 325 | } else { 326 | return "", errInvalid(err) 327 | } 328 | } 329 | 
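For reference, the connection path above (DSN assembly plus the `SELECT 1` ping) can be exercised on its own. The sketch below is not part of the repository and assumes placeholder host, bucket, and token values; it reuses the same option keys (`adbc.OptionKeyURI`, `flightsql.OptionAuthorizationHeader`, `flightsql.OptionRPCCallHeaderPrefix`) and the `flightsql` database/sql driver used by `NewInfluxdbStorage`:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/apache/arrow-adbc/go/adbc"
	"github.com/apache/arrow-adbc/go/adbc/driver/flightsql"
	_ "github.com/apache/arrow-adbc/go/adbc/sqldriver/flightsql" // registers the "flightsql" database/sql driver
)

func main() {
	// Placeholder connection values; substitute a real InfluxDB host, bucket, and token.
	const (
		influxdbAddr   = "influxdb.example.com:443"
		influxdbBucket = "otel"
		influxdbToken  = "<token>"
	)

	// Assemble the DSN the same way NewInfluxdbStorage does: key=value pairs joined by " ; ".
	dsn := strings.Join([]string{
		fmt.Sprintf("%s=grpc+tls://%s/", adbc.OptionKeyURI, influxdbAddr),
		fmt.Sprintf("%s=Bearer %s", flightsql.OptionAuthorizationHeader, influxdbToken),
		fmt.Sprintf("%s=%s", flightsql.OptionRPCCallHeaderPrefix+"bucket-name", influxdbBucket),
	}, " ; ")

	db, err := sql.Open("flightsql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The same SELECT 1 ping used by NewInfluxdbStorage to verify connectivity.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	var v int
	if err := db.QueryRowContext(ctx, "SELECT 1").Scan(&v); err != nil {
		log.Fatal(err)
	}
	log.Printf("connected, SELECT 1 returned %d", v)
}
```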
-------------------------------------------------------------------------------- /jaeger-influxdb/internal/influxdb_reader.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "time" 7 | 8 | "github.com/jaegertracing/jaeger/model" 9 | "github.com/jaegertracing/jaeger/storage/dependencystore" 10 | "github.com/jaegertracing/jaeger/storage/spanstore" 11 | semconv "go.opentelemetry.io/collector/semconv/v1.16.0" 12 | "go.uber.org/zap" 13 | 14 | "github.com/influxdata/influxdb-observability/common" 15 | ) 16 | 17 | var _ spanstore.Reader = (*influxdbReader)(nil) 18 | var _ dependencystore.Reader = (*influxdbDependencyReader)(nil) 19 | 20 | type influxdbReader struct { 21 | logger *zap.Logger 22 | 23 | executeQuery func(ctx context.Context, db *sql.DB, query string, f func(record map[string]interface{}) error) error 24 | 25 | db *sql.DB 26 | tableSpans, tableLogs, tableSpanLinks string 27 | } 28 | 29 | func (ir *influxdbReader) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { 30 | // Get spans 31 | spansBySpanID := make(map[model.SpanID]*model.Span) 32 | 33 | f := func(record map[string]interface{}) error { 34 | span, err := recordToSpan(record) 35 | if err != nil { 36 | ir.logger.Warn("failed to convert span to Span", zap.Error(err)) 37 | } else { 38 | spansBySpanID[span.SpanID] = span 39 | } 40 | return nil 41 | } 42 | err := ir.executeQuery(ctx, ir.db, queryGetTraceSpans(ir.tableSpans, traceID), f) 43 | switch { 44 | case err != nil && !isTableNotFound(err): // ignore table not found (schema-on-write) 45 | return nil, err 46 | case len(spansBySpanID) == 0: 47 | return nil, spanstore.ErrTraceNotFound 48 | } 49 | 50 | // Get events 51 | f = func(record map[string]interface{}) error { 52 | if _, spanID, log, err := recordToLog(record); err != nil { 53 | ir.logger.Warn("failed to convert event to Log", zap.Error(err)) 54 | } else if span, ok := spansBySpanID[spanID]; !ok { 55 | ir.logger.Warn("span event contains unknown span ID") 56 | } else { 57 | span.Logs = append(span.Logs, *log) 58 | } 59 | return nil 60 | } 61 | err = ir.executeQuery(ctx, ir.db, queryGetTraceEvents(ir.tableLogs, traceID), f) 62 | if err != nil && !isTableNotFound(err) { // ignore table not found (schema-on-write) 63 | return nil, err 64 | } 65 | 66 | // Get links 67 | f = func(record map[string]interface{}) error { 68 | _, spanID, spanRef, err := recordToSpanRef(record) 69 | if err != nil { 70 | ir.logger.Warn("failed to convert link to SpanRef", zap.Error(err)) 71 | } else if span, found := spansBySpanID[spanID]; !found { 72 | ir.logger.Warn("link contains unknown span ID") 73 | } else { 74 | span.References = append(span.References, *spanRef) 75 | } 76 | return nil 77 | } 78 | 79 | err = ir.executeQuery(ctx, ir.db, queryGetTraceLinks(ir.tableSpanLinks, traceID), f) 80 | if err != nil && !isTableNotFound(err) { // ignore table not found (schema-on-write) 81 | return nil, err 82 | } 83 | 84 | // Assemble trace 85 | trace := &model.Trace{ 86 | Spans: make([]*model.Span, 0, len(spansBySpanID)), 87 | } 88 | for _, span := range spansBySpanID { 89 | trace.Spans = append(trace.Spans, span) 90 | } 91 | return trace, nil 92 | } 93 | 94 | func (ir *influxdbReader) GetServices(ctx context.Context) ([]string, error) { 95 | var services []string 96 | f := func(record map[string]interface{}) error { 97 | if v, found := record[semconv.AttributeServiceName]; found && v != nil { 98 | services = 
append(services, v.(string)) 99 | } 100 | return nil 101 | } 102 | 103 | err := ir.executeQuery(ctx, ir.db, queryGetServices(), f) 104 | if err != nil && !isTableNotFound(err) { // ignore table not found (schema-on-write) 105 | return nil, err 106 | } 107 | return services, nil 108 | } 109 | 110 | func (ir *influxdbReader) GetOperations(ctx context.Context, operationQueryParameters spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { 111 | var operations []spanstore.Operation 112 | f := func(record map[string]interface{}) error { 113 | if v, found := record[common.AttributeSpanName]; found && v != nil { 114 | operation := spanstore.Operation{Name: v.(string)} 115 | if spanKind, found := record[common.AttributeSpanKind]; found && spanKind != nil { 116 | operation.SpanKind = spanKind.(string) 117 | } 118 | operations = append(operations, operation) 119 | } 120 | return nil 121 | } 122 | 123 | err := ir.executeQuery(ctx, ir.db, queryGetOperations(operationQueryParameters.ServiceName), f) 124 | if err != nil && !isTableNotFound(err) { // ignore table not found (schema-on-write) 125 | return nil, err 126 | } 127 | return operations, nil 128 | } 129 | 130 | func (ir *influxdbReader) FindTraces(ctx context.Context, traceQueryParameters *spanstore.TraceQueryParameters) ([]*model.Trace, error) { 131 | // Get trace IDs 132 | traceIDs, err := ir.FindTraceIDs(ctx, traceQueryParameters) 133 | if err != nil || len(traceIDs) == 0 { 134 | return nil, err 135 | } 136 | 137 | // Get traces 138 | spansBySpanIDByTraceID := make(map[model.TraceID]map[model.SpanID]*model.Span) 139 | f := func(record map[string]interface{}) error { 140 | if span, err := recordToSpan(record); err != nil { 141 | return err 142 | } else if trace, found := spansBySpanIDByTraceID[span.TraceID]; !found { 143 | spansBySpanIDByTraceID[span.TraceID] = map[model.SpanID]*model.Span{span.SpanID: span} 144 | } else { 145 | trace[span.SpanID] = span 146 | } 147 | return nil 148 | } 149 | 150 | err = ir.executeQuery(ctx, ir.db, queryGetTraceSpans(ir.tableSpans, traceIDs...), f) 151 | if err != nil && !isTableNotFound(err) { // ignore table not found (schema-on-write) 152 | return nil, err 153 | } 154 | 155 | // Get events 156 | f = func(record map[string]interface{}) error { 157 | if traceID, spanID, log, err := recordToLog(record); err != nil { 158 | return err 159 | } else if trace, found := spansBySpanIDByTraceID[traceID]; !found { 160 | ir.logger.Warn("trace not found for log") 161 | } else if span, found := trace[spanID]; !found { 162 | ir.logger.Warn("span not found for log") 163 | } else { 164 | span.Logs = append(span.Logs, *log) 165 | } 166 | return nil 167 | } 168 | 169 | err = ir.executeQuery(ctx, ir.db, queryGetTraceEvents(ir.tableLogs, traceIDs...), f) 170 | if err != nil && !isTableNotFound(err) { // ignore table not found (schema-on-write) 171 | return nil, err 172 | } 173 | 174 | // Get links 175 | f = func(record map[string]interface{}) error { 176 | if traceID, spanID, spanRef, err := recordToSpanRef(record); err != nil { 177 | return err 178 | } else if trace, found := spansBySpanIDByTraceID[traceID]; !found { 179 | ir.logger.Warn("trace not found for span ref") 180 | } else if span, found := trace[spanID]; !found { 181 | ir.logger.Warn("span not found for span ref") 182 | } else { 183 | span.References = append(span.References, *spanRef) 184 | } 185 | return nil 186 | } 187 | 188 | err = ir.executeQuery(ctx, ir.db, queryGetTraceLinks(ir.tableSpanLinks, traceIDs...), f) 189 | if err != nil && 
!isTableNotFound(err) { // ignore table not found (schema-on-write) 190 | return nil, err 191 | } 192 | 193 | traces := make([]*model.Trace, 0, len(spansBySpanIDByTraceID)) 194 | for _, spans := range spansBySpanIDByTraceID { 195 | trace := &model.Trace{Spans: make([]*model.Span, 0, len(spansBySpanIDByTraceID))} 196 | for _, span := range spans { 197 | trace.Spans = append(trace.Spans, span) 198 | } 199 | traces = append(traces, trace) 200 | } 201 | 202 | return traces, nil 203 | } 204 | 205 | func (ir *influxdbReader) FindTraceIDs(ctx context.Context, traceQueryParameters *spanstore.TraceQueryParameters) ([]model.TraceID, error) { 206 | var traceIDs []model.TraceID 207 | f := func(record map[string]interface{}) error { 208 | if v, found := record[common.AttributeTraceID]; found && v != nil { 209 | traceID, err := model.TraceIDFromString(v.(string)) 210 | if err != nil { 211 | return err 212 | } 213 | traceIDs = append(traceIDs, traceID) 214 | } 215 | return nil 216 | } 217 | 218 | err := ir.executeQuery(ctx, ir.db, queryFindTraceIDs(ir.tableSpans, traceQueryParameters), f) 219 | if err != nil && !isTableNotFound(err) { // ignore table not found (schema-on-write) 220 | return nil, err 221 | } 222 | return traceIDs, nil 223 | } 224 | 225 | type influxdbDependencyReader struct { 226 | logger *zap.Logger 227 | ir *influxdbReader 228 | } 229 | 230 | func (idr *influxdbDependencyReader) GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]model.DependencyLink, error) { 231 | var dependencyLinks []model.DependencyLink 232 | 233 | f := func(record map[string]interface{}) error { 234 | var parentService string 235 | if v, found := record[columnServiceGraphClient]; !found || v == nil { 236 | idr.logger.Warn("parent service not found in dependency link") 237 | return nil 238 | } else { 239 | parentService = v.(string) 240 | } 241 | var childService string 242 | if v, found := record[columnServiceGraphServer]; !found || v == nil { 243 | idr.logger.Warn("child service not found in dependency link") 244 | return nil 245 | } else { 246 | childService = v.(string) 247 | } 248 | var calls int64 249 | if v, found := record[columnServiceGraphCount]; !found || v == nil { 250 | idr.logger.Warn("calls not found in dependency link") 251 | return nil 252 | } else { 253 | calls = v.(int64) 254 | } 255 | 256 | dependencyLinks = append(dependencyLinks, model.DependencyLink{ 257 | Parent: parentService, 258 | Child: childService, 259 | CallCount: uint64(calls), 260 | }) 261 | 262 | return nil 263 | } 264 | 265 | err := idr.ir.executeQuery(ctx, idr.ir.db, queryGetDependencies(endTs, lookback), f) 266 | if err != nil && !isTableNotFound(err) { // ignore table not found (schema-on-write) 267 | return nil, err 268 | } 269 | return dependencyLinks, nil 270 | } 271 | -------------------------------------------------------------------------------- /jaeger-influxdb/internal/influxdb_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "go.uber.org/zap" 10 | ) 11 | 12 | func TestComposeHostPortFromAddr(t *testing.T) { 13 | logger := zap.NewNop() 14 | 15 | for _, testCase := range []struct { 16 | influxdbAddr string 17 | disableTLS bool 18 | expectedValue string 19 | expectError bool 20 | }{ 21 | {"host.tld:80", false, "host.tld:80", false}, 22 | {"host.tld:80", true, "host.tld:80", false}, 23 | {"host:80", false, "host:80", false}, 24 | 
{"host:80", true, "host:80", false}, 25 | {"host.tld", false, "host.tld:443", false}, 26 | {"host.tld", true, "host.tld:443", false}, 27 | {"host", false, "host:443", false}, 28 | {"host", true, "host:443", false}, 29 | {"http://host:80", true, "host:80", false}, 30 | {"http://host", true, "host:443", false}, 31 | {"http://host:", true, "host:443", false}, 32 | {"grpc://host:80", true, "host:80", false}, 33 | {"grpc+tcp://host:80", true, "host:80", false}, 34 | {"https://host:80", false, "host:80", false}, 35 | {"https://host", false, "host:443", false}, 36 | {"https://host:", false, "host:443", false}, 37 | {"grpc+tls://host:80", false, "host:80", false}, 38 | 39 | {":80", false, "", true}, 40 | {":80", true, "", true}, 41 | {"host:", false, "", true}, 42 | {"host:", true, "", true}, 43 | {":", false, "", true}, 44 | {":", true, "", true}, 45 | {"", false, "", true}, 46 | {"", true, "", true}, 47 | {"://", true, "", true}, 48 | {"://", false, "", true}, 49 | {"//", true, "", true}, 50 | {"//", false, "", true}, 51 | {"http://host", false, "", true}, 52 | {"http://:80", true, "", true}, 53 | {"http://:", true, "", true}, 54 | {"http://", true, "", true}, 55 | {"https://host", true, "", true}, 56 | {"https://:80", false, "", true}, 57 | {"https://:", false, "", true}, 58 | {"https://", false, "", true}, 59 | } { 60 | t.Run(fmt.Sprintf("%s--%v", strings.ReplaceAll(testCase.influxdbAddr, "://", "_"), testCase.disableTLS), func(t *testing.T) { 61 | testCase := testCase 62 | actualValue, actualErr := composeHostPortFromAddr(logger, testCase.influxdbAddr, testCase.disableTLS) 63 | assert.Equal(t, testCase.expectedValue, actualValue) 64 | if testCase.expectError { 65 | assert.Error(t, actualErr) 66 | } else { 67 | assert.NoError(t, actualErr) 68 | } 69 | }) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /jaeger-influxdb/internal/influxdb_writer.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "database/sql" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "net/http" 11 | "sync" 12 | "time" 13 | 14 | "github.com/golang/groupcache/lru" 15 | "github.com/influxdata/line-protocol/v2/lineprotocol" 16 | "github.com/jaegertracing/jaeger/model" 17 | "github.com/jaegertracing/jaeger/storage/spanstore" 18 | "go.uber.org/zap" 19 | 20 | "github.com/influxdata/influxdb-observability/common" 21 | ) 22 | 23 | var _ spanstore.Writer = (*influxdbWriterNoop)(nil) 24 | var _ spanstore.Writer = (*influxdbWriterArchive)(nil) 25 | 26 | type influxdbWriterNoop struct { 27 | logger *zap.Logger 28 | } 29 | 30 | func (iwn *influxdbWriterNoop) WriteSpan(_ context.Context, _ *model.Span) error { 31 | iwn.logger.Debug("no-op WriteSpan called") 32 | return errors.New("WriteSpan is not implemented in this context") 33 | } 34 | 35 | type influxdbWriterArchive struct { 36 | logger *zap.Logger 37 | 38 | executeQuery func(ctx context.Context, db *sql.DB, query string, f func(record map[string]interface{}) error) error 39 | 40 | recentTraces *lru.Cache 41 | recentTracesMu sync.Mutex 42 | httpClient *http.Client 43 | authToken string 44 | 45 | dbSrc *sql.DB 46 | bucketNameSrc, tableSpansSrc, tableLogsSrc, tableSpanLinksSrc string 47 | 48 | writeURLArchive string 49 | bucketNameArchive, tableSpansArchive, tableLogsArchive, tableSpanLinksArchive string 50 | } 51 | 52 | func (iwa *influxdbWriterArchive) WriteSpan(ctx context.Context, span *model.Span) error { 53 | iwa.recentTracesMu.Lock() 54 | 
if _, found := iwa.recentTraces.Get(span.TraceID.High ^ span.TraceID.Low); found { 55 | iwa.recentTracesMu.Unlock() 56 | return nil 57 | } 58 | iwa.recentTraces.Add(span.TraceID.High^span.TraceID.Low, struct{}{}) 59 | iwa.recentTracesMu.Unlock() 60 | 61 | lpEncoder := new(lineprotocol.Encoder) 62 | lpEncoder.SetLax(true) 63 | lpEncoder.SetPrecision(lineprotocol.Nanosecond) 64 | 65 | // trace spans 66 | 67 | err := iwa.executeQuery(ctx, iwa.dbSrc, queryGetTraceSpans(iwa.tableSpansSrc, span.TraceID), 68 | func(row map[string]interface{}) error { 69 | lpEncoder.StartLine(iwa.tableSpansArchive) 70 | var tagCount int 71 | for _, k := range []string{common.AttributeTraceID, common.AttributeSpanID} { 72 | if stringValue, ok := row[k].(string); ok { 73 | lpEncoder.AddTag(k, stringValue) 74 | tagCount++ 75 | } else { 76 | iwa.logger.Sugar().Warn("expected column %s to have type string but got %T", k, row[k]) 77 | } 78 | } 79 | if tagCount != 2 { 80 | return fmt.Errorf("expected 2 tags, but got %d; should find columns %s, %s", 81 | tagCount, common.AttributeTraceID, common.AttributeSpanID) 82 | } 83 | for k, v := range row { 84 | switch k { 85 | case common.AttributeTraceID, common.AttributeSpanID, common.AttributeTime: 86 | default: 87 | if v == nil { 88 | continue 89 | } 90 | if fieldValue, ok := lineprotocol.NewValue(v); ok { 91 | lpEncoder.AddField(k, fieldValue) 92 | } else { 93 | iwa.logger.Sugar().Warn("failed to cast column %s (%T) to line protocol field value", k, v) 94 | } 95 | } 96 | } 97 | foundTime := false 98 | if v, ok := row[common.AttributeTime]; ok && v != nil { 99 | if timeValue, ok := v.(time.Time); ok { 100 | foundTime = true 101 | lpEncoder.EndLine(timeValue) 102 | } else { 103 | iwa.logger.Sugar().Warn("expected column %s to have type time but got %T", common.AttributeTime, v) 104 | } 105 | } 106 | if !foundTime { 107 | return fmt.Errorf("time value not found in row") 108 | } 109 | return nil 110 | }) 111 | if err != nil { 112 | return fmt.Errorf("failed to query spans table: %w", err) 113 | } 114 | 115 | // trace events 116 | 117 | err = iwa.executeQuery(ctx, iwa.dbSrc, queryGetTraceEvents(iwa.tableLogsSrc, span.TraceID), 118 | func(row map[string]interface{}) error { 119 | lpEncoder.StartLine(iwa.tableLogsArchive) 120 | var tagCount int 121 | for _, k := range []string{common.AttributeTraceID, common.AttributeSpanID} { 122 | if stringValue, ok := row[k].(string); ok { 123 | lpEncoder.AddTag(k, stringValue) 124 | tagCount++ 125 | } else { 126 | iwa.logger.Sugar().Warn("expected column %s to have type string but got %T", k, row[k]) 127 | } 128 | } 129 | if tagCount != 2 { 130 | return fmt.Errorf("expected 2 tags, but got %d; should find columns %s, %s", 131 | tagCount, common.AttributeTraceID, common.AttributeSpanID) 132 | } 133 | for k, v := range row { 134 | switch k { 135 | case common.AttributeTraceID, common.AttributeSpanID, common.AttributeTime: 136 | default: 137 | if v == nil { 138 | continue 139 | } 140 | if fieldValue, ok := lineprotocol.NewValue(v); ok { 141 | lpEncoder.AddField(k, fieldValue) 142 | } else { 143 | iwa.logger.Sugar().Warn("failed to cast column %s (%T) to line protocol field value", k, v) 144 | } 145 | } 146 | } 147 | foundTime := false 148 | if v, ok := row[common.AttributeTime]; ok && v != nil { 149 | if timeValue, ok := v.(time.Time); ok { 150 | foundTime = true 151 | lpEncoder.EndLine(timeValue) 152 | } else { 153 | iwa.logger.Sugar().Warn("expected column %s to have type time but got %T", common.AttributeTime, v) 154 | } 155 | } 156 | if 
!foundTime { 157 | return fmt.Errorf("time value not found in row") 158 | } 159 | return nil 160 | }) 161 | if err != nil { 162 | iwa.logger.Error("failed to query logs (span events) table", zap.Error(err)) 163 | } 164 | 165 | // trace span links 166 | 167 | err = iwa.executeQuery(ctx, iwa.dbSrc, queryGetTraceLinks(iwa.tableSpanLinksSrc, span.TraceID), 168 | func(row map[string]interface{}) error { 169 | lpEncoder.StartLine(iwa.tableSpanLinksArchive) 170 | var tagCount int 171 | for _, k := range []string{common.AttributeTraceID, common.AttributeSpanID, common.AttributeLinkedTraceID, common.AttributeLinkedSpanID} { 172 | if stringValue, ok := row[k].(string); ok { 173 | lpEncoder.AddTag(k, stringValue) 174 | tagCount++ 175 | } else { 176 | iwa.logger.Sugar().Warn("expected column %s to have type string but got %T", k, row[k]) 177 | } 178 | } 179 | if tagCount != 4 { 180 | return fmt.Errorf("expected 4 tags, but got %d; should find columns %s, %s, %s, %s", 181 | tagCount, common.AttributeTraceID, common.AttributeSpanID, common.AttributeLinkedTraceID, common.AttributeLinkedSpanID) 182 | } 183 | for k, v := range row { 184 | switch k { 185 | case common.AttributeTraceID, common.AttributeSpanID, common.AttributeLinkedTraceID, common.AttributeLinkedSpanID, common.AttributeTime: 186 | default: 187 | if v == nil { 188 | continue 189 | } 190 | if fieldValue, ok := lineprotocol.NewValue(v); ok { 191 | lpEncoder.AddField(k, fieldValue) 192 | } else { 193 | iwa.logger.Sugar().Warn("failed to cast column %s (%T) to line protocol field value", k, v) 194 | } 195 | } 196 | } 197 | foundTime := false 198 | if v, ok := row[common.AttributeTime]; ok && v != nil { 199 | if timeValue, ok := v.(time.Time); ok { 200 | foundTime = true 201 | lpEncoder.EndLine(timeValue) 202 | } else { 203 | iwa.logger.Sugar().Warn("expected column %s to have type time but got %T", common.AttributeTime, v) 204 | } 205 | } 206 | if !foundTime { 207 | return fmt.Errorf("time value not found in row") 208 | } 209 | return nil 210 | }) 211 | if err != nil { 212 | iwa.logger.Error("failed to query span links table", zap.Error(err)) 213 | } 214 | 215 | if err = lpEncoder.Err(); err != nil { 216 | return err 217 | } 218 | 219 | req, err := http.NewRequestWithContext(ctx, http.MethodPost, iwa.writeURLArchive, bytes.NewReader(lpEncoder.Bytes())) 220 | if err != nil { 221 | return err 222 | } 223 | req.Header.Set("Authorization", fmt.Sprintf("Token %s", iwa.authToken)) 224 | if res, err := iwa.httpClient.Do(req); err != nil { 225 | return err 226 | } else if body, err := io.ReadAll(res.Body); err != nil { 227 | return err 228 | } else if err = res.Body.Close(); err != nil { 229 | return err 230 | } else if res.StatusCode/100 != 2 { 231 | return fmt.Errorf("line protocol write returned %q %q", res.Status, string(body)) 232 | } 233 | 234 | return nil 235 | } 236 | -------------------------------------------------------------------------------- /jaeger-influxdb/internal/logctx.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | var loggerContext struct{} 10 | 11 | func LoggerWithContext(ctx context.Context, logger *zap.Logger) context.Context { 12 | return context.WithValue(ctx, loggerContext, logger) 13 | } 14 | 15 | func LoggerFromContext(ctx context.Context) *zap.Logger { 16 | logger, _ := ctx.Value(loggerContext).(*zap.Logger) 17 | return logger 18 | } 19 | 
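// Usage sketch (illustrative only): stash a logger in a context at the edge
// of a request, then recover it in code that only receives the context.
//
//	ctx = LoggerWithContext(ctx, zap.NewNop()) // assumption: any *zap.Logger works here
//	...
//	if l := LoggerFromContext(ctx); l != nil {
//		l.Debug("logger recovered from context")
//	}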
-------------------------------------------------------------------------------- /jaeger-influxdb/internal/queries.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/jaegertracing/jaeger/model" 9 | "github.com/jaegertracing/jaeger/storage/spanstore" 10 | semconv "go.opentelemetry.io/collector/semconv/v1.16.0" 11 | 12 | "github.com/influxdata/influxdb-observability/common" 13 | ) 14 | 15 | func traceIDToString(traceID model.TraceID) string { 16 | // model.TraceID.String() does not convert the high portion if it is zero 17 | return fmt.Sprintf("%016x%016x", traceID.High, traceID.Low) 18 | } 19 | 20 | func queryGetAllWhereTraceID(table string, traceIDs ...model.TraceID) string { 21 | if len(traceIDs) == 0 { 22 | return fmt.Sprintf(`SELECT * FROM '%s' WHERE false`, table) 23 | } 24 | traceIDStrings := make([]string, len(traceIDs)) 25 | for i, traceID := range traceIDs { 26 | traceIDStrings[i] = traceIDToString(traceID) 27 | } 28 | return fmt.Sprintf(`SELECT * FROM '%s' WHERE "%s" IN ('%s')`, 29 | table, common.AttributeTraceID, strings.Join(traceIDStrings, `','`)) 30 | } 31 | 32 | func queryGetTraceSpans(tableSpans string, traceIDs ...model.TraceID) string { 33 | return queryGetAllWhereTraceID(tableSpans, traceIDs...) 34 | } 35 | 36 | func queryGetTraceEvents(tableLogs string, traceIDs ...model.TraceID) string { 37 | return queryGetAllWhereTraceID(tableLogs, traceIDs...) 38 | } 39 | 40 | func queryGetTraceLinks(tableSpanLinks string, traceIDs ...model.TraceID) string { 41 | return queryGetAllWhereTraceID(tableSpanLinks, traceIDs...) 42 | } 43 | 44 | func queryGetServices() string { 45 | return fmt.Sprintf(`SELECT "%s" FROM '%s' GROUP BY "%s"`, 46 | semconv.AttributeServiceName, tableSpanMetricsCalls, semconv.AttributeServiceName) 47 | } 48 | 49 | func queryGetOperations(serviceName string) string { 50 | return fmt.Sprintf(`SELECT "%s", "%s" FROM '%s' WHERE "%s" = '%s' GROUP BY "%s", "%s"`, 51 | common.AttributeSpanName, common.AttributeSpanKind, tableSpanMetricsCalls, semconv.AttributeServiceName, serviceName, common.AttributeSpanName, common.AttributeSpanKind) 52 | } 53 | 54 | func queryGetDependencies(endTs time.Time, lookback time.Duration) string { 55 | return fmt.Sprintf(` 56 | SELECT "%s", "%s", SUM("%s") AS "%s" 57 | FROM '%s' 58 | WHERE "%s" >= to_timestamp(%d) AND "%s" <= to_timestamp(%d) 59 | GROUP BY "%s", "%s"`, 60 | columnServiceGraphClient, columnServiceGraphServer, columnServiceGraphCount, columnServiceGraphCount, 61 | tableServiceGraphRequestCount, 62 | common.AttributeTime, endTs.Add(-lookback).UnixNano(), common.AttributeTime, endTs.UnixNano(), 63 | columnServiceGraphClient, columnServiceGraphServer) 64 | } 65 | 66 | func queryFindTraceIDs(tableSpans string, tqp *spanstore.TraceQueryParameters) string { 67 | tags := make(map[string]string, len(tqp.Tags)+2) 68 | for k, v := range tqp.Tags { 69 | tags[k] = v 70 | } 71 | if tqp.ServiceName != "" { 72 | tags[semconv.AttributeServiceName] = tqp.ServiceName 73 | } 74 | if tqp.OperationName != "" { 75 | tags[common.AttributeSpanName] = tqp.OperationName 76 | } 77 | 78 | predicates := make([]string, 0, len(tags)+4) 79 | for k, v := range tags { 80 | predicates = append(predicates, fmt.Sprintf(`"%s" = '%s'`, k, v)) 81 | } 82 | if !tqp.StartTimeMin.IsZero() { 83 | predicates = append(predicates, fmt.Sprintf(`"%s" >= to_timestamp(%d)`, common.AttributeTime, tqp.StartTimeMin.UnixNano())) 84 | } 85 | if 
!tqp.StartTimeMax.IsZero() { 86 | predicates = append(predicates, fmt.Sprintf(`"%s" <= to_timestamp(%d)`, common.AttributeTime, tqp.StartTimeMax.UnixNano())) 87 | } 88 | if tqp.DurationMin > 0 { 89 | predicates = append(predicates, 90 | fmt.Sprintf(`"%s" >= %d`, common.AttributeDurationNano, tqp.DurationMin.Nanoseconds())) 91 | } 92 | if tqp.DurationMax > 0 { 93 | predicates = append(predicates, 94 | fmt.Sprintf(`"%s" <= %d`, common.AttributeDurationNano, tqp.DurationMax.Nanoseconds())) 95 | } 96 | 97 | query := fmt.Sprintf(`SELECT "%s", MAX("%s") AS t FROM '%s'`, common.AttributeTraceID, common.AttributeTime, tableSpans) 98 | if len(predicates) > 0 { 99 | query += fmt.Sprintf(" WHERE %s", strings.Join(predicates, " AND ")) 100 | } 101 | query += fmt.Sprintf(` GROUP BY "%s" ORDER BY t DESC LIMIT %d`, common.AttributeTraceID, tqp.NumTraces) 102 | 103 | return query 104 | } 105 | -------------------------------------------------------------------------------- /otel2influx/README.md: -------------------------------------------------------------------------------- 1 | # OpenTelemetry to InfluxDB Line Protocol Converter 2 | 3 | [![Go Reference](https://pkg.go.dev/badge/github.com/influxdata/influxdb-observability/otel2influx.svg)](https://pkg.go.dev/github.com/influxdata/influxdb-observability/otel2influx) 4 | 5 | This package converts OpenTelemetry traces, metrics, and logs to [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/). 6 | The schema is optimized for [InfluxDB/IOx](https://github.com/influxdata/influxdb_iox), a timeseries database engine that is currently under development. 7 | 8 | [Docker Image: WIP OpenTelemetry Collector Contrib](https://hub.docker.com/r/jacobmarble/opentelemetry-collector-contrib-influxdb) 9 | 10 | [Docker Image: WIP Telegraf](https://hub.docker.com/r/jacobmarble/telegraf-opentelemetry) 11 | 12 | ## Definitions 13 | 14 | ["InfluxDB"](https://www.influxdata.com/products/influxdb/) 15 | 16 | [InfluxDB "IOx"](https://www.influxdata.com/blog/announcing-influxdb-iox/) 17 | 18 | [InfluxDB "Line Protocol"](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 19 | 20 | ["OpenTelemetry"](https://opentelemetry.io/docs/concepts/what-is-opentelemetry/) 21 | 22 | [OpenTelemetry "Signal"](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/overview.md#opentelemetry-client-architecture) 23 | 24 | [OpenTelemetry Signal "Attribute"](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes) and [Attribute naming guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/attribute-and-label-naming.md). 25 | 26 | OpenTelemetry Signal "Label": similar to Signal Attribute; values are type string only 27 | 28 | [OpenTelemetry Signal "Resource"](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md) 29 | 30 | [OpenTelemetry Signal "Instrumentation Library"](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/overview.md#instrumentation-libraries) 31 | 32 | Signals have properties that are not free-form Attributes or Labels. 33 | These properties distinguish the OpenTelemetry Signal types. 34 | For example: 35 | - trace ID, span ID, parent span ID 36 | - metric name 37 | - log severity, log span ID 38 | 39 | ## Conversion 40 | 41 | Spans are assigned measurement `spans`. 
42 | Metric points are assigned the measurement named in the `Metric.name` protocol buffer field. 43 | Logs are assigned measurement `logs`. 44 | 45 | This exporter converts all Attributes to Line Protocol fields, without modification. 46 | In the case that application Attribute keys conflict with Resource or Instrumentation Library Attribute keys, the application loses. 47 | 48 | The exporter converts other Signal properties to fields with key names borrowed from [OTLP protocol buffer](https://github.com/open-telemetry/opentelemetry-proto) messages. 49 | For example: 50 | - `Span.start_time_unix_nano` (type fixed64) -> InfluxDB line protocol timestamp 51 | - `Span.trace_id` (type bytes) -> `trace_id` as hexadecimal string 52 | - `Span.name` (type string) -> `name` 53 | - `Span.end_time_unix_nano` (type fixed64) -> `end_time_unix_nano` as uint64 54 | - `Metric.time_unix_nano` (type fixed64) -> InfluxDB line protocol timestamp 55 | - `LogRecord.time_unix_nano` (type fixed64) -> InfluxDB line protocol timestamp 56 | - This is an optional field in the OpenTelemetry data model, but required in InfluxDB. 57 | - This exporter drops the LogRecord if this field value is not set. 58 | - `LogRecord.severity_number` (type enum) -> `severity_number` as int32 59 | - `LogRecord.body` (type opentelemetry.proto.common.v1.AnyValue) -> `body` as string 60 | 61 | Some exceptions to the above exist. 62 | For example: 63 | - `Span.events` (type repeated message) -> measurement `logs` 64 | - `Span.links` (type repeated message) -> measurment `span-links` 65 | - `Metric.description` is ignored 66 | - `Metric.unit` is ignored 67 | - `Metric` values are assigned field keys `gauge`, `counter`, `count`, `sum`, `inf` 68 | - Metric conversion follows Prometheus conventions for compatibility 69 | - `LogRecord.flags` is ignored 70 | - This is an enum with no values defines yet 71 | 72 | ## Example Line Protocol 73 | 74 | TODO(jacobmarble): update this section 75 | 76 | ### Tracing Spans 77 | ``` 78 | spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="d5270e78d85f570f",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="4c28227be6a010e1",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689169000 79 | spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="d5270e78d85f570f",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689135000 80 | spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="b57e98af78c3399b",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="a0643a156d7f9f7f",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689388000 81 | spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="b57e98af78c3399b",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689303300 82 | spans 
end_time_unix_nano="2021-02-19 20:50:25.6896741 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="6a8e6a0edcc1c966",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="d68f7f3b41eb8075",status_code="STATUS_CODE_OK",trace_id="651dadde186b7834c52b13a28fc27bea" 1613767825689480300 83 | ``` 84 | 85 | ### Metrics 86 | ``` 87 | avalanche_metric_mmmmm_0_71 cycle_id="0",gauge=29,host.name="generate-metrics-avalanche",label_key_kkkkk_0="label_val_vvvvv_0",label_key_kkkkk_1="label_val_vvvvv_1",label_key_kkkkk_2="label_val_vvvvv_2",label_key_kkkkk_3="label_val_vvvvv_3",label_key_kkkkk_4="label_val_vvvvv_4",label_key_kkkkk_5="label_val_vvvvv_5",label_key_kkkkk_6="label_val_vvvvv_6",label_key_kkkkk_7="label_val_vvvvv_7",label_key_kkkkk_8="label_val_vvvvv_8",label_key_kkkkk_9="label_val_vvvvv_9",port="9090",scheme="http",series_id="3",service.name="otel-collector" 1613772311130000000 88 | avalanche_metric_mmmmm_0_71 cycle_id="0",gauge=16,host.name="generate-metrics-avalanche",label_key_kkkkk_0="label_val_vvvvv_0",label_key_kkkkk_1="label_val_vvvvv_1",label_key_kkkkk_2="label_val_vvvvv_2",label_key_kkkkk_3="label_val_vvvvv_3",label_key_kkkkk_4="label_val_vvvvv_4",label_key_kkkkk_5="label_val_vvvvv_5",label_key_kkkkk_6="label_val_vvvvv_6",label_key_kkkkk_7="label_val_vvvvv_7",label_key_kkkkk_8="label_val_vvvvv_8",label_key_kkkkk_9="label_val_vvvvv_9",port="9090",scheme="http",series_id="4",service.name="otel-collector" 1613772311130000000 89 | avalanche_metric_mmmmm_0_71 cycle_id="0",gauge=22,host.name="generate-metrics-avalanche",label_key_kkkkk_0="label_val_vvvvv_0",label_key_kkkkk_1="label_val_vvvvv_1",label_key_kkkkk_2="label_val_vvvvv_2",label_key_kkkkk_3="label_val_vvvvv_3",label_key_kkkkk_4="label_val_vvvvv_4",label_key_kkkkk_5="label_val_vvvvv_5",label_key_kkkkk_6="label_val_vvvvv_6",label_key_kkkkk_7="label_val_vvvvv_7",label_key_kkkkk_8="label_val_vvvvv_8",label_key_kkkkk_9="label_val_vvvvv_9",port="9090",scheme="http",series_id="5",service.name="otel-collector" 1613772311130000000 90 | avalanche_metric_mmmmm_0_71 cycle_id="0",gauge=90,host.name="generate-metrics-avalanche",label_key_kkkkk_0="label_val_vvvvv_0",label_key_kkkkk_1="label_val_vvvvv_1",label_key_kkkkk_2="label_val_vvvvv_2",label_key_kkkkk_3="label_val_vvvvv_3",label_key_kkkkk_4="label_val_vvvvv_4",label_key_kkkkk_5="label_val_vvvvv_5",label_key_kkkkk_6="label_val_vvvvv_6",label_key_kkkkk_7="label_val_vvvvv_7",label_key_kkkkk_8="label_val_vvvvv_8",label_key_kkkkk_9="label_val_vvvvv_9",port="9090",scheme="http",series_id="6",service.name="otel-collector" 1613772311130000000 91 | avalanche_metric_mmmmm_0_71 cycle_id="0",gauge=51,host.name="generate-metrics-avalanche",label_key_kkkkk_0="label_val_vvvvv_0",label_key_kkkkk_1="label_val_vvvvv_1",label_key_kkkkk_2="label_val_vvvvv_2",label_key_kkkkk_3="label_val_vvvvv_3",label_key_kkkkk_4="label_val_vvvvv_4",label_key_kkkkk_5="label_val_vvvvv_5",label_key_kkkkk_6="label_val_vvvvv_6",label_key_kkkkk_7="label_val_vvvvv_7",label_key_kkkkk_8="label_val_vvvvv_8",label_key_kkkkk_9="label_val_vvvvv_9",port="9090",scheme="http",series_id="7",service.name="otel-collector" 1613772311130000000 92 | ``` 93 | 94 | ### Logs 95 | ``` 96 | logs fluent.tag="fluent.info",pid=18i,ppid=9i,worker=0i 1613769568895331700 97 | logs fluent.tag="fluent.debug",instance=1720i,queue_size=0i,stage_size=0i 1613769568895697200 98 | logs fluent.tag="fluent.info",worker=0i 1613769568896515100 99 | ``` 100 | 
-------------------------------------------------------------------------------- /otel2influx/common.go: -------------------------------------------------------------------------------- 1 | package otel2influx 2 | 3 | import ( 4 | "go.opentelemetry.io/collector/pdata/pcommon" 5 | semconv "go.opentelemetry.io/collector/semconv/v1.16.0" 6 | ) 7 | 8 | func ResourceToTags(resource pcommon.Resource, tags map[string]string) map[string]string { 9 | resource.Attributes().Range(func(k string, v pcommon.Value) bool { 10 | if k != "" { 11 | tags[k] = v.AsString() 12 | } 13 | return true 14 | }) 15 | return tags 16 | } 17 | 18 | func InstrumentationScopeToTags(instrumentationScope pcommon.InstrumentationScope, tags map[string]string) map[string]string { 19 | if instrumentationScope.Name() != "" { 20 | tags[semconv.OtelLibraryName] = instrumentationScope.Name() 21 | } 22 | if instrumentationScope.Version() != "" { 23 | tags[semconv.OtelLibraryVersion] = instrumentationScope.Version() 24 | } 25 | instrumentationScope.Attributes().Range(func(k string, v pcommon.Value) bool { 26 | if k != "" { 27 | tags[k] = v.AsString() 28 | } 29 | return true 30 | }) 31 | return tags 32 | } 33 | -------------------------------------------------------------------------------- /otel2influx/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/influxdata/influxdb-observability/otel2influx 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/influxdata/influxdb-observability/common v0.5.8 7 | github.com/stretchr/testify v1.8.4 8 | go.opentelemetry.io/collector/consumer v0.87.0 9 | go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 10 | go.opentelemetry.io/collector/semconv v0.87.0 11 | go.uber.org/multierr v1.11.0 12 | golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 13 | ) 14 | 15 | require ( 16 | github.com/davecgh/go-spew v1.1.1 // indirect 17 | github.com/gogo/protobuf v1.3.2 // indirect 18 | github.com/golang/protobuf v1.5.3 // indirect 19 | github.com/json-iterator/go v1.1.12 // indirect 20 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 21 | github.com/modern-go/reflect2 v1.0.2 // indirect 22 | github.com/pmezard/go-difflib v1.0.0 // indirect 23 | golang.org/x/net v0.17.0 // indirect 24 | golang.org/x/sys v0.13.0 // indirect 25 | golang.org/x/text v0.13.0 // indirect 26 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect 27 | google.golang.org/grpc v1.58.3 // indirect 28 | google.golang.org/protobuf v1.31.0 // indirect 29 | gopkg.in/yaml.v3 v3.0.1 // indirect 30 | ) 31 | 32 | replace github.com/influxdata/influxdb-observability/common => ../common 33 | -------------------------------------------------------------------------------- /otel2influx/go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 5 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 6 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 7 | github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 8 | github.com/golang/protobuf v1.5.3/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 9 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 10 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 11 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 12 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 13 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 14 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 15 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 16 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 17 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 18 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 19 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 20 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 21 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 22 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 23 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 24 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 25 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 26 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 27 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 28 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 29 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 30 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 31 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 32 | go.opentelemetry.io/collector v0.87.0 h1:160HewHp+/wzr62BzWjQgIvdTtzpaYTlCnGVb8DYnM0= 33 | go.opentelemetry.io/collector/consumer v0.87.0 h1:oR5XKZoVF/hwz0FnrYPaHcbbQazHifMsxpENMR7ivvo= 34 | go.opentelemetry.io/collector/consumer v0.87.0/go.mod h1:lui5rg1byAT7QPbCY733StCDc/TPxS3hVNXKoVQ3LsI= 35 | go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE= 36 | go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg= 37 | go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= 38 | go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= 39 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 40 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 41 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 42 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 43 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 44 | golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 
h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= 45 | golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= 46 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 47 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 48 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 49 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 50 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 51 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 52 | golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= 53 | golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= 54 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 55 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 56 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 57 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 58 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 59 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 60 | golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= 61 | golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 62 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 63 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 64 | golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= 65 | golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 66 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 67 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 68 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 69 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 70 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 71 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 72 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 73 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 74 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= 75 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= 76 | google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= 77 | google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= 78 | google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 79 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 80 | google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= 81 | google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 82 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 83 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 84 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 85 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 86 | -------------------------------------------------------------------------------- /otel2influx/logs.go: -------------------------------------------------------------------------------- 1 | package otel2influx 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "strings" 8 | "time" 9 | 10 | "go.opentelemetry.io/collector/consumer/consumererror" 11 | "go.opentelemetry.io/collector/pdata/pcommon" 12 | "go.opentelemetry.io/collector/pdata/plog" 13 | semconv "go.opentelemetry.io/collector/semconv/v1.16.0" 14 | "golang.org/x/exp/maps" 15 | 16 | "github.com/influxdata/influxdb-observability/common" 17 | ) 18 | 19 | type OtelLogsToLineProtocolConfig struct { 20 | Logger common.Logger 21 | Writer InfluxWriter 22 | // LogRecordDimensions are log record attributes to be used as line protocol tags. 23 | // These are always included as tags, if available: 24 | // - trace ID 25 | // - span ID 26 | // The default values: 27 | // - service.name 28 | // Other common attributes can be found here: 29 | // - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv 30 | // When using InfluxDB for both logs and traces, be certain that LogRecordDimensions 31 | // matches the tracing SpanDimensions value. 
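	// Illustrative value (an assumption, not the package default):
	//   LogRecordDimensions: []string{semconv.AttributeServiceName, "host.name"}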
32 | LogRecordDimensions []string 33 | } 34 | 35 | func DefaultOtelLogsToLineProtocolConfig() *OtelLogsToLineProtocolConfig { 36 | return &OtelLogsToLineProtocolConfig{ 37 | Logger: new(common.NoopLogger), 38 | Writer: new(NoopInfluxWriter), 39 | LogRecordDimensions: []string{ 40 | semconv.AttributeServiceName, 41 | }, 42 | } 43 | } 44 | 45 | type OtelLogsToLineProtocol struct { 46 | logger common.Logger 47 | writer InfluxWriter 48 | 49 | logRecordDimensions map[string]struct{} 50 | } 51 | 52 | func NewOtelLogsToLineProtocol(config *OtelLogsToLineProtocolConfig) (*OtelLogsToLineProtocol, error) { 53 | logRecordDimensions := make(map[string]struct{}, len(config.LogRecordDimensions)) 54 | { 55 | duplicateDimensions := make(map[string]struct{}) 56 | for _, k := range config.LogRecordDimensions { 57 | if _, found := logRecordDimensions[k]; found { 58 | duplicateDimensions[k] = struct{}{} 59 | } else { 60 | logRecordDimensions[k] = struct{}{} 61 | } 62 | } 63 | if len(duplicateDimensions) > 0 { 64 | return nil, fmt.Errorf("duplicate record dimension(s) configured: %s", 65 | strings.Join(maps.Keys(duplicateDimensions), ",")) 66 | } 67 | 68 | } 69 | return &OtelLogsToLineProtocol{ 70 | logger: config.Logger, 71 | writer: config.Writer, 72 | logRecordDimensions: logRecordDimensions, 73 | }, nil 74 | } 75 | 76 | func (c *OtelLogsToLineProtocol) WriteLogs(ctx context.Context, ld plog.Logs) error { 77 | batch := c.writer.NewBatch() 78 | for i := 0; i < ld.ResourceLogs().Len(); i++ { 79 | resourceLogs := ld.ResourceLogs().At(i) 80 | for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ { 81 | ilLogs := resourceLogs.ScopeLogs().At(j) 82 | for k := 0; k < ilLogs.LogRecords().Len(); k++ { 83 | logRecord := ilLogs.LogRecords().At(k) 84 | if err := c.enqueueLogRecord(ctx, resourceLogs.Resource(), ilLogs.Scope(), logRecord, batch); err != nil { 85 | return consumererror.NewPermanent(fmt.Errorf("failed to convert OTLP log record to line protocol: %w", err)) 86 | } 87 | } 88 | } 89 | } 90 | return batch.WriteBatch(ctx) 91 | } 92 | 93 | func (c *OtelLogsToLineProtocol) enqueueLogRecord(ctx context.Context, resource pcommon.Resource, instrumentationScope pcommon.InstrumentationScope, logRecord plog.LogRecord, batch InfluxWriterBatch) error { 94 | ts := logRecord.Timestamp().AsTime() 95 | if ts.IsZero() { 96 | // This is a valid condition in OpenTelemetry, but not in InfluxDB. 97 | // From otel proto field Logrecord.time_unix_name: 98 | // "Value of 0 indicates unknown or missing timestamp." 
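			// Rather than dropping the record, substitute the current wall-clock
			// time as the line protocol timestamp.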
99 | ts = time.Now() 100 | } 101 | 102 | tags := make(map[string]string, len(c.logRecordDimensions)+2) 103 | fields := make(map[string]interface{}) 104 | 105 | fields[common.AttributeFlags] = uint64(logRecord.Flags()) 106 | if ots := logRecord.ObservedTimestamp().AsTime(); !ots.IsZero() && !ots.Equal(time.Unix(0, 0)) { 107 | fields[common.AttributeObservedTimeUnixNano] = ots.UnixNano() 108 | } 109 | 110 | if traceID, spanID := logRecord.TraceID(), logRecord.SpanID(); !traceID.IsEmpty() && !spanID.IsEmpty() { 111 | tags[common.AttributeTraceID] = traceID.String() 112 | tags[common.AttributeSpanID] = spanID.String() 113 | } 114 | 115 | if severityNumber := logRecord.SeverityNumber(); severityNumber != plog.SeverityNumberUnspecified { 116 | fields[common.AttributeSeverityNumber] = int64(severityNumber) 117 | } 118 | if severityText := logRecord.SeverityText(); severityText != "" { 119 | fields[common.AttributeSeverityText] = severityText 120 | } 121 | fields[common.AttributeBody] = logRecord.Body().AsString() 122 | 123 | droppedAttributesCount := uint64(logRecord.DroppedAttributesCount()) 124 | attributesField := make(map[string]any) 125 | for _, attributes := range []pcommon.Map{resource.Attributes(), instrumentationScope.Attributes(), logRecord.Attributes()} { 126 | attributes.Range(func(k string, v pcommon.Value) bool { 127 | if k == "" { 128 | return true 129 | } 130 | if _, found := c.logRecordDimensions[k]; found { 131 | tags[k] = v.AsString() 132 | } else { 133 | attributesField[k] = v.AsRaw() 134 | } 135 | return true 136 | }) 137 | } 138 | if len(attributesField) > 0 { 139 | marshalledAttributes, err := json.Marshal(attributesField) 140 | if err != nil { 141 | c.logger.Debug("failed to marshal attributes to JSON", err) 142 | droppedAttributesCount += uint64(logRecord.Attributes().Len()) 143 | } else { 144 | fields[common.AttributeAttributes] = string(marshalledAttributes) 145 | } 146 | } 147 | for k := range tags { 148 | if _, found := fields[k]; found { 149 | c.logger.Debug("tag and field keys conflict; field will be dropped", "key", k) 150 | droppedAttributesCount++ 151 | delete(fields, k) 152 | } 153 | } 154 | if droppedAttributesCount > 0 { 155 | fields[common.AttributeDroppedAttributesCount] = droppedAttributesCount 156 | } 157 | 158 | if err := batch.EnqueuePoint(ctx, common.MeasurementLogs, tags, fields, ts, common.InfluxMetricValueTypeUntyped); err != nil { 159 | return fmt.Errorf("failed to write point for int gauge: %w", err) 160 | } 161 | 162 | return nil 163 | } 164 | -------------------------------------------------------------------------------- /otel2influx/metrics.go: -------------------------------------------------------------------------------- 1 | package otel2influx 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "go.opentelemetry.io/collector/consumer/consumererror" 8 | "go.opentelemetry.io/collector/pdata/pcommon" 9 | "go.opentelemetry.io/collector/pdata/pmetric" 10 | 11 | "github.com/influxdata/influxdb-observability/common" 12 | ) 13 | 14 | type OtelMetricsToLineProtocolConfig struct { 15 | Logger common.Logger 16 | Writer InfluxWriter 17 | Schema common.MetricsSchema 18 | } 19 | 20 | func DefaultOtelMetricsToLineProtocolConfig() *OtelMetricsToLineProtocolConfig { 21 | return &OtelMetricsToLineProtocolConfig{ 22 | Logger: new(common.NoopLogger), 23 | Writer: new(NoopInfluxWriter), 24 | Schema: common.MetricsSchemaTelegrafPrometheusV1, 25 | } 26 | } 27 | 28 | type metricWriter interface { 29 | enqueueMetric(ctx context.Context, resource pcommon.Resource, 
instrumentationScope pcommon.InstrumentationScope, metric pmetric.Metric, batch InfluxWriterBatch) error 30 | } 31 | 32 | type OtelMetricsToLineProtocol struct { 33 | iw InfluxWriter 34 | mw metricWriter 35 | } 36 | 37 | func NewOtelMetricsToLineProtocol(config *OtelMetricsToLineProtocolConfig) (*OtelMetricsToLineProtocol, error) { 38 | var mw metricWriter 39 | switch config.Schema { 40 | case common.MetricsSchemaTelegrafPrometheusV1: 41 | mw = &metricWriterTelegrafPrometheusV1{ 42 | logger: config.Logger, 43 | } 44 | case common.MetricsSchemaTelegrafPrometheusV2: 45 | mw = &metricWriterTelegrafPrometheusV2{ 46 | logger: config.Logger, 47 | } 48 | case common.MetricsSchemaOtelV1: 49 | mw = &metricWriterOtelV1{ 50 | logger: config.Logger, 51 | } 52 | default: 53 | return nil, fmt.Errorf("unrecognized metrics schema %d", config.Logger) 54 | } 55 | return &OtelMetricsToLineProtocol{ 56 | iw: config.Writer, 57 | mw: mw, 58 | }, nil 59 | } 60 | 61 | func (c *OtelMetricsToLineProtocol) WriteMetrics(ctx context.Context, md pmetric.Metrics) error { 62 | batch := c.iw.NewBatch() 63 | for i := 0; i < md.ResourceMetrics().Len(); i++ { 64 | resourceMetrics := md.ResourceMetrics().At(i) 65 | for j := 0; j < resourceMetrics.ScopeMetrics().Len(); j++ { 66 | isMetrics := resourceMetrics.ScopeMetrics().At(j) 67 | for k := 0; k < isMetrics.Metrics().Len(); k++ { 68 | metric := isMetrics.Metrics().At(k) 69 | if err := c.mw.enqueueMetric(ctx, resourceMetrics.Resource(), isMetrics.Scope(), metric, batch); err != nil { 70 | return consumererror.NewPermanent(fmt.Errorf("failed to convert OTLP metric to line protocol: %w", err)) 71 | } 72 | } 73 | } 74 | } 75 | return batch.WriteBatch(ctx) 76 | } 77 | 78 | type basicDataPoint interface { 79 | Timestamp() pcommon.Timestamp 80 | StartTimestamp() pcommon.Timestamp 81 | Attributes() pcommon.Map 82 | Flags() pmetric.DataPointFlags 83 | } 84 | -------------------------------------------------------------------------------- /otel2influx/metrics_otel_v1.go: -------------------------------------------------------------------------------- 1 | package otel2influx 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "strconv" 8 | "strings" 9 | 10 | "go.opentelemetry.io/collector/pdata/pcommon" 11 | "go.opentelemetry.io/collector/pdata/pmetric" 12 | "go.uber.org/multierr" 13 | 14 | "github.com/influxdata/influxdb-observability/common" 15 | ) 16 | 17 | var _ metricWriter = (*metricWriterOtelV1)(nil) 18 | 19 | type metricWriterOtelV1 struct { 20 | logger common.Logger 21 | } 22 | 23 | func (m *metricWriterOtelV1) enqueueMetric(ctx context.Context, resource pcommon.Resource, is pcommon.InstrumentationScope, pm pmetric.Metric, batch InfluxWriterBatch) (err error) { 24 | defer func() { 25 | if r := recover(); r != nil { 26 | var rerr error 27 | switch v := r.(type) { 28 | case error: 29 | rerr = v 30 | case string: 31 | rerr = errors.New(v) 32 | default: 33 | rerr = fmt.Errorf("%+v", r) 34 | } 35 | err = multierr.Combine(err, rerr) 36 | } 37 | }() 38 | 39 | // TODO metric description 40 | measurementName := fmt.Sprintf("%s_%s_%s", pm.Name(), pm.Unit(), strings.ToLower(pm.Type().String())) 41 | tags := make(map[string]string) 42 | tags = ResourceToTags(resource, tags) 43 | tags = InstrumentationScopeToTags(is, tags) 44 | 45 | switch pm.Type() { 46 | // case pmetric.MetricTypeGauge: 47 | // return m.writeGauge(ctx, resource, is, pm.Name(), pm.Gauge(), batch) 48 | case pmetric.MetricTypeSum: 49 | m.enqueueSum(ctx, measurementName, tags, pm, batch) 50 | case 
pmetric.MetricTypeHistogram: 51 | m.enqueueHistogram(ctx, measurementName, tags, pm, batch) 52 | default: 53 | err = fmt.Errorf("unrecognized metric type %q", pm.Type()) 54 | } 55 | return 56 | } 57 | 58 | // formatFieldKeyMetricSumOtelV1 composes a value field key from (sum temporality, sum monotonicity, and datapoint value type) 59 | func formatFieldKeyMetricSumOtelV1(temporality string, monotonic bool, dataPointValueType string) string { 60 | var monotonicity string 61 | if monotonic { 62 | monotonicity = "monotonic" 63 | } else { 64 | monotonicity = "nonmonotonic" 65 | } 66 | 67 | return fmt.Sprintf("value_%s_%s_%s", strings.ToLower(temporality), monotonicity, strings.ToLower(dataPointValueType)) 68 | } 69 | 70 | func (m *metricWriterOtelV1) enqueueSum(ctx context.Context, measurementName string, resourceTags map[string]string, pm pmetric.Metric, batch InfluxWriterBatch) { 71 | temporality := pm.Sum().AggregationTemporality().String() 72 | monotonic := pm.Sum().IsMonotonic() 73 | 74 | buildValue := func(dataPoint pmetric.NumberDataPoint) (string, interface{}) { 75 | fieldKey := formatFieldKeyMetricSumOtelV1(temporality, monotonic, dataPoint.ValueType().String()) 76 | switch dataPoint.ValueType() { 77 | case pmetric.NumberDataPointValueTypeInt: 78 | return fieldKey, dataPoint.IntValue() 79 | case pmetric.NumberDataPointValueTypeDouble: 80 | return fieldKey, dataPoint.DoubleValue() 81 | default: 82 | panic(fmt.Sprintf("unsupported data point value type '%s'", dataPoint.ValueType().String())) 83 | } 84 | } 85 | 86 | for i := 0; i < pm.Sum().DataPoints().Len(); i++ { 87 | // TODO datapoint exemplars 88 | // TODO datapoint flags 89 | dataPoint := pm.Sum().DataPoints().At(i) 90 | 91 | fields := make(map[string]interface{}, 3) 92 | if dataPoint.StartTimestamp() != 0 { 93 | fields[common.AttributeStartTimeUnixNano] = int64(dataPoint.StartTimestamp()) 94 | } 95 | valueFieldKey, value := buildValue(dataPoint) 96 | fields[valueFieldKey] = value 97 | 98 | tags := make(map[string]string, dataPoint.Attributes().Len()+len(resourceTags)) 99 | for k, v := range resourceTags { 100 | tags[k] = v 101 | } 102 | dataPoint.Attributes().Range(func(k string, v pcommon.Value) bool { 103 | tags[k] = v.AsString() 104 | return true 105 | }) 106 | 107 | err := batch.EnqueuePoint(ctx, measurementName, tags, fields, dataPoint.Timestamp().AsTime(), common.InfluxMetricValueTypeUntyped) 108 | if err != nil { 109 | panic(err) 110 | } 111 | } 112 | } 113 | 114 | func (m *metricWriterOtelV1) enqueueHistogram(ctx context.Context, measurementName string, resourceTags map[string]string, pm pmetric.Metric, batch InfluxWriterBatch) { 115 | temporality := strings.ToLower(pm.Histogram().AggregationTemporality().String()) 116 | 117 | for i := 0; i < pm.Histogram().DataPoints().Len(); i++ { 118 | // TODO datapoint exemplars 119 | // TODO datapoint flags 120 | dataPoint := pm.Histogram().DataPoints().At(i) 121 | 122 | bucketCounts, explicitBounds := dataPoint.BucketCounts(), dataPoint.ExplicitBounds() 123 | if bucketCounts.Len() > 0 && 124 | bucketCounts.Len() != explicitBounds.Len() && 125 | bucketCounts.Len() != explicitBounds.Len()+1 { 126 | // The infinity bucket is not used in this schema, 127 | // so accept input if that particular bucket is missing. 
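		// Any other mismatch indicates malformed input. Panicking here is safe
		// because enqueueMetric recovers panics and returns them as errors.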
128 | panic(fmt.Sprintf("invalid metric histogram bucket counts qty %d vs explicit bounds qty %d", bucketCounts.Len(), explicitBounds.Len())) 129 | } 130 | 131 | fields := make(map[string]interface{}, explicitBounds.Len()+6) 132 | if dataPoint.StartTimestamp() != 0 { 133 | fields[common.AttributeStartTimeUnixNano] = int64(dataPoint.StartTimestamp()) 134 | } 135 | for i := 0; i < explicitBounds.Len(); i++ { 136 | boundStr := strconv.FormatFloat(explicitBounds.At(i), 'f', -1, 64) 137 | k := fmt.Sprintf("%s_%s", temporality, boundStr) 138 | fields[k] = bucketCounts.At(i) 139 | } 140 | 141 | fields["count"] = dataPoint.Count() 142 | if dataPoint.HasSum() { 143 | fields["sum"] = dataPoint.Sum() 144 | } 145 | if dataPoint.HasMin() && dataPoint.HasMax() { 146 | fields["min"] = dataPoint.Min() 147 | fields["max"] = dataPoint.Max() 148 | } 149 | 150 | tags := make(map[string]string, dataPoint.Attributes().Len()+len(resourceTags)) 151 | for k, v := range resourceTags { 152 | tags[k] = v 153 | } 154 | dataPoint.Attributes().Range(func(k string, v pcommon.Value) bool { 155 | tags[k] = v.AsString() 156 | return true 157 | }) 158 | 159 | err := batch.EnqueuePoint(ctx, measurementName, tags, fields, dataPoint.Timestamp().AsTime(), common.InfluxMetricValueTypeUntyped) 160 | if err != nil { 161 | panic(err) 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /otel2influx/traces.go: -------------------------------------------------------------------------------- 1 | package otel2influx 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "strings" 9 | "time" 10 | 11 | "go.opentelemetry.io/collector/consumer/consumererror" 12 | "go.opentelemetry.io/collector/pdata/pcommon" 13 | "go.opentelemetry.io/collector/pdata/ptrace" 14 | semconv "go.opentelemetry.io/collector/semconv/v1.16.0" 15 | "go.uber.org/multierr" 16 | "golang.org/x/exp/maps" 17 | 18 | "github.com/influxdata/influxdb-observability/common" 19 | ) 20 | 21 | type OtelTracesToLineProtocolConfig struct { 22 | Logger common.Logger 23 | Writer InfluxWriter 24 | // SpanDimensions are span attributes to be used as line protocol tags. 
25 | // These are always included as tags: 26 | // - trace ID 27 | // - span ID 28 | // The default values are strongly recommended for use with Jaeger: 29 | // - service.name 30 | // - span.name 31 | // Other common attributes can be found here: 32 | // - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv 33 | SpanDimensions []string 34 | } 35 | 36 | func DefaultOtelTracesToLineProtocolConfig() *OtelTracesToLineProtocolConfig { 37 | return &OtelTracesToLineProtocolConfig{ 38 | Logger: new(common.NoopLogger), 39 | Writer: new(NoopInfluxWriter), 40 | SpanDimensions: []string{ 41 | semconv.AttributeServiceName, 42 | common.AttributeSpanName, 43 | }, 44 | } 45 | } 46 | 47 | type OtelTracesToLineProtocol struct { 48 | logger common.Logger 49 | influxWriter InfluxWriter 50 | 51 | spanDimensions map[string]struct{} 52 | } 53 | 54 | func NewOtelTracesToLineProtocol(config *OtelTracesToLineProtocolConfig) (*OtelTracesToLineProtocol, error) { 55 | spanDimensions := make(map[string]struct{}, len(config.SpanDimensions)) 56 | { 57 | duplicateDimensions := make(map[string]struct{}) 58 | for _, k := range config.SpanDimensions { 59 | if _, found := spanDimensions[k]; found { 60 | duplicateDimensions[k] = struct{}{} 61 | } else { 62 | spanDimensions[k] = struct{}{} 63 | } 64 | } 65 | if len(duplicateDimensions) > 0 { 66 | return nil, fmt.Errorf("duplicate span dimension(s) configured: %s", 67 | strings.Join(maps.Keys(duplicateDimensions), ",")) 68 | } 69 | } 70 | 71 | return &OtelTracesToLineProtocol{ 72 | logger: config.Logger, 73 | influxWriter: config.Writer, 74 | spanDimensions: spanDimensions, 75 | }, nil 76 | } 77 | 78 | func (c *OtelTracesToLineProtocol) WriteTraces(ctx context.Context, td ptrace.Traces) error { 79 | batch := c.influxWriter.NewBatch() 80 | for i := 0; i < td.ResourceSpans().Len(); i++ { 81 | resourceSpans := td.ResourceSpans().At(i) 82 | for j := 0; j < resourceSpans.ScopeSpans().Len(); j++ { 83 | scopeSpans := resourceSpans.ScopeSpans().At(j) 84 | for k := 0; k < scopeSpans.Spans().Len(); k++ { 85 | span := scopeSpans.Spans().At(k) 86 | if err := c.enqueueSpan(ctx, span, scopeSpans.Scope().Attributes(), resourceSpans.Resource().Attributes(), batch); err != nil { 87 | return consumererror.NewPermanent(fmt.Errorf("failed to convert OTLP span to line protocol: %w", err)) 88 | } 89 | } 90 | } 91 | } 92 | return batch.WriteBatch(ctx) 93 | } 94 | 95 | func (c *OtelTracesToLineProtocol) enqueueSpan(ctx context.Context, span ptrace.Span, scopeAttributes, resourceAttributes pcommon.Map, batch InfluxWriterBatch) (err error) { 96 | defer func() { 97 | if r := recover(); r != nil { 98 | var rerr error 99 | switch v := r.(type) { 100 | case error: 101 | rerr = v 102 | case string: 103 | rerr = errors.New(v) 104 | default: 105 | rerr = fmt.Errorf("%+v", r) 106 | } 107 | err = multierr.Combine(err, rerr) 108 | } 109 | }() 110 | 111 | traceID := span.TraceID() 112 | if traceID.IsEmpty() { 113 | err = errors.New("span has no trace ID") 114 | return 115 | } 116 | spanID := span.SpanID() 117 | if spanID.IsEmpty() { 118 | err = errors.New("span has no span ID") 119 | return 120 | } 121 | 122 | measurement := common.MeasurementSpans 123 | tags := make(map[string]string, len(c.spanDimensions)+2) 124 | fields := make(map[string]interface{}, scopeAttributes.Len()+resourceAttributes.Len()+10) 125 | 126 | droppedAttributesCount := uint64(span.DroppedAttributesCount()) 127 | attributesField := make(map[string]any) 128 | 129 | for _, attributes := range 
[]pcommon.Map{resourceAttributes, scopeAttributes, span.Attributes()} { 130 | attributes.Range(func(k string, v pcommon.Value) bool { 131 | if _, found := c.spanDimensions[k]; found { 132 | if _, found = tags[k]; found { 133 | c.logger.Debug("dimension %s already exists as a tag", k) 134 | attributesField[k] = v.AsRaw() 135 | } 136 | tags[k] = v.AsString() 137 | } else { 138 | attributesField[k] = v.AsRaw() 139 | } 140 | return true 141 | }) 142 | } 143 | if len(attributesField) > 0 { 144 | marshalledAttributes, err := json.Marshal(attributesField) 145 | if err != nil { 146 | c.logger.Debug("failed to marshal attributes to JSON", err) 147 | droppedAttributesCount += uint64(span.Attributes().Len()) 148 | } else { 149 | fields[common.AttributeAttributes] = string(marshalledAttributes) 150 | } 151 | } 152 | 153 | if traceState := span.TraceState().AsRaw(); traceState != "" { 154 | fields[common.AttributeTraceState] = traceState 155 | } 156 | if parentSpanID := span.ParentSpanID(); !parentSpanID.IsEmpty() { 157 | fields[common.AttributeParentSpanID] = parentSpanID.String() 158 | } 159 | if name := span.Name(); name != "" { 160 | fields[common.AttributeSpanName] = name 161 | } 162 | if kind := span.Kind(); kind != ptrace.SpanKindUnspecified { 163 | fields[common.AttributeSpanKind] = kind.String() 164 | } 165 | 166 | ts := span.StartTimestamp().AsTime() 167 | if ts.IsZero() { 168 | err = errors.New("span has no timestamp") 169 | return 170 | } 171 | 172 | if endTime := span.EndTimestamp().AsTime(); !endTime.IsZero() { 173 | fields[common.AttributeEndTimeUnixNano] = endTime.UnixNano() 174 | fields[common.AttributeDurationNano] = endTime.Sub(ts).Nanoseconds() 175 | } 176 | 177 | droppedEventsCount := uint64(span.DroppedEventsCount()) 178 | for i := 0; i < span.Events().Len(); i++ { 179 | if err = c.enqueueSpanEvent(ctx, traceID, spanID, span.Events().At(i), batch); err != nil { 180 | droppedEventsCount++ 181 | c.logger.Debug("invalid span event", err) 182 | } 183 | } 184 | if droppedEventsCount > 0 { 185 | fields[common.AttributeDroppedEventsCount] = droppedEventsCount 186 | } 187 | 188 | droppedLinksCount := uint64(span.DroppedLinksCount()) 189 | for i := 0; i < span.Links().Len(); i++ { 190 | if err = c.writeSpanLink(ctx, traceID, spanID, ts, span.Links().At(i), batch); err != nil { 191 | droppedLinksCount++ 192 | c.logger.Debug("invalid span link", err) 193 | } 194 | } 195 | if droppedLinksCount > 0 { 196 | fields[common.AttributeDroppedLinksCount] = droppedLinksCount 197 | } 198 | 199 | status := span.Status() 200 | switch status.Code() { 201 | case ptrace.StatusCodeUnset: 202 | case ptrace.StatusCodeOk, ptrace.StatusCodeError: 203 | fields[semconv.OtelStatusCode] = status.Code().String() 204 | default: 205 | c.logger.Debug("status code not recognized", "code", status.Code()) 206 | } 207 | if message := status.Message(); message != "" { 208 | fields[semconv.OtelStatusDescription] = message 209 | } 210 | 211 | tags[common.AttributeTraceID] = traceID.String() 212 | tags[common.AttributeSpanID] = spanID.String() 213 | 214 | for k := range tags { 215 | if _, found := fields[k]; found { 216 | c.logger.Debug("tag and field keys conflict; field will be dropped", "key", k) 217 | droppedAttributesCount++ 218 | delete(fields, k) 219 | } 220 | } 221 | if droppedAttributesCount > 0 { 222 | fields[common.AttributeDroppedAttributesCount] = droppedAttributesCount 223 | } 224 | 225 | if err = batch.EnqueuePoint(ctx, measurement, tags, fields, ts, common.InfluxMetricValueTypeUntyped); err != nil { 226 | return 
fmt.Errorf("failed to enqueue point for span: %w", err) 227 | } 228 | 229 | return nil 230 | } 231 | 232 | func (c *OtelTracesToLineProtocol) enqueueSpanEvent(ctx context.Context, traceID pcommon.TraceID, spanID pcommon.SpanID, spanEvent ptrace.SpanEvent, batch InfluxWriterBatch) error { 233 | fields := make(map[string]interface{}, 2) 234 | if name := spanEvent.Name(); name != "" { 235 | fields[semconv.AttributeEventName] = name 236 | } 237 | 238 | if spanEvent.Attributes().Len() > 0 { 239 | droppedAttributesCount := uint64(spanEvent.DroppedAttributesCount()) 240 | marshalledAttributes, err := json.Marshal(spanEvent.Attributes().AsRaw()) 241 | if err != nil { 242 | c.logger.Debug("failed to marshal attributes to JSON", err) 243 | droppedAttributesCount += uint64(spanEvent.Attributes().Len()) 244 | } else { 245 | fields[common.AttributeAttributes] = string(marshalledAttributes) 246 | } 247 | if droppedAttributesCount > 0 { 248 | fields[common.AttributeDroppedAttributesCount] = droppedAttributesCount 249 | } 250 | } 251 | 252 | tags := map[string]string{ 253 | common.AttributeTraceID: traceID.String(), 254 | common.AttributeSpanID: spanID.String(), 255 | } 256 | 257 | err := batch.EnqueuePoint(ctx, common.MeasurementLogs, tags, fields, spanEvent.Timestamp().AsTime(), common.InfluxMetricValueTypeUntyped) 258 | if err != nil { 259 | return fmt.Errorf("failed to write point for span event: %w", err) 260 | } 261 | return nil 262 | } 263 | 264 | func (c *OtelTracesToLineProtocol) writeSpanLink(ctx context.Context, traceID pcommon.TraceID, spanID pcommon.SpanID, ts time.Time, spanLink ptrace.SpanLink, batch InfluxWriterBatch) error { 265 | fields := make(map[string]interface{}, 2) 266 | 267 | linkedTraceID := spanLink.TraceID() 268 | if linkedTraceID.IsEmpty() { 269 | return errors.New("span link has no trace ID") 270 | } 271 | linkedSpanID := spanLink.SpanID() 272 | if linkedSpanID.IsEmpty() { 273 | return errors.New("span link has no span ID") 274 | } 275 | 276 | tags := map[string]string{ 277 | common.AttributeTraceID: traceID.String(), 278 | common.AttributeSpanID: spanID.String(), 279 | common.AttributeLinkedTraceID: linkedTraceID.String(), 280 | common.AttributeLinkedSpanID: linkedSpanID.String(), 281 | } 282 | 283 | if traceState := spanLink.TraceState().AsRaw(); traceState != "" { 284 | fields[common.AttributeTraceState] = traceState 285 | } 286 | 287 | if spanLink.Attributes().Len() > 0 { 288 | droppedAttributesCount := uint64(spanLink.DroppedAttributesCount()) 289 | marshalledAttributes, err := json.Marshal(spanLink.Attributes().AsRaw()) 290 | if err != nil { 291 | c.logger.Debug("failed to marshal attributes to JSON", err) 292 | droppedAttributesCount += uint64(spanLink.Attributes().Len()) 293 | } else { 294 | fields[common.AttributeAttributes] = string(marshalledAttributes) 295 | } 296 | if droppedAttributesCount > 0 { 297 | fields[common.AttributeDroppedAttributesCount] = droppedAttributesCount 298 | } 299 | } 300 | 301 | if err := batch.EnqueuePoint(ctx, common.MeasurementSpanLinks, tags, fields, ts, common.InfluxMetricValueTypeUntyped); err != nil { 302 | return fmt.Errorf("failed to write point for span link: %w", err) 303 | } 304 | return nil 305 | } 306 | -------------------------------------------------------------------------------- /otel2influx/writer.go: -------------------------------------------------------------------------------- 1 | package otel2influx 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/influxdata/influxdb-observability/common" 8 | ) 9 | 10 | 
type InfluxWriter interface { 11 | NewBatch() InfluxWriterBatch 12 | } 13 | 14 | type InfluxWriterBatch interface { 15 | EnqueuePoint(ctx context.Context, measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time, vType common.InfluxMetricValueType) error 16 | WriteBatch(ctx context.Context) error 17 | } 18 | 19 | type NoopInfluxWriter struct{} 20 | 21 | func (w *NoopInfluxWriter) NewBatch() InfluxWriterBatch { 22 | return w 23 | } 24 | 25 | func (w *NoopInfluxWriter) EnqueuePoint(ctx context.Context, measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time, vType common.InfluxMetricValueType) error { 26 | return nil 27 | } 28 | 29 | func (w *NoopInfluxWriter) WriteBatch(ctx context.Context) error { 30 | return nil 31 | } 32 | -------------------------------------------------------------------------------- /otel2influx/writer_test.go: -------------------------------------------------------------------------------- 1 | package otel2influx_test 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "go.opentelemetry.io/collector/pdata/pcommon" 8 | 9 | "github.com/influxdata/influxdb-observability/common" 10 | "github.com/influxdata/influxdb-observability/otel2influx" 11 | ) 12 | 13 | type mockPoint struct { 14 | measurement string 15 | tags map[string]string 16 | fields map[string]interface{} 17 | ts time.Time 18 | vType common.InfluxMetricValueType 19 | } 20 | 21 | var _ otel2influx.InfluxWriter = &MockInfluxWriter{} 22 | var _ otel2influx.InfluxWriterBatch = &MockInfluxWriterBatch{} 23 | 24 | type MockInfluxWriter struct { 25 | points []mockPoint 26 | } 27 | 28 | func (w *MockInfluxWriter) NewBatch() otel2influx.InfluxWriterBatch { 29 | return &MockInfluxWriterBatch{w: w} 30 | } 31 | 32 | type MockInfluxWriterBatch struct { 33 | w *MockInfluxWriter 34 | } 35 | 36 | func (b *MockInfluxWriterBatch) EnqueuePoint(ctx context.Context, measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time, vType common.InfluxMetricValueType) error { 37 | b.w.points = append(b.w.points, mockPoint{ 38 | measurement: measurement, 39 | tags: tags, 40 | fields: fields, 41 | ts: ts, 42 | vType: vType, 43 | }) 44 | return nil 45 | } 46 | 47 | func (b *MockInfluxWriterBatch) WriteBatch(ctx context.Context) error { 48 | return nil 49 | } 50 | 51 | var ( 52 | timestamp = pcommon.Timestamp(1395066363000000123) 53 | startTimestamp = pcommon.Timestamp(1395066363000000001) 54 | ) 55 | -------------------------------------------------------------------------------- /otelcol-influxdb/.gitignore: -------------------------------------------------------------------------------- 1 | /build 2 | -------------------------------------------------------------------------------- /otelcol-influxdb/Dockerfile: -------------------------------------------------------------------------------- 1 | #syntax=docker/dockerfile:1.2 2 | FROM golang:1.20-alpine3.16 AS builder 3 | RUN apk --update --no-cache add ca-certificates 4 | ENV CGO_ENABLED 0 5 | 6 | RUN \ 7 | --mount=type=cache,id=influxdb-observability-gocache,sharing=locked,target=/root/.cache/go-build \ 8 | --mount=type=cache,id=influxdb-observability-gomodcache,sharing=locked,target=/go/pkg/mod \ 9 | du -cshx /root/.cache/go-build /go/pkg/mod && \ 10 | go install go.opentelemetry.io/collector/cmd/builder@v0.87.0 && \ 11 | du -cshx /root/.cache/go-build /go/pkg/mod 12 | 13 | COPY . 
/project 14 | WORKDIR /project/otelcol-influxdb 15 | 16 | RUN \ 17 | --mount=type=cache,id=influxdb-observability-gocache,sharing=locked,target=/root/.cache/go-build \ 18 | --mount=type=cache,id=influxdb-observability-gomodcache,sharing=locked,target=/go/pkg/mod \ 19 | du -cshx /root/.cache/go-build /go/pkg/mod && \ 20 | builder --config build.yml && \ 21 | du -cshx /root/.cache/go-build /go/pkg/mod 22 | 23 | FROM scratch 24 | USER 10001 25 | COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 26 | COPY --from=builder --chmod=0755 /project/otelcol-influxdb/build/otelcol-influxdb / 27 | ENTRYPOINT ["/otelcol-influxdb"] 28 | -------------------------------------------------------------------------------- /otelcol-influxdb/README.md: -------------------------------------------------------------------------------- 1 | # OpenTelemetry Collector, InfluxDB Distribution 2 | 3 | **This is experimental software** 4 | 5 | This directory contains tools to build an [OpenTelemetry Collector Distribution](https://opentelemetry.io/docs/concepts/distributions/) with the InfluxDB exporter, and little else. 6 | Its purpose is to be a lightweight alternative to the [OpenTelemetry Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/) Distribution, which includes the InfluxDB plugins, as well as many others. 7 | 8 | ## Docker 9 | Docker images exist at [jacobmarble/otelcol-influxdb](https://hub.docker.com/r/jacobmarble/otelcol-influxdb). 10 | For an example configuration, see [docker-compose.yml](../demo/docker-compose.yml). 11 | 12 | ## Build 13 | 14 | ```console 15 | $ go install go.opentelemetry.io/collector/cmd/builder@latest 16 | ... 17 | $ cd otelcol-influxdb 18 | $ builder --config build.yml 19 | ... 20 | $ ./build/otelcol-influxdb 21 | ... 
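# the built binary is a standard OpenTelemetry Collector, so it takes its
# configuration via --config; for example, assuming a local configuration
# file named otelcol-config.yml:
$ ./build/otelcol-influxdb --config ./otelcol-config.yml
...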
22 | ``` 23 | -------------------------------------------------------------------------------- /otelcol-influxdb/build.yml: -------------------------------------------------------------------------------- 1 | dist: 2 | name: otelcol-influxdb 3 | module: github.com/influxdata/influxdb-observability/otelcol-influxdb 4 | description: OpenTelemetry Collector Distribution built for InfluxDB 5 | version: 0.87.0-0.0.0-beta.0 6 | otelcol_version: 0.87.0 7 | output_path: ./build 8 | 9 | receivers: 10 | - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.87.0 11 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.87.0 12 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.87.0 13 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver v0.87.0 14 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.87.0 15 | 16 | exporters: 17 | - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.87.0 18 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter v0.87.0 19 | 20 | connectors: 21 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.87.0 22 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.87.0 23 | 24 | extensions: 25 | - gomod: go.opentelemetry.io/collector/extension/ballastextension v0.87.0 26 | - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.87.0 27 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension v0.87.0 28 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension v0.87.0 29 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.87.0 30 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.87.0 31 | 32 | processors: 33 | - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.87.0 34 | - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.87.0 35 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0 36 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.87.0 37 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.87.0 38 | 39 | replaces: 40 | - github.com/influxdata/influxdb-observability/common => ../../common 41 | - github.com/influxdata/influxdb-observability/influx2otel => ../../influx2otel 42 | - github.com/influxdata/influxdb-observability/otel2influx => ../../otel2influx 43 | -------------------------------------------------------------------------------- /run-checks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "$0")" 6 | BASEDIR=$(pwd) 7 | 8 | if ! hash go; then 9 | echo "please install go and try again" 10 | exit 1 11 | fi 12 | if ! hash staticcheck; then 13 | echo "installing staticcheck" 14 | if ! go install honnef.co/go/tools/cmd/staticcheck@2023.1.3; then 15 | echo "failed to install staticcheck" 16 | exit 1 17 | fi 18 | fi 19 | if ! hash builder; then 20 | echo "installing the opentelemetry collector builder" 21 | if ! 
go install go.opentelemetry.io/collector/cmd/builder@v0.87.0; then 22 | echo "failed to install the opentelemetry collector builder" 23 | exit 1 24 | fi 25 | fi 26 | 27 | for package in common influx2otel otel2influx jaeger-influxdb tests-integration; do 28 | echo checking ${package} 29 | cd "${BASEDIR}/${package}" 30 | go mod tidy 31 | if ! git diff --exit-code -- go.mod go.sum; then 32 | fail=1 33 | fi 34 | if ! go build ./...; then 35 | fail=1 36 | fi 37 | if ! go test ./...; then 38 | fail=1 39 | fi 40 | if [[ -n $(gofmt -s -l . | head -n 1) ]]; then 41 | fail=1 42 | gofmt -s -d . 43 | fi 44 | if ! go vet ./...; then 45 | fail=1 46 | fi 47 | if ! staticcheck -f stylish ./...; then 48 | fail=1 49 | fi 50 | done 51 | 52 | echo checking otelcol-influxdb 53 | cd "${BASEDIR}/otelcol-influxdb" 54 | if ! builder --config build.yml; then 55 | fail=1 56 | fi 57 | 58 | echo 59 | 60 | if [ -n "$fail" ]; then 61 | echo "at least one check failed" 62 | exit 1 63 | else 64 | echo "all checks OK" 65 | fi 66 | -------------------------------------------------------------------------------- /tests-integration/common_test.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "net" 5 | "testing" 6 | 7 | "github.com/influxdata/influxdb/v2/models" 8 | "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | "go.opentelemetry.io/collector/pdata/pmetric" 12 | ) 13 | 14 | func findOpenTCPPort(t *testing.T) int { 15 | t.Helper() 16 | l, err := net.Listen("tcp", "127.0.0.1:0") 17 | require.NoError(t, err) 18 | port := l.Addr().(*net.TCPAddr).Port 19 | require.NoError(t, l.Close()) 20 | return port 21 | } 22 | 23 | func assertLineprotocolEqual(t *testing.T, expect, got string) bool { 24 | t.Helper() 25 | 26 | expectPoints := parseLineProtocol(t, expect) 27 | gotPoints := parseLineProtocol(t, got) 28 | return assert.Equal(t, expectPoints, gotPoints) 29 | } 30 | 31 | func parseLineProtocol(t *testing.T, line string) map[string]map[string][]models.Fields { 32 | points, err := models.ParsePointsString(line) 33 | require.NoError(t, err) 34 | fieldsByTagsByMeasurement := make(map[string]map[string][]models.Fields) 35 | for _, point := range points { 36 | measurementName := string(point.Name()) 37 | fieldsByTags := fieldsByTagsByMeasurement[measurementName] 38 | if fieldsByTags == nil { 39 | fieldsByTagsByMeasurement[measurementName] = make(map[string][]models.Fields) 40 | fieldsByTags = fieldsByTagsByMeasurement[measurementName] 41 | } 42 | 43 | tags := point.Tags().String() 44 | fields, err := point.Fields() 45 | require.NoError(t, err) 46 | fieldsByTags[tags] = append(fieldsByTags[tags], fields) 47 | } 48 | return fieldsByTagsByMeasurement 49 | } 50 | 51 | func assertMetricsEqual(t *testing.T, expect, got pmetric.Metrics) { 52 | t.Helper() 53 | 54 | assert.NoError(t, 55 | pmetrictest.CompareMetrics(expect, got, 56 | pmetrictest.IgnoreMetricDataPointsOrder(), 57 | pmetrictest.IgnoreMetricsOrder(), 58 | pmetrictest.IgnoreResourceMetricsOrder(), 59 | pmetrictest.IgnoreScopeMetricsOrder(), 60 | pmetrictest.IgnoreSummaryDataPointValueAtQuantileSliceOrder(), 61 | ), 62 | ) 63 | } 64 | -------------------------------------------------------------------------------- /tests-integration/go.mod: -------------------------------------------------------------------------------- 1 | module tests 2 | 3 | go 1.20 4 | 5 | require ( 6 | 
github.com/influxdata/influxdb/v2 v2.6.1 7 | github.com/influxdata/line-protocol/v2 v2.2.1 8 | github.com/influxdata/telegraf v1.27.2 9 | github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter v0.87.0 10 | github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.87.0 11 | github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0 12 | github.com/open-telemetry/opentelemetry-collector-contrib/receiver/influxdbreceiver v0.87.0 13 | github.com/stretchr/testify v1.8.4 14 | go.opentelemetry.io/collector/component v0.87.0 15 | go.opentelemetry.io/collector/confmap v0.87.0 16 | go.opentelemetry.io/collector/consumer v0.87.0 17 | go.opentelemetry.io/collector/exporter v0.87.0 18 | go.opentelemetry.io/collector/extension v0.87.0 19 | go.opentelemetry.io/collector/otelcol v0.87.0 20 | go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 21 | go.opentelemetry.io/collector/processor v0.87.0 22 | go.opentelemetry.io/collector/receiver v0.87.0 23 | go.uber.org/zap v1.26.0 24 | google.golang.org/grpc v1.58.3 25 | ) 26 | 27 | require ( 28 | contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect 29 | github.com/alecthomas/participle v0.4.1 // indirect 30 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect 31 | github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect 32 | github.com/awnumar/memcall v0.1.2 // indirect 33 | github.com/awnumar/memguard v0.22.3 // indirect 34 | github.com/benbjohnson/clock v1.3.3 // indirect 35 | github.com/beorn7/perks v1.0.1 // indirect 36 | github.com/cenkalti/backoff/v4 v4.2.1 // indirect 37 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 38 | github.com/compose-spec/compose-go v1.16.0 // indirect 39 | github.com/coreos/go-semver v0.3.1 // indirect 40 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 41 | github.com/fatih/color v1.15.0 // indirect 42 | github.com/felixge/httpsnoop v1.0.3 // indirect 43 | github.com/fsnotify/fsnotify v1.6.0 // indirect 44 | github.com/go-kit/log v0.2.1 // indirect 45 | github.com/go-logfmt/logfmt v0.6.0 // indirect 46 | github.com/go-logr/logr v1.2.4 // indirect 47 | github.com/go-logr/stdr v1.2.2 // indirect 48 | github.com/go-ole/go-ole v1.2.6 // indirect 49 | github.com/gobwas/glob v0.2.3 // indirect 50 | github.com/gogo/protobuf v1.3.2 // indirect 51 | github.com/golang-jwt/jwt/v5 v5.0.0 // indirect 52 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 53 | github.com/golang/protobuf v1.5.3 // indirect 54 | github.com/golang/snappy v0.0.4 // indirect 55 | github.com/google/cel-go v0.14.1-0.20230424164844-d39523c445fc // indirect 56 | github.com/google/uuid v1.3.1 // indirect 57 | github.com/gosnmp/gosnmp v1.35.1-0.20230602062452-f30602b8dad6 // indirect 58 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect 59 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 60 | github.com/influxdata/influxdb-observability/common v0.5.8 // indirect 61 | github.com/influxdata/influxdb-observability/influx2otel v0.5.8 // indirect 62 | github.com/influxdata/influxdb-observability/otel2influx v0.5.6 // indirect 63 | github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 // indirect 64 | github.com/json-iterator/go v1.1.12 // indirect 65 | github.com/klauspost/compress v1.17.0 // indirect 66 | github.com/klauspost/pgzip v1.2.6 // indirect 67 | github.com/knadh/koanf v1.5.0 // indirect 68 | github.com/knadh/koanf/v2 v2.0.1 // 
indirect 69 | github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect 70 | github.com/mattn/go-colorable v0.1.13 // indirect 71 | github.com/mattn/go-isatty v0.0.19 // indirect 72 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 73 | github.com/mitchellh/copystructure v1.2.0 // indirect 74 | github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect 75 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 76 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 77 | github.com/modern-go/reflect2 v1.0.2 // indirect 78 | github.com/naoina/go-stringutil v0.1.0 // indirect 79 | github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0 // indirect 80 | github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0 // indirect 81 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 82 | github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect 83 | github.com/prometheus/client_golang v1.17.0 // indirect 84 | github.com/prometheus/client_model v0.5.0 // indirect 85 | github.com/prometheus/common v0.44.0 // indirect 86 | github.com/prometheus/procfs v0.11.1 // indirect 87 | github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 // indirect 88 | github.com/prometheus/statsd_exporter v0.22.7 // indirect 89 | github.com/rs/cors v1.10.1 // indirect 90 | github.com/shirou/gopsutil/v3 v3.23.9 // indirect 91 | github.com/shoenig/go-m1cpu v0.1.6 // indirect 92 | github.com/sirupsen/logrus v1.9.3 // indirect 93 | github.com/sleepinggenius2/gosmi v0.4.4 // indirect 94 | github.com/spf13/cobra v1.7.0 // indirect 95 | github.com/spf13/pflag v1.0.5 // indirect 96 | github.com/stoewer/go-strcase v1.2.0 // indirect 97 | github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07 // indirect 98 | github.com/tklauser/go-sysconf v0.3.12 // indirect 99 | github.com/tklauser/numcpus v0.6.1 // indirect 100 | github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect 101 | github.com/yusufpapurcu/wmi v1.2.3 // indirect 102 | go.opencensus.io v0.24.0 // indirect 103 | go.opentelemetry.io/collector v0.87.0 // indirect 104 | go.opentelemetry.io/collector/config/configauth v0.87.0 // indirect 105 | go.opentelemetry.io/collector/config/configcompression v0.87.0 // indirect 106 | go.opentelemetry.io/collector/config/confighttp v0.87.0 // indirect 107 | go.opentelemetry.io/collector/config/configopaque v0.87.0 // indirect 108 | go.opentelemetry.io/collector/config/configtelemetry v0.87.0 // indirect 109 | go.opentelemetry.io/collector/config/configtls v0.87.0 // indirect 110 | go.opentelemetry.io/collector/config/internal v0.87.0 // indirect 111 | go.opentelemetry.io/collector/connector v0.87.0 // indirect 112 | go.opentelemetry.io/collector/extension/auth v0.87.0 // indirect 113 | go.opentelemetry.io/collector/featuregate v1.0.0-rcv0016 // indirect 114 | go.opentelemetry.io/collector/semconv v0.87.0 // indirect 115 | go.opentelemetry.io/collector/service v0.87.0 // indirect 116 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect 117 | go.opentelemetry.io/contrib/propagators/b3 v1.19.0 // indirect 118 | go.opentelemetry.io/otel v1.19.0 // indirect 119 | go.opentelemetry.io/otel/bridge/opencensus v0.42.0 // indirect 120 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect 121 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect 122 | 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect 123 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect 124 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect 125 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect 126 | go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect 127 | go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0 // indirect 128 | go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 // indirect 129 | go.opentelemetry.io/otel/metric v1.19.0 // indirect 130 | go.opentelemetry.io/otel/sdk v1.19.0 // indirect 131 | go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect 132 | go.opentelemetry.io/otel/trace v1.19.0 // indirect 133 | go.opentelemetry.io/proto/otlp v1.0.0 // indirect 134 | go.uber.org/multierr v1.11.0 // indirect 135 | golang.org/x/crypto v0.14.0 // indirect 136 | golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect 137 | golang.org/x/net v0.17.0 // indirect 138 | golang.org/x/sys v0.13.0 // indirect 139 | golang.org/x/text v0.13.0 // indirect 140 | gonum.org/v1/gonum v0.14.0 // indirect 141 | google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect 142 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect 143 | google.golang.org/protobuf v1.31.0 // indirect 144 | gopkg.in/yaml.v2 v2.4.0 // indirect 145 | gopkg.in/yaml.v3 v3.0.1 // indirect 146 | ) 147 | 148 | replace ( 149 | github.com/influxdata/influxdb-observability/common => ../common 150 | github.com/influxdata/influxdb-observability/influx2otel => ../influx2otel 151 | github.com/influxdata/influxdb-observability/otel2influx => ../otel2influx 152 | github.com/influxdata/telegraf => github.com/influxdata/telegraf v0.0.0-20230830233451-76d12e97cabc 153 | github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter => github.com/jacobmarble/opentelemetry-collector-contrib/exporter/influxdbexporter v0.0.0-20230831000419-93c5219f48bd 154 | ) 155 | -------------------------------------------------------------------------------- /tests-integration/helper_telegraf_test.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "log" 8 | "net" 9 | "net/http" 10 | "sort" 11 | "strings" 12 | "testing" 13 | "time" 14 | 15 | "go.opentelemetry.io/collector/pdata/pmetric" 16 | "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" 17 | "google.golang.org/grpc/credentials/insecure" 18 | 19 | "github.com/influxdata/line-protocol/v2/lineprotocol" 20 | "github.com/influxdata/telegraf" 21 | "github.com/influxdata/telegraf/agent" 22 | "github.com/influxdata/telegraf/config" 23 | telegrafmetric "github.com/influxdata/telegraf/metric" 24 | "github.com/influxdata/telegraf/models" 25 | otelinput "github.com/influxdata/telegraf/plugins/inputs/opentelemetry" 26 | "github.com/influxdata/telegraf/plugins/outputs/health" 27 | oteloutput "github.com/influxdata/telegraf/plugins/outputs/opentelemetry" 28 | "github.com/stretchr/testify/assert" 29 | "github.com/stretchr/testify/require" 30 | "go.uber.org/zap/zapcore" 31 | "go.uber.org/zap/zaptest" 32 | "google.golang.org/grpc" 33 | ) 34 | 35 | func assertOtel2InfluxTelegraf(t *testing.T, lp string, telegrafValueType telegraf.ValueType, expect pmetric.Metrics) { 36 | mockInputPlugin, mockOtelService, stopTelegraf := 
setupTelegrafOpenTelemetryOutput(t) 37 | t.Cleanup(stopTelegraf) 38 | 39 | lpdec := lineprotocol.NewDecoder(strings.NewReader(lp)) 40 | for lpdec.Next() { 41 | name, err := lpdec.Measurement() 42 | require.NoError(t, err) 43 | tags := make(map[string]string) 44 | for k, v, _ := lpdec.NextTag(); k != nil; k, v, _ = lpdec.NextTag() { 45 | tags[string(k)] = string(v) 46 | } 47 | fields := make(map[string]interface{}) 48 | for k, v, _ := lpdec.NextField(); k != nil; k, v, _ = lpdec.NextField() { 49 | fields[string(k)] = v.Interface() 50 | } 51 | ts, err := lpdec.Time(lineprotocol.Nanosecond, time.Now()) 52 | require.NoError(t, err) 53 | 54 | m := telegrafmetric.New(string(name), tags, fields, ts, telegrafValueType) 55 | mockInputPlugin.accumulator.AddMetric(m) 56 | } 57 | require.NoError(t, lpdec.Err()) 58 | 59 | stopTelegraf() 60 | 61 | var got pmetric.Metrics 62 | select { 63 | case got = <-mockOtelService.metricss: 64 | case <-time.NewTimer(time.Second).C: 65 | t.Log("test timed out") 66 | t.Fail() 67 | return 68 | } 69 | 70 | assertMetricsEqual(t, expect, got) 71 | } 72 | 73 | func setupTelegrafOpenTelemetryInput(t *testing.T) (*grpc.ClientConn, *mockOutputPlugin, context.CancelFunc) { 74 | t.Helper() 75 | 76 | telegrafConfig := config.NewConfig() 77 | 78 | otelInputAddress := fmt.Sprintf("127.0.0.1:%d", findOpenTCPPort(t)) 79 | inputPlugin := &otelinput.OpenTelemetry{ 80 | ServiceAddress: otelInputAddress, 81 | Timeout: config.Duration(time.Second), 82 | MetricsSchema: "prometheus-v1", 83 | Log: zaptest.NewLogger(t, zaptest.Level(zapcore.InfoLevel)).Sugar(), 84 | } 85 | otelInputConfig := &models.InputConfig{ 86 | Name: "opentelemetry", 87 | } 88 | telegrafConfig.Inputs = append(telegrafConfig.Inputs, models.NewRunningInput(inputPlugin, otelInputConfig)) 89 | 90 | mockOutputPlugin := newMockOutputPlugin() 91 | mockOutputConfig := &models.OutputConfig{ 92 | Name: "mock", 93 | } 94 | healthOutputAddress := fmt.Sprintf("127.0.0.1:%d", findOpenTCPPort(t)) 95 | healthOutputPlugin := health.NewHealth() 96 | healthOutputPlugin.ServiceAddress = "http://" + healthOutputAddress 97 | healthOutputConfig := &models.OutputConfig{ 98 | Name: "health", 99 | } 100 | telegrafConfig.Outputs = append(telegrafConfig.Outputs, 101 | models.NewRunningOutput(mockOutputPlugin, mockOutputConfig, 0, 0), 102 | models.NewRunningOutput(healthOutputPlugin, healthOutputConfig, 0, 0)) 103 | 104 | ag := agent.NewAgent(telegrafConfig) 105 | ctx, stopAgent := context.WithCancel(context.Background()) 106 | 107 | agentDone := make(chan struct{}) 108 | go func(ctx context.Context) { 109 | err := ag.Run(ctx) 110 | assert.NoError(t, err) 111 | close(agentDone) 112 | }(ctx) 113 | t.Cleanup(stopAgent) 114 | 115 | go func() { 116 | select { 117 | case <-agentDone: 118 | return 119 | case <-time.NewTimer(time.Second).C: 120 | t.Log("test timed out") 121 | t.Fail() 122 | stopAgent() 123 | } 124 | }() 125 | 126 | for { // Wait for health check to be green 127 | response, _ := http.Get(fmt.Sprintf("http://%s", healthOutputAddress)) 128 | if response != nil && response.StatusCode/100 == 2 { 129 | break 130 | } 131 | 132 | time.Sleep(10 * time.Millisecond) 133 | select { 134 | case <-agentDone: 135 | return nil, nil, nil 136 | default: 137 | } 138 | } 139 | 140 | clientConn, err := grpc.Dial(otelInputAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) 141 | require.NoError(t, err) 142 | 143 | return clientConn, mockOutputPlugin, stopAgent 144 | } 145 | 146 | var _ telegraf.Output = (*mockOutputPlugin)(nil) 147 | 148 | type 
mockOutputPlugin struct { 149 | metrics chan []telegraf.Metric 150 | } 151 | 152 | func newMockOutputPlugin() *mockOutputPlugin { 153 | return &mockOutputPlugin{ 154 | metrics: make(chan []telegraf.Metric, 10), 155 | } 156 | } 157 | 158 | func (m *mockOutputPlugin) SampleConfig() string { 159 | return "" 160 | } 161 | 162 | func (m *mockOutputPlugin) Description() string { 163 | return "" 164 | } 165 | 166 | func (m *mockOutputPlugin) Connect() error { 167 | return nil 168 | } 169 | 170 | func (m *mockOutputPlugin) Close() error { 171 | return nil 172 | } 173 | 174 | func (m *mockOutputPlugin) Write(metrics []telegraf.Metric) error { 175 | m.metrics <- metrics 176 | return nil 177 | } 178 | 179 | func (m *mockOutputPlugin) lineprotocol(t *testing.T) string { 180 | t.Helper() 181 | 182 | encoder := new(lineprotocol.Encoder) 183 | 184 | select { 185 | case metrics := <-m.metrics: 186 | for _, metric := range metrics { 187 | encoder.StartLine(metric.Name()) 188 | 189 | tagNames := make([]string, 0, len(metric.Tags())) 190 | for k := range metric.Tags() { 191 | tagNames = append(tagNames, k) 192 | } 193 | sort.Strings(tagNames) 194 | for _, k := range tagNames { 195 | encoder.AddTag(k, metric.Tags()[k]) 196 | } 197 | 198 | fieldNames := make([]string, 0, len(metric.Fields())) 199 | for k := range metric.Fields() { 200 | fieldNames = append(fieldNames, k) 201 | } 202 | sort.Strings(fieldNames) 203 | for _, k := range fieldNames { 204 | encoder.AddField(k, lineprotocol.MustNewValue(metric.Fields()[k])) 205 | } 206 | 207 | encoder.EndLine(metric.Time()) 208 | } 209 | case <-time.NewTimer(time.Second).C: 210 | t.Log("test timed out") 211 | t.Fail() 212 | return "" 213 | } 214 | 215 | require.NoError(t, encoder.Err()) 216 | return string(encoder.Bytes()) 217 | } 218 | 219 | func setupTelegrafOpenTelemetryOutput(t *testing.T) (*mockInputPlugin, *mockOtelService, context.CancelFunc) { 220 | t.Helper() 221 | 222 | logWriterToRestore := log.Writer() 223 | log.SetOutput(io.Discard) 224 | t.Cleanup(func() { 225 | log.SetOutput(logWriterToRestore) 226 | }) 227 | telegrafConfig := config.NewConfig() 228 | // telegrafConfig.Agent.Quiet = false 229 | // telegrafConfig.Agent.Debug = true 230 | // telegrafConfig.Agent.LogTarget = "file" 231 | // telegrafConfig.Agent.Logfile = "/dev/null" 232 | 233 | mockInputPlugin := &mockInputPlugin{ 234 | hasStarted: make(chan struct{}), 235 | } 236 | mockInputConfig := &models.InputConfig{ 237 | Name: "mock", 238 | } 239 | telegrafConfig.Inputs = append(telegrafConfig.Inputs, models.NewRunningInput(mockInputPlugin, mockInputConfig)) 240 | 241 | otelOutputAddress := fmt.Sprintf("127.0.0.1:%d", findOpenTCPPort(t)) 242 | otelOutputPlugin := &oteloutput.OpenTelemetry{ 243 | ServiceAddress: otelOutputAddress, 244 | } 245 | otelOutputConfig := &models.OutputConfig{ 246 | Name: "opentelemetry", 247 | } 248 | healthOutputAddress := fmt.Sprintf("127.0.0.1:%d", findOpenTCPPort(t)) 249 | healthOutputPlugin := health.NewHealth() 250 | healthOutputPlugin.ServiceAddress = "http://" + healthOutputAddress 251 | healthOutputConfig := &models.OutputConfig{ 252 | Name: "health", 253 | } 254 | telegrafConfig.Outputs = append(telegrafConfig.Outputs, 255 | models.NewRunningOutput(otelOutputPlugin, otelOutputConfig, 0, 0), 256 | models.NewRunningOutput(healthOutputPlugin, healthOutputConfig, 0, 0)) 257 | 258 | ag := agent.NewAgent(telegrafConfig) 259 | ctx, stopAgent := context.WithCancel(context.Background()) 260 | t.Cleanup(stopAgent) 261 | 262 | mockOtelServiceListener, err := 
net.Listen("tcp", otelOutputAddress) 263 | require.NoError(t, err) 264 | mockOtelService := newMockOtelService() 265 | mockOtelServiceGrpcServer := grpc.NewServer() 266 | pmetricotlp.RegisterGRPCServer(mockOtelServiceGrpcServer, mockOtelService) 267 | 268 | go func() { 269 | err := mockOtelServiceGrpcServer.Serve(mockOtelServiceListener) 270 | assert.NoError(t, err) 271 | }() 272 | t.Cleanup(mockOtelServiceGrpcServer.Stop) 273 | 274 | agentDone := make(chan struct{}) 275 | go func(ctx context.Context) { 276 | err := ag.Run(ctx) 277 | assert.NoError(t, err) 278 | close(agentDone) 279 | }(ctx) 280 | 281 | go func() { 282 | select { 283 | case <-agentDone: 284 | return 285 | case <-time.NewTimer(time.Second).C: 286 | t.Log("test timed out") 287 | t.Fail() 288 | stopAgent() 289 | } 290 | }() 291 | 292 | for { // Wait for health check to be green 293 | response, _ := http.Get(fmt.Sprintf("http://%s", healthOutputAddress)) 294 | if response != nil && response.StatusCode/100 == 2 { 295 | break 296 | } 297 | 298 | time.Sleep(10 * time.Millisecond) 299 | select { 300 | case <-agentDone: 301 | return nil, nil, nil 302 | default: 303 | } 304 | } 305 | 306 | // Wait for input plugin to be started (to prevent race condition) 307 | select { 308 | case <-mockInputPlugin.hasStarted: 309 | case <-time.After(time.Second): 310 | t.Fatal("mock input plugin not started") 311 | } 312 | 313 | return mockInputPlugin, mockOtelService, stopAgent 314 | } 315 | 316 | var _ telegraf.ServiceInput = (*mockInputPlugin)(nil) 317 | 318 | type mockInputPlugin struct { 319 | hasStarted chan struct{} 320 | accumulator telegraf.Accumulator 321 | } 322 | 323 | func (m *mockInputPlugin) Start(accumulator telegraf.Accumulator) error { 324 | m.accumulator = accumulator 325 | close(m.hasStarted) 326 | return nil 327 | } 328 | 329 | func (m *mockInputPlugin) Stop() { 330 | } 331 | 332 | func (m *mockInputPlugin) SampleConfig() string { 333 | return "" 334 | } 335 | 336 | func (m *mockInputPlugin) Description() string { 337 | return "" 338 | } 339 | 340 | func (m *mockInputPlugin) Gather(accumulator telegraf.Accumulator) error { 341 | return nil 342 | } 343 | 344 | var _ pmetricotlp.GRPCServer = (*mockOtelService)(nil) 345 | 346 | type mockOtelService struct { 347 | pmetricotlp.GRPCClient 348 | metricss chan pmetric.Metrics 349 | } 350 | 351 | func newMockOtelService() *mockOtelService { 352 | return &mockOtelService{ 353 | metricss: make(chan pmetric.Metrics), 354 | } 355 | } 356 | 357 | func (m *mockOtelService) Export(ctx context.Context, request pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) { 358 | clone := pmetric.NewMetrics() 359 | request.Metrics().CopyTo(clone) 360 | m.metricss <- clone 361 | return pmetricotlp.NewExportResponse(), nil 362 | } 363 | -------------------------------------------------------------------------------- /tests-integration/influx2otel_test.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "strings" 7 | "testing" 8 | "time" 9 | 10 | "go.opentelemetry.io/collector/pdata/pcommon" 11 | "go.opentelemetry.io/collector/pdata/pmetric" 12 | 13 | "github.com/influxdata/telegraf" 14 | "github.com/stretchr/testify/assert" 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | func TestInflux2Otel(t *testing.T) { 19 | for i, mt := range metricTests { 20 | t.Run(fmt.Sprint(i), func(t *testing.T) { 21 | t.Run("otelcol", func(t *testing.T) { 22 | otelcolReceiverAddress, mockExporterFactory := 
setupOtelcolInfluxDBReceiver(t) 23 | 24 | response, err := http.Post(fmt.Sprintf("http://%s/write", otelcolReceiverAddress), "", strings.NewReader(mt.lp)) 25 | require.NoError(t, err) 26 | require.Equal(t, 2, response.StatusCode/100) 27 | 28 | got := mockExporterFactory.consumedMetrics 29 | expect := mt.otel 30 | assertMetricsEqual(t, expect, got) 31 | }) 32 | 33 | t.Run("telegraf", func(t *testing.T) { 34 | assertOtel2InfluxTelegraf(t, mt.lp, telegraf.Untyped, mt.otel) 35 | }) 36 | }) 37 | } 38 | } 39 | 40 | func TestInflux2Otel_nowtime(t *testing.T) { 41 | t.Run("otelcol", func(t *testing.T) { 42 | otelcolReceiverAddress, mockExporterFactory := setupOtelcolInfluxDBReceiver(t) 43 | 44 | lp := ` 45 | cpu_temp,foo=bar gauge=87.332 46 | ` 47 | 48 | response, err := http.Post(fmt.Sprintf("http://%s/write", otelcolReceiverAddress), "", strings.NewReader(lp)) 49 | require.NoError(t, err) 50 | assert.Equal(t, 2, response.StatusCode/100) 51 | 52 | gotTime := mockExporterFactory.consumedMetrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Timestamp().AsTime() 53 | assert.WithinDuration(t, time.Now(), gotTime, time.Second) 54 | }) 55 | } 56 | 57 | func TestInflux2Otel_unknownSchema(t *testing.T) { 58 | t.Run("telegraf", func(t *testing.T) { 59 | lp := ` 60 | cpu,cpu=cpu4,host=777348dc6343 usage_user=0.10090817356207936,usage_system=0.3027245206862381,usage_iowait=0,invalid="ignored" 1395066363000000123 61 | ` 62 | 63 | expect := pmetric.NewMetrics() 64 | metrics := expect.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() 65 | metric := metrics.AppendEmpty() 66 | metric.SetName("cpu_usage_iowait") 67 | metric.SetEmptyGauge() 68 | dp := metric.Gauge().DataPoints().AppendEmpty() 69 | dp.Attributes().PutStr("cpu", "cpu4") 70 | dp.Attributes().PutStr("host", "777348dc6343") 71 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 72 | dp.SetDoubleValue(0.0) 73 | metric = metrics.AppendEmpty() 74 | metric.SetName("cpu_usage_system") 75 | metric.SetEmptyGauge() 76 | dp = metric.Gauge().DataPoints().AppendEmpty() 77 | dp.Attributes().PutStr("cpu", "cpu4") 78 | dp.Attributes().PutStr("host", "777348dc6343") 79 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 80 | dp.SetDoubleValue(0.3027245206862381) 81 | metric = metrics.AppendEmpty() 82 | metric.SetName("cpu_usage_user") 83 | metric.SetEmptyGauge() 84 | dp = metric.Gauge().DataPoints().AppendEmpty() 85 | dp.Attributes().PutStr("cpu", "cpu4") 86 | dp.Attributes().PutStr("host", "777348dc6343") 87 | dp.SetTimestamp(pcommon.Timestamp(1395066363000000123)) 88 | dp.SetDoubleValue(0.10090817356207936) 89 | 90 | assertOtel2InfluxTelegraf(t, lp, telegraf.Untyped, expect) 91 | }) 92 | } 93 | 94 | func TestInflux2Otel_gaugeNonPrometheus(t *testing.T) { 95 | t.Run("telegraf", func(t *testing.T) { 96 | lp := ` 97 | swap,host=8eaaf6b73054 used_percent=1.5,total=1073737728i 1626302080000000000 98 | ` 99 | expect := pmetric.NewMetrics() 100 | metrics := expect.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() 101 | metric := metrics.AppendEmpty() 102 | metric.SetName("swap_used_percent") 103 | metric.SetEmptyGauge() 104 | dp := metric.Gauge().DataPoints().AppendEmpty() 105 | dp.Attributes().PutStr("host", "8eaaf6b73054") 106 | dp.SetTimestamp(pcommon.Timestamp(1626302080000000000)) 107 | dp.SetDoubleValue(1.5) 108 | metric = metrics.AppendEmpty() 109 | metric.SetName("swap_total") 110 | metric.SetEmptyGauge() 111 | dp = metric.Gauge().DataPoints().AppendEmpty() 112 | 
dp.Attributes().PutStr("host", "8eaaf6b73054") 113 | dp.SetTimestamp(pcommon.Timestamp(1626302080000000000)) 114 | dp.SetIntValue(1073737728) 115 | 116 | assertOtel2InfluxTelegraf(t, lp, telegraf.Gauge, expect) 117 | }) 118 | } 119 | 120 | func TestInflux2Otel_counterNonPrometheus(t *testing.T) { 121 | t.Run("telegraf", func(t *testing.T) { 122 | lp := ` 123 | swap,host=8eaaf6b73054 in=32768i,out=12021760i 1626302080000000000 124 | ` 125 | expect := pmetric.NewMetrics() 126 | metrics := expect.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() 127 | metric := metrics.AppendEmpty() 128 | metric.SetName("swap_in") 129 | metric.SetEmptySum() 130 | metric.Sum().SetIsMonotonic(true) 131 | metric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) 132 | dp := metric.Sum().DataPoints().AppendEmpty() 133 | dp.Attributes().PutStr("host", "8eaaf6b73054") 134 | dp.SetTimestamp(pcommon.Timestamp(1626302080000000000)) 135 | dp.SetIntValue(32768) 136 | metric = metrics.AppendEmpty() 137 | metric.SetName("swap_out") 138 | metric.SetEmptySum() 139 | metric.Sum().SetIsMonotonic(true) 140 | metric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) 141 | dp = metric.Sum().DataPoints().AppendEmpty() 142 | dp.Attributes().PutStr("host", "8eaaf6b73054") 143 | dp.SetTimestamp(pcommon.Timestamp(1626302080000000000)) 144 | dp.SetIntValue(12021760) 145 | 146 | assertOtel2InfluxTelegraf(t, lp, telegraf.Counter, expect) 147 | }) 148 | } 149 | -------------------------------------------------------------------------------- /tests-integration/otel2influx_test.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "testing" 8 | 9 | "go.opentelemetry.io/collector/pdata/plog" 10 | "go.opentelemetry.io/collector/pdata/plog/plogotlp" 11 | "go.opentelemetry.io/collector/pdata/pmetric" 12 | "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" 13 | "go.opentelemetry.io/collector/pdata/ptrace" 14 | "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" 15 | 16 | "github.com/stretchr/testify/assert" 17 | "github.com/stretchr/testify/require" 18 | ) 19 | 20 | func TestOtel2Influx(t *testing.T) { 21 | t.Run("metrics", func(t *testing.T) { 22 | for i, mt := range metricTests { 23 | t.Run(fmt.Sprint(i), func(t *testing.T) { 24 | t.Run("otelcol", func(t *testing.T) { 25 | mockDestination, mockReceiverFactory, closeOtelcol, err := setupOtelcolInfluxDBExporter(t) 26 | require.NoError(t, err) 27 | t.Cleanup(mockDestination.Close) 28 | 29 | clone := pmetric.NewMetrics() 30 | mt.otel.CopyTo(clone) 31 | err = mockReceiverFactory.nextMetricsConsumer.ConsumeMetrics(context.Background(), clone) 32 | require.NoError(t, err) 33 | 34 | got := mockReceiverFactory.lineprotocol(t) 35 | 36 | assertLineprotocolEqual(t, mt.lp, got) 37 | closeOtelcol(t) 38 | }) 39 | 40 | t.Run("telegraf", func(t *testing.T) { 41 | clientConn, mockOutputPlugin, stopTelegraf := setupTelegrafOpenTelemetryInput(t) 42 | metricsClient := pmetricotlp.NewGRPCClient(clientConn) 43 | 44 | clone := pmetric.NewMetrics() 45 | mt.otel.CopyTo(clone) 46 | request := pmetricotlp.NewExportRequestFromMetrics(clone) 47 | _, err := metricsClient.Export(context.Background(), request) 48 | if err != nil { 49 | // TODO not sure why the service returns this error, but the data arrives as required by the test 50 | // rpc error: code = Internal desc = grpc: error while marshaling: proto: Marshal called with nil 51 | if 
!strings.Contains(err.Error(), "proto: Marshal called with nil") { 52 | assert.NoError(t, err) 53 | } 54 | } 55 | 56 | stopTelegraf() // flush telegraf buffers 57 | got := mockOutputPlugin.lineprotocol(t) 58 | 59 | assertLineprotocolEqual(t, mt.lp, got) 60 | }) 61 | }) 62 | } 63 | }) 64 | 65 | t.Run("traces", func(t *testing.T) { 66 | for i, tt := range traceTests { 67 | t.Run(fmt.Sprint(i), func(t *testing.T) { 68 | t.Run("otelcol", func(t *testing.T) { 69 | mockDestination, mockReceiverFactory, closeOtelcol, err := setupOtelcolInfluxDBExporter(t) 70 | require.NoError(t, err) 71 | t.Cleanup(mockDestination.Close) 72 | 73 | clone := ptrace.NewTraces() 74 | tt.otel.CopyTo(clone) 75 | err = mockReceiverFactory.nextTracesConsumer.ConsumeTraces(context.Background(), clone) 76 | require.NoError(t, err) 77 | 78 | got := mockReceiverFactory.lineprotocol(t) 79 | 80 | assertLineprotocolEqual(t, tt.lp, got) 81 | closeOtelcol(t) 82 | }) 83 | 84 | t.Run("telegraf", func(t *testing.T) { 85 | clientConn, mockOutputPlugin, stopTelegraf := setupTelegrafOpenTelemetryInput(t) 86 | tracesClient := ptraceotlp.NewGRPCClient(clientConn) 87 | 88 | clone := ptrace.NewTraces() 89 | tt.otel.CopyTo(clone) 90 | request := ptraceotlp.NewExportRequestFromTraces(clone) 91 | _, err := tracesClient.Export(context.Background(), request) 92 | require.NoError(t, err) 93 | 94 | stopTelegraf() // flush telegraf buffers 95 | got := mockOutputPlugin.lineprotocol(t) 96 | 97 | assertLineprotocolEqual(t, tt.lp, got) 98 | }) 99 | }) 100 | } 101 | }) 102 | 103 | t.Run("logs", func(t *testing.T) { 104 | for i, lt := range logTests { 105 | t.Run(fmt.Sprint(i), func(t *testing.T) { 106 | t.Run("otelcol", func(t *testing.T) { 107 | mockDestination, mockReceiverFactory, closeOtelcol, err := setupOtelcolInfluxDBExporter(t) 108 | require.NoError(t, err) 109 | t.Cleanup(mockDestination.Close) 110 | 111 | clone := plog.NewLogs() 112 | lt.otel.CopyTo(clone) 113 | err = mockReceiverFactory.nextLogsConsumer.ConsumeLogs(context.Background(), clone) 114 | require.NoError(t, err) 115 | 116 | got := mockReceiverFactory.lineprotocol(t) 117 | 118 | assertLineprotocolEqual(t, lt.lp, got) 119 | closeOtelcol(t) 120 | }) 121 | 122 | t.Run("telegraf", func(t *testing.T) { 123 | clientConn, mockOutputPlugin, stopTelegraf := setupTelegrafOpenTelemetryInput(t) 124 | logsClient := plogotlp.NewGRPCClient(clientConn) 125 | 126 | clone := plog.NewLogs() 127 | lt.otel.CopyTo(clone) 128 | request := plogotlp.NewExportRequestFromLogs(clone) 129 | _, err := logsClient.Export(context.Background(), request) 130 | require.NoError(t, err) 131 | 132 | stopTelegraf() // flush telegraf buffers 133 | got := mockOutputPlugin.lineprotocol(t) 134 | 135 | assertLineprotocolEqual(t, lt.lp, got) 136 | }) 137 | }) 138 | } 139 | }) 140 | } 141 | -------------------------------------------------------------------------------- /tests-integration/test_fodder.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "go.opentelemetry.io/collector/pdata/pcommon" 5 | "go.opentelemetry.io/collector/pdata/plog" 6 | "go.opentelemetry.io/collector/pdata/pmetric" 7 | "go.opentelemetry.io/collector/pdata/ptrace" 8 | ) 9 | 10 | type metricTest struct { 11 | otel pmetric.Metrics 12 | lp string 13 | } 14 | 15 | type traceTest struct { 16 | otel ptrace.Traces 17 | lp string 18 | } 19 | 20 | type logTest struct { 21 | otel plog.Logs 22 | lp string 23 | } 24 | 25 | var ( 26 | metricTests []metricTest 27 | traceTests []traceTest 28 | 
logTests []logTest 29 | ) 30 | 31 | func init() { 32 | { 33 | metrics := pmetric.NewMetrics() 34 | rm := metrics.ResourceMetrics().AppendEmpty() 35 | isMetrics := rm.ScopeMetrics().AppendEmpty() 36 | m := isMetrics.Metrics().AppendEmpty() 37 | m.SetName("cpu_temp") 38 | m.SetEmptyGauge() 39 | dp := m.Gauge().DataPoints().AppendEmpty() 40 | dp.Attributes().PutStr("foo", "bar") 41 | dp.SetTimestamp(pcommon.Timestamp(1622848686000000000)) 42 | dp.SetDoubleValue(87.332) 43 | dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) 44 | m = isMetrics.Metrics().AppendEmpty() 45 | m.SetName("http_request_duration_seconds") 46 | m.SetEmptyHistogram() 47 | m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) 48 | dp2 := m.Histogram().DataPoints().AppendEmpty() 49 | dp2.Attributes().PutStr("region", "eu") 50 | dp2.SetTimestamp(pcommon.Timestamp(1622848686000000000)) 51 | dp2.SetCount(144320) 52 | dp2.SetSum(53423) 53 | dp2.ExplicitBounds().FromRaw([]float64{0.05, 0.1, 0.2, 0.5, 1}) 54 | dp2.BucketCounts().FromRaw([]uint64{24054, 9390, 66948, 28997, 4599, 10332}) 55 | dp2.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(false)) 56 | m = isMetrics.Metrics().AppendEmpty() 57 | m.SetName("http_requests_total") 58 | m.SetEmptySum() 59 | m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) 60 | m.Sum().SetIsMonotonic(true) 61 | dp = m.Sum().DataPoints().AppendEmpty() 62 | dp.Attributes().PutStr("method", "post") 63 | dp.Attributes().PutStr("code", "200") 64 | dp.SetTimestamp(pcommon.Timestamp(1622848686000000000)) 65 | dp.SetDoubleValue(1027) 66 | dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) 67 | dp = m.Sum().DataPoints().AppendEmpty() 68 | dp.Attributes().PutStr("method", "post") 69 | dp.Attributes().PutStr("code", "400") 70 | dp.SetTimestamp(pcommon.Timestamp(1622848686000000000)) 71 | dp.SetDoubleValue(3) 72 | dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(false)) 73 | 74 | metricTests = append(metricTests, metricTest{ 75 | otel: metrics, 76 | lp: ` 77 | cpu_temp,foo=bar gauge=87.332,flags=1u 1622848686000000000 78 | http_request_duration_seconds,region=eu count=144320,sum=53423,flags=0u,0.05=24054,0.1=33444,0.2=100392,0.5=129389,1=133988,+Inf=144320 1622848686000000000 79 | http_requests_total,code=200,method=post counter=1027,flags=1u 1622848686000000000 80 | http_requests_total,code=400,method=post counter=3,flags=0u 1622848686000000000 81 | `, 82 | }) 83 | } 84 | 85 | { 86 | traces := ptrace.NewTraces() 87 | rs := traces.ResourceSpans().AppendEmpty() 88 | ilSpan := rs.ScopeSpans().AppendEmpty() 89 | span := ilSpan.Spans().AppendEmpty() 90 | span.SetName("cpu_temp") 91 | span.SetTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1}) 92 | span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3}) 93 | span.SetKind(ptrace.SpanKindInternal) 94 | span.SetStartTimestamp(pcommon.Timestamp(1622848000000000000)) 95 | span.SetEndTimestamp(pcommon.Timestamp(1622848100000000000)) 96 | span.Attributes().PutBool("k", true) 97 | span.SetDroppedAttributesCount(7) 98 | event := span.Events().AppendEmpty() 99 | event.SetName("yay-event") 100 | event.SetTimestamp(pcommon.Timestamp(1622848000000000001)) 101 | event.Attributes().PutStr("foo", "bar") 102 | event.SetDroppedAttributesCount(5) 103 | span.SetDroppedEventsCount(13) 104 | link := span.Links().AppendEmpty() 105 | link.SetTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 2}) 106 | link.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3}) 107 | 
link.Attributes().PutInt("yay-link", 123) 108 | link.SetDroppedAttributesCount(19) 109 | span.SetDroppedLinksCount(17) 110 | span = ilSpan.Spans().AppendEmpty() 111 | span.SetName("http_request") 112 | span.SetTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1}) 113 | span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 4}) 114 | span.SetParentSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3}) 115 | span.SetKind(ptrace.SpanKindClient) 116 | span.SetStartTimestamp(pcommon.Timestamp(1622848000000000002)) 117 | span.SetEndTimestamp(pcommon.Timestamp(1622848000000000005)) 118 | span = ilSpan.Spans().AppendEmpty() 119 | span.SetName("process_batch") 120 | span.SetTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 2}) 121 | span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 5}) 122 | span.SetKind(ptrace.SpanKindConsumer) 123 | span.SetStartTimestamp(pcommon.Timestamp(1622848000000000010)) 124 | span.SetEndTimestamp(pcommon.Timestamp(1622848000000000012)) 125 | 126 | traceTests = append(traceTests, traceTest{ 127 | otel: traces, 128 | lp: ` 129 | spans,span_id=0000000000000003,trace_id=00000000000000020000000000000001 duration_nano=100000000000i,end_time_unix_nano=1622848100000000000i,span.kind="Internal",attributes="{\"k\":true}",dropped_attributes_count=7u,dropped_events_count=13u,dropped_links_count=17u,span.name="cpu_temp" 1622848000000000000 130 | logs,span_id=0000000000000003,trace_id=00000000000000020000000000000001 attributes="{\"foo\":\"bar\"}",dropped_attributes_count=5u,event.name="yay-event" 1622848000000000001 131 | span-links,linked_span_id=0000000000000003,linked_trace_id=00000000000000020000000000000002,span_id=0000000000000003,trace_id=00000000000000020000000000000001 dropped_attributes_count=19u,attributes="{\"yay-link\":123}" 1622848000000000000 132 | spans,span_id=0000000000000004,trace_id=00000000000000020000000000000001 duration_nano=3i,end_time_unix_nano=1622848000000000005i,span.kind="Client",parent_span_id="0000000000000003",span.name="http_request" 1622848000000000002 133 | spans,span_id=0000000000000005,trace_id=00000000000000020000000000000002 duration_nano=2i,end_time_unix_nano=1622848000000000012i,span.kind="Consumer",span.name="process_batch" 1622848000000000010 134 | `, 135 | }) 136 | } 137 | 138 | { 139 | logs := plog.NewLogs() 140 | rl := logs.ResourceLogs().AppendEmpty() 141 | ilLog := rl.ScopeLogs().AppendEmpty() 142 | log := ilLog.LogRecords().AppendEmpty() 143 | log.SetTimestamp(pcommon.Timestamp(1622848686000000000)) 144 | log.SetSeverityNumber(plog.SeverityNumberInfo) 145 | log.SetSeverityText("info") 146 | log.Body().SetStr("something-happened") 147 | log.Attributes().PutBool("k", true) 148 | log.SetDroppedAttributesCount(5) 149 | log.SetTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1}) 150 | log.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3}) 151 | log.SetFlags(plog.DefaultLogRecordFlags.WithIsSampled(true)) 152 | 153 | logTests = append(logTests, logTest{ 154 | otel: logs, 155 | lp: ` 156 | logs,span_id=0000000000000003,trace_id=00000000000000020000000000000001 body="something-happened",attributes="{\"k\":true}",dropped_attributes_count=5u,flags=1u,severity_number=9i,severity_text="info" 1622848686000000000 157 | `, 158 | }) 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /update-deps.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "$0")" 6 | BASEDIR=$(pwd) 7 | 8 | for module in common 
influx2otel otel2influx jaeger-influxdb tests-integration; do 9 | cd ${BASEDIR}/${module} 10 | go mod tidy 11 | go list -f '{{range .Imports}}{{.}} 12 | {{end}} 13 | {{range .TestImports}}{{.}} 14 | {{end}} 15 | {{range .XTestImports}}{{.}} 16 | {{end}}' ./... | sort | uniq | grep 'github.com/open-telemetry\|go.opentelemetry.io\|github.com/jaegertracing/jaeger\|github.com/influxdata/influxdb-observability' | xargs go get -t 17 | go mod tidy 18 | 19 | done 20 | 21 | cd ${BASEDIR} 22 | --------------------------------------------------------------------------------