├── aggregators ├── testdata │ └── .gitkeep ├── nullable │ ├── doc.go │ └── bool.go ├── internal │ ├── protohash │ │ ├── doc.go │ │ ├── generated.go │ │ └── generate │ │ │ └── main.go │ ├── constraint │ │ └── constraint.go │ ├── telemetry │ │ ├── config.go │ │ ├── config_test.go │ │ ├── metrics_test.go │ │ └── metrics.go │ └── hdrhistogram │ │ ├── hdrhistogram_test.go │ │ └── hdrhistogram.go ├── logging.go ├── cachedeventsmap.go ├── ndjson_bench_test.go ├── codec_test.go ├── config_test.go ├── config.go ├── models.go ├── combined_metrics_test.go ├── merger.go └── codec.go ├── CODEOWNERS ├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ ├── add-to-project.yaml │ └── ci.yml ├── NOTICE.txt ├── CODE_OF_CONDUCT.md ├── proto ├── buf.yaml ├── labels.proto └── aggregation.proto ├── aggregationpb ├── doc.go ├── labels.pb.go └── labels_vtproto.pb.go ├── .gitignore ├── tools └── install-protoc.sh ├── Makefile ├── LICENSE.txt ├── go.mod ├── README.md └── go.sum /aggregators/testdata/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @elastic/obs-ds-intake-services 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | /.github/workflows @elastic/observablt-ci -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | Elastic APM Aggregation 2 | Copyright 2023-2023 Elasticsearch B.V. 
3 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | 303 See Other 2 | 3 | Location: https://www.elastic.co/community/codeofconduct 4 | -------------------------------------------------------------------------------- /proto/buf.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | lint: 3 | use: 4 | - DEFAULT 5 | except: 6 | - PACKAGE_DIRECTORY_MATCH 7 | breaking: 8 | use: 9 | - WIRE 10 | -------------------------------------------------------------------------------- /aggregators/nullable/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | // Package nullable contains nullable types. 6 | package nullable 7 | -------------------------------------------------------------------------------- /aggregationpb/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | // Package aggregationpb holds all the generated code from protobuf definitions 6 | // held in the `proto` folder. 7 | package aggregationpb 8 | -------------------------------------------------------------------------------- /aggregators/internal/protohash/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | //go:generate go run ./generate 6 | 7 | // Package protohash holds functions for hashing aggregationpb.*Key types. 8 | package protohash 9 | -------------------------------------------------------------------------------- /proto/labels.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package elastic.apm.v1; 4 | 5 | option go_package = "./aggregationpb"; 6 | option optimize_for = SPEED; 7 | 8 | message GlobalLabels { 9 | repeated Label labels = 1; 10 | repeated NumericLabel numeric_labels = 2; 11 | } 12 | 13 | message Label { 14 | string key = 1; 15 | string value = 2; 16 | repeated string values = 3; 17 | } 18 | 19 | message NumericLabel { 20 | string key = 1; 21 | double value = 2; 22 | repeated double values = 3; 23 | } 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | bin 8 | testbin/* 9 | __debug_bin 10 | 11 | # Test binary, build with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # editor and IDE paraphernalia 18 | .idea 19 | *.swp 20 | *.sw 21 | *.iml 22 | *~ 23 | *.DS_Store 24 | 25 | build 26 | 27 | # git ignore ndjson testdata files to avoid duplication across repos 28 | # until we figure out a better way to do so 29 | /aggregators/testdata/*.ndjson 30 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Go dependencies 4 | - 
package-ecosystem: "gomod" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | groups: 9 | otel: 10 | patterns: 11 | - "go.opentelemetry.io/*" 12 | golang.org/x/: 13 | patterns: 14 | - "golang.org/x/*" 15 | go-agent: 16 | patterns: 17 | - "go.elastic.co/apm*" 18 | # GitHub actions 19 | - package-ecosystem: "github-actions" 20 | directory: "/" 21 | schedule: 22 | interval: "weekly" 23 | day: "sunday" 24 | time: "22:00" 25 | groups: 26 | github-actions: 27 | patterns: 28 | - "*" 29 | -------------------------------------------------------------------------------- /aggregators/logging.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | package aggregators 6 | 7 | import ( 8 | "go.opentelemetry.io/otel/attribute" 9 | "go.uber.org/zap" 10 | ) 11 | 12 | // otelKVsToZapFields converts []attribute.KeyValue to []zap.Field. 13 | // Designed to work with CombinedMetricsIDToKVs for logging. 
14 | func otelKVsToZapFields(kvs []attribute.KeyValue) []zap.Field { 15 | if kvs == nil { 16 | return nil 17 | } 18 | fields := make([]zap.Field, len(kvs)) 19 | for i, kv := range kvs { 20 | fields[i] = zap.Any(string(kv.Key), kv.Value.AsInterface()) 21 | } 22 | return fields 23 | } 24 | -------------------------------------------------------------------------------- /.github/workflows/add-to-project.yaml: -------------------------------------------------------------------------------- 1 | name: Add new issues to the project board 2 | 3 | on: 4 | issues: 5 | types: 6 | - opened 7 | - transferred 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | add-to-project: 14 | name: Add issue to project 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Get token 18 | id: get_token 19 | uses: actions/create-github-app-token@v2 20 | with: 21 | app-id: ${{ secrets.OBS_AUTOMATION_APP_ID }} 22 | private-key: ${{ secrets.OBS_AUTOMATION_APP_PEM }} 23 | permission-organization-projects: write 24 | permission-issues: read 25 | - uses: actions/add-to-project@v1.0.2 26 | with: 27 | project-url: https://github.com/orgs/elastic/projects/1286 28 | github-token: ${{ steps.get_token.outputs.token }} 29 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: ~ 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | lint: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v6 16 | - uses: actions/setup-go@v6 17 | with: 18 | go-version-file: go.mod 19 | cache: true 20 | - run: make lint 21 | - run: make fmt 22 | - name: Verify repo is up-to-date 23 | run: | 24 | if [ -n "$(git status --porcelain)" ]; then 25 | echo 'Updates required:' 26 | git status 27 | exit 1 28 | fi 29 | 30 | run-tests: 31 | runs-on: ubuntu-latest 32 | steps: 33 | - uses: 
// Constraint tracks a counter against a fixed upper limit. It is used by
// aggregators to cap the cardinality of aggregation groups.
//
// Constraint is not safe for concurrent use; callers must provide their
// own synchronization if needed.
type Constraint struct {
	counter int
	limit   int
}

// New creates a Constraint with the given initial counter value and limit.
func New(initialCount, limit int) *Constraint {
	return &Constraint{
		counter: initialCount,
		limit:   limit,
	}
}

// Maxed reports whether the counter has reached or exceeded the limit.
func (c *Constraint) Maxed() bool {
	return c.counter >= c.limit
}

// Add increments the counter by delta. It does not enforce the limit;
// callers should consult Maxed before adding if enforcement is required.
func (c *Constraint) Add(delta int) {
	c.counter += delta
}

// Value returns the current counter value.
func (c *Constraint) Value() int {
	return c.counter
}

// Limit returns the configured limit.
func (c *Constraint) Limit() int {
	return c.limit
}
// Bool is a three-valued boolean that can represent an unset state.
// It is backed by uint32 because that is the smallest protobuf scalar type.
type Bool uint32

const (
	// Nil represents an unset bool value.
	Nil Bool = iota
	// False represents a false bool value.
	False
	// True represents a true bool value.
	True
)

// ParseBoolPtr sets the nullable bool from a *bool: nil maps to Nil,
// otherwise the pointed-to value maps to True or False.
func (nb *Bool) ParseBoolPtr(b *bool) {
	switch {
	case b == nil:
		*nb = Nil
	case *b:
		*nb = True
	default:
		*nb = False
	}
}

// ToBoolPtr converts the nullable bool back to a *bool, returning nil
// when the receiver is nil or holds the Nil value.
func (nb *Bool) ToBoolPtr() *bool {
	if nb == nil {
		return nil
	}
	switch *nb {
	case True:
		b := true
		return &b
	case False:
		b := false
		return &b
	default:
		// Nil (or any out-of-range value other than True/False).
		return nil
	}
}
Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | package telemetry 6 | 7 | import ( 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "go.opentelemetry.io/otel" 12 | "go.opentelemetry.io/otel/sdk/metric" 13 | ) 14 | 15 | func TestConfig(t *testing.T) { 16 | custom := metric.NewMeterProvider().Meter("test") 17 | for _, tt := range []struct { 18 | name string 19 | options []Option 20 | expected func() *config 21 | }{ 22 | { 23 | name: "empty_config", 24 | options: nil, 25 | expected: func() *config { 26 | return &config{ 27 | Meter: otel.GetMeterProvider().Meter("aggregators"), 28 | } 29 | }, 30 | }, 31 | { 32 | name: "config_with_custom_meter_provider", 33 | options: []Option{WithMeter(custom)}, 34 | expected: func() *config { 35 | return &config{ 36 | Meter: custom, 37 | } 38 | }, 39 | }, 40 | } { 41 | t.Run(tt.name, func(t *testing.T) { 42 | cfg := newConfig(tt.options...) 43 | 44 | assert.Equal(t, tt.expected(), cfg) 45 | }) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | all: test 3 | 4 | fmt: 5 | @go tool github.com/elastic/go-licenser -license=Elasticv2 . 6 | @go tool golang.org/x/tools/cmd/goimports -local github.com/elastic/ -w . 7 | 8 | lint: 9 | go mod tidy -diff 10 | go tool honnef.co/go/tools/cmd/staticcheck -checks=all ./... 11 | 12 | protolint: 13 | docker run --volume "$(PWD):/workspace" --workdir /workspace bufbuild/buf lint proto 14 | docker run --volume "$(PWD):/workspace" --workdir /workspace bufbuild/buf breaking proto --against https://github.com/elastic/apm-aggregation.git#branch=main,subdir=proto 15 | 16 | .PHONY: clean 17 | clean: 18 | rm -fr bin build 19 | 20 | .PHONY: test 21 | test: go.mod 22 | go test -v -race ./... 
23 | 24 | ############################################################################## 25 | # Protobuf generation 26 | ############################################################################## 27 | 28 | GITROOT ?= $(shell git rev-parse --show-toplevel) 29 | GOOSBUILD:=$(GITROOT)/build/$(shell go env GOOS) 30 | PROTOC=$(GOOSBUILD)/protoc/bin/protoc 31 | PROTOC_GEN_GO_VTPROTO=$(GOOSBUILD)/protoc-gen-go-vtproto 32 | PROTOC_GEN_GO=$(GOOSBUILD)/protoc-gen-go 33 | 34 | $(PROTOC): 35 | @./tools/install-protoc.sh 36 | 37 | $(PROTOC_GEN_GO_VTPROTO): 38 | GOBIN=$(GOOSBUILD) go install github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto 39 | 40 | $(PROTOC_GEN_GO): 41 | GOBIN=$(GOOSBUILD) go install google.golang.org/protobuf/cmd/protoc-gen-go 42 | 43 | PROTOC_OUT?=. 44 | 45 | .PHONY: gen-proto 46 | gen-proto: $(PROTOC_GEN_GO) $(PROTOC_GEN_GO_VTPROTO) $(PROTOC) 47 | $(eval STRUCTS := $(shell grep '^message' proto/*.proto | cut -d ' ' -f2)) 48 | $(PROTOC) -I . --go_out=$(PROTOC_OUT) --plugin protoc-gen-go="$(PROTOC_GEN_GO)" \ 49 | --go-vtproto_out=$(PROTOC_OUT) --plugin protoc-gen-go-vtproto="$(PROTOC_GEN_GO_VTPROTO)" \ 50 | --go-vtproto_opt=features=marshal+unmarshal+size+clone \ 51 | $(wildcard proto/*.proto) 52 | go generate ./aggregators/internal/protohash 53 | $(MAKE) fmt 54 | -------------------------------------------------------------------------------- /aggregators/cachedeventsmap.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | package aggregators 6 | 7 | import ( 8 | "math" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | ) 13 | 14 | // cachedEventsMap holds a counts of cached events, keyed by interval and ID. 
15 | // Cached events are events that have been processed by Aggregate methods, 16 | // but which haven't yet been harvested. Event counts are fractional because 17 | // an event may be spread over multiple partitions. 18 | // 19 | // Access to the map is protected with a mutex. During harvest, an exclusive 20 | // (write) lock is held. Concurrent aggregations may perform atomic updates 21 | // to the map, and the harvester may assume that the map will not be modified 22 | // while it is reading it. 23 | type cachedEventsMap struct { 24 | // (interval, id) -> count 25 | m sync.Map 26 | countPool sync.Pool 27 | } 28 | 29 | func (m *cachedEventsMap) loadAndDelete(end time.Time) map[time.Duration]map[[16]byte]float64 { 30 | loaded := make(map[time.Duration]map[[16]byte]float64) 31 | m.m.Range(func(k, v any) bool { 32 | key := k.(cachedEventsStatsKey) 33 | if !end.Truncate(key.interval).Equal(end) { 34 | return true 35 | } 36 | intervalMetrics, ok := loaded[key.interval] 37 | if !ok { 38 | intervalMetrics = make(map[[16]byte]float64) 39 | loaded[key.interval] = intervalMetrics 40 | } 41 | vscaled := *v.(*uint64) 42 | value := float64(vscaled / math.MaxUint16) 43 | intervalMetrics[key.id] = value 44 | m.m.Delete(k) 45 | m.countPool.Put(v) 46 | return true 47 | }) 48 | return loaded 49 | } 50 | 51 | func (m *cachedEventsMap) add(interval time.Duration, id [16]byte, n float64) { 52 | // We use a pool for the value to minimise allocations, as it will 53 | // always escape to the heap through LoadOrStore. 54 | nscaled, ok := m.countPool.Get().(*uint64) 55 | if !ok { 56 | nscaled = new(uint64) 57 | } 58 | // Scale by the maximum number of partitions to get an integer value, 59 | // for simpler atomic operations. 60 | *nscaled = uint64(n * math.MaxUint16) 61 | key := cachedEventsStatsKey{interval: interval, id: id} 62 | old, loaded := m.m.Load(key) 63 | if !loaded { 64 | old, loaded = m.m.LoadOrStore(key, nscaled) 65 | if !loaded { 66 | // Stored a new value. 
67 | return 68 | } 69 | } 70 | atomic.AddUint64(old.(*uint64), *nscaled) 71 | m.countPool.Put(nscaled) 72 | } 73 | 74 | type cachedEventsStatsKey struct { 75 | interval time.Duration 76 | id [16]byte 77 | } 78 | -------------------------------------------------------------------------------- /aggregators/internal/protohash/generated.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | // Code generated by protohash/generate. DO NOT EDIT. 6 | 7 | package protohash 8 | 9 | import ( 10 | "encoding/binary" 11 | 12 | "github.com/cespare/xxhash/v2" 13 | 14 | "github.com/elastic/apm-aggregation/aggregationpb" 15 | ) 16 | 17 | func writeUint32(h *xxhash.Digest, v uint32) { 18 | var buf [4]byte 19 | binary.LittleEndian.PutUint32(buf[:], v) 20 | h.Write(buf[:]) 21 | } 22 | 23 | func writeUint64(h *xxhash.Digest, v uint64) { 24 | var buf [8]byte 25 | binary.LittleEndian.PutUint64(buf[:], v) 26 | h.Write(buf[:]) 27 | } 28 | 29 | func HashServiceAggregationKey(h xxhash.Digest, k *aggregationpb.ServiceAggregationKey) xxhash.Digest { 30 | writeUint64(&h, k.Timestamp) 31 | h.WriteString(k.ServiceName) 32 | h.WriteString(k.ServiceEnvironment) 33 | h.WriteString(k.ServiceLanguageName) 34 | h.WriteString(k.AgentName) 35 | h.Write(k.GlobalLabelsStr) 36 | return h 37 | } 38 | 39 | func HashServiceTransactionAggregationKey(h xxhash.Digest, k *aggregationpb.ServiceTransactionAggregationKey) xxhash.Digest { 40 | h.WriteString(k.TransactionType) 41 | return h 42 | } 43 | 44 | func HashSpanAggregationKey(h xxhash.Digest, k *aggregationpb.SpanAggregationKey) xxhash.Digest { 45 | h.WriteString(k.SpanName) 46 | h.WriteString(k.Outcome) 47 | h.WriteString(k.TargetType) 48 | h.WriteString(k.TargetName) 49 
| h.WriteString(k.Resource) 50 | return h 51 | } 52 | 53 | func HashTransactionAggregationKey(h xxhash.Digest, k *aggregationpb.TransactionAggregationKey) xxhash.Digest { 54 | if k.TraceRoot { 55 | h.WriteString("1") 56 | } 57 | h.WriteString(k.ContainerId) 58 | h.WriteString(k.KubernetesPodName) 59 | h.WriteString(k.ServiceVersion) 60 | h.WriteString(k.ServiceNodeName) 61 | h.WriteString(k.ServiceRuntimeName) 62 | h.WriteString(k.ServiceRuntimeVersion) 63 | h.WriteString(k.ServiceLanguageVersion) 64 | h.WriteString(k.HostHostname) 65 | h.WriteString(k.HostName) 66 | h.WriteString(k.HostOsPlatform) 67 | h.WriteString(k.EventOutcome) 68 | h.WriteString(k.TransactionName) 69 | h.WriteString(k.TransactionType) 70 | h.WriteString(k.TransactionResult) 71 | writeUint32(&h, k.FaasColdstart) 72 | h.WriteString(k.FaasId) 73 | h.WriteString(k.FaasName) 74 | h.WriteString(k.FaasVersion) 75 | h.WriteString(k.FaasTriggerType) 76 | h.WriteString(k.CloudProvider) 77 | h.WriteString(k.CloudRegion) 78 | h.WriteString(k.CloudAvailabilityZone) 79 | h.WriteString(k.CloudServiceName) 80 | h.WriteString(k.CloudAccountId) 81 | h.WriteString(k.CloudAccountName) 82 | h.WriteString(k.CloudMachineType) 83 | h.WriteString(k.CloudProjectId) 84 | h.WriteString(k.CloudProjectName) 85 | return h 86 | } 87 | -------------------------------------------------------------------------------- /aggregators/internal/hdrhistogram/hdrhistogram_test.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 
4 | 5 | package hdrhistogram 6 | 7 | import ( 8 | "math" 9 | "math/rand" 10 | "testing" 11 | 12 | "github.com/HdrHistogram/hdrhistogram-go" 13 | "github.com/google/go-cmp/cmp" 14 | "github.com/stretchr/testify/assert" 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | func TestMerge(t *testing.T) { 19 | hist1, hist2 := getTestHistogram(), getTestHistogram() 20 | histRep1, histRep2 := New(), New() 21 | 22 | for i := 0; i < 1_000_000; i++ { 23 | v1, v2 := rand.Int63n(3_600_000_000), rand.Int63n(3_600_000_000) 24 | c1, c2 := rand.Int63n(1_000), rand.Int63n(1_000) 25 | hist1.RecordValues(v1, c1) 26 | histRep1.RecordValues(v1, c1) 27 | hist2.RecordValues(v2, c2) 28 | histRep2.RecordValues(v2, c2) 29 | } 30 | 31 | require.Equal(t, int64(0), hist1.Merge(hist2)) 32 | histRep1.Merge(histRep2) 33 | assert.Empty(t, cmp.Diff(hist1.Export(), convertHistogramRepToSnapshot(histRep1))) 34 | } 35 | 36 | func TestBuckets(t *testing.T) { 37 | buckets := func(h *hdrhistogram.Histogram) (uint64, []uint64, []float64) { 38 | distribution := h.Distribution() 39 | counts := make([]uint64, 0, len(distribution)) 40 | values := make([]float64, 0, len(distribution)) 41 | 42 | var totalCount uint64 43 | for _, b := range distribution { 44 | if b.Count <= 0 { 45 | continue 46 | } 47 | count := uint64(math.Round(float64(b.Count) / histogramCountScale)) 48 | counts = append(counts, count) 49 | values = append(values, float64(b.To)) 50 | totalCount += count 51 | } 52 | return totalCount, counts, values 53 | } 54 | hist := getTestHistogram() 55 | histRep := New() 56 | 57 | recordValuesForAll := func(v, n int64) { 58 | hist.RecordValues(v, n) 59 | histRep.RecordValues(v, n) 60 | } 61 | 62 | // Explicitly test for recording values with 0 count 63 | recordValuesForAll(rand.Int63n(3_600_000_000), 0) 64 | for i := 0; i < 1_000_000; i++ { 65 | v := rand.Int63n(3_600_000_000) 66 | c := rand.Int63n(1_000) 67 | recordValuesForAll(v, c) 68 | } 69 | actualTotalCount, actualCounts, actualValues := 
histRep.Buckets() 70 | expectedTotalCount, expectedCounts, expectedValues := buckets(hist) 71 | 72 | assert.Equal(t, expectedTotalCount, actualTotalCount) 73 | assert.Equal(t, expectedCounts, actualCounts) 74 | assert.Equal(t, expectedValues, actualValues) 75 | } 76 | 77 | func getTestHistogram() *hdrhistogram.Histogram { 78 | return hdrhistogram.New( 79 | lowestTrackableValue, 80 | highestTrackableValue, 81 | int(significantFigures), 82 | ) 83 | } 84 | 85 | func convertHistogramRepToSnapshot(h *HistogramRepresentation) *hdrhistogram.Snapshot { 86 | counts := make([]int64, countsLen) 87 | h.CountsRep.ForEach(func(bucket int32, value int64) { 88 | counts[bucket] += value 89 | }) 90 | return &hdrhistogram.Snapshot{ 91 | LowestTrackableValue: h.LowestTrackableValue, 92 | HighestTrackableValue: h.HighestTrackableValue, 93 | SignificantFigures: h.SignificantFigures, 94 | Counts: counts, 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /aggregators/internal/protohash/generate/main.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | package main 6 | 7 | import ( 8 | "fmt" 9 | "go/types" 10 | "log" 11 | "os" 12 | "strings" 13 | 14 | "golang.org/x/tools/go/packages" 15 | ) 16 | 17 | func main() { 18 | const pkgpath = "github.com/elastic/apm-aggregation/aggregationpb" 19 | cfg := &packages.Config{Mode: packages.NeedTypes | packages.NeedTypesInfo} 20 | pkgs, err := packages.Load(cfg, pkgpath) 21 | if err != nil { 22 | log.Fatal(err) 23 | } 24 | 25 | f, err := os.Create("generated.go") 26 | if err != nil { 27 | log.Fatal(err) 28 | } 29 | defer f.Close() 30 | 31 | fmt.Fprintln(f, ` 32 | // Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one 33 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 34 | // you may not use this file except in compliance with the Elastic License 2.0. 35 | 36 | // Code generated by protohash/generate. DO NOT EDIT. 37 | 38 | package protohash 39 | 40 | import ( 41 | "encoding/binary" 42 | 43 | "github.com/cespare/xxhash/v2" 44 | 45 | "github.com/elastic/apm-aggregation/aggregationpb" 46 | ) 47 | 48 | func writeUint32(h *xxhash.Digest, v uint32) { 49 | var buf [4]byte 50 | binary.LittleEndian.PutUint32(buf[:], v) 51 | h.Write(buf[:]) 52 | } 53 | 54 | func writeUint64(h *xxhash.Digest, v uint64) { 55 | var buf [8]byte 56 | binary.LittleEndian.PutUint64(buf[:], v) 57 | h.Write(buf[:]) 58 | } 59 | `[1:]) 60 | 61 | pkg := pkgs[0] 62 | pkgscope := pkg.Types.Scope() 63 | for _, name := range pkgscope.Names() { 64 | if !strings.HasSuffix(name, "Key") { 65 | continue 66 | } 67 | typeName, ok := pkgscope.Lookup(name).(*types.TypeName) 68 | if !ok || !typeName.Exported() { 69 | continue 70 | } 71 | named := typeName.Type().(*types.Named) 72 | structType, ok := named.Underlying().(*types.Struct) 73 | if !ok { 74 | continue 75 | } 76 | 77 | fmt.Fprintf(f, "func Hash%[1]s(h xxhash.Digest, k *aggregationpb.%[1]s) xxhash.Digest {\n", name) 78 | for i := 0; i < structType.NumFields(); i++ { 79 | field := structType.Field(i) 80 | if !field.Exported() { 81 | continue 82 | } 83 | var unhandled bool 84 | switch fieldType := field.Type().(type) { 85 | case *types.Basic: 86 | switch kind := fieldType.Kind(); kind { 87 | case types.Bool: 88 | fmt.Fprintf(f, " if k.%s {\n h.WriteString(\"1\")\n }\n", field.Name()) 89 | case types.String: 90 | fmt.Fprintf(f, " h.WriteString(k.%s)\n", field.Name()) 91 | case types.Uint32: 92 | fmt.Fprintf(f, " writeUint32(&h, k.%s)\n", field.Name()) 93 | case types.Uint64: 94 | fmt.Fprintf(f, " writeUint64(&h, k.%s)\n", field.Name()) 95 | default: 96 | unhandled = true 97 | } 98 | case 
*types.Slice: 99 | switch elemType := fieldType.Elem().(type) { 100 | case *types.Basic: 101 | if elemType.Kind() != types.Byte { 102 | unhandled = true 103 | break 104 | } 105 | fmt.Fprintf(f, " h.Write(k.%s)\n", field.Name()) 106 | default: 107 | unhandled = true 108 | } 109 | default: 110 | unhandled = true 111 | } 112 | if unhandled { 113 | panic(fmt.Errorf("unhandled field %s.%s (%v)", name, field.Name(), field.Type())) 114 | } 115 | } 116 | fmt.Fprintln(f, " return h\n}") 117 | fmt.Fprintln(f) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /aggregators/ndjson_bench_test.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | package aggregators 6 | 7 | import ( 8 | "bufio" 9 | "context" 10 | "fmt" 11 | "io" 12 | "io/fs" 13 | "math" 14 | "os" 15 | "testing" 16 | "time" 17 | 18 | "go.uber.org/zap" 19 | "golang.org/x/sync/semaphore" 20 | 21 | "github.com/elastic/apm-data/input/elasticapm" 22 | "github.com/elastic/apm-data/model/modelpb" 23 | "github.com/elastic/apm-data/model/modelprocessor" 24 | ) 25 | 26 | func ndjsonToBatch(reader io.Reader) (*modelpb.Batch, error) { 27 | logger, err := zap.NewDevelopment() 28 | if err != nil { 29 | return nil, err 30 | } 31 | elasticapmProcessor := elasticapm.NewProcessor(elasticapm.Config{ 32 | Logger: logger, 33 | MaxEventSize: 1024 * 1024, // 1MiB 34 | Semaphore: semaphore.NewWeighted(1), 35 | }) 36 | baseEvent := modelpb.APMEvent{ 37 | Event: &modelpb.Event{ 38 | Received: modelpb.FromTime(time.Now()), 39 | }, 40 | } 41 | var batch modelpb.Batch 42 | processor := modelprocessor.Chained{ 43 | modelprocessor.SetHostHostname{}, 44 | modelprocessor.SetServiceNodeName{}, 45 | 
modelprocessor.SetGroupingKey{}, 46 | modelprocessor.SetErrorMessage{}, 47 | modelpb.ProcessBatchFunc(func(ctx context.Context, b *modelpb.Batch) error { 48 | batch = make(modelpb.Batch, len(*b)) 49 | copy(batch, *b) 50 | return nil 51 | }), 52 | } 53 | 54 | var elasticapmResult elasticapm.Result 55 | if err := elasticapmProcessor.HandleStream( 56 | context.TODO(), 57 | &baseEvent, 58 | reader, 59 | math.MaxInt32, // batch size 60 | processor, 61 | &elasticapmResult, 62 | ); err != nil { 63 | return nil, fmt.Errorf("stream error: %w", err) 64 | } 65 | return &batch, nil 66 | } 67 | 68 | // forEachNDJSON loops over ndjson files in testdata. 69 | // The directory is empty by default but the ndjson files can be downloaded from the apm-perf repo. 70 | func forEachNDJSON(b *testing.B, f func(*testing.B, *modelpb.Batch)) { 71 | dirFS := os.DirFS("testdata") 72 | matches, err := fs.Glob(dirFS, "*.ndjson") 73 | if err != nil { 74 | b.Fatal(err) 75 | } 76 | for _, filename := range matches { 77 | b.Run(filename, func(b *testing.B) { 78 | file, err := dirFS.Open(filename) 79 | if err != nil { 80 | b.Fatal(err) 81 | } 82 | defer file.Close() 83 | 84 | batch, err := ndjsonToBatch(bufio.NewReader(file)) 85 | if err != nil { 86 | b.Fatal(err) 87 | } 88 | f(b, batch) 89 | }) 90 | } 91 | } 92 | 93 | func BenchmarkNDJSONSerial(b *testing.B) { 94 | forEachNDJSON(b, func(b *testing.B, batch *modelpb.Batch) { 95 | agg := newTestAggregator(b) 96 | b.Cleanup(func() { 97 | agg.Close(context.TODO()) 98 | }) 99 | cmID := EncodeToCombinedMetricsKeyID(b, "ab01") 100 | b.ResetTimer() 101 | 102 | for i := 0; i < b.N; i++ { 103 | if err := agg.AggregateBatch(context.Background(), cmID, batch); err != nil { 104 | b.Fatal(err) 105 | } 106 | } 107 | }) 108 | } 109 | 110 | func BenchmarkNDJSONParallel(b *testing.B) { 111 | forEachNDJSON(b, func(b *testing.B, batch *modelpb.Batch) { 112 | agg := newTestAggregator(b) 113 | b.Cleanup(func() { 114 | agg.Close(context.TODO()) 115 | }) 116 | cmID := 
EncodeToCombinedMetricsKeyID(b, "ab01") 117 | b.ResetTimer() 118 | 119 | b.RunParallel(func(pb *testing.PB) { 120 | for pb.Next() { 121 | if err := agg.AggregateBatch(context.Background(), cmID, batch); err != nil { 122 | b.Fatal(err) 123 | } 124 | } 125 | }) 126 | }) 127 | } 128 | -------------------------------------------------------------------------------- /proto/aggregation.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package elastic.apm.v1; 4 | 5 | option go_package = "./aggregationpb"; 6 | option optimize_for = SPEED; 7 | 8 | message CombinedMetrics { 9 | repeated KeyedServiceMetrics service_metrics = 1; 10 | Overflow overflow_services = 2; 11 | bytes overflow_services_estimator = 3; 12 | double events_total = 4; 13 | uint64 youngest_event_timestamp = 5; 14 | } 15 | 16 | message KeyedServiceMetrics { 17 | ServiceAggregationKey key = 1; 18 | ServiceMetrics metrics = 2; 19 | } 20 | 21 | message ServiceAggregationKey { 22 | uint64 timestamp = 1; 23 | string service_name = 2; 24 | string service_environment = 3; 25 | string service_language_name = 4; 26 | string agent_name = 5; 27 | bytes global_labels_str = 6; 28 | } 29 | 30 | message ServiceMetrics { 31 | Overflow overflow_groups = 1; 32 | repeated KeyedTransactionMetrics transaction_metrics = 2; 33 | repeated KeyedServiceTransactionMetrics service_transaction_metrics = 3; 34 | repeated KeyedSpanMetrics span_metrics = 4; 35 | } 36 | 37 | message KeyedTransactionMetrics { 38 | TransactionAggregationKey key = 1; 39 | TransactionMetrics metrics = 2; 40 | } 41 | 42 | message TransactionAggregationKey { 43 | bool trace_root = 1; 44 | 45 | string container_id = 2; 46 | string kubernetes_pod_name = 3; 47 | 48 | string service_version = 4; 49 | string service_node_name = 5; 50 | 51 | string service_runtime_name = 6; 52 | string service_runtime_version = 7; 53 | string service_language_version = 8; 54 | 55 | string host_hostname = 9; 56 | string 
host_name = 10; 57 | string host_os_platform = 11; 58 | 59 | string event_outcome = 12; 60 | 61 | string transaction_name = 13; 62 | string transaction_type = 14; 63 | string transaction_result = 15; 64 | 65 | uint32 faas_coldstart = 16; 66 | string faas_id = 17; 67 | string faas_name = 18; 68 | string faas_version = 19; 69 | string faas_trigger_type = 20; 70 | 71 | string cloud_provider = 21; 72 | string cloud_region = 22; 73 | string cloud_availability_zone = 23; 74 | string cloud_service_name = 24; 75 | string cloud_account_id = 25; 76 | string cloud_account_name = 26; 77 | string cloud_machine_type = 27; 78 | string cloud_project_id = 28; 79 | string cloud_project_name = 29; 80 | } 81 | 82 | message TransactionMetrics { 83 | HDRHistogram histogram = 1; 84 | } 85 | 86 | message KeyedServiceTransactionMetrics { 87 | ServiceTransactionAggregationKey key = 1; 88 | ServiceTransactionMetrics metrics = 2; 89 | } 90 | 91 | message ServiceTransactionAggregationKey { 92 | string transaction_type = 1; 93 | } 94 | 95 | message ServiceTransactionMetrics { 96 | HDRHistogram histogram = 1; 97 | double failure_count = 2; 98 | double success_count = 3; 99 | } 100 | 101 | message KeyedSpanMetrics { 102 | SpanAggregationKey key = 1; 103 | SpanMetrics metrics = 2; 104 | } 105 | 106 | message SpanAggregationKey { 107 | string span_name = 1; 108 | string outcome = 2; 109 | 110 | string target_type = 3; 111 | string target_name = 4; 112 | 113 | string resource = 5; 114 | } 115 | 116 | message SpanMetrics { 117 | double count = 1; 118 | double sum = 2; 119 | } 120 | 121 | message Overflow { 122 | TransactionMetrics overflow_transactions = 1; 123 | ServiceTransactionMetrics overflow_service_transactions = 2; 124 | SpanMetrics overflow_spans = 3; 125 | bytes overflow_transactions_estimator = 4; 126 | bytes overflow_service_transactions_estimator = 5; 127 | bytes overflow_spans_estimator = 6; 128 | } 129 | 130 | message HDRHistogram { 131 | int64 lowest_trackable_value = 1; 132 | int64 
highest_trackable_value = 2; 133 | int64 significant_figures = 3; 134 | repeated int64 counts = 4; 135 | repeated int32 buckets = 5; 136 | } 137 | 138 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Elastic License 2.0 2 | 3 | URL: https://www.elastic.co/licensing/elastic-license 4 | 5 | ## Acceptance 6 | 7 | By using the software, you agree to all of the terms and conditions below. 8 | 9 | ## Copyright License 10 | 11 | The licensor grants you a non-exclusive, royalty-free, worldwide, 12 | non-sublicensable, non-transferable license to use, copy, distribute, make 13 | available, and prepare derivative works of the software, in each case subject to 14 | the limitations and conditions below. 15 | 16 | ## Limitations 17 | 18 | You may not provide the software to third parties as a hosted or managed 19 | service, where the service provides users with access to any substantial set of 20 | the features or functionality of the software. 21 | 22 | You may not move, change, disable, or circumvent the license key functionality 23 | in the software, and you may not remove or obscure any functionality in the 24 | software that is protected by the license key. 25 | 26 | You may not alter, remove, or obscure any licensing, copyright, or other notices 27 | of the licensor in the software. Any use of the licensor’s trademarks is subject 28 | to applicable law. 29 | 30 | ## Patents 31 | 32 | The licensor grants you a license, under any patent claims the licensor can 33 | license, or becomes able to license, to make, have made, use, sell, offer for 34 | sale, import and have imported the software, in each case subject to the 35 | limitations and conditions in this license. This license does not cover any 36 | patent claims that you cause to be infringed by modifications or additions to 37 | the software. 
If you or your company make any written claim that the software 38 | infringes or contributes to infringement of any patent, your patent license for 39 | the software granted under these terms ends immediately. If your company makes 40 | such a claim, your patent license ends immediately for work on behalf of your 41 | company. 42 | 43 | ## Notices 44 | 45 | You must ensure that anyone who gets a copy of any part of the software from you 46 | also gets a copy of these terms. 47 | 48 | If you modify the software, you must include in any modified copies of the 49 | software prominent notices stating that you have modified the software. 50 | 51 | ## No Other Rights 52 | 53 | These terms do not imply any licenses other than those expressly granted in 54 | these terms. 55 | 56 | ## Termination 57 | 58 | If you use the software in violation of these terms, such use is not licensed, 59 | and your licenses will automatically terminate. If the licensor provides you 60 | with a notice of your violation, and you cease all violation of this license no 61 | later than 30 days after you receive that notice, your licenses will be 62 | reinstated retroactively. However, if you violate these terms after such 63 | reinstatement, any additional violation of these terms will cause your licenses 64 | to terminate automatically and permanently. 65 | 66 | ## No Liability 67 | 68 | *As far as the law allows, the software comes as is, without any warranty or 69 | condition, and the licensor will not be liable to you for any damages arising 70 | out of these terms or the use or nature of the software, under any kind of 71 | legal claim.* 72 | 73 | ## Definitions 74 | 75 | The **licensor** is the entity offering these terms, and the **software** is the 76 | software the licensor makes available under these terms, including any portion 77 | of it. 78 | 79 | **you** refers to the individual or entity agreeing to these terms. 
80 | 81 | **your company** is any legal entity, sole proprietorship, or other kind of 82 | organization that you work for, plus all organizations that have control over, 83 | are under the control of, or are under common control with that 84 | organization. **control** means ownership of substantially all the assets of an 85 | entity, or the power to direct its management and policies by vote, contract, or 86 | otherwise. Control can be direct or indirect. 87 | 88 | **your licenses** are all the licenses granted to you for the software under 89 | these terms. 90 | 91 | **use** means anything you do with the software requiring one of your licenses. 92 | 93 | **trademark** means trademarks, service marks, and similar rights. -------------------------------------------------------------------------------- /aggregators/codec_test.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 
4 | 5 | package aggregators 6 | 7 | import ( 8 | "fmt" 9 | "testing" 10 | "time" 11 | 12 | "github.com/google/go-cmp/cmp" 13 | "github.com/google/go-cmp/cmp/cmpopts" 14 | "github.com/stretchr/testify/assert" 15 | 16 | "github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram" 17 | "github.com/elastic/apm-data/model/modelpb" 18 | ) 19 | 20 | func TestCombinedMetricsKey(t *testing.T) { 21 | expected := CombinedMetricsKey{ 22 | Interval: time.Minute, 23 | ProcessingTime: time.Now().Truncate(time.Minute), 24 | ID: EncodeToCombinedMetricsKeyID(t, "ab01"), 25 | } 26 | data := make([]byte, CombinedMetricsKeyEncodedSize) 27 | assert.NoError(t, expected.MarshalBinaryToSizedBuffer(data)) 28 | var actual CombinedMetricsKey 29 | assert.NoError(t, (&actual).UnmarshalBinary(data)) 30 | assert.Empty(t, cmp.Diff(expected, actual)) 31 | } 32 | 33 | func TestGetEncodedCombinedMetricsKeyWithoutPartitionID(t *testing.T) { 34 | key := CombinedMetricsKey{ 35 | Interval: time.Minute, 36 | ProcessingTime: time.Now().Truncate(time.Minute), 37 | ID: EncodeToCombinedMetricsKeyID(t, "ab01"), 38 | PartitionID: 11, 39 | } 40 | var encoded [CombinedMetricsKeyEncodedSize]byte 41 | assert.NoError(t, key.MarshalBinaryToSizedBuffer(encoded[:])) 42 | 43 | key.PartitionID = 0 44 | var expected [CombinedMetricsKeyEncodedSize]byte 45 | assert.NoError(t, key.MarshalBinaryToSizedBuffer(expected[:])) 46 | 47 | assert.Equal( 48 | t, 49 | expected[:], 50 | GetEncodedCombinedMetricsKeyWithoutPartitionID(encoded[:]), 51 | ) 52 | } 53 | 54 | func TestGlobalLabels(t *testing.T) { 55 | expected := globalLabels{ 56 | Labels: map[string]*modelpb.LabelValue{ 57 | "lb01": { 58 | Values: []string{"test01", "test02"}, 59 | Global: true, 60 | }, 61 | }, 62 | NumericLabels: map[string]*modelpb.NumericLabelValue{ 63 | "nlb01": { 64 | Values: []float64{0.1, 0.2}, 65 | Global: true, 66 | }, 67 | }, 68 | } 69 | str, err := expected.MarshalString() 70 | assert.NoError(t, err) 71 | var actual globalLabels 72 | 
assert.NoError(t, actual.UnmarshalString(str)) 73 | assert.Empty(t, cmp.Diff( 74 | expected, actual, 75 | cmpopts.IgnoreUnexported( 76 | modelpb.LabelValue{}, 77 | modelpb.NumericLabelValue{}, 78 | ), 79 | )) 80 | } 81 | 82 | func TestHistogramRepresentation(t *testing.T) { 83 | expected := hdrhistogram.New() 84 | expected.RecordDuration(time.Minute, 2) 85 | 86 | actual := hdrhistogram.New() 87 | histogramFromProto(actual, histogramToProto(expected)) 88 | assert.Empty(t, cmp.Diff( 89 | expected, actual, 90 | cmp.Comparer(func(a, b hdrhistogram.HybridCountsRep) bool { 91 | return a.Equal(&b) 92 | }), 93 | )) 94 | } 95 | 96 | func BenchmarkCombinedMetricsEncoding(b *testing.B) { 97 | b.ReportAllocs() 98 | ts := time.Now() 99 | cardinality := 10 100 | tcm := NewTestCombinedMetrics() 101 | sm := tcm.AddServiceMetrics(serviceAggregationKey{ 102 | Timestamp: ts, 103 | ServiceName: "bench", 104 | }) 105 | for i := 0; i < cardinality; i++ { 106 | txnName := fmt.Sprintf("txn%d", i) 107 | txnType := fmt.Sprintf("typ%d", i) 108 | spanName := fmt.Sprintf("spn%d", i) 109 | 110 | sm.AddTransaction(transactionAggregationKey{ 111 | TransactionName: txnName, 112 | TransactionType: txnType, 113 | }, WithTransactionCount(200)) 114 | sm.AddServiceTransaction(serviceTransactionAggregationKey{ 115 | TransactionType: txnType, 116 | }, WithTransactionCount(200)) 117 | sm.AddSpan(spanAggregationKey{ 118 | SpanName: spanName, 119 | }) 120 | } 121 | cm := tcm.Get() 122 | b.ResetTimer() 123 | for i := 0; i < b.N; i++ { 124 | cm.ToProto() 125 | } 126 | } 127 | 128 | func EncodeToCombinedMetricsKeyID(tb testing.TB, s string) [16]byte { 129 | var b [16]byte 130 | if len(s) > len(b) { 131 | tb.Fatal("invalid key length passed") 132 | } 133 | copy(b[len(b)-len(s):], s) 134 | return b 135 | } 136 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module 
github.com/elastic/apm-aggregation 2 | 3 | go 1.24.0 4 | 5 | require ( 6 | github.com/HdrHistogram/hdrhistogram-go v1.2.0 7 | github.com/axiomhq/hyperloglog v0.2.6 8 | github.com/cespare/xxhash/v2 v2.3.0 9 | github.com/cockroachdb/pebble/v2 v2.1.2 10 | github.com/elastic/apm-data v1.19.5 11 | github.com/google/go-cmp v0.7.0 12 | github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 13 | github.com/stretchr/testify v1.11.1 14 | go.elastic.co/apm/module/apmotel/v2 v2.7.2 15 | go.elastic.co/apm/v2 v2.7.2 16 | go.opentelemetry.io/otel v1.39.0 17 | go.opentelemetry.io/otel/metric v1.39.0 18 | go.opentelemetry.io/otel/sdk v1.39.0 19 | go.opentelemetry.io/otel/sdk/metric v1.39.0 20 | go.opentelemetry.io/otel/trace v1.39.0 21 | go.uber.org/zap v1.27.1 22 | golang.org/x/sync v0.19.0 23 | golang.org/x/tools v0.40.0 24 | google.golang.org/protobuf v1.36.11 25 | ) 26 | 27 | require ( 28 | github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect 29 | github.com/DataDog/zstd v1.5.7 // indirect 30 | github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 // indirect 31 | github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect 32 | github.com/armon/go-radix v1.0.0 // indirect 33 | github.com/beorn7/perks v1.0.1 // indirect 34 | github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect 35 | github.com/cockroachdb/errors v1.11.3 // indirect 36 | github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect 37 | github.com/cockroachdb/redact v1.1.5 // indirect 38 | github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect 39 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect 40 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 41 | github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 // indirect 42 | github.com/elastic/go-licenser v0.4.2 // indirect 43 | github.com/elastic/go-sysinfo v1.7.1 // 
indirect 44 | github.com/elastic/go-windows v1.0.1 // indirect 45 | github.com/elastic/opentelemetry-lib v0.21.0 // indirect 46 | github.com/getsentry/sentry-go v0.29.1 // indirect 47 | github.com/go-logr/logr v1.4.3 // indirect 48 | github.com/go-logr/stdr v1.2.2 // indirect 49 | github.com/gogo/protobuf v1.3.2 // indirect 50 | github.com/golang/snappy v1.0.0 // indirect 51 | github.com/google/uuid v1.6.0 // indirect 52 | github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect 53 | github.com/json-iterator/go v1.1.12 // indirect 54 | github.com/kamstrup/intmap v0.5.2 // indirect 55 | github.com/klauspost/compress v1.18.0 // indirect 56 | github.com/kr/pretty v0.3.1 // indirect 57 | github.com/kr/text v0.2.0 // indirect 58 | github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect 59 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 60 | github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect 61 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 62 | github.com/pkg/errors v0.9.1 // indirect 63 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 64 | github.com/prometheus/client_golang v1.20.5 // indirect 65 | github.com/prometheus/client_model v0.6.1 // indirect 66 | github.com/prometheus/common v0.60.1 // indirect 67 | github.com/prometheus/procfs v0.15.1 // indirect 68 | github.com/rogpeppe/go-internal v1.14.1 // indirect 69 | go.elastic.co/apm/module/apmhttp/v2 v2.7.2 // indirect 70 | go.elastic.co/fastjson v1.5.1 // indirect 71 | go.opentelemetry.io/auto/sdk v1.2.1 // indirect 72 | go.opentelemetry.io/collector/consumer v1.37.0 // indirect 73 | go.opentelemetry.io/collector/pdata v1.37.0 // indirect 74 | go.uber.org/multierr v1.11.0 // indirect 75 | golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect 76 | golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect 77 | golang.org/x/mod 
v0.31.0 // indirect 78 | golang.org/x/net v0.48.0 // indirect 79 | golang.org/x/sys v0.39.0 // indirect 80 | golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect 81 | golang.org/x/text v0.32.0 // indirect 82 | golang.org/x/tools/go/expect v0.1.1-deprecated // indirect 83 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect 84 | google.golang.org/grpc v1.74.2 // indirect 85 | gopkg.in/yaml.v3 v3.0.1 // indirect 86 | honnef.co/go/tools v0.6.1 // indirect 87 | howett.net/plist v1.0.0 // indirect 88 | ) 89 | 90 | tool ( 91 | github.com/elastic/go-licenser 92 | golang.org/x/tools/cmd/goimports 93 | honnef.co/go/tools/cmd/staticcheck 94 | ) 95 | -------------------------------------------------------------------------------- /aggregators/config_test.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 
4 | 5 | package aggregators 6 | 7 | import ( 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "go.opentelemetry.io/otel/sdk/metric" 13 | "go.opentelemetry.io/otel/sdk/trace" 14 | ) 15 | 16 | func TestNewConfig(t *testing.T) { 17 | defaultCfg := defaultCfg() 18 | customMeter := metric.NewMeterProvider().Meter("test") 19 | customTracer := trace.NewTracerProvider().Tracer("test") 20 | for _, tc := range []struct { 21 | name string 22 | opts []Option 23 | expected func() config 24 | expectedErrorMsg string 25 | }{ 26 | { 27 | name: "empty", 28 | opts: nil, 29 | expected: func() config { 30 | return defaultCfg 31 | }, 32 | }, 33 | { 34 | name: "with_data_dir", 35 | opts: []Option{ 36 | WithDataDir("/test"), 37 | }, 38 | expected: func() config { 39 | cfg := defaultCfg 40 | cfg.DataDir = "/test" 41 | return cfg 42 | }, 43 | }, 44 | { 45 | name: "with_limits", 46 | opts: []Option{ 47 | WithLimits(Limits{ 48 | MaxServices: 10, 49 | MaxSpanGroups: 10, 50 | MaxSpanGroupsPerService: 10, 51 | MaxTransactionGroups: 10, 52 | MaxTransactionGroupsPerService: 10, 53 | MaxServiceTransactionGroups: 10, 54 | MaxServiceTransactionGroupsPerService: 10, 55 | }), 56 | }, 57 | expected: func() config { 58 | cfg := defaultCfg 59 | cfg.Limits = Limits{ 60 | MaxServices: 10, 61 | MaxSpanGroups: 10, 62 | MaxSpanGroupsPerService: 10, 63 | MaxTransactionGroups: 10, 64 | MaxTransactionGroupsPerService: 10, 65 | MaxServiceTransactionGroups: 10, 66 | MaxServiceTransactionGroupsPerService: 10, 67 | } 68 | return cfg 69 | }, 70 | }, 71 | { 72 | name: "with_aggregation_intervals", 73 | opts: []Option{ 74 | WithAggregationIntervals([]time.Duration{time.Minute, time.Hour}), 75 | }, 76 | expected: func() config { 77 | cfg := defaultCfg 78 | cfg.AggregationIntervals = []time.Duration{time.Minute, time.Hour} 79 | return cfg 80 | }, 81 | }, 82 | { 83 | name: "with_harvest_delay", 84 | opts: []Option{ 85 | WithHarvestDelay(time.Hour), 86 | }, 87 | expected: func() config { 88 | 
cfg := defaultCfg 89 | cfg.HarvestDelay = time.Hour 90 | return cfg 91 | }, 92 | }, 93 | { 94 | name: "with_lookback", 95 | opts: []Option{ 96 | WithLookback(time.Hour), 97 | }, 98 | expected: func() config { 99 | cfg := defaultCfg 100 | cfg.Lookback = time.Hour 101 | return cfg 102 | }, 103 | }, 104 | { 105 | name: "with_meter", 106 | opts: []Option{ 107 | WithMeter(customMeter), 108 | }, 109 | expected: func() config { 110 | cfg := defaultCfg 111 | cfg.Meter = customMeter 112 | return cfg 113 | }, 114 | }, 115 | { 116 | name: "with_tracer", 117 | opts: []Option{ 118 | WithTracer(customTracer), 119 | }, 120 | expected: func() config { 121 | cfg := defaultCfg 122 | cfg.Tracer = customTracer 123 | return cfg 124 | }, 125 | }, 126 | { 127 | name: "with_empty_data_dir", 128 | opts: []Option{ 129 | WithDataDir(""), 130 | }, 131 | expectedErrorMsg: "data directory is required", 132 | }, 133 | { 134 | name: "with_nil_processor", 135 | opts: []Option{ 136 | WithProcessor(nil), 137 | }, 138 | expectedErrorMsg: "processor is required", 139 | }, 140 | { 141 | name: "with_no_aggregation_interval", 142 | opts: []Option{ 143 | WithAggregationIntervals(nil), 144 | }, 145 | expectedErrorMsg: "at least one aggregation interval is required", 146 | }, 147 | { 148 | name: "with_unsorted_aggregation_intervals", 149 | opts: []Option{ 150 | WithAggregationIntervals([]time.Duration{time.Hour, time.Minute}), 151 | }, 152 | expectedErrorMsg: "aggregation intervals must be in ascending order", 153 | }, 154 | { 155 | name: "with_invalid_aggregation_intervals", 156 | opts: []Option{ 157 | WithAggregationIntervals([]time.Duration{10 * time.Second, 15 * time.Second}), 158 | }, 159 | expectedErrorMsg: "aggregation intervals must be a factor of lowest interval", 160 | }, 161 | { 162 | name: "with_out_of_lower_range_aggregation_interval", 163 | opts: []Option{ 164 | WithAggregationIntervals([]time.Duration{time.Millisecond}), 165 | }, 166 | expectedErrorMsg: "aggregation interval less than one 
second is not supported", 167 | }, 168 | { 169 | name: "with_out_of_upper_range_aggregation_interval", 170 | opts: []Option{ 171 | WithAggregationIntervals([]time.Duration{20 * time.Hour}), 172 | }, 173 | expectedErrorMsg: "aggregation interval greater than 18 hours is not supported", 174 | }, 175 | } { 176 | actual, err := newConfig(tc.opts...) 177 | 178 | if tc.expectedErrorMsg != "" { 179 | assert.EqualError(t, err, tc.expectedErrorMsg) 180 | continue 181 | } 182 | 183 | expected := tc.expected() 184 | assert.NoError(t, err) 185 | 186 | // New logger is created for every call 187 | assert.NotNil(t, actual.Logger) 188 | actual.Logger, expected.Logger = nil, nil 189 | 190 | // Function values are not comparable 191 | assert.NotNil(t, actual.CombinedMetricsIDToKVs) 192 | actual.CombinedMetricsIDToKVs, expected.CombinedMetricsIDToKVs = nil, nil 193 | assert.NotNil(t, actual.Processor) 194 | actual.Processor, expected.Processor = nil, nil 195 | 196 | assert.Equal(t, expected, actual) 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # apm-aggregation 2 | 3 | APM metrics aggregation library that implements an LSM (Log Structured Merge tree)-based metrics aggregator. 4 | 5 | Files are subject to Elastic License v2. See LICENSE.txt for more. 6 | 7 | ## Instrumentation 8 | 9 | `apm-aggregation` uses OTEL to instrument itself. Instrumentation produces a set 10 | of metrics to help monitor the status of aggregations. This section describes the 11 | metrics produced by `apm-aggregation` in detail. 12 | 13 | ### Instrumentation Areas 14 | 15 | `apm-aggregation` aggregates metrics using LSM based key-value store [pebble](https://github.com/cockroachdb/pebble). 16 | The intrumentation covers two broad areas: 17 | 18 | 1. The core aggregation logic, including ingestion and harvest. 19 | 2. Performance of pebble database. 
20 | 21 | ### Metrics 22 | 23 | `apm-aggregation` records and publishes the following metrics: 24 | 25 | #### `events.processed.count` 26 | 27 | - Type: `Float64Counter` 28 | 29 | The number of processed APM Events. It includes successfully and unsuccessfully 30 | processed events, which are reported as dimensions. 31 | 32 | ##### Dimensions 33 | 34 | - [`combined_metrics_id`](#combined_metrics_id) 35 | - [`aggregation_interval`](#aggregation_interval) 36 | - [`outcome`](#outcome) 37 | 38 | #### `events.processed.bytes` 39 | 40 | - Type: `Int64Counter` 41 | 42 | The number of encoded bytes processed by the aggregator. This reports the same number 43 | of bytes that is written to the underlying db. 44 | 45 | ##### Dimensions 46 | 47 | - [`combined_metrics_id`](#combined_metrics_id) 48 | - [`outcome`](#outcome) 49 | 50 | #### `events.processed.latency` 51 | 52 | - Type: `Float64Histogram` 53 | 54 | The processing delay for a batch of APM events accepted at a specific processing 55 | time. It is recorded after removing any expected delays due to aggregation interval 56 | or configuration. 57 | 58 | ##### Dimensions 59 | 60 | - [`combined_metrics_id`](#combined_metrics_id) 61 | - [`aggregation_interval`](#aggregation_interval) 62 | - [`outcome`](#outcome) 63 | 64 | #### `events.processed.queued-latency` 65 | 66 | - Type: `Float64Histogram` 67 | 68 | The delay in processing a batch based on the youngest APM event received in the batch. 69 | 70 | ##### Dimensions 71 | 72 | - [`combined_metrics_id`](#combined_metrics_id) 73 | - [`aggregation_interval`](#aggregation_interval) 74 | - [`outcome`](#outcome) 75 | 76 | #### `metrics.overflowed.count` 77 | 78 | - Type: `Int64Counter` 79 | 80 | Estimated number of metric aggregation keys that resulted in an overflow, per interval and aggregation type. 
81 | 82 | ##### Dimensions 83 | 84 | - [`combined_metrics_id`](#combined_metrics_id) 85 | - [`aggregation_interval`](#aggregation_interval) 86 | - [`aggregation_type`](#aggregation_type) 87 | 88 | #### `pebble.flushes` 89 | 90 | - Type: `Int64ObservableCounter` 91 | 92 | The number of memtable flushes to disk. 93 | 94 | #### `pebble.flushed-bytes` 95 | 96 | - Type: `Int64ObservableCounter` 97 | 98 | The number of bytes written during a flush. 99 | 100 | #### `pebble.compactions` 101 | 102 | - Type: `Int64ObservableCounter` 103 | 104 | The number of table compactions performed by pebble. 105 | 106 | #### `pebble.ingested-bytes` 107 | 108 | - Type: `Int64ObservableCounter` 109 | 110 | The number of bytes ingested by pebble. 111 | 112 | #### `pebble.compacted-bytes-read` 113 | 114 | - Type: `Int64ObservableCounter` 115 | 116 | The number of bytes read during compaction. 117 | 118 | #### `pebble.compacted-bytes-written` 119 | 120 | - Type: `Int64ObservableCounter` 121 | 122 | The number of bytes written during compaction. 123 | 124 | #### `pebble.memtable.total-size` 125 | 126 | - Type: `Int64ObservableGauge` 127 | 128 | The current size of memtable in bytes. 129 | 130 | #### `pebble.disk.usage` 131 | 132 | - Type: `Int64ObservableGauge` 133 | 134 | The current total disk usage by pebble in bytes, including live and obsolete files. 135 | 136 | #### `pebble.read-amplification` 137 | 138 | - Type: `Int64ObservableGauge` 139 | 140 | The current read amplification for the db. 141 | 142 | #### `pebble.num-sstables` 143 | 144 | - Type: `Int64ObservableGauge` 145 | 146 | The current number of SSTables. 147 | 148 | #### `pebble.table-readers-mem-estimate` 149 | 150 | - Type: `Int64ObservableGauge` 151 | 152 | The memory in bytes used by pebble for index and fliter blocks. 153 | 154 | #### `pebble.estimated-pending-compaction` 155 | 156 | - Type: `Int64ObservableGauge` 157 | 158 | The current number of estimated bytes pending for compaction. 
159 | 160 | #### `pebble.marked-for-compaction-files` 161 | 162 | - Type: `Int64ObservableGauge` 163 | 164 | The current number of SSTables marked for compaction. 165 | 166 | #### `pebble.keys.tombstone.count` 167 | 168 | - Type: `Int64ObservableGauge` 169 | 170 | The approximate count of delete keys across the storage engine. 171 | 172 | ### Dimensions 173 | 174 | This section lists the general dimensions published by some of the metric. 175 | 176 | #### `combined_metrics_id` 177 | 178 | This is an optional dimension. The key and the value of this dimension depends 179 | on the option `WithCombinedMetricsIDToKVs` passed to the aggregator. If this 180 | option is not supplied then this dimension is omitted. 181 | 182 | #### `aggregation_interval` 183 | 184 | Holds the value of aggregation interval for which the combined metrics is produced. 185 | For example: `1m`, `10m`, etc. 186 | 187 | #### `aggregation_type` 188 | 189 | Holds the the aggregation type for which an overflow occurred. 190 | For example: `service`, `transaction`, `service_transaction`, `service_destination`. 191 | 192 | #### `outcome` 193 | 194 | ##### `success` 195 | 196 | Events that have been successfully aggregated into the final combined metrics and 197 | processed as part of the harvest. 198 | 199 | ##### `failure` 200 | 201 | Events that failed to be aggregated for an reason and were dropped at any stage 202 | in the aggregation process. 203 | -------------------------------------------------------------------------------- /aggregators/internal/telemetry/metrics_test.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 
4 | 5 | package telemetry 6 | 7 | import ( 8 | "context" 9 | "testing" 10 | 11 | "github.com/cockroachdb/pebble/v2" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | "go.opentelemetry.io/otel/sdk/metric" 15 | "go.opentelemetry.io/otel/sdk/metric/metricdata" 16 | "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" 17 | ) 18 | 19 | func TestNewInstruments(t *testing.T) { 20 | expected := []metricdata.Metrics{ 21 | { 22 | Name: "pebble.flushes", 23 | Description: "Number of memtable flushes to disk", 24 | Unit: "1", 25 | Data: metricdata.Sum[int64]{ 26 | DataPoints: []metricdata.DataPoint[int64]{ 27 | {Value: 0}, 28 | }, 29 | Temporality: metricdata.CumulativeTemporality, 30 | IsMonotonic: true, 31 | }, 32 | }, 33 | { 34 | Name: "pebble.flushed-bytes", 35 | Description: "Bytes written during flush", 36 | Unit: "by", 37 | Data: metricdata.Sum[int64]{ 38 | DataPoints: []metricdata.DataPoint[int64]{ 39 | {Value: 0}, 40 | }, 41 | Temporality: metricdata.CumulativeTemporality, 42 | IsMonotonic: true, 43 | }, 44 | }, 45 | { 46 | Name: "pebble.compactions", 47 | Description: "Number of table compactions", 48 | Unit: "1", 49 | Data: metricdata.Sum[int64]{ 50 | DataPoints: []metricdata.DataPoint[int64]{ 51 | {Value: 0}, 52 | }, 53 | Temporality: metricdata.CumulativeTemporality, 54 | IsMonotonic: true, 55 | }, 56 | }, 57 | { 58 | Name: "pebble.ingested-bytes", 59 | Description: "Bytes ingested", 60 | Unit: "by", 61 | Data: metricdata.Sum[int64]{ 62 | DataPoints: []metricdata.DataPoint[int64]{ 63 | {Value: 0}, 64 | }, 65 | Temporality: metricdata.CumulativeTemporality, 66 | IsMonotonic: true, 67 | }, 68 | }, 69 | { 70 | Name: "pebble.compacted-bytes-read", 71 | Description: "Bytes read during compaction", 72 | Unit: "by", 73 | Data: metricdata.Sum[int64]{ 74 | DataPoints: []metricdata.DataPoint[int64]{ 75 | {Value: 0}, 76 | }, 77 | Temporality: metricdata.CumulativeTemporality, 78 | IsMonotonic: true, 79 | }, 80 | }, 81 | { 82 
| Name: "pebble.compacted-bytes-written", 83 | Description: "Bytes written during compaction", 84 | Unit: "by", 85 | Data: metricdata.Sum[int64]{ 86 | DataPoints: []metricdata.DataPoint[int64]{ 87 | {Value: 0}, 88 | }, 89 | Temporality: metricdata.CumulativeTemporality, 90 | IsMonotonic: true, 91 | }, 92 | }, 93 | { 94 | Name: "pebble.memtable.total-size", 95 | Description: "Current size of memtable in bytes", 96 | Unit: "by", 97 | Data: metricdata.Gauge[int64]{ 98 | DataPoints: []metricdata.DataPoint[int64]{ 99 | {Value: 0}, 100 | }, 101 | }, 102 | }, 103 | { 104 | Name: "pebble.disk.usage", 105 | Description: "Total disk usage by pebble, including live and obsolete files", 106 | Unit: "by", 107 | Data: metricdata.Gauge[int64]{ 108 | DataPoints: []metricdata.DataPoint[int64]{ 109 | {Value: 0}, 110 | }, 111 | }, 112 | }, 113 | { 114 | Name: "pebble.read-amplification", 115 | Description: "Current read amplification for the db", 116 | Unit: "1", 117 | Data: metricdata.Gauge[int64]{ 118 | DataPoints: []metricdata.DataPoint[int64]{ 119 | {Value: 0}, 120 | }, 121 | }, 122 | }, 123 | { 124 | Name: "pebble.num-sstables", 125 | Description: "Current number of storage engine SSTables", 126 | Unit: "1", 127 | Data: metricdata.Gauge[int64]{ 128 | DataPoints: []metricdata.DataPoint[int64]{ 129 | {Value: 0}, 130 | }, 131 | }, 132 | }, 133 | { 134 | Name: "pebble.table-readers-mem-estimate", 135 | Description: "Memory used by index and filter blocks", 136 | Unit: "by", 137 | Data: metricdata.Gauge[int64]{ 138 | DataPoints: []metricdata.DataPoint[int64]{ 139 | {Value: 0}, 140 | }, 141 | }, 142 | }, 143 | { 144 | Name: "pebble.estimated-pending-compaction", 145 | Description: "Estimated pending compaction bytes", 146 | Unit: "by", 147 | Data: metricdata.Gauge[int64]{ 148 | DataPoints: []metricdata.DataPoint[int64]{ 149 | {Value: 0}, 150 | }, 151 | }, 152 | }, 153 | { 154 | Name: "pebble.marked-for-compaction-files", 155 | Description: "Count of SSTables marked for compaction", 
156 | Unit: "1", 157 | Data: metricdata.Gauge[int64]{ 158 | DataPoints: []metricdata.DataPoint[int64]{ 159 | {Value: 0}, 160 | }, 161 | }, 162 | }, 163 | { 164 | Name: "pebble.keys.tombstone.count", 165 | Description: "Approximate count of delete keys across the storage engine", 166 | Unit: "1", 167 | Data: metricdata.Gauge[int64]{ 168 | DataPoints: []metricdata.DataPoint[int64]{ 169 | {Value: 0}, 170 | }, 171 | }, 172 | }, 173 | } 174 | 175 | rdr := metric.NewManualReader() 176 | meter := metric.NewMeterProvider(metric.WithReader(rdr)).Meter("test") 177 | instruments, err := NewMetrics( 178 | func() *pebble.Metrics { return &pebble.Metrics{} }, 179 | WithMeter(meter), 180 | ) 181 | 182 | require.NoError(t, err) 183 | require.NotNil(t, instruments) 184 | var rm metricdata.ResourceMetrics 185 | assert.NoError(t, rdr.Collect(context.Background(), &rm)) 186 | 187 | require.Len(t, rm.ScopeMetrics, 1) 188 | sm := rm.ScopeMetrics[0] 189 | require.Len(t, sm.Metrics, len(expected)) 190 | for i, em := range expected { 191 | metricdatatest.AssertEqual(t, em, sm.Metrics[i], metricdatatest.IgnoreTimestamp()) 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /aggregationpb/labels.pb.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | // Code generated by protoc-gen-go. DO NOT EDIT. 
6 | // versions: 7 | // protoc-gen-go v1.35.1 8 | // protoc v4.22.1 9 | // source: proto/labels.proto 10 | 11 | package aggregationpb 12 | 13 | import ( 14 | reflect "reflect" 15 | sync "sync" 16 | 17 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 18 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 19 | ) 20 | 21 | const ( 22 | // Verify that this generated code is sufficiently up-to-date. 23 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 24 | // Verify that runtime/protoimpl is sufficiently up-to-date. 25 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 26 | ) 27 | 28 | type GlobalLabels struct { 29 | state protoimpl.MessageState 30 | sizeCache protoimpl.SizeCache 31 | unknownFields protoimpl.UnknownFields 32 | 33 | Labels []*Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` 34 | NumericLabels []*NumericLabel `protobuf:"bytes,2,rep,name=numeric_labels,json=numericLabels,proto3" json:"numeric_labels,omitempty"` 35 | } 36 | 37 | func (x *GlobalLabels) Reset() { 38 | *x = GlobalLabels{} 39 | mi := &file_proto_labels_proto_msgTypes[0] 40 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 41 | ms.StoreMessageInfo(mi) 42 | } 43 | 44 | func (x *GlobalLabels) String() string { 45 | return protoimpl.X.MessageStringOf(x) 46 | } 47 | 48 | func (*GlobalLabels) ProtoMessage() {} 49 | 50 | func (x *GlobalLabels) ProtoReflect() protoreflect.Message { 51 | mi := &file_proto_labels_proto_msgTypes[0] 52 | if x != nil { 53 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 54 | if ms.LoadMessageInfo() == nil { 55 | ms.StoreMessageInfo(mi) 56 | } 57 | return ms 58 | } 59 | return mi.MessageOf(x) 60 | } 61 | 62 | // Deprecated: Use GlobalLabels.ProtoReflect.Descriptor instead. 
63 | func (*GlobalLabels) Descriptor() ([]byte, []int) { 64 | return file_proto_labels_proto_rawDescGZIP(), []int{0} 65 | } 66 | 67 | func (x *GlobalLabels) GetLabels() []*Label { 68 | if x != nil { 69 | return x.Labels 70 | } 71 | return nil 72 | } 73 | 74 | func (x *GlobalLabels) GetNumericLabels() []*NumericLabel { 75 | if x != nil { 76 | return x.NumericLabels 77 | } 78 | return nil 79 | } 80 | 81 | type Label struct { 82 | state protoimpl.MessageState 83 | sizeCache protoimpl.SizeCache 84 | unknownFields protoimpl.UnknownFields 85 | 86 | Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` 87 | Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` 88 | Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` 89 | } 90 | 91 | func (x *Label) Reset() { 92 | *x = Label{} 93 | mi := &file_proto_labels_proto_msgTypes[1] 94 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 95 | ms.StoreMessageInfo(mi) 96 | } 97 | 98 | func (x *Label) String() string { 99 | return protoimpl.X.MessageStringOf(x) 100 | } 101 | 102 | func (*Label) ProtoMessage() {} 103 | 104 | func (x *Label) ProtoReflect() protoreflect.Message { 105 | mi := &file_proto_labels_proto_msgTypes[1] 106 | if x != nil { 107 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 108 | if ms.LoadMessageInfo() == nil { 109 | ms.StoreMessageInfo(mi) 110 | } 111 | return ms 112 | } 113 | return mi.MessageOf(x) 114 | } 115 | 116 | // Deprecated: Use Label.ProtoReflect.Descriptor instead. 
117 | func (*Label) Descriptor() ([]byte, []int) { 118 | return file_proto_labels_proto_rawDescGZIP(), []int{1} 119 | } 120 | 121 | func (x *Label) GetKey() string { 122 | if x != nil { 123 | return x.Key 124 | } 125 | return "" 126 | } 127 | 128 | func (x *Label) GetValue() string { 129 | if x != nil { 130 | return x.Value 131 | } 132 | return "" 133 | } 134 | 135 | func (x *Label) GetValues() []string { 136 | if x != nil { 137 | return x.Values 138 | } 139 | return nil 140 | } 141 | 142 | type NumericLabel struct { 143 | state protoimpl.MessageState 144 | sizeCache protoimpl.SizeCache 145 | unknownFields protoimpl.UnknownFields 146 | 147 | Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` 148 | Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` 149 | Values []float64 `protobuf:"fixed64,3,rep,packed,name=values,proto3" json:"values,omitempty"` 150 | } 151 | 152 | func (x *NumericLabel) Reset() { 153 | *x = NumericLabel{} 154 | mi := &file_proto_labels_proto_msgTypes[2] 155 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 156 | ms.StoreMessageInfo(mi) 157 | } 158 | 159 | func (x *NumericLabel) String() string { 160 | return protoimpl.X.MessageStringOf(x) 161 | } 162 | 163 | func (*NumericLabel) ProtoMessage() {} 164 | 165 | func (x *NumericLabel) ProtoReflect() protoreflect.Message { 166 | mi := &file_proto_labels_proto_msgTypes[2] 167 | if x != nil { 168 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 169 | if ms.LoadMessageInfo() == nil { 170 | ms.StoreMessageInfo(mi) 171 | } 172 | return ms 173 | } 174 | return mi.MessageOf(x) 175 | } 176 | 177 | // Deprecated: Use NumericLabel.ProtoReflect.Descriptor instead. 
178 | func (*NumericLabel) Descriptor() ([]byte, []int) { 179 | return file_proto_labels_proto_rawDescGZIP(), []int{2} 180 | } 181 | 182 | func (x *NumericLabel) GetKey() string { 183 | if x != nil { 184 | return x.Key 185 | } 186 | return "" 187 | } 188 | 189 | func (x *NumericLabel) GetValue() float64 { 190 | if x != nil { 191 | return x.Value 192 | } 193 | return 0 194 | } 195 | 196 | func (x *NumericLabel) GetValues() []float64 { 197 | if x != nil { 198 | return x.Values 199 | } 200 | return nil 201 | } 202 | 203 | var File_proto_labels_proto protoreflect.FileDescriptor 204 | 205 | var file_proto_labels_proto_rawDesc = []byte{ 206 | 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, 207 | 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 208 | 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x82, 0x01, 0x0a, 0x0c, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4c, 209 | 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x2d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 210 | 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 211 | 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x06, 0x6c, 0x61, 212 | 0x62, 0x65, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 213 | 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 214 | 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x75, 215 | 0x6d, 0x65, 0x72, 0x69, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x65, 216 | 0x72, 0x69, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x47, 0x0a, 0x05, 0x4c, 0x61, 0x62, 217 | 0x65, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 218 | 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 219 | 0x01, 0x28, 0x09, 0x52, 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 220 | 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 221 | 0x65, 0x73, 0x22, 0x4e, 0x0a, 0x0c, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x4c, 0x61, 0x62, 222 | 0x65, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 223 | 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 224 | 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 225 | 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 226 | 0x65, 0x73, 0x42, 0x13, 0x48, 0x01, 0x5a, 0x0f, 0x2e, 0x2f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 227 | 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 228 | } 229 | 230 | var ( 231 | file_proto_labels_proto_rawDescOnce sync.Once 232 | file_proto_labels_proto_rawDescData = file_proto_labels_proto_rawDesc 233 | ) 234 | 235 | func file_proto_labels_proto_rawDescGZIP() []byte { 236 | file_proto_labels_proto_rawDescOnce.Do(func() { 237 | file_proto_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_labels_proto_rawDescData) 238 | }) 239 | return file_proto_labels_proto_rawDescData 240 | } 241 | 242 | var file_proto_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 3) 243 | var file_proto_labels_proto_goTypes = []any{ 244 | (*GlobalLabels)(nil), // 0: elastic.apm.v1.GlobalLabels 245 | (*Label)(nil), // 1: elastic.apm.v1.Label 246 | (*NumericLabel)(nil), // 2: elastic.apm.v1.NumericLabel 247 | } 248 | var file_proto_labels_proto_depIdxs = []int32{ 249 | 1, // 0: elastic.apm.v1.GlobalLabels.labels:type_name -> elastic.apm.v1.Label 250 | 2, // 1: elastic.apm.v1.GlobalLabels.numeric_labels:type_name -> elastic.apm.v1.NumericLabel 251 | 2, // [2:2] is the sub-list for method output_type 252 | 2, // [2:2] is the sub-list for method input_type 
253 | 2, // [2:2] is the sub-list for extension type_name 254 | 2, // [2:2] is the sub-list for extension extendee 255 | 0, // [0:2] is the sub-list for field type_name 256 | } 257 | 258 | func init() { file_proto_labels_proto_init() } 259 | func file_proto_labels_proto_init() { 260 | if File_proto_labels_proto != nil { 261 | return 262 | } 263 | type x struct{} 264 | out := protoimpl.TypeBuilder{ 265 | File: protoimpl.DescBuilder{ 266 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 267 | RawDescriptor: file_proto_labels_proto_rawDesc, 268 | NumEnums: 0, 269 | NumMessages: 3, 270 | NumExtensions: 0, 271 | NumServices: 0, 272 | }, 273 | GoTypes: file_proto_labels_proto_goTypes, 274 | DependencyIndexes: file_proto_labels_proto_depIdxs, 275 | MessageInfos: file_proto_labels_proto_msgTypes, 276 | }.Build() 277 | File_proto_labels_proto = out.File 278 | file_proto_labels_proto_rawDesc = nil 279 | file_proto_labels_proto_goTypes = nil 280 | file_proto_labels_proto_depIdxs = nil 281 | } 282 | -------------------------------------------------------------------------------- /aggregators/config.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | package aggregators 6 | 7 | import ( 8 | "context" 9 | "errors" 10 | "fmt" 11 | "sort" 12 | "time" 13 | 14 | "go.opentelemetry.io/otel" 15 | "go.opentelemetry.io/otel/attribute" 16 | "go.opentelemetry.io/otel/metric" 17 | "go.opentelemetry.io/otel/trace" 18 | "go.uber.org/zap" 19 | 20 | "github.com/elastic/apm-aggregation/aggregationpb" 21 | ) 22 | 23 | const instrumentationName = "aggregators" 24 | 25 | // Processor defines handling of the aggregated metrics post harvest. 
26 | // CombinedMetrics passed to the processor is pooled and it is released 27 | // back to the pool after processor has returned. If the processor mutates 28 | // the CombinedMetrics such that it can no longer access the pooled objects, 29 | // then the Processor should release the objects back to the pool. 30 | type Processor func( 31 | ctx context.Context, 32 | cmk CombinedMetricsKey, 33 | cm *aggregationpb.CombinedMetrics, 34 | aggregationIvl time.Duration, 35 | ) error 36 | 37 | // config contains the required config for running the aggregator. 38 | type config struct { 39 | DataDir string 40 | Limits Limits 41 | Processor Processor 42 | Partitions uint16 43 | AggregationIntervals []time.Duration 44 | HarvestDelay time.Duration 45 | Lookback time.Duration 46 | CombinedMetricsIDToKVs func([16]byte) []attribute.KeyValue 47 | InMemory bool 48 | 49 | Meter metric.Meter 50 | Tracer trace.Tracer 51 | Logger *zap.Logger 52 | OverflowLogging bool 53 | } 54 | 55 | // Option allows configuring aggregator based on functional options. 56 | type Option func(config) config 57 | 58 | // newConfig creates a new aggregator config based on the passed options. 59 | func newConfig(opts ...Option) (config, error) { 60 | cfg := defaultCfg() 61 | for _, opt := range opts { 62 | cfg = opt(cfg) 63 | } 64 | return cfg, validateCfg(cfg) 65 | } 66 | 67 | // WithDataDir configures the data directory to be used by the database. 68 | func WithDataDir(dataDir string) Option { 69 | return func(c config) config { 70 | c.DataDir = dataDir 71 | return c 72 | } 73 | } 74 | 75 | // WithLimits configures the limits to be used by the aggregator. 76 | func WithLimits(limits Limits) Option { 77 | return func(c config) config { 78 | c.Limits = limits 79 | return c 80 | } 81 | } 82 | 83 | // WithProcessor configures the processor for handling of the aggregated 84 | // metrics post harvest. Processor is called for each decoded combined 85 | // metrics after they are harvested.
CombinedMetrics passed to the 86 | // processor is pooled and it is released back to the pool after processor 87 | // has returned. If the processor mutates the CombinedMetrics such that it 88 | // can no longer access the pooled objects, then the Processor should 89 | // release the objects back to the pool. 90 | func WithProcessor(processor Processor) Option { 91 | return func(c config) config { 92 | c.Processor = processor 93 | return c 94 | } 95 | } 96 | 97 | // WithPartitions configures the number of partitions for combined metrics 98 | // written to pebble. Defaults to 1. 99 | // 100 | // Partition IDs are encoded in a way that all the partitions of a specific 101 | // combined metric are listed before any other if compared using the bytes 102 | // comparer. 103 | func WithPartitions(n uint16) Option { 104 | return func(c config) config { 105 | c.Partitions = n 106 | return c 107 | } 108 | } 109 | 110 | // WithAggregationIntervals defines the intervals that aggregator will 111 | // aggregate for. 112 | func WithAggregationIntervals(aggIvls []time.Duration) Option { 113 | return func(c config) config { 114 | c.AggregationIntervals = aggIvls 115 | return c 116 | } 117 | } 118 | 119 | // WithHarvestDelay delays the harvest by the configured duration. 120 | // This means that harvest for a specific processing time would be 121 | // performed with the given delay. 122 | // 123 | // Without delay, a normal harvest schedule will harvest metrics 124 | // aggregated for processing time, say `t0`, at time `t1`, where 125 | // `t1 = t0 + aggregation_interval`. With delay of, say `d`, the 126 | // harvester will harvest the metrics for `t0` at `t1 + d`. In 127 | // addition to harvest the duration for which the metrics are 128 | // aggregated by the AggregateBatch API will also be affected. 129 | // 130 | // The main purpose of the delay is to handle the latency of 131 | // receiving the l1 aggregated metrics in l2 aggregation. 
Thus 132 | // the value must be configured for the l2 aggregator and is 133 | // not required for l1 aggregator. If used as such then the 134 | // harvest delay has no effects on the duration for which the 135 | // metrics are aggregated. This is because AggregateBatch API is 136 | // not used by the l2 aggregator. 137 | func WithHarvestDelay(delay time.Duration) Option { 138 | return func(c config) config { 139 | c.HarvestDelay = delay 140 | return c 141 | } 142 | } 143 | 144 | // WithLookback configures the maximum duration that the 145 | // aggregator will use to query the database during harvest time 146 | // in addition to the original period derived from aggregation 147 | // interval i.e. the harvest interval for each aggregation interval 148 | // will be defined as [end-Lookback-AggregationIvl, end). 149 | // 150 | // The main purpose of Lookback is to protect against data loss for 151 | // multi level deployments of aggregators where AggregateCombinedMetrics 152 | // is used to aggregate partial aggregates. In these cases, the 153 | // Lookback configuration can protect against data loss due to 154 | // delayed partial aggregates. Note that these delayed partial 155 | // aggregates will only be aggregated with other delayed partial 156 | // aggregates and thus we can have multiple aggregated metrics for 157 | // the same CombinedMetricsKey{Interval, ProcessingTime, ID}. 158 | func WithLookback(lookback time.Duration) Option { 159 | return func(c config) config { 160 | c.Lookback = lookback 161 | return c 162 | } 163 | } 164 | 165 | // WithMeter defines a custom meter which will be used for collecting 166 | // telemetry. Defaults to the meter provided by global provider. 167 | func WithMeter(meter metric.Meter) Option { 168 | return func(c config) config { 169 | c.Meter = meter 170 | return c 171 | } 172 | } 173 | 174 | // WithTracer defines a custom tracer which will be used for collecting 175 | // traces. Defaults to the tracer provided by global provider. 
176 | func WithTracer(tracer trace.Tracer) Option { 177 | return func(c config) config { 178 | c.Tracer = tracer 179 | return c 180 | } 181 | } 182 | 183 | // WithCombinedMetricsIDToKVs defines a function that converts a combined 184 | // metrics ID to zero or more attribute.KeyValue for telemetry. 185 | func WithCombinedMetricsIDToKVs(f func([16]byte) []attribute.KeyValue) Option { 186 | return func(c config) config { 187 | c.CombinedMetricsIDToKVs = f 188 | return c 189 | } 190 | } 191 | 192 | // WithLogger defines a custom logger to be used by aggregator. 193 | func WithLogger(logger *zap.Logger) Option { 194 | return func(c config) config { 195 | c.Logger = logger 196 | return c 197 | } 198 | } 199 | 200 | // WithOverflowLogging enables warning logs at harvest time, when overflows have occurred. 201 | // 202 | // Logging of overflows is disabled by default, as most callers are expected to rely on 203 | // metrics to surface cardinality issues. Support for logging exists for historical reasons. 204 | func WithOverflowLogging(enabled bool) Option { 205 | return func(c config) config { 206 | c.OverflowLogging = enabled 207 | return c 208 | } 209 | } 210 | 211 | // WithInMemory defines whether aggregator uses in-memory file system. 
212 | func WithInMemory(enabled bool) Option { 213 | return func(c config) config { 214 | c.InMemory = enabled 215 | return c 216 | } 217 | } 218 | 219 | func defaultCfg() config { 220 | return config{ 221 | DataDir: "/tmp", 222 | Processor: stdoutProcessor, 223 | Partitions: 1, 224 | AggregationIntervals: []time.Duration{time.Minute}, 225 | Meter: otel.Meter(instrumentationName), 226 | Tracer: otel.Tracer(instrumentationName), 227 | CombinedMetricsIDToKVs: func(_ [16]byte) []attribute.KeyValue { return nil }, 228 | Logger: zap.Must(zap.NewDevelopment()), 229 | } 230 | } 231 | 232 | func validateCfg(cfg config) error { 233 | if cfg.DataDir == "" { 234 | return errors.New("data directory is required") 235 | } 236 | if cfg.Processor == nil { 237 | return errors.New("processor is required") 238 | } 239 | if cfg.Partitions == 0 { 240 | return errors.New("partitions must be greater than zero") 241 | } 242 | if len(cfg.AggregationIntervals) == 0 { 243 | return errors.New("at least one aggregation interval is required") 244 | } 245 | if !sort.SliceIsSorted(cfg.AggregationIntervals, func(i, j int) bool { 246 | return cfg.AggregationIntervals[i] < cfg.AggregationIntervals[j] 247 | }) { 248 | return errors.New("aggregation intervals must be in ascending order") 249 | } 250 | lowest := cfg.AggregationIntervals[0] 251 | highest := cfg.AggregationIntervals[len(cfg.AggregationIntervals)-1] 252 | for i := 1; i < len(cfg.AggregationIntervals); i++ { 253 | ivl := cfg.AggregationIntervals[i] 254 | if ivl%lowest != 0 { 255 | return errors.New("aggregation intervals must be a factor of lowest interval") 256 | } 257 | } 258 | // For encoding/decoding the processing time for combined metrics we only 259 | // consider seconds granularity making 1 sec the lowest possible 260 | // aggregation interval. We also encode interval as 2 unsigned bytes making 261 | // 65535 (~18 hours) the highest possible aggregation interval. 
262 | if lowest < time.Second { 263 | return errors.New("aggregation interval less than one second is not supported") 264 | } 265 | if highest > 18*time.Hour { 266 | return errors.New("aggregation interval greater than 18 hours is not supported") 267 | } 268 | return nil 269 | } 270 | 271 | // stdoutProcessor is the default Processor; it prints each harvested 272 | // combined metrics key to stdout and discards the metrics themselves. 273 | func stdoutProcessor( 274 | ctx context.Context, 275 | cmk CombinedMetricsKey, 276 | _ *aggregationpb.CombinedMetrics, 277 | _ time.Duration, 278 | ) error { 279 | fmt.Printf("Received combined metrics with key: %+v\n", cmk) 280 | return nil 281 | } 282 | -------------------------------------------------------------------------------- /aggregators/models.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | package aggregators 6 | 7 | import ( 8 | "time" 9 | 10 | "github.com/axiomhq/hyperloglog" 11 | 12 | "github.com/elastic/apm-aggregation/aggregationpb" 13 | "github.com/elastic/apm-aggregation/aggregators/nullable" 14 | "github.com/elastic/apm-data/model/modelpb" 15 | ) 16 | 17 | // Limits define the aggregation limits. Once the limits are reached 18 | // the metrics will overflow into dedicated overflow buckets. 19 | type Limits struct { 20 | // MaxServices is the limit on the total number of unique services. 21 | // A unique service is identified by a unique ServiceAggregationKey. 22 | // This limit is shared across all aggregation metrics. 23 | MaxServices int 24 | 25 | // MaxSpanGroups is the limit on total number of unique span groups 26 | // across all services. 27 | // A unique span group is identified by a unique 28 | // ServiceAggregationKey + SpanAggregationKey.
29 | MaxSpanGroups int 30 | 31 | // MaxSpanGroupsPerService is the limit on the total number of unique 32 | // span groups within a service. 33 | // A unique span group within a service is identified by a unique 34 | // SpanAggregationKey. 35 | MaxSpanGroupsPerService int 36 | 37 | // MaxTransactionGroups is the limit on total number of unique 38 | // transaction groups across all services. 39 | // A unique transaction group is identified by a unique 40 | // ServiceAggregationKey + TransactionAggregationKey. 41 | MaxTransactionGroups int 42 | 43 | // MaxTransactionGroupsPerService is the limit on the number of unique 44 | // transaction groups within a service. 45 | // A unique transaction group within a service is identified by a unique 46 | // TransactionAggregationKey. 47 | MaxTransactionGroupsPerService int 48 | 49 | // MaxServiceTransactionGroups is the limit on total number of unique 50 | // service transaction groups across all services. 51 | // A unique service transaction group is identified by a unique 52 | // ServiceAggregationKey + ServiceTransactionAggregationKey. 53 | MaxServiceTransactionGroups int 54 | 55 | // MaxServiceTransactionGroupsPerService is the limit on the number 56 | // of unique service transaction groups within a service. 57 | // A unique service transaction group within a service is identified 58 | // by a unique ServiceTransactionAggregationKey. 59 | MaxServiceTransactionGroupsPerService int 60 | } 61 | 62 | // CombinedMetricsKey models the key to store the data in LSM tree. 63 | // Each key-value pair represents a set of unique metric for a combined metrics ID. 64 | // The processing time used in the key should be rounded to the 65 | // duration of aggregation since the zero time. 
66 | type CombinedMetricsKey struct { 67 | Interval time.Duration 68 | ProcessingTime time.Time 69 | PartitionID uint16 70 | ID [16]byte 71 | } 72 | 73 | // globalLabels is an intermediate struct used to marshal/unmarshal the 74 | // provided global labels into a comparable format. The format is used by 75 | // pebble db to compare service aggregation keys. 76 | type globalLabels struct { 77 | Labels modelpb.Labels 78 | NumericLabels modelpb.NumericLabels 79 | } 80 | 81 | // combinedMetrics models the value to store the data in LSM tree. 82 | // Each unique combined metrics ID stores a combined metrics per aggregation 83 | // interval. combinedMetrics encapsulates the aggregated metrics 84 | // as well as the overflow metrics. 85 | type combinedMetrics struct { 86 | Services map[serviceAggregationKey]serviceMetrics 87 | 88 | // OverflowServices provides a dedicated bucket for collecting 89 | // aggregate metrics for all the aggregation groups for all services 90 | // that overflowed due to max services limit being reached. 91 | OverflowServices overflow 92 | 93 | // OverflowServicesEstimator estimates the number of unique service 94 | // aggregation keys that overflowed due to max services limit. 95 | OverflowServicesEstimator *hyperloglog.Sketch 96 | 97 | // EventsTotal is the total number of individual events, including 98 | // all overflows, that were aggregated for this combined metrics. It 99 | // is used for internal monitoring purposes and is approximated when 100 | // partitioning is enabled. 101 | EventsTotal float64 102 | 103 | // YoungestEventTimestamp is the youngest event that was aggregated 104 | // in the combined metrics based on the received timestamp. 105 | YoungestEventTimestamp uint64 106 | } 107 | 108 | // serviceAggregationKey models the key used to store service specific 109 | // aggregation metrics. 
type serviceAggregationKey struct {
	Timestamp           time.Time
	ServiceName         string
	ServiceEnvironment  string
	ServiceLanguageName string
	AgentName           string
	GlobalLabelsStr     string // presumably serialized global labels — confirm against encoding in labels.proto
}

// serviceMetrics models the value to store all the aggregated metrics
// for a specific service aggregation key.
type serviceMetrics struct {
	OverflowGroups           overflow
	TransactionGroups        map[transactionAggregationKey]*aggregationpb.KeyedTransactionMetrics
	ServiceTransactionGroups map[serviceTransactionAggregationKey]*aggregationpb.KeyedServiceTransactionMetrics
	SpanGroups               map[spanAggregationKey]*aggregationpb.KeyedSpanMetrics
}

// insertHash records hash into the HyperLogLog sketch pointed to by to,
// lazily allocating the sketch on first use.
func insertHash(to **hyperloglog.Sketch, hash uint64) {
	if *to == nil {
		*to = hyperloglog.New14()
	}
	(*to).InsertHash(hash)
}

// mergeEstimator merges the cardinality estimator from into the sketch
// pointed to by to, lazily allocating the destination on first use.
func mergeEstimator(to **hyperloglog.Sketch, from *hyperloglog.Sketch) {
	if *to == nil {
		*to = hyperloglog.New14()
	}
	// Ignoring returned error here since the error is only returned if
	// the precision is set outside bounds which is not possible for our case.
	(*to).Merge(from)
}

// overflowTransaction holds the merged metrics and the cardinality
// estimator for transaction groups that have overflowed.
type overflowTransaction struct {
	Metrics   *aggregationpb.TransactionMetrics
	Estimator *hyperloglog.Sketch
}

// Merge merges a transaction metric, identified by hash, into the
// overflow bucket, lazily allocating the bucket's metrics on first use.
func (o *overflowTransaction) Merge(
	from *aggregationpb.TransactionMetrics,
	hash uint64,
) {
	if o.Metrics == nil {
		o.Metrics = &aggregationpb.TransactionMetrics{}
	}
	mergeTransactionMetrics(o.Metrics, from)
	insertHash(&o.Estimator, hash)
}

// MergeOverflow merges another overflow bucket into this one.
// A bucket with a nil estimator is empty and is ignored.
func (o *overflowTransaction) MergeOverflow(from *overflowTransaction) {
	if from.Estimator != nil {
		if o.Metrics == nil {
			o.Metrics = &aggregationpb.TransactionMetrics{}
		}
		mergeTransactionMetrics(o.Metrics, from.Metrics)
		mergeEstimator(&o.Estimator, from.Estimator)
	}
}

// Empty reports whether nothing has overflowed into this bucket.
func (o *overflowTransaction) Empty() bool {
	return o.Estimator == nil
}

// overflowServiceTransaction holds the merged metrics and the cardinality
// estimator for service transaction groups that have overflowed.
type overflowServiceTransaction struct {
	Metrics   *aggregationpb.ServiceTransactionMetrics
	Estimator *hyperloglog.Sketch
}

// Merge merges a service transaction metric, identified by hash, into the
// overflow bucket, lazily allocating the bucket's metrics on first use.
func (o *overflowServiceTransaction) Merge(
	from *aggregationpb.ServiceTransactionMetrics,
	hash uint64,
) {
	if o.Metrics == nil {
		o.Metrics = &aggregationpb.ServiceTransactionMetrics{}
	}
	mergeServiceTransactionMetrics(o.Metrics, from)
	insertHash(&o.Estimator, hash)
}

// MergeOverflow merges another overflow bucket into this one.
// A bucket with a nil estimator is empty and is ignored.
func (o *overflowServiceTransaction) MergeOverflow(from *overflowServiceTransaction) {
	if from.Estimator != nil {
		if o.Metrics == nil {
			o.Metrics = &aggregationpb.ServiceTransactionMetrics{}
		}
		mergeServiceTransactionMetrics(o.Metrics, from.Metrics)
		mergeEstimator(&o.Estimator, from.Estimator)
	}
}

// Empty reports whether nothing has overflowed into this bucket.
func (o *overflowServiceTransaction) Empty() bool {
	return o.Estimator == nil
}

// overflowSpan holds the merged metrics and the cardinality estimator
// for span groups that have overflowed.
type overflowSpan struct {
	Metrics   *aggregationpb.SpanMetrics
	Estimator *hyperloglog.Sketch
}

// Merge merges a span metric, identified by hash, into the overflow
// bucket, lazily allocating the bucket's metrics on first use.
func (o *overflowSpan) Merge(
	from *aggregationpb.SpanMetrics,
	hash uint64,
) {
	if o.Metrics == nil {
		o.Metrics = &aggregationpb.SpanMetrics{}
	}
	mergeSpanMetrics(o.Metrics, from)
	insertHash(&o.Estimator, hash)
}

// MergeOverflow merges another overflow bucket into this one.
// A bucket with a nil estimator is empty and is ignored.
func (o *overflowSpan) MergeOverflow(from *overflowSpan) {
	if from.Estimator != nil {
		if o.Metrics == nil {
			o.Metrics = &aggregationpb.SpanMetrics{}
		}
		mergeSpanMetrics(o.Metrics, from.Metrics)
		mergeEstimator(&o.Estimator, from.Estimator)
	}
}

// Empty reports whether nothing has overflowed into this bucket.
func (o *overflowSpan) Empty() bool {
	return o.Estimator == nil
}

// overflow contains transaction and spans overflow metrics and cardinality
// estimators for the aggregation group for overflow buckets.
type overflow struct {
	OverflowTransaction        overflowTransaction
	OverflowServiceTransaction overflowServiceTransaction
	OverflowSpan               overflowSpan
}

// transactionAggregationKey models the key used to store transaction
// aggregation metrics.
244 | type transactionAggregationKey struct { 245 | TraceRoot bool 246 | 247 | ContainerID string 248 | KubernetesPodName string 249 | 250 | ServiceVersion string 251 | ServiceNodeName string 252 | 253 | ServiceRuntimeName string 254 | ServiceRuntimeVersion string 255 | ServiceLanguageVersion string 256 | 257 | HostHostname string 258 | HostName string 259 | HostOSPlatform string 260 | 261 | EventOutcome string 262 | 263 | TransactionName string 264 | TransactionType string 265 | TransactionResult string 266 | 267 | FAASColdstart nullable.Bool 268 | FAASID string 269 | FAASName string 270 | FAASVersion string 271 | FAASTriggerType string 272 | 273 | CloudProvider string 274 | CloudRegion string 275 | CloudAvailabilityZone string 276 | CloudServiceName string 277 | CloudAccountID string 278 | CloudAccountName string 279 | CloudMachineType string 280 | CloudProjectID string 281 | CloudProjectName string 282 | } 283 | 284 | // spanAggregationKey models the key used to store span aggregation metrics. 285 | type spanAggregationKey struct { 286 | SpanName string 287 | Outcome string 288 | 289 | TargetType string 290 | TargetName string 291 | 292 | Resource string 293 | } 294 | 295 | // serviceTransactionAggregationKey models the key used to store 296 | // service transaction aggregation metrics. 297 | type serviceTransactionAggregationKey struct { 298 | TransactionType string 299 | } 300 | -------------------------------------------------------------------------------- /aggregators/internal/telemetry/metrics.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | // Package telemetry holds the logic for emitting telemetry when performing aggregation. 
package telemetry

import (
	"context"
	"fmt"

	"github.com/cockroachdb/pebble/v2"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// Units reported with each instrument.
// NOTE(review): the UCUM case-sensitive symbol for bytes is "By", not "by";
// confirm whether downstream dashboards depend on the current value before
// changing it.
const (
	bytesUnit    = "by"
	countUnit    = "1"
	durationUnit = "s"
)

// Metrics are a collection of metric used to record all the
// measurements for the aggregators. Sync metrics are exposed
// and used by the calling code to record measurements whereas
// async instruments (mainly pebble database metrics) are
// collected by the observer pattern by passing a metrics provider.
type Metrics struct {
	// Synchronous metrics used to record aggregation measurements.

	EventsProcessed   metric.Float64Counter
	BytesProcessed    metric.Int64Counter
	MinQueuedDelay    metric.Float64Histogram
	ProcessingLatency metric.Float64Histogram
	MetricsOverflowed metric.Int64Counter

	// Asynchronous metrics used to get pebble metrics and
	// record measurements. These are kept unexported as they are
	// supposed to be updated via the registered callback.

	pebbleFlushes                  metric.Int64ObservableCounter
	pebbleFlushedBytes             metric.Int64ObservableCounter
	pebbleCompactions              metric.Int64ObservableCounter
	pebbleIngestedBytes            metric.Int64ObservableCounter
	pebbleCompactedBytesRead       metric.Int64ObservableCounter
	pebbleCompactedBytesWritten    metric.Int64ObservableCounter
	pebbleMemtableTotalSize        metric.Int64ObservableGauge
	pebbleTotalDiskUsage           metric.Int64ObservableGauge
	pebbleReadAmplification        metric.Int64ObservableGauge
	pebbleNumSSTables              metric.Int64ObservableGauge
	pebbleTableReadersMemEstimate  metric.Int64ObservableGauge
	pebblePendingCompaction        metric.Int64ObservableGauge
	pebbleMarkedForCompactionFiles metric.Int64ObservableGauge
	pebbleKeysTombstones           metric.Int64ObservableGauge

	// registration represents the token for the configured callback.
	registration metric.Registration
}

// pebbleProvider returns a snapshot of the pebble database metrics.
type pebbleProvider func() *pebble.Metrics

// NewMetrics returns a new instance of the metrics.
func NewMetrics(provider pebbleProvider, opts ...Option) (*Metrics, error) {
	var err error
	var i Metrics

	cfg := newConfig(opts...)
	meter := cfg.Meter

	// Aggregator metrics
	i.EventsProcessed, err = meter.Float64Counter(
		"events.processed.count",
		metric.WithDescription("Number of processed APM Events. Dimensions are used to report the outcome"),
		metric.WithUnit(countUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for events processed: %w", err)
	}
	i.BytesProcessed, err = meter.Int64Counter(
		"events.processed.bytes",
		metric.WithDescription("Number of bytes processed by the aggregators"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for bytes processed: %w", err)
	}
	i.ProcessingLatency, err = meter.Float64Histogram(
		"events.processed.latency",
		metric.WithDescription("Records the processing delays, removes expected delays due to aggregation intervals"),
		metric.WithUnit(durationUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for processing delay: %w", err)
	}
	i.MinQueuedDelay, err = meter.Float64Histogram(
		"events.processed.queued-latency",
		metric.WithDescription("Records total duration for aggregating a batch w.r.t. its youngest member"),
		metric.WithUnit(durationUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for queued delay: %w", err)
	}
	i.MetricsOverflowed, err = meter.Int64Counter(
		"metrics.overflowed.count",
		metric.WithDescription(
			"Estimated number of metric aggregation keys that resulted in an overflow, per interval and aggregation type",
		),
		metric.WithUnit(countUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for metrics overflowed: %w", err)
	}

	// Pebble metrics
	i.pebbleFlushes, err = meter.Int64ObservableCounter(
		"pebble.flushes",
		metric.WithDescription("Number of memtable flushes to disk"),
		metric.WithUnit(countUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for flushes: %w", err)
	}
	i.pebbleFlushedBytes, err = meter.Int64ObservableCounter(
		"pebble.flushed-bytes",
		metric.WithDescription("Bytes written during flush"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for flushed bytes: %w", err)
	}
	i.pebbleCompactions, err = meter.Int64ObservableCounter(
		"pebble.compactions",
		metric.WithDescription("Number of table compactions"),
		metric.WithUnit(countUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for compactions: %w", err)
	}
	i.pebbleIngestedBytes, err = meter.Int64ObservableCounter(
		"pebble.ingested-bytes",
		metric.WithDescription("Bytes ingested"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for ingested bytes: %w", err)
	}
	i.pebbleCompactedBytesRead, err = meter.Int64ObservableCounter(
		"pebble.compacted-bytes-read",
		metric.WithDescription("Bytes read during compaction"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for compacted bytes read: %w", err)
	}
	i.pebbleCompactedBytesWritten, err = meter.Int64ObservableCounter(
		"pebble.compacted-bytes-written",
		metric.WithDescription("Bytes written during compaction"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for compacted bytes written: %w", err)
	}
	i.pebbleMemtableTotalSize, err = meter.Int64ObservableGauge(
		"pebble.memtable.total-size",
		metric.WithDescription("Current size of memtable in bytes"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for memtable size: %w", err)
	}
	i.pebbleTotalDiskUsage, err = meter.Int64ObservableGauge(
		"pebble.disk.usage",
		metric.WithDescription("Total disk usage by pebble, including live and obsolete files"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for total disk usage: %w", err)
	}
	i.pebbleReadAmplification, err = meter.Int64ObservableGauge(
		"pebble.read-amplification",
		metric.WithDescription("Current read amplification for the db"),
		metric.WithUnit(countUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for read amplification: %w", err)
	}
	i.pebbleNumSSTables, err = meter.Int64ObservableGauge(
		"pebble.num-sstables",
		metric.WithDescription("Current number of storage engine SSTables"),
		metric.WithUnit(countUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for count of sstables: %w", err)
	}
	i.pebbleTableReadersMemEstimate, err = meter.Int64ObservableGauge(
		"pebble.table-readers-mem-estimate",
		metric.WithDescription("Memory used by index and filter blocks"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for table cache readers: %w", err)
	}
	i.pebblePendingCompaction, err = meter.Int64ObservableGauge(
		"pebble.estimated-pending-compaction",
		metric.WithDescription("Estimated pending compaction bytes"),
		metric.WithUnit(bytesUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for pending compaction: %w", err)
	}
	i.pebbleMarkedForCompactionFiles, err = meter.Int64ObservableGauge(
		"pebble.marked-for-compaction-files",
		metric.WithDescription("Count of SSTables marked for compaction"),
		metric.WithUnit(countUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for compaction marked files: %w", err)
	}
	i.pebbleKeysTombstones, err = meter.Int64ObservableGauge(
		"pebble.keys.tombstone.count",
		metric.WithDescription("Approximate count of delete keys across the storage engine"),
		metric.WithUnit(countUnit),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create metric for tombstones: %w", err)
	}

	if err := i.registerCallback(meter, provider); err != nil {
		return nil, fmt.Errorf("failed to register callback: %w", err)
	}
	return &i, nil
}

// CleanUp unregisters any registered callback for collecting async
// measurements.
func (i *Metrics) CleanUp() error {
	// Nil receiver and missing registration are both treated as no-op so
	// CleanUp is safe to call on a partially-initialized Metrics.
	if i == nil || i.registration == nil {
		return nil
	}
	if err := i.registration.Unregister(); err != nil {
		return fmt.Errorf("failed to unregister callback: %w", err)
	}
	return nil
}

// registerCallback registers an observer callback that snapshots the pebble
// metrics via provider and records them against all the async instruments.
// The instrument list passed to RegisterCallback must contain every
// instrument observed inside the callback.
func (i *Metrics) registerCallback(meter metric.Meter, provider pebbleProvider) (err error) {
	i.registration, err = meter.RegisterCallback(func(ctx context.Context, obs metric.Observer) error {
		pm := provider()
		obs.ObserveInt64(i.pebbleMemtableTotalSize, int64(pm.MemTable.Size))
		obs.ObserveInt64(i.pebbleTotalDiskUsage, int64(pm.DiskSpaceUsage()))

		// Flushed bytes are tracked on L0 since flushes write to level 0.
		obs.ObserveInt64(i.pebbleFlushes, pm.Flush.Count)
		obs.ObserveInt64(i.pebbleFlushedBytes, int64(pm.Levels[0].TableBytesFlushed))

		obs.ObserveInt64(i.pebbleCompactions, pm.Compact.Count)
		obs.ObserveInt64(i.pebblePendingCompaction, int64(pm.Compact.EstimatedDebt))
		obs.ObserveInt64(i.pebbleMarkedForCompactionFiles, int64(pm.Compact.MarkedFiles))

		obs.ObserveInt64(i.pebbleTableReadersMemEstimate, pm.FileCache.Size)
		obs.ObserveInt64(i.pebbleKeysTombstones, int64(pm.Keys.TombstoneCount))

		// Level-aggregated metrics.
		lm := pm.Total()
		obs.ObserveInt64(i.pebbleNumSSTables, lm.TablesCount)
		obs.ObserveInt64(i.pebbleIngestedBytes, int64(lm.TableBytesIngested))
		obs.ObserveInt64(i.pebbleCompactedBytesRead, int64(lm.TableBytesRead))
		obs.ObserveInt64(i.pebbleCompactedBytesWritten, int64(lm.TableBytesCompacted))
		obs.ObserveInt64(i.pebbleReadAmplification, int64(lm.Sublevels))
		return nil
	},
		i.pebbleMemtableTotalSize,
		i.pebbleTotalDiskUsage,
		i.pebbleFlushes,
		i.pebbleFlushedBytes,
		i.pebbleCompactions,
		i.pebbleIngestedBytes,
		i.pebbleCompactedBytesRead,
		i.pebbleCompactedBytesWritten,
		i.pebbleReadAmplification,
		i.pebbleNumSSTables,
		i.pebbleTableReadersMemEstimate,
		i.pebblePendingCompaction,
		i.pebbleMarkedForCompactionFiles,
		i.pebbleKeysTombstones,
	)
	return
}

// WithSuccess returns an attribute representing a successful event outcome.
func WithSuccess() attribute.KeyValue {
	return WithOutcome("success")
}

// WithFailure returns an attribute representing a failed event outcome.
func WithFailure() attribute.KeyValue {
	return WithOutcome("failure")
}

// WithOutcome returns an attribute for event outcome.
func WithOutcome(outcome string) attribute.KeyValue {
	return attribute.String("outcome", outcome)
}
--------------------------------------------------------------------------------
/aggregators/combined_metrics_test.go:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License 2.0;
// you may not use this file except in compliance with the Elastic License 2.0.

package aggregators

import (
	"time"

	"github.com/cespare/xxhash/v2"
	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/testing/protocmp"

	"github.com/elastic/apm-aggregation/aggregationpb"
	"github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram"
	"github.com/elastic/apm-aggregation/aggregators/internal/protohash"
	"github.com/elastic/apm-data/model/modelpb"
)

// TestCombinedMetricsCfg holds the tunable parameters used when building
// test combined metrics.
type TestCombinedMetricsCfg struct {
	key                    CombinedMetricsKey
	eventsTotal            float64
	youngestEventTimestamp time.Time
}

// TestCombinedMetricsOpt is a functional option for TestCombinedMetricsCfg.
type TestCombinedMetricsOpt func(cfg TestCombinedMetricsCfg) TestCombinedMetricsCfg

// WithKey sets the combined metrics key for the test combined metrics.
func WithKey(key CombinedMetricsKey) TestCombinedMetricsOpt {
	return func(cfg TestCombinedMetricsCfg) TestCombinedMetricsCfg {
		cfg.key = key
		return cfg
	}
}

// WithEventsTotal sets the total events count for the test combined metrics.
func WithEventsTotal(total float64) TestCombinedMetricsOpt {
	return func(cfg TestCombinedMetricsCfg) TestCombinedMetricsCfg {
		cfg.eventsTotal = total
		return cfg
	}
}

// WithYoungestEventTimestamp sets the youngest event timestamp for the
// test combined metrics.
func WithYoungestEventTimestamp(ts time.Time) TestCombinedMetricsOpt {
	return func(cfg TestCombinedMetricsCfg) TestCombinedMetricsCfg {
		cfg.youngestEventTimestamp = ts
		return cfg
	}
}

var defaultTestCombinedMetricsCfg = TestCombinedMetricsCfg{
	eventsTotal:            1,
	youngestEventTimestamp: time.Unix(0, 0).UTC(),
}

// TestTransactionCfg holds the tunable parameters used when building test
// transaction and service transaction metrics.
type TestTransactionCfg struct {
	duration time.Duration
	count    int
	// outcome is used for service transaction as transaction already
	// have `EventOutcome` in their key. For transactions this field
	// will automatically be overridden based on the key value.
	outcome string
}

// TestTransactionOpt is a functional option for TestTransactionCfg.
type TestTransactionOpt func(TestTransactionCfg) TestTransactionCfg

// WithTransactionDuration sets the recorded transaction duration.
func WithTransactionDuration(d time.Duration) TestTransactionOpt {
	return func(cfg TestTransactionCfg) TestTransactionCfg {
		cfg.duration = d
		return cfg
	}
}

// WithTransactionCount sets the recorded transaction count.
func WithTransactionCount(c int) TestTransactionOpt {
	return func(cfg TestTransactionCfg) TestTransactionCfg {
		cfg.count = c
		return cfg
	}
}

// WithEventOutcome is used to specify the event outcome for building
// test service transaction metrics. If it is specified for building
// test transaction metrics then it will be overridden based on the
// `EventOutcome` in the transaction aggregation key.
func WithEventOutcome(o string) TestTransactionOpt {
	return func(cfg TestTransactionCfg) TestTransactionCfg {
		cfg.outcome = o
		return cfg
	}
}

var defaultTestTransactionCfg = TestTransactionCfg{
	duration: time.Second,
	count:    1,
	outcome:  "success",
}

// TestSpanCfg holds the tunable parameters used when building test span
// metrics.
type TestSpanCfg struct {
	duration time.Duration
	count    int
}

// TestSpanOpt is a functional option for TestSpanCfg.
type TestSpanOpt func(TestSpanCfg) TestSpanCfg

// WithSpanDuration sets the recorded span duration.
func WithSpanDuration(d time.Duration) TestSpanOpt {
	return func(cfg TestSpanCfg) TestSpanCfg {
		cfg.duration = d
		return cfg
	}
}

// WithSpanCount sets the recorded span count.
func WithSpanCount(c int) TestSpanOpt {
	return func(cfg TestSpanCfg) TestSpanCfg {
		cfg.count = c
		return cfg
	}
}

var defaultTestSpanCfg = TestSpanCfg{
	duration: time.Nanosecond, // for backward compatibility with previous tests
	count:    1,
}

// TestCombinedMetrics creates combined metrics for testing. The creation logic
// is arranged in a way to allow chained creation and addition of leaf nodes
// to combined metrics.
type TestCombinedMetrics struct {
	key   CombinedMetricsKey
	value *combinedMetrics
}

// NewTestCombinedMetrics creates an empty test combined metrics with the
// configured key, events total, and youngest event timestamp.
func NewTestCombinedMetrics(opts ...TestCombinedMetricsOpt) *TestCombinedMetrics {
	cfg := defaultTestCombinedMetricsCfg
	for _, opt := range opts {
		cfg = opt(cfg)
	}
	var cm combinedMetrics
	cm.EventsTotal = cfg.eventsTotal
	cm.YoungestEventTimestamp = modelpb.FromTime(cfg.youngestEventTimestamp)
	cm.Services = make(map[serviceAggregationKey]serviceMetrics)
	return &TestCombinedMetrics{
		key:   cfg.key,
		value: &cm,
	}
}

// GetProto returns the protobuf representation of the combined metrics.
func (tcm *TestCombinedMetrics) GetProto() *aggregationpb.CombinedMetrics {
	return tcm.value.ToProto()
}

// Get returns a copy of the combined metrics value.
func (tcm *TestCombinedMetrics) Get() combinedMetrics {
	return *tcm.value
}

// GetKey returns the combined metrics key.
func (tcm *TestCombinedMetrics) GetKey() CombinedMetricsKey {
	return tcm.key
}

// TestServiceMetrics is a chained builder for metrics belonging to a single
// service aggregation key within a TestCombinedMetrics.
type TestServiceMetrics struct {
	sk       serviceAggregationKey
	tcm      *TestCombinedMetrics
	overflow bool // indicates if the service has overflowed to global
}

// AddServiceMetrics adds a new service entry, keyed by sk, to the test
// combined metrics and returns a builder for it.
func (tcm *TestCombinedMetrics) AddServiceMetrics(
	sk serviceAggregationKey,
) *TestServiceMetrics {
	if _, ok := tcm.value.Services[sk]; !ok {
		tcm.value.Services[sk] = newServiceMetrics()
	}
	return &TestServiceMetrics{sk: sk, tcm: tcm}
}

// AddServiceMetricsOverflow marks the service identified by sk as overflowed
// to the global overflow bucket and returns a builder for it.
func (tcm *TestCombinedMetrics) AddServiceMetricsOverflow(
	sk serviceAggregationKey,
) *TestServiceMetrics {
	if _, ok := tcm.value.Services[sk]; ok {
		panic("service already added as non overflow")
	}

	hash := protohash.HashServiceAggregationKey(xxhash.Digest{}, sk.ToProto())
	insertHash(&tcm.value.OverflowServicesEstimator, hash.Sum64())

	// Does not save to a map, any service instance added to this will
	// automatically be overflowed to the global overflow bucket.
	return &TestServiceMetrics{sk: sk, tcm: tcm, overflow: true}
}

// AddTransaction records a transaction metric for the given transaction
// aggregation key, merging with any previously recorded metric for the
// same key. Panics if the service has overflowed.
func (tsm *TestServiceMetrics) AddTransaction(
	tk transactionAggregationKey,
	opts ...TestTransactionOpt,
) *TestServiceMetrics {
	if tsm.overflow {
		panic("cannot add transaction to overflowed service transaction")
	}
	cfg := defaultTestTransactionCfg
	for _, opt := range opts {
		cfg = opt(cfg)
	}
	// The outcome always comes from the key for transactions.
	cfg.outcome = tk.EventOutcome

	hdr := hdrhistogram.New()
	hdr.RecordDuration(cfg.duration, float64(cfg.count))
	ktm := &aggregationpb.KeyedTransactionMetrics{}
	ktm.Key = tk.ToProto()
	ktm.Metrics = &aggregationpb.TransactionMetrics{}
	ktm.Metrics.Histogram = histogramToProto(hdr)

	svc := tsm.tcm.value.Services[tsm.sk]
	if oldKtm, ok := svc.TransactionGroups[tk]; ok {
		mergeKeyedTransactionMetrics(oldKtm, ktm)
		ktm = oldKtm
	}
	svc.TransactionGroups[tk] = ktm
	return tsm
}

// AddTransactionOverflow records a transaction metric into the per-service
// overflow bucket, or into the global overflow bucket if the service itself
// has overflowed.
func (tsm *TestServiceMetrics) AddTransactionOverflow(
	tk transactionAggregationKey,
	opts ...TestTransactionOpt,
) *TestServiceMetrics {
	cfg := defaultTestTransactionCfg
	for _, opt := range opts {
		cfg = opt(cfg)
	}
	// The outcome always comes from the key for transactions.
	cfg.outcome = tk.EventOutcome

	hdr := hdrhistogram.New()
	hdr.RecordDuration(cfg.duration, float64(cfg.count))
	from := &aggregationpb.TransactionMetrics{}
	from.Histogram = histogramToProto(hdr)

	hash := protohash.HashTransactionAggregationKey(
		protohash.HashServiceAggregationKey(xxhash.Digest{}, tsm.sk.ToProto()),
		tk.ToProto(),
	)
	if tsm.overflow {
		// Global overflow
		tsm.tcm.value.OverflowServices.OverflowTransaction.Merge(from, hash.Sum64())
	} else {
		// Per service overflow
		svc := tsm.tcm.value.Services[tsm.sk]
		svc.OverflowGroups.OverflowTransaction.Merge(from, hash.Sum64())
		tsm.tcm.value.Services[tsm.sk] = svc
	}
	return tsm
}

// AddServiceTransaction records a service transaction metric for the given
// key, merging with any previously recorded metric for the same key.
func (tsm *TestServiceMetrics) AddServiceTransaction(
	stk serviceTransactionAggregationKey,
	opts ...TestTransactionOpt,
) *TestServiceMetrics {
	cfg := defaultTestTransactionCfg
	for _, opt := range opts {
		cfg = opt(cfg)
	}

	hdr := hdrhistogram.New()
	hdr.RecordDuration(cfg.duration, float64(cfg.count))
	kstm := &aggregationpb.KeyedServiceTransactionMetrics{}
	kstm.Key = stk.ToProto()
	kstm.Metrics = &aggregationpb.ServiceTransactionMetrics{}
	kstm.Metrics.Histogram = histogramToProto(hdr)
	switch cfg.outcome {
	case "failure":
		kstm.Metrics.FailureCount = float64(cfg.count)
	case "success":
		kstm.Metrics.SuccessCount = float64(cfg.count)
	}

	svc := tsm.tcm.value.Services[tsm.sk]
	if oldKstm, ok := svc.ServiceTransactionGroups[stk]; ok {
		mergeKeyedServiceTransactionMetrics(oldKstm, kstm)
		kstm = oldKstm
	}
	svc.ServiceTransactionGroups[stk] = kstm
	return tsm
}

// AddServiceTransactionOverflow records a service transaction metric into
// the per-service overflow bucket, or into the global overflow bucket if
// the service itself has overflowed.
func (tsm *TestServiceMetrics) AddServiceTransactionOverflow(
	stk serviceTransactionAggregationKey,
	opts ...TestTransactionOpt,
) *TestServiceMetrics {
	cfg := defaultTestTransactionCfg
	for _, opt := range opts {
		cfg = opt(cfg)
	}

	hdr := hdrhistogram.New()
	hdr.RecordDuration(cfg.duration, float64(cfg.count))
	from := &aggregationpb.ServiceTransactionMetrics{}
	from.Histogram = histogramToProto(hdr)
	switch cfg.outcome {
	case "failure":
		from.FailureCount = float64(cfg.count)
	case "success":
		from.SuccessCount = float64(cfg.count)
	}

	hash := protohash.HashServiceTransactionAggregationKey(
		protohash.HashServiceAggregationKey(xxhash.Digest{}, tsm.sk.ToProto()),
		stk.ToProto(),
	)
	if tsm.overflow {
		// Global overflow
		tsm.tcm.value.OverflowServices.OverflowServiceTransaction.Merge(from, hash.Sum64())
	} else {
		// Per service overflow
		svc := tsm.tcm.value.Services[tsm.sk]
		svc.OverflowGroups.OverflowServiceTransaction.Merge(from, hash.Sum64())
		tsm.tcm.value.Services[tsm.sk] = svc
	}
	return tsm
}

// AddSpan records a span metric for the given span aggregation key,
// merging with any previously recorded metric for the same key.
func (tsm *TestServiceMetrics) AddSpan(
	spk spanAggregationKey,
	opts ...TestSpanOpt,
) *TestServiceMetrics {
	cfg := defaultTestSpanCfg
	for _, opt := range opts {
		cfg = opt(cfg)
	}

	ksm := &aggregationpb.KeyedSpanMetrics{}
	ksm.Key = spk.ToProto()
	ksm.Metrics = &aggregationpb.SpanMetrics{}
	ksm.Metrics.Sum += float64(cfg.duration * time.Duration(cfg.count))
	ksm.Metrics.Count += float64(cfg.count)

	svc := tsm.tcm.value.Services[tsm.sk]
	if oldKsm, ok := svc.SpanGroups[spk]; ok {
		mergeKeyedSpanMetrics(oldKsm, ksm)
		ksm = oldKsm
	}
	svc.SpanGroups[spk] = ksm
	return tsm
}

// AddSpanOverflow records a span metric into the per-service overflow
// bucket, or into the global overflow bucket if the service itself has
// overflowed.
func (tsm *TestServiceMetrics) AddSpanOverflow(
	spk spanAggregationKey,
	opts ...TestSpanOpt,
) *TestServiceMetrics {
	cfg := defaultTestSpanCfg
	for _, opt := range opts {
		cfg = opt(cfg)
	}

	from := &aggregationpb.SpanMetrics{}
	from.Sum += float64(cfg.duration * time.Duration(cfg.count))
	from.Count += float64(cfg.count)

	hash := protohash.HashSpanAggregationKey(
		protohash.HashServiceAggregationKey(xxhash.Digest{}, tsm.sk.ToProto()),
		spk.ToProto(),
	)
	if tsm.overflow {
		// Global overflow
		tsm.tcm.value.OverflowServices.OverflowSpan.Merge(from, hash.Sum64())
	} else {
		// Per service overflow
		svc := tsm.tcm.value.Services[tsm.sk]
		svc.OverflowGroups.OverflowSpan.Merge(from, hash.Sum64())
		tsm.tcm.value.Services[tsm.sk] = svc
	}
	return tsm
}

// GetProto returns the protobuf representation of the underlying combined
// metrics.
func (tsm *TestServiceMetrics) GetProto() *aggregationpb.CombinedMetrics {
	return tsm.tcm.GetProto()
}

// Get returns a copy of the underlying combined metrics value.
func (tsm *TestServiceMetrics) Get() combinedMetrics {
	return tsm.tcm.Get()
}

// GetTest returns the underlying test combined metrics to continue the
// chained building at the combined metrics level.
func (tsm *TestServiceMetrics) GetTest() *TestCombinedMetrics {
	return tsm.tcm
}

// Set of cmp options to sort combined metrics based on key hash. Hash collisions
// are not considered.
var combinedMetricsSliceSorters = []cmp.Option{
	protocmp.SortRepeated(func(a, b *aggregationpb.KeyedServiceMetrics) bool {
		return xxhashDigestLess(
			protohash.HashServiceAggregationKey(xxhash.Digest{}, a.Key),
			protohash.HashServiceAggregationKey(xxhash.Digest{}, b.Key),
		)
	}),
	protocmp.SortRepeated(func(a, b *aggregationpb.KeyedTransactionMetrics) bool {
		return xxhashDigestLess(
			protohash.HashTransactionAggregationKey(xxhash.Digest{}, a.Key),
			protohash.HashTransactionAggregationKey(xxhash.Digest{}, b.Key),
		)
	}),
	protocmp.SortRepeated(func(a, b *aggregationpb.KeyedServiceTransactionMetrics) bool {
		return xxhashDigestLess(
			protohash.HashServiceTransactionAggregationKey(xxhash.Digest{}, a.Key),
			protohash.HashServiceTransactionAggregationKey(xxhash.Digest{}, b.Key),
		)
	}),
	protocmp.SortRepeated(func(a, b *aggregationpb.KeyedSpanMetrics) bool {
		return xxhashDigestLess(
			protohash.HashSpanAggregationKey(xxhash.Digest{}, a.Key),
			protohash.HashSpanAggregationKey(xxhash.Digest{}, b.Key),
		)
	}),
}

// xxhashDigestLess orders two xxhash digests by their 64-bit sums.
func xxhashDigestLess(a, b xxhash.Digest) bool {
	return a.Sum64() < b.Sum64()
}
--------------------------------------------------------------------------------
/aggregators/internal/hdrhistogram/hdrhistogram.go:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License 2.0;
// you may not use this file except in compliance with the Elastic License 2.0.

// The MIT License (MIT)
//
// Copyright (c) 2014 Coda Hale
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

// Package hdrhistogram provides an optimized histogram for sparse samples.
// This is a stop gap measure until we have [packed histogram implementation](https://www.javadoc.io/static/org.hdrhistogram/HdrHistogram/2.1.12/org/HdrHistogram/PackedHistogram.html).
29 | package hdrhistogram 30 | 31 | import ( 32 | "fmt" 33 | "math" 34 | "math/bits" 35 | "slices" 36 | "time" 37 | ) 38 | 39 | const ( 40 | lowestTrackableValue = 1 41 | highestTrackableValue = 3.6e+9 // 1 hour in microseconds 42 | significantFigures = 2 43 | 44 | // We scale transaction counts in the histogram, which only permits storing 45 | // integer counts, to allow for fractional transactions due to sampling. 46 | // 47 | // e.g. if the sampling rate is 0.4, then each sampled transaction has a 48 | // representative count of 2.5 (1/0.4). If we receive two such transactions 49 | // we will record a count of 5000 (2 * 2.5 * histogramCountScale). When we 50 | // publish metrics, we will scale down to 5 (5000 / histogramCountScale). 51 | histogramCountScale = 1000 52 | ) 53 | 54 | var ( 55 | unitMagnitude = getUnitMagnitude() 56 | bucketCount = getBucketCount() 57 | subBucketCount = getSubBucketCount() 58 | subBucketHalfCountMagnitude = getSubBucketHalfCountMagnitude() 59 | subBucketHalfCount = getSubBucketHalfCount() 60 | subBucketMask = getSubBucketMask() 61 | countsLen = getCountsLen() 62 | ) 63 | 64 | // HistogramRepresentation is an optimization over HDR histogram mainly useful 65 | // for recording values clustered in some range rather than distributed over 66 | // the full range of the HDR histogram. It is based on the [hdrhistogram-go](https://github.com/HdrHistogram/hdrhistogram-go) package. 67 | // The package is not safe for concurrent usage, use an external lock 68 | // protection if required. 
69 | type HistogramRepresentation struct { 70 | LowestTrackableValue int64 71 | HighestTrackableValue int64 72 | SignificantFigures int64 73 | CountsRep HybridCountsRep 74 | } 75 | 76 | // New returns a new instance of HistogramRepresentation 77 | func New() *HistogramRepresentation { 78 | return &HistogramRepresentation{ 79 | LowestTrackableValue: lowestTrackableValue, 80 | HighestTrackableValue: highestTrackableValue, 81 | SignificantFigures: significantFigures, 82 | } 83 | } 84 | 85 | // RecordDuration records duration in the histogram representation. It 86 | // supports recording float64 upto 3 decimal places. This is achieved 87 | // by scaling the count. 88 | func (h *HistogramRepresentation) RecordDuration(d time.Duration, n float64) error { 89 | count := int64(math.Round(n * histogramCountScale)) 90 | v := d.Microseconds() 91 | 92 | return h.RecordValues(v, count) 93 | } 94 | 95 | // RecordValues records values in the histogram representation. 96 | func (h *HistogramRepresentation) RecordValues(v, n int64) error { 97 | idx := h.countsIndexFor(v) 98 | if idx < 0 || int32(countsLen) <= idx { 99 | return fmt.Errorf("value %d is too large to be recorded", v) 100 | } 101 | h.CountsRep.Add(idx, n) 102 | return nil 103 | } 104 | 105 | // Merge merges the provided histogram representation. 106 | // TODO: Add support for migration from a histogram representation 107 | // with different parameters. 108 | func (h *HistogramRepresentation) Merge(from *HistogramRepresentation) { 109 | if from == nil { 110 | return 111 | } 112 | from.CountsRep.ForEach(func(bucket int32, value int64) { 113 | h.CountsRep.Add(bucket, value) 114 | }) 115 | } 116 | 117 | // Buckets converts the histogram into ordered slices of counts 118 | // and values per bar along with the total count. 
119 | func (h *HistogramRepresentation) Buckets() (uint64, []uint64, []float64) { 120 | counts := make([]uint64, 0, h.CountsRep.Len()) 121 | values := make([]float64, 0, h.CountsRep.Len()) 122 | 123 | var totalCount uint64 124 | var prevBucket int32 125 | iter := h.iterator() 126 | iter.nextCountAtIdx() 127 | h.CountsRep.ForEach(func(bucket int32, scaledCounts int64) { 128 | if scaledCounts <= 0 { 129 | return 130 | } 131 | if iter.advance(int(bucket - prevBucket)) { 132 | count := uint64(math.Round(float64(scaledCounts) / histogramCountScale)) 133 | counts = append(counts, count) 134 | values = append(values, float64(iter.highestEquivalentValue)) 135 | totalCount += count 136 | } 137 | prevBucket = bucket 138 | }) 139 | return totalCount, counts, values 140 | } 141 | 142 | func (h *HistogramRepresentation) countsIndexFor(v int64) int32 { 143 | bucketIdx := h.getBucketIndex(v) 144 | subBucketIdx := h.getSubBucketIdx(v, bucketIdx) 145 | return h.countsIndex(bucketIdx, subBucketIdx) 146 | } 147 | 148 | func (h *HistogramRepresentation) countsIndex(bucketIdx, subBucketIdx int32) int32 { 149 | baseBucketIdx := (bucketIdx + 1) << uint(subBucketHalfCountMagnitude) 150 | return baseBucketIdx + subBucketIdx - subBucketHalfCount 151 | } 152 | 153 | func (h *HistogramRepresentation) getBucketIndex(v int64) int32 { 154 | var pow2Ceiling = int64(64 - bits.LeadingZeros64(uint64(v|subBucketMask))) 155 | return int32(pow2Ceiling - int64(unitMagnitude) - 156 | int64(subBucketHalfCountMagnitude+1)) 157 | } 158 | 159 | func (h *HistogramRepresentation) getSubBucketIdx(v int64, idx int32) int32 { 160 | return int32(v >> uint(int64(idx)+int64(unitMagnitude))) 161 | } 162 | 163 | func (h *HistogramRepresentation) valueFromIndex(bucketIdx, subBucketIdx int32) int64 { 164 | return int64(subBucketIdx) << uint(bucketIdx+unitMagnitude) 165 | } 166 | 167 | func (h *HistogramRepresentation) highestEquivalentValue(v int64) int64 { 168 | return h.nextNonEquivalentValue(v) - 1 169 | } 170 | 171 
| func (h *HistogramRepresentation) nextNonEquivalentValue(v int64) int64 { 172 | bucketIdx := h.getBucketIndex(v) 173 | return h.lowestEquivalentValueGivenBucketIdx(v, bucketIdx) + h.sizeOfEquivalentValueRangeGivenBucketIdx(v, bucketIdx) 174 | } 175 | 176 | func (h *HistogramRepresentation) lowestEquivalentValueGivenBucketIdx(v int64, bucketIdx int32) int64 { 177 | subBucketIdx := h.getSubBucketIdx(v, bucketIdx) 178 | return h.valueFromIndex(bucketIdx, subBucketIdx) 179 | } 180 | 181 | func (h *HistogramRepresentation) sizeOfEquivalentValueRangeGivenBucketIdx(v int64, bucketIdx int32) int64 { 182 | subBucketIdx := h.getSubBucketIdx(v, bucketIdx) 183 | adjustedBucket := bucketIdx 184 | if subBucketIdx >= subBucketCount { 185 | adjustedBucket++ 186 | } 187 | return int64(1) << uint(unitMagnitude+adjustedBucket) 188 | } 189 | 190 | func (h *HistogramRepresentation) iterator() *iterator { 191 | return &iterator{ 192 | h: h, 193 | subBucketIdx: -1, 194 | } 195 | } 196 | 197 | type iterator struct { 198 | h *HistogramRepresentation 199 | bucketIdx, subBucketIdx int32 200 | valueFromIdx int64 201 | highestEquivalentValue int64 202 | } 203 | 204 | // advance advances the iterator by count 205 | func (i *iterator) advance(count int) bool { 206 | for c := 0; c < count; c++ { 207 | if !i.nextCountAtIdx() { 208 | return false 209 | } 210 | } 211 | i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx) 212 | return true 213 | } 214 | 215 | func (i *iterator) nextCountAtIdx() bool { 216 | // increment bucket 217 | i.subBucketIdx++ 218 | if i.subBucketIdx >= subBucketCount { 219 | i.subBucketIdx = subBucketHalfCount 220 | i.bucketIdx++ 221 | } 222 | 223 | if i.bucketIdx >= bucketCount { 224 | return false 225 | } 226 | 227 | i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx) 228 | return true 229 | } 230 | 231 | func getSubBucketHalfCountMagnitude() int32 { 232 | largetValueWithSingleUnitResolution := 2 * math.Pow10(significantFigures) 233 | 
subBucketCountMagnitude := int32(math.Ceil(math.Log2( 234 | largetValueWithSingleUnitResolution, 235 | ))) 236 | if subBucketCountMagnitude < 1 { 237 | return 0 238 | } 239 | return subBucketCountMagnitude - 1 240 | } 241 | 242 | func getUnitMagnitude() int32 { 243 | unitMag := int32(math.Floor(math.Log2( 244 | lowestTrackableValue, 245 | ))) 246 | if unitMag < 0 { 247 | return 0 248 | } 249 | return unitMag 250 | } 251 | 252 | func getSubBucketCount() int32 { 253 | return int32(math.Pow(2, float64(getSubBucketHalfCountMagnitude()+1))) 254 | } 255 | 256 | func getSubBucketHalfCount() int32 { 257 | return getSubBucketCount() / 2 258 | } 259 | 260 | func getSubBucketMask() int64 { 261 | return int64(getSubBucketCount()-1) << uint(getUnitMagnitude()) 262 | } 263 | 264 | func getCountsLen() int64 { 265 | return int64((getBucketCount() + 1) * (getSubBucketCount() / 2)) 266 | } 267 | 268 | func getBucketCount() int32 { 269 | smallestUntrackableValue := int64(getSubBucketCount()) << uint(getUnitMagnitude()) 270 | bucketsNeeded := int32(1) 271 | for smallestUntrackableValue < highestTrackableValue { 272 | if smallestUntrackableValue > (math.MaxInt64 / 2) { 273 | // next shift will overflow, meaning that bucket could 274 | // represent values up to ones greater than math.MaxInt64, 275 | // so it's the last bucket 276 | return bucketsNeeded + 1 277 | } 278 | smallestUntrackableValue <<= 1 279 | bucketsNeeded++ 280 | } 281 | return bucketsNeeded 282 | } 283 | 284 | // bar represents a bar of histogram. Each bar has a bucket, representing 285 | // where the bar belongs to in the histogram range, and the count of values 286 | // in each bucket. 287 | type bar struct { 288 | Bucket int32 289 | Count int64 290 | } 291 | 292 | // HybridCountsRep represents a hybrid counts representation for 293 | // sparse histogram. It is optimized to record a single value as 294 | // integer type and more values as map. 
295 | type HybridCountsRep struct { 296 | bucket int32 297 | value int64 298 | s []bar 299 | } 300 | 301 | // Add adds a new value to a bucket of given index. 302 | func (c *HybridCountsRep) Add(bucket int32, value int64) { 303 | if c.s == nil && c.bucket == 0 && c.value == 0 { 304 | c.bucket = bucket 305 | c.value = value 306 | return 307 | } 308 | if c.s == nil { 309 | // automatic promotion to slice 310 | c.s = make([]bar, 0, 128) // TODO: Use pool 311 | c.s = slices.Insert(c.s, 0, bar{Bucket: c.bucket, Count: c.value}) 312 | c.bucket, c.value = 0, 0 313 | } 314 | at, found := slices.BinarySearchFunc(c.s, bar{Bucket: bucket}, compareBar) 315 | if found { 316 | c.s[at].Count += value 317 | return 318 | } 319 | c.s = slices.Insert(c.s, at, bar{Bucket: bucket, Count: value}) 320 | } 321 | 322 | // ForEach iterates over each bucket and calls the given function. 323 | func (c *HybridCountsRep) ForEach(f func(int32, int64)) { 324 | if c.s == nil && (c.bucket != 0 || c.value != 0) { 325 | f(c.bucket, c.value) 326 | return 327 | } 328 | for i := range c.s { 329 | f(c.s[i].Bucket, c.s[i].Count) 330 | } 331 | } 332 | 333 | // Len returns the number of buckets currently recording. 334 | func (c *HybridCountsRep) Len() int { 335 | if c.s != nil { 336 | return len(c.s) 337 | } 338 | if c.bucket != 0 || c.value != 0 { 339 | return 1 340 | } 341 | return 0 342 | } 343 | 344 | // Get returns the count of values in a given bucket along with a bool 345 | // which is false if the bucket is not found. 346 | func (c *HybridCountsRep) Get(bucket int32) (int64, bool) { 347 | if c.s == nil { 348 | if c.bucket == bucket { 349 | return c.value, true 350 | } 351 | return 0, false 352 | } 353 | at, found := slices.BinarySearchFunc(c.s, bar{Bucket: bucket}, compareBar) 354 | if found { 355 | return c.s[at].Count, true 356 | } 357 | return 0, false 358 | } 359 | 360 | // Reset resets the values recorded. 
361 | func (c *HybridCountsRep) Reset() { 362 | c.bucket = 0 363 | c.value = 0 364 | c.s = c.s[:0] 365 | } 366 | 367 | // Equal returns true if same bucket and count is recorded in both. 368 | func (c *HybridCountsRep) Equal(h *HybridCountsRep) bool { 369 | if c.Len() != h.Len() { 370 | return false 371 | } 372 | if c.Len() == 0 { 373 | return true 374 | } 375 | equal := true 376 | c.ForEach(func(bucket int32, value1 int64) { 377 | value2, ok := h.Get(bucket) 378 | if !ok || value1 != value2 { 379 | equal = false 380 | } 381 | }) 382 | return equal 383 | } 384 | 385 | func compareBar(a, b bar) int { 386 | if a.Bucket == b.Bucket { 387 | return 0 388 | } 389 | if a.Bucket > b.Bucket { 390 | return 1 391 | } 392 | return -1 393 | } 394 | -------------------------------------------------------------------------------- /aggregators/merger.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 
4 | 5 | package aggregators 6 | 7 | import ( 8 | "io" 9 | "slices" 10 | "sort" 11 | 12 | "github.com/cespare/xxhash/v2" 13 | 14 | "github.com/elastic/apm-aggregation/aggregationpb" 15 | "github.com/elastic/apm-aggregation/aggregators/internal/constraint" 16 | "github.com/elastic/apm-aggregation/aggregators/internal/protohash" 17 | ) 18 | 19 | type combinedMetricsMerger struct { 20 | limits Limits 21 | constraints constraints 22 | metrics combinedMetrics 23 | } 24 | 25 | func (m *combinedMetricsMerger) MergeNewer(value []byte) error { 26 | from := &aggregationpb.CombinedMetrics{} 27 | if err := from.UnmarshalVT(value); err != nil { 28 | return err 29 | } 30 | m.merge(from) 31 | return nil 32 | } 33 | 34 | func (m *combinedMetricsMerger) MergeOlder(value []byte) error { 35 | from := &aggregationpb.CombinedMetrics{} 36 | if err := from.UnmarshalVT(value); err != nil { 37 | return err 38 | } 39 | m.merge(from) 40 | return nil 41 | } 42 | 43 | func (m *combinedMetricsMerger) Finish(includesBase bool) ([]byte, io.Closer, error) { 44 | pb := m.metrics.ToProto() 45 | data, err := pb.MarshalVT() 46 | return data, nil, err 47 | } 48 | 49 | func (m *combinedMetricsMerger) merge(from *aggregationpb.CombinedMetrics) { 50 | // We merge the below fields irrespective of the services present 51 | // because it is possible for services to be empty if the event 52 | // does not fit the criteria for aggregations. 53 | m.metrics.EventsTotal += from.EventsTotal 54 | if m.metrics.YoungestEventTimestamp < from.YoungestEventTimestamp { 55 | m.metrics.YoungestEventTimestamp = from.YoungestEventTimestamp 56 | } 57 | // If there is overflow due to max services in either of the buckets being 58 | // merged then we can merge the overflow buckets without considering any 59 | // other scenarios. 
60 | if len(from.OverflowServicesEstimator) > 0 { 61 | mergeOverflow(&m.metrics.OverflowServices, from.OverflowServices) 62 | mergeEstimator( 63 | &m.metrics.OverflowServicesEstimator, 64 | hllSketch(from.OverflowServicesEstimator), 65 | ) 66 | } 67 | 68 | if len(from.ServiceMetrics) == 0 { 69 | return 70 | } 71 | if m.metrics.Services == nil { 72 | m.metrics.Services = make(map[serviceAggregationKey]serviceMetrics) 73 | } 74 | 75 | // Iterate over the services in the _from_ combined metrics and merge them 76 | // into the _to_ combined metrics as per the following rules: 77 | // 1. If the service in the _from_ bucket is also present in the _to_ 78 | // bucket then merge them. 79 | // 2. If the service in the _from_ bucket is not in the _to_ bucket: 80 | // 2.a. If the _to_ bucket hasn't breached the max services limit then 81 | // create a new service in _to_ bucket and merge. 82 | // 2.b. Else, merge the _from_ bucket to the overflow service bucket 83 | // of the _to_ combined metrics. 
84 | for i := range from.ServiceMetrics { 85 | fromSvc := from.ServiceMetrics[i] 86 | serviceKeyHash := protohash.HashServiceAggregationKey(xxhash.Digest{}, fromSvc.Key) 87 | var sk serviceAggregationKey 88 | sk.FromProto(fromSvc.Key) 89 | toSvc, svcOverflow := getServiceMetrics(&m.metrics, sk, m.limits.MaxServices) 90 | if svcOverflow { 91 | mergeOverflow(&m.metrics.OverflowServices, fromSvc.Metrics.OverflowGroups) 92 | mergeToOverflowFromServiceMetrics(&m.metrics.OverflowServices, fromSvc.Metrics, serviceKeyHash) 93 | insertHash(&m.metrics.OverflowServicesEstimator, serviceKeyHash.Sum64()) 94 | continue 95 | } 96 | if fromSvc.Metrics != nil { 97 | mergeOverflow(&toSvc.OverflowGroups, fromSvc.Metrics.OverflowGroups) 98 | mergeTransactionGroups( 99 | toSvc.TransactionGroups, 100 | fromSvc.Metrics.TransactionMetrics, 101 | constraint.New( 102 | len(toSvc.TransactionGroups), 103 | m.limits.MaxTransactionGroupsPerService, 104 | ), 105 | m.constraints.totalTransactionGroups, 106 | serviceKeyHash, 107 | &toSvc.OverflowGroups.OverflowTransaction, 108 | ) 109 | mergeServiceTransactionGroups( 110 | toSvc.ServiceTransactionGroups, 111 | fromSvc.Metrics.ServiceTransactionMetrics, 112 | constraint.New( 113 | len(toSvc.ServiceTransactionGroups), 114 | m.limits.MaxServiceTransactionGroupsPerService, 115 | ), 116 | m.constraints.totalServiceTransactionGroups, 117 | serviceKeyHash, 118 | &toSvc.OverflowGroups.OverflowServiceTransaction, 119 | ) 120 | mergeSpanGroups( 121 | toSvc.SpanGroups, 122 | fromSvc.Metrics.SpanMetrics, 123 | constraint.New( 124 | len(toSvc.SpanGroups), 125 | m.limits.MaxSpanGroupsPerService, 126 | ), 127 | m.constraints.totalSpanGroups, 128 | serviceKeyHash, 129 | &toSvc.OverflowGroups.OverflowSpan, 130 | ) 131 | } 132 | m.metrics.Services[sk] = toSvc 133 | } 134 | } 135 | 136 | // mergeTransactionGroups merges transaction aggregation groups for two combined metrics 137 | // considering max transaction groups and max transaction groups per service limits. 
138 | func mergeTransactionGroups( 139 | to map[transactionAggregationKey]*aggregationpb.KeyedTransactionMetrics, 140 | from []*aggregationpb.KeyedTransactionMetrics, 141 | perSvcConstraint, globalConstraint *constraint.Constraint, 142 | hash xxhash.Digest, 143 | overflowTo *overflowTransaction, 144 | ) { 145 | for i := range from { 146 | fromTxn := from[i] 147 | var tk transactionAggregationKey 148 | tk.FromProto(fromTxn.Key) 149 | toTxn, ok := to[tk] 150 | if !ok { 151 | overflowed := perSvcConstraint.Maxed() || globalConstraint.Maxed() 152 | if overflowed { 153 | fromTxnKeyHash := protohash.HashTransactionAggregationKey(hash, fromTxn.Key) 154 | overflowTo.Merge(fromTxn.Metrics, fromTxnKeyHash.Sum64()) 155 | continue 156 | } 157 | perSvcConstraint.Add(1) 158 | globalConstraint.Add(1) 159 | 160 | to[tk] = fromTxn.CloneVT() 161 | continue 162 | } 163 | mergeKeyedTransactionMetrics(toTxn, fromTxn) 164 | } 165 | } 166 | 167 | // mergeServiceTransactionGroups merges service transaction aggregation groups for two 168 | // combined metrics considering max service transaction groups and max service 169 | // transaction groups per service limits. 
170 | func mergeServiceTransactionGroups( 171 | to map[serviceTransactionAggregationKey]*aggregationpb.KeyedServiceTransactionMetrics, 172 | from []*aggregationpb.KeyedServiceTransactionMetrics, 173 | perSvcConstraint, globalConstraint *constraint.Constraint, 174 | hash xxhash.Digest, 175 | overflowTo *overflowServiceTransaction, 176 | ) { 177 | for i := range from { 178 | fromSvcTxn := from[i] 179 | var stk serviceTransactionAggregationKey 180 | stk.FromProto(fromSvcTxn.Key) 181 | toSvcTxn, ok := to[stk] 182 | if !ok { 183 | overflowed := perSvcConstraint.Maxed() || globalConstraint.Maxed() 184 | if overflowed { 185 | fromSvcTxnKeyHash := protohash.HashServiceTransactionAggregationKey(hash, fromSvcTxn.Key) 186 | overflowTo.Merge(fromSvcTxn.Metrics, fromSvcTxnKeyHash.Sum64()) 187 | continue 188 | } 189 | perSvcConstraint.Add(1) 190 | globalConstraint.Add(1) 191 | 192 | to[stk] = fromSvcTxn.CloneVT() 193 | continue 194 | } 195 | mergeKeyedServiceTransactionMetrics(toSvcTxn, fromSvcTxn) 196 | } 197 | } 198 | 199 | // mergeSpanGroups merges span aggregation groups for two combined metrics considering 200 | // max span groups and max span groups per service limits. 201 | func mergeSpanGroups( 202 | to map[spanAggregationKey]*aggregationpb.KeyedSpanMetrics, 203 | from []*aggregationpb.KeyedSpanMetrics, 204 | perSvcConstraint, globalConstraint *constraint.Constraint, 205 | hash xxhash.Digest, 206 | overflowTo *overflowSpan, 207 | ) { 208 | for i := range from { 209 | fromSpan := from[i] 210 | var spk spanAggregationKey 211 | spk.FromProto(fromSpan.Key) 212 | toSpan, ok := to[spk] 213 | if !ok { 214 | // Protect against agents that send high cardinality span names by dropping 215 | // span.name if more than half of the per svc span group limit is reached. 
216 | originalSpanName := fromSpan.Key.SpanName 217 | half := perSvcConstraint.Limit() / 2 218 | if perSvcConstraint.Value() >= half { 219 | spk.SpanName = "" 220 | fromSpan.Key.SpanName = "" 221 | toSpan, ok = to[spk] 222 | } 223 | if !ok { 224 | overflowed := perSvcConstraint.Maxed() || globalConstraint.Maxed() 225 | if overflowed { 226 | // Restore span name in case it was dropped above, 227 | // for cardinality estimation. 228 | fromSpan.Key.SpanName = originalSpanName 229 | fromSpanKeyHash := protohash.HashSpanAggregationKey(hash, fromSpan.Key) 230 | overflowTo.Merge(fromSpan.Metrics, fromSpanKeyHash.Sum64()) 231 | continue 232 | } 233 | perSvcConstraint.Add(1) 234 | globalConstraint.Add(1) 235 | 236 | to[spk] = fromSpan.CloneVT() 237 | continue 238 | } 239 | } 240 | mergeKeyedSpanMetrics(toSpan, fromSpan) 241 | } 242 | } 243 | 244 | func mergeToOverflowFromServiceMetrics( 245 | to *overflow, 246 | from *aggregationpb.ServiceMetrics, 247 | hash xxhash.Digest, 248 | ) { 249 | if from == nil { 250 | return 251 | } 252 | for _, ktm := range from.TransactionMetrics { 253 | ktmKeyHash := protohash.HashTransactionAggregationKey(hash, ktm.Key) 254 | to.OverflowTransaction.Merge(ktm.Metrics, ktmKeyHash.Sum64()) 255 | } 256 | for _, kstm := range from.ServiceTransactionMetrics { 257 | kstmKeyHash := protohash.HashServiceTransactionAggregationKey(hash, kstm.Key) 258 | to.OverflowServiceTransaction.Merge(kstm.Metrics, kstmKeyHash.Sum64()) 259 | } 260 | for _, ksm := range from.SpanMetrics { 261 | ksmKeyHash := protohash.HashSpanAggregationKey(hash, ksm.Key) 262 | to.OverflowSpan.Merge(ksm.Metrics, ksmKeyHash.Sum64()) 263 | } 264 | } 265 | 266 | func mergeOverflow( 267 | to *overflow, 268 | fromproto *aggregationpb.Overflow, 269 | ) { 270 | if fromproto == nil { 271 | return 272 | } 273 | var from overflow 274 | from.FromProto(fromproto) 275 | to.OverflowTransaction.MergeOverflow(&from.OverflowTransaction) 276 | 
to.OverflowServiceTransaction.MergeOverflow(&from.OverflowServiceTransaction) 277 | to.OverflowSpan.MergeOverflow(&from.OverflowSpan) 278 | } 279 | 280 | func mergeKeyedTransactionMetrics( 281 | to, from *aggregationpb.KeyedTransactionMetrics, 282 | ) { 283 | if from.Metrics == nil { 284 | return 285 | } 286 | if to.Metrics == nil { 287 | to.Metrics = &aggregationpb.TransactionMetrics{} 288 | } 289 | mergeTransactionMetrics(to.Metrics, from.Metrics) 290 | } 291 | 292 | func mergeTransactionMetrics( 293 | to, from *aggregationpb.TransactionMetrics, 294 | ) { 295 | if to.Histogram == nil && from.Histogram != nil { 296 | to.Histogram = &aggregationpb.HDRHistogram{} 297 | } 298 | if to.Histogram != nil && from.Histogram != nil { 299 | mergeHistogram(to.Histogram, from.Histogram) 300 | } 301 | } 302 | 303 | func mergeKeyedServiceTransactionMetrics( 304 | to, from *aggregationpb.KeyedServiceTransactionMetrics, 305 | ) { 306 | if from.Metrics == nil { 307 | return 308 | } 309 | if to.Metrics == nil { 310 | to.Metrics = &aggregationpb.ServiceTransactionMetrics{} 311 | } 312 | mergeServiceTransactionMetrics(to.Metrics, from.Metrics) 313 | } 314 | 315 | func mergeServiceTransactionMetrics( 316 | to, from *aggregationpb.ServiceTransactionMetrics, 317 | ) { 318 | if to.Histogram == nil && from.Histogram != nil { 319 | to.Histogram = &aggregationpb.HDRHistogram{} 320 | } 321 | if to.Histogram != nil && from.Histogram != nil { 322 | mergeHistogram(to.Histogram, from.Histogram) 323 | } 324 | to.FailureCount += from.FailureCount 325 | to.SuccessCount += from.SuccessCount 326 | } 327 | 328 | func mergeKeyedSpanMetrics(to, from *aggregationpb.KeyedSpanMetrics) { 329 | if from.Metrics == nil { 330 | return 331 | } 332 | if to.Metrics == nil { 333 | to.Metrics = &aggregationpb.SpanMetrics{} 334 | } 335 | mergeSpanMetrics(to.Metrics, from.Metrics) 336 | } 337 | 338 | func mergeSpanMetrics(to, from *aggregationpb.SpanMetrics) { 339 | to.Count += from.Count 340 | to.Sum += from.Sum 341 | 
} 342 | 343 | // mergeHistogram merges two proto representation of HDRHistogram. The 344 | // merge assumes both histograms are created with same arguments and 345 | // their representations are sorted by bucket. 346 | func mergeHistogram(to, from *aggregationpb.HDRHistogram) { 347 | if len(from.Buckets) == 0 { 348 | return 349 | } 350 | 351 | if len(to.Buckets) == 0 { 352 | to.Buckets = append(to.Buckets, from.Buckets...) 353 | to.Counts = append(to.Counts, from.Counts...) 354 | return 355 | } 356 | 357 | startToIdx, found := sort.Find(len(to.Buckets), func(i int) int { 358 | return int(from.Buckets[0] - to.Buckets[i]) 359 | }) 360 | if found && len(from.Buckets) == 1 { 361 | // optimize for single value of `from` also found in `to` 362 | to.Counts[startToIdx] += from.Counts[0] 363 | return 364 | } 365 | 366 | // Since all values of `from` must be greater than the first value, we can 367 | // limit the search space in `to` to [startToIdx, len(to.Buckets)) 368 | requiredLen := len(to.Buckets) + len(from.Buckets) 369 | for toIdx, fromIdx := startToIdx, 0; toIdx < len(to.Buckets) && fromIdx < len(from.Buckets); { 370 | v := to.Buckets[toIdx] - from.Buckets[fromIdx] 371 | switch { 372 | case v == 0: 373 | // For every bucket that is common, we need one less bucket in final slice 374 | requiredLen-- 375 | toIdx++ 376 | fromIdx++ 377 | case v < 0: 378 | toIdx++ 379 | case v > 0: 380 | fromIdx++ 381 | } 382 | } 383 | 384 | toIdx, fromIdx := len(to.Buckets)-1, len(from.Buckets)-1 385 | to.Buckets = slices.Grow(to.Buckets, requiredLen-len(to.Buckets))[:requiredLen] 386 | to.Counts = slices.Grow(to.Counts, requiredLen-len(to.Counts))[:requiredLen] 387 | for idx := len(to.Buckets) - 1; idx >= 0; idx-- { 388 | if fromIdx < 0 { 389 | break 390 | } 391 | if toIdx < startToIdx { 392 | copy(to.Counts[startToIdx:idx+1], from.Counts[0:fromIdx+1]) 393 | copy(to.Buckets[startToIdx:idx+1], from.Buckets[0:fromIdx+1]) 394 | break 395 | } 396 | v := to.Buckets[toIdx] - 
from.Buckets[fromIdx] 397 | switch { 398 | case v == 0: 399 | to.Counts[idx] = to.Counts[toIdx] + from.Counts[fromIdx] 400 | to.Buckets[idx] = to.Buckets[toIdx] 401 | toIdx-- 402 | fromIdx-- 403 | case v > 0: 404 | to.Counts[idx] = to.Counts[toIdx] 405 | to.Buckets[idx] = to.Buckets[toIdx] 406 | toIdx-- 407 | case v < 0: 408 | to.Counts[idx] = from.Counts[fromIdx] 409 | to.Buckets[idx] = from.Buckets[fromIdx] 410 | fromIdx-- 411 | } 412 | } 413 | } 414 | 415 | // getServiceMetrics returns the service metric from a combined metrics based on the 416 | // service key argument, creating one if needed. A second bool return value indicates 417 | // if a service is returned or no service can be created due to max svcs limit breach. 418 | func getServiceMetrics(cm *combinedMetrics, svcKey serviceAggregationKey, maxSvcs int) (serviceMetrics, bool) { 419 | srcSvc, ok := cm.Services[svcKey] 420 | if !ok { 421 | if len(cm.Services) < maxSvcs { 422 | return newServiceMetrics(), false 423 | } 424 | return serviceMetrics{}, true 425 | } 426 | return srcSvc, false 427 | } 428 | 429 | func newServiceMetrics() serviceMetrics { 430 | return serviceMetrics{ 431 | TransactionGroups: make(map[transactionAggregationKey]*aggregationpb.KeyedTransactionMetrics), 432 | ServiceTransactionGroups: make(map[serviceTransactionAggregationKey]*aggregationpb.KeyedServiceTransactionMetrics), 433 | SpanGroups: make(map[spanAggregationKey]*aggregationpb.KeyedSpanMetrics), 434 | } 435 | } 436 | 437 | // constraints is a group of constraints to be observed during merge operations. 
438 | type constraints struct { 439 | totalTransactionGroups *constraint.Constraint 440 | totalServiceTransactionGroups *constraint.Constraint 441 | totalSpanGroups *constraint.Constraint 442 | } 443 | 444 | func newConstraints(limits Limits) constraints { 445 | return constraints{ 446 | totalTransactionGroups: constraint.New(0, limits.MaxTransactionGroups), 447 | totalServiceTransactionGroups: constraint.New(0, limits.MaxServiceTransactionGroups), 448 | totalSpanGroups: constraint.New(0, limits.MaxSpanGroups), 449 | } 450 | } 451 | 452 | // MergeCombinedMetrics merges provided CombinedMetrics together. 453 | func MergeCombinedMetrics(limits Limits, metrics ...*aggregationpb.CombinedMetrics) *aggregationpb.CombinedMetrics { 454 | merger := combinedMetricsMerger{ 455 | limits: limits, 456 | constraints: newConstraints(limits), 457 | } 458 | for _, m := range metrics { 459 | merger.merge(m) 460 | } 461 | return merger.metrics.ToProto() 462 | } 463 | -------------------------------------------------------------------------------- /aggregators/codec.go: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. Licensed under the Elastic License 2.0; 3 | // you may not use this file except in compliance with the Elastic License 2.0. 4 | 5 | package aggregators 6 | 7 | // TODO(lahsivjar): Add a test using reflect to validate if all 8 | // fields are properly set. 
9 | 10 | import ( 11 | "encoding/binary" 12 | "errors" 13 | "slices" 14 | "sort" 15 | "time" 16 | 17 | "github.com/axiomhq/hyperloglog" 18 | 19 | "github.com/elastic/apm-aggregation/aggregationpb" 20 | "github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram" 21 | "github.com/elastic/apm-aggregation/aggregators/nullable" 22 | "github.com/elastic/apm-data/model/modelpb" 23 | ) 24 | 25 | // CombinedMetricsKeyEncodedSize gives the encoded size gives the size of 26 | // CombinedMetricsKey in bytes. The size is used as follows: 27 | // - 2 bytes for interval encoding 28 | // - 8 bytes for timestamp encoding 29 | // - 16 bytes for ID encoding 30 | // - 2 bytes for partition ID 31 | const CombinedMetricsKeyEncodedSize = 28 32 | 33 | // MarshalBinaryToSizedBuffer will marshal the combined metrics key into 34 | // its binary representation. The encoded byte slice will be used as a 35 | // key in pebbledb. To ensure efficient sorting and time range based 36 | // query, the first 2 bytes of the encoded slice is the aggregation 37 | // interval, the next 8 bytes of the encoded slice is the processing time 38 | // followed by combined metrics ID, the last 2 bytes is the partition ID. 39 | // The binary representation ensures that all entries are ordered by the 40 | // ID first and then ordered by the partition ID. 
41 | func (k *CombinedMetricsKey) MarshalBinaryToSizedBuffer(data []byte) error { 42 | ivlSeconds := uint16(k.Interval.Seconds()) 43 | if len(data) != CombinedMetricsKeyEncodedSize { 44 | return errors.New("failed to marshal due to incorrect sized buffer") 45 | } 46 | var offset int 47 | 48 | binary.BigEndian.PutUint16(data[offset:], ivlSeconds) 49 | offset += 2 50 | 51 | binary.BigEndian.PutUint64(data[offset:], uint64(k.ProcessingTime.Unix())) 52 | offset += 8 53 | 54 | copy(data[offset:], k.ID[:]) 55 | offset += 16 56 | 57 | binary.BigEndian.PutUint16(data[offset:], k.PartitionID) 58 | return nil 59 | } 60 | 61 | // UnmarshalBinary will convert the byte encoded data into CombinedMetricsKey. 62 | func (k *CombinedMetricsKey) UnmarshalBinary(data []byte) error { 63 | if len(data) < 12 { 64 | return errors.New("invalid encoded data of insufficient length") 65 | } 66 | var offset int 67 | k.Interval = time.Duration(binary.BigEndian.Uint16(data[offset:2])) * time.Second 68 | offset += 2 69 | 70 | k.ProcessingTime = time.Unix(int64(binary.BigEndian.Uint64(data[offset:offset+8])), 0) 71 | offset += 8 72 | 73 | copy(k.ID[:], data[offset:offset+len(k.ID)]) 74 | offset += len(k.ID) 75 | 76 | k.PartitionID = binary.BigEndian.Uint16(data[offset:]) 77 | return nil 78 | } 79 | 80 | // SizeBinary returns the size of the byte array required to encode 81 | // combined metrics key. Encoded size for CombinedMetricsKey is constant 82 | // and alternatively the constant CombinedMetricsKeyEncodedSize can be used. 83 | func (k *CombinedMetricsKey) SizeBinary() int { 84 | return CombinedMetricsKeyEncodedSize 85 | } 86 | 87 | // GetEncodedCombinedMetricsKeyWithoutPartitionID is a util function to 88 | // remove partition bits from an encoded CombinedMetricsKey. 
89 | func GetEncodedCombinedMetricsKeyWithoutPartitionID(src []byte) []byte { 90 | var buf [CombinedMetricsKeyEncodedSize]byte 91 | copy(buf[:CombinedMetricsKeyEncodedSize-2], src) 92 | return buf[:] 93 | } 94 | 95 | // ToProto converts CombinedMetrics to its protobuf representation. 96 | func (m *combinedMetrics) ToProto() *aggregationpb.CombinedMetrics { 97 | var pb aggregationpb.CombinedMetrics 98 | pb.ServiceMetrics = slices.Grow(pb.ServiceMetrics, len(m.Services))[:len(m.Services)] 99 | var i int 100 | for k, m := range m.Services { 101 | if pb.ServiceMetrics[i] == nil { 102 | pb.ServiceMetrics[i] = &aggregationpb.KeyedServiceMetrics{} 103 | } 104 | pb.ServiceMetrics[i].Key = k.ToProto() 105 | pb.ServiceMetrics[i].Metrics = m.ToProto() 106 | i++ 107 | } 108 | if m.OverflowServicesEstimator != nil { 109 | pb.OverflowServices = m.OverflowServices.ToProto() 110 | pb.OverflowServicesEstimator = hllBytes(m.OverflowServicesEstimator) 111 | } 112 | pb.EventsTotal = m.EventsTotal 113 | pb.YoungestEventTimestamp = m.YoungestEventTimestamp 114 | return &pb 115 | } 116 | 117 | // ToProto converts ServiceAggregationKey to its protobuf representation. 118 | func (k *serviceAggregationKey) ToProto() *aggregationpb.ServiceAggregationKey { 119 | var pb aggregationpb.ServiceAggregationKey 120 | pb.Timestamp = modelpb.FromTime(k.Timestamp) 121 | pb.ServiceName = k.ServiceName 122 | pb.ServiceEnvironment = k.ServiceEnvironment 123 | pb.ServiceLanguageName = k.ServiceLanguageName 124 | pb.AgentName = k.AgentName 125 | pb.GlobalLabelsStr = []byte(k.GlobalLabelsStr) 126 | return &pb 127 | } 128 | 129 | // FromProto converts protobuf representation to ServiceAggregationKey. 
130 | func (k *serviceAggregationKey) FromProto(pb *aggregationpb.ServiceAggregationKey) { 131 | k.Timestamp = modelpb.ToTime(pb.Timestamp) 132 | k.ServiceName = pb.ServiceName 133 | k.ServiceEnvironment = pb.ServiceEnvironment 134 | k.ServiceLanguageName = pb.ServiceLanguageName 135 | k.AgentName = pb.AgentName 136 | k.GlobalLabelsStr = string(pb.GlobalLabelsStr) 137 | } 138 | 139 | // ToProto converts ServiceMetrics to its protobuf representation. 140 | func (m *serviceMetrics) ToProto() *aggregationpb.ServiceMetrics { 141 | var pb aggregationpb.ServiceMetrics 142 | pb.OverflowGroups = m.OverflowGroups.ToProto() 143 | 144 | pb.TransactionMetrics = slices.Grow(pb.TransactionMetrics, len(m.TransactionGroups)) 145 | for _, m := range m.TransactionGroups { 146 | pb.TransactionMetrics = append(pb.TransactionMetrics, m) 147 | } 148 | 149 | pb.ServiceTransactionMetrics = slices.Grow(pb.ServiceTransactionMetrics, len(m.ServiceTransactionGroups)) 150 | for _, m := range m.ServiceTransactionGroups { 151 | pb.ServiceTransactionMetrics = append(pb.ServiceTransactionMetrics, m) 152 | } 153 | 154 | pb.SpanMetrics = slices.Grow(pb.SpanMetrics, len(m.SpanGroups)) 155 | for _, m := range m.SpanGroups { 156 | pb.SpanMetrics = append(pb.SpanMetrics, m) 157 | } 158 | 159 | return &pb 160 | } 161 | 162 | // ToProto converts TransactionAggregationKey to its protobuf representation. 
163 | func (k *transactionAggregationKey) ToProto() *aggregationpb.TransactionAggregationKey { 164 | var pb aggregationpb.TransactionAggregationKey 165 | pb.TraceRoot = k.TraceRoot 166 | 167 | pb.ContainerId = k.ContainerID 168 | pb.KubernetesPodName = k.KubernetesPodName 169 | 170 | pb.ServiceVersion = k.ServiceVersion 171 | pb.ServiceNodeName = k.ServiceNodeName 172 | 173 | pb.ServiceRuntimeName = k.ServiceRuntimeName 174 | pb.ServiceRuntimeVersion = k.ServiceRuntimeVersion 175 | pb.ServiceLanguageVersion = k.ServiceLanguageVersion 176 | 177 | pb.HostHostname = k.HostHostname 178 | pb.HostName = k.HostName 179 | pb.HostOsPlatform = k.HostOSPlatform 180 | 181 | pb.EventOutcome = k.EventOutcome 182 | 183 | pb.TransactionName = k.TransactionName 184 | pb.TransactionType = k.TransactionType 185 | pb.TransactionResult = k.TransactionResult 186 | 187 | pb.FaasColdstart = uint32(k.FAASColdstart) 188 | pb.FaasId = k.FAASID 189 | pb.FaasName = k.FAASName 190 | pb.FaasVersion = k.FAASVersion 191 | pb.FaasTriggerType = k.FAASTriggerType 192 | 193 | pb.CloudProvider = k.CloudProvider 194 | pb.CloudRegion = k.CloudRegion 195 | pb.CloudAvailabilityZone = k.CloudAvailabilityZone 196 | pb.CloudServiceName = k.CloudServiceName 197 | pb.CloudAccountId = k.CloudAccountID 198 | pb.CloudAccountName = k.CloudAccountName 199 | pb.CloudMachineType = k.CloudMachineType 200 | pb.CloudProjectId = k.CloudProjectID 201 | pb.CloudProjectName = k.CloudProjectName 202 | return &pb 203 | } 204 | 205 | // FromProto converts protobuf representation to TransactionAggregationKey. 
func (k *transactionAggregationKey) FromProto(pb *aggregationpb.TransactionAggregationKey) {
	// Field-by-field mirror of ToProto; keep the two in sync when the key
	// gains fields.
	k.TraceRoot = pb.TraceRoot

	k.ContainerID = pb.ContainerId
	k.KubernetesPodName = pb.KubernetesPodName

	k.ServiceVersion = pb.ServiceVersion
	k.ServiceNodeName = pb.ServiceNodeName

	k.ServiceRuntimeName = pb.ServiceRuntimeName
	k.ServiceRuntimeVersion = pb.ServiceRuntimeVersion
	k.ServiceLanguageVersion = pb.ServiceLanguageVersion

	k.HostHostname = pb.HostHostname
	k.HostName = pb.HostName
	k.HostOSPlatform = pb.HostOsPlatform

	k.EventOutcome = pb.EventOutcome

	k.TransactionName = pb.TransactionName
	k.TransactionType = pb.TransactionType
	k.TransactionResult = pb.TransactionResult

	// FAASColdstart travels as a uint32 on the wire; nullable.Bool is
	// integer-backed (ToProto does the inverse uint32 conversion), so the
	// cast round-trips the stored value unchanged.
	k.FAASColdstart = nullable.Bool(pb.FaasColdstart)
	k.FAASID = pb.FaasId
	k.FAASName = pb.FaasName
	k.FAASVersion = pb.FaasVersion
	k.FAASTriggerType = pb.FaasTriggerType

	k.CloudProvider = pb.CloudProvider
	k.CloudRegion = pb.CloudRegion
	k.CloudAvailabilityZone = pb.CloudAvailabilityZone
	k.CloudServiceName = pb.CloudServiceName
	k.CloudAccountID = pb.CloudAccountId
	k.CloudAccountName = pb.CloudAccountName
	k.CloudMachineType = pb.CloudMachineType
	k.CloudProjectID = pb.CloudProjectId
	k.CloudProjectName = pb.CloudProjectName
}

// ToProto converts ServiceTransactionAggregationKey to its protobuf representation.
func (k *serviceTransactionAggregationKey) ToProto() *aggregationpb.ServiceTransactionAggregationKey {
	var pb aggregationpb.ServiceTransactionAggregationKey
	pb.TransactionType = k.TransactionType
	return &pb
}

// FromProto converts protobuf representation to ServiceTransactionAggregationKey.
254 | func (k *serviceTransactionAggregationKey) FromProto(pb *aggregationpb.ServiceTransactionAggregationKey) { 255 | k.TransactionType = pb.TransactionType 256 | } 257 | 258 | // ToProto converts SpanAggregationKey to its protobuf representation. 259 | func (k *spanAggregationKey) ToProto() *aggregationpb.SpanAggregationKey { 260 | var pb aggregationpb.SpanAggregationKey 261 | pb.SpanName = k.SpanName 262 | pb.Outcome = k.Outcome 263 | 264 | pb.TargetType = k.TargetType 265 | pb.TargetName = k.TargetName 266 | 267 | pb.Resource = k.Resource 268 | return &pb 269 | } 270 | 271 | // FromProto converts protobuf representation to SpanAggregationKey. 272 | func (k *spanAggregationKey) FromProto(pb *aggregationpb.SpanAggregationKey) { 273 | k.SpanName = pb.SpanName 274 | k.Outcome = pb.Outcome 275 | 276 | k.TargetType = pb.TargetType 277 | k.TargetName = pb.TargetName 278 | 279 | k.Resource = pb.Resource 280 | } 281 | 282 | // ToProto converts Overflow to its protobuf representation. 283 | func (o *overflow) ToProto() *aggregationpb.Overflow { 284 | var pb aggregationpb.Overflow 285 | if !o.OverflowTransaction.Empty() { 286 | pb.OverflowTransactions = o.OverflowTransaction.Metrics 287 | pb.OverflowTransactionsEstimator = hllBytes(o.OverflowTransaction.Estimator) 288 | } 289 | if !o.OverflowServiceTransaction.Empty() { 290 | pb.OverflowServiceTransactions = o.OverflowServiceTransaction.Metrics 291 | pb.OverflowServiceTransactionsEstimator = hllBytes(o.OverflowServiceTransaction.Estimator) 292 | } 293 | if !o.OverflowSpan.Empty() { 294 | pb.OverflowSpans = o.OverflowSpan.Metrics 295 | pb.OverflowSpansEstimator = hllBytes(o.OverflowSpan.Estimator) 296 | } 297 | return &pb 298 | } 299 | 300 | // FromProto converts protobuf representation to Overflow. 
func (o *overflow) FromProto(pb *aggregationpb.Overflow) {
	// For each populated category, take over pb's metrics slice and nil it
	// out on pb so the two structs do not share mutable state afterwards.
	if pb.OverflowTransactions != nil {
		o.OverflowTransaction.Estimator = hllSketch(pb.OverflowTransactionsEstimator)
		o.OverflowTransaction.Metrics = pb.OverflowTransactions
		pb.OverflowTransactions = nil
	}
	if pb.OverflowServiceTransactions != nil {
		o.OverflowServiceTransaction.Estimator = hllSketch(pb.OverflowServiceTransactionsEstimator)
		o.OverflowServiceTransaction.Metrics = pb.OverflowServiceTransactions
		pb.OverflowServiceTransactions = nil
	}
	if pb.OverflowSpans != nil {
		o.OverflowSpan.Estimator = hllSketch(pb.OverflowSpansEstimator)
		o.OverflowSpan.Metrics = pb.OverflowSpans
		pb.OverflowSpans = nil
	}
}

// ToProto converts GlobalLabels to its protobuf representation.
func (gl *globalLabels) ToProto() *aggregationpb.GlobalLabels {
	var pb aggregationpb.GlobalLabels

	// Keys must be sorted to ensure wire formats are deterministically generated and strings are directly comparable
	// i.e. Protobuf formats are equal if and only if the structs are equal
	//
	// Grow-and-reslice sizes pb.Labels to exactly len(gl.Labels); pb is
	// zero-valued here so the entries start nil and are allocated lazily
	// below.
	pb.Labels = slices.Grow(pb.Labels, len(gl.Labels))[:len(gl.Labels)]
	var i int
	for k, v := range gl.Labels {
		if pb.Labels[i] == nil {
			pb.Labels[i] = &aggregationpb.Label{}
		}
		pb.Labels[i].Key = k
		pb.Labels[i].Value = v.Value
		pb.Labels[i].Values = slices.Grow(pb.Labels[i].Values, len(v.Values))[:len(v.Values)]
		copy(pb.Labels[i].Values, v.Values)
		i++
	}
	// Map iteration order is random, so sort after filling.
	sort.Slice(pb.Labels, func(i, j int) bool {
		return pb.Labels[i].Key < pb.Labels[j].Key
	})

	// Same fill-then-sort pattern for numeric labels.
	pb.NumericLabels = slices.Grow(pb.NumericLabels, len(gl.NumericLabels))[:len(gl.NumericLabels)]
	i = 0
	for k, v := range gl.NumericLabels {
		if pb.NumericLabels[i] == nil {
			pb.NumericLabels[i] = &aggregationpb.NumericLabel{}
		}
		pb.NumericLabels[i].Key = k
		pb.NumericLabels[i].Value = v.Value
		pb.NumericLabels[i].Values = slices.Grow(pb.NumericLabels[i].Values, len(v.Values))[:len(v.Values)]
		copy(pb.NumericLabels[i].Values, v.Values)
		i++
	}
	sort.Slice(pb.NumericLabels, func(i, j int) bool {
		return pb.NumericLabels[i].Key < pb.NumericLabels[j].Key
	})

	return &pb
}

// FromProto converts protobuf representation to globalLabels.
361 | func (gl *globalLabels) FromProto(pb *aggregationpb.GlobalLabels) { 362 | gl.Labels = make(modelpb.Labels, len(pb.Labels)) 363 | for _, l := range pb.Labels { 364 | gl.Labels[l.Key] = &modelpb.LabelValue{Value: l.Value, Global: true} 365 | gl.Labels[l.Key].Values = slices.Grow(gl.Labels[l.Key].Values, len(l.Values))[:len(l.Values)] 366 | copy(gl.Labels[l.Key].Values, l.Values) 367 | } 368 | gl.NumericLabels = make(modelpb.NumericLabels, len(pb.NumericLabels)) 369 | for _, l := range pb.NumericLabels { 370 | gl.NumericLabels[l.Key] = &modelpb.NumericLabelValue{Value: l.Value, Global: true} 371 | gl.NumericLabels[l.Key].Values = slices.Grow(gl.NumericLabels[l.Key].Values, len(l.Values))[:len(l.Values)] 372 | copy(gl.NumericLabels[l.Key].Values, l.Values) 373 | } 374 | } 375 | 376 | // MarshalBinary marshals globalLabels to binary using protobuf. 377 | func (gl *globalLabels) MarshalBinary() ([]byte, error) { 378 | if gl.Labels == nil && gl.NumericLabels == nil { 379 | return nil, nil 380 | } 381 | pb := gl.ToProto() 382 | return pb.MarshalVT() 383 | } 384 | 385 | // MarshalString marshals globalLabels to string from binary using protobuf. 386 | func (gl *globalLabels) MarshalString() (string, error) { 387 | b, err := gl.MarshalBinary() 388 | return string(b), err 389 | } 390 | 391 | // UnmarshalBinary unmarshals binary protobuf to globalLabels. 392 | func (gl *globalLabels) UnmarshalBinary(data []byte) error { 393 | if len(data) == 0 { 394 | gl.Labels = nil 395 | gl.NumericLabels = nil 396 | return nil 397 | } 398 | var pb aggregationpb.GlobalLabels 399 | if err := pb.UnmarshalVT(data); err != nil { 400 | return err 401 | } 402 | gl.FromProto(&pb) 403 | return nil 404 | } 405 | 406 | // UnmarshalString unmarshals string of binary protobuf to globalLabels. 
407 | func (gl *globalLabels) UnmarshalString(data string) error { 408 | return gl.UnmarshalBinary([]byte(data)) 409 | } 410 | 411 | func histogramFromProto(h *hdrhistogram.HistogramRepresentation, pb *aggregationpb.HDRHistogram) { 412 | if pb == nil { 413 | return 414 | } 415 | h.LowestTrackableValue = pb.LowestTrackableValue 416 | h.HighestTrackableValue = pb.HighestTrackableValue 417 | h.SignificantFigures = pb.SignificantFigures 418 | h.CountsRep.Reset() 419 | 420 | for i := 0; i < len(pb.Buckets); i++ { 421 | h.CountsRep.Add(pb.Buckets[i], pb.Counts[i]) 422 | } 423 | } 424 | 425 | func histogramToProto(h *hdrhistogram.HistogramRepresentation) *aggregationpb.HDRHistogram { 426 | if h == nil { 427 | return nil 428 | } 429 | var pb aggregationpb.HDRHistogram 430 | setHistogramProto(h, &pb) 431 | return &pb 432 | } 433 | 434 | func setHistogramProto(h *hdrhistogram.HistogramRepresentation, pb *aggregationpb.HDRHistogram) { 435 | pb.LowestTrackableValue = h.LowestTrackableValue 436 | pb.HighestTrackableValue = h.HighestTrackableValue 437 | pb.SignificantFigures = h.SignificantFigures 438 | pb.Buckets = pb.Buckets[:0] 439 | pb.Counts = pb.Counts[:0] 440 | countsLen := h.CountsRep.Len() 441 | if countsLen > cap(pb.Buckets) { 442 | pb.Buckets = make([]int32, 0, countsLen) 443 | } 444 | if countsLen > cap(pb.Counts) { 445 | pb.Counts = make([]int64, 0, countsLen) 446 | } 447 | h.CountsRep.ForEach(func(bucket int32, count int64) { 448 | pb.Buckets = append(pb.Buckets, bucket) 449 | pb.Counts = append(pb.Counts, count) 450 | }) 451 | } 452 | 453 | func hllBytes(estimator *hyperloglog.Sketch) []byte { 454 | if estimator == nil { 455 | return nil 456 | } 457 | // Ignoring error here since error will always be nil 458 | b, _ := estimator.MarshalBinary() 459 | return b 460 | } 461 | 462 | // hllSketchEstimate returns hllSketch(estimator).Estimate() if estimator is 463 | // non-nil, and zero if estimator is nil. 
func hllSketchEstimate(estimator []byte) uint64 {
	if sketch := hllSketch(estimator); sketch != nil {
		return sketch.Estimate()
	}
	return 0
}

// hllSketch decodes a marshalled hyperloglog sketch; empty or nil input
// yields a nil sketch.
func hllSketch(estimator []byte) *hyperloglog.Sketch {
	if len(estimator) == 0 {
		return nil
	}
	var sketch hyperloglog.Sketch
	// Ignoring returned error here since the error is only returned if
	// the precision is set outside bounds which is not possible for our case.
	sketch.UnmarshalBinary(estimator)
	return &sketch
}
--------------------------------------------------------------------------------
/aggregationpb/labels_vtproto.pb.go:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License 2.0;
// you may not use this file except in compliance with the Elastic License 2.0.

// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10
// source: proto/labels.proto

package aggregationpb

import (
	binary "encoding/binary"
	fmt "fmt"
	io "io"
	math "math"

	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
	proto "google.golang.org/protobuf/proto"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
26 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 27 | ) 28 | 29 | func (m *GlobalLabels) CloneVT() *GlobalLabels { 30 | if m == nil { 31 | return (*GlobalLabels)(nil) 32 | } 33 | r := new(GlobalLabels) 34 | if rhs := m.Labels; rhs != nil { 35 | tmpContainer := make([]*Label, len(rhs)) 36 | for k, v := range rhs { 37 | tmpContainer[k] = v.CloneVT() 38 | } 39 | r.Labels = tmpContainer 40 | } 41 | if rhs := m.NumericLabels; rhs != nil { 42 | tmpContainer := make([]*NumericLabel, len(rhs)) 43 | for k, v := range rhs { 44 | tmpContainer[k] = v.CloneVT() 45 | } 46 | r.NumericLabels = tmpContainer 47 | } 48 | if len(m.unknownFields) > 0 { 49 | r.unknownFields = make([]byte, len(m.unknownFields)) 50 | copy(r.unknownFields, m.unknownFields) 51 | } 52 | return r 53 | } 54 | 55 | func (m *GlobalLabels) CloneMessageVT() proto.Message { 56 | return m.CloneVT() 57 | } 58 | 59 | func (m *Label) CloneVT() *Label { 60 | if m == nil { 61 | return (*Label)(nil) 62 | } 63 | r := new(Label) 64 | r.Key = m.Key 65 | r.Value = m.Value 66 | if rhs := m.Values; rhs != nil { 67 | tmpContainer := make([]string, len(rhs)) 68 | copy(tmpContainer, rhs) 69 | r.Values = tmpContainer 70 | } 71 | if len(m.unknownFields) > 0 { 72 | r.unknownFields = make([]byte, len(m.unknownFields)) 73 | copy(r.unknownFields, m.unknownFields) 74 | } 75 | return r 76 | } 77 | 78 | func (m *Label) CloneMessageVT() proto.Message { 79 | return m.CloneVT() 80 | } 81 | 82 | func (m *NumericLabel) CloneVT() *NumericLabel { 83 | if m == nil { 84 | return (*NumericLabel)(nil) 85 | } 86 | r := new(NumericLabel) 87 | r.Key = m.Key 88 | r.Value = m.Value 89 | if rhs := m.Values; rhs != nil { 90 | tmpContainer := make([]float64, len(rhs)) 91 | copy(tmpContainer, rhs) 92 | r.Values = tmpContainer 93 | } 94 | if len(m.unknownFields) > 0 { 95 | r.unknownFields = make([]byte, len(m.unknownFields)) 96 | copy(r.unknownFields, m.unknownFields) 97 | } 98 | return r 99 | } 100 | 101 | func (m *NumericLabel) CloneMessageVT() 
proto.Message { 102 | return m.CloneVT() 103 | } 104 | 105 | func (m *GlobalLabels) MarshalVT() (dAtA []byte, err error) { 106 | if m == nil { 107 | return nil, nil 108 | } 109 | size := m.SizeVT() 110 | dAtA = make([]byte, size) 111 | n, err := m.MarshalToSizedBufferVT(dAtA[:size]) 112 | if err != nil { 113 | return nil, err 114 | } 115 | return dAtA[:n], nil 116 | } 117 | 118 | func (m *GlobalLabels) MarshalToVT(dAtA []byte) (int, error) { 119 | size := m.SizeVT() 120 | return m.MarshalToSizedBufferVT(dAtA[:size]) 121 | } 122 | 123 | func (m *GlobalLabels) MarshalToSizedBufferVT(dAtA []byte) (int, error) { 124 | if m == nil { 125 | return 0, nil 126 | } 127 | i := len(dAtA) 128 | _ = i 129 | var l int 130 | _ = l 131 | if m.unknownFields != nil { 132 | i -= len(m.unknownFields) 133 | copy(dAtA[i:], m.unknownFields) 134 | } 135 | if len(m.NumericLabels) > 0 { 136 | for iNdEx := len(m.NumericLabels) - 1; iNdEx >= 0; iNdEx-- { 137 | size, err := m.NumericLabels[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) 138 | if err != nil { 139 | return 0, err 140 | } 141 | i -= size 142 | i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 143 | i-- 144 | dAtA[i] = 0x12 145 | } 146 | } 147 | if len(m.Labels) > 0 { 148 | for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { 149 | size, err := m.Labels[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) 150 | if err != nil { 151 | return 0, err 152 | } 153 | i -= size 154 | i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 155 | i-- 156 | dAtA[i] = 0xa 157 | } 158 | } 159 | return len(dAtA) - i, nil 160 | } 161 | 162 | func (m *Label) MarshalVT() (dAtA []byte, err error) { 163 | if m == nil { 164 | return nil, nil 165 | } 166 | size := m.SizeVT() 167 | dAtA = make([]byte, size) 168 | n, err := m.MarshalToSizedBufferVT(dAtA[:size]) 169 | if err != nil { 170 | return nil, err 171 | } 172 | return dAtA[:n], nil 173 | } 174 | 175 | func (m *Label) MarshalToVT(dAtA []byte) (int, error) { 176 | size := m.SizeVT() 177 | return 
m.MarshalToSizedBufferVT(dAtA[:size]) 178 | } 179 | 180 | func (m *Label) MarshalToSizedBufferVT(dAtA []byte) (int, error) { 181 | if m == nil { 182 | return 0, nil 183 | } 184 | i := len(dAtA) 185 | _ = i 186 | var l int 187 | _ = l 188 | if m.unknownFields != nil { 189 | i -= len(m.unknownFields) 190 | copy(dAtA[i:], m.unknownFields) 191 | } 192 | if len(m.Values) > 0 { 193 | for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { 194 | i -= len(m.Values[iNdEx]) 195 | copy(dAtA[i:], m.Values[iNdEx]) 196 | i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Values[iNdEx]))) 197 | i-- 198 | dAtA[i] = 0x1a 199 | } 200 | } 201 | if len(m.Value) > 0 { 202 | i -= len(m.Value) 203 | copy(dAtA[i:], m.Value) 204 | i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) 205 | i-- 206 | dAtA[i] = 0x12 207 | } 208 | if len(m.Key) > 0 { 209 | i -= len(m.Key) 210 | copy(dAtA[i:], m.Key) 211 | i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) 212 | i-- 213 | dAtA[i] = 0xa 214 | } 215 | return len(dAtA) - i, nil 216 | } 217 | 218 | func (m *NumericLabel) MarshalVT() (dAtA []byte, err error) { 219 | if m == nil { 220 | return nil, nil 221 | } 222 | size := m.SizeVT() 223 | dAtA = make([]byte, size) 224 | n, err := m.MarshalToSizedBufferVT(dAtA[:size]) 225 | if err != nil { 226 | return nil, err 227 | } 228 | return dAtA[:n], nil 229 | } 230 | 231 | func (m *NumericLabel) MarshalToVT(dAtA []byte) (int, error) { 232 | size := m.SizeVT() 233 | return m.MarshalToSizedBufferVT(dAtA[:size]) 234 | } 235 | 236 | func (m *NumericLabel) MarshalToSizedBufferVT(dAtA []byte) (int, error) { 237 | if m == nil { 238 | return 0, nil 239 | } 240 | i := len(dAtA) 241 | _ = i 242 | var l int 243 | _ = l 244 | if m.unknownFields != nil { 245 | i -= len(m.unknownFields) 246 | copy(dAtA[i:], m.unknownFields) 247 | } 248 | if len(m.Values) > 0 { 249 | for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { 250 | f1 := math.Float64bits(float64(m.Values[iNdEx])) 251 | i -= 8 252 | 
binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) 253 | } 254 | i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Values)*8)) 255 | i-- 256 | dAtA[i] = 0x1a 257 | } 258 | if m.Value != 0 { 259 | i -= 8 260 | binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) 261 | i-- 262 | dAtA[i] = 0x11 263 | } 264 | if len(m.Key) > 0 { 265 | i -= len(m.Key) 266 | copy(dAtA[i:], m.Key) 267 | i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) 268 | i-- 269 | dAtA[i] = 0xa 270 | } 271 | return len(dAtA) - i, nil 272 | } 273 | 274 | func (m *GlobalLabels) SizeVT() (n int) { 275 | if m == nil { 276 | return 0 277 | } 278 | var l int 279 | _ = l 280 | if len(m.Labels) > 0 { 281 | for _, e := range m.Labels { 282 | l = e.SizeVT() 283 | n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 284 | } 285 | } 286 | if len(m.NumericLabels) > 0 { 287 | for _, e := range m.NumericLabels { 288 | l = e.SizeVT() 289 | n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 290 | } 291 | } 292 | n += len(m.unknownFields) 293 | return n 294 | } 295 | 296 | func (m *Label) SizeVT() (n int) { 297 | if m == nil { 298 | return 0 299 | } 300 | var l int 301 | _ = l 302 | l = len(m.Key) 303 | if l > 0 { 304 | n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 305 | } 306 | l = len(m.Value) 307 | if l > 0 { 308 | n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 309 | } 310 | if len(m.Values) > 0 { 311 | for _, s := range m.Values { 312 | l = len(s) 313 | n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 314 | } 315 | } 316 | n += len(m.unknownFields) 317 | return n 318 | } 319 | 320 | func (m *NumericLabel) SizeVT() (n int) { 321 | if m == nil { 322 | return 0 323 | } 324 | var l int 325 | _ = l 326 | l = len(m.Key) 327 | if l > 0 { 328 | n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 329 | } 330 | if m.Value != 0 { 331 | n += 9 332 | } 333 | if len(m.Values) > 0 { 334 | n += 1 + protohelpers.SizeOfVarint(uint64(len(m.Values)*8)) + len(m.Values)*8 335 | } 336 | n += 
len(m.unknownFields) 337 | return n 338 | } 339 | 340 | func (m *GlobalLabels) UnmarshalVT(dAtA []byte) error { 341 | l := len(dAtA) 342 | iNdEx := 0 343 | for iNdEx < l { 344 | preIndex := iNdEx 345 | var wire uint64 346 | for shift := uint(0); ; shift += 7 { 347 | if shift >= 64 { 348 | return protohelpers.ErrIntOverflow 349 | } 350 | if iNdEx >= l { 351 | return io.ErrUnexpectedEOF 352 | } 353 | b := dAtA[iNdEx] 354 | iNdEx++ 355 | wire |= uint64(b&0x7F) << shift 356 | if b < 0x80 { 357 | break 358 | } 359 | } 360 | fieldNum := int32(wire >> 3) 361 | wireType := int(wire & 0x7) 362 | if wireType == 4 { 363 | return fmt.Errorf("proto: GlobalLabels: wiretype end group for non-group") 364 | } 365 | if fieldNum <= 0 { 366 | return fmt.Errorf("proto: GlobalLabels: illegal tag %d (wire type %d)", fieldNum, wire) 367 | } 368 | switch fieldNum { 369 | case 1: 370 | if wireType != 2 { 371 | return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) 372 | } 373 | var msglen int 374 | for shift := uint(0); ; shift += 7 { 375 | if shift >= 64 { 376 | return protohelpers.ErrIntOverflow 377 | } 378 | if iNdEx >= l { 379 | return io.ErrUnexpectedEOF 380 | } 381 | b := dAtA[iNdEx] 382 | iNdEx++ 383 | msglen |= int(b&0x7F) << shift 384 | if b < 0x80 { 385 | break 386 | } 387 | } 388 | if msglen < 0 { 389 | return protohelpers.ErrInvalidLength 390 | } 391 | postIndex := iNdEx + msglen 392 | if postIndex < 0 { 393 | return protohelpers.ErrInvalidLength 394 | } 395 | if postIndex > l { 396 | return io.ErrUnexpectedEOF 397 | } 398 | m.Labels = append(m.Labels, &Label{}) 399 | if err := m.Labels[len(m.Labels)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { 400 | return err 401 | } 402 | iNdEx = postIndex 403 | case 2: 404 | if wireType != 2 { 405 | return fmt.Errorf("proto: wrong wireType = %d for field NumericLabels", wireType) 406 | } 407 | var msglen int 408 | for shift := uint(0); ; shift += 7 { 409 | if shift >= 64 { 410 | return protohelpers.ErrIntOverflow 
411 | } 412 | if iNdEx >= l { 413 | return io.ErrUnexpectedEOF 414 | } 415 | b := dAtA[iNdEx] 416 | iNdEx++ 417 | msglen |= int(b&0x7F) << shift 418 | if b < 0x80 { 419 | break 420 | } 421 | } 422 | if msglen < 0 { 423 | return protohelpers.ErrInvalidLength 424 | } 425 | postIndex := iNdEx + msglen 426 | if postIndex < 0 { 427 | return protohelpers.ErrInvalidLength 428 | } 429 | if postIndex > l { 430 | return io.ErrUnexpectedEOF 431 | } 432 | m.NumericLabels = append(m.NumericLabels, &NumericLabel{}) 433 | if err := m.NumericLabels[len(m.NumericLabels)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { 434 | return err 435 | } 436 | iNdEx = postIndex 437 | default: 438 | iNdEx = preIndex 439 | skippy, err := protohelpers.Skip(dAtA[iNdEx:]) 440 | if err != nil { 441 | return err 442 | } 443 | if (skippy < 0) || (iNdEx+skippy) < 0 { 444 | return protohelpers.ErrInvalidLength 445 | } 446 | if (iNdEx + skippy) > l { 447 | return io.ErrUnexpectedEOF 448 | } 449 | m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
450 | iNdEx += skippy 451 | } 452 | } 453 | 454 | if iNdEx > l { 455 | return io.ErrUnexpectedEOF 456 | } 457 | return nil 458 | } 459 | func (m *Label) UnmarshalVT(dAtA []byte) error { 460 | l := len(dAtA) 461 | iNdEx := 0 462 | for iNdEx < l { 463 | preIndex := iNdEx 464 | var wire uint64 465 | for shift := uint(0); ; shift += 7 { 466 | if shift >= 64 { 467 | return protohelpers.ErrIntOverflow 468 | } 469 | if iNdEx >= l { 470 | return io.ErrUnexpectedEOF 471 | } 472 | b := dAtA[iNdEx] 473 | iNdEx++ 474 | wire |= uint64(b&0x7F) << shift 475 | if b < 0x80 { 476 | break 477 | } 478 | } 479 | fieldNum := int32(wire >> 3) 480 | wireType := int(wire & 0x7) 481 | if wireType == 4 { 482 | return fmt.Errorf("proto: Label: wiretype end group for non-group") 483 | } 484 | if fieldNum <= 0 { 485 | return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) 486 | } 487 | switch fieldNum { 488 | case 1: 489 | if wireType != 2 { 490 | return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) 491 | } 492 | var stringLen uint64 493 | for shift := uint(0); ; shift += 7 { 494 | if shift >= 64 { 495 | return protohelpers.ErrIntOverflow 496 | } 497 | if iNdEx >= l { 498 | return io.ErrUnexpectedEOF 499 | } 500 | b := dAtA[iNdEx] 501 | iNdEx++ 502 | stringLen |= uint64(b&0x7F) << shift 503 | if b < 0x80 { 504 | break 505 | } 506 | } 507 | intStringLen := int(stringLen) 508 | if intStringLen < 0 { 509 | return protohelpers.ErrInvalidLength 510 | } 511 | postIndex := iNdEx + intStringLen 512 | if postIndex < 0 { 513 | return protohelpers.ErrInvalidLength 514 | } 515 | if postIndex > l { 516 | return io.ErrUnexpectedEOF 517 | } 518 | m.Key = string(dAtA[iNdEx:postIndex]) 519 | iNdEx = postIndex 520 | case 2: 521 | if wireType != 2 { 522 | return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) 523 | } 524 | var stringLen uint64 525 | for shift := uint(0); ; shift += 7 { 526 | if shift >= 64 { 527 | return protohelpers.ErrIntOverflow 
528 | } 529 | if iNdEx >= l { 530 | return io.ErrUnexpectedEOF 531 | } 532 | b := dAtA[iNdEx] 533 | iNdEx++ 534 | stringLen |= uint64(b&0x7F) << shift 535 | if b < 0x80 { 536 | break 537 | } 538 | } 539 | intStringLen := int(stringLen) 540 | if intStringLen < 0 { 541 | return protohelpers.ErrInvalidLength 542 | } 543 | postIndex := iNdEx + intStringLen 544 | if postIndex < 0 { 545 | return protohelpers.ErrInvalidLength 546 | } 547 | if postIndex > l { 548 | return io.ErrUnexpectedEOF 549 | } 550 | m.Value = string(dAtA[iNdEx:postIndex]) 551 | iNdEx = postIndex 552 | case 3: 553 | if wireType != 2 { 554 | return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) 555 | } 556 | var stringLen uint64 557 | for shift := uint(0); ; shift += 7 { 558 | if shift >= 64 { 559 | return protohelpers.ErrIntOverflow 560 | } 561 | if iNdEx >= l { 562 | return io.ErrUnexpectedEOF 563 | } 564 | b := dAtA[iNdEx] 565 | iNdEx++ 566 | stringLen |= uint64(b&0x7F) << shift 567 | if b < 0x80 { 568 | break 569 | } 570 | } 571 | intStringLen := int(stringLen) 572 | if intStringLen < 0 { 573 | return protohelpers.ErrInvalidLength 574 | } 575 | postIndex := iNdEx + intStringLen 576 | if postIndex < 0 { 577 | return protohelpers.ErrInvalidLength 578 | } 579 | if postIndex > l { 580 | return io.ErrUnexpectedEOF 581 | } 582 | m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) 583 | iNdEx = postIndex 584 | default: 585 | iNdEx = preIndex 586 | skippy, err := protohelpers.Skip(dAtA[iNdEx:]) 587 | if err != nil { 588 | return err 589 | } 590 | if (skippy < 0) || (iNdEx+skippy) < 0 { 591 | return protohelpers.ErrInvalidLength 592 | } 593 | if (iNdEx + skippy) > l { 594 | return io.ErrUnexpectedEOF 595 | } 596 | m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
597 | iNdEx += skippy 598 | } 599 | } 600 | 601 | if iNdEx > l { 602 | return io.ErrUnexpectedEOF 603 | } 604 | return nil 605 | } 606 | func (m *NumericLabel) UnmarshalVT(dAtA []byte) error { 607 | l := len(dAtA) 608 | iNdEx := 0 609 | for iNdEx < l { 610 | preIndex := iNdEx 611 | var wire uint64 612 | for shift := uint(0); ; shift += 7 { 613 | if shift >= 64 { 614 | return protohelpers.ErrIntOverflow 615 | } 616 | if iNdEx >= l { 617 | return io.ErrUnexpectedEOF 618 | } 619 | b := dAtA[iNdEx] 620 | iNdEx++ 621 | wire |= uint64(b&0x7F) << shift 622 | if b < 0x80 { 623 | break 624 | } 625 | } 626 | fieldNum := int32(wire >> 3) 627 | wireType := int(wire & 0x7) 628 | if wireType == 4 { 629 | return fmt.Errorf("proto: NumericLabel: wiretype end group for non-group") 630 | } 631 | if fieldNum <= 0 { 632 | return fmt.Errorf("proto: NumericLabel: illegal tag %d (wire type %d)", fieldNum, wire) 633 | } 634 | switch fieldNum { 635 | case 1: 636 | if wireType != 2 { 637 | return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) 638 | } 639 | var stringLen uint64 640 | for shift := uint(0); ; shift += 7 { 641 | if shift >= 64 { 642 | return protohelpers.ErrIntOverflow 643 | } 644 | if iNdEx >= l { 645 | return io.ErrUnexpectedEOF 646 | } 647 | b := dAtA[iNdEx] 648 | iNdEx++ 649 | stringLen |= uint64(b&0x7F) << shift 650 | if b < 0x80 { 651 | break 652 | } 653 | } 654 | intStringLen := int(stringLen) 655 | if intStringLen < 0 { 656 | return protohelpers.ErrInvalidLength 657 | } 658 | postIndex := iNdEx + intStringLen 659 | if postIndex < 0 { 660 | return protohelpers.ErrInvalidLength 661 | } 662 | if postIndex > l { 663 | return io.ErrUnexpectedEOF 664 | } 665 | m.Key = string(dAtA[iNdEx:postIndex]) 666 | iNdEx = postIndex 667 | case 2: 668 | if wireType != 1 { 669 | return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) 670 | } 671 | var v uint64 672 | if (iNdEx + 8) > l { 673 | return io.ErrUnexpectedEOF 674 | } 675 | v = 
uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) 676 | iNdEx += 8 677 | m.Value = float64(math.Float64frombits(v)) 678 | case 3: 679 | if wireType == 1 { 680 | var v uint64 681 | if (iNdEx + 8) > l { 682 | return io.ErrUnexpectedEOF 683 | } 684 | v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) 685 | iNdEx += 8 686 | v2 := float64(math.Float64frombits(v)) 687 | m.Values = append(m.Values, v2) 688 | } else if wireType == 2 { 689 | var packedLen int 690 | for shift := uint(0); ; shift += 7 { 691 | if shift >= 64 { 692 | return protohelpers.ErrIntOverflow 693 | } 694 | if iNdEx >= l { 695 | return io.ErrUnexpectedEOF 696 | } 697 | b := dAtA[iNdEx] 698 | iNdEx++ 699 | packedLen |= int(b&0x7F) << shift 700 | if b < 0x80 { 701 | break 702 | } 703 | } 704 | if packedLen < 0 { 705 | return protohelpers.ErrInvalidLength 706 | } 707 | postIndex := iNdEx + packedLen 708 | if postIndex < 0 { 709 | return protohelpers.ErrInvalidLength 710 | } 711 | if postIndex > l { 712 | return io.ErrUnexpectedEOF 713 | } 714 | var elementCount int 715 | elementCount = packedLen / 8 716 | if elementCount != 0 && len(m.Values) == 0 { 717 | m.Values = make([]float64, 0, elementCount) 718 | } 719 | for iNdEx < postIndex { 720 | var v uint64 721 | if (iNdEx + 8) > l { 722 | return io.ErrUnexpectedEOF 723 | } 724 | v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) 725 | iNdEx += 8 726 | v2 := float64(math.Float64frombits(v)) 727 | m.Values = append(m.Values, v2) 728 | } 729 | } else { 730 | return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) 731 | } 732 | default: 733 | iNdEx = preIndex 734 | skippy, err := protohelpers.Skip(dAtA[iNdEx:]) 735 | if err != nil { 736 | return err 737 | } 738 | if (skippy < 0) || (iNdEx+skippy) < 0 { 739 | return protohelpers.ErrInvalidLength 740 | } 741 | if (iNdEx + skippy) > l { 742 | return io.ErrUnexpectedEOF 743 | } 744 | m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
745 | iNdEx += skippy 746 | } 747 | } 748 | 749 | if iNdEx > l { 750 | return io.ErrUnexpectedEOF 751 | } 752 | return nil 753 | } 754 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= 2 | github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= 3 | github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= 4 | github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= 5 | github.com/HdrHistogram/hdrhistogram-go v1.2.0 h1:XMJkDWuz6bM9Fzy7zORuVFKH7ZJY41G2q8KWhVGkNiY= 6 | github.com/HdrHistogram/hdrhistogram-go v1.2.0/go.mod h1:CiIeGiHSd06zjX+FypuEJ5EQ07KKtxZ+8J6hszwVQig= 7 | github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 h1:8XBWWQD+vFF+JqOsm16t0Kab1a7YWV8+GISVEP8AuZ8= 8 | github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= 9 | github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= 10 | github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= 11 | github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= 12 | github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= 13 | github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= 14 | github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 15 | github.com/axiomhq/hyperloglog v0.2.6 h1:sRhvvF3RIXWQgAXaTphLp4yJiX4S0IN3MWTaAgZoRJw= 16 | github.com/axiomhq/hyperloglog v0.2.6/go.mod 
h1:YjX/dQqCR/7QYX0g8mu8UZAjpIenz1FKM71UEsjFoTo= 17 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 18 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 19 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 20 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 21 | github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= 22 | github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= 23 | github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5 h1:UycK/E0TkisVrQbSoxvU827FwgBBcZ95nRRmpj/12QI= 24 | github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5/go.mod h1:jsaKMvD3RBCATk1/jbUZM8C9idWBJME9+VRZ5+Liq1g= 25 | github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= 26 | github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= 27 | github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= 28 | github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= 29 | github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= 30 | github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= 31 | github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA= 32 | github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw= 33 | github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= 34 | github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= 35 | 
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA= 36 | github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= 37 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= 38 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= 39 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 40 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 41 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 42 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 43 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 44 | github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 h1:ucRHb6/lvW/+mTEIGbvhcYU3S8+uSNkuMjx/qZFfhtM= 45 | github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= 46 | github.com/elastic/apm-data v1.19.5 h1:Rc8bcArUpDMv4wluw6G+LdBlGEO16nO+gnNuKUArbmc= 47 | github.com/elastic/apm-data v1.19.5/go.mod h1:jfJZw+SiGv0y5jIeAjmzIxixRIfLuWREOYgNvmhcek8= 48 | github.com/elastic/go-licenser v0.4.2 h1:bPbGm8bUd8rxzSswFOqvQh1dAkKGkgAmrPxbUi+Y9+A= 49 | github.com/elastic/go-licenser v0.4.2/go.mod h1:W8eH6FaZDR8fQGm+7FnVa7MxI1b/6dAqxz+zPB8nm5c= 50 | github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= 51 | github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= 52 | github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= 53 | github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= 54 | 
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= 55 | github.com/elastic/opentelemetry-lib v0.21.0 h1:aktZQ6hocz/yVMOikDrdcjWtl/qcK/AfMBundZJTCcQ= 56 | github.com/elastic/opentelemetry-lib v0.21.0/go.mod h1:mNcdHi6ivTn5w4bE18as43gzKt2rVmK4HCeJZ9JmFXk= 57 | github.com/getsentry/sentry-go v0.29.1 h1:DyZuChN8Hz3ARxGVV8ePaNXh1dQ7d76AiB117xcREwA= 58 | github.com/getsentry/sentry-go v0.29.1/go.mod h1:x3AtIzN01d6SiWkderzaH28Tm0lgkafpJ5Bm3li39O0= 59 | github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= 60 | github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= 61 | github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= 62 | github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= 63 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 64 | github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= 65 | github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 66 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 67 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 68 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 69 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 70 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 71 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 72 | github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= 73 | github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 74 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 75 | github.com/google/go-cmp 
v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 76 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 77 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 78 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 79 | github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 80 | github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= 81 | github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= 82 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 83 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 84 | github.com/kamstrup/intmap v0.5.2 h1:qnwBm1mh4XAnW9W9Ue9tZtTff8pS6+s6iKF6JRIV2Dk= 85 | github.com/kamstrup/intmap v0.5.2/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4= 86 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 87 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 88 | github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 89 | github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 90 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 91 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 92 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 93 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 94 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 95 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 96 | github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 97 | github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= 98 | github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= 99 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 100 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 101 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 102 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 103 | github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= 104 | github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 105 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 106 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 107 | github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= 108 | github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= 109 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 110 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 111 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 112 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 113 | github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= 114 | github.com/planetscale/vtprotobuf 
v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= 115 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 116 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 117 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 118 | github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= 119 | github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= 120 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 121 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 122 | github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= 123 | github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= 124 | github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 125 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 126 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 127 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 128 | github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= 129 | github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= 130 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 131 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 132 | github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= 133 | github.com/stretchr/testify v1.11.1/go.mod 
h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= 134 | github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= 135 | github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= 136 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= 137 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= 138 | github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= 139 | github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= 140 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 141 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 142 | go.elastic.co/apm/module/apmhttp/v2 v2.7.2 h1:grLycchDH4B6aGRkZjIV/sweAivJDl8IcP+nCorktm8= 143 | go.elastic.co/apm/module/apmhttp/v2 v2.7.2/go.mod h1:N9CJn3x7cyFnZ54WKxgm/t76drcsmSpu6aU8zGwP4zQ= 144 | go.elastic.co/apm/module/apmotel/v2 v2.7.2 h1:cAs0vv6laivMlhPGrgsdTqoG1u6fuiDctoWizHaqQOA= 145 | go.elastic.co/apm/module/apmotel/v2 v2.7.2/go.mod h1:CwPbr5N0/9xS8MNwxxGigoxM97vfFhKB/byefKVKt7Y= 146 | go.elastic.co/apm/v2 v2.7.2 h1:0blxpxOMOcpBTz034RBqvEw806y0CDJwo/ut+2wZsHA= 147 | go.elastic.co/apm/v2 v2.7.2/go.mod h1:KJcwwsaouDzcLd8EviAO+y8yrfZzD6PhUCEg82bvLV4= 148 | go.elastic.co/fastjson v1.5.1 h1:zeh1xHrFH79aQ6Xsw7YxixvnOdAl3OSv0xch/jRDzko= 149 | go.elastic.co/fastjson v1.5.1/go.mod h1:WtvH5wz8z9pDOPqNYSYKoLLv/9zCWZLeejHWuvdL/EM= 150 | go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= 151 | go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= 152 | go.opentelemetry.io/collector/consumer v1.37.0 h1:RqTqEcc95Fg7T3MRPPjUX2nxzn1X88yfFUQV+AjdMK0= 153 | 
go.opentelemetry.io/collector/consumer v1.37.0/go.mod h1:vDA1JDXeb7vnQ02PXIjjR6dI9LTaya+Qr89Nyt2Gl7Y= 154 | go.opentelemetry.io/collector/pdata v1.37.0 h1:aEEpd03GgAS352xntcYMsaxYvRXvzqEWqdrSro+TSh4= 155 | go.opentelemetry.io/collector/pdata v1.37.0/go.mod h1:aE9l1Lcdsg7nmSoiucnWHuPYIk6T0RKzOjPepNJC5AQ= 156 | go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= 157 | go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= 158 | go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= 159 | go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= 160 | go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= 161 | go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= 162 | go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= 163 | go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= 164 | go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= 165 | go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= 166 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 167 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 168 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 169 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 170 | go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= 171 | go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 172 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 173 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 174 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 175 | golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= 176 | golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= 177 | golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ= 178 | golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= 179 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 180 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 181 | golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= 182 | golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= 183 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 184 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 185 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 186 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 187 | golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= 188 | golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= 189 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 190 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 191 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 192 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 193 | golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= 194 | golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= 195 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 196 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 197 | golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 198 | golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 199 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 200 | golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= 201 | golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 202 | golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= 203 | golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= 204 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 205 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 206 | golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= 207 | golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= 208 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 209 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 210 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 211 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 212 | golang.org/x/tools 
v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= 213 | golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= 214 | golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= 215 | golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= 216 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 217 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 218 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 219 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 220 | gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= 221 | gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= 222 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= 223 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= 224 | google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= 225 | google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= 226 | google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= 227 | google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= 228 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 229 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 230 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 231 | gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 232 | gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= 233 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 234 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 235 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 236 | honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= 237 | honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= 238 | howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= 239 | howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= 240 | howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= 241 | --------------------------------------------------------------------------------