├── CODEOWNERS
├── images
│   ├── dashboard-k6-prometheus.png
│   ├── dashboard-k6-prometheus-nh.png
│   ├── dashboard-k6-prometheus-lower-section.png
│   ├── dashboard-k6-prometheus-upper-section.png
│   ├── dashboard-k6-prometheus-nh-lower-section.png
│   └── dashboard-k6-prometheus-nh-upper-section.png
├── .gitignore
├── .github
│   └── workflows
│       ├── issue-auto-assign.yml
│       └── all.yml
├── samples
│   ├── simple.js
│   ├── test.js
│   ├── checks.js
│   ├── scenarios.js
│   ├── stages.js
│   ├── error.js
│   ├── simple_batch_status.js
│   └── custom_metrics.js
├── register.go
├── grafana
│   ├── dashboards
│   │   └── dashboards.yaml
│   └── datasources
│       └── datasource.yaml
├── Dockerfile
├── docker-compose.yml
├── pkg
│   ├── sigv4
│   │   ├── const.go
│   │   ├── tripper.go
│   │   ├── sigv4_test.go
│   │   ├── tripper_test.go
│   │   ├── utils.go
│   │   ├── util_test.go
│   │   └── sigv4.go
│   ├── stale
│   │   └── stale.go
│   ├── remotewrite
│   │   ├── prometheus.go
│   │   ├── prometheus_test.go
│   │   ├── trend_test.go
│   │   ├── trend.go
│   │   ├── remotewrite_test.go
│   │   ├── remotewrite.go
│   │   ├── config.go
│   │   └── config_test.go
│   └── remote
│       ├── client.go
│       └── client_test.go
├── Makefile
├── README.md
├── go.mod
├── CODE_OF_CONDUCT.md
├── go.sum
└── LICENSE

/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @grafana/k6-core
2 | 
--------------------------------------------------------------------------------
/images/dashboard-k6-prometheus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grafana/xk6-output-prometheus-remote/HEAD/images/dashboard-k6-prometheus.png
--------------------------------------------------------------------------------
/images/dashboard-k6-prometheus-nh.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grafana/xk6-output-prometheus-remote/HEAD/images/dashboard-k6-prometheus-nh.png
--------------------------------------------------------------------------------
/images/dashboard-k6-prometheus-lower-section.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grafana/xk6-output-prometheus-remote/HEAD/images/dashboard-k6-prometheus-lower-section.png
--------------------------------------------------------------------------------
/images/dashboard-k6-prometheus-upper-section.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grafana/xk6-output-prometheus-remote/HEAD/images/dashboard-k6-prometheus-upper-section.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | .DS_Store
3 | k6
4 | 
5 | # we use the config from the main k6's repository
6 | # https://github.com/grafana/k6/blob/master/.golangci.yml
7 | .golangci.yml
--------------------------------------------------------------------------------
/images/dashboard-k6-prometheus-nh-lower-section.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grafana/xk6-output-prometheus-remote/HEAD/images/dashboard-k6-prometheus-nh-lower-section.png
--------------------------------------------------------------------------------
/images/dashboard-k6-prometheus-nh-upper-section.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grafana/xk6-output-prometheus-remote/HEAD/images/dashboard-k6-prometheus-nh-upper-section.png -------------------------------------------------------------------------------- /.github/workflows/issue-auto-assign.yml: -------------------------------------------------------------------------------- 1 | name: "Auto assign maintainer to issue" 2 | on: 3 | issues: 4 | types: [opened] 5 | 6 | permissions: 7 | issues: write 8 | 9 | jobs: 10 | assign-maintainer: 11 | uses: grafana/k6/.github/workflows/issue-auto-assign.yml@master 12 | -------------------------------------------------------------------------------- /samples/simple.js: -------------------------------------------------------------------------------- 1 | import http from "k6/http"; 2 | 3 | export const options = { 4 | vus: 10, 5 | iterations: 1000, 6 | 7 | thresholds: { 8 | "http_reqs{expected_response:false}": ["rate>10"], 9 | }, 10 | }; 11 | 12 | export default function () { 13 | http.get("https://test.k6.io"); 14 | } 15 | -------------------------------------------------------------------------------- /.github/workflows/all.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | # Enable manually triggering this workflow via the API or web UI 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | tags: 9 | - v* 10 | pull_request: 11 | 12 | jobs: 13 | checks: 14 | uses: grafana/k6-ci/.github/workflows/all.yml@main -------------------------------------------------------------------------------- /register.go: -------------------------------------------------------------------------------- 1 | // Package remotewrite registers the xk6-output-prometheus-remote extension 2 | package remotewrite 3 | 4 | import ( 5 | "github.com/grafana/xk6-output-prometheus-remote/pkg/remotewrite" 6 | "go.k6.io/k6/output" 7 | ) 8 | 9 | func init() { 10 | output.RegisterExtension("xk6-prometheus-rw", func(p output.Params) (output.Output, error) { 11 | return remotewrite.New(p) 12 | }) 13 | } 14 | -------------------------------------------------------------------------------- /samples/test.js: -------------------------------------------------------------------------------- 1 | import http from "k6/http"; 2 | import { check } from "k6"; 3 | 4 | export const options = { 5 | vus: 10, 6 | duration: '10s', 7 | thresholds: { 8 | 'http_reqs{expected_response:true}': ['rate>10'], 9 | }, 10 | }; 11 | 12 | export default function () { 13 | check(http.get("https://test-api.k6.io/"), { 14 | "status is 200": (r) => r.status == 200, 15 | "protocol is HTTP/2": (r) => r.proto == "HTTP/2.0", 16 | }); 17 | } 18 | -------------------------------------------------------------------------------- /grafana/dashboards/dashboards.yaml: -------------------------------------------------------------------------------- 1 | # For configuration options, see 2 | # https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards 3 | 4 | apiVersion: 1 5 | 6 | providers: 7 | # We're defining a directory from which to load file-based dashboards 8 | - name: 'prometheus' 9 | type: file 10 | disableDeletion: false 11 | updateIntervalSeconds: 10 12 | editable: true 13 | options: 14 | path: /etc/grafana/provisioning/dashboards 15 | -------------------------------------------------------------------------------- /grafana/datasources/datasource.yaml: -------------------------------------------------------------------------------- 1 | # For configuration options, see 2 | # 
https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-config-file 3 | 4 | apiVersion: 1 5 | 6 | datasources: 7 | - name: prometheus 8 | type: prometheus 9 | access: proxy 10 | url: http://prometheus:9090 11 | basicAuth: false 12 | isDefault: true 13 | jsonData: 14 | tlsAuth: false 15 | tlsAuthWithCACert: false 16 | editable: false 17 | -------------------------------------------------------------------------------- /samples/checks.js: -------------------------------------------------------------------------------- 1 | import http from "k6/http"; 2 | import { check } from "k6"; 3 | // for test check on dashboard 4 | export const options = { 5 | vus: 10, 6 | duration: '10s', 7 | thresholds: { 8 | 'http_reqs{expected_response:true}': ['rate>10'], 9 | }, 10 | }; 11 | 12 | export default function () { 13 | check(http.get("https://test-api.k6.io/"), { 14 | "status is 200": (r) => r.status == 200, 15 | "protocol is HTTP/2": (r) => r.proto == "HTTP/2.0", 16 | }); 17 | } 18 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Multi-stage build to generate custom k6 with extension 2 | FROM golang:1.20-alpine3.18 as builder 3 | WORKDIR $GOPATH/src/go.k6.io/k6 4 | COPY . . 5 | RUN apk --no-cache add git=~2 6 | RUN CGO_ENABLED=0 go install go.k6.io/xk6/cmd/xk6@latest \ 7 | && CGO_ENABLED=0 xk6 build \ 8 | --with github.com/grafana/xk6-output-prometheus-remote=. \ 9 | --output /tmp/k6 10 | 11 | # Create image for running k6 with output for Prometheus remote write 12 | FROM alpine:3.18 13 | 14 | # hadolint ignore=DL3018 15 | RUN apk add --no-cache ca-certificates && \ 16 | adduser -D -u 12345 -g 12345 k6 17 | COPY --from=builder /tmp/k6 /usr/bin/k6 18 | 19 | USER 12345 20 | WORKDIR /home/k6 21 | 22 | ENTRYPOINT ["k6"] 23 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | networks: 4 | k6: 5 | 6 | services: 7 | prometheus: 8 | image: prom/prometheus:v2.47.0 9 | command: 10 | - --web.enable-remote-write-receiver 11 | - --enable-feature=native-histograms 12 | - --config.file=/etc/prometheus/prometheus.yml 13 | networks: 14 | - k6 15 | ports: 16 | - "9090:9090" 17 | 18 | grafana: 19 | image: grafana/grafana:10.1.2 20 | networks: 21 | - k6 22 | ports: 23 | - "3000:3000" 24 | environment: 25 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 26 | - GF_AUTH_ANONYMOUS_ENABLED=true 27 | - GF_AUTH_BASIC_ENABLED=false 28 | volumes: 29 | - ./grafana:/etc/grafana/provisioning/ -------------------------------------------------------------------------------- /pkg/sigv4/const.go: -------------------------------------------------------------------------------- 1 | package sigv4 2 | 3 | const ( 4 | // Amazon Managed Service for Prometheus 5 | awsServiceName = "aps" 6 | 7 | signingAlgorithm = "AWS4-HMAC-SHA256" 8 | 9 | authorizationHeaderKey = "Authorization" 10 | amzDateKey = "X-Amz-Date" 11 | 12 | // emptyStringSHA256 is the hex encoded sha256 value of an empty string 13 | emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` 14 | 15 | // timeFormat is the time format to be used in the X-Amz-Date header or query parameter 16 | timeFormat = "20060102T150405Z" 17 | 18 | // shortTimeFormat is the shorten time format used in the credential scope 19 | shortTimeFormat = "20060102" 20 | 21 
| // contentSHAKey is the SHA256 of the request body
22 | 	contentSHAKey = "X-Amz-Content-Sha256"
23 | )
24 | 
--------------------------------------------------------------------------------
/samples/scenarios.js:
--------------------------------------------------------------------------------
1 | import http from 'k6/http';
2 | 
3 | export let options = {
4 |     discardResponseBodies: true,
5 |     scenarios: {
6 |         Scenario_GetCrocodiles: {
7 |             exec: 'FunctionForThisScenario',
8 |             executor: 'ramping-vus',
9 |             startTime: '0s',
10 |             startVUs: 1,
11 |             stages: [
12 |                 { duration: '5s', target: 10 },
13 |             ],
14 |         },
15 |         Scenario_GetContacts: {
16 |             exec: 'FunctionGetContacts',
17 |             executor: 'ramping-vus',
18 |             startTime: '3s',
19 |             startVUs: 5,
20 |             stages: [
21 |                 { duration: '2s', target: 5 },
22 |             ],
23 |         },
24 |     },
25 | };
26 | 
27 | export function FunctionForThisScenario() {
28 |     http.get('https://test-api.k6.io/public/crocodiles/');
29 | }
30 | 
31 | export function FunctionGetContacts() {
32 |     http.get('https://test.k6.io/contacts.php');
33 | }
--------------------------------------------------------------------------------
/samples/stages.js:
--------------------------------------------------------------------------------
1 | import http from "k6/http";
2 | import { check } from "k6";
3 | // for testing stages on the dashboard
4 | export const options = {
5 |     stages: [
6 |         // Ramp-up from 1 to 5 VUs in 10s
7 |         { duration: "10s", target: 5 },
8 | 
9 |         // Stay at 5 VUs for 5s
10 |         { duration: "5s", target: 5 },
11 | 
12 |         // Ramp-up from 5 to 10 VUs in 5s
13 |         { duration: "5s", target: 10 },
14 | 
15 |         // Stay at 10 VUs for 10s
16 |         { duration: "10s", target: 10 },
17 | 
18 |         // Ramp-down from 10 to 0 VUs in 5s
19 |         { duration: "5s", target: 0 }
20 |     ],
21 |     thresholds: {
22 |         'http_reqs{expected_response:true}': ['rate>10'],
23 |     },
24 | };
25 | 
26 | export default function () {
27 |     check(http.get("https://test-api.k6.io/"), {
28 |         "status is 200": (r) => r.status == 200,
29 |         "protocol is HTTP/2": (r) => r.proto == "HTTP/2.0",
30 |     });
31 | }
32 | 
--------------------------------------------------------------------------------
/pkg/stale/stale.go:
--------------------------------------------------------------------------------
1 | // Package stale handles the staleness process.
2 | //
3 | // TODO: migrate here more logic dedicated to this topic
4 | // from the remote write package.
5 | package stale
6 | 
7 | import "math"
8 | 
9 | // Marker is the Prometheus Remote Write special value for marking
10 | // a time series as stale.
11 | //
12 | // Check https://www.robustperception.io/staleness-and-promql and
13 | // https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness
14 | // for details about the Prometheus staleness markers.
15 | //
16 | // The value is the same as the one used by the Prometheus package:
17 | // https://pkg.go.dev/github.com/prometheus/prometheus/pkg/value#pkg-constants
18 | //
19 | // It isn't imported directly, to avoid a direct dependency on the
20 | // big Prometheus project and the extra dependencies it would bring in.
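//
// Note that a plain `v == Marker` comparison can never be true, since the
// marker is a NaN; a minimal sketch of how a consumer recognizes it,
// assuming it compares raw bits the same way Prometheus does:
//
//	isStale := math.Float64bits(v) == 0x7ff0000000000002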
22 | // 23 | //nolint:gochecknoglobals 24 | var Marker = math.Float64frombits(0x7ff0000000000002) 25 | -------------------------------------------------------------------------------- /samples/error.js: -------------------------------------------------------------------------------- 1 | import http from "k6/http"; 2 | import { check } from "k6"; 3 | import { Counter, Gauge, Rate, Trend } from "k6/metrics"; 4 | let myCounter = new Counter("my_counter"); 5 | let myGauge = new Gauge("my_gauge"); 6 | let myRate = new Rate("my_rate"); 7 | let myTrend = new Trend("my_trend"); 8 | 9 | let maxResponseTime = 0.0; 10 | 11 | export const options = { 12 | stages: [ 13 | // Ramp-up from 1 to 5 VUs in 10s 14 | { duration: "10s", target: 5 }, 15 | 16 | // Stay at rest on 5 VUs for 5s 17 | { duration: "5s", target: 5 }, 18 | 19 | // Ramp-down from 5 to 0 VUs for 5s 20 | { duration: "5s", target: 0 } 21 | ], 22 | 23 | }; 24 | 25 | export default function () { 26 | let res = http.get("https://httpbin.test.k6.io/"); 27 | 28 | 29 | //for test error on dashboard 30 | const responses = http.batch([ 31 | "http://test.k6.io", 32 | "https://httpstat.us/500", 33 | "https://httpstat.us/404", 34 | ]); 35 | 36 | check(responses[0], { 37 | "main page 200": res => res.status === 200, 38 | }); 39 | 40 | check(responses[1], { 41 | "pi page 200": res => res.status === 200, 42 | "pi page has right content": res => res.body === "2", 43 | }); 44 | 45 | } 46 | -------------------------------------------------------------------------------- /samples/simple_batch_status.js: -------------------------------------------------------------------------------- 1 | import http from "k6/http"; 2 | import { check } from "k6"; 3 | 4 | export const options = { 5 | vus: 10, 6 | iterations: 1000, 7 | 8 | thresholds: { 9 | "http_reqs{expected_response:false}": ["rate>10"], 10 | }, 11 | }; 12 | 13 | export default function () { 14 | const responses = http.batch([ 15 | ['GET', 'https://httpstat.us/200', null, { tags: { type: 'ok' } }], 16 | ['GET', 'https://httpstat.us/400', null, { tags: { type: 'Bad Request' } }], 17 | ['GET', 'https://httpstat.us/404', null, { tags: { type: 'Not Found' } }], 18 | ['GET', 'https://httpstat.us/500', null, { tags: { type: 'Internal Server Error' } }], 19 | ['GET', 'https://httpstat.us/502', null, { tags: { type: 'Bad Gateway' } }], 20 | ['GET', 'https://httpstat.us/503', null, { tags: { type: 'Service Unavailable' } }], 21 | ['GET', 'https://httpstat.us/504', null, { tags: { type: 'Gateway Timeout' } }], 22 | 23 | 24 | ]); 25 | check(responses[0], { 26 | 'main page status was 200': (res) => res.status === 200, 27 | }); 28 | check(responses[1], { 29 | 'main page status was 400': (res) => res.status === 400, 30 | }); 31 | check(responses[6], { 32 | 'main page status was 504': (res) => res.status === 504, 33 | }); 34 | } -------------------------------------------------------------------------------- /pkg/remotewrite/prometheus.go: -------------------------------------------------------------------------------- 1 | package remotewrite 2 | 3 | import ( 4 | "sort" 5 | 6 | prompb "buf.build/gen/go/prometheus/prometheus/protocolbuffers/go" 7 | "github.com/mstoykov/atlas" 8 | "go.k6.io/k6/metrics" 9 | ) 10 | 11 | const namelbl = "__name__" 12 | 13 | // MapTagSet converts a k6 tag set into 14 | // the equivalent set of Labels as expected from the 15 | // Prometheus' data model. 
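//
// For example, a tag set {method="GET", status="200"} maps to
// [{Name: "method", Value: "GET"}, {Name: "status", Value: "200"}];
// pairs with an empty key or value are skipped, and lexicographic
// sorting is left to MapSeries.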
16 | func MapTagSet(t *metrics.TagSet) []*prompb.Label { 17 | n := (*atlas.Node)(t) 18 | if n.Len() < 1 { 19 | return nil 20 | } 21 | labels := make([]*prompb.Label, 0, n.Len()) 22 | for !n.IsRoot() { 23 | prev, key, value := n.Data() 24 | n = prev 25 | if key == "" || value == "" { 26 | continue 27 | } 28 | labels = append(labels, &prompb.Label{Name: key, Value: value}) 29 | } 30 | return labels 31 | } 32 | 33 | // MapSeries converts a k6 time series into 34 | // the equivalent set of Labels (name+tags) as expected from the 35 | // Prometheus' data model. 36 | // 37 | // The labels are lexicographic sorted as required 38 | // from the Remote write's specification. 39 | func MapSeries(series metrics.TimeSeries, suffix string) []*prompb.Label { 40 | v := defaultMetricPrefix + series.Metric.Name 41 | if suffix != "" { 42 | v += "_" + suffix 43 | } 44 | lbls := append(MapTagSet(series.Tags), &prompb.Label{ 45 | Name: namelbl, 46 | Value: v, 47 | }) 48 | sort.Slice(lbls, func(i int, j int) bool { 49 | return lbls[i].Name < lbls[j].Name 50 | }) 51 | return lbls 52 | } 53 | -------------------------------------------------------------------------------- /samples/custom_metrics.js: -------------------------------------------------------------------------------- 1 | import http from "k6/http"; 2 | import { Counter, Gauge, Rate, Trend } from "k6/metrics"; 3 | import { check } from "k6"; 4 | 5 | /* 6 | * Custom metrics are useful when you want to track something that is not 7 | * provided out of the box. 8 | * 9 | * There are four types of custom metrics: Counter, Gauge, Rate and Trend. 10 | * 11 | * - Counter: a sum of all values added to the metric 12 | * - Gauge: a value that change to whatever you set it to 13 | * - Rate: rate of "truthiness", how many values out of total are !=0 14 | * - Trend: time series, all values are recorded, statistics can be calculated 15 | * on it 16 | */ 17 | 18 | let myCounter = new Counter("my_counter"); 19 | let myGauge = new Gauge("my_gauge"); 20 | let myRate = new Rate("my_rate"); 21 | let myTrend = new Trend("my_trend"); 22 | 23 | let maxResponseTime = 0.0; 24 | 25 | export default function () { 26 | let res = http.get("http://httpbin.org/"); 27 | let passed = check(res, { "status is 200": (r) => r.status === 200 }); 28 | 29 | // Add one for number of requests 30 | myCounter.add(1); 31 | console.log(myCounter.name, " is config ready") 32 | 33 | // Set max response time seen 34 | maxResponseTime = Math.max(maxResponseTime, res.timings.duration); 35 | myGauge.add(maxResponseTime); 36 | 37 | // Add check success or failure to keep track of rate 38 | myRate.add(passed); 39 | 40 | // Keep track of TCP-connecting and TLS handshaking part of the response time 41 | myTrend.add(res.timings.connecting + res.timings.tls_handshaking); 42 | } 43 | -------------------------------------------------------------------------------- /pkg/sigv4/tripper.go: -------------------------------------------------------------------------------- 1 | package sigv4 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "strings" 7 | ) 8 | 9 | // Tripper signs each request with sigv4 10 | type Tripper struct { 11 | config *Config 12 | signer signer 13 | next http.RoundTripper 14 | } 15 | 16 | // Config holds aws access configurations 17 | type Config struct { 18 | Region string 19 | AwsAccessKeyID string 20 | AwsSecretAccessKey string 21 | } 22 | 23 | func (c *Config) validate() error { 24 | if c == nil { 25 | return errors.New("config should not be nil") 26 | } 27 | hasRegion := 
len(strings.TrimSpace(c.Region)) != 0 28 | hasAccessID := len(strings.TrimSpace(c.AwsAccessKeyID)) != 0 29 | hasSecretAccessKey := len(strings.TrimSpace(c.AwsSecretAccessKey)) != 0 30 | if !hasRegion || !hasAccessID || !hasSecretAccessKey { 31 | return errors.New("sigV4 config `Region`, `AwsAccessKeyID`, `AwsSecretAccessKey` must all be set") 32 | } 33 | return nil 34 | } 35 | 36 | // NewRoundTripper creates a new sigv4 round tripper 37 | func NewRoundTripper(config *Config, next http.RoundTripper) (*Tripper, error) { 38 | if err := config.validate(); err != nil { 39 | return nil, err 40 | } 41 | 42 | if next == nil { 43 | next = http.DefaultTransport 44 | } 45 | 46 | tripper := &Tripper{ 47 | config: config, 48 | next: next, 49 | signer: newDefaultSigner(config), 50 | } 51 | return tripper, nil 52 | } 53 | 54 | // RoundTrip implements the tripper interface for sigv4 signing of requests 55 | func (c *Tripper) RoundTrip(req *http.Request) (*http.Response, error) { 56 | if err := c.signer.sign(req); err != nil { 57 | return nil, err 58 | } 59 | return c.next.RoundTrip(req) 60 | } 61 | -------------------------------------------------------------------------------- /pkg/sigv4/sigv4_test.go: -------------------------------------------------------------------------------- 1 | package sigv4 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "strings" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestBuildCanonicalHeaders(t *testing.T) { 14 | t.Parallel() 15 | 16 | serviceName := "mockAPI" 17 | region := "mock-region" 18 | endpoint := "https://" + serviceName + "." + region + ".example.com" 19 | 20 | now := time.Now().UTC() 21 | iSO8601Date := now.Format(timeFormat) 22 | 23 | req, err := http.NewRequestWithContext(context.Background(), http.MethodPost, endpoint, nil) 24 | if err != nil { 25 | t.Fatalf("failed to create request, %v", err) 26 | } 27 | 28 | req.Header.Set("Host", req.Host) 29 | req.Header.Set(amzDateKey, iSO8601Date) 30 | req.Header.Set("InnerSpace", " inner space ") 31 | req.Header.Set("LeadingSpace", " leading-space") 32 | req.Header.Add("MultipleSpace", "no-space") 33 | req.Header.Add("MultipleSpace", "\ttab-space") 34 | req.Header.Add("MultipleSpace", "trailing-space ") 35 | req.Header.Set("NoSpace", "no-space") 36 | req.Header.Set("TabSpace", "\ttab-space\t") 37 | req.Header.Set("TrailingSpace", "trailing-space ") 38 | req.Header.Set("WrappedSpace", " wrapped-space ") 39 | 40 | wantSignedHeader := "host;innerspace;leadingspace;multiplespace;nospace;tabspace;trailingspace;wrappedspace;x-amz-date" 41 | wantCanonicalHeader := strings.Join([]string{ 42 | "host:mockAPI.mock-region.example.com", 43 | "innerspace:inner space", 44 | "leadingspace:leading-space", 45 | "multiplespace:no-space,tab-space,trailing-space", 46 | "nospace:no-space", 47 | "tabspace:tab-space", 48 | "trailingspace:trailing-space", 49 | "wrappedspace:wrapped-space", 50 | "x-amz-date:" + iSO8601Date, 51 | "", 52 | }, "\n") 53 | 54 | gotSignedHeaders, gotCanonicalHeader := buildCanonicalHeaders(req, nil) 55 | assert.Equal(t, wantSignedHeader, gotSignedHeaders) 56 | assert.Equal(t, wantCanonicalHeader, gotCanonicalHeader) 57 | } 58 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | MAKEFLAGS += --silent 2 | GOLANGCI_CONFIG ?= .golangci.yml 3 | 4 | all: clean format test build 5 | 6 | ## help: Prints a list of available build targets. 
7 | help: 8 | echo "Usage: make ... " 9 | echo "" 10 | echo "Available targets are:" 11 | echo '' 12 | sed -n 's/^##//p' ${PWD}/Makefile | column -t -s ':' | sed -e 's/^/ /' 13 | echo 14 | echo "Targets run by default are: `sed -n 's/^all: //p' ./Makefile | sed -e 's/ /, /g' | sed -e 's/\(.*\), /\1, and /'`" 15 | 16 | ## clean: Removes any previously created artifacts. 17 | clean: 18 | rm -f ./k6 .golangci.yml 19 | 20 | ## build: Builds a custom 'k6' with the local extension. 21 | build: 22 | go install go.k6.io/xk6/cmd/xk6@latest 23 | xk6 build --with github.com/grafana/xk6-output-prometheus-remote=. 24 | 25 | ## format: Applies Go formatting to code. 26 | format: 27 | go fmt ./... 28 | 29 | ## linter-config: Checks if the linter config exists, if not, downloads it from the main k6 repository. 30 | linter-config: 31 | test -s "${GOLANGCI_CONFIG}" || (echo "No linter config, downloading from main k6 repository..." && curl --silent --show-error --fail --no-location https://raw.githubusercontent.com/grafana/k6/master/.golangci.yml --output "${GOLANGCI_CONFIG}") 32 | 33 | ## check-linter-version: Checks if the linter version is the same as the one specified in the linter config. 34 | check-linter-version: 35 | (golangci-lint version | grep "version $(shell head -n 1 .golangci.yml | tr -d '\# ')") || echo "Your installation of golangci-lint is different from the one that is specified in k6's linter config (there it's $(shell head -n 1 .golangci.yml | tr -d '\# ')). Results could be different in the CI." 36 | 37 | ## lint: Runs the linters. 38 | lint: linter-config check-linter-version 39 | echo "Running linters..." 40 | golangci-lint run --out-format=tab ./... 41 | 42 | ## check: Runs the linters and tests. 43 | check: lint test 44 | 45 | ## test: Executes any unit tests. 46 | test: 47 | go test -cover -race ./... 48 | 49 | .PHONY: build clean format help test lint check check-linter-version linter-config 50 | -------------------------------------------------------------------------------- /pkg/remotewrite/prometheus_test.go: -------------------------------------------------------------------------------- 1 | package remotewrite 2 | 3 | import ( 4 | "sort" 5 | "testing" 6 | "time" 7 | 8 | prompb "buf.build/gen/go/prometheus/prometheus/protocolbuffers/go" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | "go.k6.io/k6/metrics" 12 | ) 13 | 14 | // TODO: test MapSeries with suffix 15 | 16 | func TestMapSeries(t *testing.T) { 17 | t.Parallel() 18 | 19 | r := metrics.NewRegistry() 20 | tags := r.RootTagSet(). 21 | With("tagk1", "tagv1").With("b1", "v1"). 22 | // labels with empty key or value are not allowed 23 | // so they will be not added as labels 24 | With("tagEmptyValue", ""). 
25 | 		With("", "tagEmptyKey")
26 | 
27 | 	series := metrics.TimeSeries{
28 | 		Metric: &metrics.Metric{
29 | 			Name: "test",
30 | 			Type: metrics.Counter,
31 | 		},
32 | 		Tags: tags,
33 | 	}
34 | 
35 | 	lbls := MapSeries(series, "")
36 | 	require.Len(t, lbls, 3)
37 | 
38 | 	exp := []*prompb.Label{
39 | 		{Name: "__name__", Value: "k6_test"},
40 | 		{Name: "b1", Value: "v1"},
41 | 		{Name: "tagk1", Value: "tagv1"},
42 | 	}
43 | 	assert.Equal(t, exp, lbls)
44 | }
45 | 
46 | // buildTimeSeries creates a TimeSeries with the given name, value and timestamp
47 | func buildTimeSeries(name string, value float64, timestamp time.Time) *prompb.TimeSeries { //nolint:unparam
48 | 	return &prompb.TimeSeries{
49 | 		Labels: []*prompb.Label{
50 | 			{
51 | 				Name:  "__name__",
52 | 				Value: name,
53 | 			},
54 | 		},
55 | 		Samples: []*prompb.Sample{
56 | 			{
57 | 				Value:     value,
58 | 				Timestamp: timestamp.UnixMilli(),
59 | 			},
60 | 		},
61 | 	}
62 | }
63 | 
64 | // assertTimeSeriesEqual asserts that the elements of two slices of TimeSeries match.
65 | func assertTimeSeriesEqual(t *testing.T, expected []*prompb.TimeSeries, actual []*prompb.TimeSeries) {
66 | 	t.Helper()
67 | 	require.Len(t, actual, len(expected))
68 | 
69 | 	for i := 0; i < len(expected); i++ {
70 | 		assert.Equal(t, expected[i], actual[i])
71 | 	}
72 | }
73 | 
74 | // sortByNameLabel sorts a slice of time series by the Name label.
75 | //
76 | // TODO: remove the assumption that Name label is the first.
77 | func sortByNameLabel(s []*prompb.TimeSeries) {
78 | 	sort.Slice(s, func(i, j int) bool {
79 | 		return s[i].Labels[0].Value <= s[j].Labels[0].Value
80 | 	})
81 | }
82 | 
--------------------------------------------------------------------------------
/pkg/sigv4/tripper_test.go:
--------------------------------------------------------------------------------
1 | package sigv4
2 | 
3 | import (
4 | 	"context"
5 | 	"net/http"
6 | 	"net/http/httptest"
7 | 	"testing"
8 | 
9 | 	"github.com/stretchr/testify/assert"
10 | )
11 | 
12 | func TestTripper_request_includes_required_headers(t *testing.T) {
13 | 	t.Parallel()
14 | 
15 | 	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
16 | 		// Check if required headers are present
17 | 		authorization := r.Header.Get(authorizationHeaderKey)
18 | 		amzDate := r.Header.Get(amzDateKey)
19 | 		contentSHA256 := r.Header.Get(contentSHAKey)
20 | 
21 | 		// Respond to the request
22 | 		w.WriteHeader(http.StatusOK)
23 | 
24 | 		assert.NotEmptyf(t, authorization, "%s header should be present", authorizationHeaderKey)
25 | 		assert.NotEmptyf(t, amzDate, "%s header should be present", amzDateKey)
26 | 		assert.NotEmpty(t, contentSHA256, "%s header should be present", contentSHAKey)
27 | 	}))
28 | 	defer server.Close()
29 | 
30 | 	client := http.Client{}
31 | 	tripper, err := NewRoundTripper(&Config{
32 | 		Region:             "us-east1",
33 | 		AwsSecretAccessKey: "xyz",
34 | 		AwsAccessKeyID:     "abc",
35 | 	}, http.DefaultTransport)
36 | 	if err != nil {
37 | 		t.Fatal(err)
38 | 	}
39 | 	client.Transport = tripper
40 | 
41 | 	req, err := http.NewRequestWithContext(context.Background(), http.MethodPost, server.URL, nil)
42 | 	if err != nil {
43 | 		t.Fatal(err)
44 | 	}
45 | 
46 | 	response, _ := client.Do(req)
47 | 	_ = response.Body.Close()
48 | }
49 | 
50 | func TestConfig_Validation(t *testing.T) {
51 | 	t.Parallel()
52 | 
53 | 	testCases := []struct {
54 | 		shouldError bool
55 | 		arg         *Config
56 | 	}{
57 | 		{
58 | 			shouldError: false,
59 | 			arg: &Config{
60 | 				Region:             "us-east1",
61 | 				AwsAccessKeyID:     "someAccessKey",
62 | 				AwsSecretAccessKey: "someSecretKey",
63 | 			},
64 | 		},
65 | 		{
66 | 			shouldError: true,
67 | 			arg:         nil,
68 | 		},
69 | 		{
70 | 			shouldError: true,
71 | 			arg: &Config{
72 | 				Region: "us-east1",
73 | 			},
74 | 		},
75 | 		{
76 | 			shouldError: true,
77 | 			arg: &Config{
78 | 				Region:         "us-east1",
79 | 				AwsAccessKeyID: "someAccessKeyId",
80 | 			},
81 | 		},
82 | 		{
83 | 			shouldError: true,
84 | 			arg: &Config{
85 | 				AwsAccessKeyID:     "SomeAccessKey",
86 | 				AwsSecretAccessKey: "SomeSecretKey",
87 | 			},
88 | 		},
89 | 	}
90 | 
91 | 	for _, tc := range testCases {
92 | 		got := tc.arg.validate()
93 | 		if tc.shouldError {
94 | 			assert.Error(t, got)
95 | 			continue
96 | 		}
97 | 		assert.NoError(t, got)
98 | 	}
99 | }
100 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # xk6-output-prometheus-remote
2 | 
3 | > [!WARNING]
4 | > The `xk6-output-prometheus-remote` extension [has been merged](https://github.com/grafana/k6/pull/4519) into the [main k6 repository](https://github.com/grafana/k6). Please contribute and [open issues there](https://github.com/grafana/k6/issues). This repository is no longer maintained.
5 | 
6 | The `xk6-output-prometheus-remote` extension allows you to publish test-run metrics to Prometheus via a Remote Write endpoint.
7 | 
8 | > :warning: Be careful not to confuse this with the [Prometheus Remote Write **client** extension](https://github.com/grafana/xk6-client-prometheus-remote), which is used for load and performance testing of _Prometheus_ itself.
9 | 
10 | As of k6 v0.42.0, this extension is available within [k6](https://github.com/grafana/k6) as an _experimental module_. For further details, read the [extension graduation guide](https://k6.io/docs/extensions/explanations/extension-graduation/).
11 | 
12 | 
13 | ## Usage
14 | 
15 | Consult the [Prometheus remote write guide in the k6 docs](https://k6.io/docs/results-output/real-time/prometheus-remote-write/) to explore the various methods and options for sending k6 metrics to a Prometheus remote-write endpoint.
16 | 
17 | ## Development
18 | 
19 | For developing or testing this extension, you can build a k6 binary with the local extension using [xk6](https://github.com/grafana/xk6) with the following steps:
20 | 
21 | ```bash
22 | xk6 build --with github.com/grafana/xk6-output-prometheus-remote=.
23 | ```
24 | 
25 | For more details, refer to the k6 docs:
26 | - [Build a k6 binary using Go](https://k6.io/docs/extensions/guides/build-a-k6-binary-using-go/)
27 | - [k6 output extensions](https://k6.io/docs/extensions/get-started/create/output-extensions/)
28 | 
29 | ## Dashboards
30 | 
31 | 
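To try them locally, you can start the compose stack shipped in this repo and point a k6 binary built as shown above at it; a minimal sketch, assuming Docker Compose v2 and the documented default server URL (`http://localhost:9090/api/v1/write`):

```bash
docker compose up -d    # Prometheus on :9090, Grafana on :3000
./k6 run -o xk6-prometheus-rw samples/test.js
```

The `xk6-prometheus-rw` output name is the one registered in `register.go`.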

 

32 | 
33 | ![k6 Prometheus dashboard](./images/dashboard-k6-prometheus.png)
34 | 
35 | This repo contains the [source code](./grafana/dashboards) of two Grafana dashboards designed to visualize test results: [`k6 Prometheus`](https://grafana.com/grafana/dashboards/19665-k6-prometheus/) and [`k6 Prometheus (Native Histograms)`](https://grafana.com/grafana/dashboards/18030-k6-prometheus-native-histograms/).
36 | 
37 | Visit the [documentation](https://k6.io/docs/results-output/real-time/prometheus-remote-write/#time-series-visualization) to learn more about these dashboards. You can import them into your Grafana instance or try them with the docker-compose example in this repo.
38 | 
39 | 🌟 Special thanks to [jwcastillo](https://github.com/jwcastillo) for his contributions and dedication to improving the dashboards.
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/grafana/xk6-output-prometheus-remote
2 | 
3 | go 1.20
4 | 
5 | require (
6 | 	buf.build/gen/go/prometheus/prometheus/protocolbuffers/go v1.31.0-20230627135113-9a12bc2590d2.1
7 | 	github.com/golang/snappy v0.0.4
8 | 	github.com/klauspost/compress v1.17.7
9 | 	github.com/mstoykov/atlas v0.0.0-20220811071828-388f114305dd
10 | 	github.com/prometheus/client_golang v1.16.0
11 | 	github.com/prometheus/client_model v0.4.0
12 | 	github.com/sirupsen/logrus v1.9.3
13 | 	github.com/stretchr/testify v1.9.0
14 | 	go.k6.io/k6 v0.51.1-0.20240606120708-bd114fdbd683
15 | 	google.golang.org/protobuf v1.33.0
16 | 	gopkg.in/guregu/null.v3 v3.3.0
17 | )
18 | 
19 | require (
20 | 	buf.build/gen/go/gogo/protobuf/protocolbuffers/go v1.31.0-20210810001428-4df00b267f94.1 // indirect
21 | 	github.com/beorn7/perks v1.0.1 // indirect
22 | 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
23 | 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
24 | 	github.com/davecgh/go-spew v1.1.1 // indirect
25 | 	github.com/fatih/color v1.16.0 // indirect
26 | 	github.com/go-logr/logr v1.4.1 // indirect
27 | 	github.com/go-logr/stdr v1.2.2 // indirect
28 | 	github.com/golang/protobuf v1.5.4 // indirect
29 | 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
30 | 	github.com/josharian/intern v1.0.0 // indirect
31 | 	github.com/mailru/easyjson v0.7.7 // indirect
32 | 	github.com/mattn/go-colorable v0.1.13 // indirect
33 | 	github.com/mattn/go-isatty v0.0.20 // indirect
34 | 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
35 | 	github.com/pmezard/go-difflib v1.0.0 // indirect
36 | 	github.com/prometheus/common v0.42.0 // indirect
37 | 	github.com/prometheus/procfs v0.10.1 // indirect
38 | 	github.com/spf13/afero v1.1.2 // indirect
39 | 	go.opentelemetry.io/otel v1.24.0 // indirect
40 | 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect
41 | 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 // indirect
42 | 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
43 | 	go.opentelemetry.io/otel/metric v1.24.0 // indirect
44 | 	go.opentelemetry.io/otel/sdk v1.24.0 // indirect
45 | 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
46 | 	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
47 | 	golang.org/x/net v0.23.0 // indirect
48 | 	golang.org/x/sys v0.18.0 // indirect
49 | 	golang.org/x/text v0.14.0 // indirect
50 | 	golang.org/x/time v0.5.0 // indirect
51 | 	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
52 | 	google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240227224415-6ceb2ff114de // indirect 53 | google.golang.org/grpc v1.63.2 // indirect 54 | gopkg.in/yaml.v3 v3.0.1 // indirect 55 | ) 56 | -------------------------------------------------------------------------------- /pkg/sigv4/utils.go: -------------------------------------------------------------------------------- 1 | package sigv4 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "net/url" 7 | "strings" 8 | ) 9 | 10 | func buildAwsNoEscape() [256]bool { 11 | var noEscape [256]bool 12 | 13 | for i := 0; i < len(noEscape); i++ { 14 | // AWS expects every character except these to be escaped 15 | noEscape[i] = (i >= 'A' && i <= 'Z') || 16 | (i >= 'a' && i <= 'z') || 17 | (i >= '0' && i <= '9') || 18 | i == '-' || 19 | i == '.' || 20 | i == '_' || 21 | i == '~' || 22 | i == '/' 23 | } 24 | return noEscape 25 | } 26 | 27 | // escapePath escapes part of a URL path in Amazon style. 28 | // except for the noEscape provided. 29 | // inspired by github.com/aws/smithy-go/encoding/httpbinding EscapePath method 30 | func escapePath(path string, noEscape [256]bool) string { 31 | var buf bytes.Buffer 32 | for i := 0; i < len(path); i++ { 33 | c := path[i] 34 | if noEscape[c] { 35 | buf.WriteByte(c) 36 | continue 37 | } 38 | fmt.Fprintf(&buf, "%%%02X", c) 39 | } 40 | return buf.String() 41 | } 42 | 43 | // stripExcessSpaces will remove the leading and trailing spaces, and side-by-side spaces are converted 44 | // into a single space. 45 | func stripExcessSpaces(str string) string { 46 | if !strings.Contains(str, " ") && !strings.Contains(str, "\t") { 47 | return str 48 | } 49 | 50 | builder := strings.Builder{} 51 | lastFoundSpace := -1 52 | const space = ' ' 53 | str = strings.TrimSpace(str) 54 | for i := 0; i < len(str); i++ { 55 | if str[i] == space || str[i] == '\t' { 56 | lastFoundSpace = i 57 | continue 58 | } 59 | 60 | if lastFoundSpace > 0 && builder.Len() != 0 { 61 | builder.WriteByte(space) 62 | } 63 | builder.WriteByte(str[i]) 64 | lastFoundSpace = -1 65 | } 66 | return builder.String() 67 | } 68 | 69 | // getURIPath returns the escaped URI component from the provided URL. 70 | // Ported from inspired by github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 GetURIPath 71 | func getURIPath(u *url.URL) string { 72 | var uriPath string 73 | 74 | opaque := u.Opaque 75 | if len(opaque) == 0 { 76 | uriPath = u.EscapedPath() 77 | } 78 | 79 | if len(opaque) == 0 && len(uriPath) == 0 { 80 | return "/" 81 | } 82 | 83 | const schemeSep, pathSep, queryStart = "//", "/", "?" 84 | 85 | // Cutout the scheme separator if present. 86 | if strings.Index(opaque, schemeSep) == 0 { 87 | opaque = opaque[len(schemeSep):] 88 | } 89 | 90 | // Cut off the query string if present. 91 | if idx := strings.Index(opaque, queryStart); idx >= 0 { 92 | opaque = opaque[:idx] 93 | } 94 | 95 | // capture URI path starting with first path separator. 
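	// e.g. an Opaque value of "//example.org/bucket/key" has been reduced
	// to "example.org/bucket/key" by this point, so slicing from the first
	// "/" below yields "/bucket/key".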
96 | if idx := strings.Index(opaque, pathSep); idx >= 0 { 97 | uriPath = opaque[idx:] 98 | } 99 | 100 | if len(uriPath) == 0 { 101 | uriPath = "/" 102 | } 103 | 104 | return uriPath 105 | } 106 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting our Developer Relations team, avocados@k6.io. 59 | 60 | All complaints will be reviewed and investigated and will result in a response that 61 | is deemed necessary and appropriate to the circumstances. The project team is 62 | obligated to maintain confidentiality with regard to the reporter of an incident. 63 | Further details of specific enforcement policies may be posted separately. 
64 | 65 | Project maintainers who do not follow or enforce the Code of Conduct in good 66 | faith may face temporary or permanent repercussions as determined by other 67 | members of the project's leadership. 68 | 69 | ## Attribution 70 | 71 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 72 | available at [http://contributor-covenant.org/version/1/4][version] 73 | 74 | [homepage]: http://contributor-covenant.org 75 | [version]: http://contributor-covenant.org/version/1/4/ 76 | -------------------------------------------------------------------------------- /pkg/sigv4/util_test.go: -------------------------------------------------------------------------------- 1 | package sigv4 2 | 3 | import ( 4 | "net/url" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestStripExcessSpaces(t *testing.T) { 11 | t.Parallel() 12 | 13 | testcases := []struct { 14 | arg string 15 | want string 16 | }{ 17 | { 18 | arg: `AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`, 19 | want: `AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`, 20 | }, 21 | { 22 | arg: "a b c d", 23 | want: "a b c d", 24 | }, 25 | { 26 | arg: " abc def ghi jk ", 27 | want: "abc def ghi jk", 28 | }, 29 | { 30 | arg: " 123 456 789 101112 ", 31 | want: "123 456 789 101112", 32 | }, 33 | { 34 | arg: "12 3 1abc123", 35 | want: "12 3 1abc123", 36 | }, 37 | { 38 | arg: "aaa \t bb", 39 | want: "aaa bb", 40 | }, 41 | } 42 | 43 | for _, tc := range testcases { 44 | assert.Equal(t, tc.want, stripExcessSpaces(tc.arg)) 45 | } 46 | } 47 | 48 | func TestGetUriPath(t *testing.T) { 49 | t.Parallel() 50 | 51 | testcases := map[string]struct { 52 | arg string 53 | want string 54 | }{ 55 | "schema and port": { 56 | arg: "https://localhost:9000", 57 | want: "/", 58 | }, 59 | "schema and no port": { 60 | arg: "https://localhost", 61 | want: "/", 62 | }, 63 | "no schema": { 64 | arg: "localhost:9000", 65 | want: "/", 66 | }, 67 | "no schema + path": { 68 | arg: "localhost:9000/abc123", 69 | want: "/abc123", 70 | }, 71 | "no schema, with separator": { 72 | arg: "//localhost:9000", 73 | want: "/", 74 | }, 75 | "no scheme, no port, with separator": { 76 | arg: "//localhost", 77 | want: "/", 78 | }, 79 | "no scheme, with separator, with path": { 80 | arg: "//localhost:9000/abc123", 81 | want: "/abc123", 82 | }, 83 | "no scheme, no port, with separator, with path": { 84 | arg: "//localhost/abc123", 85 | want: "/abc123", 86 | }, 87 | "no schema, query string": { 88 | arg: "localhost:9000/abc123?efg=456", 89 | want: "/abc123", 90 | }, 91 | } 92 | for name, tc := range testcases { 93 | u, err := url.Parse(tc.arg) 94 | if err != nil { 95 | t.Fatal(err) 96 | } 97 | 98 | got := getURIPath(u) 99 | if tc.want != got { 100 | t.Fatalf("test %v failed, want %v got %v \n", name, tc.want, got) 101 | } 102 | } 103 | } 104 | 105 | func TestGetUriPath_invalid_url_noescape(t *testing.T) { 106 | t.Parallel() 107 | 108 | arg := &url.URL{ 109 | Opaque: "//example.org/bucket/key-._~,!@#$%^&*()", 110 | } 111 | 112 | want := "/bucket/key-._~,!@#$%^&*()" 113 | got := getURIPath(arg) 114 | assert.Equal(t, want, got) 115 | } 116 | 117 | func TestEscapePath(t *testing.T) { 118 | t.Parallel() 119 | 120 | testcases := []struct { 121 | arg string 122 | want string 123 | }{ 124 | { 
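			// unreserved characters (letters, digits, '-', '.', '_', '~' and '/')
			// are the ones buildAwsNoEscape leaves untouched in the cases below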
125 | arg: "/", 126 | want: "/", 127 | }, 128 | { 129 | arg: "/abc", 130 | want: "/abc", 131 | }, 132 | { 133 | arg: "/abc129", 134 | want: "/abc129", 135 | }, 136 | { 137 | arg: "/abc-def", 138 | want: "/abc-def", 139 | }, 140 | { 141 | arg: "/abc.xyz~123-456", 142 | want: "/abc.xyz~123-456", 143 | }, 144 | { 145 | arg: "/abc def-ghi", 146 | want: "/abc%20def-ghi", 147 | }, 148 | { 149 | arg: "abc!def ghi", 150 | want: "abc%21def%20ghi", 151 | }, 152 | } 153 | 154 | noEscape := buildAwsNoEscape() 155 | 156 | for _, tc := range testcases { 157 | assert.Equal(t, tc.want, escapePath(tc.arg, noEscape)) 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /pkg/remote/client.go: -------------------------------------------------------------------------------- 1 | // Package remote implements the Prometheus remote write protocol. 2 | package remote 3 | 4 | import ( 5 | "bytes" 6 | "context" 7 | "crypto/tls" 8 | "fmt" 9 | "io" 10 | "math" 11 | "net/http" 12 | "net/url" 13 | "time" 14 | 15 | "github.com/grafana/xk6-output-prometheus-remote/pkg/sigv4" 16 | 17 | prompb "buf.build/gen/go/prometheus/prometheus/protocolbuffers/go" 18 | "github.com/klauspost/compress/snappy" 19 | "google.golang.org/protobuf/proto" 20 | ) 21 | 22 | // HTTPConfig holds the config for the HTTP client. 23 | type HTTPConfig struct { 24 | Timeout time.Duration 25 | TLSConfig *tls.Config 26 | BasicAuth *BasicAuth 27 | SigV4 *sigv4.Config 28 | Headers http.Header 29 | } 30 | 31 | // BasicAuth holds the config for basic authentication. 32 | type BasicAuth struct { 33 | Username, Password string 34 | } 35 | 36 | // WriteClient is a client implementation of the Prometheus remote write protocol. 37 | // It follows the specs defined by the official design document: 38 | // https://docs.google.com/document/d/1LPhVRSFkGNSuU1fBd81ulhsCPR4hkSZyyBj1SZ8fWOM 39 | type WriteClient struct { 40 | hc *http.Client 41 | url *url.URL 42 | cfg *HTTPConfig 43 | } 44 | 45 | // NewWriteClient creates a new WriteClient. 46 | func NewWriteClient(endpoint string, cfg *HTTPConfig) (*WriteClient, error) { 47 | if cfg == nil { 48 | cfg = &HTTPConfig{} 49 | } 50 | u, err := url.Parse(endpoint) 51 | if err != nil { 52 | return nil, err 53 | } 54 | wc := &WriteClient{ 55 | hc: &http.Client{ 56 | Timeout: cfg.Timeout, 57 | }, 58 | url: u, 59 | cfg: cfg, 60 | } 61 | if cfg.TLSConfig != nil { 62 | wc.hc.Transport = &http.Transport{ 63 | TLSClientConfig: cfg.TLSConfig, 64 | } 65 | } 66 | if cfg.SigV4 != nil { 67 | tripper, err := sigv4.NewRoundTripper(cfg.SigV4, wc.hc.Transport) 68 | if err != nil { 69 | return nil, err 70 | } 71 | wc.hc.Transport = tripper 72 | } 73 | return wc, nil 74 | } 75 | 76 | // Store sends a batch of samples to the HTTP endpoint, 77 | // the request is the proto marshaled and encoded. 
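// The body is a snappy-compressed prompb.WriteRequest (see
// newWriteRequestBody), the spec-mandated headers are always set after
// any user-provided ones, and a non-2xx response status is returned as
// an error.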
78 | func (c *WriteClient) Store(ctx context.Context, series []*prompb.TimeSeries) error { 79 | b, err := newWriteRequestBody(series) 80 | if err != nil { 81 | return err 82 | } 83 | req, err := http.NewRequestWithContext( 84 | ctx, http.MethodPost, c.url.String(), bytes.NewReader(b)) 85 | if err != nil { 86 | return fmt.Errorf("create new HTTP request failed: %w", err) 87 | } 88 | if c.cfg.BasicAuth != nil { 89 | req.SetBasicAuth(c.cfg.BasicAuth.Username, c.cfg.BasicAuth.Password) 90 | } 91 | 92 | if len(c.cfg.Headers) > 0 { 93 | req.Header = c.cfg.Headers.Clone() 94 | } 95 | 96 | req.Header.Set("User-Agent", "k6-prometheus-rw-output") 97 | 98 | // They are mostly defined by the specs 99 | req.Header.Set("Content-Encoding", "snappy") 100 | req.Header.Set("Content-Type", "application/x-protobuf") 101 | req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") 102 | 103 | resp, err := c.hc.Do(req) 104 | if err != nil { 105 | return fmt.Errorf("HTTP POST request failed: %w", err) 106 | } 107 | defer func() { 108 | err = resp.Body.Close() 109 | if err != nil { 110 | panic(err) 111 | } 112 | }() 113 | 114 | _, err = io.Copy(io.Discard, resp.Body) 115 | if err != nil { 116 | return err 117 | } 118 | 119 | return validateResponseStatus(resp.StatusCode) 120 | } 121 | 122 | func newWriteRequestBody(series []*prompb.TimeSeries) ([]byte, error) { 123 | b, err := proto.Marshal(&prompb.WriteRequest{ 124 | Timeseries: series, 125 | }) 126 | if err != nil { 127 | return nil, fmt.Errorf("encoding series as protobuf write request failed: %w", err) 128 | } 129 | if snappy.MaxEncodedLen(len(b)) < 0 { 130 | return nil, fmt.Errorf("the protobuf message is too large to be handled by Snappy encoder; "+ 131 | "size: %d, limit: %d", len(b), math.MaxUint32) 132 | } 133 | return snappy.Encode(nil, b), nil 134 | } 135 | 136 | func validateResponseStatus(code int) error { 137 | if code >= http.StatusOK && code < 300 { 138 | return nil 139 | } 140 | 141 | return fmt.Errorf("got status code: %d instead expected a 2xx successful status code", code) 142 | } 143 | -------------------------------------------------------------------------------- /pkg/remote/client_test.go: -------------------------------------------------------------------------------- 1 | package remote 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "math" 7 | "net/http" 8 | "net/http/httptest" 9 | "net/url" 10 | "testing" 11 | "time" 12 | 13 | "github.com/grafana/xk6-output-prometheus-remote/pkg/stale" 14 | 15 | prompb "buf.build/gen/go/prometheus/prometheus/protocolbuffers/go" 16 | "github.com/golang/snappy" 17 | "github.com/stretchr/testify/assert" 18 | "github.com/stretchr/testify/require" 19 | "google.golang.org/protobuf/proto" 20 | ) 21 | 22 | func TestNewWriteClient(t *testing.T) { 23 | t.Parallel() 24 | t.Run("DefaultConfig", func(t *testing.T) { 25 | t.Parallel() 26 | wc, err := NewWriteClient("http://example.com/api/v1/write", nil) 27 | require.NoError(t, err) 28 | require.NotNil(t, wc) 29 | assert.Equal(t, wc.cfg, &HTTPConfig{}) 30 | }) 31 | 32 | t.Run("CustomConfig", func(t *testing.T) { 33 | t.Parallel() 34 | hc := &HTTPConfig{Timeout: time.Second} 35 | wc, err := NewWriteClient("http://example.com/api/v1/write", hc) 36 | require.NoError(t, err) 37 | require.NotNil(t, wc) 38 | assert.Equal(t, wc.cfg, hc) 39 | }) 40 | 41 | t.Run("InvalidURL", func(t *testing.T) { 42 | t.Parallel() 43 | wc, err := NewWriteClient("fake://bad url", nil) 44 | require.Error(t, err) 45 | assert.Nil(t, wc) 46 | }) 47 | } 48 | 49 | func TestClientStore(t *testing.T) { 
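	// The httptest server below stands in for a remote-write endpoint:
	// it asserts the spec-mandated headers and replies with 204 No
	// Content, the status Prometheus itself returns on successful writes.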
50 | t.Parallel() 51 | h := func(rw http.ResponseWriter, r *http.Request) { 52 | assert.Equal(t, r.Header.Get("Content-Encoding"), "snappy") 53 | assert.Equal(t, r.Header.Get("Content-Type"), "application/x-protobuf") 54 | assert.Equal(t, r.Header.Get("User-Agent"), "k6-prometheus-rw-output") 55 | assert.Equal(t, r.Header.Get("X-Prometheus-Remote-Write-Version"), "0.1.0") 56 | assert.NotEmpty(t, r.Header.Get("Content-Length")) 57 | 58 | b, err := io.ReadAll(r.Body) 59 | assert.NoError(t, err) 60 | assert.NotEmpty(t, len(b)) 61 | 62 | rw.WriteHeader(http.StatusNoContent) 63 | } 64 | ts := httptest.NewServer(http.HandlerFunc(h)) 65 | defer ts.Close() 66 | 67 | u, err := url.Parse(ts.URL) 68 | require.NoError(t, err) 69 | 70 | c := &WriteClient{ 71 | hc: ts.Client(), 72 | url: u, 73 | cfg: &HTTPConfig{}, 74 | } 75 | data := &prompb.TimeSeries{ 76 | Labels: []*prompb.Label{ 77 | { 78 | Name: "label1", 79 | Value: "label1-val", 80 | }, 81 | }, 82 | Samples: []*prompb.Sample{ 83 | { 84 | Value: 8.5, 85 | Timestamp: time.Now().UnixMilli(), 86 | }, 87 | }, 88 | } 89 | err = c.Store(context.Background(), []*prompb.TimeSeries{data}) 90 | assert.NoError(t, err) 91 | } 92 | 93 | func TestClientStoreHTTPError(t *testing.T) { 94 | t.Parallel() 95 | h := func(w http.ResponseWriter, _ *http.Request) { 96 | http.Error(w, "bad", http.StatusUnauthorized) 97 | } 98 | ts := httptest.NewServer(http.HandlerFunc(h)) 99 | defer ts.Close() 100 | 101 | u, err := url.Parse(ts.URL) 102 | require.NoError(t, err) 103 | 104 | c := &WriteClient{ 105 | hc: ts.Client(), 106 | url: u, 107 | cfg: &HTTPConfig{}, 108 | } 109 | assert.Error(t, c.Store(context.Background(), nil)) 110 | } 111 | 112 | func TestClientStoreHTTPBasic(t *testing.T) { 113 | t.Parallel() 114 | h := func(_ http.ResponseWriter, r *http.Request) { 115 | u, pwd, ok := r.BasicAuth() 116 | require.True(t, ok) 117 | assert.Equal(t, "usertest", u) 118 | assert.Equal(t, "pwdtest", pwd) 119 | } 120 | ts := httptest.NewServer(http.HandlerFunc(h)) 121 | defer ts.Close() 122 | 123 | u, err := url.Parse(ts.URL) 124 | require.NoError(t, err) 125 | 126 | c := &WriteClient{ 127 | hc: ts.Client(), 128 | url: u, 129 | cfg: &HTTPConfig{ 130 | BasicAuth: &BasicAuth{ 131 | Username: "usertest", 132 | Password: "pwdtest", 133 | }, 134 | }, 135 | } 136 | assert.NoError(t, c.Store(context.Background(), nil)) 137 | } 138 | 139 | func TestClientStoreHeaders(t *testing.T) { 140 | t.Parallel() 141 | h := func(_ http.ResponseWriter, r *http.Request) { 142 | assert.Equal(t, r.Header.Get("X-Prometheus-Remote-Write-Version"), "0.1.0") 143 | assert.Equal(t, r.Header.Get("X-MY-CUSTOM-HEADER"), "fake") 144 | } 145 | ts := httptest.NewServer(http.HandlerFunc(h)) 146 | defer ts.Close() 147 | 148 | u, err := url.Parse(ts.URL) 149 | require.NoError(t, err) 150 | 151 | c := &WriteClient{ 152 | hc: ts.Client(), 153 | url: u, 154 | cfg: &HTTPConfig{ 155 | Headers: http.Header(map[string][]string{ 156 | "X-MY-CUSTOM-HEADER": {"fake"}, 157 | // If the same key, of a mandatory protocol's header 158 | // is provided, it will be overwritten. 
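				// (Store clones these headers first and then calls Set with
				// the spec value, which is why the handler sees "0.1.0" above)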
159 | "X-Prometheus-Remote-Write-Version": {"fake"}, 160 | }), 161 | }, 162 | } 163 | assert.NoError(t, c.Store(context.Background(), nil)) 164 | } 165 | 166 | func TestNewWriteRequestBody(t *testing.T) { 167 | t.Parallel() 168 | ts := []*prompb.TimeSeries{ 169 | { 170 | Labels: []*prompb.Label{{Name: "label1", Value: "val1"}}, 171 | Samples: []*prompb.Sample{{Value: 10.1, Timestamp: time.Unix(1, 0).Unix()}}, 172 | }, 173 | } 174 | b, err := newWriteRequestBody(ts) 175 | require.NoError(t, err) 176 | require.NotEmpty(t, string(b)) 177 | assert.Contains(t, string(b), `label1`) 178 | } 179 | 180 | func TestNewWriteRequestBodyWithStaleMarker(t *testing.T) { 181 | t.Parallel() 182 | 183 | timestamp := time.Date(2022, time.December, 15, 11, 41, 18, 123, time.UTC) 184 | 185 | ts := []*prompb.TimeSeries{ 186 | { 187 | Labels: []*prompb.Label{{Name: "label1", Value: "val1"}}, 188 | Samples: []*prompb.Sample{{ 189 | Value: stale.Marker, 190 | Timestamp: timestamp.UnixMilli(), 191 | }}, 192 | }, 193 | } 194 | b, err := newWriteRequestBody(ts) 195 | require.NoError(t, err) 196 | require.NotEmpty(t, b) 197 | 198 | sb, err := snappy.Decode(nil, b) 199 | require.NoError(t, err) 200 | 201 | var series prompb.WriteRequest 202 | err = proto.Unmarshal(sb, &series) 203 | require.NoError(t, err) 204 | require.NotEmpty(t, series.Timeseries[0]) 205 | require.NotEmpty(t, series.Timeseries[0].Samples) 206 | 207 | assert.True(t, math.IsNaN(series.Timeseries[0].Samples[0].Value)) 208 | assert.Equal(t, timestamp.UnixMilli(), series.Timeseries[0].Samples[0].Timestamp) 209 | } 210 | 211 | func TestValidateStatusCode(t *testing.T) { 212 | t.Parallel() 213 | tests := []struct { 214 | status int 215 | expErr bool 216 | }{ 217 | {status: http.StatusOK, expErr: false}, // Mimir 218 | {status: http.StatusNoContent, expErr: false}, // Prometheus 219 | {status: http.StatusBadRequest, expErr: true}, 220 | } 221 | for _, tt := range tests { 222 | err := validateResponseStatus(tt.status) 223 | if tt.expErr { 224 | assert.Error(t, err) 225 | continue 226 | } 227 | assert.NoError(t, err) 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /pkg/remotewrite/trend_test.go: -------------------------------------------------------------------------------- 1 | package remotewrite 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | prompb "buf.build/gen/go/prometheus/prometheus/protocolbuffers/go" 9 | dto "github.com/prometheus/client_model/go" 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | "go.k6.io/k6/metrics" 13 | "google.golang.org/protobuf/encoding/protojson" 14 | ) 15 | 16 | func TestExtendedTrendSinkMapPrompb(t *testing.T) { 17 | t.Parallel() 18 | 19 | now := time.Now() 20 | r := metrics.NewRegistry() 21 | 22 | sample := metrics.Sample{ 23 | TimeSeries: metrics.TimeSeries{ 24 | Metric: &metrics.Metric{ 25 | Name: "test", 26 | Type: metrics.Trend, 27 | }, 28 | Tags: r.RootTagSet(), 29 | }, 30 | Value: 1.0, 31 | Time: now, 32 | } 33 | 34 | expected := []*prompb.TimeSeries{ 35 | buildTimeSeries("k6_test_avg", 1.0, now), 36 | buildTimeSeries("k6_test_count", 1.0, now), 37 | buildTimeSeries("k6_test_max", 1.0, now), 38 | buildTimeSeries("k6_test_med", 1.0, now), 39 | buildTimeSeries("k6_test_min", 1.0, now), 40 | buildTimeSeries("k6_test_p095", 1.0, now), 41 | buildTimeSeries("k6_test_p90", 1.0, now), 42 | buildTimeSeries("k6_test_sum", 1.0, now), 43 | } 44 | resolver, err := metrics.GetResolversForTrendColumns([]string{"count", 
"min", "max", "avg", "med", "p(90)", "p(95)"}) 45 | require.NoError(t, err) 46 | resolver["p90"] = resolver["p(90)"] 47 | delete(resolver, "p(90)") 48 | resolver["p095"] = resolver["p(95)"] 49 | delete(resolver, "p(95)") 50 | resolver["sum"] = func(t *metrics.TrendSink) float64 { 51 | return t.Total() 52 | } 53 | 54 | st, err := newExtendedTrendSink(resolver) 55 | require.NoError(t, err) 56 | st.Add(sample) 57 | require.Equal(t, st.Count(), uint64(1)) 58 | 59 | ts := st.MapPrompb(sample.TimeSeries, sample.Time) 60 | require.Len(t, ts, 8) 61 | 62 | sortByNameLabel(ts) 63 | assertTimeSeriesEqual(t, expected, ts) 64 | } 65 | 66 | func TestTrendAsGaugesFindIxName(t *testing.T) { 67 | t.Parallel() 68 | 69 | cases := []struct { 70 | // they have to be sorted 71 | labels []string 72 | expIndex uint16 73 | }{ 74 | { 75 | labels: []string{"tag1", "tag2"}, 76 | expIndex: 0, 77 | }, 78 | { 79 | labels: []string{"2", "__name__"}, 80 | expIndex: 1, 81 | }, 82 | { 83 | labels: []string{"__name__", "tag1", "__name__"}, 84 | expIndex: 0, 85 | }, 86 | { 87 | labels: []string{"1", "__name__", "__name__1"}, 88 | expIndex: 1, 89 | }, 90 | } 91 | for _, tc := range cases { 92 | lbls := make([]*prompb.Label, 0, len(tc.labels)) 93 | for _, l := range tc.labels { 94 | lbls = append(lbls, &prompb.Label{Name: l}) 95 | } 96 | tg := trendAsGauges{labels: lbls} 97 | tg.CacheNameIndex() 98 | assert.Equal(t, tc.expIndex, tg.ixname) 99 | } 100 | } 101 | 102 | func TestNativeHistogramSinkAdd(t *testing.T) { 103 | t.Parallel() 104 | 105 | ts := metrics.TimeSeries{ 106 | Metric: &metrics.Metric{ 107 | Name: "k6_test_metric", 108 | Contains: metrics.Time, 109 | }, 110 | } 111 | sink := newNativeHistogramSink(ts.Metric) 112 | 113 | // k6 passes time values with ms time unit 114 | // the sink converts them to seconds. 115 | sink.Add(metrics.Sample{TimeSeries: ts, Value: float64((3 * time.Second).Milliseconds())}) 116 | sink.Add(metrics.Sample{TimeSeries: ts, Value: float64((2 * time.Second).Milliseconds())}) 117 | 118 | dmetric := &dto.Metric{} 119 | err := sink.H.Write(dmetric) 120 | require.NoError(t, err) 121 | 122 | assert.Equal(t, float64(5), *dmetric.Histogram.SampleSum) 123 | 124 | // the schema is generated from the bucket factor used 125 | assert.Equal(t, int32(3), *dmetric.Histogram.Schema) 126 | } 127 | 128 | func TestNativeHistogramSinkMapPrompb(t *testing.T) { 129 | t.Parallel() 130 | 131 | r := metrics.NewRegistry() 132 | series := metrics.TimeSeries{ 133 | Metric: &metrics.Metric{ 134 | Name: "test", 135 | Type: metrics.Trend, 136 | }, 137 | Tags: r.RootTagSet().With("tagk1", "tagv1"), 138 | } 139 | 140 | st := newNativeHistogramSink(series.Metric) 141 | st.Add(metrics.Sample{ 142 | TimeSeries: series, 143 | Value: 1.52, 144 | Time: time.Unix(1, 0), 145 | }) 146 | st.Add(metrics.Sample{ 147 | TimeSeries: series, 148 | Value: 3.14, 149 | Time: time.Unix(2, 0), 150 | }) 151 | ts := st.MapPrompb(series, time.Unix(3, 0)) 152 | 153 | // It should be the easiest way for asserting the entire struct, 154 | // because the structs contains a bunch of internals value that we don't want to assert. 
155 | require.Len(t, ts, 1)
156 | b, err := protojson.Marshal(ts[0])
157 | require.NoError(t, err)
158 | 
159 | expected := `{"labels":[{"name":"__name__","value":"k6_test"},{"name":"tagk1","value":"tagv1"}],"histograms":[{"countInt":"2","positiveDeltas":["1","0"],"positiveSpans":[{"length":1,"offset":5},{"length":1,"offset":8}],"schema":3,"sum":4.66,"timestamp":"3000","zeroCountInt":"0","zeroThreshold":2.938735877055719e-39}]}`
160 | assert.JSONEq(t, expected, string(b))
161 | }
162 | 
163 | func BenchmarkK6TrendSinkAdd(b *testing.B) {
164 | m := &metrics.Metric{
165 | Type: metrics.Trend,
166 | Sink: metrics.NewTrendSink(),
167 | }
168 | s := metrics.Sample{
169 | TimeSeries: metrics.TimeSeries{
170 | Metric: m,
171 | },
172 | Value: rand.Float64(), //nolint:gosec
173 | Time: time.Now(),
174 | }
175 | b.ResetTimer()
176 | for i := 0; i < b.N; i++ {
177 | m.Sink.Add(s)
178 | }
179 | }
180 | 
181 | func TestNativeHistogramSinkMapPrompbWithValueType(t *testing.T) {
182 | t.Parallel()
183 | 
184 | r := metrics.NewRegistry()
185 | series := metrics.TimeSeries{
186 | Metric: &metrics.Metric{
187 | Name: "test",
188 | Type: metrics.Trend,
189 | Contains: metrics.Time,
190 | },
191 | Tags: r.RootTagSet(),
192 | }
193 | 
194 | st := newNativeHistogramSink(series.Metric)
195 | st.Add(metrics.Sample{
196 | TimeSeries: series,
197 | Value: 1.52,
198 | Time: time.Unix(1, 0),
199 | })
200 | ts := st.MapPrompb(series, time.Unix(2, 0))
201 | require.Len(t, ts, 1)
202 | assert.Equal(t, "k6_test_seconds", ts[0].Labels[0].Value)
203 | }
204 | 
205 | func TestBaseUnit(t *testing.T) {
206 | t.Parallel()
207 | 
208 | tests := []struct {
209 | in metrics.ValueType
210 | exp string
211 | }{
212 | {in: metrics.Default, exp: ""},
213 | {in: metrics.Time, exp: "seconds"},
214 | {in: metrics.Data, exp: "bytes"},
215 | }
216 | for _, tt := range tests {
217 | assert.Equal(t, tt.exp, baseUnit(tt.in))
218 | }
219 | }
220 | 
221 | func BenchmarkHistogramSinkAdd(b *testing.B) {
222 | m := &metrics.Metric{
223 | Name: "bench",
224 | Type: metrics.Trend,
225 | Contains: metrics.Time,
226 | }
227 | ts := newNativeHistogramSink(m)
228 | s := metrics.Sample{
229 | TimeSeries: metrics.TimeSeries{
230 | Metric: m,
231 | },
232 | Value: rand.Float64(), //nolint:gosec
233 | Time: time.Now(),
234 | }
235 | b.ResetTimer()
236 | for i := 0; i < b.N; i++ {
237 | ts.Add(s)
238 | }
239 | }
240 | 
-------------------------------------------------------------------------------- /pkg/sigv4/sigv4.go: --------------------------------------------------------------------------------
1 | // Package sigv4 is responsible for AWS SigV4 signing of requests
2 | package sigv4
3 | 
4 | import (
5 | "bytes"
6 | "crypto/hmac"
7 | "crypto/sha256"
8 | "encoding/hex"
9 | "fmt"
10 | "io"
11 | "net/http"
12 | "net/url"
13 | "sort"
14 | "strconv"
15 | "strings"
16 | "time"
17 | )
18 | 
19 | type signer interface {
20 | sign(req *http.Request) error
21 | }
22 | 
23 | type defaultSigner struct {
24 | config *Config
25 | 
26 | // noEscape represents the characters that AWS doesn't escape
27 | noEscape [256]bool
28 | 
29 | ignoredHeaders map[string]struct{}
30 | }
31 | 
32 | func newDefaultSigner(config *Config) signer {
33 | ds := &defaultSigner{
34 | config: config,
35 | noEscape: buildAwsNoEscape(),
36 | ignoredHeaders: map[string]struct{}{
37 | "Authorization": {},
38 | "User-Agent": {},
39 | "X-Amzn-Trace-Id": {},
40 | "Expect": {},
41 | },
42 | }
43 | 
44 | return ds
45 | }
46 | 
47 | func (d *defaultSigner) sign(req *http.Request) error {
48 | now := time.Now().UTC()
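// A bird's-eye view of the steps below: hash the payload, build the
// canonical request, derive the signing key from the secret
// (date -> region -> service -> "aws4_request", see deriveKey), sign the
// string-to-sign, and attach the resulting Authorization header.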
49 | iSO8601Date := now.Format(timeFormat) 50 | 51 | credentialScope := buildCredentialScope(now, d.config.Region) 52 | 53 | payloadHash, err := d.getPayloadHash(req) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | req.Header.Set("Host", req.Host) 59 | req.Header.Set(amzDateKey, iSO8601Date) 60 | req.Header.Set(contentSHAKey, payloadHash) 61 | 62 | signedHeadersStr, canonicalHeaderStr := buildCanonicalHeaders(req, d.ignoredHeaders) 63 | 64 | canonicalQueryString := getCanonicalQueryString(req.URL) 65 | canonicalReq := buildCanonicalString( 66 | req.Method, 67 | getCanonicalURI(req.URL, d.noEscape), 68 | canonicalQueryString, 69 | canonicalHeaderStr, 70 | signedHeadersStr, 71 | payloadHash, 72 | ) 73 | 74 | signature := sign( 75 | deriveKey(d.config.AwsSecretAccessKey, d.config.Region), 76 | buildStringToSign(iSO8601Date, credentialScope, canonicalReq), 77 | ) 78 | 79 | authorizationHeader := fmt.Sprintf( 80 | "%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", 81 | signingAlgorithm, 82 | d.config.AwsAccessKeyID, 83 | credentialScope, 84 | signedHeadersStr, 85 | signature, 86 | ) 87 | 88 | req.URL.RawQuery = canonicalQueryString 89 | req.Header.Set(authorizationHeaderKey, authorizationHeader) 90 | return nil 91 | } 92 | 93 | func (d *defaultSigner) getPayloadHash(req *http.Request) (string, error) { 94 | if req.Body == nil { 95 | return emptyStringSHA256, nil 96 | } 97 | 98 | reqBody, err := io.ReadAll(req.Body) 99 | if err != nil { 100 | return "", err 101 | } 102 | reqBodyBuffer := bytes.NewReader(reqBody) 103 | 104 | hash := sha256.New() 105 | if _, err := io.Copy(hash, reqBodyBuffer); err != nil { 106 | return "", err 107 | } 108 | 109 | payloadHash := hex.EncodeToString(hash.Sum(nil)) 110 | 111 | // ensuring that we keep the request body intact for next tripper 112 | req.Body = io.NopCloser(bytes.NewReader(reqBody)) 113 | 114 | return payloadHash, nil 115 | } 116 | 117 | func buildCredentialScope(signingTime time.Time, region string) string { 118 | return fmt.Sprintf( 119 | "%s/%s/%s/aws4_request", 120 | signingTime.UTC().Format(shortTimeFormat), 121 | region, 122 | awsServiceName, 123 | ) 124 | } 125 | 126 | func buildCanonicalString(method, uri, query, canonicalHeaders, signedHeaders, payloadHash string) string { 127 | return strings.Join([]string{ 128 | method, 129 | uri, 130 | query, 131 | canonicalHeaders, 132 | signedHeaders, 133 | payloadHash, 134 | }, "\n") 135 | } 136 | 137 | // buildCanonicalHeaders is mostly ported from https://github.com/aws/aws-sdk-go-v2/aws/signer/v4 buildCanonicalHeaders 138 | func buildCanonicalHeaders( 139 | req *http.Request, 140 | ignoredHeaders map[string]struct{}, 141 | ) (signedHeaders, canonicalHeadersStr string) { 142 | const hostHeader, contentLengthHeader = "host", "content-length" 143 | host, header, length := req.Host, req.Header, req.ContentLength 144 | 145 | signed := make(http.Header) 146 | headers := append([]string{}, hostHeader) 147 | signed[hostHeader] = append(signed[hostHeader], host) 148 | 149 | if length > 0 { 150 | headers = append(headers, contentLengthHeader) 151 | signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) 152 | } 153 | 154 | for k, v := range header { 155 | if _, ok := ignoredHeaders[k]; ok { 156 | continue 157 | } 158 | 159 | if strings.EqualFold(k, contentLengthHeader) { 160 | // prevent signing already handled content-length header. 
161 | continue
162 | }
163 | 
164 | lowerCaseKey := strings.ToLower(k)
165 | if _, ok := signed[lowerCaseKey]; ok {
166 | // include additional values
167 | signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
168 | continue
169 | }
170 | 
171 | headers = append(headers, lowerCaseKey)
172 | signed[lowerCaseKey] = v
173 | }
174 | 
175 | // aws requires header keys to be sorted
176 | sort.Strings(headers)
177 | signedHeaders = strings.Join(headers, ";")
178 | 
179 | var canonicalHeaders strings.Builder
180 | for _, h := range headers {
181 | if h == hostHeader {
182 | canonicalHeaders.WriteString(fmt.Sprintf("%s:%s\n", hostHeader, stripExcessSpaces(host)))
183 | continue
184 | }
185 | 
186 | canonicalHeaders.WriteString(fmt.Sprintf("%s:", h))
187 | values := signed[h]
188 | for j, v := range values {
189 | cleanedValue := strings.TrimSpace(stripExcessSpaces(v))
190 | canonicalHeaders.WriteString(cleanedValue)
191 | if j < len(values)-1 {
192 | canonicalHeaders.WriteRune(',')
193 | }
194 | }
195 | canonicalHeaders.WriteRune('\n')
196 | }
197 | canonicalHeadersStr = canonicalHeaders.String()
198 | return signedHeaders, canonicalHeadersStr
199 | }
200 | 
201 | func getCanonicalURI(u *url.URL, noEscape [256]bool) string {
202 | return escapePath(getURIPath(u), noEscape)
203 | }
204 | 
205 | func getCanonicalQueryString(u *url.URL) string {
206 | query := u.Query()
207 | 
208 | // Sort Each Query Key's Values
209 | for key := range query {
210 | sort.Strings(query[key])
211 | }
212 | 
213 | var rawQuery strings.Builder
214 | rawQuery.WriteString(strings.ReplaceAll(query.Encode(), "+", "%20"))
215 | return rawQuery.String()
216 | }
217 | 
218 | func buildStringToSign(amzDate, credentialScope, canonicalRequestString string) string {
219 | hash := sha256.New()
220 | hash.Write([]byte(canonicalRequestString))
221 | return strings.Join([]string{
222 | signingAlgorithm,
223 | amzDate,
224 | credentialScope,
225 | hex.EncodeToString(hash.Sum(nil)),
226 | }, "\n")
227 | }
228 | 
229 | func deriveKey(secretKey, region string) string {
230 | signingDate := time.Now().UTC().Format(shortTimeFormat)
231 | hmacDate := hmacSHA256([]byte("AWS4"+secretKey), signingDate)
232 | hmacRegion := hmacSHA256(hmacDate, region)
233 | hmacService := hmacSHA256(hmacRegion, awsServiceName)
234 | signingKey := hmacSHA256(hmacService, "aws4_request")
235 | return string(signingKey)
236 | }
237 | 
238 | func hmacSHA256(key []byte, data string) []byte {
239 | h := hmac.New(sha256.New, key)
240 | h.Write([]byte(data))
241 | return h.Sum(nil)
242 | }
243 | 
244 | func sign(signingKey string, strToSign string) string {
245 | h := hmac.New(sha256.New, []byte(signingKey))
246 | h.Write([]byte(strToSign))
247 | sig := hex.EncodeToString(h.Sum(nil))
248 | return sig
249 | }
250 | 
-------------------------------------------------------------------------------- /pkg/remotewrite/trend.go: --------------------------------------------------------------------------------
1 | package remotewrite
2 | 
3 | import (
4 | "fmt"
5 | "sort"
6 | "time"
7 | 
8 | prompb "buf.build/gen/go/prometheus/prometheus/protocolbuffers/go"
9 | "github.com/prometheus/client_golang/prometheus"
10 | dto "github.com/prometheus/client_model/go"
11 | "go.k6.io/k6/metrics"
12 | )
13 | 
14 | // TrendStatsResolver is a map of trend stat names and their respective resolver functions
15 | type TrendStatsResolver map[string]func(*metrics.TrendSink) float64
16 | 
17 | type extendedTrendSink struct {
18 | *metrics.TrendSink
19 | 
20 | trendStats map[string]func(*metrics.TrendSink) float64
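// A resolver entry maps a stat name to the function that extracts it
// from the sink; for example (mirroring the setup used in trend_test.go):
//
//	TrendStatsResolver{"sum": func(t *metrics.TrendSink) float64 { return t.Total() }}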
21 | }
22 | 
23 | func newExtendedTrendSink(tsr TrendStatsResolver) (*extendedTrendSink, error) {
24 | if len(tsr) < 1 {
25 | return nil, fmt.Errorf("trend stats resolver is empty")
26 | }
27 | return &extendedTrendSink{
28 | TrendSink: metrics.NewTrendSink(),
29 | trendStats: tsr,
30 | }, nil
31 | }
32 | 
33 | // MapPrompb converts a k6 time series and its related
34 | // Sink into the equivalent TimeSeries model as defined by
35 | // the Remote write specification.
36 | func (sink *extendedTrendSink) MapPrompb(series metrics.TimeSeries, t time.Time) []*prompb.TimeSeries {
37 | // Prometheus' metric system does not support Trend, so this mapping will
38 | // store a counter for the number of reported values and gauges to keep
39 | // track of aggregated values. Also store a sum of the values to allow
40 | // the calculation of moving averages.
41 | // TODO: when Prometheus implements support for sparse histograms, re-visit this implementation
42 | 
43 | tg := &trendAsGauges{
44 | series: make([]*prompb.TimeSeries, 0, len(sink.trendStats)),
45 | // TODO: should we add the base unit suffix?
46 | // It could depend on the decision for other metric types.
47 | // Does k6_http_req_duration_seconds_count make sense?
48 | labels: MapSeries(series, ""),
49 | timestamp: t.UnixMilli(),
50 | }
51 | tg.CacheNameIndex()
52 | 
53 | for stat, statfn := range sink.trendStats {
54 | tg.Append(stat, adaptUnit(series.Metric.Contains, statfn(sink.TrendSink)))
55 | }
56 | return tg.series
57 | }
58 | 
59 | type trendAsGauges struct {
60 | // series is the slice of the converted TimeSeries.
61 | series []*prompb.TimeSeries
62 | 
63 | // labels are the shared labels between all the Gauges.
64 | labels []*prompb.Label
65 | 
66 | // timestamp is the shared timestamp in ms between all the Gauges.
67 | timestamp int64
68 | 
69 | // ixname is the slice's index
70 | // of the __name__ Label item.
71 | //
72 | // 16 bits should be enough for the max length;
73 | // a higher value will probably generate
74 | // serious issues in other places.
75 | ixname uint16
76 | }
77 | 
78 | func (tg *trendAsGauges) Append(suffix string, v float64) {
79 | ts := &prompb.TimeSeries{
80 | Labels: make([]*prompb.Label, len(tg.labels)),
81 | Samples: make([]*prompb.Sample, 1),
82 | }
83 | for i := 0; i < len(tg.labels); i++ {
84 | ts.Labels[i] = &prompb.Label{
85 | Name: tg.labels[i].Name,
86 | Value: tg.labels[i].Value,
87 | }
88 | }
89 | ts.Labels[tg.ixname].Value += "_" + suffix
90 | 
91 | ts.Samples[0] = &prompb.Sample{
92 | Timestamp: tg.timestamp,
93 | Value: v,
94 | }
95 | tg.series = append(tg.series, ts)
96 | }
97 | 
98 | // CacheNameIndex finds the __name__ label's index;
99 | // if it is different from the most common expected case,
100 | // it caches the value.
101 | // The labels slice is expected to be sorted.
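// For example (see TestTrendAsGaugesFindIxName): for labels named
// ["tag1", "tag2"] ixname stays 0, while for ["2", "__name__"] it is 1.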
102 | func (tg *trendAsGauges) CacheNameIndex() {
103 | if tg.labels[0].Name == namelbl {
104 | // ixname is expected to be the first in most of the cases;
105 | // the default value is already 0
106 | return
107 | }
108 | 
109 | // in the case __name__ is not the first,
110 | // then search for its position
111 | 
112 | i := sort.Search(len(tg.labels), func(i int) bool {
113 | return tg.labels[i].Name == namelbl
114 | })
115 | 
116 | if i < len(tg.labels) && tg.labels[i].Name == namelbl {
117 | tg.ixname = uint16(i) //nolint:gosec
118 | }
119 | }
120 | 
121 | type nativeHistogramSink struct {
122 | H prometheus.Histogram
123 | }
124 | 
125 | func newNativeHistogramSink(m *metrics.Metric) *nativeHistogramSink {
126 | return &nativeHistogramSink{
127 | H: prometheus.NewHistogram(prometheus.HistogramOpts{
128 | Name: m.Name,
129 | // 1.1 is the starting value suggested by Prometheus.
130 | // It sounds good considering the general purpose
131 | // it has to address.
132 | // In the future, we could consider adding more tuning
133 | // if it is required.
134 | NativeHistogramBucketFactor: 1.1,
135 | }),
136 | }
137 | }
138 | 
139 | func (sink *nativeHistogramSink) Add(s metrics.Sample) {
140 | // The Prometheus convention is to use seconds
141 | // as the time unit.
142 | //
143 | // It isn't a requirement, but with the current factor fixed to 1.1,
144 | // having seconds is beneficial for a better resolution.
145 | //
146 | // The assumption is that a higher precision is required
147 | // for sub-second values, and a more relaxed one for higher values.
148 | // If the Value type is not defined, no assumption can be made
149 | // because the Sample's Value could contain any unit.
150 | sink.H.Observe(adaptUnit(s.Metric.Contains, s.Value))
151 | }
152 | 
153 | // TODO: create a smaller Sink interface for this Output.
154 | // A Sink with only Add and MapPrompb methods should be enough.
155 | // One-method interfaces could be even better, to be checked.
156 | 
157 | // P implements metrics.Sink.
158 | func (*nativeHistogramSink) P(_ float64) float64 {
159 | panic("Native Histogram Sink has no support of percentile (P)")
160 | }
161 | 
162 | // Format implements metrics.Sink.
163 | func (*nativeHistogramSink) Format(_ time.Duration) map[string]float64 {
164 | panic("Native Histogram Sink has no support of formatting (Format)")
165 | }
166 | 
167 | // IsEmpty implements metrics.Sink.
168 | func (*nativeHistogramSink) IsEmpty() bool {
169 | panic("Native Histogram Sink has no support of emptiness check (IsEmpty)")
170 | }
171 | 
172 | // Drain implements metrics.Sink.
173 | func (*nativeHistogramSink) Drain() ([]byte, error) {
174 | panic("Native Histogram Sink has no support of draining")
175 | }
176 | 
177 | // Merge implements metrics.Sink.
178 | func (*nativeHistogramSink) Merge(_ []byte) error {
179 | panic("Native Histogram Sink has no support of merging")
180 | }
181 | 
182 | // MapPrompb maps the Trend type to the experimental Native Histogram.
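// The resulting series keeps the metric's labels and gets the base unit
// appended to its name; e.g. a Time-valued Trend named "test" is emitted
// as __name__="k6_test_seconds" (asserted in trend_test.go).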
183 | func (sink *nativeHistogramSink) MapPrompb(series metrics.TimeSeries, t time.Time) []*prompb.TimeSeries {
184 | suffix := baseUnit(series.Metric.Contains)
185 | labels := MapSeries(series, suffix)
186 | timestamp := t.UnixMilli()
187 | 
188 | return []*prompb.TimeSeries{
189 | {
190 | Labels: labels,
191 | Histograms: []*prompb.Histogram{
192 | histogramToHistogramProto(timestamp, sink.H),
193 | },
194 | },
195 | }
196 | }
197 | 
198 | func histogramToHistogramProto(timestamp int64, h prometheus.Histogram) *prompb.Histogram {
199 | // TODO: research whether a better way is possible.
200 | metric := &dto.Metric{}
201 | if err := h.Write(metric); err != nil {
202 | panic(fmt.Errorf("failed to convert Native Histogram to the related Protobuf: %w", err))
203 | }
204 | hmetric := metric.Histogram
205 | 
206 | return &prompb.Histogram{
207 | Count: &prompb.Histogram_CountInt{CountInt: *hmetric.SampleCount},
208 | Sum: *hmetric.SampleSum,
209 | Schema: *hmetric.Schema,
210 | ZeroThreshold: *hmetric.ZeroThreshold,
211 | ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: *hmetric.ZeroCount},
212 | NegativeSpans: toBucketSpanProto(hmetric.NegativeSpan),
213 | NegativeDeltas: hmetric.NegativeDelta,
214 | PositiveSpans: toBucketSpanProto(hmetric.PositiveSpan),
215 | PositiveDeltas: hmetric.PositiveDelta,
216 | Timestamp: timestamp,
217 | }
218 | }
219 | 
220 | func toBucketSpanProto(s []*dto.BucketSpan) []*prompb.BucketSpan {
221 | spans := make([]*prompb.BucketSpan, len(s))
222 | for i := 0; i < len(s); i++ {
223 | spans[i] = &prompb.BucketSpan{Offset: *s[i].Offset, Length: *s[i].Length}
224 | }
225 | return spans
226 | }
227 | 
228 | func baseUnit(vt metrics.ValueType) string {
229 | switch vt {
230 | case metrics.Time:
231 | return "seconds"
232 | case metrics.Data:
233 | return "bytes"
234 | default:
235 | return ""
236 | }
237 | }
238 | 
239 | // adaptUnit converts the generated value into the expected base unit
240 | // as requested by the Prometheus convention.
241 | //
242 | // Time: converted to seconds from milliseconds.
243 | // Data: k6 emits it in bytes, so it is already fine.
244 | // Other: use the submitted unit.
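//
// A minimal illustration (values chosen for the example):
//
//	adaptUnit(metrics.Time, 250)  // 250 ms  -> 0.25 s
//	adaptUnit(metrics.Data, 1024) // bytes   -> 1024, unchanged
//	adaptUnit(metrics.Default, 7) // no unit -> 7, unchanged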
245 | func adaptUnit(vt metrics.ValueType, v float64) float64 { 246 | if vt == metrics.Time { 247 | return v / 1000 248 | } 249 | return v 250 | } 251 | -------------------------------------------------------------------------------- /pkg/remotewrite/remotewrite_test.go: -------------------------------------------------------------------------------- 1 | package remotewrite 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "math" 7 | "testing" 8 | "time" 9 | 10 | prompb "buf.build/gen/go/prometheus/prometheus/protocolbuffers/go" 11 | "github.com/sirupsen/logrus" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | "go.k6.io/k6/lib/types" 15 | "go.k6.io/k6/metrics" 16 | "gopkg.in/guregu/null.v3" 17 | ) 18 | 19 | func TestOutputDescription(t *testing.T) { 20 | t.Parallel() 21 | o := Output{ 22 | config: Config{ 23 | ServerURL: null.StringFrom("http://remote-url.fake"), 24 | }, 25 | } 26 | exp := "Prometheus remote write (http://remote-url.fake)" 27 | assert.Equal(t, exp, o.Description()) 28 | } 29 | 30 | func TestOutputConvertToPbSeries(t *testing.T) { 31 | t.Parallel() 32 | 33 | registry := metrics.NewRegistry() 34 | metric1 := registry.MustNewMetric("metric1", metrics.Counter) 35 | tagset := registry.RootTagSet().With("tagk1", "tagv1") 36 | 37 | samples := []metrics.SampleContainer{ 38 | metrics.Sample{ 39 | TimeSeries: metrics.TimeSeries{ 40 | Metric: metric1, 41 | Tags: tagset, 42 | }, 43 | Time: time.Date(2022, time.September, 1, 0, 0, 0, 0, time.UTC), 44 | Value: 3, 45 | }, 46 | metrics.Sample{ 47 | TimeSeries: metrics.TimeSeries{ 48 | Metric: metric1, 49 | Tags: tagset, 50 | }, 51 | Time: time.Date(2022, time.August, 31, 0, 0, 0, 0, time.UTC), 52 | Value: 4, 53 | }, 54 | metrics.Sample{ 55 | TimeSeries: metrics.TimeSeries{ 56 | Metric: registry.MustNewMetric("metric2", metrics.Counter), 57 | Tags: tagset, 58 | }, 59 | Time: time.Date(2022, time.September, 1, 0, 0, 0, 0, time.UTC), 60 | Value: 2, 61 | }, 62 | metrics.Sample{ 63 | TimeSeries: metrics.TimeSeries{ 64 | Metric: registry.MustNewMetric("metric3", metrics.Rate), 65 | Tags: tagset, 66 | }, 67 | Time: time.Date(2022, time.September, 1, 0, 0, 0, 0, time.UTC), 68 | Value: 7, 69 | }, 70 | } 71 | 72 | o := Output{ 73 | tsdb: make(map[metrics.TimeSeries]*seriesWithMeasure), 74 | } 75 | 76 | pbseries := o.convertToPbSeries(samples) 77 | require.Len(t, pbseries, 3) 78 | require.Len(t, o.tsdb, 3) 79 | 80 | unix1sept := int64(1661990400 * 1000) // in ms 81 | exp := []*prompb.TimeSeries{ 82 | { 83 | Labels: []*prompb.Label{ 84 | {Name: "__name__", Value: "k6_metric1_total"}, 85 | {Name: "tagk1", Value: "tagv1"}, 86 | }, 87 | Samples: []*prompb.Sample{ 88 | {Value: 7, Timestamp: unix1sept}, 89 | }, 90 | }, 91 | { 92 | Labels: []*prompb.Label{ 93 | {Name: "__name__", Value: "k6_metric2_total"}, 94 | {Name: "tagk1", Value: "tagv1"}, 95 | }, 96 | Samples: []*prompb.Sample{ 97 | {Value: 2, Timestamp: unix1sept}, 98 | }, 99 | }, 100 | { 101 | Labels: []*prompb.Label{ 102 | {Name: "__name__", Value: "k6_metric3_rate"}, 103 | {Name: "tagk1", Value: "tagv1"}, 104 | }, 105 | Samples: []*prompb.Sample{ 106 | {Value: 1, Timestamp: unix1sept}, 107 | }, 108 | }, 109 | } 110 | 111 | sortByNameLabel(pbseries) 112 | assert.Equal(t, exp, pbseries) 113 | } 114 | 115 | //nolint:paralleltest,tparallel 116 | func TestOutputConvertToPbSeries_WithPreviousState(t *testing.T) { 117 | t.Parallel() 118 | 119 | registry := metrics.NewRegistry() 120 | metric1 := registry.MustNewMetric("metric1", metrics.Counter) 121 | tagset := 
registry.RootTagSet().With("tagk1", "tagv1") 122 | t0 := time.Date(2022, time.September, 1, 0, 0, 0, 0, time.UTC).Add(10 * time.Millisecond) 123 | 124 | swm := &seriesWithMeasure{ 125 | TimeSeries: metrics.TimeSeries{ 126 | Metric: metric1, 127 | Tags: tagset, 128 | }, 129 | Latest: t0, 130 | // it's not relevant for this test to initialize the Sink's values 131 | Measure: &metrics.CounterSink{}, 132 | } 133 | 134 | o := Output{ 135 | tsdb: map[metrics.TimeSeries]*seriesWithMeasure{ 136 | swm.TimeSeries: swm, 137 | }, 138 | } 139 | 140 | testcases := []struct { 141 | name string 142 | time time.Time 143 | expSeries int 144 | expCount float64 145 | expLatest time.Time 146 | }{ 147 | { 148 | name: "Before", 149 | time: time.Date(2022, time.August, 31, 0, 0, 0, 0, time.UTC), 150 | expSeries: 0, 151 | expCount: 1, 152 | expLatest: t0, 153 | }, 154 | { 155 | name: "AfterButSub-ms", // so equal when truncated 156 | time: t0.Add(10 * time.Microsecond), 157 | expSeries: 0, 158 | expCount: 2, 159 | expLatest: time.Date(2022, time.September, 1, 0, 0, 0, int(10*time.Millisecond), time.UTC), 160 | }, 161 | { 162 | name: "After", 163 | time: t0.Add(1 * time.Millisecond), 164 | expSeries: 1, 165 | expCount: 3, 166 | expLatest: time.Date(2022, time.September, 1, 0, 0, 0, int(11*time.Millisecond), time.UTC), 167 | }, 168 | } 169 | 170 | for _, tc := range testcases { 171 | tc := tc 172 | t.Run(tc.name, func(t *testing.T) { 173 | pbseries := o.convertToPbSeries([]metrics.SampleContainer{ 174 | metrics.Sample{ 175 | TimeSeries: metrics.TimeSeries{ 176 | Metric: metric1, 177 | Tags: tagset, 178 | }, 179 | Value: 1, 180 | Time: tc.time, 181 | }, 182 | }) 183 | require.Len(t, o.tsdb, 1) 184 | require.Equal(t, tc.expSeries, len(pbseries)) 185 | assert.Equal(t, tc.expCount, swm.Measure.(*metrics.CounterSink).Value) 186 | assert.Equal(t, tc.expLatest, swm.Latest) 187 | }) 188 | } 189 | } 190 | 191 | func TestNewSeriesWithK6SinkMeasure(t *testing.T) { 192 | t.Parallel() 193 | 194 | tests := []struct { 195 | expSink metrics.Sink 196 | metricType metrics.MetricType 197 | }{ 198 | { 199 | metricType: metrics.Counter, 200 | expSink: &metrics.CounterSink{}, 201 | }, 202 | { 203 | metricType: metrics.Gauge, 204 | expSink: &metrics.GaugeSink{}, 205 | }, 206 | { 207 | metricType: metrics.Rate, 208 | expSink: &metrics.RateSink{}, 209 | }, 210 | { 211 | metricType: metrics.Trend, 212 | expSink: &extendedTrendSink{}, 213 | }, 214 | } 215 | 216 | registry := metrics.NewRegistry() 217 | for i, tt := range tests { 218 | s := metrics.TimeSeries{ 219 | Metric: registry.MustNewMetric(fmt.Sprintf("metric%d", i), tt.metricType), 220 | } 221 | resolvers, err := metrics.GetResolversForTrendColumns([]string{"avg"}) 222 | require.NoError(t, err) 223 | swm := newSeriesWithMeasure(s, false, resolvers) 224 | require.NotNil(t, swm) 225 | assert.Equal(t, s, swm.TimeSeries) 226 | require.NotNil(t, swm.Measure) 227 | assert.IsType(t, tt.expSink, swm.Measure) 228 | } 229 | } 230 | 231 | func TestNewSeriesWithNativeHistogramMeasure(t *testing.T) { 232 | t.Parallel() 233 | 234 | registry := metrics.NewRegistry() 235 | s := metrics.TimeSeries{ 236 | Metric: registry.MustNewMetric("metric1", metrics.Trend), 237 | } 238 | 239 | swm := newSeriesWithMeasure(s, true, nil) 240 | require.NotNil(t, swm) 241 | assert.Equal(t, s, swm.TimeSeries) 242 | require.NotNil(t, swm.Measure) 243 | 244 | nhs, ok := swm.Measure.(*nativeHistogramSink) 245 | require.True(t, ok) 246 | assert.NotNil(t, nhs.H) 247 | } 248 | 249 | func TestOutputSetTrendStatsResolver(t 
*testing.T) { 250 | t.Parallel() 251 | 252 | tests := []struct { 253 | stats []string 254 | expResolverKeys []string 255 | }{ 256 | { 257 | stats: []string{}, 258 | expResolverKeys: []string{}, 259 | }, 260 | { 261 | stats: []string{"sum"}, 262 | expResolverKeys: []string{"sum"}, 263 | }, 264 | { 265 | stats: []string{"avg"}, 266 | expResolverKeys: []string{"avg"}, 267 | }, 268 | { 269 | stats: []string{"p(27)", "p(0.999)", "p(1)", "p(0)"}, 270 | expResolverKeys: []string{"p27", "p0999", "p1", "p0"}, 271 | }, 272 | { 273 | stats: []string{ 274 | "count", "sum", 275 | "max", "min", "med", "avg", "p(90)", "p(99)", 276 | }, 277 | expResolverKeys: []string{ 278 | "count", "sum", 279 | "max", "min", "med", "avg", "p90", "p99", 280 | }, 281 | }, 282 | } 283 | 284 | for _, tt := range tests { 285 | o := Output{} 286 | err := o.setTrendStatsResolver(tt.stats) 287 | require.NoError(t, err) 288 | require.NotNil(t, o.trendStatsResolver) 289 | 290 | assert.Len(t, o.trendStatsResolver, len(tt.expResolverKeys)) 291 | assert.ElementsMatch(t, tt.expResolverKeys, func() []string { 292 | var keys []string 293 | for statKey := range o.trendStatsResolver { 294 | keys = append(keys, statKey) 295 | } 296 | return keys 297 | }()) 298 | } 299 | } 300 | 301 | func TestOutputStaleMarkers(t *testing.T) { 302 | t.Parallel() 303 | 304 | registry := metrics.NewRegistry() 305 | trendSinkSeries := metrics.TimeSeries{ 306 | Metric: registry.MustNewMetric("metric1", metrics.Trend), 307 | Tags: registry.RootTagSet(), 308 | } 309 | counterSinkSeries := metrics.TimeSeries{ 310 | Metric: registry.MustNewMetric("metric2", metrics.Counter), 311 | Tags: registry.RootTagSet(), 312 | } 313 | 314 | o := Output{ 315 | now: func() time.Time { 316 | return time.Unix(1, 0) 317 | }, 318 | } 319 | err := o.setTrendStatsResolver([]string{"p(99)"}) 320 | require.NoError(t, err) 321 | trendSink, err := newExtendedTrendSink(o.trendStatsResolver) 322 | require.NoError(t, err) 323 | 324 | o.tsdb = map[metrics.TimeSeries]*seriesWithMeasure{ 325 | trendSinkSeries: { 326 | TimeSeries: trendSinkSeries, 327 | // TODO: if Measure is a lighter interface 328 | // then it can be just a mapper mock. 329 | Measure: trendSink, 330 | }, 331 | counterSinkSeries: { 332 | TimeSeries: counterSinkSeries, 333 | Measure: &metrics.CounterSink{}, 334 | }, 335 | } 336 | 337 | markers := o.staleMarkers() 338 | require.Len(t, markers, 2) 339 | 340 | sortByNameLabel(markers) 341 | expNameLabels := []string{"k6_metric1_p99", "k6_metric2_total"} 342 | expTimestamp := time.Unix(1, int64(1*time.Millisecond)).UnixMilli() 343 | for i, expName := range expNameLabels { 344 | assert.Equal(t, expName, markers[i].Labels[0].Value) 345 | assert.Equal(t, expTimestamp, markers[i].Samples[0].Timestamp) 346 | assert.True(t, math.IsNaN(markers[i].Samples[0].Value), "it isn't a StaleNaN value") 347 | } 348 | } 349 | 350 | func TestOutputStopWithStaleMarkers(t *testing.T) { 351 | t.Parallel() 352 | 353 | for _, tc := range []bool{true, false} { 354 | buf := bytes.NewBuffer(nil) 355 | logger := logrus.New() 356 | logger.SetLevel(logrus.DebugLevel) 357 | logger.SetOutput(buf) 358 | 359 | o := Output{ 360 | logger: logger, 361 | config: Config{ 362 | // setting a large interval so it does not trigger 363 | // and the trigger can be inoked only when Stop is 364 | // invoked. 
365 | PushInterval: types.NullDurationFrom(1 * time.Hour), 366 | StaleMarkers: null.BoolFrom(tc), 367 | }, 368 | now: time.Now, 369 | } 370 | 371 | err := o.Start() 372 | require.NoError(t, err) 373 | err = o.Stop() 374 | require.NoError(t, err) 375 | 376 | // TODO: it isn't optimal to maintain 377 | // if a new logline is added in Start or flushMetrics 378 | // then this test will break 379 | // A mock of the client and check if Store is invoked 380 | // should be a more stable method. 381 | messages := buf.String() 382 | msg := "No time series to mark as stale" 383 | assertfn := assert.Contains 384 | if !tc { 385 | assertfn = assert.NotContains 386 | } 387 | assertfn(t, messages, msg) 388 | } 389 | } 390 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | buf.build/gen/go/gogo/protobuf/protocolbuffers/go v1.31.0-20210810001428-4df00b267f94.1 h1:IpfoSUtXcmtXmL672yCeHx96evE7Z4AyWo8R2lVBU3o= 2 | buf.build/gen/go/gogo/protobuf/protocolbuffers/go v1.31.0-20210810001428-4df00b267f94.1/go.mod h1:Az9fvKFYQGtiDa7cPW9T3Nbw8u3hpmD6wG15RsbQlA0= 3 | buf.build/gen/go/prometheus/prometheus/protocolbuffers/go v1.31.0-20230627135113-9a12bc2590d2.1 h1:aAMGEehZVBrkvsvQYwE4yNrXRYkSX84eZpRaKPiDuxg= 4 | buf.build/gen/go/prometheus/prometheus/protocolbuffers/go v1.31.0-20230627135113-9a12bc2590d2.1/go.mod h1:iqW5nSujn3ZJ9ISZQX3K/uWwjckAp8hz0J4/wNgFBZo= 5 | github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= 6 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 7 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 8 | github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= 9 | github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 10 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 11 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 12 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 13 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 14 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 15 | github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= 16 | github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= 17 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 18 | github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= 19 | github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 20 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 21 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 22 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 23 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 24 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 25 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 26 | github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= 27 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 28 | 
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 29 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 30 | github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= 31 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= 32 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= 33 | github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 34 | github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 35 | github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= 36 | github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= 37 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 38 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 39 | github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= 40 | github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 41 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 42 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 43 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 44 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 45 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 46 | github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= 47 | github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 48 | github.com/mccutchen/go-httpbin v1.1.2-0.20190116014521-c5cb2f4802fa h1:lx8ZnNPwjkXSzOROz0cg69RlErRXs+L3eDkggASWKLo= 49 | github.com/mstoykov/atlas v0.0.0-20220811071828-388f114305dd h1:AC3N94irbx2kWGA8f/2Ks7EQl2LxKIRQYuT9IJDwgiI= 50 | github.com/mstoykov/atlas v0.0.0-20220811071828-388f114305dd/go.mod h1:9vRHVuLCjoFfE3GT06X0spdOAO+Zzo4AMjdIwUHBvAk= 51 | github.com/mstoykov/envconfig v1.5.0 h1:E2FgWf73BQt0ddgn7aoITkQHmgwAcHup1s//MsS5/f8= 52 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 53 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 54 | github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= 55 | github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= 56 | github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= 57 | github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= 58 | github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= 59 | github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= 60 | github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= 61 | github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= 62 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 63 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 64 | github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 65 | github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= 66 | github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= 67 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 68 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 69 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 70 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 71 | go.k6.io/k6 v0.51.1-0.20240606120708-bd114fdbd683 h1:ockJVj41NNWzadyC3fvlSMRXT8QYxEzo13tVvE/WBEw= 72 | go.k6.io/k6 v0.51.1-0.20240606120708-bd114fdbd683/go.mod h1:jKW0vrZjFqum5UGRPw/38ks4bYEywYuEo8vMccp/0Nc= 73 | go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= 74 | go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= 75 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= 76 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= 77 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE= 78 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM= 79 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= 80 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= 81 | go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= 82 | go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= 83 | go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= 84 | go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= 85 | go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= 86 | go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= 87 | go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= 88 | go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= 89 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 90 | golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= 91 | golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= 92 | golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= 93 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 94 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 95 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 96 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 97 | golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= 98 | golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 99 | golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= 100 | golang.org/x/text v0.14.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 101 | golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= 102 | golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 103 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 104 | google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= 105 | google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= 106 | google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= 107 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= 108 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= 109 | google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= 110 | google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= 111 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 112 | google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 113 | google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= 114 | google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= 115 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 116 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 117 | gopkg.in/guregu/null.v3 v3.3.0 h1:8j3ggqq+NgKt/O7mbFVUFKUMWN+l1AmT5jQmJ6nPh2c= 118 | gopkg.in/guregu/null.v3 v3.3.0/go.mod h1:E4tX2Qe3h7QdL+uZ3a0vqvYwKQsRSQKM5V4YltdgH9Y= 119 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 120 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 121 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 122 | -------------------------------------------------------------------------------- /pkg/remotewrite/remotewrite.go: -------------------------------------------------------------------------------- 1 | // Package remotewrite is a k6 output that sends metrics to a Prometheus remote write endpoint. 2 | package remotewrite 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | "strings" 8 | "time" 9 | 10 | "github.com/grafana/xk6-output-prometheus-remote/pkg/remote" 11 | "github.com/grafana/xk6-output-prometheus-remote/pkg/stale" 12 | 13 | "go.k6.io/k6/metrics" 14 | "go.k6.io/k6/output" 15 | 16 | prompb "buf.build/gen/go/prometheus/prometheus/protocolbuffers/go" 17 | "github.com/sirupsen/logrus" 18 | ) 19 | 20 | var _ output.Output = new(Output) 21 | 22 | // Output is a k6 output that sends metrics to a Prometheus remote write endpoint. 23 | type Output struct { 24 | output.SampleBuffer 25 | 26 | config Config 27 | logger logrus.FieldLogger 28 | now func() time.Time 29 | periodicFlusher *output.PeriodicFlusher 30 | tsdb map[metrics.TimeSeries]*seriesWithMeasure 31 | trendStatsResolver map[string]func(*metrics.TrendSink) float64 32 | 33 | // TODO: copy the prometheus/remote.WriteClient interface and depend on it 34 | client *remote.WriteClient 35 | } 36 | 37 | // New creates a new Output instance. 
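// As a usage sketch (assuming a k6 binary built with this extension and
// the conventional K6_PROMETHEUS_RW_SERVER_URL environment variable):
//
//	K6_PROMETHEUS_RW_SERVER_URL=http://localhost:9090/api/v1/write \
//	  k6 run -o xk6-prometheus-rw samples/test.js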
38 | func New(params output.Params) (*Output, error) {
39 | logger := params.Logger.WithFields(logrus.Fields{"output": "Prometheus remote write"})
40 | 
41 | config, err := GetConsolidatedConfig(params.JSONConfig, params.Environment, params.ConfigArgument)
42 | if err != nil {
43 | return nil, err
44 | }
45 | 
46 | clientConfig, err := config.RemoteConfig()
47 | if err != nil {
48 | return nil, err
49 | }
50 | 
51 | wc, err := remote.NewWriteClient(config.ServerURL.String, clientConfig)
52 | if err != nil {
53 | return nil, fmt.Errorf("failed to initialize the Prometheus remote write client: %w", err)
54 | }
55 | 
56 | o := &Output{
57 | client: wc,
58 | config: config,
59 | // TODO: consider making this function millisecond-based
60 | // so we don't need to truncate it every time we invoke it.
61 | // Before that, we should analyze whether having it in ns is useful in some cases.
62 | now: time.Now,
63 | logger: logger,
64 | tsdb: make(map[metrics.TimeSeries]*seriesWithMeasure),
65 | }
66 | 
67 | if len(config.TrendStats) > 0 {
68 | if err := o.setTrendStatsResolver(config.TrendStats); err != nil {
69 | return nil, err
70 | }
71 | }
72 | return o, nil
73 | }
74 | 
75 | // Description returns a short human-readable description of the output.
76 | func (o *Output) Description() string {
77 | return fmt.Sprintf("Prometheus remote write (%s)", o.config.ServerURL.String)
78 | }
79 | 
80 | // Start initializes the output.
81 | func (o *Output) Start() error {
82 | d := o.config.PushInterval.TimeDuration()
83 | periodicFlusher, err := output.NewPeriodicFlusher(d, o.flush)
84 | if err != nil {
85 | return err
86 | }
87 | o.periodicFlusher = periodicFlusher
88 | o.logger.WithField("flushtime", d).Debug("Output initialized")
89 | return nil
90 | }
91 | 
92 | // Stop stops the output.
93 | func (o *Output) Stop() error {
94 | o.logger.Debug("Stopping the output")
95 | defer o.logger.Debug("Output stopped")
96 | o.periodicFlusher.Stop()
97 | 
98 | if !o.config.StaleMarkers.Bool {
99 | return nil
100 | }
101 | staleMarkers := o.staleMarkers()
102 | if len(staleMarkers) < 1 {
103 | o.logger.Debug("No time series to mark as stale")
104 | return nil
105 | }
106 | o.logger.WithField("staleMarkers", len(staleMarkers)).Debug("Marking time series as stale")
107 | 
108 | err := o.client.Store(context.Background(), staleMarkers)
109 | if err != nil {
110 | return fmt.Errorf("marking time series as stale failed: %w", err)
111 | }
112 | return nil
113 | }
114 | 
115 | // staleMarkers maps all the seen time series with a stale marker.
116 | func (o *Output) staleMarkers() []*prompb.TimeSeries {
117 | // Add 1ms so that, in the extreme case that the time frame
118 | // between the last and the next flush operation is under a millisecond,
119 | // we can avoid the sample being seen as a duplicate
120 | // by forcing it into the future.
121 | // It is essential because, if it overlaps, the remote write discards the last sample,
122 | // so the stale marker and the metric would remain active for the next 5 min,
123 | // as with the default logic without stale markers.
124 | timestamp := o.now().
125 | Truncate(time.Millisecond).Add(1 * time.Millisecond).UnixMilli()
126 | 
127 | staleMarkers := make([]*prompb.TimeSeries, 0, len(o.tsdb))
128 | for _, swm := range o.tsdb {
129 | series := swm.MapPrompb()
130 | // The series' length is expected to be equal to 1 in most cases;
131 | // the only exception where more than 1 is expected is when
132 | // trend stats have been configured with multiple values.
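// For instance, a Trend configured with the stats ["avg", "p(95)"] maps to
// two series, k6_<name>_avg and k6_<name>_p95, and each of them needs
// its own stale marker.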
133 | for _, s := range series {
134 | if len(s.Samples) < 1 {
135 | if len(s.Histograms) < 1 {
136 | panic("data integrity check: samples and native histograms" +
137 | " can't be empty at the same time")
138 | }
139 | s.Samples = append(s.Samples, &prompb.Sample{})
140 | }
141 | 
142 | s.Samples[0].Value = stale.Marker
143 | s.Samples[0].Timestamp = timestamp
144 | }
145 | staleMarkers = append(staleMarkers, series...)
146 | }
147 | return staleMarkers
148 | }
149 | 
150 | // setTrendStatsResolver sets the resolver for the Trend stats.
151 | //
152 | // TODO: refactor, the code can be improved
153 | func (o *Output) setTrendStatsResolver(trendStats []string) error {
154 | trendStatsCopy := make([]string, 0, len(trendStats))
155 | hasSum := false
156 | // copy excluding sum
157 | for _, stat := range trendStats {
158 | if stat == "sum" {
159 | hasSum = true
160 | continue
161 | }
162 | trendStatsCopy = append(trendStatsCopy, stat)
163 | }
164 | resolvers, err := metrics.GetResolversForTrendColumns(trendStatsCopy)
165 | if err != nil {
166 | return err
167 | }
168 | // sum is not supported by GetResolversForTrendColumns,
169 | // so if it has been requested,
170 | // it is added specifically.
171 | if hasSum {
172 | resolvers["sum"] = func(t *metrics.TrendSink) float64 {
173 | return t.Total()
174 | }
175 | }
176 | o.trendStatsResolver = make(TrendStatsResolver, len(resolvers))
177 | for stat, fn := range resolvers {
178 | statKey := stat
179 | 
180 | // the config passes percentiles in the p(x) form, for example p(95),
181 | // but the mapping generates series names in the form p95.
182 | //
183 | // TODO: maybe decouple the mapping from the stat resolver keys?
184 | if strings.HasPrefix(statKey, "p(") {
185 | statKey = stat[2 : len(statKey)-1] // trim the parenthesis
186 | statKey = strings.ReplaceAll(statKey, ".", "") // remove dots, p(0.95) => p095
187 | statKey = "p" + statKey
188 | }
189 | o.trendStatsResolver[statKey] = fn
190 | }
191 | return nil
192 | }
193 | 
194 | func (o *Output) flush() {
195 | var (
196 | start = time.Now()
197 | nts int
198 | )
199 | 
200 | defer func() {
201 | d := time.Since(start)
202 | okmsg := "Successfully flushed time series to the remote write endpoint"
203 | if d > time.Duration(o.config.PushInterval.Duration) {
204 | // There is no intermediary storage so warn if writing to the remote write endpoint becomes too slow
205 | o.logger.WithField("nts", nts).
206 | Warnf("%s but it took %s while flush period is %s. Some samples may be dropped.",
207 | okmsg, d.String(), o.config.PushInterval.String())
208 | } else {
209 | o.logger.WithField("nts", nts).WithField("took", d).Debug(okmsg)
210 | }
211 | }()
212 | 
213 | samplesContainers := o.GetBufferedSamples()
214 | if len(samplesContainers) < 1 {
215 | o.logger.Debug("no buffered samples, skip the flushing operation")
216 | return
217 | }
218 | 
219 | // The remote write endpoint accepts the TimeSeries structure defined in the protobuf schema. It must:
220 | // a) contain a Labels array
221 | // b) have a __name__ label: without it, the metric might be unqueryable or even rejected
222 | // as a metric without a name. This behaviour depends on the underlying storage used.
223 | // c) not have duplicate timestamps within 1 timeseries, see https://github.com/prometheus/prometheus/issues/9210
224 | // Prometheus write handler processes only some fields as of now, so here we'll add only them.
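// For example, a minimal series satisfying (a)-(c) might look like
// (illustrative values only):
//
//	&prompb.TimeSeries{
//	    Labels:  []*prompb.Label{{Name: "__name__", Value: "k6_http_reqs_total"}},
//	    Samples: []*prompb.Sample{{Value: 42, Timestamp: time.Now().UnixMilli()}},
//	}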
225 | 
226 | promTimeSeries := o.convertToPbSeries(samplesContainers)
227 | nts = len(promTimeSeries)
228 | o.logger.WithField("nts", nts).Debug("Converted samples to Prometheus TimeSeries")
229 | 
230 | if err := o.client.Store(context.Background(), promTimeSeries); err != nil {
231 | o.logger.WithError(err).Error("Failed to send the time series data to the endpoint")
232 | return
233 | }
234 | }
235 | 
236 | func (o *Output) convertToPbSeries(samplesContainers []metrics.SampleContainer) []*prompb.TimeSeries {
237 | // The seen map is required because the sample containers
238 | // could have several samples for the same time series;
239 | // in this way, we can aggregate and flush them as a single value
240 | // without overloading the remote write endpoint.
241 | //
242 | // It is also essential because the core generates timestamps
243 | // with a higher precision (ns) than Prometheus (ms),
244 | // so we need to aggregate all the samples in the same time bucket.
245 | // More context can be found in the issue
246 | // https://github.com/grafana/xk6-output-prometheus-remote/issues/11
247 | seen := make(map[metrics.TimeSeries]struct{})
248 | 
249 | for _, samplesContainer := range samplesContainers {
250 | samples := samplesContainer.GetSamples()
251 | 
252 | for _, sample := range samples {
253 | truncTime := sample.Time.Truncate(time.Millisecond)
254 | swm, ok := o.tsdb[sample.TimeSeries]
255 | if !ok {
256 | // TODO: encapsulate the trend arguments into a Trend Mapping factory
257 | swm = newSeriesWithMeasure(sample.TimeSeries, o.config.TrendAsNativeHistogram.Bool, o.trendStatsResolver)
258 | swm.Latest = truncTime
259 | o.tsdb[sample.TimeSeries] = swm
260 | seen[sample.TimeSeries] = struct{}{}
261 | } else { //nolint:gocritic
262 | // FIXME: remove the gocritic linter inhibition as soon as the rest of the TODOs are done
263 | // save as a seen item only when the samples have a time greater than
264 | // the previously saved one, otherwise some implementations
265 | // could see it as a duplicate and generate warnings (e.g. Mimir)
266 | if truncTime.After(swm.Latest) {
267 | swm.Latest = truncTime
268 | seen[sample.TimeSeries] = struct{}{}
269 | }
270 | 
271 | // If current == previous:
272 | // the current received time, before being truncated, had a higher precision.
273 | // It's fine to aggregate them, but we avoid adding to the seen map because:
274 | // - in the case it is a new flush operation, we avoid delivering
275 | // so as not to generate duplicates
276 | // - in the case it is in the same operation but across sample containers,
277 | // the time series should already be on the seen map and we can skip
278 | // re-adding it.
279 | 
280 | // If current < previous:
281 | // - in the case current is a new flush operation, it shouldn't happen;
282 | // for this reason, we can avoid creating dedicated logic.
283 | // TODO: we should evaluate whether it would be better to have a defensive condition
284 | // for handling it, logging a warning or returning an error
285 | // and avoiding aggregating the value.
286 | // - in the case current is in the same operation but across sample containers,
287 | // it's fine to aggregate,
288 | // but as with the equal condition, it can rely on the previously seen value.
289 | }
290 | swm.Measure.Add(sample)
291 | }
292 | }
293 | 
294 | pbseries := make([]*prompb.TimeSeries, 0, len(seen))
295 | for s := range seen {
296 | pbseries = append(pbseries, o.tsdb[s].MapPrompb()...)
297 | } 298 | return pbseries 299 | } 300 | 301 | type seriesWithMeasure struct { 302 | metrics.TimeSeries 303 | Measure metrics.Sink 304 | 305 | // Latest tracks the latest time 306 | // when the measure has been updated 307 | // 308 | // TODO: the logic for this value should stay directly 309 | // in a method in struct 310 | Latest time.Time 311 | 312 | // TODO: maybe add some caching for the mapping? 313 | } 314 | 315 | // TODO: add unit tests 316 | func (swm seriesWithMeasure) MapPrompb() []*prompb.TimeSeries { 317 | var newts []*prompb.TimeSeries 318 | 319 | mapMonoSeries := func(s metrics.TimeSeries, suffix string, t time.Time) prompb.TimeSeries { 320 | return prompb.TimeSeries{ 321 | Labels: MapSeries(s, suffix), 322 | Samples: []*prompb.Sample{ 323 | {Timestamp: t.UnixMilli()}, 324 | }, 325 | } 326 | } 327 | 328 | //nolint:forcetypeassert 329 | switch swm.Metric.Type { 330 | case metrics.Counter: 331 | ts := mapMonoSeries(swm.TimeSeries, "total", swm.Latest) 332 | ts.Samples[0].Value = swm.Measure.(*metrics.CounterSink).Value 333 | newts = []*prompb.TimeSeries{&ts} 334 | 335 | case metrics.Gauge: 336 | ts := mapMonoSeries(swm.TimeSeries, "", swm.Latest) 337 | ts.Samples[0].Value = swm.Measure.(*metrics.GaugeSink).Value 338 | newts = []*prompb.TimeSeries{&ts} 339 | 340 | case metrics.Rate: 341 | ts := mapMonoSeries(swm.TimeSeries, "rate", swm.Latest) 342 | // pass zero duration here because time is useless for formatting rate 343 | rateVals := swm.Measure.(*metrics.RateSink).Format(time.Duration(0)) 344 | ts.Samples[0].Value = rateVals["rate"] 345 | newts = []*prompb.TimeSeries{&ts} 346 | 347 | case metrics.Trend: 348 | // TODO: 349 | // - Add a PrompbMapSinker interface 350 | // and implements it on all the sinks "extending" them. 351 | // - Call directly MapPrompb on Measure without any type assertion. 
352 | trend, ok := swm.Measure.(prompbMapper) 353 | if !ok { 354 | panic("Measure for Trend types must implement MapPrompb") 355 | } 356 | newts = trend.MapPrompb(swm.TimeSeries, swm.Latest) 357 | 358 | default: 359 | panic( 360 | fmt.Sprintf( 361 | "the output reached an unrecoverable state; unable to recognize processed metric %s's type `%s`", 362 | swm.Metric.Name, 363 | swm.Metric.Type, 364 | ), 365 | ) 366 | } 367 | return newts 368 | } 369 | 370 | type prompbMapper interface { 371 | MapPrompb(series metrics.TimeSeries, t time.Time) []*prompb.TimeSeries 372 | } 373 | 374 | func newSeriesWithMeasure( 375 | series metrics.TimeSeries, 376 | trendAsNativeHistogram bool, 377 | tsr TrendStatsResolver, 378 | ) *seriesWithMeasure { 379 | var sink metrics.Sink 380 | switch series.Metric.Type { 381 | case metrics.Counter: 382 | sink = &metrics.CounterSink{} 383 | case metrics.Gauge: 384 | sink = &metrics.GaugeSink{} 385 | case metrics.Trend: 386 | // TODO: refactor to encapsulate this in a factory method 387 | if trendAsNativeHistogram { 388 | sink = newNativeHistogramSink(series.Metric) 389 | } else { 390 | var err error 391 | sink, err = newExtendedTrendSink(tsr) 392 | if err != nil { 393 | // the resolver must already be validated 394 | panic(err) 395 | } 396 | } 397 | case metrics.Rate: 398 | sink = &metrics.RateSink{} 399 | default: 400 | panic(fmt.Sprintf("metric type %q unsupported", series.Metric.Type.String())) 401 | } 402 | return &seriesWithMeasure{ 403 | TimeSeries: series, 404 | Measure: sink, 405 | } 406 | } 407 | -------------------------------------------------------------------------------- /pkg/remotewrite/config.go: -------------------------------------------------------------------------------- 1 | package remotewrite 2 | 3 | import ( 4 | "crypto/tls" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "net/http" 9 | "strconv" 10 | "strings" 11 | "time" 12 | 13 | "github.com/grafana/xk6-output-prometheus-remote/pkg/sigv4" 14 | 15 | "github.com/grafana/xk6-output-prometheus-remote/pkg/remote" 16 | "go.k6.io/k6/lib/types" 17 | "gopkg.in/guregu/null.v3" 18 | ) 19 | 20 | const ( 21 | defaultServerURL = "http://localhost:9090/api/v1/write" 22 | defaultTimeout = 5 * time.Second 23 | defaultPushInterval = 5 * time.Second 24 | defaultMetricPrefix = "k6_" 25 | ) 26 | 27 | //nolint:gochecknoglobals 28 | var defaultTrendStats = []string{"p(99)"} 29 | 30 | // Config contains the configuration for the Output. 31 | type Config struct { 32 | // ServerURL contains the absolute URL of the Write endpoint where the time series are flushed. 33 | ServerURL null.String `json:"url"` 34 | 35 | // Headers contains additional headers that should be included in the HTTP requests. 36 | Headers map[string]string `json:"headers"` 37 | 38 | // InsecureSkipTLSVerify skips TLS client side checks. 39 | InsecureSkipTLSVerify null.Bool `json:"insecureSkipTLSVerify"` 40 | 41 | // Username is the user for Basic Auth. 42 | Username null.String `json:"username"` 43 | 44 | // Password is the password for Basic Auth. 45 | Password null.String `json:"password"` 46 | 47 | // ClientCertificate is the public key of the SSL certificate. 48 | // It is expected to be the path of the certificate on the file system. 49 | // If a dedicated Certificate Authority is required, it should be added 50 | // to the conventional folders defined by the operating system's registry. 51 | ClientCertificate null.String `json:"clientCertificate"` 52 | 53 | // ClientCertificateKey is the private key of the SSL certificate.
54 | // It is expected to be the path of the certificate's private key on the file system. 55 | ClientCertificateKey null.String `json:"clientCertificateKey"` 56 | 57 | // BearerToken, if set, is the token used for the `Authorization` header. 58 | BearerToken null.String `json:"bearerToken"` 59 | 60 | // PushInterval defines the time between flushes. The Output will wait the set time 61 | // before pushing a new set of time series to the endpoint. 62 | PushInterval types.NullDuration `json:"pushInterval"` 63 | 64 | // TrendAsNativeHistogram defines whether metrics of the Trend type 65 | // should be mapped to a Prometheus Native Histogram. 66 | TrendAsNativeHistogram null.Bool `json:"trendAsNativeHistogram"` 67 | 68 | // TrendStats defines the stats to flush for Trend metrics. 69 | // 70 | // TODO: should we support K6_SUMMARY_TREND_STATS? 71 | TrendStats []string `json:"trendStats"` 72 | 73 | StaleMarkers null.Bool `json:"staleMarkers"` 74 | 75 | // SigV4Region is the AWS region where the workspace is. 76 | SigV4Region null.String `json:"sigV4Region"` 77 | 78 | // SigV4AccessKey is the AWS access key. 79 | SigV4AccessKey null.String `json:"sigV4AccessKey"` 80 | 81 | // SigV4SecretKey is the AWS secret key. 82 | SigV4SecretKey null.String `json:"sigV4SecretKey"` 83 | } 84 | 85 | // NewConfig creates an Output's configuration. 86 | func NewConfig() Config { 87 | return Config{ 88 | ServerURL: null.StringFrom(defaultServerURL), 89 | InsecureSkipTLSVerify: null.BoolFrom(false), 90 | Username: null.NewString("", false), 91 | Password: null.NewString("", false), 92 | PushInterval: types.NullDurationFrom(defaultPushInterval), 93 | Headers: make(map[string]string), 94 | TrendStats: defaultTrendStats, 95 | StaleMarkers: null.BoolFrom(false), 96 | SigV4Region: null.NewString("", false), 97 | SigV4AccessKey: null.NewString("", false), 98 | SigV4SecretKey: null.NewString("", false), 99 | } 100 | } 101 | 102 | // RemoteConfig creates a configuration for the HTTP Remote-write client. 103 | func (conf Config) RemoteConfig() (*remote.HTTPConfig, error) { 104 | hc := remote.HTTPConfig{ 105 | Timeout: defaultTimeout, 106 | } 107 | 108 | // if at least a valid username was configured, use basic auth 109 | if conf.Username.Valid { 110 | hc.BasicAuth = &remote.BasicAuth{ 111 | Username: conf.Username.String, 112 | Password: conf.Password.String, 113 | } 114 | } 115 | 116 | hc.TLSConfig = &tls.Config{ 117 | InsecureSkipVerify: conf.InsecureSkipTLSVerify.Bool, //nolint:gosec 118 | } 119 | 120 | if conf.ClientCertificate.Valid && conf.ClientCertificateKey.Valid { 121 | cert, err := tls.LoadX509KeyPair(conf.ClientCertificate.String, conf.ClientCertificateKey.String) 122 | if err != nil { 123 | return nil, fmt.Errorf("failed to load the TLS certificate: %w", err) 124 | } 125 | hc.TLSConfig.Certificates = []tls.Certificate{cert} 126 | } 127 | 128 | if isSigV4PartiallyConfigured(conf.SigV4Region, conf.SigV4AccessKey, conf.SigV4SecretKey) { 129 | return nil, errors.New( 130 | "sigv4 seems to be partially configured. All of " + 131 | "K6_PROMETHEUS_RW_SIGV4_REGION, K6_PROMETHEUS_RW_SIGV4_ACCESS_KEY, K6_PROMETHEUS_RW_SIGV4_SECRET_KEY " + 132 | "must be set.
Unset all to bypass sigv4", 133 | ) 134 | } 135 | 136 | if conf.SigV4Region.Valid && conf.SigV4AccessKey.Valid && conf.SigV4SecretKey.Valid { 137 | hc.SigV4 = &sigv4.Config{ 138 | Region: conf.SigV4Region.String, 139 | AwsAccessKeyID: conf.SigV4AccessKey.String, 140 | AwsSecretAccessKey: conf.SigV4SecretKey.String, 141 | } 142 | } 143 | 144 | if len(conf.Headers) > 0 { 145 | hc.Headers = make(http.Header) 146 | for k, v := range conf.Headers { 147 | hc.Headers.Add(k, v) 148 | } 149 | } 150 | 151 | if conf.BearerToken.String != "" { 152 | if hc.Headers == nil { 153 | hc.Headers = make(http.Header) 154 | } 155 | hc.Headers.Set("Authorization", "Bearer "+conf.BearerToken.String) 156 | } 157 | 158 | return &hc, nil 159 | } 160 | 161 | // Apply merges applied Config into base. 162 | func (conf Config) Apply(applied Config) Config { 163 | if applied.ServerURL.Valid { 164 | conf.ServerURL = applied.ServerURL 165 | } 166 | 167 | if applied.InsecureSkipTLSVerify.Valid { 168 | conf.InsecureSkipTLSVerify = applied.InsecureSkipTLSVerify 169 | } 170 | 171 | if applied.Username.Valid { 172 | conf.Username = applied.Username 173 | } 174 | 175 | if applied.Password.Valid { 176 | conf.Password = applied.Password 177 | } 178 | 179 | if applied.BearerToken.Valid { 180 | conf.BearerToken = applied.BearerToken 181 | } 182 | 183 | if applied.SigV4Region.Valid { 184 | conf.SigV4Region = applied.SigV4Region 185 | } 186 | 187 | if applied.SigV4AccessKey.Valid { 188 | conf.SigV4AccessKey = applied.SigV4AccessKey 189 | } 190 | 191 | if applied.SigV4SecretKey.Valid { 192 | conf.SigV4SecretKey = applied.SigV4SecretKey 193 | } 194 | 195 | if applied.PushInterval.Valid { 196 | conf.PushInterval = applied.PushInterval 197 | } 198 | 199 | if applied.TrendAsNativeHistogram.Valid { 200 | conf.TrendAsNativeHistogram = applied.TrendAsNativeHistogram 201 | } 202 | 203 | if applied.StaleMarkers.Valid { 204 | conf.StaleMarkers = applied.StaleMarkers 205 | } 206 | 207 | if len(applied.Headers) > 0 { 208 | for k, v := range applied.Headers { 209 | conf.Headers[k] = v 210 | } 211 | } 212 | 213 | if len(applied.TrendStats) > 0 { 214 | conf.TrendStats = make([]string, len(applied.TrendStats)) 215 | copy(conf.TrendStats, applied.TrendStats) 216 | } 217 | 218 | if applied.ClientCertificate.Valid { 219 | conf.ClientCertificate = applied.ClientCertificate 220 | } 221 | 222 | if applied.ClientCertificateKey.Valid { 223 | conf.ClientCertificateKey = applied.ClientCertificateKey 224 | } 225 | 226 | return conf 227 | } 228 | 229 | // GetConsolidatedConfig combines the options' values from the different sources 230 | // and returns the merged options. The Order of precedence used is documented 231 | // in the k6 Documentation https://k6.io/docs/using-k6/k6-options/how-to/#order-of-precedence. 232 | func GetConsolidatedConfig(jsonRawConf json.RawMessage, env map[string]string, _ string) (Config, error) { 233 | result := NewConfig() 234 | if jsonRawConf != nil { 235 | jsonConf, err := parseJSON(jsonRawConf) 236 | if err != nil { 237 | return result, fmt.Errorf("parse JSON options failed: %w", err) 238 | } 239 | result = result.Apply(jsonConf) 240 | } 241 | 242 | if len(env) > 0 { 243 | envConf, err := parseEnvs(env) 244 | if err != nil { 245 | return result, fmt.Errorf("parse environment variables options failed: %w", err) 246 | } 247 | result = result.Apply(envConf) 248 | } 249 | 250 | // TODO: define a way for defining Output's options 251 | // then support them. 
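// As an illustrative usage sketch of the precedence implemented above
// (defaults, then JSON options, then environment variables; hypothetical values):
//
//	conf, err := GetConsolidatedConfig(
//	    json.RawMessage(`{"url":"http://json:9090"}`),
//	    map[string]string{"K6_PROMETHEUS_RW_SERVER_URL": "http://env:9090"},
//	    "")
//	// err == nil; conf.ServerURL.String == "http://env:9090" (env overrides JSON)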
252 | // url is the third GetConsolidatedConfig's argument which is omitted for now 253 | //nolint:gocritic 254 | // 255 | //if url != "" { 256 | //urlConf, err := parseArg(url) 257 | //if err != nil { 258 | //return result, fmt.Errorf("parse argument string options failed: %w", err) 259 | //} 260 | //result = result.Apply(urlConf) 261 | //} 262 | 263 | return result, nil 264 | } 265 | 266 | func envBool(env map[string]string, name string) (null.Bool, error) { 267 | if v, vDefined := env[name]; vDefined { 268 | b, err := strconv.ParseBool(v) 269 | if err != nil { 270 | return null.NewBool(false, false), err 271 | } 272 | 273 | return null.BoolFrom(b), nil 274 | } 275 | return null.NewBool(false, false), nil 276 | } 277 | 278 | func envMap(env map[string]string, prefix string) map[string]string { 279 | result := make(map[string]string) 280 | for ek, ev := range env { 281 | if strings.HasPrefix(ek, prefix) { 282 | k := strings.TrimPrefix(ek, prefix) 283 | result[k] = ev 284 | } 285 | } 286 | return result 287 | } 288 | 289 | // TODO: try to migrate to github.com/mstoykov/envconfig like it's done on other projects? 290 | func parseEnvs(env map[string]string) (Config, error) { //nolint:funlen 291 | c := Config{ 292 | Headers: make(map[string]string), 293 | } 294 | 295 | if pushInterval, pushIntervalDefined := env["K6_PROMETHEUS_RW_PUSH_INTERVAL"]; pushIntervalDefined { 296 | if err := c.PushInterval.UnmarshalText([]byte(pushInterval)); err != nil { 297 | return c, err 298 | } 299 | } 300 | 301 | if url, urlDefined := env["K6_PROMETHEUS_RW_SERVER_URL"]; urlDefined { 302 | c.ServerURL = null.StringFrom(url) 303 | } 304 | 305 | if b, err := envBool(env, "K6_PROMETHEUS_RW_INSECURE_SKIP_TLS_VERIFY"); err != nil { 306 | return c, err 307 | } else if b.Valid { 308 | c.InsecureSkipTLSVerify = b 309 | } 310 | 311 | if user, userDefined := env["K6_PROMETHEUS_RW_USERNAME"]; userDefined { 312 | c.Username = null.StringFrom(user) 313 | } 314 | 315 | if password, passwordDefined := env["K6_PROMETHEUS_RW_PASSWORD"]; passwordDefined { 316 | c.Password = null.StringFrom(password) 317 | } 318 | 319 | if clientCertificate, certDefined := env["K6_PROMETHEUS_RW_CLIENT_CERTIFICATE"]; certDefined { 320 | c.ClientCertificate = null.StringFrom(clientCertificate) 321 | } 322 | 323 | if clientCertificateKey, certDefined := env["K6_PROMETHEUS_RW_CLIENT_CERTIFICATE_KEY"]; certDefined { 324 | c.ClientCertificateKey = null.StringFrom(clientCertificateKey) 325 | } 326 | 327 | if token, tokenDefined := env["K6_PROMETHEUS_RW_BEARER_TOKEN"]; tokenDefined { 328 | c.BearerToken = null.StringFrom(token) 329 | } 330 | 331 | envHeaders := envMap(env, "K6_PROMETHEUS_RW_HEADERS_") 332 | for k, v := range envHeaders { 333 | c.Headers[k] = v 334 | } 335 | 336 | if headers, headersDefined := env["K6_PROMETHEUS_RW_HTTP_HEADERS"]; headersDefined { 337 | for _, kvPair := range strings.Split(headers, ",") { 338 | header := strings.Split(kvPair, ":") 339 | if len(header) != 2 { 340 | return c, fmt.Errorf("the provided header (%s) does not respect the expected format
:", kvPair) 341 | } 342 | c.Headers[header[0]] = header[1] 343 | } 344 | } 345 | 346 | if sigV4Region, sigV4RegionDefined := env["K6_PROMETHEUS_RW_SIGV4_REGION"]; sigV4RegionDefined { 347 | c.SigV4Region = null.StringFrom(sigV4Region) 348 | } 349 | 350 | if sigV4AccessKey, sigV4AccessKeyDefined := env["K6_PROMETHEUS_RW_SIGV4_ACCESS_KEY"]; sigV4AccessKeyDefined { 351 | c.SigV4AccessKey = null.StringFrom(sigV4AccessKey) 352 | } 353 | 354 | if sigV4SecretKey, sigV4SecretKeyDefined := env["K6_PROMETHEUS_RW_SIGV4_SECRET_KEY"]; sigV4SecretKeyDefined { 355 | c.SigV4SecretKey = null.StringFrom(sigV4SecretKey) 356 | } 357 | 358 | if b, err := envBool(env, "K6_PROMETHEUS_RW_TREND_AS_NATIVE_HISTOGRAM"); err != nil { 359 | return c, err 360 | } else if b.Valid { 361 | c.TrendAsNativeHistogram = b 362 | } 363 | 364 | if b, err := envBool(env, "K6_PROMETHEUS_RW_STALE_MARKERS"); err != nil { 365 | return c, err 366 | } else if b.Valid { 367 | c.StaleMarkers = b 368 | } 369 | 370 | if trendStats, trendStatsDefined := env["K6_PROMETHEUS_RW_TREND_STATS"]; trendStatsDefined { 371 | c.TrendStats = strings.Split(trendStats, ",") 372 | } 373 | 374 | return c, nil 375 | } 376 | 377 | // parseJSON parses the supplied JSON into a Config. 378 | func parseJSON(data json.RawMessage) (Config, error) { 379 | var c Config 380 | err := json.Unmarshal(data, &c) 381 | return c, err 382 | } 383 | 384 | // parseArg parses the supplied string of arguments into a Config. 385 | func parseArg(text string) (Config, error) { 386 | var c Config 387 | opts := strings.Split(text, ",") 388 | 389 | for _, opt := range opts { 390 | r := strings.SplitN(opt, "=", 2) 391 | if len(r) != 2 { 392 | return c, fmt.Errorf("couldn't parse argument %q as option", opt) 393 | } 394 | key, v := r[0], r[1] 395 | switch key { 396 | case "url": 397 | c.ServerURL = null.StringFrom(v) 398 | case "insecureSkipTLSVerify": 399 | if err := c.InsecureSkipTLSVerify.UnmarshalText([]byte(v)); err != nil { 400 | return c, fmt.Errorf("insecureSkipTLSVerify value must be true or false, not %q", v) 401 | } 402 | case "username": 403 | c.Username = null.StringFrom(v) 404 | case "password": 405 | c.Password = null.StringFrom(v) 406 | case "pushInterval": 407 | if err := c.PushInterval.UnmarshalText([]byte(v)); err != nil { 408 | return c, err 409 | } 410 | case "trendAsNativeHistogram": 411 | if err := c.TrendAsNativeHistogram.UnmarshalText([]byte(v)); err != nil { 412 | return c, fmt.Errorf("trendAsNativeHistogram value must be true or false, not %q", v) 413 | } 414 | 415 | // TODO: add the support for trendStats 416 | // strvals doesn't support the same format used by --summary-trend-stats 417 | // using the comma as the separator, because it is already used for 418 | // dividing the keys. 
419 | //nolint:gocritic 420 | // 421 | //if v, ok := params["trendStats"].(string); ok && len(v) > 0 { 422 | //c.TrendStats = strings.Split(v, ",") 423 | //} 424 | 425 | case "clientCertificate": 426 | c.ClientCertificate = null.StringFrom(v) 427 | case "clientCertificateKey": 428 | c.ClientCertificateKey = null.StringFrom(v) 429 | 430 | default: 431 | if !strings.HasPrefix(key, "headers.") { 432 | return c, fmt.Errorf("%q is an unknown option's key", r[0]) 433 | } 434 | if c.Headers == nil { 435 | c.Headers = make(map[string]string) 436 | } 437 | c.Headers[strings.TrimPrefix(key, "headers.")] = v 438 | } 439 | } 440 | 441 | return c, nil 442 | } 443 | 444 | func isSigV4PartiallyConfigured(region, accessKey, secretKey null.String) bool { 445 | hasRegion := region.Valid && len(strings.TrimSpace(region.String)) != 0 446 | hasAccessID := accessKey.Valid && len(strings.TrimSpace(accessKey.String)) != 0 447 | hasSecretAccessKey := secretKey.Valid && len(strings.TrimSpace(secretKey.String)) != 0 448 | // either they are all set, or all not set. False if partial 449 | isComplete := (hasRegion && hasAccessID && hasSecretAccessKey) || (!hasRegion && !hasAccessID && !hasSecretAccessKey) 450 | return !isComplete 451 | } 452 | -------------------------------------------------------------------------------- /pkg/remotewrite/config_test.go: -------------------------------------------------------------------------------- 1 | package remotewrite 2 | 3 | import ( 4 | "crypto/tls" 5 | "encoding/json" 6 | "fmt" 7 | "net/http" 8 | "net/url" 9 | "testing" 10 | "time" 11 | 12 | "github.com/grafana/xk6-output-prometheus-remote/pkg/remote" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | "go.k6.io/k6/lib/types" 16 | "gopkg.in/guregu/null.v3" 17 | ) 18 | 19 | func TestConfigApply(t *testing.T) { 20 | t.Parallel() 21 | 22 | fullConfig := Config{ 23 | ServerURL: null.StringFrom("some-url"), 24 | InsecureSkipTLSVerify: null.BoolFrom(false), 25 | Username: null.StringFrom("user"), 26 | Password: null.StringFrom("pass"), 27 | PushInterval: types.NullDurationFrom(10 * time.Second), 28 | Headers: map[string]string{ 29 | "X-Header": "value", 30 | }, 31 | TrendStats: []string{"p(99)"}, 32 | StaleMarkers: null.BoolFrom(true), 33 | } 34 | 35 | // Defaults should be overwritten by valid values 36 | c := NewConfig() 37 | c = c.Apply(fullConfig) 38 | assert.Equal(t, fullConfig, c) 39 | 40 | // Defaults shouldn't be impacted by invalid values 41 | c = NewConfig() 42 | c = c.Apply(Config{ 43 | Username: null.NewString("user", false), 44 | Password: null.NewString("pass", false), 45 | InsecureSkipTLSVerify: null.NewBool(false, false), 46 | }) 47 | assert.Equal(t, false, c.Username.Valid) 48 | assert.Equal(t, false, c.Password.Valid) 49 | assert.Equal(t, true, c.InsecureSkipTLSVerify.Valid) 50 | } 51 | 52 | func TestConfigRemoteConfig(t *testing.T) { 53 | t.Parallel() 54 | u, err := url.Parse("https://prometheus.ie/remote") 55 | require.NoError(t, err) 56 | 57 | config := Config{ 58 | ServerURL: null.StringFrom(u.String()), 59 | InsecureSkipTLSVerify: null.BoolFrom(true), 60 | Username: null.StringFrom("myuser"), 61 | Password: null.StringFrom("mypass"), 62 | Headers: map[string]string{ 63 | "X-MYCUSTOM-HEADER": "val1", 64 | // it asserts that Authz header is overwritten if the token is set 65 | "Authorization": "pre-set-token", 66 | }, 67 | BearerToken: null.StringFrom("my-fake-token"), 68 | } 69 | 70 | headers := http.Header{} 71 | headers.Set("X-MYCUSTOM-HEADER", "val1") 72 | 
headers.Set("Authorization", "Bearer my-fake-token") 73 | exprcc := &remote.HTTPConfig{ 74 | Timeout: 5 * time.Second, 75 | TLSConfig: &tls.Config{ 76 | InsecureSkipVerify: true, //nolint:gosec 77 | }, 78 | BasicAuth: &remote.BasicAuth{ 79 | Username: "myuser", 80 | Password: "mypass", 81 | }, 82 | Headers: headers, 83 | } 84 | rcc, err := config.RemoteConfig() 85 | require.NoError(t, err) 86 | assert.Equal(t, exprcc, rcc) 87 | } 88 | 89 | func TestConfigRemoteConfigClientCertificateError(t *testing.T) { 90 | t.Parallel() 91 | 92 | config := Config{ 93 | ClientCertificate: null.StringFrom("bad-cert-value"), 94 | ClientCertificateKey: null.StringFrom("bad-cert-key"), 95 | } 96 | 97 | rcc, err := config.RemoteConfig() 98 | assert.ErrorContains(t, err, "TLS certificate") 99 | assert.Nil(t, rcc) 100 | } 101 | 102 | func TestGetConsolidatedConfig(t *testing.T) { 103 | t.Parallel() 104 | 105 | u, err := url.Parse("https://prometheus.ie/remote") 106 | require.NoError(t, err) 107 | 108 | testCases := map[string]struct { 109 | jsonRaw json.RawMessage 110 | env map[string]string 111 | arg string 112 | config Config 113 | errString string 114 | }{ 115 | "Defaults": { 116 | jsonRaw: nil, 117 | env: nil, 118 | arg: "", 119 | config: Config{ 120 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 121 | InsecureSkipTLSVerify: null.BoolFrom(false), 122 | Username: null.NewString("", false), 123 | Password: null.NewString("", false), 124 | PushInterval: types.NullDurationFrom(5 * time.Second), 125 | Headers: make(map[string]string), 126 | TrendStats: []string{"p(99)"}, 127 | StaleMarkers: null.BoolFrom(false), 128 | }, 129 | }, 130 | "JSONSuccess": { 131 | jsonRaw: json.RawMessage(fmt.Sprintf(`{"url":"%s"}`, u.String())), 132 | config: Config{ 133 | ServerURL: null.StringFrom(u.String()), 134 | InsecureSkipTLSVerify: null.BoolFrom(false), 135 | Username: null.NewString("", false), 136 | Password: null.NewString("", false), 137 | PushInterval: types.NullDurationFrom(defaultPushInterval), 138 | Headers: make(map[string]string), 139 | TrendStats: []string{"p(99)"}, 140 | StaleMarkers: null.BoolFrom(false), 141 | }, 142 | }, 143 | "MixedSuccess": { 144 | jsonRaw: json.RawMessage(fmt.Sprintf(`{"url":"%s"}`, u.String())), 145 | env: map[string]string{ 146 | "K6_PROMETHEUS_RW_INSECURE_SKIP_TLS_VERIFY": "false", 147 | "K6_PROMETHEUS_RW_USERNAME": "u", 148 | }, 149 | // arg: "username=user", 150 | config: Config{ 151 | ServerURL: null.StringFrom(u.String()), 152 | InsecureSkipTLSVerify: null.BoolFrom(false), 153 | Username: null.NewString("u", true), 154 | Password: null.NewString("", false), 155 | PushInterval: types.NullDurationFrom(defaultPushInterval), 156 | Headers: make(map[string]string), 157 | TrendStats: []string{"p(99)"}, 158 | StaleMarkers: null.BoolFrom(false), 159 | }, 160 | }, 161 | "OrderOfPrecedence": { 162 | jsonRaw: json.RawMessage(`{"url":"http://json:9090","username":"json","password":"json"}`), 163 | env: map[string]string{ 164 | "K6_PROMETHEUS_RW_USERNAME": "env", 165 | "K6_PROMETHEUS_RW_PASSWORD": "env", 166 | }, 167 | // arg: "password=arg", 168 | config: Config{ 169 | ServerURL: null.StringFrom("http://json:9090"), 170 | InsecureSkipTLSVerify: null.BoolFrom(false), 171 | Username: null.StringFrom("env"), 172 | Password: null.StringFrom("env"), 173 | PushInterval: types.NullDurationFrom(defaultPushInterval), 174 | Headers: make(map[string]string), 175 | TrendStats: []string{"p(99)"}, 176 | StaleMarkers: null.BoolFrom(false), 177 | }, 178 | }, 179 | "InvalidJSON": { 180 | 
jsonRaw: json.RawMessage(`{"invalid-json "astring"}`), 181 | errString: "parse JSON options failed", 182 | }, 183 | "InvalidEnv": { 184 | env: map[string]string{"K6_PROMETHEUS_RW_INSECURE_SKIP_TLS_VERIFY": "d"}, 185 | errString: "parse environment variables options failed", 186 | }, 187 | //nolint:gocritic 188 | //"InvalidArg": { 189 | //arg: "insecureSkipTLSVerify=wrongtime", 190 | //errString: "parse argument string options failed", 191 | //}, 192 | } 193 | 194 | for name, testCase := range testCases { 195 | testCase := testCase 196 | t.Run(name, func(t *testing.T) { 197 | t.Parallel() 198 | c, err := GetConsolidatedConfig(testCase.jsonRaw, testCase.env, testCase.arg) 199 | if len(testCase.errString) > 0 { 200 | require.NotNil(t, err) 201 | assert.Contains(t, err.Error(), testCase.errString) 202 | return 203 | } 204 | assert.Equal(t, testCase.config, c) 205 | }) 206 | } 207 | } 208 | 209 | func TestParseServerURL(t *testing.T) { 210 | t.Parallel() 211 | 212 | c, err := parseArg("url=http://prometheus.remote:3412/write") 213 | assert.Nil(t, err) 214 | assert.Equal(t, null.StringFrom("http://prometheus.remote:3412/write"), c.ServerURL) 215 | 216 | c, err = parseArg("url=http://prometheus.remote:3412/write,insecureSkipTLSVerify=false,pushInterval=2s") 217 | assert.Nil(t, err) 218 | assert.Equal(t, null.StringFrom("http://prometheus.remote:3412/write"), c.ServerURL) 219 | assert.Equal(t, null.BoolFrom(false), c.InsecureSkipTLSVerify) 220 | assert.Equal(t, types.NullDurationFrom(time.Second*2), c.PushInterval) 221 | 222 | c, err = parseArg("headers.X-Header=value") 223 | assert.Nil(t, err) 224 | assert.Equal(t, map[string]string{"X-Header": "value"}, c.Headers) 225 | } 226 | 227 | // TODO: replace all the expconfigs below 228 | // with a function that returns the expected default values, 229 | // then override only the values to expect differently. 
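// A minimal sketch of such a helper (hypothetical, not yet wired into the
// tests below), based on the defaults set by NewConfig:
//
//	func expectedDefaultConfig() Config {
//	    return Config{
//	        ServerURL:             null.StringFrom(defaultServerURL),
//	        InsecureSkipTLSVerify: null.BoolFrom(false),
//	        Username:              null.NewString("", false),
//	        Password:              null.NewString("", false),
//	        PushInterval:          types.NullDurationFrom(defaultPushInterval),
//	        Headers:               make(map[string]string),
//	        TrendStats:            []string{"p(99)"},
//	        StaleMarkers:          null.BoolFrom(false),
//	    }
//	}
//
// Each test would then copy this value and override only the fields it
// expects to differ.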
230 | 231 | func TestOptionServerURL(t *testing.T) { 232 | t.Parallel() 233 | 234 | cases := map[string]struct { 235 | arg string 236 | env map[string]string 237 | jsonRaw json.RawMessage 238 | }{ 239 | "JSON": {jsonRaw: json.RawMessage(`{"url":"http://prometheus:9090/api/v1/write"}`)}, 240 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_SERVER_URL": "http://prometheus:9090/api/v1/write"}}, 241 | //nolint:gocritic 242 | //"Arg": {arg: "url=http://prometheus:9090/api/v1/write"}, 243 | } 244 | 245 | expconfig := Config{ 246 | ServerURL: null.StringFrom("http://prometheus:9090/api/v1/write"), 247 | InsecureSkipTLSVerify: null.BoolFrom(false), 248 | Username: null.NewString("", false), 249 | Password: null.NewString("", false), 250 | PushInterval: types.NullDurationFrom(5 * time.Second), 251 | Headers: make(map[string]string), 252 | TrendStats: []string{"p(99)"}, 253 | StaleMarkers: null.BoolFrom(false), 254 | } 255 | for name, tc := range cases { 256 | tc := tc 257 | t.Run(name, func(t *testing.T) { 258 | t.Parallel() 259 | c, err := GetConsolidatedConfig( 260 | tc.jsonRaw, tc.env, tc.arg) 261 | require.NoError(t, err) 262 | assert.Equal(t, expconfig, c) 263 | }) 264 | } 265 | } 266 | 267 | func TestOptionHeaders(t *testing.T) { 268 | t.Parallel() 269 | 270 | cases := map[string]struct { 271 | arg string 272 | env map[string]string 273 | jsonRaw json.RawMessage 274 | }{ 275 | "JSON": {jsonRaw: json.RawMessage( 276 | `{"headers":{"X-MY-HEADER1":"hval1","X-MY-HEADER2":"hval2","X-Scope-OrgID":"my-org-id","another-header":"true","empty":""}}`)}, 277 | "Env": {env: map[string]string{ 278 | "K6_PROMETHEUS_RW_HEADERS_X-MY-HEADER1": "hval1", 279 | "K6_PROMETHEUS_RW_HEADERS_X-MY-HEADER2": "hval2", 280 | // it assert that the new method using HTTP_HEADERS overwrites it 281 | "K6_PROMETHEUS_RW_HEADERS_X-Scope-OrgID": "my-org-id-old-method", 282 | "K6_PROMETHEUS_RW_HTTP_HEADERS": "X-Scope-OrgID:my-org-id,another-header:true,empty:", 283 | }}, 284 | //nolint:gocritic 285 | //"Arg": {arg: "headers.X-MY-HEADER1=hval1,headers.X-MY-HEADER2=hval2"}, 286 | } 287 | 288 | expconfig := Config{ 289 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 290 | InsecureSkipTLSVerify: null.BoolFrom(false), 291 | PushInterval: types.NullDurationFrom(5 * time.Second), 292 | Headers: map[string]string{ 293 | "X-MY-HEADER1": "hval1", 294 | "X-MY-HEADER2": "hval2", 295 | "X-Scope-OrgID": "my-org-id", 296 | "another-header": "true", 297 | "empty": "", 298 | }, 299 | TrendStats: []string{"p(99)"}, 300 | StaleMarkers: null.BoolFrom(false), 301 | } 302 | for name, tc := range cases { 303 | tc := tc 304 | t.Run(name, func(t *testing.T) { 305 | t.Parallel() 306 | c, err := GetConsolidatedConfig( 307 | tc.jsonRaw, tc.env, tc.arg) 308 | require.NoError(t, err) 309 | assert.Equal(t, expconfig, c) 310 | }) 311 | } 312 | } 313 | 314 | func TestOptionInsecureSkipTLSVerify(t *testing.T) { 315 | t.Parallel() 316 | 317 | cases := map[string]struct { 318 | arg string 319 | env map[string]string 320 | jsonRaw json.RawMessage 321 | }{ 322 | "JSON": {jsonRaw: json.RawMessage(`{"insecureSkipTLSVerify":false}`)}, 323 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_INSECURE_SKIP_TLS_VERIFY": "false"}}, 324 | //nolint:gocritic 325 | //"Arg": {arg: "insecureSkipTLSVerify=false"}, 326 | } 327 | 328 | expconfig := Config{ 329 | ServerURL: null.StringFrom(defaultServerURL), 330 | InsecureSkipTLSVerify: null.BoolFrom(false), 331 | PushInterval: types.NullDurationFrom(defaultPushInterval), 332 | Headers: make(map[string]string), 333 
| TrendStats: []string{"p(99)"}, 334 | StaleMarkers: null.BoolFrom(false), 335 | } 336 | for name, tc := range cases { 337 | tc := tc 338 | t.Run(name, func(t *testing.T) { 339 | t.Parallel() 340 | c, err := GetConsolidatedConfig( 341 | tc.jsonRaw, tc.env, tc.arg) 342 | require.NoError(t, err) 343 | assert.Equal(t, expconfig, c) 344 | }) 345 | } 346 | } 347 | 348 | func TestOptionBasicAuth(t *testing.T) { 349 | t.Parallel() 350 | 351 | cases := map[string]struct { 352 | arg string 353 | env map[string]string 354 | jsonRaw json.RawMessage 355 | }{ 356 | "JSON": {jsonRaw: json.RawMessage(`{"username":"user1","password":"pass1"}`)}, 357 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_USERNAME": "user1", "K6_PROMETHEUS_RW_PASSWORD": "pass1"}}, 358 | //nolint:gocritic 359 | //"Arg": {arg: "username=user1,password=pass1"}, 360 | } 361 | 362 | expconfig := Config{ 363 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 364 | InsecureSkipTLSVerify: null.BoolFrom(false), 365 | Username: null.StringFrom("user1"), 366 | Password: null.StringFrom("pass1"), 367 | PushInterval: types.NullDurationFrom(5 * time.Second), 368 | Headers: make(map[string]string), 369 | TrendStats: []string{"p(99)"}, 370 | StaleMarkers: null.BoolFrom(false), 371 | } 372 | 373 | for name, tc := range cases { 374 | tc := tc 375 | t.Run(name, func(t *testing.T) { 376 | t.Parallel() 377 | c, err := GetConsolidatedConfig( 378 | tc.jsonRaw, tc.env, tc.arg) 379 | require.NoError(t, err) 380 | assert.Equal(t, expconfig, c) 381 | }) 382 | } 383 | } 384 | 385 | func TestOptionBearerToken(t *testing.T) { 386 | t.Parallel() 387 | 388 | cases := map[string]struct { 389 | arg string 390 | env map[string]string 391 | jsonRaw json.RawMessage 392 | }{ 393 | "JSON": {jsonRaw: json.RawMessage(`{"bearerToken":"my-bearer-token"}`)}, 394 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_BEARER_TOKEN": "my-bearer-token"}}, 395 | } 396 | 397 | expconfig := Config{ 398 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 399 | InsecureSkipTLSVerify: null.BoolFrom(false), 400 | BearerToken: null.StringFrom("my-bearer-token"), 401 | PushInterval: types.NullDurationFrom(5 * time.Second), 402 | Headers: make(map[string]string), 403 | TrendStats: []string{"p(99)"}, 404 | StaleMarkers: null.BoolFrom(false), 405 | } 406 | 407 | for name, tc := range cases { 408 | tc := tc 409 | t.Run(name, func(t *testing.T) { 410 | t.Parallel() 411 | c, err := GetConsolidatedConfig( 412 | tc.jsonRaw, tc.env, tc.arg) 413 | require.NoError(t, err) 414 | assert.Equal(t, expconfig, c) 415 | }) 416 | } 417 | } 418 | 419 | func TestOptionClientCertificate(t *testing.T) { 420 | t.Parallel() 421 | 422 | cases := map[string]struct { 423 | arg string 424 | env map[string]string 425 | jsonRaw json.RawMessage 426 | }{ 427 | "JSON": {jsonRaw: json.RawMessage(`{"clientCertificate":"client.crt","clientCertificateKey":"client.key"}`)}, 428 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_CLIENT_CERTIFICATE": "client.crt", "K6_PROMETHEUS_RW_CLIENT_CERTIFICATE_KEY": "client.key"}}, 429 | } 430 | 431 | expconfig := Config{ 432 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 433 | InsecureSkipTLSVerify: null.BoolFrom(false), 434 | PushInterval: types.NullDurationFrom(5 * time.Second), 435 | Headers: make(map[string]string), 436 | TrendStats: []string{"p(99)"}, 437 | ClientCertificate: null.StringFrom("client.crt"), 438 | ClientCertificateKey: null.StringFrom("client.key"), 439 | StaleMarkers: null.BoolFrom(false), 440 | } 441 | 442 | for 
name, tc := range cases { 443 | tc := tc 444 | t.Run(name, func(t *testing.T) { 445 | t.Parallel() 446 | c, err := GetConsolidatedConfig( 447 | tc.jsonRaw, tc.env, tc.arg) 448 | require.NoError(t, err) 449 | assert.Equal(t, expconfig, c) 450 | }) 451 | } 452 | } 453 | 454 | func TestOptionTrendAsNativeHistogram(t *testing.T) { 455 | t.Parallel() 456 | 457 | cases := map[string]struct { 458 | arg string 459 | env map[string]string 460 | jsonRaw json.RawMessage 461 | }{ 462 | "JSON": {jsonRaw: json.RawMessage(`{"trendAsNativeHistogram":true}`)}, 463 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_TREND_AS_NATIVE_HISTOGRAM": "true"}}, 464 | //nolint:gocritic 465 | //"Arg": {arg: "trendAsNativeHistogram=true"}, 466 | } 467 | 468 | expconfig := Config{ 469 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 470 | InsecureSkipTLSVerify: null.BoolFrom(false), 471 | Username: null.NewString("", false), 472 | Password: null.NewString("", false), 473 | PushInterval: types.NullDurationFrom(5 * time.Second), 474 | Headers: make(map[string]string), 475 | TrendAsNativeHistogram: null.BoolFrom(true), 476 | TrendStats: []string{"p(99)"}, 477 | StaleMarkers: null.BoolFrom(false), 478 | } 479 | 480 | for name, tc := range cases { 481 | tc := tc 482 | t.Run(name, func(t *testing.T) { 483 | t.Parallel() 484 | c, err := GetConsolidatedConfig( 485 | tc.jsonRaw, tc.env, tc.arg) 486 | require.NoError(t, err) 487 | assert.Equal(t, expconfig, c) 488 | }) 489 | } 490 | } 491 | 492 | func TestOptionPushInterval(t *testing.T) { 493 | t.Parallel() 494 | 495 | cases := map[string]struct { 496 | arg string 497 | env map[string]string 498 | jsonRaw json.RawMessage 499 | }{ 500 | "JSON": {jsonRaw: json.RawMessage(`{"pushInterval":"1m2s"}`)}, 501 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_PUSH_INTERVAL": "1m2s"}}, 502 | } 503 | 504 | expconfig := Config{ 505 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 506 | InsecureSkipTLSVerify: null.BoolFrom(false), 507 | Username: null.NewString("", false), 508 | Password: null.NewString("", false), 509 | PushInterval: types.NullDurationFrom((1 * time.Minute) + (2 * time.Second)), 510 | Headers: make(map[string]string), 511 | TrendStats: []string{"p(99)"}, 512 | StaleMarkers: null.BoolFrom(false), 513 | } 514 | 515 | for name, tc := range cases { 516 | tc := tc 517 | t.Run(name, func(t *testing.T) { 518 | t.Parallel() 519 | c, err := GetConsolidatedConfig( 520 | tc.jsonRaw, tc.env, tc.arg) 521 | require.NoError(t, err) 522 | assert.Equal(t, expconfig, c) 523 | }) 524 | } 525 | } 526 | 527 | func TestConfigTrendStats(t *testing.T) { 528 | t.Parallel() 529 | 530 | cases := map[string]struct { 531 | arg string 532 | env map[string]string 533 | jsonRaw json.RawMessage 534 | }{ 535 | "JSON": {jsonRaw: json.RawMessage(`{"trendStats":["max","p(95)"]}`)}, 536 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_TREND_STATS": "max,p(95)"}}, 537 | // TODO: support arg, check the comment in the code 538 | //nolint:gocritic 539 | //"Arg": {arg: "trendStats=max,p(95)"}, 540 | } 541 | 542 | expconfig := Config{ 543 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 544 | InsecureSkipTLSVerify: null.BoolFrom(false), 545 | PushInterval: types.NullDurationFrom(5 * time.Second), 546 | Headers: make(map[string]string), 547 | TrendStats: []string{"max", "p(95)"}, 548 | StaleMarkers: null.BoolFrom(false), 549 | } 550 | 551 | for name, tc := range cases { 552 | tc := tc 553 | t.Run(name, func(t *testing.T) { 554 | t.Parallel() 555 | c, err := 
GetConsolidatedConfig( 556 | tc.jsonRaw, tc.env, tc.arg) 557 | require.NoError(t, err) 558 | assert.Equal(t, expconfig, c) 559 | }) 560 | } 561 | } 562 | 563 | func TestOptionStaleMarker(t *testing.T) { 564 | t.Parallel() 565 | 566 | cases := map[string]struct { 567 | arg string 568 | env map[string]string 569 | jsonRaw json.RawMessage 570 | }{ 571 | "JSON": {jsonRaw: json.RawMessage(`{"staleMarkers":true}`)}, 572 | "Env": {env: map[string]string{"K6_PROMETHEUS_RW_STALE_MARKERS": "true"}}, 573 | } 574 | 575 | expconfig := Config{ 576 | ServerURL: null.StringFrom("http://localhost:9090/api/v1/write"), 577 | InsecureSkipTLSVerify: null.BoolFrom(false), 578 | PushInterval: types.NullDurationFrom(5 * time.Second), 579 | Headers: make(map[string]string), 580 | TrendStats: []string{"p(99)"}, 581 | StaleMarkers: null.BoolFrom(true), 582 | } 583 | 584 | for name, tc := range cases { 585 | tc := tc 586 | t.Run(name, func(t *testing.T) { 587 | t.Parallel() 588 | c, err := GetConsolidatedConfig( 589 | tc.jsonRaw, tc.env, tc.arg) 590 | require.NoError(t, err) 591 | assert.Equal(t, expconfig, c) 592 | }) 593 | } 594 | } 595 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | ### GNU AFFERO GENERAL PUBLIC LICENSE 2 | 3 | Version 3, 19 November 2007 4 | 5 | Copyright (C) 2007 Free Software Foundation, Inc. 6 | 7 | 8 | Everyone is permitted to copy and distribute verbatim copies of this 9 | license document, but changing it is not allowed. 10 | 11 | ### Preamble 12 | 13 | The GNU Affero General Public License is a free, copyleft license for 14 | software and other kinds of works, specifically designed to ensure 15 | cooperation with the community in the case of network server software. 16 | 17 | The licenses for most software and other practical works are designed 18 | to take away your freedom to share and change the works. By contrast, 19 | our General Public Licenses are intended to guarantee your freedom to 20 | share and change all versions of a program--to make sure it remains 21 | free software for all its users. 22 | 23 | When we speak of free software, we are referring to freedom, not 24 | price. Our General Public Licenses are designed to make sure that you 25 | have the freedom to distribute copies of free software (and charge for 26 | them if you wish), that you receive source code or can get it if you 27 | want it, that you can change the software or use pieces of it in new 28 | free programs, and that you know you can do these things. 29 | 30 | Developers that use our General Public Licenses protect your rights 31 | with two steps: (1) assert copyright on the software, and (2) offer 32 | you this License which gives you legal permission to copy, distribute 33 | and/or modify the software. 34 | 35 | A secondary benefit of defending all users' freedom is that 36 | improvements made in alternate versions of the program, if they 37 | receive widespread use, become available for other developers to 38 | incorporate. Many developers of free software are heartened and 39 | encouraged by the resulting cooperation. However, in the case of 40 | software used on network servers, this result may fail to come about. 41 | The GNU General Public License permits making a modified version and 42 | letting the public access it on a server without ever releasing its 43 | source code to the public. 
44 | 45 | The GNU Affero General Public License is designed specifically to 46 | ensure that, in such cases, the modified source code becomes available 47 | to the community. It requires the operator of a network server to 48 | provide the source code of the modified version running there to the 49 | users of that server. Therefore, public use of a modified version, on 50 | a publicly accessible server, gives the public access to the source 51 | code of the modified version. 52 | 53 | An older license, called the Affero General Public License and 54 | published by Affero, was designed to accomplish similar goals. This is 55 | a different license, not a version of the Affero GPL, but Affero has 56 | released a new version of the Affero GPL which permits relicensing 57 | under this license. 58 | 59 | The precise terms and conditions for copying, distribution and 60 | modification follow. 61 | 62 | ### TERMS AND CONDITIONS 63 | 64 | #### 0. Definitions. 65 | 66 | "This License" refers to version 3 of the GNU Affero General Public 67 | License. 68 | 69 | "Copyright" also means copyright-like laws that apply to other kinds 70 | of works, such as semiconductor masks. 71 | 72 | "The Program" refers to any copyrightable work licensed under this 73 | License. Each licensee is addressed as "you". "Licensees" and 74 | "recipients" may be individuals or organizations. 75 | 76 | To "modify" a work means to copy from or adapt all or part of the work 77 | in a fashion requiring copyright permission, other than the making of 78 | an exact copy. The resulting work is called a "modified version" of 79 | the earlier work or a work "based on" the earlier work. 80 | 81 | A "covered work" means either the unmodified Program or a work based 82 | on the Program. 83 | 84 | To "propagate" a work means to do anything with it that, without 85 | permission, would make you directly or secondarily liable for 86 | infringement under applicable copyright law, except executing it on a 87 | computer or modifying a private copy. Propagation includes copying, 88 | distribution (with or without modification), making available to the 89 | public, and in some countries other activities as well. 90 | 91 | To "convey" a work means any kind of propagation that enables other 92 | parties to make or receive copies. Mere interaction with a user 93 | through a computer network, with no transfer of a copy, is not 94 | conveying. 95 | 96 | An interactive user interface displays "Appropriate Legal Notices" to 97 | the extent that it includes a convenient and prominently visible 98 | feature that (1) displays an appropriate copyright notice, and (2) 99 | tells the user that there is no warranty for the work (except to the 100 | extent that warranties are provided), that licensees may convey the 101 | work under this License, and how to view a copy of this License. If 102 | the interface presents a list of user commands or options, such as a 103 | menu, a prominent item in the list meets this criterion. 104 | 105 | #### 1. Source Code. 106 | 107 | The "source code" for a work means the preferred form of the work for 108 | making modifications to it. "Object code" means any non-source form of 109 | a work. 110 | 111 | A "Standard Interface" means an interface that either is an official 112 | standard defined by a recognized standards body, or, in the case of 113 | interfaces specified for a particular programming language, one that 114 | is widely used among developers working in that language. 
115 | 116 | The "System Libraries" of an executable work include anything, other 117 | than the work as a whole, that (a) is included in the normal form of 118 | packaging a Major Component, but which is not part of that Major 119 | Component, and (b) serves only to enable use of the work with that 120 | Major Component, or to implement a Standard Interface for which an 121 | implementation is available to the public in source code form. A 122 | "Major Component", in this context, means a major essential component 123 | (kernel, window system, and so on) of the specific operating system 124 | (if any) on which the executable work runs, or a compiler used to 125 | produce the work, or an object code interpreter used to run it. 126 | 127 | The "Corresponding Source" for a work in object code form means all 128 | the source code needed to generate, install, and (for an executable 129 | work) run the object code and to modify the work, including scripts to 130 | control those activities. However, it does not include the work's 131 | System Libraries, or general-purpose tools or generally available free 132 | programs which are used unmodified in performing those activities but 133 | which are not part of the work. For example, Corresponding Source 134 | includes interface definition files associated with source files for 135 | the work, and the source code for shared libraries and dynamically 136 | linked subprograms that the work is specifically designed to require, 137 | such as by intimate data communication or control flow between those 138 | subprograms and other parts of the work. 139 | 140 | The Corresponding Source need not include anything that users can 141 | regenerate automatically from other parts of the Corresponding Source. 142 | 143 | The Corresponding Source for a work in source code form is that same 144 | work. 145 | 146 | #### 2. Basic Permissions. 147 | 148 | All rights granted under this License are granted for the term of 149 | copyright on the Program, and are irrevocable provided the stated 150 | conditions are met. This License explicitly affirms your unlimited 151 | permission to run the unmodified Program. The output from running a 152 | covered work is covered by this License only if the output, given its 153 | content, constitutes a covered work. This License acknowledges your 154 | rights of fair use or other equivalent, as provided by copyright law. 155 | 156 | You may make, run and propagate covered works that you do not convey, 157 | without conditions so long as your license otherwise remains in force. 158 | You may convey covered works to others for the sole purpose of having 159 | them make modifications exclusively for you, or provide you with 160 | facilities for running those works, provided that you comply with the 161 | terms of this License in conveying all material for which you do not 162 | control copyright. Those thus making or running the covered works for 163 | you must do so exclusively on your behalf, under your direction and 164 | control, on terms that prohibit them from making any copies of your 165 | copyrighted material outside their relationship with you. 166 | 167 | Conveying under any other circumstances is permitted solely under the 168 | conditions stated below. Sublicensing is not allowed; section 10 makes 169 | it unnecessary. 170 | 171 | #### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
172 | 173 | No covered work shall be deemed part of an effective technological 174 | measure under any applicable law fulfilling obligations under article 175 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 176 | similar laws prohibiting or restricting circumvention of such 177 | measures. 178 | 179 | When you convey a covered work, you waive any legal power to forbid 180 | circumvention of technological measures to the extent such 181 | circumvention is effected by exercising rights under this License with 182 | respect to the covered work, and you disclaim any intention to limit 183 | operation or modification of the work as a means of enforcing, against 184 | the work's users, your or third parties' legal rights to forbid 185 | circumvention of technological measures. 186 | 187 | #### 4. Conveying Verbatim Copies. 188 | 189 | You may convey verbatim copies of the Program's source code as you 190 | receive it, in any medium, provided that you conspicuously and 191 | appropriately publish on each copy an appropriate copyright notice; 192 | keep intact all notices stating that this License and any 193 | non-permissive terms added in accord with section 7 apply to the code; 194 | keep intact all notices of the absence of any warranty; and give all 195 | recipients a copy of this License along with the Program. 196 | 197 | You may charge any price or no price for each copy that you convey, 198 | and you may offer support or warranty protection for a fee. 199 | 200 | #### 5. Conveying Modified Source Versions. 201 | 202 | You may convey a work based on the Program, or the modifications to 203 | produce it from the Program, in the form of source code under the 204 | terms of section 4, provided that you also meet all of these 205 | conditions: 206 | 207 | - a) The work must carry prominent notices stating that you modified 208 | it, and giving a relevant date. 209 | - b) The work must carry prominent notices stating that it is 210 | released under this License and any conditions added under 211 | section 7. This requirement modifies the requirement in section 4 212 | to "keep intact all notices". 213 | - c) You must license the entire work, as a whole, under this 214 | License to anyone who comes into possession of a copy. This 215 | License will therefore apply, along with any applicable section 7 216 | additional terms, to the whole of the work, and all its parts, 217 | regardless of how they are packaged. This License gives no 218 | permission to license the work in any other way, but it does not 219 | invalidate such permission if you have separately received it. 220 | - d) If the work has interactive user interfaces, each must display 221 | Appropriate Legal Notices; however, if the Program has interactive 222 | interfaces that do not display Appropriate Legal Notices, your 223 | work need not make them do so. 224 | 225 | A compilation of a covered work with other separate and independent 226 | works, which are not by their nature extensions of the covered work, 227 | and which are not combined with it such as to form a larger program, 228 | in or on a volume of a storage or distribution medium, is called an 229 | "aggregate" if the compilation and its resulting copyright are not 230 | used to limit the access or legal rights of the compilation's users 231 | beyond what the individual works permit. Inclusion of a covered work 232 | in an aggregate does not cause this License to apply to the other 233 | parts of the aggregate. 234 | 235 | #### 6. 
Conveying Non-Source Forms. 236 | 237 | You may convey a covered work in object code form under the terms of 238 | sections 4 and 5, provided that you also convey the machine-readable 239 | Corresponding Source under the terms of this License, in one of these 240 | ways: 241 | 242 | - a) Convey the object code in, or embodied in, a physical product 243 | (including a physical distribution medium), accompanied by the 244 | Corresponding Source fixed on a durable physical medium 245 | customarily used for software interchange. 246 | - b) Convey the object code in, or embodied in, a physical product 247 | (including a physical distribution medium), accompanied by a 248 | written offer, valid for at least three years and valid for as 249 | long as you offer spare parts or customer support for that product 250 | model, to give anyone who possesses the object code either (1) a 251 | copy of the Corresponding Source for all the software in the 252 | product that is covered by this License, on a durable physical 253 | medium customarily used for software interchange, for a price no 254 | more than your reasonable cost of physically performing this 255 | conveying of source, or (2) access to copy the Corresponding 256 | Source from a network server at no charge. 257 | - c) Convey individual copies of the object code with a copy of the 258 | written offer to provide the Corresponding Source. This 259 | alternative is allowed only occasionally and noncommercially, and 260 | only if you received the object code with such an offer, in accord 261 | with subsection 6b. 262 | - d) Convey the object code by offering access from a designated 263 | place (gratis or for a charge), and offer equivalent access to the 264 | Corresponding Source in the same way through the same place at no 265 | further charge. You need not require recipients to copy the 266 | Corresponding Source along with the object code. If the place to 267 | copy the object code is a network server, the Corresponding Source 268 | may be on a different server (operated by you or a third party) 269 | that supports equivalent copying facilities, provided you maintain 270 | clear directions next to the object code saying where to find the 271 | Corresponding Source. Regardless of what server hosts the 272 | Corresponding Source, you remain obligated to ensure that it is 273 | available for as long as needed to satisfy these requirements. 274 | - e) Convey the object code using peer-to-peer transmission, 275 | provided you inform other peers where the object code and 276 | Corresponding Source of the work are being offered to the general 277 | public at no charge under subsection 6d. 278 | 279 | A separable portion of the object code, whose source code is excluded 280 | from the Corresponding Source as a System Library, need not be 281 | included in conveying the object code work. 282 | 283 | A "User Product" is either (1) a "consumer product", which means any 284 | tangible personal property which is normally used for personal, 285 | family, or household purposes, or (2) anything designed or sold for 286 | incorporation into a dwelling. In determining whether a product is a 287 | consumer product, doubtful cases shall be resolved in favor of 288 | coverage. 
For a particular product received by a particular user, 289 | "normally used" refers to a typical or common use of that class of 290 | product, regardless of the status of the particular user or of the way 291 | in which the particular user actually uses, or expects or is expected 292 | to use, the product. A product is a consumer product regardless of 293 | whether the product has substantial commercial, industrial or 294 | non-consumer uses, unless such uses represent the only significant 295 | mode of use of the product. 296 | 297 | "Installation Information" for a User Product means any methods, 298 | procedures, authorization keys, or other information required to 299 | install and execute modified versions of a covered work in that User 300 | Product from a modified version of its Corresponding Source. The 301 | information must suffice to ensure that the continued functioning of 302 | the modified object code is in no case prevented or interfered with 303 | solely because modification has been made. 304 | 305 | If you convey an object code work under this section in, or with, or 306 | specifically for use in, a User Product, and the conveying occurs as 307 | part of a transaction in which the right of possession and use of the 308 | User Product is transferred to the recipient in perpetuity or for a 309 | fixed term (regardless of how the transaction is characterized), the 310 | Corresponding Source conveyed under this section must be accompanied 311 | by the Installation Information. But this requirement does not apply 312 | if neither you nor any third party retains the ability to install 313 | modified object code on the User Product (for example, the work has 314 | been installed in ROM). 315 | 316 | The requirement to provide Installation Information does not include a 317 | requirement to continue to provide support service, warranty, or 318 | updates for a work that has been modified or installed by the 319 | recipient, or for the User Product in which it has been modified or 320 | installed. Access to a network may be denied when the modification 321 | itself materially and adversely affects the operation of the network 322 | or violates the rules and protocols for communication across the 323 | network. 324 | 325 | Corresponding Source conveyed, and Installation Information provided, 326 | in accord with this section must be in a format that is publicly 327 | documented (and with an implementation available to the public in 328 | source code form), and must require no special password or key for 329 | unpacking, reading or copying. 330 | 331 | #### 7. Additional Terms. 332 | 333 | "Additional permissions" are terms that supplement the terms of this 334 | License by making exceptions from one or more of its conditions. 335 | Additional permissions that are applicable to the entire Program shall 336 | be treated as though they were included in this License, to the extent 337 | that they are valid under applicable law. If additional permissions 338 | apply only to part of the Program, that part may be used separately 339 | under those permissions, but the entire Program remains governed by 340 | this License without regard to the additional permissions. 341 | 342 | When you convey a copy of a covered work, you may at your option 343 | remove any additional permissions from that copy, or from any part of 344 | it. (Additional permissions may be written to require their own 345 | removal in certain cases when you modify the work.) 
You may place 346 | additional permissions on material, added by you to a covered work, 347 | for which you have or can give appropriate copyright permission. 348 | 349 | Notwithstanding any other provision of this License, for material you 350 | add to a covered work, you may (if authorized by the copyright holders 351 | of that material) supplement the terms of this License with terms: 352 | 353 | - a) Disclaiming warranty or limiting liability differently from the 354 | terms of sections 15 and 16 of this License; or 355 | - b) Requiring preservation of specified reasonable legal notices or 356 | author attributions in that material or in the Appropriate Legal 357 | Notices displayed by works containing it; or 358 | - c) Prohibiting misrepresentation of the origin of that material, 359 | or requiring that modified versions of such material be marked in 360 | reasonable ways as different from the original version; or 361 | - d) Limiting the use for publicity purposes of names of licensors 362 | or authors of the material; or 363 | - e) Declining to grant rights under trademark law for use of some 364 | trade names, trademarks, or service marks; or 365 | - f) Requiring indemnification of licensors and authors of that 366 | material by anyone who conveys the material (or modified versions 367 | of it) with contractual assumptions of liability to the recipient, 368 | for any liability that these contractual assumptions directly 369 | impose on those licensors and authors. 370 | 371 | All other non-permissive additional terms are considered "further 372 | restrictions" within the meaning of section 10. If the Program as you 373 | received it, or any part of it, contains a notice stating that it is 374 | governed by this License along with a term that is a further 375 | restriction, you may remove that term. If a license document contains 376 | a further restriction but permits relicensing or conveying under this 377 | License, you may add to a covered work material governed by the terms 378 | of that license document, provided that the further restriction does 379 | not survive such relicensing or conveying. 380 | 381 | If you add terms to a covered work in accord with this section, you 382 | must place, in the relevant source files, a statement of the 383 | additional terms that apply to those files, or a notice indicating 384 | where to find the applicable terms. 385 | 386 | Additional terms, permissive or non-permissive, may be stated in the 387 | form of a separately written license, or stated as exceptions; the 388 | above requirements apply either way. 389 | 390 | #### 8. Termination. 391 | 392 | You may not propagate or modify a covered work except as expressly 393 | provided under this License. Any attempt otherwise to propagate or 394 | modify it is void, and will automatically terminate your rights under 395 | this License (including any patent licenses granted under the third 396 | paragraph of section 11). 397 | 398 | However, if you cease all violation of this License, then your license 399 | from a particular copyright holder is reinstated (a) provisionally, 400 | unless and until the copyright holder explicitly and finally 401 | terminates your license, and (b) permanently, if the copyright holder 402 | fails to notify you of the violation by some reasonable means prior to 403 | 60 days after the cessation. 
404 | 405 | Moreover, your license from a particular copyright holder is 406 | reinstated permanently if the copyright holder notifies you of the 407 | violation by some reasonable means, this is the first time you have 408 | received notice of violation of this License (for any work) from that 409 | copyright holder, and you cure the violation prior to 30 days after 410 | your receipt of the notice. 411 | 412 | Termination of your rights under this section does not terminate the 413 | licenses of parties who have received copies or rights from you under 414 | this License. If your rights have been terminated and not permanently 415 | reinstated, you do not qualify to receive new licenses for the same 416 | material under section 10. 417 | 418 | #### 9. Acceptance Not Required for Having Copies. 419 | 420 | You are not required to accept this License in order to receive or run 421 | a copy of the Program. Ancillary propagation of a covered work 422 | occurring solely as a consequence of using peer-to-peer transmission 423 | to receive a copy likewise does not require acceptance. However, 424 | nothing other than this License grants you permission to propagate or 425 | modify any covered work. These actions infringe copyright if you do 426 | not accept this License. Therefore, by modifying or propagating a 427 | covered work, you indicate your acceptance of this License to do so. 428 | 429 | #### 10. Automatic Licensing of Downstream Recipients. 430 | 431 | Each time you convey a covered work, the recipient automatically 432 | receives a license from the original licensors, to run, modify and 433 | propagate that work, subject to this License. You are not responsible 434 | for enforcing compliance by third parties with this License. 435 | 436 | An "entity transaction" is a transaction transferring control of an 437 | organization, or substantially all assets of one, or subdividing an 438 | organization, or merging organizations. If propagation of a covered 439 | work results from an entity transaction, each party to that 440 | transaction who receives a copy of the work also receives whatever 441 | licenses to the work the party's predecessor in interest had or could 442 | give under the previous paragraph, plus a right to possession of the 443 | Corresponding Source of the work from the predecessor in interest, if 444 | the predecessor has it or can get it with reasonable efforts. 445 | 446 | You may not impose any further restrictions on the exercise of the 447 | rights granted or affirmed under this License. For example, you may 448 | not impose a license fee, royalty, or other charge for exercise of 449 | rights granted under this License, and you may not initiate litigation 450 | (including a cross-claim or counterclaim in a lawsuit) alleging that 451 | any patent claim is infringed by making, using, selling, offering for 452 | sale, or importing the Program or any portion of it. 453 | 454 | #### 11. Patents. 455 | 456 | A "contributor" is a copyright holder who authorizes use under this 457 | License of the Program or a work on which the Program is based. The 458 | work thus licensed is called the contributor's "contributor version". 
459 | 460 | A contributor's "essential patent claims" are all patent claims owned 461 | or controlled by the contributor, whether already acquired or 462 | hereafter acquired, that would be infringed by some manner, permitted 463 | by this License, of making, using, or selling its contributor version, 464 | but do not include claims that would be infringed only as a 465 | consequence of further modification of the contributor version. For 466 | purposes of this definition, "control" includes the right to grant 467 | patent sublicenses in a manner consistent with the requirements of 468 | this License. 469 | 470 | Each contributor grants you a non-exclusive, worldwide, royalty-free 471 | patent license under the contributor's essential patent claims, to 472 | make, use, sell, offer for sale, import and otherwise run, modify and 473 | propagate the contents of its contributor version. 474 | 475 | In the following three paragraphs, a "patent license" is any express 476 | agreement or commitment, however denominated, not to enforce a patent 477 | (such as an express permission to practice a patent or covenant not to 478 | sue for patent infringement). To "grant" such a patent license to a 479 | party means to make such an agreement or commitment not to enforce a 480 | patent against the party. 481 | 482 | If you convey a covered work, knowingly relying on a patent license, 483 | and the Corresponding Source of the work is not available for anyone 484 | to copy, free of charge and under the terms of this License, through a 485 | publicly available network server or other readily accessible means, 486 | then you must either (1) cause the Corresponding Source to be so 487 | available, or (2) arrange to deprive yourself of the benefit of the 488 | patent license for this particular work, or (3) arrange, in a manner 489 | consistent with the requirements of this License, to extend the patent 490 | license to downstream recipients. "Knowingly relying" means you have 491 | actual knowledge that, but for the patent license, your conveying the 492 | covered work in a country, or your recipient's use of the covered work 493 | in a country, would infringe one or more identifiable patents in that 494 | country that you have reason to believe are valid. 495 | 496 | If, pursuant to or in connection with a single transaction or 497 | arrangement, you convey, or propagate by procuring conveyance of, a 498 | covered work, and grant a patent license to some of the parties 499 | receiving the covered work authorizing them to use, propagate, modify 500 | or convey a specific copy of the covered work, then the patent license 501 | you grant is automatically extended to all recipients of the covered 502 | work and works based on it. 503 | 504 | A patent license is "discriminatory" if it does not include within the 505 | scope of its coverage, prohibits the exercise of, or is conditioned on 506 | the non-exercise of one or more of the rights that are specifically 507 | granted under this License. 
You may not convey a covered work if you 508 | are a party to an arrangement with a third party that is in the 509 | business of distributing software, under which you make payment to the 510 | third party based on the extent of your activity of conveying the 511 | work, and under which the third party grants, to any of the parties 512 | who would receive the covered work from you, a discriminatory patent 513 | license (a) in connection with copies of the covered work conveyed by 514 | you (or copies made from those copies), or (b) primarily for and in 515 | connection with specific products or compilations that contain the 516 | covered work, unless you entered into that arrangement, or that patent 517 | license was granted, prior to 28 March 2007. 518 | 519 | Nothing in this License shall be construed as excluding or limiting 520 | any implied license or other defenses to infringement that may 521 | otherwise be available to you under applicable patent law. 522 | 523 | #### 12. No Surrender of Others' Freedom. 524 | 525 | If conditions are imposed on you (whether by court order, agreement or 526 | otherwise) that contradict the conditions of this License, they do not 527 | excuse you from the conditions of this License. If you cannot convey a 528 | covered work so as to satisfy simultaneously your obligations under 529 | this License and any other pertinent obligations, then as a 530 | consequence you may not convey it at all. For example, if you agree to 531 | terms that obligate you to collect a royalty for further conveying 532 | from those to whom you convey the Program, the only way you could 533 | satisfy both those terms and this License would be to refrain entirely 534 | from conveying the Program. 535 | 536 | #### 13. Remote Network Interaction; Use with the GNU General Public License. 537 | 538 | Notwithstanding any other provision of this License, if you modify the 539 | Program, your modified version must prominently offer all users 540 | interacting with it remotely through a computer network (if your 541 | version supports such interaction) an opportunity to receive the 542 | Corresponding Source of your version by providing access to the 543 | Corresponding Source from a network server at no charge, through some 544 | standard or customary means of facilitating copying of software. This 545 | Corresponding Source shall include the Corresponding Source for any 546 | work covered by version 3 of the GNU General Public License that is 547 | incorporated pursuant to the following paragraph. 548 | 549 | Notwithstanding any other provision of this License, you have 550 | permission to link or combine any covered work with a work licensed 551 | under version 3 of the GNU General Public License into a single 552 | combined work, and to convey the resulting work. The terms of this 553 | License will continue to apply to the part which is the covered work, 554 | but the work with which it is combined will remain governed by version 555 | 3 of the GNU General Public License. 556 | 557 | #### 14. Revised Versions of this License. 558 | 559 | The Free Software Foundation may publish revised and/or new versions 560 | of the GNU Affero General Public License from time to time. Such new 561 | versions will be similar in spirit to the present version, but may 562 | differ in detail to address new problems or concerns. 563 | 564 | Each version is given a distinguishing version number. 
If the Program 565 | specifies that a certain numbered version of the GNU Affero General 566 | Public License "or any later version" applies to it, you have the 567 | option of following the terms and conditions either of that numbered 568 | version or of any later version published by the Free Software 569 | Foundation. If the Program does not specify a version number of the 570 | GNU Affero General Public License, you may choose any version ever 571 | published by the Free Software Foundation. 572 | 573 | If the Program specifies that a proxy can decide which future versions 574 | of the GNU Affero General Public License can be used, that proxy's 575 | public statement of acceptance of a version permanently authorizes you 576 | to choose that version for the Program. 577 | 578 | Later license versions may give you additional or different 579 | permissions. However, no additional obligations are imposed on any 580 | author or copyright holder as a result of your choosing to follow a 581 | later version. 582 | 583 | #### 15. Disclaimer of Warranty. 584 | 585 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 586 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 587 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT 588 | WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT 589 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 590 | A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND 591 | PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE 592 | DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR 593 | CORRECTION. 594 | 595 | #### 16. Limitation of Liability. 596 | 597 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 598 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR 599 | CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 600 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES 601 | ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT 602 | NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR 603 | LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM 604 | TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER 605 | PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 606 | 607 | #### 17. Interpretation of Sections 15 and 16. 608 | 609 | If the disclaimer of warranty and limitation of liability provided 610 | above cannot be given local legal effect according to their terms, 611 | reviewing courts shall apply local law that most closely approximates 612 | an absolute waiver of all civil liability in connection with the 613 | Program, unless a warranty or assumption of liability accompanies a 614 | copy of the Program in return for a fee. 615 | 616 | END OF TERMS AND CONDITIONS 617 | 618 | ### How to Apply These Terms to Your New Programs 619 | 620 | If you develop a new program, and you want it to be of the greatest 621 | possible use to the public, the best way to achieve this is to make it 622 | free software which everyone can redistribute and change under these 623 | terms. 624 | 625 | To do so, attach the following notices to the program. It is safest to 626 | attach them to the start of each source file to most effectively state 627 | the exclusion of warranty; and each file should have at least the 628 | "copyright" line and a pointer to where the full notice is found. 
629 | 630 | <one line to give the program's name and a brief idea of what it does.> 631 | Copyright (C) <year> <name of author> 632 | 633 | This program is free software: you can redistribute it and/or modify 634 | it under the terms of the GNU Affero General Public License as 635 | published by the Free Software Foundation, either version 3 of the 636 | License, or (at your option) any later version. 637 | 638 | This program is distributed in the hope that it will be useful, 639 | but WITHOUT ANY WARRANTY; without even the implied warranty of 640 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 641 | GNU Affero General Public License for more details. 642 | 643 | You should have received a copy of the GNU Affero General Public License 644 | along with this program. If not, see <https://www.gnu.org/licenses/>. 645 | 646 | Also add information on how to contact you by electronic and paper 647 | mail. 648 | 649 | If your software can interact with users remotely through a computer 650 | network, you should also make sure that it provides a way for users to 651 | get its source. For example, if your program is a web application, its 652 | interface could display a "Source" link that leads users to an archive 653 | of the code. There are many ways you could offer source, and different 654 | solutions will be better for different programs; see section 13 for 655 | the specific requirements. 656 | 657 | You should also get your employer (if you work as a programmer) or 658 | school, if any, to sign a "copyright disclaimer" for the program, if 659 | necessary. For more information on this, and how to apply and follow 660 | the GNU AGPL, see <https://www.gnu.org/licenses/>. 661 | --------------------------------------------------------------------------------