├── CODEOWNERS
├── LICENSE
├── README.md
├── assets
└── logo_and_name.png
├── example
├── demo
│ ├── README.md
│ ├── file.river
│ ├── git.river
│ ├── logs
│ │ └── forward_to_loki
│ │ │ ├── README.md
│ │ │ └── module.river
│ ├── metrics
│ │ ├── prometheus_receiver
│ │ │ ├── README.md
│ │ │ └── module.river
│ │ └── prometheus_scrape
│ │ │ ├── README.md
│ │ │ └── module.river
│ ├── single.river
│ ├── string.river
│ └── traces
│ │ └── otel_input
│ │ ├── README.md
│ │ └── module.river
└── kubernetes
│ ├── logs
│ ├── simple-events.river
│ ├── simple-multi-tenant.river
│ ├── simple-single-tenant-journal.river
│ ├── simple-single-tenant.river
│ ├── single-tenant-custom-pipeline.river
│ ├── single-tenant-no-masking.river
│ └── single-tenant-specific-log-formats.river
│ └── metrics
│ ├── custom-rewrites.river
│ ├── json-exporter.river
│ ├── multi-target-example.river
│ ├── selective-modules.river
│ ├── simple-multi-tenant.river
│ └── simple-single-tenant.river
├── modules
├── grafana-agent
│ ├── dynamic-blackbox
│ │ ├── README.md
│ │ └── module.river
│ └── telemetry-to-lgtm
│ │ ├── README.md
│ │ └── module.river
├── grafana-cloud
│ └── autoconfigure
│ │ ├── README.md
│ │ └── module.river
├── host-filter
│ ├── README.md
│ └── module.river
├── k8s_api
│ ├── README.md
│ └── module.river
├── k8s_pods
│ ├── README.md
│ └── module.river
├── kubernetes
│ ├── README.md
│ ├── logs
│ │ ├── all.river
│ │ ├── drops
│ │ │ ├── level-debug.river
│ │ │ ├── level-info.river
│ │ │ ├── level-trace.river
│ │ │ └── levels.river
│ │ ├── embed
│ │ │ └── pod.river
│ │ ├── events.river
│ │ ├── kubelet.river
│ │ ├── labels
│ │ │ ├── keep-labels.river
│ │ │ ├── log-level.river
│ │ │ └── normalize-filename.river
│ │ ├── log-formats
│ │ │ ├── all.river
│ │ │ ├── common-log.river
│ │ │ ├── dotnet.river
│ │ │ ├── istio.river
│ │ │ ├── json.river
│ │ │ ├── klog.river
│ │ │ ├── log4j.river
│ │ │ ├── logfmt.river
│ │ │ ├── otel.river
│ │ │ ├── postgres.river
│ │ │ ├── python.river
│ │ │ ├── spring-boot.river
│ │ │ ├── syslog.river
│ │ │ └── zerolog.river
│ │ ├── masks
│ │ │ ├── all.river
│ │ │ ├── credit-card.river
│ │ │ ├── email.river
│ │ │ ├── ipv4.river
│ │ │ ├── ipv6.river
│ │ │ ├── phone.river
│ │ │ └── ssn.river
│ │ ├── metrics
│ │ │ ├── post-process-bytes-lines.river
│ │ │ └── pre-process-bytes-lines.river
│ │ ├── relabelings.river
│ │ ├── scrubs
│ │ │ ├── all.river
│ │ │ ├── json-empties.river
│ │ │ └── json-nulls.river
│ │ └── targets
│ │ │ ├── logs-from-api.river
│ │ │ └── logs-from-worker.river
│ ├── metrics
│ │ ├── all.river
│ │ ├── jobs
│ │ │ ├── README.md
│ │ │ ├── agent.river
│ │ │ ├── annotations-probe.river
│ │ │ ├── annotations-scrape.river
│ │ │ ├── cadvisor.river
│ │ │ ├── cert-manager.river
│ │ │ ├── consul.river
│ │ │ ├── etcd.river
│ │ │ ├── gitlab-exporter.river
│ │ │ ├── grafana.river
│ │ │ ├── haproxy.river
│ │ │ ├── kube-apiserver.river
│ │ │ ├── kube-probes.river
│ │ │ ├── kube-proxy.river
│ │ │ ├── kube-resource.river
│ │ │ ├── kube-state-metrics.river
│ │ │ ├── kubelet.river
│ │ │ ├── loki.river
│ │ │ ├── memcached.river
│ │ │ ├── mimir.river
│ │ │ ├── mysql.river
│ │ │ ├── node-exporter.river
│ │ │ ├── opencost.river
│ │ │ ├── prometheus-operator.river
│ │ │ ├── push-gateway.river
│ │ │ ├── rabbitmq.river
│ │ │ ├── redis.river
│ │ │ ├── statsd.river
│ │ │ └── tempo.river
│ │ ├── relabelings
│ │ │ ├── annotations
│ │ │ │ ├── json
│ │ │ │ │ ├── ingress.river
│ │ │ │ │ └── service.river
│ │ │ │ ├── metrics
│ │ │ │ │ ├── endpoints.river
│ │ │ │ │ ├── endpointslice.river
│ │ │ │ │ ├── node.river
│ │ │ │ │ └── pod.river
│ │ │ │ └── probes
│ │ │ │ │ ├── ingress.river
│ │ │ │ │ └── service.river
│ │ │ ├── auto-scrape.river
│ │ │ ├── blackbox.river
│ │ │ ├── json-exporter.river
│ │ │ ├── kube-apiserver.river
│ │ │ ├── kube-dns.river
│ │ │ ├── kube-state-metrics.river
│ │ │ ├── kubelet-cadvisor.river
│ │ │ ├── kubelet-probes.river
│ │ │ ├── kubelet-resource.river
│ │ │ ├── kubelet.river
│ │ │ ├── node-exporter.river
│ │ │ └── opencost.river
│ │ ├── scrapes
│ │ │ ├── auto-probe-ingresses.river
│ │ │ ├── auto-probe-services.river
│ │ │ ├── auto-scrape-endpoints.river
│ │ │ ├── auto-scrape-pods.river
│ │ │ ├── json-ingresses.river
│ │ │ ├── json-services.river
│ │ │ ├── kube-apiserver.river
│ │ │ ├── kube-dns.river
│ │ │ ├── kube-state-metrics.river
│ │ │ ├── kubelet-cadvisor.river
│ │ │ ├── kubelet-probes.river
│ │ │ ├── kubelet-resource.river
│ │ │ ├── kubelet.river
│ │ │ ├── node-exporter.river
│ │ │ └── opencost.river
│ │ └── targets
│ │ │ ├── endpoints.river
│ │ │ ├── ingresses.river
│ │ │ ├── nodes.river
│ │ │ ├── pods.river
│ │ │ └── services.river
│ └── relabelings
│ │ ├── common.river
│ │ ├── endpoints.river
│ │ ├── endpointslice.river
│ │ ├── ingress.river
│ │ ├── pod.river
│ │ ├── service.river
│ │ └── static.river
├── meta-monitoring
│ └── metrics
│ │ ├── scrape-agent.river
│ │ ├── scrape-grafana.river
│ │ ├── scrape-loki.river
│ │ ├── scrape-mimir.river
│ │ ├── scrape-resource.river
│ │ ├── scrape-tempo.river
│ │ └── targets-lgtm.river
└── otlp
│ └── otlp-to-lgtm
│ ├── README.md
│ └── module.river
└── util
└── agentfmt.sh
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @erikbaranowski @mattdurham @bentonam
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |

2 |
3 | [Modules](https://grafana.com/docs/agent/latest/flow/concepts/modules/) are a
4 | way to create Grafana Agent [Flow](https://grafana.com/docs/agent/latest/flow/)
5 | configurations which can be loaded as a component. Modules are a great way to
6 | parameterize a configuration to create reusable pipelines.
7 |
8 | ## Contents
9 | - modules: A library of usable modules out of the box
10 | - example: A practical example shown for each module loader plus without modules for comparison
11 | - util: Utilities for managing modules in this repo
12 |
13 | ## Modules
14 |
15 | | Name | Description | Agent Version |
16 | | ---- | ----------- | ------------- |
17 | | [Metrics and Logs Annotation Ingestion](./modules/kubernetes/) | Module to ingest Metrics (scraping/probes) and Logs through annotations. | `>= v0.36.1`
18 | | [OTLP to LGTM](./modules/otlp/otlp-to-lgtm/) | Module to ingest OTLP data and then send it to Loki, Mimir and Tempo stacks locally or in GrafanaCloud. | `>= v0.33`
19 | | [Grafana Agent Telemetry to LGTM](./modules/grafana-agent/telemetry-to-lgtm/) | Module to forward the Grafana Agent's own telemetry data to Loki, Mimir and Tempo stacks locally or in Grafana Cloud. | `>= v0.33`
20 | | [Grafana Agent Dynamic Blackbox Exporter](./modules/grafana-agent/dynamic-blackbox/) | Module to use blackbox exporter with dynamic targets. | `>= v0.39`
21 | | [Grafana Cloud Autoconfigure](./modules/grafana-cloud/autoconfigure/) | Module to automatically configure receivers for Grafana Cloud. | `>= v0.34`
22 | | [Host Filtering](./modules/host-filter/) | The host filtering module provides a Flow mode equivalent to static mode's host filtering functionality. | `>= v0.34`
23 |
24 | ## Submitting modules
25 |
26 | Add modules to the `modules` folder. Each module must have a README.md that provides the following information:
27 | * Name
28 | * Brief description
29 | * Applicable Agent Versions
30 | * Arguments
31 | * Exports
32 | * Example
33 |
34 | Modules must contain the following elements:
35 | * Arguments
36 | * Exports
37 | * The body of the module
38 |
--------------------------------------------------------------------------------
/assets/logo_and_name.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grafana/agent-modules/712134a000dd4269d033d01e1cd9ff2d1ae6e30b/assets/logo_and_name.png
--------------------------------------------------------------------------------
/example/demo/file.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * AGENT METRICS
3 | ********************************************/
4 | module.file "metrics_prometheus_receiver" {
5 | filename = env("AGENT_CONFIG_FOLDER") + "/metrics/prometheus_receiver/module.river"
6 |
7 | arguments {
8 | username = env("METRIC_USERNAME")
9 | password = env("METRIC_PASSWORD")
10 | url = env("METRIC_URL")
11 | }
12 | }
13 |
14 | module.file "metrics_prometheus_scrape_agent" {
15 | filename = env("AGENT_CONFIG_FOLDER") + "/metrics/prometheus_scrape/module.river"
16 |
17 | arguments {
18 | address = "0.0.0.0:12345"
19 | receiver = module.file.metrics_prometheus_receiver.exports.receiver
20 | }
21 | }
22 |
23 | /********************************************
24 | * AGENT LOGGING
25 | ********************************************/
26 | logging {
27 | level = env("AGENT_LOG_LEVEL")
28 | format = "logfmt"
29 | write_to = [module.file.logs_forward_to_loki.exports.receiver]
30 | }
31 |
32 | module.file "logs_forward_to_loki" {
33 | filename = env("AGENT_CONFIG_FOLDER") + "/logs/forward_to_loki/module.river"
34 |
35 | arguments {
36 | username = env("LOG_USERNAME")
37 | password = env("LOG_PASSWORD")
38 | url = env("LOG_URL")
39 | }
40 | }
41 |
42 | /********************************************
43 | * AGENT TRACING
44 | ********************************************/
45 | tracing {
46 | sampling_fraction = 1
47 | write_to = [module.file.traces_otel_input.exports.input]
48 | }
49 |
50 | module.file "traces_otel_input" {
51 | filename = env("AGENT_CONFIG_FOLDER") + "/traces/otel_input/module.river"
52 |
53 | arguments {
54 | username = env("TRACE_USERNAME")
55 | password = env("TRACE_PASSWORD")
56 | url = env("TRACE_URL")
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/example/demo/git.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * AGENT METRICS
3 | ********************************************/
4 | module.git "metrics_prometheus_receiver" {
5 | repository = "https://github.com/grafana/agent-modules.git"
6 | revision = "main"
7 | path = "example/demo/metrics/prometheus_receiver/module.river"
8 |
9 | arguments {
10 | username = env("METRIC_USERNAME")
11 | password = env("METRIC_PASSWORD")
12 | url = env("METRIC_URL")
13 | }
14 | }
15 |
16 | module.git "metrics_prometheus_scrape_agent" {
17 | repository = "https://github.com/grafana/agent-modules.git"
18 | revision = "main"
19 | path = "example/demo/metrics/prometheus_scrape/module.river"
20 |
21 | arguments {
22 | address = "0.0.0.0:12345"
23 | receiver = module.git.metrics_prometheus_receiver.exports.receiver
24 | }
25 | }
26 |
27 | /********************************************
28 | * AGENT LOGGING
29 | ********************************************/
30 | logging {
31 | level = env("AGENT_LOG_LEVEL")
32 | format = "logfmt"
33 | write_to = [module.git.logs_forward_to_loki.exports.receiver]
34 | }
35 |
36 | module.git "logs_forward_to_loki" {
37 | repository = "https://github.com/grafana/agent-modules.git"
38 | revision = "main"
39 | path = "example/demo/logs/forward_to_loki/module.river"
40 |
41 | arguments {
42 | username = env("LOG_USERNAME")
43 | password = env("LOG_PASSWORD")
44 | url = env("LOG_URL")
45 | }
46 | }
47 |
48 | /********************************************
49 | * AGENT TRACING
50 | ********************************************/
51 | tracing {
52 | sampling_fraction = 1
53 | write_to = [module.git.traces_otel_input.exports.input]
54 | }
55 |
56 | module.git "traces_otel_input" {
57 | repository = "https://github.com/grafana/agent-modules.git"
58 | revision = "main"
59 | path = "example/demo/traces/otel_input/module.river"
60 |
61 | arguments {
62 | username = env("TRACE_USERNAME")
63 | password = env("TRACE_PASSWORD")
64 | url = env("TRACE_URL")
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/example/demo/logs/forward_to_loki/README.md:
--------------------------------------------------------------------------------
1 | # logs/forward_to_loki
2 |
3 | The `logs/forward_to_loki` module is an example module which exports logs
4 | from a log file to a loki endpoint.
5 |
6 | ## Agent Version
7 |
8 | `>= v0.36`
9 |
10 | ## Module arguments
11 |
12 | The following arguments are supported when passing arguments to the module
13 | loader:
14 |
15 | | Name | Type | Description | Default | Required
16 | | ---- | ---- | ----------- | ------- | --------
17 | | `username` | `string` | The username for basic authentication against the Loki endpoint. | | yes
18 | | `password` | `string` | The password for basic authentication against the Loki endpoint. | | yes
19 | | `url` | `string` | The target loki url to forward logs to. | | yes
20 |
21 | ## Module exports
22 |
23 | The following fields are exported by the module:
24 |
25 | | Name | Type | Description
26 | | ---- | ---- | -----------
27 | | `receiver` | `LogsReceiver` | A logs receiver that other components can use to send telemetry data to.
28 |
29 | ## Example
30 |
31 | This example demonstrates how you can send all logs generated by the Grafana
32 | Agent to a loki endpoint.
33 |
34 | ```river
35 | logging {
36 | level = "info"
37 | format = "logfmt"
38 | write_to = [module.git.logs_forward_to_loki.exports.receiver]
39 | }
40 |
41 | module.git "logs_forward_to_loki" {
42 | repository = "https://github.com/grafana/agent-modules.git"
43 | revision = "main"
44 | path = "example/demo/logs/forward_to_loki/module.river"
45 |
46 | arguments {
47 | url = env("URL")
48 | }
49 | }
50 |
51 | ```
52 |
--------------------------------------------------------------------------------
/example/demo/logs/forward_to_loki/module.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * ARGUMENTS
3 | ********************************************/
4 | argument "username" {
5 | optional = false
6 | }
7 |
8 | argument "password" {
9 | optional = false
10 | }
11 |
12 | argument "url" {
13 | optional = false
14 | }
15 |
16 | /********************************************
17 | * EXPORTS
18 | ********************************************/
19 | export "receiver" {
20 | value = loki.write.default.receiver
21 | }
22 |
23 | /********************************************
24 | * LOG EXPORTER
25 | ********************************************/
26 | loki.write "default" {
27 | endpoint {
28 | url = argument.url.value
29 |
30 | basic_auth {
31 | username = argument.username.value
32 | password = argument.password.value
33 | }
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/example/demo/metrics/prometheus_receiver/README.md:
--------------------------------------------------------------------------------
1 | # metrics/prometheus_receiver
2 |
3 | The `metrics/prometheus_receiver` module is an example module which exports a
4 | prometheus receiver for use by other components.
5 |
6 | ## Agent Version
7 |
8 | `>= v0.34`
9 |
10 | ## Module arguments
11 |
12 | The following arguments are supported when passing arguments to the module
13 | loader:
14 |
15 | | Name | Type | Description | Default | Required
16 | | ---- | ---- | ----------- | ------- | --------
17 | | `username` | `string` | The username for basic authentication. | | yes
18 | | `password` | `string` | The password for basic authentication. | | yes
19 | | `url` | `string` | The target url to forward metrics to. | | yes
20 |
21 | ## Module exports
22 |
23 | The following fields are exported by the module:
24 |
25 | | Name | Type | Description
26 | | ---- | ---- | -----------
27 | | `receiver` | `receiver` | A value that other components can use to send metrics data to.
28 |
29 | ## Example
30 |
31 | This example demonstrates how you can send all metrics generated by the Grafana
32 | Agent to prometheus.
33 |
34 | ```river
35 | module.git "metrics_prometheus_receiver" {
36 | repository = "https://github.com/grafana/agent-modules.git"
37 | revision = "main"
38 | path = "example/demo/metrics/prometheus_receiver/module.river"
39 |
40 | arguments {
41 | username = env("METRIC_USERNAME")
42 | password = env("METRIC_PASSWORD")
43 | url = env("METRIC_URL")
44 | }
45 | }
46 |
47 | prometheus.scrape "metrics_agent" {
48 | targets = [{"__address__" = "0.0.0.0:12345"}]
49 | forward_to = [module.git.metrics_prometheus_receiver.exports.receiver]
50 | }
51 |
52 | ```
53 |
--------------------------------------------------------------------------------
/example/demo/metrics/prometheus_receiver/module.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * ARGUMENTS
3 | ********************************************/
4 | argument "username" {
5 | optional = false
6 | }
7 |
8 | argument "password" {
9 | optional = false
10 | }
11 |
12 | argument "url" {
13 | optional = false
14 | }
15 |
16 | /********************************************
17 | * EXPORTS
18 | ********************************************/
19 | export "receiver" {
20 | value = prometheus.remote_write.default.receiver
21 | }
22 |
23 | /********************************************
24 | * METRICS RECEIVER
25 | ********************************************/
26 | prometheus.remote_write "default" {
27 | endpoint {
28 | url = argument.url.value
29 |
30 | basic_auth {
31 | username = argument.username.value
32 | password = argument.password.value
33 | }
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/example/demo/metrics/prometheus_scrape/README.md:
--------------------------------------------------------------------------------
1 | # metrics/prometheus_scrape
2 |
3 | The `metrics/prometheus_scrape` module is an example module which scrapes
4 | metrics from a Prometheus-compatible endpoint and forwards them to a receiver.
5 |
6 | ## Agent Version
7 |
8 | `>= v0.34`
9 |
10 | ## Module arguments
11 |
12 | The following arguments are supported when passing arguments to the module
13 | loader:
14 |
15 | | Name | Type | Description | Default | Required
16 | | ---- | ---- | ----------- | ------- | --------
17 | | `address` | `string` | The target address to scrape metrics from. | | yes
18 | | `receiver` | `MetricsReceiver` | Receiver to send scraped metrics to. | | yes
19 | | `scrape_interval` | `duration` | How frequently to scrape the targets of this scrape config. | "10s" | no
20 |
21 | ## Module exports
22 |
23 | This module does not export anything.
24 |
25 | ## Example
26 |
27 | This example demonstrates how you can send all metrics generated by the Grafana
28 | Agent to a prometheus endpoint using basic auth.
29 |
30 | ```river
31 | prometheus.remote_write "default" {
32 | endpoint {
33 | url = env("METRIC_URL")
34 |
35 | basic_auth {
36 | username = env("METRIC_USERNAME")
37 | password = env("METRIC_PASSWORD")
38 | }
39 | }
40 | }
41 |
42 | module.git "metrics_prometheus_scrape_agent" {
43 | repository = "https://github.com/grafana/agent-modules.git"
44 | revision = "main"
45 | path = "example/demo/metrics/prometheus_scrape/module.river"
46 |
47 | arguments {
48 | address = "0.0.0.0:12345"
49 | receiver = prometheus.remote_write.default.receiver
50 | }
51 | }
52 |
53 | ```
54 |
--------------------------------------------------------------------------------
/example/demo/metrics/prometheus_scrape/module.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * ARGUMENTS
3 | ********************************************/
4 | argument "address" {
5 | optional = false
6 | }
7 |
8 | argument "receiver" {
9 | optional = false
10 | }
11 |
12 | argument "scrape_interval" {
13 | optional = true
14 | default = "10s"
15 | }
16 |
17 | /********************************************
18 | * EXPORTS
19 | ********************************************/
20 |
21 | /********************************************
22 | * SCRAPE
23 | ********************************************/
24 | prometheus.scrape "default" {
25 | targets = [{"__address__" = argument.address.value}]
26 | forward_to = [argument.receiver.value]
27 | scrape_interval = argument.scrape_interval.value
28 | }
29 |
--------------------------------------------------------------------------------
/example/demo/single.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * AGENT METRICS
3 | ********************************************/
4 | prometheus.remote_write "default" {
5 | endpoint {
6 | url = env("METRIC_URL")
7 |
8 | basic_auth {
9 | username = env("METRIC_USERNAME")
10 | password = env("METRIC_PASSWORD")
11 | }
12 | }
13 | }
14 |
15 | // SCRAPE
16 | prometheus.scrape "default" {
17 | targets = [{"__address__" = "0.0.0.0:12345"}]
18 | forward_to = [prometheus.remote_write.default.receiver]
19 | scrape_interval = "10s"
20 | }
21 |
22 | /********************************************
23 | * AGENT LOGGING
24 | ********************************************/
25 | logging {
26 | level = env("AGENT_LOG_LEVEL")
27 | format = "logfmt"
28 | write_to = [loki.write.default.receiver]
29 | }
30 |
31 | // LOG EXPORTER
32 | loki.write "default" {
33 | endpoint {
34 | url = env("LOG_URL")
35 |
36 | basic_auth {
37 | username = env("LOG_USERNAME")
38 | password = env("LOG_PASSWORD")
39 | }
40 | }
41 | }
42 |
43 | /********************************************
44 | * AGENT TRACING
45 | ********************************************/
46 | tracing {
47 | sampling_fraction = 1
48 | write_to = [otelcol.processor.batch.default.input] // route traces through the batch processor declared below (which forwards to the otlp exporter), mirroring traces/otel_input/module.river; previously the processor was dead code
49 | }
50 |
51 | // TRACE EXPORTERS
52 | otelcol.auth.basic "default" {
53 | username = env("TRACE_USERNAME")
54 | password = env("TRACE_PASSWORD")
55 | }
56 |
57 | otelcol.exporter.otlp "default" {
58 | client {
59 | endpoint = env("TRACE_URL")
60 | auth = otelcol.auth.basic.default.handler
61 | }
62 | }
63 |
64 | // OTLP PROCESSORS
65 | otelcol.processor.batch "default" {
66 | output {
67 | traces = [otelcol.exporter.otlp.default.input]
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/example/demo/string.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * AGENT METRICS
3 | ********************************************/
4 | local.file "metrics_prometheus_receiver" {
5 | filename = env("AGENT_CONFIG_FOLDER") + "/metrics/prometheus_receiver/module.river"
6 | }
7 |
8 | module.string "metrics_prometheus_receiver" {
9 | content = local.file.metrics_prometheus_receiver.content
10 |
11 | arguments {
12 | username = env("METRIC_USERNAME")
13 | password = env("METRIC_PASSWORD")
14 | url = env("METRIC_URL")
15 | }
16 | }
17 |
18 | local.file "metrics_prometheus_scrape_agent" {
19 | filename = env("AGENT_CONFIG_FOLDER") + "/metrics/prometheus_scrape/module.river"
20 | }
21 |
22 | module.string "metrics_prometheus_scrape_agent" {
23 | content = local.file.metrics_prometheus_scrape_agent.content
24 |
25 | arguments {
26 | address = "0.0.0.0:12345"
27 | receiver = module.string.metrics_prometheus_receiver.exports.receiver
28 | }
29 | }
30 |
31 | /********************************************
32 | * AGENT LOGGING
33 | ********************************************/
34 | logging {
35 | level = env("AGENT_LOG_LEVEL")
36 | format = "logfmt"
37 | write_to = [module.string.logs_forward_to_loki.exports.receiver]
38 | }
39 |
40 | local.file "logs_forward_to_loki" {
41 | filename = env("AGENT_CONFIG_FOLDER") + "/logs/forward_to_loki/module.river"
42 | }
43 |
44 | module.string "logs_forward_to_loki" {
45 | content = local.file.logs_forward_to_loki.content
46 |
47 | arguments {
48 | username = env("LOG_USERNAME")
49 | password = env("LOG_PASSWORD")
50 | url = env("LOG_URL")
51 | }
52 | }
53 |
54 | /********************************************
55 | * AGENT TRACING
56 | ********************************************/
57 | tracing {
58 | sampling_fraction = 1
59 | write_to = [module.string.traces_otel_input.exports.input]
60 | }
61 |
62 | local.file "traces_otel_input" {
63 | filename = env("AGENT_CONFIG_FOLDER") + "/traces/otel_input/module.river"
64 | }
65 |
66 | module.string "traces_otel_input" {
67 | content = local.file.traces_otel_input.content
68 |
69 | arguments {
70 | username = env("TRACE_USERNAME")
71 | password = env("TRACE_PASSWORD")
72 | url = env("TRACE_URL")
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/example/demo/traces/otel_input/README.md:
--------------------------------------------------------------------------------
1 | # traces/otel_input
2 |
3 | The `traces/otel_input` module is an example module which exports an otel trace
4 | input. That input will run the traces through the default
5 | `otelcol.processor.batch` settings and then forward them on to the provided
6 | target url.
7 |
8 | ## Agent Version
9 |
10 | `>= v0.34`
11 |
12 | ## Module arguments
13 |
14 | The following arguments are supported when passing arguments to the module
15 | loader:
16 |
17 | | Name | Type | Description | Default | Required
18 | | ---- | ---- | ----------- | ------- | --------
19 | | `username` | `string` | The username for otelcol.auth.basic authentication. | | yes
20 | | `password` | `string` | The password for otelcol.auth.basic authentication. | | yes
21 | | `url` | `string` | The target url to forward traces to. | | yes
22 |
23 | ## Module exports
24 |
25 | The following fields are exported by the module:
26 |
27 | | Name | Type | Description
28 | | ---- | ---- | -----------
29 | | `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
30 |
31 | ## Example
32 |
33 | This example demonstrates how you can send all traces generated by the Grafana
34 | Agent to a trace endpoint.
35 |
36 | ```river
37 | tracing {
38 | sampling_fraction = 1
39 | write_to = [module.file.traces_otel_input.exports.input]
40 | }
41 |
42 | module.git "traces_otel_input" {
43 | repository = "https://github.com/grafana/agent-modules.git"
44 | revision = "main"
45 | path = "example/demo/traces/otel_input/module.river"
46 |
47 | arguments {
48 | username = env("TRACE_USERNAME")
49 | password = env("TRACE_PASSWORD")
50 | url = env("TRACE_URL")
51 | }
52 | }
53 |
54 | ```
55 |
--------------------------------------------------------------------------------
/example/demo/traces/otel_input/module.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * ARGUMENTS
3 | ********************************************/
4 | argument "username" {
5 | optional = false
6 | }
7 |
8 | argument "password" {
9 | optional = false
10 | }
11 |
12 | argument "url" {
13 | optional = false
14 | }
15 |
16 | /********************************************
17 | * EXPORTS
18 | ********************************************/
19 | export "input" {
20 | value = otelcol.processor.batch.default.input
21 | }
22 |
23 | /********************************************
24 | * TRACE EXPORTERS
25 | ********************************************/
26 | otelcol.auth.basic "default" {
27 | username = argument.username.value
28 | password = argument.password.value
29 | }
30 |
31 | otelcol.exporter.otlp "default" {
32 | client {
33 | endpoint = argument.url.value
34 | auth = otelcol.auth.basic.default.handler
35 | }
36 | }
37 |
38 | /********************************************
39 | * OTLP PROCESSORS
40 | ********************************************/
41 | otelcol.processor.batch "default" {
42 | output {
43 | traces = [otelcol.exporter.otlp.default.input]
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/example/kubernetes/logs/simple-events.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows how to consume the kubernetes events and write them as log messages to loki
3 | */
4 | logging {
5 | level = coalesce(env("AGENT_LOG_LEVEL"), "info")
6 | format = "logfmt"
7 | }
8 |
9 | module.git "event_logs" {
10 | repository = "https://github.com/grafana/agent-modules.git"
11 | revision = "main"
12 | path = "modules/kubernetes/logs/events.river"
13 |
14 | arguments {
15 | forward_to = [loki.write.default.receiver]
16 | git_repo = coalesce(env("GIT_REPO"), env("AGENT_REPO"), "https://github.com/grafana/agent-modules.git")
17 | git_rev = coalesce(env("GIT_REV"), env("AGENT_REV"), "main")
18 | }
19 | }
20 |
21 | loki.write "default" {
22 | endpoint {
23 | url = env("LOGS_OPS_URL")
24 |
25 | basic_auth {
26 | username = env("LOGS_OPS_TENANT")
27 | password = env("LOGS_OPS_TOKEN")
28 | }
29 | }
30 |
31 | external_labels = {
32 | "cluster" = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), ""),
33 | "env" = coalesce(env("ENV"), ""),
34 | "region" = coalesce(env("REGION"), ""),
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/example/kubernetes/logs/simple-multi-tenant.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows using the default all logs processing module, for
3 | a multiple tenants and specifying the destination url/credentials via environment
4 | variables.
5 | */
6 | logging {
7 | level = "info"
8 | format = "logfmt"
9 | }
10 |
11 | module.git "logs_primary" {
12 | repository = "https://github.com/grafana/agent-modules.git"
13 | revision = "main"
14 | path = "modules/kubernetes/logs/all.river"
15 |
16 | arguments {
17 | forward_to = [loki.write.local_primary.receiver] // was loki.write.primary.receiver, which does not exist; the writer declared below is named "local_primary"
18 | tenant = "primary|"
19 | git_repo = "https://github.com/grafana/agent-modules.git"
20 | git_rev = "main"
21 | }
22 | }
23 |
24 | module.git "logs_op" {
25 | repository = "https://github.com/grafana/agent-modules.git"
26 | revision = "main"
27 | path = "modules/kubernetes/logs/all.river"
28 |
29 | arguments {
30 | forward_to = [loki.write.local_ops.receiver] // was loki.write.default.receiver; no "default" writer exists in this file — the ops writer declared below is named "local_ops"
31 | tenant = coalesce(env("OPS_TENANT_NAME"), "ops")
32 | git_repo = "https://github.com/grafana/agent-modules.git"
33 | git_rev = "main"
34 | }
35 | }
36 |
37 | loki.write "local_primary" {
38 | endpoint {
39 | url = env("LOGS_PRIMARY_URL")
40 |
41 | basic_auth {
42 | username = env("LOGS_PRIMARY_TENANT")
43 | password = env("LOGS_PRIMARY_TOKEN")
44 | }
45 | }
46 |
47 | external_labels = {
48 | "cluster" = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), ""),
49 | "env" = coalesce(env("ENV"), ""),
50 | "region" = coalesce(env("REGION"), ""),
51 | }
52 | }
53 |
54 | loki.write "local_ops" {
55 | endpoint {
56 | url = env("LOGS_OPS_URL")
57 |
58 | basic_auth {
59 | username = env("LOGS_OPS_TENANT")
60 | password = env("LOGS_OPS_TOKEN")
61 | }
62 | }
63 |
64 | external_labels = {
65 | "cluster" = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), ""),
66 | "env" = coalesce(env("ENV"), ""),
67 | "region" = coalesce(env("REGION"), ""),
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/example/kubernetes/logs/simple-single-tenant-journal.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows using the default all logs processing module, for
3 | a single tenant and gathering kubelet logs
4 | */
5 | logging {
6 | level = "info"
7 | format = "logfmt"
8 | }
9 |
10 | module.git "logs_primary" {
11 | repository = "https://github.com/grafana/agent-modules.git"
12 | revision = "main"
13 | path = "modules/kubernetes/logs/all.river"
14 |
15 | arguments {
16 | forward_to = [loki.write.default.receiver]
17 | git_repo = coalesce(env("GIT_REPO"), env("AGENT_REPO"), "https://github.com/grafana/agent-modules.git")
18 | git_rev = coalesce(env("GIT_REV"), env("AGENT_REV"), "main")
19 | }
20 | }
21 |
22 | module.git "logs_kubelet_journal" {
23 | repository = "https://github.com/grafana/agent-modules.git"
24 | revision = "main"
25 | path = "modules/kubernetes/logs/kubelet.river"
26 |
27 | arguments {
28 | forward_to = [loki.write.default.receiver]
29 | git_repo = coalesce(env("GIT_REPO"), env("AGENT_REPO"), "https://github.com/grafana/agent-modules.git")
30 | git_rev = coalesce(env("GIT_REV"), env("AGENT_REV"), "main")
31 | }
32 | }
33 |
34 | loki.write "default" {
35 | endpoint {
36 | url = env("LOGS_PRIMARY_URL")
37 |
38 | basic_auth {
39 | username = env("LOGS_PRIMARY_TENANT")
40 | password = env("LOGS_PRIMARY_TOKEN")
41 | }
42 | }
43 |
44 | external_labels = {
45 | "cluster" = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), ""),
46 | "env" = coalesce(env("ENV"), ""),
47 | "region" = coalesce(env("REGION"), ""),
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/example/kubernetes/logs/simple-single-tenant.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows using the default all logs processing module, for
3 | a single tenant and specifying the destination url/credentials via environment
4 | variables.
5 | */
6 | logging {
7 | level = "info"
8 | format = "logfmt"
9 | }
10 |
11 | module.git "logs_primary" {
12 | repository = "https://github.com/grafana/agent-modules.git"
13 | revision = "main"
14 | path = "modules/kubernetes/logs/all.river"
15 |
16 | arguments {
17 | forward_to = [loki.write.default.receiver]
18 | git_repo = coalesce(env("GIT_REPO"), env("AGENT_REPO"), "https://github.com/grafana/agent-modules.git")
19 | git_rev = coalesce(env("GIT_REV"), env("AGENT_REV"), "main")
20 | }
21 | }
22 |
23 | loki.write "default" {
24 | endpoint {
25 | url = env("LOGS_PRIMARY_URL")
26 |
27 | basic_auth {
28 | username = env("LOGS_PRIMARY_TENANT")
29 | password = env("LOGS_PRIMARY_TOKEN")
30 | }
31 | }
32 |
33 | external_labels = {
34 | "cluster" = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), ""),
35 | "env" = coalesce(env("ENV"), ""),
36 | "region" = coalesce(env("REGION"), ""),
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/example/kubernetes/logs/single-tenant-custom-pipeline.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following items would need to be defined to include your own specific processing steps,
3 | this example removes the following modules:
4 |
5 | - masking
6 | - normalize filename
7 |
8 | As well as adding custom pipeline processing stages after all of the log formats have been
9 | processed and forwards its results to the default log-level module
10 | */
11 | logging {
12 | level = "info"
13 | format = "logfmt"
14 | }
15 |
16 | module.git "log_targets" {
17 | repository = "https://github.com/grafana/agent-modules.git"
18 | revision = "main"
19 | path = "modules/kubernetes/logs/targets/logs-from-worker.river"
20 |
21 | arguments {
22 | forward_to = [module.git.log_formats_all.exports.process.receiver]
23 | tenant = coalesce(env("DEFAULT_TENANT_NAME"), "primary|")
24 | }
25 | }
26 |
27 | module.git "log_formats_all" {
28 | repository = "https://github.com/grafana/agent-modules.git"
29 | revision = "main"
30 | path = "modules/kubernetes/logs/log-formats/all.river"
31 |
32 | arguments {
33 | forward_to = [loki.process.custom.receiver] // forward_to must be a list of receivers
34 | }
35 | }
36 |
37 | loki.process "custom" {
38 | forward_to = [module.git.log_level_default.exports.process.receiver]
39 |
40 | // your custom stages here
41 |
42 | stage.labels {
43 | values = {
44 | foo = "bar",
45 | }
46 | }
47 |
48 | }
49 |
50 | module.git "log_level_default" {
51 | repository = "https://github.com/grafana/agent-modules.git"
52 | revision = "main"
53 | path = "modules/kubernetes/logs/labels/log-level.river"
54 |
55 | arguments {
56 | forward_to = [module.git.drop_levels.exports.process.receiver] // route through drop_levels so the declared drops module actually receives logs
57 | }
58 | }
59 |
60 | module.git "drop_levels" {
61 | repository = "https://github.com/grafana/agent-modules.git"
62 | revision = "main"
63 | path = "modules/kubernetes/logs/drops/levels.river"
64 |
65 | arguments {
66 | forward_to = [module.git.scrub_all.exports.process.receiver]
67 | }
68 | }
69 |
70 | module.git "scrub_all" {
71 | repository = "https://github.com/grafana/agent-modules.git"
72 | revision = "main"
73 | path = "modules/kubernetes/logs/scrubs/all.river"
74 |
75 | arguments {
76 | forward_to = [module.git.embed_pod.exports.process.receiver]
77 | }
78 | }
79 |
80 | module.git "embed_pod" {
81 | repository = "https://github.com/grafana/agent-modules.git"
82 | revision = "main"
83 | path = "modules/kubernetes/logs/embed/pod.river"
84 |
85 | arguments {
86 | forward_to = [module.git.label_keep.exports.process.receiver]
87 | }
88 | }
89 |
90 | module.git "label_keep" {
91 | repository = "https://github.com/grafana/agent-modules.git"
92 | revision = "main"
93 | path = "modules/kubernetes/logs/labels/keep-labels.river"
94 |
95 | arguments {
96 | forward_to = [loki.write.destination.receiver] // forward_to must be a list of receivers
97 | keep_labels = [
98 | "app",
99 | "cluster",
100 | "component",
101 | "deployment",
102 | "env",
103 | "instance",
104 | "job",
105 | "level",
106 | "namespace",
107 | "region",
108 | "service",
109 | "squad",
110 | "team",
111 | ]
112 | }
113 | }
114 |
115 | loki.write "destination" {
116 | endpoint {
117 | url = env("DEFAULT_LOKI_ENDPOINT")
118 | basic_auth {
119 | username = env("DEFAULT_TENANT_ID")
120 | password = env("DEFAULT_TENANT_TOKEN")
121 | }
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/example/kubernetes/logs/single-tenant-no-masking.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following items would need to be defined to include your own specific steps,
3 | this example removes the following modules:
4 |
5 | - masking
6 | - normalize filename
7 |
8 | */
9 | logging {
10 | level = "info"
11 | format = "logfmt"
12 | }
13 |
14 | module.git "log_targets" {
15 | repository = "https://github.com/grafana/agent-modules.git"
16 | revision = "main"
17 | path = "modules/kubernetes/logs/targets/logs-from-worker.river"
18 |
19 | arguments {
20 | forward_to = [module.git.log_formats_all.exports.process.receiver]
21 | tenant = coalesce(env("DEFAULT_TENANT_NAME"), "primary|")
22 | }
23 | }
24 |
25 | module.git "log_formats_all" {
26 | repository = "https://github.com/grafana/agent-modules.git"
27 | revision = "main"
28 | path = "modules/kubernetes/logs/log-formats/all.river"
29 |
30 | arguments {
31 | forward_to = [module.git.log_level_default.exports.process.receiver]
32 | }
33 | }
34 |
35 | module.git "log_level_default" {
36 | repository = "https://github.com/grafana/agent-modules.git"
37 | revision = "main"
38 | path = "modules/kubernetes/logs/labels/log-level.river"
39 |
40 | arguments {
41 | forward_to = [module.git.drop_levels.exports.process.receiver] // route through drop_levels so the declared drops module actually receives logs
42 | }
43 | }
44 |
45 | module.git "drop_levels" {
46 | repository = "https://github.com/grafana/agent-modules.git"
47 | revision = "main"
48 | path = "modules/kubernetes/logs/drops/levels.river"
49 |
50 | arguments {
51 | forward_to = [module.git.scrub_all.exports.process.receiver]
52 | }
53 | }
54 |
55 | module.git "scrub_all" {
56 | repository = "https://github.com/grafana/agent-modules.git"
57 | revision = "main"
58 | path = "modules/kubernetes/logs/scrubs/all.river"
59 |
60 | arguments {
61 | forward_to = [module.git.embed_pod.exports.process.receiver]
62 | }
63 | }
64 |
65 | module.git "embed_pod" {
66 | repository = "https://github.com/grafana/agent-modules.git"
67 | revision = "main"
68 | path = "modules/kubernetes/logs/embed/pod.river"
69 |
70 | arguments {
71 | forward_to = [module.git.label_keep.exports.process.receiver]
72 | }
73 | }
74 |
75 | module.git "label_keep" {
76 | repository = "https://github.com/grafana/agent-modules.git"
77 | revision = "main"
78 | path = "modules/kubernetes/logs/labels/keep-labels.river"
79 |
80 | arguments {
81 | forward_to = [loki.write.destination.receiver] // forward_to must be a list of receivers
82 | keep_labels = [
83 | "app",
84 | "cluster",
85 | "component",
86 | "deployment",
87 | "env",
88 | "instance",
89 | "job",
90 | "level",
91 | "namespace",
92 | "region",
93 | "service",
94 | "squad",
95 | "team",
96 | ]
97 | }
98 | }
99 |
100 | loki.write "destination" {
101 | endpoint {
102 | url = env("DEFAULT_LOKI_ENDPOINT")
103 | basic_auth {
104 | username = env("DEFAULT_TENANT_ID")
105 | password = env("DEFAULT_TENANT_TOKEN")
106 | }
107 | }
108 | }
109 |
--------------------------------------------------------------------------------
/example/kubernetes/logs/single-tenant-specific-log-formats.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following items would need to be defined to include your own specific steps,
3 | this example removes the following modules:
4 |
5 | - masking
6 | - normalize filename
7 |
8 | As well as only supporting the log-formats of logfmt, klog and json
9 | */
10 |
11 | logging {
12 | level = "info"
13 | format = "logfmt"
14 | }
15 |
16 | // get targets
17 | module.git "log_targets" {
18 | repository = "https://github.com/grafana/agent-modules.git"
19 | revision = "main"
20 | path = "modules/kubernetes/logs/targets/logs-from-worker.river"
21 |
22 | arguments {
23 | forward_to = [module.git.log_format_json.exports.process.receiver]
24 | tenant = coalesce(env("DEFAULT_TENANT_NAME"), "primary|")
25 | }
26 | }
27 |
28 | module.git "log_format_json" {
29 | repository = "https://github.com/grafana/agent-modules.git"
30 | revision = "main"
31 | path = "modules/kubernetes/logs/log-formats/json.river"
32 |
33 | arguments {
34 | forward_to = [module.git.log_format_klog.exports.process.receiver]
35 | }
36 | }
37 |
38 | module.git "log_format_klog" {
39 | repository = "https://github.com/grafana/agent-modules.git"
40 | revision = "main"
41 | path = "modules/kubernetes/logs/log-formats/klog.river"
42 |
43 | arguments {
44 | forward_to = [module.git.log_format_logfmt.exports.process.receiver]
45 | }
46 | }
47 |
48 | module.git "log_format_logfmt" {
49 | repository = "https://github.com/grafana/agent-modules.git"
50 | revision = "main"
51 | path = "modules/kubernetes/logs/log-formats/logfmt.river"
52 |
53 | arguments {
54 | forward_to = [module.git.log_level_default.exports.process.receiver]
55 | }
56 | }
57 |
58 | module.git "log_level_default" {
59 | repository = "https://github.com/grafana/agent-modules.git"
60 | revision = "main"
61 | path = "modules/kubernetes/logs/labels/log-level.river"
62 |
63 | arguments {
64 | forward_to = [module.git.drop_levels.exports.process.receiver] // route through drop_levels so the declared drops module actually receives logs
65 | }
66 | }
67 |
68 | module.git "drop_levels" {
69 | repository = "https://github.com/grafana/agent-modules.git"
70 | revision = "main"
71 | path = "modules/kubernetes/logs/drops/levels.river"
72 |
73 | arguments {
74 | forward_to = [module.git.scrub_all.exports.process.receiver]
75 | }
76 | }
77 |
78 | module.git "scrub_all" {
79 | repository = "https://github.com/grafana/agent-modules.git"
80 | revision = "main"
81 | path = "modules/kubernetes/logs/scrubs/all.river"
82 |
83 | arguments {
84 | forward_to = [module.git.embed_pod.exports.process.receiver]
85 | }
86 | }
87 |
88 | module.git "embed_pod" {
89 | repository = "https://github.com/grafana/agent-modules.git"
90 | revision = "main"
91 | path = "modules/kubernetes/logs/embed/pod.river"
92 |
93 | arguments {
94 | forward_to = [module.git.label_keep.exports.process.receiver]
95 | }
96 | }
97 |
98 | module.git "label_keep" {
99 | repository = "https://github.com/grafana/agent-modules.git"
100 | revision = "main"
101 | path = "modules/kubernetes/logs/labels/keep-labels.river"
102 |
103 | arguments {
104 | forward_to = [loki.write.destination.receiver] // forward_to must be a list of receivers
105 | keep_labels = [
106 | "app",
107 | "cluster",
108 | "component",
109 | "deployment",
110 | "env",
111 | "instance",
112 | "job",
113 | "level",
114 | "namespace",
115 | "region",
116 | "service",
117 | "squad",
118 | "team",
119 | ]
120 | }
121 | }
122 |
123 | loki.write "destination" {
124 | endpoint {
125 | url = env("DEFAULT_LOKI_ENDPOINT")
126 | basic_auth {
127 | username = env("DEFAULT_TENANT_ID")
128 | password = env("DEFAULT_TENANT_TOKEN")
129 | }
130 | }
131 | }
132 |
--------------------------------------------------------------------------------
/example/kubernetes/metrics/custom-rewrites.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows specific modules, kubernetes scrapes only, for
3 | a single tenant and specifying the destination url/credentials via environment
4 | variables.
5 | */
6 | logging {
7 | level = coalesce(env("AGENT_LOG_LEVEL"), "info")
8 | format = "logfmt"
9 | }
10 |
11 | module.git "scrape_endpoints" {
12 | repository = "https://github.com/grafana/agent-modules.git" // literal values: argument.* references are only valid inside a module, not a standalone example
13 | revision = "main"
14 | pull_frequency = "15m" // how often to re-pull the repo for module updates
15 | path = "modules/kubernetes/metrics/scrapes/auto-scrape-endpoints.river"
16 |
17 | arguments {
18 | forward_to = [prometheus.relabel.custom.receiver]
19 | }
20 | }
21 |
22 | prometheus.relabel "custom" {
23 | forward_to = [prometheus.remote_write.local_primary.receiver]
24 |
25 | // example rule to drop any go_* metrics
26 | rule {
27 | action = "drop"
28 | source_labels = ["__name__"]
29 | regex = "go_.*"
30 | }
31 |
32 | }
33 |
34 | prometheus.remote_write "local_primary" {
35 | endpoint {
36 | url = env("METRICS_PRIMARY_URL")
37 |
38 | basic_auth {
39 | username = env("METRICS_PRIMARY_TENANT")
40 | password = env("METRICS_PRIMARY_TOKEN")
41 | }
42 |
43 | write_relabel_config {
44 | replacement = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), "")
45 | target_label = "cluster"
46 | }
47 |
48 | write_relabel_config {
49 | replacement = coalesce(env("ENV"), "")
50 | target_label = "env"
51 | }
52 |
53 | write_relabel_config {
54 | replacement = coalesce(env("REGION"), "")
55 | target_label = "region"
56 | }
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/example/kubernetes/metrics/json-exporter.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows using the default all metrics processing module, for
3 | a single tenant and specifying the destination url/credentials via environment
4 | variables.
5 | */
6 | logging {
7 | level = coalesce(env("AGENT_LOG_LEVEL"), "info")
8 | format = "logfmt"
9 | }
10 |
11 | module.git "metrics_primary" {
12 | repository = "https://github.com/grafana/agent-modules.git"
13 | revision = "main"
14 | path = "modules/kubernetes/metrics/all.river"
15 |
16 | arguments {
17 | forward_to = [prometheus.remote_write.local_primary.receiver]
18 | blackbox_url = "blackbox-prometheus-blackbox-exporter.agents.svc.cluster.local:9115"
19 | }
20 | }
21 |
22 | /*
23 | The json exporter modules are not included in the default all.river, as it is not as common. Include the json-services and/or json-ingress module,
24 | and set the forward_to to be the exporter writer from the all.river module or declare a new instance of the prometheus.remote_write component.
25 | Ensure JSON Exporter is installed in the cluster and the json_exporter_url is set to the service name and port of the json exporter.
26 |
27 | Docs: https://github.com/prometheus-community/json_exporter/tree/master
28 | Helm Chart: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-json-exporter
29 | */
30 |
31 | module.git "json_services" {
32 | repository = "https://github.com/grafana/agent-modules.git"
33 | revision = "main"
34 | path = "modules/kubernetes/metrics/scrapes/json-services.river"
35 |
36 | arguments {
37 | forward_to = [prometheus.remote_write.local_primary.receiver]
38 | json_exporter_url = "json-exporter.agents.svc.cluster.local:7979"
39 | }
40 | }
41 |
42 | module.git "json_ingresses" {
43 | repository = "https://github.com/grafana/agent-modules.git"
44 | revision = "main"
45 | path = "modules/kubernetes/metrics/scrapes/json-ingresses.river"
46 |
47 | arguments {
48 | forward_to = [prometheus.remote_write.local_primary.receiver]
49 | json_exporter_url = "json-exporter.agents.svc.cluster.local:7979"
50 | }
51 | }
52 |
53 | prometheus.remote_write "local_primary" {
54 | endpoint {
55 | url = env("METRICS_PRIMARY_URL")
56 |
57 | basic_auth {
58 | username = env("METRICS_PRIMARY_TENANT")
59 | password = env("METRICS_PRIMARY_TOKEN")
60 | }
61 |
62 | write_relabel_config {
63 | replacement = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), "")
64 | target_label = "cluster"
65 | }
66 |
67 | write_relabel_config {
68 | replacement = coalesce(env("ENV"), "")
69 | target_label = "env"
70 | }
71 |
72 | write_relabel_config {
73 | replacement = coalesce(env("REGION"), "")
74 | target_label = "region"
75 | }
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/example/kubernetes/metrics/multi-target-example.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows using the default all metrics processing module, for
3 | a single tenant and specifying the destination url/credentials via environment
4 | variables.
5 | */
6 | logging {
7 | level = coalesce(env("AGENT_LOG_LEVEL"), "info")
8 | format = "logfmt"
9 | }
10 |
11 | module.git "metrics_primary" {
12 | repository = "https://github.com/grafana/agent-modules.git"
13 | revision = "main"
14 | path = "modules/kubernetes/metrics/all.river"
15 |
16 | arguments {
17 | forward_to = [
18 | prometheus.remote_write.local_primary.receiver,
19 | prometheus.remote_write.grafana_cloud.receiver,
20 | ]
21 | blackbox_url = "blackbox-prometheus-blackbox-exporter.agents.svc.cluster.local:9115"
22 | }
23 | }
24 |
25 | prometheus.remote_write "local_primary" {
26 | endpoint {
27 | url = env("METRICS_PRIMARY_URL")
28 |
29 | basic_auth {
30 | username = env("METRICS_PRIMARY_TENANT")
31 | password = env("METRICS_PRIMARY_TOKEN")
32 | }
33 |
34 | write_relabel_config {
35 | replacement = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), "")
36 | target_label = "cluster"
37 | }
38 |
39 | write_relabel_config {
40 | replacement = coalesce(env("ENV"), "")
41 | target_label = "env"
42 | }
43 |
44 | write_relabel_config {
45 | replacement = coalesce(env("REGION"), "")
46 | target_label = "region"
47 | }
48 | }
49 | }
50 |
51 | prometheus.remote_write "grafana_cloud" {
52 | endpoint {
53 | url = "https://prometheus-us-central1.grafana.net/api/prom/push"
54 |
55 | basic_auth {
56 | username = "XXXXXX" // placeholder: replace with your Grafana Cloud stack credentials
57 | password = "XXXXXX"
58 | }
59 |
60 | write_relabel_config {
61 | replacement = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), "")
62 | target_label = "cluster"
63 | }
64 |
65 | write_relabel_config {
66 | replacement = coalesce(env("ENV"), "")
67 | target_label = "env"
68 | }
69 |
70 | write_relabel_config {
71 | replacement = coalesce(env("REGION"), "")
72 | target_label = "region"
73 | }
74 | }
75 | }
--------------------------------------------------------------------------------
/example/kubernetes/metrics/selective-modules.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows specific modules, kubernetes scrapes only, for
3 | a single tenant and specifying the destination url/credentials via environment
4 | variables.
5 | */
6 | logging {
7 | level = coalesce(env("AGENT_LOG_LEVEL"), "info")
8 | format = "logfmt"
9 | }
10 |
11 | module.git "scrape_kubelet_cadvisor" {
12 | repository = "https://github.com/grafana/agent-modules.git"
13 | revision = "main"
14 | path = "modules/kubernetes/metrics/scrapes/kubelet-cadvisor.river"
15 |
16 | arguments {
17 | forward_to = [prometheus.remote_write.local_primary.receiver]
18 | }
19 | }
20 |
21 | module.git "scrape_kubelet" {
22 | repository = "https://github.com/grafana/agent-modules.git"
23 | revision = "main"
24 | path = "modules/kubernetes/metrics/scrapes/kubelet.river"
25 |
26 | arguments {
27 | forward_to = [prometheus.remote_write.local_primary.receiver]
28 | }
29 | }
30 |
31 | module.git "scrape_kubelet_probes" {
32 | repository = "https://github.com/grafana/agent-modules.git"
33 | revision = "main"
34 | path = "modules/kubernetes/metrics/scrapes/kubelet-probes.river"
35 |
36 | arguments {
37 | forward_to = [prometheus.remote_write.local_primary.receiver]
38 | }
39 | }
40 |
41 | module.git "scrape_kube_apiserver" {
42 | repository = "https://github.com/grafana/agent-modules.git"
43 | revision = "main"
44 | path = "modules/kubernetes/metrics/scrapes/kube-apiserver.river"
45 |
46 | arguments {
47 | forward_to = [prometheus.remote_write.local_primary.receiver]
48 | }
49 | }
50 |
51 | prometheus.remote_write "local_primary" {
52 | endpoint {
53 | url = env("METRICS_PRIMARY_URL")
54 |
55 | basic_auth {
56 | username = env("METRICS_PRIMARY_TENANT")
57 | password = env("METRICS_PRIMARY_TOKEN")
58 | }
59 |
60 | write_relabel_config {
61 | replacement = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), "")
62 | target_label = "cluster"
63 | }
64 |
65 | write_relabel_config {
66 | replacement = coalesce(env("ENV"), "")
67 | target_label = "env"
68 | }
69 |
70 | write_relabel_config {
71 | replacement = coalesce(env("REGION"), "")
72 | target_label = "region"
73 | }
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/example/kubernetes/metrics/simple-multi-tenant.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows using the default all metrics processing module, for
3 | multiple tenants and specifying the destination url/credentials via environment
4 | variables.
5 | */
6 | logging {
7 | level = coalesce(env("AGENT_LOG_LEVEL"), "info")
8 | format = "logfmt"
9 | }
10 |
11 | module.git "metrics_primary" {
12 | repository = "https://github.com/grafana/agent-modules.git"
13 | revision = "main"
14 | path = "modules/kubernetes/metrics/all.river"
15 |
16 | arguments {
17 | forward_to = [prometheus.remote_write.local_primary.receiver]
18 | // match anything with the annotation:
19 | // metrics.agent.grafana.com/tenant: primary or the annotation is not set
20 | tenant = "primary|"
21 | blackbox_url = "blackbox-prometheus-blackbox-exporter.agents.svc.cluster.local:9115"
22 | }
23 | }
24 |
25 | module.git "metrics_ops" {
26 | repository = "https://github.com/grafana/agent-modules.git"
27 | revision = "main"
28 | path = "modules/kubernetes/metrics/all.river"
29 |
30 | arguments {
31 | forward_to = [prometheus.remote_write.local_ops.receiver]
32 | // metrics.agent.grafana.com/tenant: ops
33 | tenant = "ops"
34 | blackbox_url = "blackbox-prometheus-blackbox-exporter.agents.svc.cluster.local:9115"
35 | }
36 | }
37 |
38 | prometheus.remote_write "local_primary" {
39 | endpoint {
40 | url = env("METRICS_PRIMARY_URL")
41 |
42 | basic_auth {
43 | username = env("METRICS_PRIMARY_TENANT")
44 | password = env("METRICS_PRIMARY_TOKEN")
45 | }
46 |
47 | write_relabel_config {
48 | replacement = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), "")
49 | target_label = "cluster"
50 | }
51 |
52 | write_relabel_config {
53 | replacement = coalesce(env("ENV"), "")
54 | target_label = "env"
55 | }
56 |
57 | write_relabel_config {
58 | replacement = coalesce(env("REGION"), "")
59 | target_label = "region"
60 | }
61 | }
62 | }
63 |
64 | prometheus.remote_write "local_ops" {
65 | endpoint {
66 | url = env("METRICS_OPS_URL")
67 |
68 | basic_auth {
69 | username = env("METRICS_OPS_TENANT")
70 | password = env("METRICS_OPS_TOKEN")
71 | }
72 |
73 | write_relabel_config {
74 | replacement = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), "")
75 | target_label = "cluster"
76 | }
77 |
78 | write_relabel_config {
79 | replacement = coalesce(env("ENV"), "")
80 | target_label = "env"
81 | }
82 |
83 | write_relabel_config {
84 | replacement = coalesce(env("REGION"), "")
85 | target_label = "region"
86 | }
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
/example/kubernetes/metrics/simple-single-tenant.river:
--------------------------------------------------------------------------------
1 | /*
2 | The following example shows using the default all metrics processing module, for
3 | a single tenant and specifying the destination url/credentials via environment
4 | variables.
5 | */
6 | logging {
7 | level = coalesce(env("AGENT_LOG_LEVEL"), "info")
8 | format = "logfmt"
9 | }
10 |
11 | module.git "metrics_primary" {
12 | repository = "https://github.com/grafana/agent-modules.git"
13 | revision = "main"
14 | path = "modules/kubernetes/metrics/all.river"
15 |
16 | arguments {
17 | forward_to = [prometheus.remote_write.local_primary.receiver]
18 | blackbox_url = "blackbox-prometheus-blackbox-exporter.agents.svc.cluster.local:9115"
19 | }
20 | }
21 |
22 | prometheus.remote_write "local_primary" {
23 | endpoint {
24 | url = env("METRICS_PRIMARY_URL")
25 |
26 | basic_auth {
27 | username = env("METRICS_PRIMARY_TENANT")
28 | password = env("METRICS_PRIMARY_TOKEN")
29 | }
30 |
31 | write_relabel_config {
32 | replacement = coalesce(env("CLUSTER_NAME"), env("CLUSTER"), "")
33 | target_label = "cluster"
34 | }
35 |
36 | write_relabel_config {
37 | replacement = coalesce(env("ENV"), "")
38 | target_label = "env"
39 | }
40 |
41 | write_relabel_config {
42 | replacement = coalesce(env("REGION"), "")
43 | target_label = "region"
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/modules/grafana-agent/dynamic-blackbox/README.md:
--------------------------------------------------------------------------------
1 | # Grafana Agent Dynamic Blackbox Exporter
2 |
3 | A module to add a dynamic target list to blackbox exporter.
4 |
5 | The `prometheus.exporter.blackbox` component expects a series of target blocks as part of its configuration and doesn't accept a list of targets from another component.
6 |
7 | This module allows a list of targets from another component to be used by blackbox exporter for scraping.
8 |
9 | ## Agent Version
10 |
11 | `>= v0.35`
12 |
13 | ## Module arguments
14 |
15 | The following arguments are supported when passing arguments to the module loader:
16 |
17 | | Name | Type | Description | Default | Required
18 | | ---- | ---- | ----------- | ------- | --------
19 | | `targets` | `list(map(string))` | List of targets for blackbox | | yes
20 | | `target_label` | `string` | Metric label the original target label will be added to | `"address"` | no
21 | | `config_file` | `string` or `secret` | blackbox_exporter configuration file path | | no
22 | | `config` | `secret` | blackbox_exporter configuration as inline string | | no
23 | | `probe_timeout_offset` | `duration` | Offset in seconds to subtract from timeout when probing targets | `"0.5s"` | no
24 |
25 | The arguments `config` and `config_file` are mutually exclusive. If neither are specified, a default config is used.
26 |
27 | The `config_file` argument points to a YAML file defining which blackbox_exporter modules to use.
28 | The `config` argument must be a YAML document as string defining which blackbox_exporter modules to use.
29 | `config` is typically loaded by using the exports of another component. For example,
30 |
31 | - `local.file.LABEL.content`
32 | - `remote.http.LABEL.content`
33 | - `remote.s3.LABEL.content`
34 |
35 | See [blackbox_exporter](https://github.com/prometheus/blackbox_exporter/blob/master/example.yml) for details on how to generate a config file.
36 |
37 | ## Module exports
38 |
39 | The following fields are exported by the module:
40 |
41 | | Name | Type | Description
42 | | ---- | ---- | -----------
43 | | `targets` | `list(map(string))` | The targets that can be used to collect blackbox metrics.
44 |
45 | ## Example
46 |
47 | ```
48 | discovery.file "targets" {
49 | files = ["targets.yml"]
50 | }
51 |
52 | module.git "blackbox" {
53 | repository = "https://github.com/grafana/agent-modules.git"
54 | revision = "main"
55 | path = "modules/grafana-agent/dynamic-blackbox/module.river"
56 |
57 | arguments {
58 | config = "{ modules: { tcps: { prober: tcp, tcp: { tls: true, tls_config: { insecure_skip_verify: true } } } } }"
59 | targets = discovery.file.targets.targets
60 | }
61 | }
62 |
63 | prometheus.scrape "scrape" {
64 | targets = module.git.blackbox.exports.targets
65 | forward_to = [ prometheus.remote_write.main.receiver ]
66 | }
67 |
68 | prometheus.remote_write "main" {
69 | endpoint {
70 | url = "http://url/to/push"
71 | }
72 | }
73 | ```
74 |
75 | Add a `__param_module` label in the targets.yml to select the blackbox module to use with a given target.
76 |
77 | ```
78 | ---
79 | - labels:
80 | type: external
81 | __param_module: tcps
82 | targets:
83 | - grafana.com:443
84 | - prometheus.io:443
85 | ```
--------------------------------------------------------------------------------
/modules/grafana-agent/dynamic-blackbox/module.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * ARGUMENTS
3 | ********************************************/
4 | argument "targets" { }
5 |
6 | argument "target_label" {
7 | optional = true
8 | default = "address"
9 | }
10 |
11 | argument "config_file" {
12 | optional = true
13 | }
14 |
15 | argument "config" {
16 | optional = true
17 | }
18 |
19 | argument "probe_timeout_offset" {
20 | optional = true
21 | }
22 |
23 | /********************************************
24 | * EXPORTS
25 | ********************************************/
26 | export "targets" {
27 | value = discovery.relabel.default.output
28 | }
29 |
30 | /********************************************
31 | * LOGIC
32 | ********************************************/
33 |
34 | prometheus.exporter.blackbox "base" {
35 | config_file = argument.config_file.value
36 | config = argument.config.value
37 | probe_timeout_offset = argument.probe_timeout_offset.value
38 | target {
39 | name = "dummy"
40 | address = "dummy"
41 | }
42 | }
43 |
44 | discovery.relabel "default" {
45 | targets = argument.targets.value
46 |
47 | rule {
48 | source_labels = ["__address__"]
49 | target_label = "__param_target"
50 | }
51 |
52 | rule {
53 | source_labels = ["__address__"]
54 | target_label = argument.target_label.value
55 | }
56 |
57 | rule {
58 | target_label = "__address__"
59 | replacement = prometheus.exporter.blackbox.base.targets[0].__address__
60 | }
61 |
62 | rule {
63 | target_label = "__metrics_path__"
64 | replacement = prometheus.exporter.blackbox.base.targets[0].__metrics_path__
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/modules/grafana-agent/telemetry-to-lgtm/README.md:
--------------------------------------------------------------------------------
1 | # Grafana Agent Telemetry to LGTM Stack Module
2 |
3 | Module to forward the Grafana Agent's own telemetry data to Loki, Mimir and Tempo stacks locally or in GrafanaCloud.
4 |
5 | ## Agent Version
6 |
7 | `>= v0.36`
8 |
9 | ## Module arguments
10 |
11 | The following arguments are supported when passing arguments to the module
12 | loader:
13 |
14 | | Name | Type | Description | Default | Required
15 | | ---- | ---- | ----------- | ------- | --------
16 | | `grafana_agent_port` | `string` | The port the Grafana Agent is running on. | `"12345"` | no
17 | | `prometheus_endpoint` | `receiver` | The Prometheus remote write endpoint. | | yes
18 | | `prometheus_user` | `string` | The Prometheus remote write basic auth username. | | yes
19 | | `prometheus_password` | `secret` | The Prometheus remote write basic auth password. | | yes
20 | | `loki_endpoint` | `string` | Loki endpoint | | yes
21 | | `loki_user` | `string` | Loki basic auth username. | | yes
22 | | `loki_password` | `secret` | Loki basic auth password. | | yes
23 | | `tempo_endpoint` | `string` | The Tempo endpoint. | | yes
24 | | `tempo_user` | `string` | Tempo basic auth username. | | yes
25 | | `tempo_password` | `secret` | Tempo basic auth password. | | yes
26 |
27 | ## Module exports
28 |
29 | The following fields are exported by the module:
30 |
31 | | Name | Type | Description
32 | | ---- | ---- | -----------
33 | | `log_receiver` | `LogsReceiver` | A logs receiver that other components can use to send telemetry data to.
34 | | `trace_input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
35 |
36 | ## Example
37 |
38 | ```
39 | logging {
40 | level = "info"
41 | format = "logfmt"
42 | write_to = [module.git.agent_telemetry.exports.log_receiver]
43 | }
44 |
45 | tracing {
46 | sampling_fraction = 1
47 | write_to = [module.git.agent_telemetry.exports.trace_input]
48 | }
49 |
50 | module.git "agent_telemetry" {
51 | repository = "https://github.com/grafana/agent-modules.git"
52 | revision = "main"
53 | path = "modules/grafana-agent/telemetry-to-lgtm/module.river"
54 |
55 | arguments {
56 | prometheus_endpoint = "https://prometheus-us-central1.grafana.net/api/prom/push"
57 | prometheus_user = "123456"
58 | prometheus_password = env("GRAFANA_CLOUD_KEY")
59 |
60 | loki_endpoint = "https://logs-prod-us-central1.grafana.net/loki/api/v1/push"
61 | loki_user = "1234567"
62 | loki_password = env("GRAFANA_CLOUD_KEY")
63 |
64 | tempo_endpoint = "tempo-us-central1.grafana.net:443"
65 | tempo_user = "1234"
66 | tempo_password = env("GRAFANA_CLOUD_KEY")
67 | }
68 | }
69 | ```
70 |
--------------------------------------------------------------------------------
/modules/grafana-agent/telemetry-to-lgtm/module.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * ARGUMENTS
3 | ********************************************/
4 | argument "prometheus_endpoint" { }
5 |
6 | argument "prometheus_user" { }
7 |
8 | argument "prometheus_password" { }
9 |
10 | argument "loki_endpoint" { }
11 |
12 | argument "loki_user" { }
13 |
14 | argument "loki_password" { }
15 |
16 | argument "tempo_endpoint" { }
17 |
18 | argument "tempo_user" { }
19 |
20 | argument "tempo_password" { }
21 |
22 | argument "grafana_agent_port" {
23 | optional = true
24 | default = "12345"
25 | }
26 |
27 | /********************************************
28 | * EXPORTS
29 | ********************************************/
30 | export "log_receiver" {
31 | value = loki.write.default.receiver
32 | }
33 |
34 | export "trace_input" {
35 | value = otelcol.processor.batch.default.input
36 | }
37 |
38 | /********************************************
39 | * AGENT METRICS
40 | ********************************************/
41 | prometheus.remote_write "default" {
42 | endpoint {
43 | url = argument.prometheus_endpoint.value
44 |
45 | basic_auth {
46 | username = argument.prometheus_user.value
47 | password = argument.prometheus_password.value
48 | }
49 | }
50 | }
51 |
52 | prometheus.scrape "default" {
53 | targets = [{"__address__" = "0.0.0.0:" + argument.grafana_agent_port.value}]
54 | forward_to = [prometheus.remote_write.default.receiver]
55 | scrape_interval = "10s"
56 | }
57 |
58 | /********************************************
59 | * AGENT LOGGING
60 | ********************************************/
61 | loki.write "default" {
62 | endpoint {
63 | url = argument.loki_endpoint.value
64 |
65 | basic_auth {
66 | username = argument.loki_user.value
67 | password = argument.loki_password.value
68 | }
69 | }
70 | }
71 |
72 | /********************************************
73 | * AGENT TRACING
74 | ********************************************/
75 | otelcol.processor.batch "default" {
76 | output {
77 | traces = [otelcol.processor.memory_limiter.default.input]
78 | }
79 | }
80 |
81 | otelcol.processor.memory_limiter "default" {
82 | check_interval = "1s"
83 |
84 | limit = "150MiB"
85 |
86 | output {
87 | traces = [otelcol.exporter.otlp.default.input]
88 | }
89 | }
90 |
91 | otelcol.auth.basic "default" {
92 | username = argument.tempo_user.value
93 | password = argument.tempo_password.value
94 | }
95 |
96 | otelcol.exporter.otlp "default" {
97 | client {
98 | endpoint = argument.tempo_endpoint.value
99 | auth = otelcol.auth.basic.default.handler
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
/modules/grafana-cloud/autoconfigure/README.md:
--------------------------------------------------------------------------------
1 | # Grafana Cloud Autoconfigure Module
2 |
3 | Module to automatically configure receivers for Grafana Cloud.
4 |
5 | ## Agent Version
6 |
7 | `>= v0.34`
8 |
9 | ## Module arguments
10 |
11 | The following arguments are supported when passing arguments to the module
12 | loader:
13 |
14 | | Name | Type | Description | Default | Required
15 | | ---- | ---- | ----------- | ------- | --------
16 | | `stack_name` | `string` | Name of your stack as shown in the account console | | yes
17 | | `token` | `secret` | Access policy token or API Key. | | yes
18 |
19 | To create a token:
20 | 1. Navigate to the [Grafana Cloud Portal](https://grafana.com/profile/org)
21 | 1. Go to either the `Access Policies` or `API Keys` page, located in the `Security` section
22 | 1. Create an Access Policy or API token with the correct permissions
23 |
24 | The token must have permissions to read stack information. The setup of these permissions depends on the type of token:
25 |
26 | * Access Policies need the `stacks:read` scope
27 | * API Keys need at least the `MetricsPublisher` role
28 |
29 | ## Module exports
30 |
31 | The following fields are exported by the module:
32 |
33 | | Name | Type | Description
34 | | ---- | ---- | -----------
35 | | `metrics_receiver` | `prometheus.Interceptor` | A value that other components can use to send metrics data to.
36 | | `logs_receiver` | `loki.LogsReceiver` | A value that other components can use to send logs data to.
37 | | `traces_receiver` | `otelcol.Consumer` | A value that other components can use to send trace data to.
38 | | `profiles_receiver` | `write.fanOutClient` | A value that other components can use to send profiling data to.
39 | | `stack_information` | `object` | Decoded representation of the [Stack info endpoint](https://grafana.com/docs/grafana-cloud/api-reference/cloud-api/#stacks).
40 |
41 | ## Example
42 |
43 | ```
44 | module.git "grafana_cloud" {
45 | repository = "https://github.com/grafana/agent-modules.git"
46 | revision = "main"
47 | path = "modules/grafana-cloud/autoconfigure/module.river"
48 |
49 | arguments {
50 | stack_name = ""
51 | token = ""
52 | }
53 | }
54 |
55 | prometheus.scrape "default" {
56 | targets = [
57 | {"__address__" = "127.0.0.1:12345"},
58 | ]
59 | forward_to = [
60 | module.git.grafana_cloud.exports.metrics_receiver,
61 | ]
62 | }
63 | ```
64 |
--------------------------------------------------------------------------------
/modules/grafana-cloud/autoconfigure/module.river:
--------------------------------------------------------------------------------
1 | /********************************************
2 | * ARGUMENTS
3 | ********************************************/
4 | argument "stack_name" { }
5 |
6 | argument "token" {}
7 |
8 | /********************************************
9 | * EXPORTS
10 | ********************************************/
11 |
12 | export "metrics_receiver" {
13 | value = prometheus.remote_write.default.receiver
14 | }
15 |
16 | export "logs_receiver" {
17 | value = loki.write.default.receiver
18 | }
19 |
20 | export "traces_receiver" {
21 | value = otelcol.exporter.otlp.default.input
22 | }
23 |
24 | export "profiles_receiver" {
25 | value = pyroscope.write.default.receiver
26 | }
27 |
28 | export "stack_information" {
29 | value = json_decode(remote.http.config_file.content)
30 | }
31 |
32 | /********************************************
33 | * External information
34 | ********************************************/
35 |
36 | remote.http "config_file" {
37 | url = "https://grafana.com/api/instances/" + argument.stack_name.value
38 | client {
39 | bearer_token = argument.token.value
40 | }
41 | poll_frequency = "24h"
42 | }
43 |
44 | /********************************************
45 | * Endpoints
46 | ********************************************/
47 |
48 | // Metrics
49 | prometheus.remote_write "default" {
50 | endpoint {
51 | url = json_decode(remote.http.config_file.content)["hmInstancePromUrl"] + "/api/prom/push"
52 |
53 | basic_auth {
54 | username = json_decode(remote.http.config_file.content)["hmInstancePromId"]
55 | password = argument.token.value
56 | }
57 | }
58 | }
59 |
60 | // Logs
61 | loki.write "default" {
62 | endpoint {
63 | url = json_decode(remote.http.config_file.content)["hlInstanceUrl"] + "/loki/api/v1/push"
64 |
65 | basic_auth {
66 | username = json_decode(remote.http.config_file.content)["hlInstanceId"]
67 | password = argument.token.value
68 | }
69 | }
70 | }
71 |
72 | // Traces
73 | otelcol.auth.basic "default" {
74 | username = json_decode(remote.http.config_file.content)["htInstanceId"]
75 | password = argument.token.value
76 | }
77 |
78 | otelcol.exporter.otlp "default" {
79 | client {
80 | endpoint = json_decode(remote.http.config_file.content)["htInstanceUrl"] + ":443"
81 | auth = otelcol.auth.basic.default.handler
82 | }
83 | }
84 |
85 | // Profiles
86 | pyroscope.write "default" {
87 | endpoint {
88 | url = json_decode(remote.http.config_file.content)["hpInstanceUrl"]
89 |
90 | basic_auth {
91 | username = json_decode(remote.http.config_file.content)["hpInstanceId"]
92 | password = argument.token.value
93 | }
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/modules/host-filter/README.md:
--------------------------------------------------------------------------------
1 | # Host filtering module
2 |
3 | The host filtering module provides a Flow mode equivalent to static mode's
4 | [host filtering][] functionality.
5 |
6 | [host filtering]: https://grafana.com/docs/agent/latest/static/operation-guide/#host-filtering-beta
7 |
8 | ## Agent Version
9 |
10 | `>= v0.34`
11 |
12 | ## Module arguments
13 |
14 | The following arguments are supported when passing arguments to the module
15 | loader:
16 |
17 | | Name | Type | Description | Default | Required
18 | | ---- | ---- | ----------- | ------- | --------
19 | | `targets` | `list(map(string))` | Targets to filter. | | yes
20 | | `hostname` | `string` | Hostname to use for filtering. | _See below_ | no
21 |
22 | The `targets` argument determines the set of targets to perform host filtering
23 | against. The following labels are used for host filtering:
24 |
25 | * `__meta_consul_node`
26 | * `__meta_dockerswarm_node_id`
27 | * `__meta_dockerswarm_node_hostname`
28 | * `__meta_dockerswarm_node_address`
29 | * `__meta_kubernetes_pod_node_name`
30 | * `__meta_kubernetes_node_name`
31 | * `__host__`
32 |
33 | Targets are kept if the target has one of the above labels set to one of the
34 | following values:
35 |
36 | * `localhost` or `127.0.0.1`
37 | * The value of the `hostname` argument.
38 |
39 | The `hostname` argument defaults to the first of the following:
40 |
41 | * The `HOSTNAME` environment variable, if set.
42 | * The system-reported hostname.
43 |
44 | ## Module exports
45 |
46 | The following exports are exposed and can be used:
47 |
48 | | Name | Type | Description
49 | | ---- | ---- | -----------
50 | | `output` | `list(map(string))` | Filtered targets.
51 |
52 | ## Example
53 |
54 | This example scrapes Kubernetes pods which are running on the same Kubernetes
55 | Node as Grafana Agent:
56 |
57 | ```river
58 | discovery.kubernetes "pods" {
59 | role = "pod"
60 | }
61 |
62 | module.git "host_filter" {
63 | repository = "https://github.com/grafana/agent-modules.git"
64 | revision = "main"
65 | path = "modules/host-filter/module.river"
66 |
67 | arguments {
68 | targets = discovery.kubernetes.pods.targets
69 | }
70 | }
71 |
72 | prometheus.scrape "pods" {
73 | targets = module.git.host_filter.exports.output
74 | forward_to = [prometheus.remote_write.example.receiver]
75 | }
76 |
77 | prometheus.remote_write "example" {
78 | endpoint {
79 | url = PROMETHEUS_URL
80 | }
81 | }
82 | ```
83 |
--------------------------------------------------------------------------------
/modules/host-filter/module.river:
--------------------------------------------------------------------------------
1 | argument "targets" { }
2 |
3 | argument "hostname" {
4 | optional = true
5 |
6 | // Match static mode's behavior for how the hostname was determined, where
7 | // the $HOSTNAME environment variable took precedence over the
8 | // machine-reported hostname.
9 | default = coalesce(
10 | env("HOSTNAME"),
11 | constants.hostname,
12 | )
13 | }
14 |
15 | export "output" {
16 | value = discovery.relabel.host_filter.output
17 | }
18 |
19 | discovery.relabel "host_filter" {
20 | targets = argument.targets.value
21 |
22 | // Provide a set of labels which may indicate that the target comes from
23 | // the same host as the one Grafana Agent is running on.
24 | rule {
25 | source_labels = [
26 | // Labels from Consul SD.
27 | "__meta_consul_node",
28 |
29 | // Labels from Docker Swarm SD.
30 | "__meta_dockerswarm_node_id",
31 | "__meta_dockerswarm_node_hostname",
32 | "__meta_dockerswarm_node_address",
33 |
34 | // Labels from Kubernetes SD. Labels for `role: service` are omitted as
35 | // service targets have labels merged with discovered pods.
36 | "__meta_kubernetes_pod_node_name",
37 | "__meta_kubernetes_node_name",
38 |
39 | // Custom host label.
40 | "__host__",
41 | ]
42 |
43 | // Our in-memory string will be something like A;B;C;D;E;F, where any of
44 | // the letters could be replaced with a label value or be empty if the
45 | // label value did not exist.
46 | //
47 | // We want to search for one of the following:
48 | //
49 | // - localhost or 127.0.0.1
50 | // - The hostname to check against.
51 | //
52 | // Where that text is either preceded by a semicolon (;B) or the start of the
53 | // string (A), and succeeded by a semicolon (B;) or the end of the string (F).
54 | regex = ".*(?:^|;)(localhost|127\\.0\\.0\\.1|" + argument.hostname.value + ")(?:;|$).*"
55 |
56 | action = "keep"
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/modules/k8s_api/README.md:
--------------------------------------------------------------------------------
1 | # k8s_api
2 |
3 | The `k8s_api` module collects Kubernetes API server metrics and forwards them
4 | to a Prometheus-compatible Grafana Agent Flow component.
5 |
6 | > **NOTE**: `k8s_api` must be used with a module loader which can pass arguments
7 | > to loaded modules, such as `module.git`.
8 |
9 | ## Agent Version
10 |
11 | `>= v0.34`
12 |
13 | ## Module arguments
14 |
15 | The following arguments are supported when passing arguments to the module
16 | loader:
17 |
18 | | Name | Type | Description | Default | Required
19 | | ---- | ---- | ----------- | ------- | --------
20 | | `forward_metrics_to` | `list(MetricsReceiver)` | Receivers to forward collected metrics to. | | yes
21 | | `scrape_interval` | `duration` | How often to collect metrics. | `"60s"` | no
22 | | `scrape_timeout` | `duration` | Timeout period for collecting metrics. | `"10s"` | no
23 |
24 | `k8s_api` uses in-cluster authentication for connecting to Kubernetes, and
25 | expects to be running inside the Kubernetes cluster.
26 |
27 | ## Module exports
28 |
29 | `k8s_api` does not export any fields.
30 |
31 | ## Example
32 |
33 | This example uses the `module.git` loader to run the module and forward metrics
34 | to a [`prometheus.remote_write` component][prometheus.remote_write]:
35 |
36 | ```river
37 | module.git "k8s_api" {
38 | repository = "https://github.com/grafana/agent-modules.git"
39 | revision = "main"
40 | path = "modules/k8s_api/module.river"
41 |
42 | arguments {
43 | forward_metrics_to = [prometheus.remote_write.default.receiver]
44 | }
45 | }
46 |
47 | prometheus.remote_write "default" {
48 | endpoint {
49 | url = env("PROMETHEUS_URL")
50 | }
51 | }
52 | ```
53 |
54 | [prometheus.remote_write]: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write
--------------------------------------------------------------------------------
/modules/k8s_pods/README.md:
--------------------------------------------------------------------------------
1 | # k8s_pods
2 |
3 | The `k8s_pods` module collects metrics and logs from Kubernetes Pods and
4 | forwards them to a Prometheus-compatible Grafana Agent Flow component.
5 |
6 | Kubernetes pods must follow a convention to be discovered; see [Module
7 | arguments](#module-arguments) for specifics.
8 |
9 | > **NOTE**: `k8s_pods` must be used with a module loader which can pass
10 | > arguments to loaded modules, such as `module.git`.
11 |
12 | ## Agent Version
13 |
14 | `>= v0.34`
15 |
16 | ## Module arguments
17 |
18 | The following arguments are supported when passing arguments to the module
19 | loader:
20 |
21 | | Name | Type | Description | Default | Required
22 | | ---- | ---- | ----------- | ------- | --------
23 | | `forward_metrics_to` | `list(MetricsReceiver)` | Receivers to forward collected metrics to. | | yes
24 | | `forward_logs_to` | `list(LogsReceiver)` | Receivers to forward collected logs to. | | yes
25 | | `scrape_interval` | `duration` | How often to collect metrics. | `"60s"` | no
26 | | `scrape_timeout` | `duration` | Timeout period for collecting metrics. | `"10s"` | no
27 |
28 | `k8s_pods` uses in-cluster authentication for connecting to Kubernetes, and
29 | expects to be running inside the Kubernetes cluster.
30 |
31 | `k8s_pods` will collect logs from all discovered Pods in the cluster. Metrics
32 | will only be collected from Pods that have a port name ending in `-metrics`.
33 |
34 | All telemetry data will have the following labels:
35 |
36 | * `job`: set to `POD_NAMESPACE/POD_SERVICE_NAME`. `POD_SERVICE_NAME` is
37 | set to the value of the first of the following that is set:
38 | * A label named `k8s-app`.
39 | * A label named `app`.
40 | * A label named `name`.
41 | * A combination of the `kubernetes.io/instance` and `kubernetes.io/name`
42 | labels, concatenated with a hyphen.
43 | * The pod controller name.
44 | * The pod name.
45 | * `namespace`: set to `POD_NAMESPACE`.
46 | * `pod`: set to `POD_NAME`.
47 | * `container`: set to `POD_CONTAINER_NAME`.
48 | * `app`: set to the value of the `app` label if present.
49 | * `name`: set to the value of the `app.kubernetes.io/name` label if present.
50 |
51 | Additionally, when collecting metrics, the `instance` label is set to
52 | `POD_NAME:POD_CONTAINER_NAME:POD_CONTAINER_PORT_NAME`.
53 |
54 | ## Module exports
55 |
56 | `k8s_pods` does not export any fields.
57 |
58 | ## Example
59 |
60 | This example uses the `module.git` loader to run the module and forward metrics
61 | to a [`prometheus.remote_write` component][prometheus.remote_write] and forward
62 | logs to a [`loki.write` component][loki.write]:
63 |
64 | ```river
65 | module.git "k8s_pods" {
66 | repository = "https://github.com/grafana/agent-modules.git"
67 | revision = "main"
68 | path = "modules/k8s_pods/module.river"
69 |
70 | arguments {
71 | forward_metrics_to = [prometheus.remote_write.default.receiver]
72 | forward_logs_to = [loki.write.default.receiver]
73 | }
74 | }
75 |
76 | prometheus.remote_write "default" {
77 | endpoint {
78 | url = env("PROMETHEUS_URL")
79 | }
80 | }
81 |
82 | loki.write "default" {
83 | endpoint {
84 | url = env("LOKI_URL")
85 | }
86 | }
87 | ```
88 |
89 | [prometheus.remote_write]: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write
90 | [loki.write]: https://grafana.com/docs/agent/latest/flow/reference/components/loki.write
--------------------------------------------------------------------------------
/modules/kubernetes/logs/drops/level-debug.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: drop-debug
3 | Description: The default behavior is to drop debug level messages automatically, however, debug level
4 | messages can still be logged by adding the annotation:
5 |
6 | logs.agent.grafana.com/drop-debug: false
7 | */
8 | argument "forward_to" {
9 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
10 | optional = false
11 | }
12 |
13 | export "process" {
14 | value = loki.process.drop_debug
15 | }
16 |
17 | loki.process "drop_debug" {
18 | forward_to = argument.forward_to.value
19 |
20 | // check logs.agent.grafana.com/drop-debug annotation, if not set or set to true then drop
21 | // any log message with level=debug
22 | stage.match {
23 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/drop-debug: true"
24 | selector = "{level=~\"(?i)debug?\",logs_agent_grafana_com_drop_debug!=\"false\"}"
25 | action = "drop"
26 | drop_counter_reason = "debug"
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/drops/level-info.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: drop-info
3 | Description: The default behavior is to keep info level messages automatically, however, info level
4 | messages can be dropped by adding the annotation:
5 |
6 | logs.agent.grafana.com/drop-info: true
7 | */
8 | argument "forward_to" {
9 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
10 | optional = false
11 | }
12 |
13 | export "process" {
14 | value = loki.process.drop_info
15 | }
16 |
17 | loki.process "drop_info" {
18 | forward_to = argument.forward_to.value
19 |
20 | // check logs.agent.grafana.com/drop-info annotation, if set to true then drop
21 | // any log message with level=info
22 | stage.match {
23 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/drop-info: true"
24 | selector = "{level=~\"(?i)info?\",logs_agent_grafana_com_drop_info=\"true\"}"
25 | action = "drop"
26 | drop_counter_reason = "info"
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/drops/level-trace.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: drop-trace
3 | Description: The default behavior is to drop trace level messages automatically, however, trace level
4 | messages can still be logged by adding the annotation:
5 |
6 | logs.agent.grafana.com/drop-trace: false
7 | */
8 | argument "forward_to" {
9 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
10 | optional = false
11 | }
12 |
13 | export "process" {
14 | value = loki.process.drop_trace
15 | }
16 |
17 | loki.process "drop_trace" {
18 | forward_to = argument.forward_to.value
19 |
20 | // check logs.agent.grafana.com/drop-trace annotation, if not set or set to true then drop
21 | // any log message with level=trace
22 | stage.match {
23 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/drop-trace: true"
24 | selector = "{level=~\"(?i)trace?\",logs_agent_grafana_com_drop_trace!=\"false\"}"
25 | action = "drop"
26 | drop_counter_reason = "trace"
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/drops/levels.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: drop-levels
3 | Description: Wrapper module to include all drop level modules
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "git_repo" {
11 | optional = true
12 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
13 | }
14 |
15 | argument "git_rev" {
16 | optional = true
17 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
18 | }
19 |
20 | argument "git_pull_freq" {
21 | optional = true
22 | default = "5m"
23 | }
24 |
25 | export "process" {
26 | value = module.git.drop_trace.exports.process
27 | }
28 |
29 | module.git "drop_trace" {
30 | repository = argument.git_repo.value
31 | revision = argument.git_rev.value
32 | pull_frequency = argument.git_pull_freq.value
33 | path = "modules/kubernetes/logs/drops/level-trace.river"
34 |
35 | arguments {
36 | forward_to = [module.git.drop_debug.exports.process.receiver]
37 | }
38 | }
39 |
40 | module.git "drop_debug" {
41 | repository = argument.git_repo.value
42 | revision = argument.git_rev.value
43 | pull_frequency = argument.git_pull_freq.value
44 | path = "modules/kubernetes/logs/drops/level-debug.river"
45 |
46 | arguments {
47 | forward_to = [module.git.drop_info.exports.process.receiver]
48 | }
49 | }
50 |
51 | module.git "drop_info" {
52 | repository = argument.git_repo.value
53 | revision = argument.git_rev.value
54 | pull_frequency = argument.git_pull_freq.value
55 | path = "modules/kubernetes/logs/drops/level-info.river"
56 |
57 | arguments {
58 | forward_to = argument.forward_to.value
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/embed/pod.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: embed-pod
3 | Description: Embeds the name of the pod into the json or text log line
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | export "process" {
11 | value = loki.process.embed_pod
12 | }
13 |
14 | loki.process "embed_pod" {
15 | forward_to = argument.forward_to.value
16 |
17 | // check logs.agent.grafana.com/embed-pod annotation, if true embed the name of the pod to the end of the log line
18 | // this can reduce the overall cardinality, by not using a label of "pod", individual pods can still be searched
19 | // using a line selector i.e. __pod=your-pod-name
20 | stage.match {
21 | selector = "{logs_agent_grafana_com_embed_pod=~\"(?i)true\"}"
22 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/embed-pod: true"
23 |
24 | // embed as json property
25 | stage.match {
26 | selector = "{logs_agent_grafana_com_log_format=~\"(?i)(.*json|istio|otel|open-?telemetry)\"}"
27 | // render a new label called log_line, and add the name of the pod to the end of the log message
28 | // knowing the pod name can be valuable for debugging, but it should not be a label in Loki due
29 | // to the high cardinality it would create.
30 | // note: .Entry is a special key that is used to reference the current line
31 | stage.replace {
32 | expression = "\\}$"
33 | replace = ""
34 | }
35 | stage.template {
36 | source = "log_line"
37 | template = "{{ .Entry }},\"__pod\":\"{{ .pod }}\"}"
38 | }
39 | }
40 |
41 | // embed as text property
42 | stage.match {
43 | selector = "{logs_agent_grafana_com_log_format!~\"(?i)(.*json|istio|otel|open-?telemetry)\"}"
44 | // render a new label called log_line, and add the name of the pod to the end of the log message
45 | // knowing the pod name can be valuable for debugging, but it should not be a label in Loki due
46 | // to the high cardinality it would create.
47 | // note: .Entry is a special key that is used to reference the current line
48 | stage.template {
49 | source = "log_line"
50 | template = "{{ .Entry }} __pod={{ .pod }}"
51 | }
52 | }
53 |
54 | // reset the output to the log_line
55 | stage.output {
56 | source = "log_line"
57 | }
58 |
59 | }
60 |
61 | }
62 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/labels/keep-labels.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: keep-labels
3 | Description: Pre-defined set of labels to keep, this stage should always be in-place as the previous relabeling
4 | stages make every pod label and annotation a label in the pipeline, which we do not want created
5 | in Loki as that would have extremely high-cardinality.
6 | */
7 | argument "forward_to" {
8 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
9 | optional = false
10 | }
11 |
12 | argument "keep_labels" {
13 | optional = true
14 | // comment = "List of labels to keep before the log message is written to Loki"
15 | default = [
16 | "app",
17 | "cluster",
18 | "component",
19 | "container",
20 | "deployment",
21 | "env",
22 | "filename",
23 | "instance",
24 | "job",
25 | "level",
26 | "log_type",
27 | "namespace",
28 | "region",
29 | "service",
30 | "squad",
31 | "team",
32 | ]
33 | }
34 |
35 | export "process" {
36 | value = loki.process.keep_labels
37 | }
38 |
39 | /*
40 | As all of the pod labels and annotations we transformed into labels in the previous relabelings to make
41 | them available to the pipeline processing we need to ensure they are not automatically created in Loki.
42 | This would result in an extremely high number of labels and values severely impacting query performance.
43 | Not every log has to contain these labels, but this list should reflect the set of labels that you want
44 | to explicitly allow.
45 | */
46 | loki.process "keep_labels" {
47 | forward_to = argument.forward_to.value
48 |
49 | stage.label_keep {
50 | values = argument.keep_labels.value
51 | }
52 |
53 | }
54 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/labels/log-level.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-level
3 | Description: Sets a default log level of "unknown", then based on known patterns attempts to assign an appropriate
4 | log level based on the contents of the log line. This should be considered as default/initial processing
5 | as there are modules for parsing specific log patterns.
6 | */
7 | argument "forward_to" {
8 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
9 | optional = false
10 | }
11 |
12 | export "process" {
13 | value = loki.process.log_level
14 | }
15 |
16 | loki.process "log_level" {
17 | forward_to = argument.forward_to.value
18 |
19 | // if a log level is not set, default it to unknown
20 | stage.match {
21 | selector = "{level=\"\"}"
22 |
23 | // default level to unknown
24 | stage.static_labels {
25 | values = {
26 | level = "unknown",
27 | }
28 | }
29 |
30 | }
31 |
32 | // if a log_type is not set, default it to unknown
33 | stage.match {
34 | selector = "{log_type=\"\"}"
35 |
36 | // default log_type to unknown
37 | stage.static_labels {
38 | values = {
39 | log_type = "unknown",
40 | }
41 | }
42 |
43 | }
44 |
45 | // check to see if the log line matches the klog format (https://github.com/kubernetes/klog)
46 | stage.match {
47 | // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+)
48 | selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\""
49 |
50 | // extract log level, klog uses a single letter code for the level followed by the month and day i.e. I0119
51 | stage.regex {
52 | expression = "((?P[A-Z])[0-9])"
53 | }
54 |
55 | // if the extracted level is I set INFO
56 | stage.replace {
57 | source = "level"
58 | expression = "(I)"
59 | replace = "INFO"
60 | }
61 |
62 | // if the extracted level is W set WARN
63 | stage.replace {
64 | source = "level"
65 | expression = "(W)"
66 | replace = "WARN"
67 | }
68 |
69 | // if the extracted level is E set ERROR
70 | stage.replace {
71 | source = "level"
72 | expression = "(E)"
73 | replace = "ERROR"
74 | }
75 |
76 | // if the extracted level is D set DEBUG
77 | stage.replace {
78 | source = "level"
79 | expression = "(D)"
80 | replace = "DEBUG"
81 | }
82 |
83 | // set the extracted level to be a label
84 | stage.labels {
85 | values = {
86 | level = "",
87 | }
88 | }
89 | }
90 |
91 | // if the level is still unknown, do one last attempt at detecting it based on common levels
92 | stage.match {
93 | selector = "{level=\"unknown\"}"
94 |
95 | // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|SeverityText)":\s*"|\s+(?:level|loglevel|lvl)="?|\s+\[?)(?P(DEBUG?|INFO|WARN(ING)?|ERR(OR)?|CRITICAL|FATAL|NOTICE|TRACE))("|\s+|-|\s*\])
96 | stage.regex {
97 | expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|SeverityText)\":\\s*\"|\\s+(?:level|loglevel|lvl)=\"?|\\s+\\[?)(?P(DEBUG?|INFO|WARN(ING)?|ERR(OR)?|CRITICAL|FATAL|NOTICE|TRACE))(\"|\\s+|-|\\s*\\])"
98 | }
99 |
100 | // set the extracted level to be a label
101 | stage.labels {
102 | values = {
103 | level = "",
104 | }
105 | }
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/labels/normalize-filename.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: normalize-filename
3 | Description: Normalizes the kubernetes filename name, and reduces cardinality of the filename
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | export "process" {
11 | value = loki.process.normalize_filename
12 | }
13 |
14 | /*
15 | Normalize the filename, the label "filename" is automatically created from
16 | discovered files in the matching path based on the __path__ label from the
17 | relabel_configs. This has extremely high cardinality, it can be useful
18 | for a pod with multiple containers/sidecars to know where the log came from
19 | but we can greatly reduce the cardinality.
20 | Example:
21 | Filename: /var/log/pods/agents_agent-logs-grafana-agent-k8hpm_5cafa323-a7ed-4703-9220-640d3e44a5e3/config-reloader/0.log
22 | Becomes: /var/log/pods/agents/agent-logs-grafana-agent/config-reloader.log
23 | */
24 | loki.process "normalize_filename" {
25 | forward_to = argument.forward_to.value
26 |
27 | stage.regex {
28 | // unescaped regex: ^(?P<path>\/([^\/_]+\/)+)[^\/]+\/(?P<container_folder>[^\/]+)\/[0-9]+\.log
29 | expression = "^(?P<path>\\/([^\\/_]+\\/)+)[^\\/]+\\/(?P<container_folder>[^\\/]+)\\/[0-9]+\\.log"
30 | source = "filename"
31 | }
32 |
33 | stage.template {
34 | source = "normalized_filename"
35 | template = "{{ .path }}{{ .job }}/{{ .container_folder }}.log"
36 | }
37 |
38 | stage.labels {
39 | values = {
40 | filename = "normalized_filename",
41 | }
42 | }
43 |
44 | }
45 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/common-log.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-clf
3 | Description: Log Processing for common-log (apache/nginx)
4 | Docs: https://www.w3.org/Daemon/User/Config/Logging.html#common-logfile-format
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.log_format_clf
13 | }
14 |
15 | loki.process "log_format_clf" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains clf and the line matches the format, then process the line as clf
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: clf"
21 | // unescaped regex: \S+\s+\S+\s+\S+\s+\[\S+\s+\S+\]\s+"[^"]+"\s+\d+\s+\d+
22 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*((apache|nginx|common-?log|clf)).*\"} |~ \"^\\\\S+\\\\s+\\\\S+\\\\s+\\\\S+\\\\s+\\\\[\\\\S+\\\\s+\\\\S+\\\\]\\\\s+\\\"[^\\\"]+\\\"\\\\s+\\\\d+\\\\s+\\\\d+$\""
23 |
24 | // clf doesn't have a log level, set default to info, set the log_type
25 | stage.static_labels{
26 | values = {
27 | level = "info",
28 | log_type = "clf",
29 | }
30 | }
31 |
32 | // extract the http response code and request method as they might want to be used as labels
33 | stage.regex {
34 | // unescaped regex: (?P<response_code>\d{3}) "(?P<request_method>\S+)
35 | expression = "(?P<response_code>[0-9]{3}) \"(?P<request_method>\\S+)"
36 | }
37 |
38 | // set the extracted response code and request method as labels
39 | stage.labels {
40 | values = {
41 | response_code = "",
42 | request_method = "",
43 | }
44 | }
45 |
46 | // check to see if the string failed is found in the log line, if so set the level to error
47 | stage.match {
48 | selector = "{logs_agent_grafana_com_log_format=~\"(?i)(apache|nginx|common-?log|clf)\"} |~ \" (failed|error) \""
49 |
50 | stage.static_labels {
51 | values = {
52 | level = "error",
53 | }
54 | }
55 | }
56 |
57 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
58 | // this can reduce the overall # of bytes sent and stored in Loki
59 | stage.match {
60 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
61 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
62 |
63 | // remove timestamp from the log line
64 | // unescaped regex: (\[([^\]]+)\])
65 | stage.replace {
66 | expression = "(\\[([^\\]]+)\\])"
67 | replace = ""
68 | }
69 | }
70 |
71 | }
72 |
73 | }
74 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/dotnet.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-dotnet
3 | Description: Log Processing for .Net
4 | Docs: https://learn.microsoft.com/en-us/dotnet/core/extensions/console-log-formatter#json
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.log_format_dotnet
13 | }
14 |
15 | loki.process "log_format_dotnet" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains dotnet-json and the line matches the format, then process the line as dotnet-json
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: dotnet-json"
21 | // unescaped regex: ^\s*\{.+\}\s*$
22 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*(dotnet-?json).*\"} |~ \"^\\\\s*\\\\{.+\\\\}\\\\s*$\""
23 |
24 | // set the log_type
25 | stage.static_labels{
26 | values = {
27 | log_type = "dotnet",
28 | }
29 | }
30 |
31 | // extract the level and category if they exist
32 | stage.json {
33 | expressions = {
34 | level = "LogLevel",
35 | category = "Category",
36 | }
37 | }
38 |
39 | // set the extracted level and category as labels
40 | stage.labels {
41 | values = {
42 | level = "",
43 | category = "",
44 | }
45 | }
46 |
47 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
48 | // this can reduce the overall # of bytes sent and stored in Loki
49 | // remove timestamp from the log line, depending on the entry it can be "start_time" or "time"
50 | stage.match {
51 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
52 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
53 |
54 | // remove timestamp from the log line
55 | // unescaped regex: (?i)("(Timestamp)"\s*:\s*\[?"[^"]+"\]?,?)
56 | stage.replace {
57 | expression = "(?i)(\"(Timestamp)\"\\s*:\\s*\\[?\"[^\"]+\"\\]?,?)"
58 | replace = ""
59 | }
60 | }
61 |
62 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
63 | // this can reduce the overall # of bytes sent and stored in Loki
64 | stage.match {
65 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
66 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
67 |
68 | // remove level from the log line
69 | stage.replace {
70 | // unescaped regex: (?i)"LogLevel"\s*:\s*"[^"]+",?
71 | expression = "(?i)(\"LogLevel\"\\s*:\\s*\"[^\"]+\",?)"
72 | replace = ""
73 | }
74 | }
75 |
76 | }
77 |
78 | }
79 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/istio.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-istio
3 | Description: Log Processing for istio
4 | Docs: https://istio.io/latest/docs/tasks/observability/logs/access-log/
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.log_format_istio
13 | }
14 |
15 | loki.process "log_format_istio" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains istio and the line matches the format, then process the line as json
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: istio"
21 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*(istio-?(json)?).*\"} |~ \"^\\\\s*\\\\{.+\\\\}\\\\s*$\""
22 |
23 | // not all istio logs contain a level, default to info and set the log_type
24 | stage.static_labels{
25 | values = {
26 | level = "info",
27 | log_type = "istio",
28 | }
29 | }
30 |
31 | // extract the level, response_code, method if they exist
32 | stage.json {
33 | expressions = {
34 | level = "level",
35 | response_code = "response_code",
36 | request_method = "method",
37 | }
38 | }
39 |
40 | // set the extracted level, response code and request method as labels
41 | stage.labels {
42 | values = {
43 | level = "",
44 | response_code = "",
45 | request_method = "",
46 | }
47 | }
48 |
49 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
50 | // this can reduce the overall # of bytes sent and stored in Loki
51 | // remove timestamp from the log line, depending on the entry it can be "start_time" or "time"
52 | stage.match {
53 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
54 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
55 |
56 | // remove timestamp from the log line
57 | // unescaped regex: ("(start_)?time"\s*:\s*"[^"]+",)
58 | stage.replace {
59 | expression = "(\"(start_)?time\"\\s*:\\s*\"[^\"]+\",)"
60 | replace = ""
61 | }
62 | }
63 |
64 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
65 | // this can reduce the overall # of bytes sent and stored in Loki
66 | stage.match {
67 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
68 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
69 |
70 | // remove level from the log line
71 | stage.replace {
72 | // unescaped regex: (?i)"level"\s*:\s*"[^"]+",?
73 | expression = "(?i)(\"level\"\\s*:\\s*\"[^\"]+\",?)"
74 | replace = ""
75 | }
76 | }
77 |
78 | }
79 |
80 | }
81 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/json.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-json
3 | Description: Log Processing for Generic JSON
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | export "process" {
11 | value = loki.process.log_format_json
12 | }
13 |
14 | loki.process "log_format_json" {
15 | forward_to = argument.forward_to.value
16 |
17 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains json and the line matches the format, then process the line as json
18 | stage.match {
19 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: json"
20 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*((generic-?)?json).*\"} |~ \"^\\\\s*\\\\{.+\\\\}\\\\s*$\""
21 |
22 | // set the log_type
23 | stage.static_labels{
24 | values = {
25 | log_type = "json",
26 | }
27 | }
28 |
29 | // extract the level, trying the common key spellings in order ("||" falls through to the next expression when a key is absent)
30 | stage.json {
31 | expressions = {
32 | level = "level || lvl || loglevel || LogLevel || log_level || logLevel || log_lvl || logLvl || levelname || levelName || LevelName",
33 | }
34 | }
35 |
36 | // set the extracted level as a label
37 | stage.labels {
38 | values = {
39 | level = "",
40 | }
41 | }
42 |
43 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
44 | // this can reduce the overall # of bytes sent and stored in Loki
45 | // remove timestamp from the log line, depending on the entry it can be "start_time" or "time"
46 | stage.match {
47 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
48 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
49 |
50 | // remove timestamp from the log line
51 | // unescaped regex: (?i)("(timestamp|ts|logdate|time)"\s*:\s*"[^"]+",?)
52 | stage.replace {
53 | expression = "(?i)(\"(timestamp|ts|logdate|time)\"\\s*:\\s*\"[^\"]+\",?)"
54 | replace = ""
55 | }
56 | }
57 |
58 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
59 | // this can reduce the overall # of bytes sent and stored in Loki
60 | stage.match {
61 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
62 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
63 |
64 | // remove level from the log line
65 | stage.replace {
66 | // unescaped regex: (?i)"(log)?(level|lvl)"\s*:\s*"[^"]+",?
67 | expression = "(?i)(\"(log)?(level|lvl)\"\\s*:\\s*\"[^\"]+\",?)"
68 | replace = ""
69 | }
70 | }
71 |
72 | }
73 |
74 | }
75 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/klog.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-klog
3 | Description: Log Processing for klog (used by kube-state-metrics and more in kube-system)
4 | Docs: https://github.com/kubernetes/klog
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.log_format_klog
13 | }
14 |
15 | loki.process "log_format_klog" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains klog and the line matches the format, then process the line as
19 | // a klog (https://github.com/kubernetes/klog)
20 | stage.match {
21 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: klog"
22 | // unescaped regex: ^[IWED]\d+\s+\d{2}:\d{2}:\d{2}\.\d+\s+\d+\s+\S+:\d+\]\s+.*$
23 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*(klog).*\"} |~ \"^[IWED]\\\\d+\\\\s+\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+\\\\s+\\\\d+\\\\s+\\\\S+:\\\\d+\\\\]\\\\s+.*$\""
24 |
25 | // set the log_type
26 | stage.static_labels{
27 | values = {
28 | log_type = "klog",
29 | }
30 | }
31 |
32 | // extract log level, klog uses a single letter code for the level followed by the month and day i.e. I0119
33 | stage.regex {
34 | expression = "((?P<level>[A-Z])[0-9])"
35 | }
36 |
37 | // if the extracted level is I set INFO
38 | stage.replace {
39 | source = "level"
40 | expression = "(I)"
41 | replace = "INFO"
42 | }
43 |
44 | // if the extracted level is W set WARN
45 | stage.replace {
46 | source = "level"
47 | expression = "(W)"
48 | replace = "WARN"
49 | }
50 |
51 | // if the extracted level is E set ERROR
52 | stage.replace {
53 | source = "level"
54 | expression = "(E)"
55 | replace = "ERROR"
56 | }
57 |
58 | // if the extracted level is D set DEBUG
59 | stage.replace {
60 | source = "level"
61 | expression = "(D)"
62 | replace = "DEBUG"
63 | }
64 |
65 | // set the extracted level to be a label
66 | stage.labels {
67 | values = {
68 | level = "",
69 | }
70 | }
71 |
72 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
73 | // this can reduce the overall # of bytes sent and stored in Loki
74 | stage.match {
75 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
76 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
77 |
78 | // remove timestamp from the log line
79 |
80 | // unescaped regex: ([0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+\s+)
81 | stage.replace {
82 | expression = "([0-9]{4}\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\.[0-9]+\\s+)"
83 | replace = ""
84 | }
85 | }
86 |
87 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
88 | // this can reduce the overall # of bytes sent and stored in Loki
89 | stage.match {
90 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
91 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
92 |
93 | // remove the leading klog level letter from the log line
94 | // unescaped regex: ^(I|W|E|D)
95 | stage.replace {
96 | expression = "(^(I|W|E|D))"
97 | replace = ""
98 | }
99 | }
100 |
101 | }
102 |
103 | }
109 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/logfmt.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-logfmt
3 | Description: Handles formatting for log format of logfmt which is the default Golang format
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | export "process" {
11 | value = loki.process.log_format_logfmt
12 | }
13 |
14 | loki.process "log_format_logfmt" {
15 | forward_to = argument.forward_to.value
16 |
17 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains logfmt and the line matches the format, then process the line as
18 | // a logfmt (https://github.com/go-logfmt/logfmt)
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: logfmt"
21 | // unescaped regex: (\w+=("[^"]*"|\S+))(\s+(\w+=("[^"]*"|\S+)))*\s*
22 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*(logfmt).*\"} |~ \"(\\\\w+=(\\\"[^\\\"]*\\\"|\\\\S+))(\\\\s+(\\\\w+=(\\\"[^\\\"]*\\\"|\\\\S+)))*\\\\s*\""
23 |
24 | // set the log_type
25 | stage.static_labels{
26 | values = {
27 | log_type = "logfmt",
28 | }
29 | }
30 |
31 | // while the level could be extracted as logfmt, this allows for multiple possible log levels formats
32 | // i.e. loglevel=info, level=info, lvl=info, loglvl=info
33 | stage.regex {
34 | expression = "(log)?(level|lvl)=\"?(?P<level>\\S+)\"?"
35 | }
36 |
37 | // set the extracted level value as a label
38 | stage.labels {
39 | values = {
40 | level = "",
41 | }
42 | }
43 |
44 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
45 | // this can reduce the overall # of bytes sent and stored in Loki
46 | stage.match {
47 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
48 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
49 |
50 | // remove timestamp from the log line
51 |
52 | // unescaped regex: ((ts?|timestamp)=\d{4}-\d{2}-\d{2}(T|\s+)\d{2}:\d{2}:\d{2}(\.\d+)?(Z|(\+|-)\d+)?\s+)
53 | stage.replace {
54 | expression = "((ts?|timestamp)=[0-9]{4}-[0-9]{2}-[0-9]{2}(T|\\s+)[0-9]{2}:[0-9]{2}:[0-9]{2}(\\.[0-9]+)?(Z|(\\+|-)[0-9]+)?\\s+)"
55 | replace = ""
56 | }
57 | }
58 |
59 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
60 | // this can reduce the overall # of bytes sent and stored in Loki
61 | stage.match {
62 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
63 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
64 |
65 | // remove level from the log line
66 | stage.replace {
67 | // unescaped regex: (log)?(lvl|level)="?[^\s]+\s"?
68 | expression = "(?i)((log)?(lvl|level)=\"?[^\\s]+\\s\"?)"
69 | replace = ""
70 | }
71 | }
72 |
73 | }
74 |
75 | }
76 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/otel.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-otel
3 | Description: Log Processing for OpenTelemetry
4 | Docs: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.log_format_otel
13 | }
14 |
15 | loki.process "log_format_otel" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains otel and the line matches the format, then process the line as otel
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: otel"
21 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*((otel|open-?telemetry)(-?json)?).*\"} |~ \"^\\\\s*\\\\{.+\\\\}\\\\s*$\""
22 |
23 | // set the log_type
24 | stage.static_labels{
25 | values = {
26 | log_type = "otel",
27 | }
28 | }
29 |
30 | // extract the SeverityText (level), and service.name
31 | // Docs: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md#service
32 | stage.json {
33 | expressions = {
34 | level = "SeverityText",
35 | service = "Resource.\"service.name\"",
36 | }
37 | }
38 |
39 | // set the extracted level and service as labels
40 | stage.labels {
41 | values = {
42 | level = "",
43 | service = "",
44 | }
45 | }
46 |
47 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
48 | // this can reduce the overall # of bytes sent and stored in Loki
49 | // remove timestamp from the log line, depending on the entry it can be "start_time" or "time"
50 | stage.match {
51 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
52 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
53 |
54 | // remove timestamp from the log line
55 | // unescaped regex: ("Timestamp"\s*:\s*"[^"]+",)
56 | stage.replace {
57 | expression = "(\"Timestamp\"\\s*:\\s*\"[^\"]+\",)"
58 | replace = ""
59 | }
60 | }
61 |
62 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
63 | // this can reduce the overall # of bytes sent and stored in Loki
64 | stage.match {
65 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
66 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
67 |
68 | // remove level from the log line
69 | stage.replace {
70 | // unescaped regex: ("SeverityText"\s*:\s*"[^"]+",)
71 | expression = "(?i)(\"SeverityText\"\\s*:\\s*\"[^\"]+\",)"
72 | replace = ""
73 | }
74 | }
75 |
76 | }
77 |
78 | }
79 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/postgres.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-postgres
3 | Description: Handles formatting for log format of Postgres
4 | Docs: https://www.postgresql.org/docs/current/runtime-config-logging.html
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.log_format_postgres
13 | }
14 |
15 | loki.process "log_format_postgres" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains postgres then process the line
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: postgres"
21 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*(postgres).*\"}"
22 |
23 | // set the log_type
24 | stage.static_labels{
25 | values = {
26 | log_type = "postgres",
27 | }
28 | }
29 |
30 | // extract the level and process_id from the log
31 | // unescaped regex: \[?(?P<timestamp>\d{4}-\d{2}-\d{2}(T|\s+)\d{2}:\d{2}:\d{2}.\d+\s+\w+)\]?\s+(\[(?P<process_id>\d+)\]\s+|.+)(?P<level>(INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC|DEBUG)\d*):\s*
32 | stage.regex {
33 | expression = "\\[?(?P<timestamp>\\d{4}-\\d{2}-\\d{2}(T|\\s+)\\d{2}:\\d{2}:\\d{2}.\\d+\\s+\\w+)\\]?\\s+(\\[(?P<process_id>\\d+)\\]\\s+|.+)(?P<level>(INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC|DEBUG)\\d*):\\s*"
34 | }
35 |
36 | // set the extracted level and process_id as labels
37 | stage.labels {
38 | values = {
39 | level = "",
40 | process_id = "",
41 | }
42 | }
43 |
44 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
45 | // this can reduce the overall # of bytes sent and stored in Loki
46 | stage.match {
47 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
48 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
49 |
50 | // remove timestamp from the log line
51 |
52 | // unescaped regex: (\[?[0-9]{4}-[0-9]{2}-[0-9]{2}(T|\s+)[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]+\s+\w+\]?)
53 | stage.replace {
54 | expression = "(\\[?[0-9]{4}-[0-9]{2}-[0-9]{2}(T|\\s+)[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]+\\s+\\w+\\]?)"
55 | replace = ""
56 | }
57 | }
58 |
59 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
60 | // this can reduce the overall # of bytes sent and stored in Loki
61 | stage.match {
62 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
63 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
64 |
65 | // remove level from the log line
66 | stage.replace {
67 | // unescaped regex: ((INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC|DEBUG)\d*:\s+)
68 | expression = "((INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC|DEBUG)\\d*:\\s+)"
69 | replace = ""
70 | }
71 | }
72 |
73 | }
74 |
75 | }
76 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/python.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-python
3 | Description: Log Processing for Python
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | export "process" {
11 | value = loki.process.log_format_python
12 | }
13 |
14 | loki.process "log_format_python" {
15 | forward_to = argument.forward_to.value
16 |
17 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains python-json and the line matches the format, then process the line as python-json
18 | stage.match {
19 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: python-json"
20 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*(python-?json).*\"} |~ \"^\\\\s*\\\\{.+\\\\}\\\\s*$\""
21 |
22 | // set the log_type
23 | stage.static_labels{
24 | values = {
25 | log_type = "python",
26 | }
27 | }
28 |
29 | // extract the level, process, module and func if they exist
30 | stage.json {
31 | expressions = {
32 | level = "level || lvl || loglevel || log_level || logLevel || log_lvl || logLvl || levelname || levelName",
33 | process = "processName || process_name || process",
34 | module = "module || moduleName || module_name",
35 | func = "funcName || func_name || func",
36 | }
37 | }
38 |
39 | // set the extracted level, process, module and func as labels
40 | stage.labels {
41 | values = {
42 | level = "",
43 | process = "",
44 | module = "",
45 | func = "",
46 | }
47 | }
48 |
49 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
50 | // this can reduce the overall # of bytes sent and stored in Loki
51 | // remove timestamp from the log line, depending on the entry it can be "start_time" or "time"
52 | stage.match {
53 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
54 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
55 |
56 | // remove timestamp from the log line
57 | // unescaped regex: (?i)("(@?timestamp|asctime)"\s*:\s*\[?"[^"]+"\]?,?)
58 | stage.replace {
59 | expression = "(?i)(\"(@?timestamp|asctime)\"\\s*:\\s*\\[?\"[^\"]+\"\\]?,?)"
60 | replace = ""
61 | }
62 | }
63 |
64 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
65 | // this can reduce the overall # of bytes sent and stored in Loki
66 | stage.match {
67 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
68 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
69 |
70 | // remove level from the log line
71 | stage.replace {
72 | // unescaped regex: (?i)"(log)?(level|lvl)(name)?"\s*:\s*"[^"]+",?
73 | expression = "(?i)(\"(log)?(level|lvl)(name)?\"\\s*:\\s*\"[^\"]+\",?)"
74 | replace = ""
75 | }
76 | }
77 |
78 | }
79 |
80 | }
81 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/spring-boot.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-spring-boot
3 | Description: Log Processing for Spring Boot
4 | Docs: https://docs.spring.io/spring-boot/docs/2.1.13.RELEASE/reference/html/boot-features-logging.html
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.log_format_spring_boot
13 | }
14 |
15 | loki.process "log_format_spring_boot" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains springboot and the line matches the format, then process the line as spring-boot
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: spring-boot"
21 | // unescaped regex: ^\d{4}.+(INFO|ERROR|WARN|DEBUG|TRACE)\s+\d+\s+[^\[]+\[\S+\]\s+\S+\s+:\s+.*$
22 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*(spring-?boot).*\"} |~ \"^\\\\d{4}.+(INFO|ERROR|WARN|DEBUG|TRACE)\\\\s+\\\\d+\\\\s+[^\\\\[]+\\\\[\\\\S+\\\\]\\\\s+\\\\S+\\\\s+:\\\\s+.*$\""
23 |
24 | // set the log_type
25 | stage.static_labels{
26 | values = {
27 | log_type = "spring-boot",
28 | }
29 | }
30 |
31 | // extract the timestamp, level, traceId, spanId, processId, thread, logger from the log line
32 | // the named capture groups below must match the label names declared in stage.labels further down
33 | // unescaped regex: (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}(T|\s+)[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?)\s+(?P<level>\w+)\s+(?P<trace>\[(\S*\-?),(?P<traceId>\S*),(?P<spanId>\S*)\])\s+(?P<processId>[0-9]+)\s+-+\s+\[\s*(?P<thread>\S+)\]\s+(?P<logger>\S+)\s+:\s+(?P<message>.+)
34 | stage.regex {
35 | expression = "(?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}(T|\\s+)[0-9]{2}:[0-9]{2}:[0-9]{2}(\\.[0-9]+)?)\\s+(?P<level>\\w+)\\s+(?P<trace>\\[(\\S*\\-?),(?P<traceId>\\S*),(?P<spanId>\\S*)\\])\\s+(?P<processId>[0-9]+)\\s+-+\\s+\\[\\s*(?P<thread>\\S+)\\]\\s+(?P<logger>\\S+)\\s+:\\s+(?P<message>.+)"
36 | }
37 |
38 | // set the extracted values as labels so they can be used by downstream components, most likely several labels
39 | // will be dropped before being written to Loki
40 | stage.labels {
41 | values = {
42 | level = "",
43 | trace = "",
44 | traceId = "",
45 | spanId = "",
46 | processId = "",
47 | thread = "",
48 | logger = "",
49 | }
50 | }
51 |
52 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
53 | // this can reduce the overall # of bytes sent and stored in Loki
54 | // the timestamp remains available to Loki as the entry timestamp
55 | stage.match {
56 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
57 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
58 |
59 | // remove timestamp from the log line
60 | // unescaped regex: ^([0-9]{4}-[0-9]{2}-[0-9]{2}(T|\s+)[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?(Z|(\+|-)[0-9:]+)?)\s+
61 | stage.replace {
62 | expression = "^([0-9]{4}-[0-9]{2}-[0-9]{2}(T|\\s+)[0-9]{2}:[0-9]{2}:[0-9]{2}(\\.[0-9]+)?(Z|(\\+|-)[0-9:]+)?)\\s+"
63 | replace = ""
64 | }
65 | }
66 |
67 | // check logs.agent.grafana.com/scrub-level annotation, if true remove the level from the log line (it is still a label)
68 | // this can reduce the overall # of bytes sent and stored in Loki
69 | stage.match {
70 | selector = "{logs_agent_grafana_com_scrub_level=~\"(?i)true\"}"
71 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-level: true"
72 |
73 | // remove level from the log line
74 | stage.replace {
75 | // unescaped regex: (ERROR|WARN|INFO|DEBUG|TRACE)\s+
76 | expression = "(ERROR|WARN|INFO|DEBUG|TRACE)\\s+"
77 | replace = ""
78 | }
79 | }
80 |
81 | }
82 |
83 | }
--------------------------------------------------------------------------------
/modules/kubernetes/logs/log-formats/syslog.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: log-format-syslog
3 | Description: Handles formatting for log format of syslog
4 | Docs: https://datatracker.ietf.org/doc/html/rfc5424 — NOTE(review): the selector regex below matches the traditional BSD syslog header (RFC 3164 style "Mmm dd hh:mm:ss host tag[pid]:"), not the RFC 5424 format — confirm which is intended
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.log_format_syslog
13 | }
14 |
15 | loki.process "log_format_syslog" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/log-format annotation, if the log_type is empty the line hasn't been processed, if it contains syslog and the line matches the format, then process the line as syslog
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/log-format: syslog"
21 | // unescaped regex: ^[A-Za-z]{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2}\s+\S+\s+\S+\[\d+\]:\s+.*$
22 | selector = "{log_type=\"\", logs_agent_grafana_com_log_format=~\"(?i).*(syslog).*\"} |~ \"^[A-Za-z]{3}\\\\s+\\\\d{1,2}\\\\s+\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\s+\\\\S+\\\\s+\\\\S+\\\\[\\\\d+\\\\]:\\\\s+.*$\""
23 |
24 | stage.static_labels{
25 | values = {
26 | // set the log_type, and a default level of info since the matched syslog header carries no explicit level
27 | log_type = "syslog",
28 | level = "info",
29 | }
30 | }
31 |
32 | // check logs.agent.grafana.com/scrub-timestamp annotation, if true remove the timestamp from the log line
33 | // this can reduce the overall # of bytes sent and stored in Loki
34 | stage.match {
35 | selector = "{logs_agent_grafana_com_scrub_timestamp=\"true\"}"
36 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-timestamp: true"
37 |
38 | // remove timestamp from the log line
39 | // unescaped regex: ^[A-Za-z]{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2}
40 | stage.replace {
41 | expression = "(^[A-Za-z]{3}\\s+\\d{1,2}\\s+\\d{2}:\\d{2}:\\d{2})"
42 | replace = ""
43 | }
44 | }
45 |
46 | }
47 |
48 | }
49 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/masks/all.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: mask-all
3 | Description: Wrapper module that chains all masking modules in order: ssn -> credit-card -> email -> phone -> ipv4 -> ipv6, then forwards to the provided receivers
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "git_repo" {
11 | optional = true
12 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
13 | }
14 |
15 | argument "git_rev" {
16 | optional = true
17 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
18 | }
19 |
20 | argument "git_pull_freq" {
21 | optional = true
22 | default = "5m"
23 | }
24 |
25 | export "process" {
26 | value = module.git.mask_ssn.exports.process
27 | }
28 |
29 | module.git "mask_ssn" {
30 | repository = argument.git_repo.value
31 | revision = argument.git_rev.value
32 | pull_frequency = argument.git_pull_freq.value
33 | path = "modules/kubernetes/logs/masks/ssn.river"
34 |
35 | arguments {
36 | forward_to = [module.git.mask_credit_card.exports.process.receiver]
37 | }
38 | }
39 |
40 | module.git "mask_credit_card" {
41 | repository = argument.git_repo.value
42 | revision = argument.git_rev.value
43 | pull_frequency = argument.git_pull_freq.value
44 | path = "modules/kubernetes/logs/masks/credit-card.river"
45 |
46 | arguments {
47 | forward_to = [module.git.mask_email.exports.process.receiver]
48 | }
49 | }
50 |
51 | module.git "mask_email" {
52 | repository = argument.git_repo.value
53 | revision = argument.git_rev.value
54 | pull_frequency = argument.git_pull_freq.value
55 | path = "modules/kubernetes/logs/masks/email.river"
56 |
57 | arguments {
58 | forward_to = [module.git.mask_phone.exports.process.receiver]
59 | }
60 | }
61 |
62 | module.git "mask_phone" {
63 | repository = argument.git_repo.value
64 | revision = argument.git_rev.value
65 | pull_frequency = argument.git_pull_freq.value
66 | path = "modules/kubernetes/logs/masks/phone.river"
67 |
68 | arguments {
69 | forward_to = [module.git.mask_ipv4.exports.process.receiver]
70 | }
71 | }
72 |
73 | module.git "mask_ipv4" {
74 | repository = argument.git_repo.value
75 | revision = argument.git_rev.value
76 | pull_frequency = argument.git_pull_freq.value
77 | path = "modules/kubernetes/logs/masks/ipv4.river"
78 |
79 | arguments {
80 | forward_to = [module.git.mask_ipv6.exports.process.receiver]
81 | }
82 | }
83 |
84 | module.git "mask_ipv6" {
85 | repository = argument.git_repo.value
86 | revision = argument.git_rev.value
87 | pull_frequency = argument.git_pull_freq.value
88 | path = "modules/kubernetes/logs/masks/ipv6.river"
89 |
90 | arguments {
91 | forward_to = argument.forward_to.value
92 | }
93 | }
94 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/masks/credit-card.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: mask-credit-card
3 | Description: Checks the logs.agent.grafana.com/mask-credit-card annotation, if set to "true" any logs that match the credit
4 | card pattern will have the value of the credit card replaced with "*credit-card*<hash>*" (hashed with a hard-coded salt of "salt")
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.mask_credit_card
13 | }
14 |
15 | loki.process "mask_credit_card" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/mask-credit-card annotation, if true the data will be masked as *credit-card*salt*
19 | // Formats:
20 | // Visa: 4[0-9]{15}
21 | // MasterCard: 5[1-5][0-9]{14}
22 | // American Express: 3[47][0-9]{13}
23 | // Discover: 6[0-9]{15}
24 | // JCB: 3[51-55][0-9]{14} — NOTE(review): "[51-55]" is a regex character class equivalent to [1-5], so this matches 3[1-5] followed by 14 digits; JCB numbers start with 35, so the intent was likely 35[0-9]{14} — confirm before changing
25 | stage.match {
26 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/mask-credit-card: true"
27 | selector = "{logs_agent_grafana_com_mask_credit_card=~\"(?i)true\"}"
28 |
29 | stage.replace {
30 | // unescaped regex: (4[0-9]{15}|5[1-5][0-9]{14}|3[47][0-9]{13}|6[0-9]{15}|3[51-55][0-9]{14})
31 | expression = "(4[0-9]{15}|5[1-5][0-9]{14}|3[47][0-9]{13}|6[0-9]{15}|3[51-55][0-9]{14})"
32 | replace = "*credit-card*{{ .Value | Hash \"salt\" }}*"
33 | }
34 | }
35 |
36 | }
37 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/masks/email.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: mask-email
3 | Description: Checks the logs.agent.grafana.com/mask-email annotation, if set to "true" any logs that match the email
4 | pattern will have the value of the email replaced with "*email*<hash>*" (hashed with a hard-coded salt of "salt")
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.mask_email
13 | }
14 |
15 | loki.process "mask_email" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/mask-email annotation, if true the data will be masked as *email*salt*
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/mask-email: true"
21 | selector = "{logs_agent_grafana_com_mask_email=~\"(?i)true\"}"
22 |
23 | stage.replace {
24 | // unescaped regex: ([\w\.=-]+@[\w\.-]+\.[\w]{2,64})
25 | expression = "([\\w\\.=-]+@[\\w\\.-]+\\.[\\w]{2,64})"
26 | replace = "*email*{{ .Value | Hash \"salt\" }}*"
27 | }
28 | }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/masks/ipv4.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: mask-ipv4
3 | Description: Checks the logs.agent.grafana.com/mask-ipv4 annotation, if set to "true" any logs that match the ipv4
4 | pattern will have the value of the ipv4 replaced with "*ipv4*<hash>*" (hashed with a hard-coded salt of "salt")
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.mask_ipv4
13 | }
14 |
15 | loki.process "mask_ipv4" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/mask-ipv4 annotation, if true the data will be masked as *ipv4*salt*
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/mask-ipv4: true"
21 | selector = "{logs_agent_grafana_com_mask_ipv4=~\"(?i)true\"}"
22 |
23 | stage.replace {
24 | // unescaped regex: ((\b25[0-5]|\b2[0-4][0-9]|\b[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})
25 | expression = "((\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})"
26 | replace = "*ipv4*{{ .Value | Hash \"salt\" }}*"
27 | }
28 | }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/masks/ipv6.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: mask-ipv6
3 | Description: Checks the logs.agent.grafana.com/mask-ipv6 annotation, if set to "true" any logs that match the ipv6
4 | pattern will have the value of the ipv6 replaced with "*ipv6*<hash>*" (hashed with a hard-coded salt of "salt")
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.mask_ipv6
13 | }
14 |
15 | loki.process "mask_ipv6" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/mask-ipv6 annotation, if true the data will be masked as *ipv6*salt*
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/mask-ipv6: true"
21 | selector = "{logs_agent_grafana_com_mask_ipv6=~\"(?i)true\"}"
22 |
23 | stage.replace {
24 | // unescaped regex: (([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))
25 | expression = "(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
26 | replace = "*ipv6*{{ .Value | Hash \"salt\" }}*"
27 | }
28 | }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/masks/phone.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: mask-phone
3 | Description: Checks the logs.agent.grafana.com/mask-phone annotation, if set to "true" any logs that match the phone
4 | pattern will have the value of the phone replaced with "*phone*<hash>*" (hashed with a hard-coded salt of "salt")
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.mask_phone
13 | }
14 |
15 | loki.process "mask_phone" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/mask-phone annotation, if true the data will be masked as *phone*salt*
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/mask-phone: true"
21 | selector = "{logs_agent_grafana_com_mask_phone=~\"(?i)true\"}"
22 |
23 | stage.replace {
24 | // unescaped regex: ([\+]?[(]?[0-9]{3}[)]?[-\s\.]?[0-9]{3}[-\s\.]?[0-9]{4,6}) — note: separators are optional, so any run of 10+ digits can also match
25 | expression = "([\\+]?[(]?[0-9]{3}[)]?[-\\s\\.]?[0-9]{3}[-\\s\\.]?[0-9]{4,6})"
26 | replace = "*phone*{{ .Value | Hash \"salt\" }}*"
27 | }
28 | }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/masks/ssn.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: mask-ssn
3 | Description: Checks the logs.agent.grafana.com/mask-ssn annotation, if set to "true" any logs that match the ssn
4 | pattern will have the value of the ssn replaced with "*ssn*<hash>*" (hashed with a hard-coded salt of "salt")
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.mask_ssn
13 | }
14 |
15 | loki.process "mask_ssn" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/mask-ssn annotation, if true the data will be masked as *ssn*salt*
19 | stage.match {
20 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/mask-ssn: true"
21 | selector = "{logs_agent_grafana_com_mask_ssn=~\"(?i)true\"}"
22 |
23 | stage.replace {
24 | // unescaped regex: ([0-9]{3}-[0-9]{2}-[0-9]{4})
25 | expression = "([0-9]{3}-[0-9]{2}-[0-9]{4})"
26 | replace = "*ssn*{{ .Value | Hash \"salt\" }}*"
27 | }
28 | }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/metrics/post-process-bytes-lines.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: post-process-lines-bytes-metrics
3 | Description: Generates metrics for the number of lines and bytes in the log line after all processing, as lines are forwarded for storage
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | export "process" {
11 | value = loki.process.pre_process_lines_bytes_metrics
12 | }
13 |
14 | loki.process "pre_process_lines_bytes_metrics" {
15 | forward_to = argument.forward_to.value
16 |
17 | stage.metrics {
18 | metric.counter {
19 | name = "lines_total"
20 | description = "total number of log lines ingested, processed and forwarded for storage"
21 | prefix = "log_"
22 | match_all = true
23 | action = "inc"
24 | max_idle_duration = "24h"
25 | }
26 | }
27 |
28 | stage.metrics {
29 | metric.counter {
30 | name = "bytes_total"
31 | description = "total log bytes ingested, processed and forwarded for storage"
32 | prefix = "log_"
33 | match_all = true
34 | count_entry_bytes = true
35 | action = "add"
36 | max_idle_duration = "24h"
37 | }
38 | }
39 |
40 | }
41 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/metrics/pre-process-bytes-lines.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: pre-process-lines-bytes-metrics
3 | Description: Generates metrics for the number of lines and bytes in the log line before any processing is done
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "keep_labels" {
11 | optional = true
12 | // comment = "List of labels to keep before the log message is written to Loki"
13 | default = [
14 | "app",
15 | "cluster",
16 | "component",
17 | "container",
18 | "deployment",
19 | "env",
20 | "filename",
21 | "instance",
22 | "job",
23 | "level",
24 | "log_type",
25 | "namespace",
26 | "region",
27 | "service",
28 | "squad",
29 | "team",
30 | ]
31 | }
32 |
33 | argument "git_repo" {
34 | optional = true
35 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
36 | }
37 |
38 | argument "git_rev" {
39 | optional = true
40 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
41 | }
42 |
43 | argument "git_pull_freq" {
44 | // comment = "How often to pull the git repo, the default is 0s which means never pull"
45 | optional = true
46 | default = "0s"
47 | }
48 |
49 | export "process" {
50 | value = module.git.label_keep.exports.process
51 | }
52 |
53 | // drop any labels that are not in the keep_labels list before counting,
54 | // because the metrics generated below keep the full set of labels currently attached to the log line
55 | // and those need to line up with the labels actually kept on the stored stream
56 | module.git "label_keep" {
57 | repository = argument.git_repo.value
58 | revision = argument.git_rev.value
59 | pull_frequency = argument.git_pull_freq.value
60 | path = "modules/kubernetes/logs/labels/keep-labels.river"
61 |
62 | arguments {
63 | forward_to = [loki.process.pre_process_lines_bytes_metrics.receiver]
64 | keep_labels = argument.keep_labels.value
65 | }
66 | }
67 |
68 | loki.process "pre_process_lines_bytes_metrics" {
69 | forward_to = [] // does not forward anywhere, just generates metrics
70 |
71 | stage.metrics {
72 | metric.counter {
73 | name = "lines_pre_total"
74 | description = "total number of log lines ingested before processing"
75 | prefix = "log_"
76 | match_all = true
77 | action = "inc"
78 | max_idle_duration = "24h"
79 | }
80 | }
81 |
82 | stage.metrics {
83 | metric.counter {
84 | name = "bytes_pre_total"
85 | description = "total number of log bytes ingested before processing"
86 | prefix = "log_"
87 | match_all = true
88 | count_entry_bytes = true
89 | action = "add"
90 | max_idle_duration = "24h"
91 | }
92 | }
93 |
94 | }
95 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/scrubs/all.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrub-all
3 | Description: Wrapper module that chains all scrubbing modules in order: json-empties -> json-nulls, then forwards to the provided receivers
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "git_repo" {
11 | optional = true
12 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
13 | }
14 |
15 | argument "git_rev" {
16 | optional = true
17 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
18 | }
19 |
20 | argument "git_pull_freq" {
21 | optional = true
22 | default = "5m"
23 | }
24 |
25 | export "process" {
26 | value = module.git.scrub_json_empties.exports.process
27 | }
28 |
29 | module.git "scrub_json_empties" {
30 | repository = argument.git_repo.value
31 | revision = argument.git_rev.value
32 | pull_frequency = argument.git_pull_freq.value
33 | path = "modules/kubernetes/logs/scrubs/json-empties.river"
34 |
35 | arguments {
36 | forward_to = [module.git.scrub_json_nulls.exports.process.receiver]
37 | }
38 | }
39 |
40 | module.git "scrub_json_nulls" {
41 | repository = argument.git_repo.value
42 | revision = argument.git_rev.value
43 | pull_frequency = argument.git_pull_freq.value
44 | path = "modules/kubernetes/logs/scrubs/json-nulls.river"
45 |
46 | arguments {
47 | forward_to = argument.forward_to.value
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/scrubs/json-empties.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrub-json-empties
3 | Description: Checks for the annotation logs.agent.grafana.com/scrub-empties, if set to "true"
4 | Removes any json properties with empty values i.e. "foo": "", "bar": [], "baz": {}
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.scrub_json_empties
13 | }
14 |
15 | loki.process "scrub_json_empties" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/scrub-empties annotation, if true remove any json property whose value is set to
19 | // an empty string "", empty object {} or empty array [] is removed
20 | // this can reduce the overall # of bytes sent and stored in Loki
21 | stage.match {
22 | selector = "{logs_agent_grafana_com_log_format=~\"(?i)(dotnet-?json|istio|(generic-?)?json|log4j-?json|(otel|open-?telemetry)(-?json)?|python-?json)\",logs_agent_grafana_com_scrub_empties=~\"(?i)true\"}"
23 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-empties: true"
24 |
25 | // remove empty properties
26 | stage.replace {
27 | // unescaped regex: (\s*,\s*("[^"]+"\s*:\s*(\[\s*\]|\{\s*\}|"\s*"))|("[^"]+"\s*:\s*(\[\s*\]|\{\s*\}|"\s*"))\s*,\s*)
28 | expression = "(\\s*,\\s*(\"[^\"]+\"\\s*:\\s*(\\[\\s*\\]|\\{\\s*\\}|\"\\s*\"))|(\"[^\"]+\"\\s*:\\s*(\\[\\s*\\]|\\{\\s*\\}|\"\\s*\"))\\s*,\\s*)"
29 | replace = ""
30 | }
31 | }
32 |
33 | }
34 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/scrubs/json-nulls.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrub-json-nulls
3 | Description: Checks for the annotation logs.agent.grafana.com/scrub-nulls, if set to "true"
4 | Removes any json properties with a null value
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | export "process" {
12 | value = loki.process.scrub_json_nulls
13 | }
14 |
15 | loki.process "scrub_json_nulls" {
16 | forward_to = argument.forward_to.value
17 |
18 | // check logs.agent.grafana.com/scrub-nulls annotation, if true remove any json property whose value is set to null
19 | // this can reduce the overall # of bytes sent and stored in Loki
20 | stage.match {
21 | selector = "{logs_agent_grafana_com_log_format=~\"(?i)(dotnet-?json|istio|(generic-?)?json|log4j-?json|(otel|open-?telemetry)(-?json)?|python-?json)\",logs_agent_grafana_com_scrub_nulls=~\"(?i)true\"}"
22 | pipeline_name = "pipeline for annotation || logs.agent.grafana.com/scrub-nulls: true"
23 |
24 | // remove null properties
25 | stage.replace {
26 | // unescaped regex: (\s*,\s*("[^"]+"\s*:\s*null)|("[^"]+"\s*:\s*null)\s*,\s*)
27 | expression = "(\\s*,\\s*(\"[^\"]+\"\\s*:\\s*null)|(\"[^\"]+\"\\s*:\\s*null)\\s*,\\s*)"
28 | replace = ""
29 | }
30 | }
31 |
32 | }
33 |
--------------------------------------------------------------------------------
/modules/kubernetes/logs/targets/logs-from-api.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: logs-from-api
3 | Description: Performs Kubernetes service discovery for pods and applies relabelings; logs for the discovered
4 | targets are then tailed through the Kubernetes API (loki.source.kubernetes) instead of from files on disk
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
8 | optional = false
9 | }
10 |
11 | argument "tenant" {
12 | // comment = "The tenant to filter logs to. This does not have to be the tenantId, this is the value to look for in the logs.agent.grafana.com/tenant annotation, and this can be a regex."
13 | optional = true
14 | default = ".*"
15 | }
16 |
17 | argument "git_repo" {
18 | optional = true
19 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
20 | }
21 |
22 | argument "git_rev" {
23 | optional = true
24 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
25 | }
26 |
27 | argument "git_pull_freq" {
28 | optional = true
29 | default = "5m"
30 | }
31 |
32 | discovery.kubernetes "pods" {
33 | role = "pod"
34 | }
35 |
36 | module.git "relabelings_log" {
37 | repository = argument.git_repo.value
38 | revision = argument.git_rev.value
39 | pull_frequency = argument.git_pull_freq.value
40 | path = "modules/kubernetes/logs/relabelings.river"
41 |
42 | arguments {
43 | targets = discovery.kubernetes.pods.targets
44 | tenant = argument.tenant.value
45 | git_repo = argument.git_repo.value
46 | git_rev = argument.git_rev.value
47 | git_pull_freq = argument.git_pull_freq.value
48 | }
49 | }
50 |
51 | loki.source.kubernetes "pods" {
52 | targets = module.git.relabelings_log.exports.relabelings.output
53 | forward_to = argument.forward_to.value
54 | }
55 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/jobs/prometheus-operator.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: job-prometheus operators
3 | Description: Ingests Prometheus Operator ServiceMonitors, PodMonitors, and Probes
4 |
5 | Note: Every argument except for "forward_to" is optional and has a defined default value. However, the defaults for these
6 | arguments are not defined using the default = " ... " argument syntax, but rather using coalesce(argument.value, " ... ").
7 | This is because if the argument passed in from a consuming module is set to null, the default = " ... " syntax
8 | does not override the value passed in, whereas coalesce() returns the first non-null value.
9 | */
10 | argument "forward_to" {
11 | comment = "Must be a list(MetricsReceiver) where collected logs should be forwarded to"
12 | optional = false
13 | }
14 |
15 | argument "namespaces" {
16 | comment = "List of namespaces to search for prometheus operator resources in (default: [] all namespaces)"
17 | optional = true
18 | }
19 |
20 | argument "servicemonitor_namespaces" {
21 | comment = "List of namespaces to search for just servicemonitors resources in (default: [] all namespaces)"
22 | optional = true
23 | }
24 |
25 | argument "podmonitor_namespaces" {
26 | comment = "List of namespaces to search for just podmonitors resources in (default: [] all namespaces)"
27 | optional = true
28 | }
29 |
30 | argument "probe_namespaces" {
31 | comment = "List of namespaces to search for just probes resources in (default: [] all namespaces)"
32 | optional = true
33 | }
34 |
35 | argument "scrape_interval" {
36 | comment = "How often to scrape metrics from the targets (default: 60s)"
37 | optional = true
38 | }
39 |
40 | argument "clustering" {
41 | // Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
42 | comment = "Whether or not clustering should be enabled (default: false)"
43 | optional = true
44 | }
45 |
46 | // Prometheus Operator ServiceMonitor objects
47 | prometheus.operator.servicemonitors "service_monitors" {
48 | forward_to = argument.forward_to.value
49 | namespaces = concat(
50 | coalesce(argument.namespaces.value, []),
51 | coalesce(argument.servicemonitor_namespaces.value, []),
52 | )
53 |
54 | clustering {
55 | enabled = coalesce(argument.clustering.value, false)
56 | }
57 |
58 | scrape {
59 | default_scrape_interval = coalesce(argument.scrape_interval.value, "60s")
60 | }
61 | }
62 |
63 | // Prometheus Operator PodMonitor objects
64 | prometheus.operator.podmonitors "pod_monitors" {
65 | forward_to = argument.forward_to.value
66 | namespaces = concat(
67 | coalesce(argument.namespaces.value, []),
68 | coalesce(argument.podmonitor_namespaces.value, []),
69 | )
70 |
71 | clustering {
72 | enabled = coalesce(argument.clustering.value, false)
73 | }
74 |
75 | scrape {
76 | default_scrape_interval = coalesce(argument.scrape_interval.value, "60s")
77 | }
78 | }
79 |
80 | // Prometheus Operator Probe objects
81 | prometheus.operator.probes "probes" {
82 | forward_to = argument.forward_to.value
83 | namespaces = concat(
84 | coalesce(argument.namespaces.value, []),
85 | coalesce(argument.probe_namespaces.value, []),
86 | )
87 |
88 | clustering {
89 | enabled = coalesce(argument.clustering.value, false)
90 | }
91 |
92 | scrape {
93 | default_scrape_interval = coalesce(argument.scrape_interval.value, "60s")
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/relabelings/annotations/metrics/node.river:
--------------------------------------------------------------------------------
1 | argument "targets" {
2 | // comment = "Discovered targets to apply relabelings to"
3 | optional = false
4 | }
5 |
6 | export "relabelings" {
7 | value = discovery.relabel.metric_annotations
8 | }
9 |
10 | discovery.relabel "metric_annotations" {
11 | targets = argument.targets.value
12 |
13 | // allow resources to declare their metrics the tenant their metrics should be sent to, the following annotation is supported:
14 | // metrics.agent.grafana.com/tenant: primary
15 | //
16 | // Note: This does not necessarily have to be the actual tenantId, it can be a friendly name as well that is simply used
17 | // to determine if the metrics should be gathered for the current tenant
18 | rule {
19 | action = "replace"
20 | replacement = ""
21 | target_label = "__tmp_tenant"
22 | }
23 | rule {
24 | action = "replace"
25 | source_labels = ["__meta_kubernetes_node_annotation_metrics_agent_grafana_com_tenant"]
26 | separator = ";"
27 | regex = "^(?:;*)?([^;]+).*$"
28 | replacement = "$1"
29 | target_label = "__tmp_tenant"
30 | }
31 |
32 | // allow resources to declare how often their metrics should be collected, the default value is 1m,
33 | // the following annotations are supported with the value provided in duration format:
34 | // metrics.agent.grafana.com/interval: 5m
35 | // or
36 | // prometheus.io/interval: 5m
37 | rule {
38 | action = "replace"
39 | replacement = "1m"
40 | target_label = "__tmp_interval"
41 | }
42 | rule {
43 | action = "replace"
44 | source_labels = [
45 | "__meta_kubernetes_node_annotation_metrics_agent_grafana_com_interval",
46 | "__meta_kubernetes_node_annotation_prometheus_io_interval",
47 | ]
48 | separator = ";"
49 | regex = "^(?:;*)?(\\d+(s|m|ms|h|d)).*$"
50 | replacement = "$1"
51 | target_label = "__tmp_interval"
52 | }
53 |
54 | // allow resources to declare the timeout of the scrape request — NOTE(review): this comment stated a 10s default, but the rule below sets __tmp_timeout to "1m"; confirm the intended default,
55 | // the following annotations are supported with the value provided in duration format:
56 | // metrics.agent.grafana.com/timeout: 30s
57 | // or
58 | // prometheus.io/timeout: 10s
59 | rule {
60 | action = "replace"
61 | replacement = "1m"
62 | target_label = "__tmp_timeout"
63 | }
64 | rule {
65 | action = "replace"
66 | source_labels = [
67 | "__meta_kubernetes_node_annotation_metrics_agent_grafana_com_timeout",
68 | "__meta_kubernetes_node_annotation_prometheus_io_timeout",
69 | ]
70 | separator = ";"
71 | regex = "^(?:;*)?(\\d+(s|m|ms|h|d)).*$"
72 | replacement = "$1"
73 | target_label = "__tmp_timeout"
74 | }
75 |
76 | }
77 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/relabelings/auto-scrape.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-auto-scrape
3 | Description: Handles metric relabelings for collected metrics from auto-scraped targets via annotations
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "job_label" {
11 | optional = true
12 | default = "auto-scrape"
13 | // comment = "The default job label to add"
14 | }
15 |
16 | export "metric_relabelings" {
17 | value = prometheus.relabel.auto_scrape
18 | }
19 |
20 | prometheus.relabel "auto_scrape" {
21 | forward_to = argument.forward_to.value
22 |
23 | // set the job label, only if job is not specified or contains "module.", the value of the annotation
24 | // metrics.agent.grafana.com/job or prometheus.io/job takes precedence
25 | rule {
26 | action = "replace"
27 | source_labels = ["job"]
28 | separator = ";"
29 | regex = "(?i)^(.*module\\..*|)$"
30 | replacement = argument.job_label.value
31 | target_label = "job"
32 | }
33 |
34 | // set the job label, only if job is the default job_label and the "app" label exists
35 | // on the target; the result is ${namespace}/${app}
36 | rule {
37 | action = "replace"
38 | source_labels = [
39 | "job",
40 | "namespace",
41 | "app",
42 | ]
43 | separator = ";"
44 | regex = "^" + argument.job_label.value + ";([^;]+);(.+)$"
45 | replacement = "$1/$2"
46 | target_label = "job"
47 | }
48 |
49 | }
50 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/relabelings/blackbox.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-blackbox
3 | Description: Handles metric relabelings for collected metrics from blackbox-exporter
4 | Docs: https://github.com/prometheus/blackbox_exporter
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
8 | optional = false
9 | }
10 |
11 | argument "job_label" {
12 | optional = true
13 | // from Grafana Cloud Integration:
14 | default = "integrations/blackbox_exporter"
15 | // comment = "The job label to add for all blackbox-exporter"
16 | }
17 |
18 | argument "drop_metrics" {
19 | optional = true
20 | // blackbox does not return that many metrics, however if certain metrics should be dropped they can be specified here
21 | // by default "probe_ip_addr_hash" is dropped
22 | default = "probe_ip_addr_hash"
23 | // comment = "Regex of metrics to drop"
24 | }
25 |
26 | export "metric_relabelings" {
27 | value = prometheus.relabel.blackbox_exporter
28 | }
29 |
30 | prometheus.relabel "blackbox_exporter" {
31 | forward_to = argument.forward_to.value
32 |
33 | rule {
34 | action = "drop"
35 | source_labels = ["__name__"]
36 | regex = argument.drop_metrics.value
37 | }
38 |
39 | // set the job label, only if the job label is not set or contains "module.", the value of the annotation
40 | // probes.agent.grafana.com/job takes precedence
41 | rule {
42 | action = "replace"
43 | source_labels = ["job"]
44 | regex = "|.*module\\..*"
45 | replacement = argument.job_label.value
46 | target_label = "job"
47 | }
48 |
49 | }
50 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/relabelings/json-exporter.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-json
3 | Description: Handles metric relabelings for collected metrics from json-exporter
4 | Docs: https://github.com/prometheus-community/json_exporter
5 | */
6 | argument "forward_to" {
7 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
8 | optional = false
9 | }
10 |
11 | argument "job_label" {
12 | optional = true
13 | // from Grafana Cloud Integration:
14 | default = "integrations/json_exporter"
15 | // comment = "The job label to add for all json-exporter metrics"
16 | }
17 |
18 | argument "drop_metrics" {
19 | optional = true
20 | // json does not return that many metrics, however if certain metrics should be dropped they can be specified here
21 | // the default is "none" which will not match any of the returned metric names
22 | default = "none"
23 | // comment = "Regex of metrics to drop"
24 | }
25 |
26 | export "metric_relabelings" {
27 | value = prometheus.relabel.json_exporter
28 | }
29 |
30 | prometheus.relabel "json_exporter" {
31 | forward_to = argument.forward_to.value
32 |
33 | rule {
34 | action = "drop"
35 | source_labels = ["__name__"]
36 | regex = argument.drop_metrics.value
37 | }
38 |
39 | // set the job label, only if the job label is not set or contains "module.", the value of the annotation
40 | // probes.agent.grafana.com/job takes precedence
41 | rule {
42 | action = "replace"
43 | source_labels = ["job"]
44 | regex = "|.*module\\..*"
45 | replacement = argument.job_label.value
46 | target_label = "job"
47 | }
48 |
49 | }
50 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/relabelings/kube-dns.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-kube-coredns
3 | Description: Handles metric relabelings for collected metrics from kubernetes coredns
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "job_label" {
11 | optional = true
12 | default = "integrations/kubernetes/coredns"
13 | // comment = "The job label to add for all coredns"
14 | }
15 |
16 | // drop metrics and labels per kube-prometheus
17 | // https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/kubernetesControlPlane-serviceMonitorcoredns.yaml
18 | argument "drop_metrics" {
19 | optional = true
20 | default = "coredns_cache_misses_total"
21 | // comment = "Regex of metrics to drop"
22 | }
23 |
24 | export "metric_relabelings" {
25 | value = prometheus.relabel.kube_coredns
26 | }
27 |
28 | prometheus.relabel "kube_coredns" {
29 | forward_to = argument.forward_to.value
30 |
31 | // drop metrics
32 | rule {
33 | action = "drop"
34 | source_labels = ["__name__"]
35 | regex = argument.drop_metrics.value
36 | }
37 |
38 | // set the job label, only if the job label is not set or contains "module."
39 | rule {
40 | action = "replace"
41 | source_labels = ["job"]
42 | regex = "|.*module\\..*"
43 | replacement = argument.job_label.value
44 | target_label = "job"
45 | }
46 |
47 | }
48 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/relabelings/kubelet-probes.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-kubelet-probes
3 | Description: Handles metric relabelings for collected metrics from kubelet-probes
4 | */
5 | argument "forward_to" {
6 | optional = false
7 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
8 | }
9 |
10 | argument "job_label" {
11 | optional = true
12 | // from Grafana Cloud Integration:
13 | default = "integrations/kubernetes/probes"
14 | // comment = "The job label to add for all kubelet probes metrics"
15 | }
16 |
17 | export "metric_relabelings" {
18 | value = prometheus.relabel.kubelet_probes
19 | }
20 |
21 | prometheus.relabel "kubelet_probes" {
22 | forward_to = argument.forward_to.value
23 |
24 | // set the job label
25 | rule {
26 | action = "replace"
27 | replacement = argument.job_label.value
28 | target_label = "job"
29 | }
30 |
31 | }
32 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/relabelings/kubelet-resource.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-kubelet-resource
3 | Description: Handles metric relabelings for collected metrics from kubelet-resource
4 | */
5 | argument "forward_to" {
6 | optional = false
7 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
8 | }
9 |
10 | argument "job_label" {
11 | optional = true
12 | // from Grafana Cloud Integration:
13 | default = "integrations/kubernetes/resource"
14 | // comment = "The job label to add for all kubelet resource metrics"
15 | }
16 |
17 | export "metric_relabelings" {
18 | value = prometheus.relabel.kubelet_resource
19 | }
20 |
21 | prometheus.relabel "kubelet_resource" {
22 | forward_to = argument.forward_to.value
23 |
24 | // set the job label
25 | rule {
26 | action = "replace"
27 | replacement = argument.job_label.value
28 | target_label = "job"
29 | }
30 |
31 | }
32 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/scrapes/kube-apiserver.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-kube-apiserver
3 | Description: Scrapes Kube apiserver, most of these same metrics can come from cAdvisor use only if necessary
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "scrape_interval" {
11 | // comment = "How frequently to scrape the targets of this scrape configuration."
12 | optional = true
13 | default = "60s"
14 | }
15 |
16 | argument "tenant" {
17 | // comment = "The tenant to filter to."  NOTE(review): this argument is never referenced in this module, unlike sibling scrape modules that pass it to node_targets — confirm whether it should be wired through or removed
18 | optional = true
19 | default = ".*"
20 | }
21 |
22 | argument "job_label" {
23 | optional = true
24 | default = "integrations/kubernetes/apiserver"
25 | // comment = "The job label to add for all apiserver"
26 | }
27 |
28 | argument "clustering" {
29 | // comment = "Whether or not clustering should be enabled"
30 | optional = true
31 | default = false
32 | }
33 |
34 | argument "git_repo" {
35 | optional = true
36 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
37 | }
38 |
39 | argument "git_rev" {
40 | optional = true
41 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
42 | }
43 |
44 | argument "git_pull_freq" {
45 | optional = true
46 | default = "0s"
47 | }
48 |
49 | // get the available endpoints
50 | discovery.kubernetes "endpoints" {
51 | role = "endpoints"
52 | }
53 |
54 | discovery.relabel "kube_apiserver" {
55 | targets = discovery.kubernetes.endpoints.targets
56 |
57 | // only keep namespace=default, service=kubernetes, port=https
58 | rule {
59 | action = "keep"
60 | source_labels = [
61 | "__meta_kubernetes_namespace",
62 | "__meta_kubernetes_service_name",
63 | "__meta_kubernetes_endpoint_port_name",
64 | ]
65 | regex = "default;kubernetes;https"
66 | }
67 |
68 | // set the namespace
69 | rule {
70 | action = "replace"
71 | source_labels = ["__meta_kubernetes_namespace"]
72 | target_label = "namespace"
73 | }
74 |
75 | // set the service_name
76 | rule {
77 | action = "replace"
78 | source_labels = ["__meta_kubernetes_service_name"]
79 | target_label = "service"
80 | }
81 |
82 | }
83 |
84 | prometheus.scrape "kube_apiserver" {
85 | targets = discovery.relabel.kube_apiserver.output
86 | scheme = "https"
87 | bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
88 | forward_to = [module.git.relabelings_kube_apiserver.exports.metric_relabelings.receiver]
89 | scrape_interval = argument.scrape_interval.value
90 |
91 | tls_config {
92 | ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
93 | insecure_skip_verify = false
94 | server_name = "kubernetes"
95 | }
96 |
97 | clustering {
98 | enabled = argument.clustering.value
99 | }
100 | }
101 |
102 | // metric relabelings
103 | module.git "relabelings_kube_apiserver" {
104 | repository = argument.git_repo.value
105 | revision = argument.git_rev.value
106 | pull_frequency = argument.git_pull_freq.value
107 | path = "modules/kubernetes/metrics/relabelings/kube-apiserver.river"
108 |
109 | arguments {
110 | forward_to = argument.forward_to.value
111 | job_label = argument.job_label.value
112 | }
113 | }
114 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/scrapes/kube-dns.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-kube-dns
3 | Description: Scrapes Kube dns, most of these same metrics can come from cAdvisor use only if necessary. If using annotations
4 | then this module does not need to be used as the kube-dns pods most likely already have the annotation
5 | prometheus.io/scrape: true set
6 |
7 | !!Note!!
8 | This module most likely does not need to be included, if you're using the auto-scrape-pods.river module, as
9 | most kube-dns pods have the annotation prometheus.io/scrape: "true" set already on the pods but not the services.
10 |
11 | */
12 | argument "forward_to" {
13 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
14 | optional = false
15 | }
16 |
17 | argument "scrape_interval" {
18 | // comment = "How frequently to scrape the targets of this scrape configuration."
19 | optional = true
20 | default = "60s"
21 | }
22 |
23 | argument "tenant" {
24 | // comment = "The tenant to filter to."  NOTE(review): this argument is never referenced in this module — confirm whether it should be wired through or removed
25 | optional = true
26 | default = ".*"
27 | }
28 |
29 | argument "job_label" {
30 | optional = true
31 | default = "integrations/kubernetes/coredns"
32 | // comment = "The job label to add for all coredns"
33 | }
34 |
35 | argument "clustering" {
36 | // comment = "Whether or not clustering should be enabled"
37 | optional = true
38 | default = false
39 | }
40 |
41 | argument "git_repo" {
42 | optional = true
43 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
44 | }
45 |
46 | argument "git_rev" {
47 | optional = true
48 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
49 | }
50 |
51 | argument "git_pull_freq" {
52 | optional = true
53 | default = "0s"
54 | }
55 |
56 | // get the available endpoints
57 | discovery.kubernetes "endpoints" {
58 | role = "endpoints"
59 | }
60 |
61 | discovery.relabel "kube_dns" {
62 | targets = discovery.kubernetes.endpoints.targets
63 |
64 | // only keep namespace=kube-system, k8s-app=kube-dns, port=metrics
65 | rule {
66 | action = "keep"
67 | source_labels = [
68 | "__meta_kubernetes_namespace",
69 | "__meta_kubernetes_pod_label_k8s_app",
70 | "__meta_kubernetes_pod_container_port_name",
71 | ]
72 | regex = "kube-system;kube-dns;.*metrics"
73 | }
74 |
75 | // set the namespace
76 | rule {
77 | action = "replace"
78 | source_labels = ["__meta_kubernetes_namespace"]
79 | target_label = "namespace"
80 | }
81 |
82 | // set the pod
83 | rule {
84 | action = "replace"
85 | source_labels = ["__meta_kubernetes_pod_name"]
86 | target_label = "pod"
87 | }
88 |
89 | // set the service_name
90 | rule {
91 | action = "replace"
92 | source_labels = ["__meta_kubernetes_service_name"]
93 | target_label = "service"
94 | }
95 |
96 | }
97 |
98 | prometheus.scrape "kube_dns" {
99 | targets = discovery.relabel.kube_dns.output
100 | forward_to = [module.git.relabelings_kube_dns.exports.metric_relabelings.receiver]
101 | scrape_interval = argument.scrape_interval.value
102 |
103 | clustering {
104 | enabled = argument.clustering.value
105 | }
106 | }
107 |
108 | // metric relabelings
109 | module.git "relabelings_kube_dns" {
110 | repository = argument.git_repo.value
111 | revision = argument.git_rev.value
112 | pull_frequency = argument.git_pull_freq.value
113 | path = "modules/kubernetes/metrics/relabelings/kube-dns.river"
114 |
115 | arguments {
116 | forward_to = argument.forward_to.value
117 | job_label = argument.job_label.value
118 | }
119 | }
120 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/scrapes/kubelet-cadvisor.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-kubelet-cadvisor
3 | Description: Scrapes cAdvisor (Container Advisor Metrics)
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "scrape_interval" {
11 | // comment = "How frequently to scrape the targets of this scrape configuration."
12 | optional = true
13 | default = "60s"
14 | }
15 |
16 | argument "tenant" {
17 | // comment = "The tenant to filter to. This does not have to be the tenantId, this is the value to look for in the metrics.agent.grafana.com/tenant annotation, and this can be a regex."
18 | optional = true
19 | default = ".*"
20 | }
21 |
22 | argument "job_label" {
23 | optional = true
24 | // from Grafana Cloud Integration:
25 | default = "integrations/kubernetes/cadvisor"
26 | // comment = "The job label to add for all cadvisor metrics"
27 | }
28 |
29 | argument "clustering" {
30 | // comment = "Whether or not clustering should be enabled"
31 | optional = true
32 | default = false
33 | }
34 |
35 | argument "git_repo" {
36 | optional = true
37 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
38 | }
39 |
40 | argument "git_rev" {
41 | optional = true
42 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
43 | }
44 |
45 | argument "git_pull_freq" {
46 | optional = true
47 | default = "0s"
48 | }
49 |
50 | module.git "node_targets" {
51 | repository = argument.git_repo.value
52 | revision = argument.git_rev.value
53 | pull_frequency = argument.git_pull_freq.value
54 | path = "modules/kubernetes/metrics/targets/nodes.river"
55 |
56 | arguments {
57 | tenant = argument.tenant.value
58 | git_repo = argument.git_repo.value
59 | git_rev = argument.git_rev.value
60 | git_pull_freq = argument.git_pull_freq.value
61 | }
62 | }
63 |
64 | discovery.relabel "kubelet_cadvisor" {
65 | targets = module.git.node_targets.exports.relabelings.output
66 |
67 | // set the path to use for cadvisor
68 | rule {
69 | action = "replace"
70 | source_labels = ["__meta_kubernetes_node_name"]
71 | regex = "(.+)"
72 | replacement = "/api/v1/nodes/${1}/proxy/metrics/cadvisor"
73 | target_label = "__metrics_path__"
74 | }
75 |
76 | }
77 |
78 | prometheus.scrape "kubelet_cadvisor" {
79 | targets = discovery.relabel.kubelet_cadvisor.output
80 | scheme = "https"
81 | bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
82 | forward_to = [module.git.relabelings_kubelet_cadvisor.exports.metric_relabelings.receiver]
83 | scrape_interval = argument.scrape_interval.value
84 |
85 | tls_config {
86 | ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
87 | insecure_skip_verify = false
88 | server_name = "kubernetes"
89 | }
90 |
91 | clustering {
92 | enabled = argument.clustering.value
93 | }
94 | }
95 |
96 | // metric relabelings
97 | module.git "relabelings_kubelet_cadvisor" {
98 | repository = argument.git_repo.value
99 | revision = argument.git_rev.value
100 | pull_frequency = argument.git_pull_freq.value
101 | path = "modules/kubernetes/metrics/relabelings/kubelet-cadvisor.river"
102 |
103 | arguments {
104 | forward_to = argument.forward_to.value
105 | job_label = argument.job_label.value
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/scrapes/kubelet-probes.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-kubelet-probes
3 | Description: Scrapes Kube probes
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "scrape_interval" {
11 | // comment = "How frequently to scrape the targets of this scrape configuration."
12 | optional = true
13 | default = "60s"
14 | }
15 |
16 | argument "tenant" {
17 | // comment = "The tenant to filter to. This does not have to be the tenantId, this is the value to look for in the metrics.agent.grafana.com/tenant annotation, and this can be a regex."
18 | optional = true
19 | default = ".*"
20 | }
21 |
22 | argument "job_label" {
23 | optional = true
24 | // from Grafana Cloud Integration:
25 | default = "integrations/kubernetes/probes"
26 | // comment = "The job label to add for all kubelet probes metrics"
27 | }
28 |
29 | argument "clustering" {
30 | // comment = "Whether or not clustering should be enabled"
31 | optional = true
32 | default = false
33 | }
34 |
35 | argument "git_repo" {
36 | optional = true
37 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
38 | }
39 |
40 | argument "git_rev" {
41 | optional = true
42 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
43 | }
44 |
45 | argument "git_pull_freq" {
46 | optional = true
47 | default = "0s"
48 | }
49 |
50 | module.git "node_targets" {
51 | repository = argument.git_repo.value
52 | revision = argument.git_rev.value
53 | pull_frequency = argument.git_pull_freq.value
54 | path = "modules/kubernetes/metrics/targets/nodes.river"
55 |
56 | arguments {
57 | tenant = argument.tenant.value
58 | git_repo = argument.git_repo.value
59 | git_rev = argument.git_rev.value
60 | git_pull_freq = argument.git_pull_freq.value
61 | }
62 | }
63 |
64 | discovery.relabel "kubelet_probes" {
65 | targets = module.git.node_targets.exports.relabelings.output
66 |
67 | // set the path to use for kubelet-probes
68 | rule {
69 | action = "replace"
70 | source_labels = ["__meta_kubernetes_node_name"]
71 | regex = "(.+)"
72 | replacement = "/api/v1/nodes/${1}/proxy/metrics/probes"
73 | target_label = "__metrics_path__"
74 | }
75 |
76 | }
77 |
78 | prometheus.scrape "kubelet_probes" {
79 | targets = discovery.relabel.kubelet_probes.output
80 | scheme = "https"
81 | bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
82 | forward_to = [module.git.relabelings_kubelet_probes.exports.metric_relabelings.receiver]
83 | scrape_interval = argument.scrape_interval.value
84 |
85 | tls_config {
86 | ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
87 | insecure_skip_verify = false
88 | server_name = "kubernetes"
89 | }
90 |
91 | clustering {
92 | enabled = argument.clustering.value
93 | }
94 | }
95 |
96 | // metric relabelings
97 | module.git "relabelings_kubelet_probes" {
98 | repository = argument.git_repo.value
99 | revision = argument.git_rev.value
100 | pull_frequency = argument.git_pull_freq.value
101 | path = "modules/kubernetes/metrics/relabelings/kubelet-probes.river"
102 |
103 | arguments {
104 | forward_to = argument.forward_to.value
105 | job_label = argument.job_label.value
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/scrapes/kubelet-resource.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-kubelet-resource
3 | Description: Scrapes Kube resource, most of these same metrics can come from cAdvisor use only if necessary
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "scrape_interval" {
11 | // comment = "How frequently to scrape the targets of this scrape configuration."
12 | optional = true
13 | default = "60s"
14 | }
15 |
16 | argument "tenant" {
17 | // comment = "The tenant to filter to. This does not have to be the tenantId, this is the value to look for in the metrics.agent.grafana.com/tenant annotation, and this can be a regex."
18 | optional = true
19 | default = ".*"
20 | }
21 |
22 | argument "clustering" {
23 | // comment = "Whether or not clustering should be enabled"
24 | optional = true
25 | default = false
26 | }
27 |
28 | argument "git_repo" {
29 | optional = true
30 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
31 | }
32 |
33 | argument "git_rev" {
34 | optional = true
35 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
36 | }
37 |
38 | argument "git_pull_freq" {
39 | optional = true
40 | default = "0s"
41 | }
42 |
43 | module.git "node_targets" {
44 | repository = argument.git_repo.value
45 | revision = argument.git_rev.value
46 | pull_frequency = argument.git_pull_freq.value
47 | path = "modules/kubernetes/metrics/targets/nodes.river"
48 |
49 | arguments {
50 | tenant = argument.tenant.value
51 | git_repo = argument.git_repo.value
52 | git_rev = argument.git_rev.value
53 | git_pull_freq = argument.git_pull_freq.value
54 | }
55 | }
56 |
57 | discovery.relabel "kubelet_resource" {
58 | targets = module.git.node_targets.exports.relabelings.output
59 |
60 | // set the path to use for kubelet-resource
61 | rule {
62 | action = "replace"
63 | source_labels = ["__meta_kubernetes_node_name"]
64 | regex = "(.+)"
65 | replacement = "/api/v1/nodes/${1}/proxy/metrics/resource"
66 | target_label = "__metrics_path__"
67 | }
68 |
69 | }
70 |
71 | prometheus.scrape "kubelet_resource" {
72 | targets = discovery.relabel.kubelet_resource.output
73 | scheme = "https"
74 | bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
75 | forward_to = [module.git.relabelings_kubelet_resource.exports.metric_relabelings.receiver]
76 | scrape_interval = argument.scrape_interval.value
77 |
78 | tls_config {
79 | ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
80 | insecure_skip_verify = false
81 | server_name = "kubernetes"
82 | }
83 |
84 | clustering {
85 | enabled = argument.clustering.value
86 | }
87 | }
88 |
89 | // metric relabelings — NOTE(review): unlike sibling scrape modules, no job_label argument is declared or passed here, so the relabelings module falls back to its own default job label; confirm this is intentional
90 | module.git "relabelings_kubelet_resource" {
91 | repository = argument.git_repo.value
92 | revision = argument.git_rev.value
93 | pull_frequency = argument.git_pull_freq.value
94 | path = "modules/kubernetes/metrics/relabelings/kubelet-resource.river"
95 |
96 | arguments {
97 | forward_to = argument.forward_to.value
98 | }
99 | }
100 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/scrapes/kubelet.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-kubelet
3 | Description: Scrapes Kubelet Metrics
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "scrape_interval" {
11 | // comment = "How frequently to scrape the targets of this scrape configuration."
12 | optional = true
13 | default = "60s"
14 | }
15 |
16 | argument "tenant" {
17 | // comment = "The tenant to filter to. This does not have to be the tenantId, this is the value to look for in the metrics.agent.grafana.com/tenant annotation, and this can be a regex."
18 | optional = true
19 | default = ".*"
20 | }
21 |
22 | argument "job_label" {
23 | optional = true
24 | // from Grafana Cloud Integration:
25 | default = "integrations/kubernetes/kubelet"
26 | // comment = "The job label to add for all kubelet metrics"
27 | }
28 |
29 | argument "clustering" {
30 | // comment = "Whether or not clustering should be enabled"
31 | optional = true
32 | default = false
33 | }
34 |
35 | argument "git_repo" {
36 | optional = true
37 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
38 | }
39 |
40 | argument "git_rev" {
41 | optional = true
42 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
43 | }
44 |
45 | argument "git_pull_freq" {
46 | optional = true
47 | default = "0s"
48 | }
49 |
50 | module.git "node_targets" {
51 | repository = argument.git_repo.value
52 | revision = argument.git_rev.value
53 | pull_frequency = argument.git_pull_freq.value
54 | path = "modules/kubernetes/metrics/targets/nodes.river"
55 |
56 | arguments {
57 | tenant = argument.tenant.value
58 | git_repo = argument.git_repo.value
59 | git_rev = argument.git_rev.value
60 | git_pull_freq = argument.git_pull_freq.value
61 | }
62 | }
63 |
64 | discovery.relabel "kubelet" {
65 | targets = module.git.node_targets.exports.relabelings.output
66 |
67 | // set the path to use for kubelet
68 | rule {
69 | action = "replace"
70 | source_labels = ["__meta_kubernetes_node_name"]
71 | regex = "(.+)"
72 | replacement = "/api/v1/nodes/${1}/proxy/metrics"
73 | target_label = "__metrics_path__"
74 | }
75 |
76 | }
77 |
78 | prometheus.scrape "kubelet" {
79 | targets = discovery.relabel.kubelet.output
80 | scheme = "https"
81 | bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
82 | forward_to = [module.git.relabelings_kubelet.exports.metric_relabelings.receiver]
83 | scrape_interval = argument.scrape_interval.value
84 |
85 | tls_config {
86 | ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
87 | insecure_skip_verify = false
88 | server_name = "kubernetes"
89 | }
90 |
91 | clustering {
92 | enabled = argument.clustering.value
93 | }
94 | }
95 |
96 | // metric relabelings
97 | module.git "relabelings_kubelet" {
98 | repository = argument.git_repo.value
99 | revision = argument.git_rev.value
100 | pull_frequency = argument.git_pull_freq.value
101 | path = "modules/kubernetes/metrics/relabelings/kubelet.river"
102 |
103 | arguments {
104 | forward_to = argument.forward_to.value
105 | job_label = argument.job_label.value
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/scrapes/opencost.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-opencost
3 | Description: Scrapes opencost, this is a separate scrape job, if you are also using annotation based scraping, you will want to explicitly
4 | disable opencost from being scraped by this module and annotations by setting the following annotation on the opencost
5 | metrics.agent.grafana.com/scrape: "false"
6 | */
7 | argument "forward_to" {
8 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
9 | optional = false
10 | }
11 |
12 | argument "scrape_interval" {
13 | // comment = "How frequently to scrape the targets of this scrape configuration."
14 | optional = true
15 | default = "60s"
16 | }
17 |
18 | argument "app_name" {
19 | // comment = "The name of the opencost app"
20 | optional = true
21 | default = "opencost"
22 | }
23 |
24 | argument "job_label" {
25 | // comment = "The job label to add for all opencost metrics, see ../relabelings/opencost.river for the default value"
26 | optional = true
27 | }
28 |
29 | argument "keep_metrics" {
30 | // comment = "Regex of metrics to keep, see ../relabelings/opencost.river for the default value"
31 | optional = true
32 | }
33 |
34 | argument "clustering" {
35 | // comment = "Whether or not clustering should be enabled"
36 | optional = true
37 | default = false
38 | }
39 |
40 | argument "git_repo" {
41 | optional = true
42 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
43 | }
44 |
45 | argument "git_rev" {
46 | optional = true
47 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
48 | }
49 |
50 | argument "git_pull_freq" {
51 | optional = true
52 | default = "0s"
53 | }
54 |
55 | // get the available endpoints
56 | discovery.kubernetes "endpoints" {
57 | role = "endpoints"
58 | }
59 |
60 | discovery.relabel "opencost" {
61 | targets = discovery.kubernetes.endpoints.targets
62 |
63 | // endpoints get all of the service labels they are attached to, filter to just the opencost service
64 | rule {
65 | action = "keep"
66 | source_labels = [
67 | "__meta_kubernetes_service_label_app_kubernetes_io_name",
68 | ]
69 | regex = argument.app_name.value
70 | }
71 |
72 | // set the namespace
73 | rule {
74 | action = "replace"
75 | source_labels = ["__meta_kubernetes_namespace"]
76 | target_label = "namespace"
77 | }
78 |
79 | // set the service_name
80 | rule {
81 | action = "replace"
82 | source_labels = ["__meta_kubernetes_service_name"]
83 | target_label = "service"
84 | }
85 |
86 | }
87 |
88 | prometheus.scrape "opencost" {
89 | targets = discovery.relabel.opencost.output
90 | forward_to = [module.git.relabelings_opencost.exports.metric_relabelings.receiver] // module.git block below is labeled "relabelings_opencost", not "opencost"
91 | scrape_interval = argument.scrape_interval.value
92 |
93 | clustering {
94 | enabled = argument.clustering.value
95 | }
96 | }
97 |
98 | // metric relabelings
99 | module.git "relabelings_opencost" {
100 | repository = argument.git_repo.value
101 | revision = argument.git_rev.value
102 | pull_frequency = argument.git_pull_freq.value
103 | path = "modules/kubernetes/metrics/relabelings/opencost.river"
104 |
105 | arguments {
106 | forward_to = argument.forward_to.value
107 | job_label = argument.job_label.value
108 | keep_metrics = argument.keep_metrics.value
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
/modules/kubernetes/metrics/targets/nodes.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: metrics-target-nodes
3 | Description: Performs Kubernetes service discovery for nodes, applies relabelings
4 | */
5 | argument "tenant" {
6 | // comment = "The tenant to write metrics to. This does not have to be the tenantId, this is the value to look for in the logs.agent.grafana.com/tenant annotation, and this can be a regex."
7 | optional = true
8 | default = ".*"
9 | }
10 |
11 | argument "git_repo" {
12 | optional = true
13 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
14 | }
15 |
16 | argument "git_rev" {
17 | optional = true
18 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
19 | }
20 |
21 | argument "git_pull_freq" {
22 | optional = true
23 | default = "0s"
24 | }
25 |
26 | export "relabelings" {
27 | value = module.git.relabelings_common.exports.relabelings
28 | }
29 |
30 | // get the available nodes
31 | discovery.kubernetes "nodes" {
32 | role = "node"
33 | }
34 |
35 | // apply metric annotation relabelings
36 | module.git "relabelings_annotations" {
37 | repository = argument.git_repo.value
38 | revision = argument.git_rev.value
39 | pull_frequency = argument.git_pull_freq.value
40 | path = "modules/kubernetes/metrics/relabelings/annotations/metrics/node.river"
41 |
42 | arguments {
43 | targets = discovery.kubernetes.nodes.targets
44 | }
45 | }
46 |
47 | discovery.relabel "scrape_targets" {
48 | targets = module.git.relabelings_annotations.exports.relabelings.output
49 |
50 | // set the instance label to the node name
51 | rule {
52 | action = "replace"
53 | source_labels = ["__meta_kubernetes_node_name"]
54 | target_label = "instance"
55 | }
56 |
57 | // allow nodes to declare what tenant their metrics should be written to, the following annotations are supported:
58 | // metrics.agent.grafana.com/tenant: "primary"
59 | rule {
60 | action = "keep"
61 | source_labels = ["__tmp_tenant"]
62 | regex = "^(" + argument.tenant.value + ")$"
63 | }
64 |
65 | // for nodes always use https
66 | rule {
67 | action = "replace"
68 | source_labels = ["__tmp_scheme"]
69 | replacement = "https"
70 | target_label = "__scheme__"
71 | }
72 |
73 | // for nodes always use the address of the Kubernetes API service, node metrics are scraped through the API server proxy
74 | rule {
75 | action = "replace"
76 | replacement = "kubernetes.default.svc.cluster.local:443"
77 | target_label = "__address__"
78 | }
79 |
80 | // allow nodes to declare how often to scrape metrics, the following annotations are supported:
81 | // metrics.agent.grafana.com/interval: 5m
82 | // prometheus.io/interval: 5m
83 | rule {
84 | action = "replace"
85 | source_labels = ["__tmp_interval"]
86 | target_label = "__scrape_interval__"
87 | }
88 |
89 | // allow nodes to declare how long before a scrape times out, the following annotations are supported:
90 | // metrics.agent.grafana.com/timeout: 30s
91 | // prometheus.io/timeout: 30s
92 | rule {
93 | action = "replace"
94 | source_labels = ["__tmp_timeout"]
95 | target_label = "__scrape_timeout__"
96 | }
97 | }
98 |
99 | // apply common relabelings
100 | module.git "relabelings_common" {
101 | repository = argument.git_repo.value
102 | revision = argument.git_rev.value
103 | pull_frequency = argument.git_pull_freq.value
104 | path = "modules/kubernetes/relabelings/common.river"
105 |
106 | arguments {
107 | targets = discovery.relabel.scrape_targets.output
108 | git_repo = argument.git_repo.value
109 | git_rev = argument.git_rev.value
110 | git_pull_freq = argument.git_pull_freq.value
111 | }
112 | }
113 |
--------------------------------------------------------------------------------
/modules/kubernetes/relabelings/endpoints.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-endpoints
3 | Description: Handles relabelings for endpoints that are common across sources i.e. metrics and logs. The labels
4 | may still be dropped through metric relabelings, pipeline stages, etc.
5 | */
6 | argument "targets" {
7 | // comment = "Discovered targets to apply relabelings to"
8 | optional = false
9 | }
10 |
11 | export "relabelings" {
12 | value = discovery.relabel.endpoints
13 | }
14 |
15 | discovery.relabel "endpoints" {
16 | targets = argument.targets.value
17 |
18 | // set the endpoints name label
19 | rule {
20 | action = "replace"
21 | source_labels = ["__meta_kubernetes_endpoints_name"]
22 | target_label = "endpoints_name"
23 | }
24 |
32 | // set whether or not the service the endpoint is attached to is headless, by checking for the existence of the label:
33 | // service.kubernetes.io/headless: ""
34 | rule {
35 | action = "replace"
36 | replacement = "false"
37 | target_label = "__tmp_headless"
38 | }
39 | rule {
40 | action = "replace"
41 | source_labels = ["__meta_kubernetes_endpoints_labelpresent_service_kubernetes_io_headless"]
42 | regex = "^(true)$"
43 | replacement = "$1"
44 | target_label = "__tmp_headless"
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/modules/kubernetes/relabelings/endpointslice.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-endpointslice
3 | Description: Handles relabelings for endpointslice that are common across sources i.e. metrics and logs. The labels
4 | may still be dropped through metric relabelings, pipeline stages, etc.
5 | */
6 | argument "targets" {
7 | // comment = "Discovered targets to apply relabelings to"
8 | optional = false
9 | }
10 |
11 | export "relabelings" {
12 | value = discovery.relabel.endpointslice
13 | }
14 |
15 | discovery.relabel "endpointslice" {
16 | targets = argument.targets.value
17 |
18 | // the endpoints name
19 | rule {
20 | action = "replace"
21 | source_labels = ["__meta_kubernetes_endpointslice_name"]
22 | target_label = "endpointslice_name"
23 | }
24 |
25 | }
26 |
--------------------------------------------------------------------------------
/modules/kubernetes/relabelings/ingress.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-ingress
3 | Description: Handles relabelings for ingress that are common across sources i.e. metrics and logs. The labels
4 | may still be dropped through metric relabelings, pipeline stages, etc.
5 | */
6 | argument "targets" {
7 | // comment = "Discovered targets to apply relabelings to"
8 | optional = false
9 | }
10 |
11 | export "relabelings" {
12 | value = discovery.relabel.ingress
13 | }
14 |
15 | discovery.relabel "ingress" {
16 | targets = argument.targets.value
17 |
18 | // the ingress name
19 | rule {
20 | action = "replace"
21 | source_labels = ["__meta_kubernetes_ingress_name"]
22 | target_label = "ingress_name"
23 | }
24 |
25 | }
26 |
--------------------------------------------------------------------------------
/modules/kubernetes/relabelings/pod.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-pod
3 | Description: Handles relabelings for pods that are common across sources i.e. metrics and logs. The labels
4 | may still be dropped through metric relabelings, pipeline stages, etc.
5 | */
6 | argument "targets" {
7 | // comment = "Discovered targets to apply relabelings to"
8 | optional = false
9 | }
10 |
11 | export "relabelings" {
12 | value = discovery.relabel.pods
13 | }
14 |
15 | discovery.relabel "pods" {
16 | targets = argument.targets.value
17 |
18 | // set the instance label as the name of the worker node the pod is on
19 | rule {
20 | action = "replace"
21 | source_labels = ["__meta_kubernetes_pod_node_name"]
22 | target_label = "instance"
23 | }
24 |
25 | // set the pod name label
26 | rule {
27 | action = "replace"
28 | source_labels = ["__meta_kubernetes_pod_name"]
29 | target_label = "pod"
30 | }
31 |
32 | // set the container label
33 | rule {
34 | action = "replace"
35 | source_labels = ["__meta_kubernetes_pod_container_name"]
36 | target_label = "container"
37 | }
38 |
39 | // add a deployment label for DaemonSets and drop the last 5 chars (hash) from the pod name
40 | // example: grafana-agent-68nv9 becomes DaemonSet/grafana-agent
41 | rule {
42 | source_labels = [
43 | "__meta_kubernetes_pod_controller_kind",
44 | "__meta_kubernetes_pod_controller_name",
45 | ]
46 | action = "replace"
47 | regex = "^(DaemonSet);(.+)$"
48 | replacement = "$1/$2"
49 | target_label = "deployment"
50 | }
51 |
52 | // add a deployment label for ReplicaSets and drop the last 10 chars for the ReplicaSet and last 5 chars from the pod
53 | // example: grafana-58b546d457-dkq99 becomes ReplicaSet/grafana
54 | // example: my-service-square-7dd7f4bd2pr8p becomes ReplicaSet/my-service-square
55 | rule {
56 | source_labels = [
57 | "__meta_kubernetes_pod_controller_kind",
58 | "__meta_kubernetes_pod_controller_name",
59 | ]
60 | action = "replace"
61 | regex = "^(ReplicaSet);((?:[^-]+-?)+)(?:-[a-f0-9]{9,10}-[^-]{5}|-[a-z0-9]{6,15})$"
62 | replacement = "$1/$2"
63 | target_label = "deployment"
64 | }
65 |
66 | // add a deployment label for StatefulSet/CronJob and drop the last digits
67 | // example: enterprise-metrics-backend-0 becomes StatefulSet/enterprise-metrics-backend
68 | rule {
69 | source_labels = [
70 | "__meta_kubernetes_pod_controller_kind",
71 | "__meta_kubernetes_pod_controller_name",
72 | ]
73 | action = "replace"
74 | regex = "^(StatefulSet|CronJob);(.+)$"
75 | replacement = "$1/$2"
76 | target_label = "deployment"
77 | }
78 |
79 | // add a deployment label for pods created from Jobs
80 | // example: enterprise-logs-tokengen-8z8xm becomes Job/enterprise-logs-tokengen
81 | rule {
82 | source_labels = [
83 | "__meta_kubernetes_pod_controller_kind",
84 | "__meta_kubernetes_pod_name",
85 | ]
86 | action = "replace"
87 | regex = "^(Job);(.*)(-\\d+)$"
88 | replacement = "$1/$2"
89 | target_label = "deployment"
90 | }
91 |
92 | // add a deployment label for bare pods, created outside of a controller
93 | rule {
94 | source_labels = [
95 | "deployment",
96 | "__meta_kubernetes_pod_name",
97 | ]
98 | action = "replace"
99 | regex = "^;(.+)$"
100 | replacement = "Pod/$1"
101 | target_label = "deployment"
102 | }
103 |
104 | }
105 |
--------------------------------------------------------------------------------
/modules/kubernetes/relabelings/service.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-service
3 | Description: Handles relabelings for service that are common across sources i.e. metrics and logs. The labels
4 | may still be dropped through metric relabelings, pipeline stages, etc.
5 | */
6 | argument "targets" {
7 | // comment = "Discovered targets to apply relabelings to"
8 | optional = false
9 | }
10 |
11 | export "relabelings" {
12 | value = discovery.relabel.service
13 | }
14 |
15 | discovery.relabel "service" {
16 | targets = argument.targets.value
17 |
18 | // set the service name label
19 | rule {
20 | action = "replace"
21 | source_labels = ["__meta_kubernetes_service_name"]
22 | target_label = "service"
23 | }
24 |
25 | // set whether or not the service is headless, by checking for the existence of the label:
26 | // service.kubernetes.io/headless: ""
27 | rule {
28 | action = "replace"
29 | replacement = "false"
30 | target_label = "__tmp_headless"
31 | }
32 | rule {
33 | action = "replace"
34 | source_labels = [
35 | "__meta_kubernetes_service_labelpresent_service_kubernetes_io_headless",
36 | "__meta_kubernetes_endpoints_labelpresent_service_kubernetes_io_headless",
37 | ]
38 | regex = "^(?:;*)?(true).*$"
39 | replacement = "$1"
40 | target_label = "__tmp_headless"
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/modules/kubernetes/relabelings/static.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: relabelings-static
3 | Description: Handles relabelings to add static labels to all targets, this isn't possible to do dynamically at the moment,
4 | so we simply look for commonly accepted labels that might be added i.e. cluster, env, team, squad, region, etc.
5 | */
6 | argument "targets" {
7 | // comment = "Discovered targets to apply relabelings to"
8 | optional = false
9 | }
10 |
11 | argument "label_cluster" {
12 | // comment = "Static cluster label to add to all collected metrics"
13 | optional = true
14 | default = ""
15 | }
16 |
17 | argument "label_env" {
18 | // comment = "Static env label to add to all collected metrics"
19 | optional = true
20 | default = ""
21 | }
22 |
23 | argument "label_region" {
24 | // comment = "Static region label to add to all collected metrics"
25 | optional = true
26 | default = ""
27 | }
28 |
29 | export "relabelings" {
30 | value = discovery.relabel.static
31 | }
32 |
33 | discovery.relabel "static" {
34 | targets = argument.targets.value
35 |
36 | rule {
37 | action = "replace"
38 | replacement = argument.label_cluster.value
39 | target_label = "cluster"
40 | }
41 |
42 | rule {
43 | action = "replace"
44 | replacement = argument.label_env.value
45 | target_label = "env"
46 | }
47 |
48 | rule {
49 | action = "replace"
50 | replacement = argument.label_region.value
51 | target_label = "region"
52 | }
53 |
54 | }
55 |
--------------------------------------------------------------------------------
/modules/meta-monitoring/metrics/scrape-agent.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-grafana-agent-metrics
3 | Description: Scrapes Grafana Agent Metrics
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "app_name" {
11 | // comment = "The name of the grafana-agent app"
12 | optional = true
13 | default = "grafana-agent"
14 | }
15 |
16 | argument "port_name" {
17 | // comment = "The name of the port to keep metrics for"
18 | optional = true
19 | default = "http-metrics"
20 | }
21 |
22 | argument "namespaces" {
23 | // comment = "List of namespaces to search for grafana metrics in"
24 | optional = true
25 | default = ["agents"]
26 | }
27 |
28 | argument "scrape_interval" {
29 | // comment = "How often to scrape targets"
30 | optional = true
31 | default = "60s"
32 | }
33 |
34 | argument "keep_metrics" {
35 | // comment = "Regex of metrics to keep, see ../relabelings/kube-state-metrics.river for the default value"
36 | optional = true
37 | default = "^.+$"
38 | }
39 |
40 | argument "clustering" {
41 | // comment = "Whether or not clustering should be enabled"
42 | optional = true
43 | default = false
44 | }
45 |
46 | argument "git_repo" {
47 | optional = true
48 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
49 | }
50 |
51 | argument "git_rev" {
52 | optional = true
53 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
54 | }
55 |
56 | argument "git_pull_freq" {
57 | optional = true
58 | default = "0s"
59 | }
60 |
61 | module.git "scrape_resource" {
62 | repository = argument.git_repo.value
63 | revision = argument.git_rev.value
64 | pull_frequency = argument.git_pull_freq.value
65 | path = "modules/meta-monitoring/metrics/scrape-resource.river"
66 |
67 | arguments {
68 | forward_to = argument.forward_to.value
69 | app_name = argument.app_name.value
70 | port_name = argument.port_name.value
71 | namespaces = argument.namespaces.value
72 | scrape_interval = argument.scrape_interval.value
73 | keep_metrics = argument.keep_metrics.value
74 | clustering = argument.clustering.value
75 | git_repo = argument.git_repo.value
76 | git_rev = argument.git_rev.value
77 | git_pull_freq = argument.git_pull_freq.value
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/modules/meta-monitoring/metrics/scrape-grafana.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-grafana-metrics
3 | Description: Scrapes Grafana Metrics
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "app_name" {
11 | // comment = "The name of the grafana app"
12 | optional = true
13 | default = "grafana"
14 | }
15 |
16 | argument "port_name" {
17 | // comment = "The name of the port to keep metrics for"
18 | optional = true
19 | default = "grafana"
20 | }
21 |
22 | argument "namespaces" {
23 | // comment = "List of namespaces to search for grafana metrics in"
24 | optional = true
25 | default = ["grafana"]
26 | }
27 |
28 | argument "scrape_interval" {
29 | // comment = "How often to scrape targets"
30 | optional = true
31 | default = "60s"
32 | }
33 |
34 | argument "keep_metrics" {
35 | // comment = "Regex of metrics to keep, see ../relabelings/kube-state-metrics.river for the default value"
36 | optional = true
37 | default = "^.+$"
38 | }
39 |
40 | argument "clustering" {
41 | // comment = "Whether or not clustering should be enabled"
42 | optional = true
43 | default = false
44 | }
45 |
46 | argument "git_repo" {
47 | optional = true
48 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
49 | }
50 |
51 | argument "git_rev" {
52 | optional = true
53 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
54 | }
55 |
56 | argument "git_pull_freq" {
57 | optional = true
58 | default = "0s"
59 | }
60 |
61 | module.git "scrape_resource" {
62 | repository = argument.git_repo.value
63 | revision = argument.git_rev.value
64 | pull_frequency = argument.git_pull_freq.value
65 | path = "modules/meta-monitoring/metrics/scrape-resource.river"
66 |
67 | arguments {
68 | forward_to = argument.forward_to.value
69 | app_name = argument.app_name.value
70 | port_name = argument.port_name.value
71 | namespaces = argument.namespaces.value
72 | scrape_interval = argument.scrape_interval.value
73 | keep_metrics = argument.keep_metrics.value
74 | clustering = argument.clustering.value
75 | git_repo = argument.git_repo.value
76 | git_rev = argument.git_rev.value
77 | git_pull_freq = argument.git_pull_freq.value
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/modules/meta-monitoring/metrics/scrape-loki.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-loki-metrics
3 | Description: Scrapes Loki Metrics
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "app_name" {
11 | // comment = "The name of the loki app"
12 | optional = true
13 | default = "loki"
14 | }
15 |
16 | argument "port_name" {
17 | // comment = "The name of the port to keep metrics for"
18 | optional = true
19 | default = "http-metrics"
20 | }
21 |
22 | argument "namespaces" {
23 | // comment = "List of namespaces to search for loki metrics in"
24 | optional = true
25 | default = ["loki"]
26 | }
27 |
28 | argument "scrape_interval" {
29 | // comment = "How often to scrape targets"
30 | optional = true
31 | default = "60s"
32 | }
33 |
34 | argument "job_label" {
35 | // comment = "The job label to add for all loki metrics"
36 | optional = true
37 | }
38 |
39 | argument "keep_metrics" {
40 | // comment = "Regex of metrics to keep, see ../relabelings/kube-state-metrics.river for the default value"
41 | optional = true
42 | default = "^.+$"
43 | }
44 |
45 | argument "clustering" {
46 | // comment = "Whether or not clustering should be enabled"
47 | optional = true
48 | default = false
49 | }
50 |
51 | argument "git_repo" {
52 | optional = true
53 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
54 | }
55 |
56 | argument "git_rev" {
57 | optional = true
58 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
59 | }
60 |
61 | argument "git_pull_freq" {
62 | optional = true
63 | default = "0s"
64 | }
65 |
66 | module.git "scrape_resource" {
67 | repository = argument.git_repo.value
68 | revision = argument.git_rev.value
69 | pull_frequency = argument.git_pull_freq.value
70 | path = "modules/meta-monitoring/metrics/scrape-resource.river"
71 |
72 | arguments {
73 | forward_to = argument.forward_to.value
74 | app_name = argument.app_name.value
75 | port_name = argument.port_name.value
76 | namespaces = argument.namespaces.value
77 | scrape_interval = argument.scrape_interval.value
78 | keep_metrics = argument.keep_metrics.value
79 | clustering = argument.clustering.value
80 | git_repo = argument.git_repo.value
81 | git_rev = argument.git_rev.value
82 | git_pull_freq = argument.git_pull_freq.value
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/modules/meta-monitoring/metrics/scrape-mimir.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-mimir-metrics
3 | Description: Scrapes Mimir Metrics
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "app_name" {
11 | // comment = "The name of the mimir app"
12 | optional = true
13 | default = "mimir"
14 | }
15 |
16 | argument "port_name" {
17 | // comment = "The name of the port to keep metrics for"
18 | optional = true
19 | default = "http-metrics"
20 | }
21 |
22 | argument "namespaces" {
23 | // comment = "List of namespaces to search for mimir metrics in"
24 | optional = true
25 | default = ["mimir"]
26 | }
27 |
28 | argument "scrape_interval" {
29 | // comment = "How often to scrape targets"
30 | optional = true
31 | default = "60s"
32 | }
33 |
34 | argument "job_label" {
35 | // comment = "The job label to add for all mimir metrics"
36 | optional = true
37 | default = ""
38 | }
39 |
40 | argument "keep_metrics" {
41 | // comment = "Regex of metrics to keep, see ../relabelings/kube-state-metrics.river for the default value"
42 | optional = true
43 | default = "^.+$"
44 | }
45 |
46 | argument "clustering" {
47 | // comment = "Whether or not clustering should be enabled"
48 | optional = true
49 | default = false
50 | }
51 |
52 | argument "git_repo" {
53 | optional = true
54 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
55 | }
56 |
57 | argument "git_rev" {
58 | optional = true
59 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
60 | }
61 |
62 | argument "git_pull_freq" {
63 | optional = true
64 | default = "0s"
65 | }
66 |
67 | module.git "scrape_resource" {
68 | repository = argument.git_repo.value
69 | revision = argument.git_rev.value
70 | pull_frequency = argument.git_pull_freq.value
71 | path = "modules/meta-monitoring/metrics/scrape-resource.river"
72 |
73 | arguments {
74 | forward_to = argument.forward_to.value
75 | app_name = argument.app_name.value
76 | port_name = argument.port_name.value
77 | namespaces = argument.namespaces.value
78 | scrape_interval = argument.scrape_interval.value
79 | keep_metrics = argument.keep_metrics.value
80 | clustering = argument.clustering.value
81 | git_repo = argument.git_repo.value
82 | git_rev = argument.git_rev.value
83 | git_pull_freq = argument.git_pull_freq.value
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/modules/meta-monitoring/metrics/scrape-resource.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-lgtm-resource-metrics
3 | Description: Scrapes LGTM Resource Metrics
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "app_name" {
11 | // comment = "The name of the app to scrape, defaults to grafana-agent; callers override this per resource"
12 | optional = true
13 | default = "grafana-agent"
14 | }
15 |
16 | argument "port_name" {
17 | // comment = "The name of the port to keep metrics for"
18 | optional = true
19 | default = "http-metrics"
20 | }
21 |
22 | argument "namespaces" {
23 | // comment = "List of namespaces to search for grafana metrics in"
24 | optional = true
25 | default = ["agents"]
26 | }
27 |
28 | argument "scrape_interval" {
29 | // comment = "How often to scrape targets"
30 | optional = true
31 | default = "60s"
32 | }
33 |
34 | argument "job_label" {
35 | // comment = "The job label to add for all grafana metrics"
36 | optional = true
37 | }
38 |
39 | argument "keep_metrics" {
40 | // comment = "Regex of metrics to keep, see ../relabelings/kube-state-metrics.river for the default value"
41 | optional = true
42 | default = "^.+$"
43 | }
44 |
45 | argument "clustering" {
46 | // comment = "Whether or not clustering should be enabled"
47 | optional = true
48 | default = false
49 | }
50 |
51 | argument "git_repo" {
52 | optional = true
53 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
54 | }
55 |
56 | argument "git_rev" {
57 | optional = true
58 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
59 | }
60 |
61 | argument "git_pull_freq" {
62 | optional = true
63 | default = "0s"
64 | }
65 |
66 | module.git "lgtm_targets" {
67 | repository = argument.git_repo.value
68 | revision = argument.git_rev.value
69 | pull_frequency = argument.git_pull_freq.value
70 | path = "modules/meta-monitoring/metrics/targets-lgtm.river"
71 |
72 | arguments {
73 | git_repo = argument.git_repo.value
74 | git_rev = argument.git_rev.value
75 | git_pull_freq = argument.git_pull_freq.value
76 | app_name = argument.app_name.value
77 | namespaces = argument.namespaces.value
78 | port_name = argument.port_name.value
79 | }
80 | }
81 |
82 | prometheus.scrape "lgtm_resource" {
83 | targets = module.git.lgtm_targets.exports.relabelings.output
84 | scrape_interval = argument.scrape_interval.value
85 | forward_to = [prometheus.relabel.lgtm_resource.receiver]
86 |
87 | clustering {
88 | enabled = argument.clustering.value
89 | }
90 | }
91 |
92 | // metric relabelings
93 | prometheus.relabel "lgtm_resource" {
94 | forward_to = argument.forward_to.value
95 |
96 | rule {
97 | action = "keep"
98 | source_labels = ["__name__"]
99 | regex = argument.keep_metrics.value
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
/modules/meta-monitoring/metrics/scrape-tempo.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: scrape-tempo-metrics
3 | Description: Scrapes Tempo Metrics
4 | */
5 | argument "forward_to" {
6 | // comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
7 | optional = false
8 | }
9 |
10 | argument "app_name" {
11 | // comment = "The name of the tempo app"
12 | optional = true
13 | default = "tempo"
14 | }
15 |
16 | argument "port_name" {
17 | // comment = "The name of the port to keep metrics for"
18 | optional = true
19 | default = "http-metrics"
20 | }
21 |
22 | argument "namespaces" {
23 | // comment = "List of namespaces to search for tempo metrics in"
24 | optional = true
25 | default = ["tempo"]
26 | }
27 |
28 | argument "scrape_interval" {
29 | // comment = "How often to scrape targets"
30 | optional = true
31 | default = "60s"
32 | }
33 |
34 | argument "job_label" {
35 | // comment = "The job label to add for all tempo metrics"
36 | optional = true
37 | default = ""
38 | }
39 |
40 | argument "keep_metrics" {
41 | // comment = "Regex of metrics to keep, see ../relabelings/kube-state-metrics.river for the default value"
42 | optional = true
43 | default = "^.+$"
44 | }
45 |
46 | argument "clustering" {
47 | // comment = "Whether or not clustering should be enabled"
48 | optional = true
49 | default = false
50 | }
51 |
52 | argument "git_repo" {
53 | optional = true
54 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
55 | }
56 |
57 | argument "git_rev" {
58 | optional = true
59 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
60 | }
61 |
62 | argument "git_pull_freq" {
63 | optional = true
64 | default = "0s"
65 | }
66 |
67 | // Load the shared scrape-resource module and pass all of this module's
68 | // arguments through to it.
69 | module.git "scrape_resource" {
70 | repository = argument.git_repo.value
71 | revision = argument.git_rev.value
72 | pull_frequency = argument.git_pull_freq.value
73 | path = "modules/meta-monitoring/metrics/scrape-resource.river"
74 | 
75 | arguments {
76 | forward_to = argument.forward_to.value
77 | app_name = argument.app_name.value
78 | port_name = argument.port_name.value
79 | namespaces = argument.namespaces.value
80 | scrape_interval = argument.scrape_interval.value
81 | // forward job_label (previously declared above but never passed, leaving it unused)
82 | job_label = argument.job_label.value
83 | keep_metrics = argument.keep_metrics.value
84 | clustering = argument.clustering.value
85 | git_repo = argument.git_repo.value
86 | git_rev = argument.git_rev.value
87 | git_pull_freq = argument.git_pull_freq.value
88 | }
89 | }
90 | 
--------------------------------------------------------------------------------
/modules/meta-monitoring/metrics/targets-lgtm.river:
--------------------------------------------------------------------------------
1 | /*
2 | Module: lgtm-targets
3 | Description: Performs Kubernetes service discovery for endpoints, applies relabelings
4 | */
5 | argument "app_name" {
6 | // comment = "The name of the lgtm app"
7 | optional = true
8 | default = "loki"
9 | }
10 |
11 | argument "port_name" {
12 | // comment = "The name of the port to keep metrics for"
13 | optional = true
14 | default = "http-metrics"
15 | }
16 |
17 | argument "namespaces" {
18 | // comment = "List of namespaces to search for loki metrics in"
19 | optional = true
20 | default = ["loki"]
21 | }
22 |
23 | argument "git_repo" {
24 | optional = true
25 | default = coalesce(env("GIT_REPO"), "https://github.com/grafana/agent-modules.git")
26 | }
27 |
28 | argument "git_rev" {
29 | optional = true
30 | default = coalesce(env("GIT_REV"), env("GIT_REVISION"), env("GIT_BRANCH"), "main")
31 | }
32 |
33 | argument "git_pull_freq" {
34 | optional = true
35 | default = "0s"
36 | }
37 |
38 | export "relabelings" {
39 | value = discovery.relabel.endpoints
40 | }
41 |
42 | // get the available endpoints
43 | discovery.kubernetes "endpoints" {
44 | role = "endpoints"
45 |
46 | namespaces {
47 | own_namespace = false
48 | names = argument.namespaces.value
49 | }
50 |
51 | selectors {
52 | role = "endpoints"
53 | label = "app.kubernetes.io/name=" + argument.app_name.value
54 | }
55 | }
56 |
57 | // Apply the relabelings exported by this module to the discovered endpoint targets.
58 | discovery.relabel "endpoints" {
59 | targets = discovery.kubernetes.endpoints.targets
60 | 
61 | // drop any endpoints which have prometheus.io/service-monitor: "false" or has the label variant: headless
62 | // set, as these are often attached to a headless service, which we do not want to scrape, loki will have both types of services
63 | // and we don't want duplicate metrics
64 | rule {
65 | action = "drop"
66 | source_labels = [
67 | "__meta_kubernetes_service_label_prometheus_io_service_monitor",
68 | "__meta_kubernetes_endpoints_label_prometheus_io_service_monitor",
69 | "__meta_kubernetes_service_label_variant",
70 | "__meta_kubernetes_endpoints_label_variant",
71 | ]
72 | regex = "^(?:;*)?(false).*$"
73 | }
74 | // also check for the presence of the service.kubernetes.io/headless label; a label that does not
75 | // exist is treated as an empty string, so instead of comparing the value to "" we drop any
76 | // endpoint where the label is present at all (via the labelpresent meta-labels)
77 | rule {
78 | action = "drop"
79 | source_labels = [
80 | "__meta_kubernetes_service_labelpresent_service_kubernetes_io_headless",
81 | "__meta_kubernetes_endpoints_labelpresent_service_kubernetes_io_headless",
82 | ]
83 | regex = "^(?:;*)?(true).*$"
84 | }
85 | 
86 | // there will be a target for each endpoint container AND port, keep only targets whose port name
87 | // matches the port_name argument (default "http-metrics")
88 | rule {
89 | action = "keep"
90 | source_labels = ["__meta_kubernetes_endpoint_port_name"]
91 | regex = argument.port_name.value
92 | }
93 | 
94 | // set the namespace label
95 | rule {
96 | action = "replace"
97 | source_labels = ["__meta_kubernetes_namespace"]
98 | target_label = "namespace"
99 | }
100 | 
101 | // set the service label
102 | rule {
103 | action = "replace"
104 | source_labels = ["__meta_kubernetes_service_name"]
105 | target_label = "service"
106 | }
107 | 
108 | // set the pod label
109 | rule {
110 | action = "replace"
111 | source_labels = ["__meta_kubernetes_pod_name"]
112 | target_label = "pod"
113 | }
114 | 
115 | // set the job label to "<namespace>/<app.kubernetes.io/component label>"
116 | rule {
117 | source_labels = [
118 | "__meta_kubernetes_namespace",
119 | "__meta_kubernetes_pod_label_app_kubernetes_io_component",
120 | ]
121 | separator = "/"
122 | regex = "(.*)/(.*)"
123 | replacement = "${1}/${2}"
124 | target_label = "job"
125 | }
126 | 
127 | }
128 | 
--------------------------------------------------------------------------------
/modules/otlp/otlp-to-lgtm/README.md:
--------------------------------------------------------------------------------
1 | # OTLP to LGTM Stack Module
2 |
3 | Module to ingest OTLP data and then send it to Loki, Mimir and Tempo stacks locally or in GrafanaCloud.
4 |
5 | ## Agent Version
6 |
7 | `>= v0.33`
8 |
9 | ## Module arguments
10 |
11 | The following arguments are supported when passing arguments to the module
12 | loader:
13 |
14 | | Name | Type | Description | Default | Required
15 | | ---- | ---- | ----------- | ------- | --------
16 | | `otlp_http_endpoint` | `string` | The OTLP HTTP server URL. | `"0.0.0.0:4318"` | no
17 | | `otlp_grpc_endpoint` | `string` | The OTLP gRPC server URL. | `"0.0.0.0:4317"` | no
18 | | `prometheus_endpoint` | `receiver` | The Prometheus remote write endpoint. | | yes
19 | | `prometheus_user` | `string` | The Prometheus remote write basic auth username. | | yes
20 | | `prometheus_password` | `secret` | The Prometheus remote write basic auth password. | | yes
21 | | `loki_endpoint` | `string` | Loki endpoint | | yes
22 | | `loki_user` | `string` | Loki basic auth username. | | yes
23 | | `loki_password` | `secret` | Loki basic auth password. | | yes
24 | | `tempo_endpoint` | `string` | Tempo Endpoint | | yes
25 | | `tempo_user` | `string` | Tempo basic auth username. | | yes
26 | | `tempo_password` | `secret` | Tempo basic auth password. | | yes
27 |
28 | ## Module exports
29 |
30 | The module has no exports.
31 |
32 | ## Example
33 |
34 | ```
35 | module.git "otlp_to_lgtm" {
36 | repository = "https://github.com/grafana/agent-modules.git"
37 | revision = "main"
38 | path = "modules/otlp/otlp-to-lgtm/module.river"
39 |
40 | arguments {
41 | prometheus_endpoint = "https://prometheus-us-central1.grafana.net/api/prom/push"
42 | prometheus_user = "123456"
43 | prometheus_password = env("GRAFANA_CLOUD_KEY")
44 |
45 | loki_endpoint = "https://logs-prod-us-central1.grafana.net/loki/api/v1/push"
46 | loki_user = "1234567"
47 | loki_password = env("GRAFANA_CLOUD_KEY")
48 |
49 | tempo_endpoint = "tempo-us-central1.grafana.net:443"
50 | tempo_user = "1234"
51 | tempo_password = env("GRAFANA_CLOUD_KEY")
52 | }
53 | }
54 | ```
55 |
--------------------------------------------------------------------------------
/modules/otlp/otlp-to-lgtm/module.river:
--------------------------------------------------------------------------------
1 | // The address the OTLP HTTP receiver listens on.
2 | argument "otlp_http_endpoint" {
3 | optional = true
4 | default = "0.0.0.0:4318"
5 | }
6 | 
7 | // The address the OTLP gRPC receiver listens on.
8 | argument "otlp_grpc_endpoint" {
9 | optional = true
10 | default = "0.0.0.0:4317"
11 | }
12 | 
13 | // Prometheus remote write URL (required).
14 | argument "prometheus_endpoint" { }
15 | 
16 | // Prometheus remote write basic auth username (required).
17 | argument "prometheus_user" { }
18 | 
19 | // Prometheus remote write basic auth password (required).
20 | argument "prometheus_password" { }
21 | 
22 | // Loki push URL (required).
23 | argument "loki_endpoint" { }
24 | 
25 | // Loki basic auth username (required).
26 | argument "loki_user" { }
27 | 
28 | // Loki basic auth password (required).
29 | argument "loki_password" { }
30 | 
31 | // Tempo endpoint as host:port (required).
32 | argument "tempo_endpoint" { }
33 | 
34 | // Tempo basic auth username (required).
35 | argument "tempo_user" { }
36 | 
37 | // Tempo basic auth password (required).
38 | argument "tempo_password" { }
39 | 
40 | // Receive OTLP data over gRPC and HTTP and fan all three signals out to the memory limiter.
41 | otelcol.receiver.otlp "default" {
42 | // https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.otlp/
43 | 
44 | // configures the default endpoint "0.0.0.0:4317"
45 | grpc {
46 | endpoint = argument.otlp_grpc_endpoint.value
47 | }
48 | // configures the default endpoint "0.0.0.0:4318"
49 | http {
50 | endpoint = argument.otlp_http_endpoint.value
51 | }
52 | 
53 | output {
54 | metrics = [otelcol.processor.memory_limiter.default.input]
55 | logs = [otelcol.processor.memory_limiter.default.input]
56 | traces = [otelcol.processor.memory_limiter.default.input]
57 | }
58 | }
59 | 
60 | // Guard against unbounded memory usage before any further processing.
61 | otelcol.processor.memory_limiter "default" {
62 | // https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.memory_limiter/
63 | check_interval = "1s"
64 | 
65 | limit = "150MiB" // alternatively, set `limit_percentage` and `spike_limit_percentage`
66 | 
67 | output {
68 | metrics = [otelcol.processor.batch.default.input]
69 | logs = [otelcol.processor.batch.default.input]
70 | traces = [otelcol.processor.batch.default.input]
71 | }
72 | }
73 | 
74 | // otelcol.processor.batch must run after components which can drop telemetry (e.g. otelcol.processor.memory_limiter).
75 | // Otherwise, if telemetry is dropped, the effect of batching will be lost.
76 | otelcol.processor.batch "default" {
77 | // https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.batch/
78 | output {
79 | metrics = [otelcol.exporter.prometheus.default.input]
80 | logs = [otelcol.exporter.loki.default.input]
81 | traces = [otelcol.exporter.otlp.grafana_cloud_tempo.input]
82 | }
83 | }
84 | 
85 | // Convert OTLP logs to Loki format and hand them to loki.write.
86 | otelcol.exporter.loki "default" {
87 | // https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.loki/
88 | forward_to = [loki.write.default.receiver]
89 | }
90 | 
91 | // Convert OTLP metrics to Prometheus format and hand them to prometheus.remote_write.
92 | otelcol.exporter.prometheus "default" {
93 | // https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.prometheus/
94 | forward_to = [prometheus.remote_write.default.receiver]
95 | }
96 | 
97 | // Ship metrics to the configured Prometheus-compatible endpoint using basic auth.
98 | prometheus.remote_write "default" {
99 | // https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/
100 | endpoint {
101 | url = argument.prometheus_endpoint.value
102 | 
103 | basic_auth {
104 | username = argument.prometheus_user.value
105 | password = argument.prometheus_password.value
106 | }
107 | }
108 | }
109 | 
110 | // Ship logs to the configured Loki endpoint using basic auth.
111 | loki.write "default" {
112 | // https://grafana.com/docs/agent/latest/flow/reference/components/loki.write/
113 | endpoint {
114 | url = argument.loki_endpoint.value
115 | 
116 | basic_auth {
117 | username = argument.loki_user.value
118 | password = argument.loki_password.value
119 | }
120 | }
121 | }
122 | 
123 | // Ship traces to the configured Tempo endpoint over OTLP/gRPC.
124 | otelcol.exporter.otlp "grafana_cloud_tempo" {
125 | // https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.otlp/
126 | client {
127 | endpoint = argument.tempo_endpoint.value
128 | auth = otelcol.auth.basic.grafana_cloud_tempo.handler
129 | }
130 | }
131 | 
132 | // Basic auth credentials used by the Tempo OTLP exporter above.
133 | otelcol.auth.basic "grafana_cloud_tempo" {
134 | // https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.basic/
135 | username = argument.tempo_user.value
136 | password = argument.tempo_password.value
137 | }
138 | 
--------------------------------------------------------------------------------
/util/agentfmt.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euo pipefail
3 |
4 | #
5 | # Recursively search a directory for .river files and format them.
6 | #
7 | # Prereq: Clone https://github.com/grafana/agent and follow the instructions
8 | # for executing `go run` commands
9 | #
10 | # Example Command: $AGENT_MODULES_REPO_DIR/util/agentfmt.sh $AGENT_REPO_DIR $AGENT_MODULES_REPO_DIR
11 |
12 | AGENT_DIR=$1
13 | TARGET_DIR=$2
14 |
15 | echo "Building agent binary"
16 | pushd "$AGENT_DIR"
17 | make agent
18 | popd
19 |
20 |
21 | find "$TARGET_DIR" -name "*.river" -print0 | while read -rd $'\0' file
22 | do
23 | # This should probably be more clever than having to run the go project for every file but does the job for now...
24 | echo "Formatting $file"
25 | AGENT_MODE=flow "$AGENT_DIR/build/grafana-agent" fmt -w "$file"
26 | done
--------------------------------------------------------------------------------