├── VERSION ├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ ├── golangci-lint.yml │ └── go.yml ├── percona_tests ├── assets │ ├── postgres_exporter.yml │ ├── postgres_exporter_percona.tar.xz │ ├── test.old-flags.txt │ └── test.new-flags.txt ├── custom-queries │ ├── high-resolution │ │ ├── queries-postgres-uptime.yml │ │ └── example-queries-postgres.yml │ ├── low-resolution │ │ └── example-queries-postgres.yml │ └── medium-resolution │ │ └── example-queries-postgres.yml ├── readme.md ├── docker-compose.yml ├── Makefile ├── env_prepare_test.go ├── utils_test.go └── performance_test.go ├── cmd └── postgres_exporter │ ├── tests │ ├── username_file │ ├── userpass_file │ ├── docker-postgres-replication │ │ ├── Dockerfile │ │ ├── Dockerfile.p2 │ │ ├── README.md │ │ ├── setup-replication.sh │ │ ├── docker-compose.yml │ │ └── docker-entrypoint.sh │ ├── user_queries_ok.yaml │ └── user_queries_test.yaml │ ├── percona_compatibility_test.go │ ├── probe.go │ ├── postgres_exporter_integration_test.go │ ├── pg_setting.go │ ├── server.go │ └── util.go ├── postgres_mixin ├── alerts │ ├── alerts.libsonnet │ └── postgres.libsonnet ├── .gitignore ├── go.mod ├── config.libsonnet ├── mixin.libsonnet ├── dashboards │ └── dashboards.libsonnet ├── Makefile └── README.md ├── NOTICE ├── MAINTAINERS.md ├── CODE_OF_CONDUCT.md ├── config ├── testdata │ ├── config-bad-auth-module.yaml │ ├── config-bad-extra-field.yaml │ └── config-good.yaml ├── config_test.go └── config.go ├── SECURITY.md ├── queries-postgres-uptime.yml ├── tools └── tools.go ├── Dockerfile ├── postgres_exporter_integration_test_script ├── .gitignore ├── example-queries-postgres.yml ├── .yamllint ├── gh-assets-clone.sh ├── .promu.yml ├── docker-compose.yml ├── .golangci.yml ├── docker-compose.md ├── gh-metrics-push.sh ├── Makefile ├── postgres-metrics-get-changes.sh ├── testdata └── ssl │ ├── client │ ├── server.crt │ ├── server.key │ └── CA.crt │ └── server │ ├── server.crt │ ├── server.key │ └── CA.crt ├── 
README-RDS.md ├── collector ├── pg_xlog_location_test.go ├── pg_locks_test.go ├── pg_wal_test.go ├── pg_replication_test.go ├── pg_postmaster.go ├── pg_stat_activity_autovacuum_test.go ├── collector_test.go ├── pg_long_running_transactions_test.go ├── pg_wal.go ├── pg_database_wraparound_test.go ├── pg_replication.go ├── pg_stat_activity_autovacuum.go ├── pg_xlog_location.go ├── pg_long_running_transactions.go ├── pg_postmaster_test.go ├── pg_extensions.go ├── probe.go ├── pg_locks.go ├── pg_database_test.go ├── pg_database_wraparound.go ├── pg_statio_user_indexes.go ├── pg_database.go ├── pg_statio_user_indexes_test.go ├── pg_process_idle.go ├── instance.go ├── pg_replication_slot.go ├── pg_stat_bgwriter_test.go └── pg_statio_user_tables_test.go ├── go.mod ├── postgres_exporter.rc ├── .circleci └── config.yml ├── example.alerts.yml ├── queries-mr.yaml └── queries-lr.yaml /VERSION: -------------------------------------------------------------------------------- 1 | 0.15.0 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @percona/pmm-review-exporters 2 | -------------------------------------------------------------------------------- /percona_tests/assets/postgres_exporter.yml: -------------------------------------------------------------------------------- 1 | auth_modules: -------------------------------------------------------------------------------- /cmd/postgres_exporter/tests/username_file: -------------------------------------------------------------------------------- 1 | custom_username$&+,/:;=?@ 2 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/tests/userpass_file: -------------------------------------------------------------------------------- 1 | custom_password$&+,/:;=?@ 2 | -------------------------------------------------------------------------------- 
/postgres_mixin/alerts/alerts.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'postgres.libsonnet') 2 | -------------------------------------------------------------------------------- /postgres_mixin/.gitignore: -------------------------------------------------------------------------------- 1 | /alerts.yaml 2 | /rules.yaml 3 | dashboards_out 4 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright 2018 William Rouesnel 2 | Copyright 2021 The Prometheus Authors 3 | -------------------------------------------------------------------------------- /postgres_mixin/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/wrouesnel/postgres_exporter/postgres_mixin 2 | 3 | go 1.15 4 | -------------------------------------------------------------------------------- /postgres_mixin/config.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | _config+:: { 3 | postgresExporterSelector: '', 4 | }, 5 | } 6 | -------------------------------------------------------------------------------- /postgres_mixin/mixin.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'alerts/alerts.libsonnet') + 2 | (import 'dashboards/dashboards.libsonnet') + 3 | (import 'config.libsonnet') 4 | -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | * Ben Kochie @SuperQ 2 | * William Rouesnel @wrouesnel 3 | * Joe Adams @sysadmind 4 | -------------------------------------------------------------------------------- /percona_tests/assets/postgres_exporter_percona.tar.xz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/percona/postgres_exporter/HEAD/percona_tests/assets/postgres_exporter_percona.tar.xz -------------------------------------------------------------------------------- /postgres_mixin/dashboards/dashboards.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | grafanaDashboards+:: { 3 | 'postgres-overview.json': (import 'postgres-overview.json'), 4 | }, 5 | } 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Prometheus Community Code of Conduct 2 | 3 | Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). 4 | -------------------------------------------------------------------------------- /config/testdata/config-bad-auth-module.yaml: -------------------------------------------------------------------------------- 1 | auth_modules: 2 | foo: 3 | pretendauth: 4 | username: test 5 | password: pass 6 | options: 7 | extra: "1" 8 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Reporting a security issue 2 | 3 | The Prometheus security policy, including how to report vulnerabilities, can be 4 | found here: 5 | 6 | 7 | -------------------------------------------------------------------------------- /config/testdata/config-bad-extra-field.yaml: -------------------------------------------------------------------------------- 1 | auth_modules: 2 | foo: 3 | userpass: 4 | username: test 5 | password: pass 6 | options: 7 | extra: "1" 8 | doesNotExist: test 9 | -------------------------------------------------------------------------------- /config/testdata/config-good.yaml: 
-------------------------------------------------------------------------------- 1 | auth_modules: 2 | first: 3 | type: userpass 4 | userpass: 5 | username: first 6 | password: firstpass 7 | options: 8 | sslmode: disable 9 | -------------------------------------------------------------------------------- /queries-postgres-uptime.yml: -------------------------------------------------------------------------------- 1 | pg_postmaster_uptime: 2 | query: "select extract(epoch from current_timestamp - pg_postmaster_start_time()) as seconds" 3 | master: true 4 | metrics: 5 | - seconds: 6 | usage: "GAUGE" 7 | description: "Service uptime" 8 | -------------------------------------------------------------------------------- /percona_tests/assets/test.old-flags.txt: -------------------------------------------------------------------------------- 1 | --auto-discover-databases 2 | --collect.custom_query.hr 3 | --collect.custom_query.lr 4 | --collect.custom_query.mr 5 | --exclude-databases=template0,template1,postgres,cloudsqladmin,pmm-managed-dev,azure_maintenance,rdsadmin 6 | --log.level=warn -------------------------------------------------------------------------------- /percona_tests/custom-queries/high-resolution/queries-postgres-uptime.yml: -------------------------------------------------------------------------------- 1 | pg_postmaster_uptime: 2 | query: "select extract(epoch from current_timestamp - pg_postmaster_start_time()) as seconds" 3 | master: true 4 | metrics: 5 | - seconds: 6 | usage: "GAUGE" 7 | description: "Service uptime" 8 | -------------------------------------------------------------------------------- /percona_tests/assets/test.new-flags.txt: -------------------------------------------------------------------------------- 1 | --auto-discover-databases 2 | --collect.custom_query.hr 3 | --collect.custom_query.lr 4 | --collect.custom_query.mr 5 | --exclude-databases=template0,template1,postgres,cloudsqladmin,pmm-managed-dev,azure_maintenance,rdsadmin 6 | 
--log.level=warn 7 | --config.file=assets/postgres_exporter.yml -------------------------------------------------------------------------------- /tools/tools.go: -------------------------------------------------------------------------------- 1 | // postgres_exporter 2 | 3 | //go:build tools 4 | // +build tools 5 | 6 | package tools 7 | 8 | import ( 9 | _ "github.com/golangci/golangci-lint/v2/cmd/golangci-lint" 10 | _ "github.com/prometheus/promu" 11 | _ "github.com/reviewdog/reviewdog/cmd/reviewdog" 12 | _ "honnef.co/go/tools/cmd/staticcheck" 13 | ) 14 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ARCH="amd64" 2 | ARG OS="linux" 3 | FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest 4 | LABEL maintainer="The Prometheus Authors " 5 | 6 | ARG ARCH="amd64" 7 | ARG OS="linux" 8 | COPY .build/${OS}-${ARCH}/postgres_exporter /bin/postgres_exporter 9 | 10 | EXPOSE 9187 11 | USER nobody 12 | ENTRYPOINT [ "/bin/postgres_exporter" ] 13 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM postgres:11 2 | MAINTAINER Daniel Dent (https://www.danieldent.com) 3 | ENV PG_MAX_WAL_SENDERS 8 4 | ENV PG_WAL_KEEP_SEGMENTS 8 5 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y inetutils-ping 6 | COPY setup-replication.sh /docker-entrypoint-initdb.d/ 7 | COPY docker-entrypoint.sh /docker-entrypoint.sh 8 | RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh 9 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2: -------------------------------------------------------------------------------- 1 | FROM 
postgres:{{VERSION}} 2 | MAINTAINER Daniel Dent (https://www.danieldent.com) 3 | ENV PG_MAX_WAL_SENDERS 8 4 | ENV PG_WAL_KEEP_SEGMENTS 8 5 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y inetutils-ping 6 | COPY setup-replication.sh /docker-entrypoint-initdb.d/ 7 | COPY docker-entrypoint.sh /docker-entrypoint.sh 8 | RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh 9 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: "gomod" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | - package-ecosystem: "gomod" 9 | directory: "/tools" 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "docker" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | - package-ecosystem: "github-actions" 17 | directory: "/" 18 | schedule: 19 | interval: "weekly" 20 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/tests/docker-postgres-replication/README.md: -------------------------------------------------------------------------------- 1 | # Replicated postgres cluster in docker. 2 | 3 | Upstream is forked from https://github.com/DanielDent/docker-postgres-replication 4 | 5 | My version lives at https://github.com/wrouesnel/docker-postgres-replication 6 | 7 | This very simple docker-compose file lets us stand up a replicated postgres 8 | cluster so we can test streaming. 9 | 10 | # TODO: 11 | Pull in p2 and template the Dockerfile so we can test multiple versions. 
12 | -------------------------------------------------------------------------------- /postgres_exporter_integration_test_script: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script wraps the integration test binary so it produces concatenated 3 | # test output. 4 | 5 | test_binary=$1 6 | shift 7 | output_cov=$1 8 | shift 9 | 10 | echo "Test Binary: $test_binary" 1>&2 11 | echo "Coverage File: $output_cov" 1>&2 12 | 13 | echo "mode: count" > $output_cov 14 | 15 | test_cov=$(mktemp) 16 | $test_binary -test.coverprofile=$test_cov $@ || exit 1 17 | tail -n +2 $test_cov >> $output_cov 18 | rm -f $test_cov 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.build 2 | /postgres_exporter 3 | /postgres_exporter_integration_test 4 | *.tar.gz 5 | *.test 6 | *-stamp 7 | /.idea 8 | /.vscode 9 | *.iml 10 | /cover.out 11 | /cover.*.out 12 | /.coverage 13 | /bin 14 | /release 15 | /*.prom 16 | /.metrics.*.*.prom 17 | /.metrics.*.*.prom.unique 18 | /.assets-branch 19 | /.metrics.*.added 20 | /.metrics.*.removed 21 | /tools/src 22 | /vendor 23 | /percona_tests/assets/postgres_exporter 24 | /percona_tests/assets/postgres_exporter_percona 25 | /percona_tests/assets/metrics.* -------------------------------------------------------------------------------- /example-queries-postgres.yml: -------------------------------------------------------------------------------- 1 | ## ###################################################### 2 | ## WARNING: This is an example. Do not edit this file. 3 | ## To create your own Custom Queries - create a new file 4 | ## ###################################################### 5 | ## Custom query example. 
6 | #pg_replication: 7 | # query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag" 8 | # metrics: 9 | # - lag: 10 | # usage: "GAUGE" 11 | # description: "Replication lag behind master in seconds" 12 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | rules: 5 | braces: 6 | max-spaces-inside: 1 7 | level: error 8 | brackets: 9 | max-spaces-inside: 1 10 | level: error 11 | commas: disable 12 | comments: disable 13 | comments-indentation: disable 14 | document-start: disable 15 | indentation: 16 | spaces: consistent 17 | indent-sequences: consistent 18 | key-duplicates: 19 | ignore: | 20 | config/testdata/section_key_dup.bad.yml 21 | line-length: disable 22 | truthy: 23 | check-keys: false 24 | -------------------------------------------------------------------------------- /percona_tests/custom-queries/high-resolution/example-queries-postgres.yml: -------------------------------------------------------------------------------- 1 | ## ###################################################### 2 | ## WARNING: This is an example. Do not edit this file. 3 | ## To create your own Custom Queries - create a new file 4 | ## ###################################################### 5 | ## Custom query example. 6 | #pg_replication: 7 | # query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag" 8 | # metrics: 9 | # - lag: 10 | # usage: "GAUGE" 11 | # description: "Replication lag behind master in seconds" 12 | -------------------------------------------------------------------------------- /percona_tests/custom-queries/low-resolution/example-queries-postgres.yml: -------------------------------------------------------------------------------- 1 | ## ###################################################### 2 | ## WARNING: This is an example. Do not edit this file. 
3 | ## To create your own Custom Queries - create a new file 4 | ## ###################################################### 5 | ## Custom query example. 6 | #pg_replication: 7 | # query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag" 8 | # metrics: 9 | # - lag: 10 | # usage: "GAUGE" 11 | # description: "Replication lag behind master in seconds" 12 | -------------------------------------------------------------------------------- /percona_tests/custom-queries/medium-resolution/example-queries-postgres.yml: -------------------------------------------------------------------------------- 1 | ## ###################################################### 2 | ## WARNING: This is an example. Do not edit this file. 3 | ## To create your own Custom Queries - create a new file 4 | ## ###################################################### 5 | ## Custom query example. 6 | #pg_replication: 7 | # query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag" 8 | # metrics: 9 | # - lag: 10 | # usage: "GAUGE" 11 | # description: "Replication lag behind master in seconds" 12 | -------------------------------------------------------------------------------- /gh-assets-clone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script to setup the assets clone of the repository using GIT_ASSETS_BRANCH and 3 | # GIT_API_KEY. 4 | 5 | [ ! 
-z "$GIT_ASSETS_BRANCH" ] || exit 1 6 | 7 | setup_git() { 8 | git config --global user.email "travis@travis-ci.org" || exit 1 9 | git config --global user.name "Travis CI" || exit 1 10 | } 11 | 12 | # Constants 13 | ASSETS_DIR=".assets-branch" 14 | 15 | # Clone the assets branch with the correct credentials 16 | git clone --single-branch -b "$GIT_ASSETS_BRANCH" \ 17 | "https://${GIT_API_KEY}@github.com/${TRAVIS_REPO_SLUG}.git" "$ASSETS_DIR" || exit 1 18 | 19 | -------------------------------------------------------------------------------- /postgres_mixin/Makefile: -------------------------------------------------------------------------------- 1 | JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s 2 | 3 | default: build 4 | 5 | all: fmt lint build clean 6 | 7 | fmt: 8 | find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ 9 | xargs -n 1 -- $(JSONNET_FMT) -i 10 | 11 | lint: 12 | find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ 13 | while read f; do \ 14 | $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \ 15 | done 16 | 17 | mixtool lint mixin.libsonnet 18 | 19 | build: 20 | mixtool generate all mixin.libsonnet 21 | 22 | clean: 23 | rm -rf dashboards_out alerts.yaml rules.yaml 24 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "x$REPLICATE_FROM" == "x" ]; then 4 | 5 | cat >> ${PGDATA}/postgresql.conf < ${PGDATA}/recovery.conf < 16 | -c ssl=on 17 | -c ssl_cert_file=/ssl/server.crt 18 | -c ssl_key_file=/ssl/server.key 19 | -c ssl_ca_file=/ssl/CA.crt 20 | -c log_min_messages=DEBUG4 21 | ports: 22 | - 5433:5432 23 | environment: 24 | - POSTGRES_USER=root 25 | - POSTGRES_PASSWORD=root 26 | volumes: 27 | - ./testdata/ssl/server:/ssl 28 | 
-------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | enable: 4 | - misspell 5 | - revive 6 | settings: 7 | errcheck: 8 | exclude-functions: 9 | - (github.com/go-kit/log.Logger).Log 10 | revive: 11 | rules: 12 | - name: unused-parameter 13 | severity: warning 14 | disabled: true 15 | exclusions: 16 | generated: lax 17 | presets: 18 | - comments 19 | - common-false-positives 20 | - legacy 21 | - std-error-handling 22 | rules: 23 | - linters: 24 | - errcheck 25 | path: _test.go 26 | paths: 27 | - third_party$ 28 | - builtin$ 29 | - examples$ 30 | formatters: 31 | exclusions: 32 | generated: lax 33 | paths: 34 | - third_party$ 35 | - builtin$ 36 | - examples$ 37 | -------------------------------------------------------------------------------- /docker-compose.md: -------------------------------------------------------------------------------- 1 | In order to start PostgreSQL with SSL support, we need to change the file permissions 2 | for the ssl cert and key. 3 | 4 | Please run these commands if you want to run `docker compose` locally. 
5 | 6 | sudo chown 999:999 testdata/ssl/server/* 7 | sudo chmod 0600 testdata/ssl/server/* 8 | 9 | Start the container: 10 | ``` 11 | `docker compose` up 12 | ``` 13 | 14 | To be able to connect from pgsql you need to own the certs 15 | 16 | sudo chown ${USER}:${USER} testdata/ssl/client* 17 | sudo chmod 0600 testdata/ssl/client/* 18 | 19 | Connect using psql 20 | 21 | ``` 22 | psql "host=127.0.0.1 port=5433 user=root password=root dbname=postgres sslmode=verify-ca sslcert=${PWD}/testdata/ssl/client/server.crt sslkey=${PWD}/testdata/ssl/client/server.key sslrootcert=${PWD}/testdata/ssl/client/CA.crt" 23 | ``` 24 | 25 | -------------------------------------------------------------------------------- /gh-metrics-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script to copy and push new metric versions to the assets branch. 3 | 4 | [ ! -z "$GIT_ASSETS_BRANCH" ] || exit 1 5 | [ ! -z "$GIT_API_KEY" ] || exit 1 6 | 7 | version=$(git describe HEAD) || exit 1 8 | 9 | # Constants 10 | ASSETS_DIR=".assets-branch" 11 | METRICS_DIR="$ASSETS_DIR/metriclists" 12 | 13 | # Ensure metrics dir exists 14 | mkdir -p "$METRICS_DIR/" 15 | 16 | # Remove old files so we spot deletions 17 | rm -f "$METRICS_DIR/.*.unique" 18 | 19 | # Copy new files 20 | cp -f -t "$METRICS_DIR/" ./.metrics.*.prom.unique || exit 1 21 | 22 | # Enter the assets dir and push. 
23 | cd "$ASSETS_DIR" || exit 1 24 | 25 | git add "metriclists" || exit 1 26 | git commit -m "Added unique metrics for build from $version" || exit 1 27 | git push origin "$GIT_ASSETS_BRANCH" || exit 1 28 | 29 | exit 0 -------------------------------------------------------------------------------- /percona_tests/readme.md: -------------------------------------------------------------------------------- 1 | ## integration tests for exporter update 2 | 3 | ### Fast start: 4 | 5 | run 6 | 7 | make prepare-env-from-repo 8 | 9 | then run any of the ```make test-*``` 10 | 11 | ### A bit of details: 12 | 13 | 1. unpack original exporter 14 | 15 | 16 | make prepare-base-exporter 17 | 18 | 2.a. download updated exporter from specific feature build 19 | 20 | make prepare-exporter-from-fb url="" 21 | 22 | 2.b. or use current repo as updated exporter 23 | 24 | make prepare-exporter-from-repo 25 | 26 | 3. start test postgres_server 27 | 28 | 29 | make start-postgres-db 30 | 31 | 4. run basic performance comparison test 32 | 33 | 34 | make test-performance 35 | 36 | 5. run metrics list compatibility test 37 | 38 | 39 | make test-metrics 40 | 41 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '2' 3 | 4 | services: 5 | pg-master: 6 | build: '.' 7 | image: 'danieldent/postgres-replication' 8 | restart: 'always' 9 | environment: 10 | POSTGRES_USER: 'postgres' 11 | POSTGRES_PASSWORD: 'postgres' 12 | PGDATA: '/var/lib/postgresql/data/pgdata' 13 | volumes: 14 | - '/var/lib/postgresql/data' 15 | expose: 16 | - '5432' 17 | 18 | pg-slave: 19 | build: '.' 
20 | image: 'danieldent/postgres-replication' 21 | restart: 'always' 22 | environment: 23 | POSTGRES_USER: 'postgres' 24 | POSTGRES_PASSWORD: 'postgres' 25 | PGDATA: '/var/lib/postgresql/data/pgdata' 26 | REPLICATE_FROM: 'pg-master' 27 | volumes: 28 | - '/var/lib/postgresql/data' 29 | expose: 30 | - '5432' 31 | links: 32 | - 'pg-master' 33 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Ensure that 'all' is the default target otherwise it will be the first target from Makefile.common. 2 | all:: 3 | 4 | # Needs to be defined before including Makefile.common to auto-generate targets 5 | DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le 6 | DOCKER_REPO ?= prometheuscommunity 7 | 8 | include Makefile.common 9 | 10 | DOCKER_IMAGE_NAME ?= postgres-exporter 11 | 12 | GO_BUILD_LDFLAGS = -X github.com/prometheus/common/version.Version=$(shell cat VERSION) -X github.com/prometheus/common/version.Revision=$(shell git rev-parse HEAD) -X github.com/prometheus/common/version.Branch=$(shell git describe --always --contains --all) -X github.com/prometheus/common/version.BuildUser= -X github.com/prometheus/common/version.BuildDate=$(shell date +%FT%T%z) -s -w 13 | 14 | export PMM_RELEASE_PATH?=. 15 | 16 | release: 17 | go build -ldflags="$(GO_BUILD_LDFLAGS)" -o $(PMM_RELEASE_PATH)/postgres_exporter ./cmd/postgres_exporter 18 | -------------------------------------------------------------------------------- /postgres_mixin/README.md: -------------------------------------------------------------------------------- 1 | # Postgres Mixin 2 | 3 | _This is a work in progress. We aim for it to become a good role model for alerts 4 | and dashboards eventually, but it is not quite there yet._ 5 | 6 | The Postgres Mixin is a set of configurable, reusable, and extensible alerts and 7 | dashboards based on the metrics exported by the Postgres Exporter. 
The mixin creates 8 | recording and alerting rules for Prometheus and suitable dashboard descriptions 9 | for Grafana. 10 | 11 | To use them, you need to have `mixtool` and `jsonnetfmt` installed. If you 12 | have a working Go development environment, it's easiest to run the following: 13 | ```bash 14 | $ go get github.com/monitoring-mixins/mixtool/cmd/mixtool 15 | $ go get github.com/google/go-jsonnet/cmd/jsonnetfmt 16 | ``` 17 | 18 | You can then build the Prometheus rules files `alerts.yaml` and 19 | `rules.yaml` and a directory `dashboard_out` with the JSON dashboard files 20 | for Grafana: 21 | ```bash 22 | $ make build 23 | ``` 24 | 25 | For more advanced uses of mixins, see 26 | https://github.com/monitoring-mixins/docs. 27 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This action is synced from https://github.com/prometheus/prometheus 3 | name: golangci-lint 4 | on: 5 | push: 6 | paths: 7 | - "go.sum" 8 | - "go.mod" 9 | - "**.go" 10 | - "scripts/errcheck_excludes.txt" 11 | - ".github/workflows/golangci-lint.yml" 12 | - ".golangci.yml" 13 | pull_request: 14 | 15 | jobs: 16 | golangci: 17 | name: lint 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout repository 21 | uses: actions/checkout@c2d88d3ecc89a9ef08eebf45d9637801dcee7eb5 # v5.0.1 22 | - name: install Go 23 | uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 24 | with: 25 | go-version-file: ${{ github.workspace }}/go.mod 26 | 27 | - name: Install snmp_exporter/generator dependencies 28 | run: sudo apt-get update && sudo apt-get -y install libsnmp-dev 29 | if: github.repository == 'prometheus/snmp_exporter' 30 | 31 | - name: Lint 32 | uses: golangci/golangci-lint-action@v9.1.0 # v9.1.0 33 | with: 34 | version: v2.6.2 35 | -------------------------------------------------------------------------------- 
/cmd/postgres_exporter/tests/user_queries_ok.yaml: -------------------------------------------------------------------------------- 1 | pg_locks_mode: 2 | query: "WITH q_locks AS (select * from pg_locks where pid != pg_backend_pid() and database = (select oid from pg_database where datname = current_database())) SELECT (select current_database()) as datname, 3 | lockmodes AS tag_lockmode, coalesce((select count(*) FROM q_locks WHERE mode = lockmodes), 0) AS count FROM 4 | unnest('{AccessShareLock, ExclusiveLock, RowShareLock, RowExclusiveLock, ShareLock, ShareRowExclusiveLock, AccessExclusiveLock, ShareUpdateExclusiveLock}'::text[]) lockmodes;" 5 | metrics: 6 | - datname: 7 | usage: "LABEL" 8 | description: "Database name" 9 | - tag_lockmode: 10 | usage: "LABEL" 11 | description: "Lock type" 12 | - count: 13 | usage: "GAUGE" 14 | description: "Number of lock" 15 | pg_wal: 16 | query: "select current_database() as datname, case when pg_is_in_recovery() = false then pg_xlog_location_diff(pg_current_xlog_location(), '0/0')::int8 else pg_xlog_location_diff(pg_last_xlog_replay_location(), '0/0')::int8 end as xlog_location_b;" 17 | metrics: 18 | - datname: 19 | usage: "LABEL" 20 | description: "Database name" 21 | - xlog_location_b: 22 | usage: "COUNTER" 23 | description: "current transaction log write location" 24 | -------------------------------------------------------------------------------- /postgres-metrics-get-changes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script to parse a text exposition format file into a unique list of metrics 3 | # output by the exporter and then build lists of added/removed metrics. 4 | 5 | old_src="$1" 6 | if [ ! -d "$old_src" ] ; then 7 | mkdir -p "$old_src" 8 | fi 9 | 10 | function generate_add_removed() { 11 | type="$1" 12 | pg_version="$2" 13 | old_version="$3" 14 | new_version="$4" 15 | 16 | if [ ! 
-e "$old_version" ] ; then 17 | touch "$old_version" 18 | fi 19 | 20 | comm -23 "$old_version" "$new_version" > ".metrics.${type}.${pg_version}.removed" 21 | comm -13 "$old_version" "$new_version" > ".metrics.${type}.${pg_version}.added" 22 | } 23 | 24 | for raw_prom in $(echo .*.prom) ; do 25 | # Get the type and version 26 | type=$(echo "$raw_prom" | cut -d'.' -f3) 27 | pg_version=$(echo "$raw_prom" | cut -d'.' -f4- | sed 's/\.prom$//g') 28 | 29 | unique_file="${raw_prom}.unique" 30 | old_unique_file="$old_src/$unique_file" 31 | 32 | # Strip, sort and deduplicate the label names 33 | grep -v '#' "$raw_prom" | \ 34 | rev | cut -d' ' -f2- | \ 35 | rev | cut -d'{' -f1 | \ 36 | sort | \ 37 | uniq > "$unique_file" 38 | 39 | generate_add_removed "$type" "$pg_version" "$old_unique_file" "$unique_file" 40 | done 41 | -------------------------------------------------------------------------------- /percona_tests/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.7' 3 | 4 | services: 5 | postgres: 6 | image: ${POSTGRES_IMAGE:-postgres:11} 7 | container_name: postgres-test-srv 8 | command: > 9 | -c shared_preload_libraries='${PG_PRELOADED_LIBS:-pg_stat_statements}' 10 | -c track_activity_query_size=2048 11 | -c pg_stat_statements.max=10000 12 | -c pg_stat_monitor.pgsm_query_max_len=10000 13 | -c pg_stat_statements.track=all 14 | -c pg_stat_statements.save=off 15 | -c track_io_timing=on 16 | ports: 17 | - "127.0.0.1:5432:5432" 18 | environment: 19 | - POSTGRES_USER=postgres 20 | - POSTGRES_PASSWORD=postgres 21 | volumes: 22 | - postgres-test-srv-vol:/docker-entrypoint-initdb.d/ 23 | networks: 24 | - postgres-test-srv-net 25 | 26 | golang: 27 | image: golang:1.21 28 | container_name: golang-test 29 | command: > 30 | tail -f ./assets/test.new-flags.txt 31 | volumes: 32 | - ../:/usr/src/myapp 33 | - go-modules:/go/pkg/mod # Put modules cache into a separate volume 34 | working_dir: 
/usr/src/myapp/percona_tests 35 | depends_on: 36 | - postgres 37 | networks: 38 | - postgres-test-srv-net 39 | 40 | volumes: 41 | go-modules: # Define the volume 42 | postgres-test-srv-vol: 43 | 44 | networks: 45 | postgres-test-srv-net: -------------------------------------------------------------------------------- /testdata/ssl/client/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEGTCCAgGgAwIBAgIQGIGv8QdtH0oxH27TNeil7TANBgkqhkiG9w0BAQsFADAN 3 | MQswCQYDVQQDEwJDQTAeFw0yMTA5MjkxMzU2NTFaFw0yMzAzMjkxNDAzMjNaMBEx 4 | DzANBgNVBAMTBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB 5 | ALrWoFyRmJvQnVoczotp39PJnwzMwfOM4o7611sSp/X/0jwa7SoW5nCFn5CCskgR 6 | cCIbmlV16Mf/5WJXNhWHdzHpsS7t24fQIUFjHmsFpd1VQ/S/7vVUIOXLNwl0ZJpl 7 | la3DGKqx9AAP26cyuI34wa09JnA3PF9BkaFDImE5VjujlIt/S4QJXbnBZOnwuO5D 8 | qDJKeldX801GFf9Wvvn9SxGFoIlIPoTKZLhaT0RVHlc8Hoxwglxk/tIVVLChfqed 9 | jSnqOnUmgLN5B7my+4CjXuup7+4MU5qIgvYcmZNwuI++lB5E6uUmX4BDJmquDM4Y 10 | y4ldm0QspKDtHcSC20Pgow8CAwEAAaNxMG8wDgYDVR0PAQH/BAQDAgO4MB0GA1Ud 11 | JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQU65mWMudTWDMs1JjR 12 | aYtoeuu3RWUwHwYDVR0jBBgwFoAUBW2v/wgKxZ4AFmoSn0zOnUymuTowDQYJKoZI 13 | hvcNAQELBQADggIBALlss5WJKauFOv0gomk8yR5o2xweMfNeftQj9NNhjhyy0fkM 14 | I1C6a+pYry2f3gSqc+4haGmhCRVFSoRh8o2A9bsgyKeJ6DfRKeiopiZqBE8XRy+s 15 | LolxWX8v7bOr81rhJgcsvfdgjSxq3ErBSWxB3MFVOq/4vyc96PeZ/74feVfvvgrD 16 | RpE2IRDWygJUMpbtwF64+sE0vRJNwdCwhtA4DX+KQRLfhEhu67Pjc5LvH33kSdl8 17 | J/uj666dWSaqSpAd6qY3EOq9FfAPYRNauzV3M9NHH0BZZPSqZZp0ovJ2PaLHWk/z 18 | fErEPMgc7qlCK9tJ2uDh3RdyshOULx1DFK7xNZ7tdrBSbZvGptS5CUAzAmBN2E06 19 | EnyaWftqsKmSOi9ydz6tngQTuovGi8RPZGdsT03rtrBJ/hDXiM79nlDDd0ofTjb4 20 | o6eRoS+qQZst87SOMWROi0J8ZilrPNz0aBoY4OWjNKZbyqgADlnatkwH+rPM+13f 21 | sDDaNvFG4bFBAFXaaBFMMWet9GVeh9eNrBMF+80p5GmfIhqeXELAijHabuhqBcKD 22 | tlZdSicjsb2h71xVgv8yCoH9zoOkW7SHWGIOXpvPUUiWJz869yBnUOf8cd69IoUT 23 | BWGHKi74uExIdT82A69xYvWsqnRATHXcJoS0j+NNVOWxTI0XdhS7Sbl3xRe2 24 | 
-----END CERTIFICATE----- 25 | -------------------------------------------------------------------------------- /testdata/ssl/server/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEGTCCAgGgAwIBAgIQGIGv8QdtH0oxH27TNeil7TANBgkqhkiG9w0BAQsFADAN 3 | MQswCQYDVQQDEwJDQTAeFw0yMTA5MjkxMzU2NTFaFw0yMzAzMjkxNDAzMjNaMBEx 4 | DzANBgNVBAMTBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB 5 | ALrWoFyRmJvQnVoczotp39PJnwzMwfOM4o7611sSp/X/0jwa7SoW5nCFn5CCskgR 6 | cCIbmlV16Mf/5WJXNhWHdzHpsS7t24fQIUFjHmsFpd1VQ/S/7vVUIOXLNwl0ZJpl 7 | la3DGKqx9AAP26cyuI34wa09JnA3PF9BkaFDImE5VjujlIt/S4QJXbnBZOnwuO5D 8 | qDJKeldX801GFf9Wvvn9SxGFoIlIPoTKZLhaT0RVHlc8Hoxwglxk/tIVVLChfqed 9 | jSnqOnUmgLN5B7my+4CjXuup7+4MU5qIgvYcmZNwuI++lB5E6uUmX4BDJmquDM4Y 10 | y4ldm0QspKDtHcSC20Pgow8CAwEAAaNxMG8wDgYDVR0PAQH/BAQDAgO4MB0GA1Ud 11 | JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQU65mWMudTWDMs1JjR 12 | aYtoeuu3RWUwHwYDVR0jBBgwFoAUBW2v/wgKxZ4AFmoSn0zOnUymuTowDQYJKoZI 13 | hvcNAQELBQADggIBALlss5WJKauFOv0gomk8yR5o2xweMfNeftQj9NNhjhyy0fkM 14 | I1C6a+pYry2f3gSqc+4haGmhCRVFSoRh8o2A9bsgyKeJ6DfRKeiopiZqBE8XRy+s 15 | LolxWX8v7bOr81rhJgcsvfdgjSxq3ErBSWxB3MFVOq/4vyc96PeZ/74feVfvvgrD 16 | RpE2IRDWygJUMpbtwF64+sE0vRJNwdCwhtA4DX+KQRLfhEhu67Pjc5LvH33kSdl8 17 | J/uj666dWSaqSpAd6qY3EOq9FfAPYRNauzV3M9NHH0BZZPSqZZp0ovJ2PaLHWk/z 18 | fErEPMgc7qlCK9tJ2uDh3RdyshOULx1DFK7xNZ7tdrBSbZvGptS5CUAzAmBN2E06 19 | EnyaWftqsKmSOi9ydz6tngQTuovGi8RPZGdsT03rtrBJ/hDXiM79nlDDd0ofTjb4 20 | o6eRoS+qQZst87SOMWROi0J8ZilrPNz0aBoY4OWjNKZbyqgADlnatkwH+rPM+13f 21 | sDDaNvFG4bFBAFXaaBFMMWet9GVeh9eNrBMF+80p5GmfIhqeXELAijHabuhqBcKD 22 | tlZdSicjsb2h71xVgv8yCoH9zoOkW7SHWGIOXpvPUUiWJz869yBnUOf8cd69IoUT 23 | BWGHKi74uExIdT82A69xYvWsqnRATHXcJoS0j+NNVOWxTI0XdhS7Sbl3xRe2 24 | -----END CERTIFICATE----- 25 | -------------------------------------------------------------------------------- /percona_tests/Makefile: 
#########
### tests

# measures avg scrape time and compares old vs new exporters
test-performance:
	go test -v -run '^TestPerformance$$' -args -doRun=true

extraMetrics = false
multipleLabels = false
dumpMetrics = false
endpoint = ''

test-metrics:
	go test -v -run '^TestMissingMetrics$$' -args -doRun=true

test-labels:
	go test -v -run '^TestMissingLabels$$' -args -doRun=true

test-resolutions-duplicates:
	go test -v -run '^TestResolutionsMetricDuplicates$$' -args -doRun=true

test-resolutions:
	go test -v -run '^TestResolutions$$' -args -doRun=true

dump-metrics:
	go test -v -run '^TestDumpMetrics$$' -args -doRun=true -extraMetrics=$(extraMetrics) -multipleLabels=$(multipleLabels) -endpoint=$(endpoint) -dumpMetrics=true

test-consistency: test-metrics test-resolutions test-resolutions-duplicates

#########
### env preparation

# download exporter from provided feature build's client binary url
# NOTE: '$$' (not '$\') is required so make passes a literal '$' anchor to the
# go test -run regexp.
prepare-exporter-from-fb:
	go test -v -run '^TestPrepareUpdatedExporter$$' -args -doRun=true -url=$(url)

prepare-exporter-from-repo:
	make -C ../ build && cp ../postgres_exporter assets/postgres_exporter

# BUG FIX: the committed asset is postgres_exporter_percona.tar.xz, not .tar.gz
prepare-base-exporter:
	tar -xf assets/postgres_exporter_percona.tar.xz -C assets/

start-postgres-db:
	docker compose up -d --force-recreate --renew-anon-volumes --remove-orphans

stop-postgres-db:
	docker compose down

prepare-env-from-repo: prepare-exporter-from-repo prepare-base-exporter start-postgres-db
### When using postgres-exporter with Amazon Web Services' RDS, the rolname "rdsadmin" and datname "rdsadmin" must be excluded.
Errors mentioned in issue #335 appeared, and I had to modify the
'pg_stat_statements' query with the following:
38 | 39 | -------------------------------------------------------------------------------- /testdata/ssl/client/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAutagXJGYm9CdWhzOi2nf08mfDMzB84zijvrXWxKn9f/SPBrt 3 | KhbmcIWfkIKySBFwIhuaVXXox//lYlc2FYd3MemxLu3bh9AhQWMeawWl3VVD9L/u 4 | 9VQg5cs3CXRkmmWVrcMYqrH0AA/bpzK4jfjBrT0mcDc8X0GRoUMiYTlWO6OUi39L 5 | hAlducFk6fC47kOoMkp6V1fzTUYV/1a++f1LEYWgiUg+hMpkuFpPRFUeVzwejHCC 6 | XGT+0hVUsKF+p52NKeo6dSaAs3kHubL7gKNe66nv7gxTmoiC9hyZk3C4j76UHkTq 7 | 5SZfgEMmaq4MzhjLiV2bRCykoO0dxILbQ+CjDwIDAQABAoIBAQCg479V1162Fo/Z 8 | kGLgNLy00LNwv0XpJ5HVE/atC0Stlq0AayN9khjAHqEIPTOu0a1hAaoGG7Hnv9lU 9 | tMrzASNITPfx9IuNyVFJ0EPaXxO0o/8P3NO2WMq3dyYesycKuZ2J8y3jl7gI8Z7x 10 | vMCmKOcG+liGW1pWoMz6NCh/1nMGiN0OAwrY5MuO+K0lGxz2umI9fKTX3BSvd1SK 11 | pvQQK7tRj6Dqntz5j/OTZknFyoV2QNdw5Ng1ImODdgJmazZASLX4B2vETkZoyFrW 12 | gwfGL0Er88WgRvUbFmZMAaLnzDrW9XJssUF34cqITDzh/O9tLVjXj+PvanF6+gp9 13 | P1I0f6GBAoGBAMXfwcLFN+wwaOWlCQQBy/Ngtlp1uinT6kCxJ0e/IzcUQw9TNxY9 14 | heiiZBvkwKShA3emrkYNY2CxGuKGDjqJndMgthVmBNpYWw44cpYqQkIsKd+4wqyb 15 | a1oemNbcEBTweqYepMasSRVYnqtM8+9bPeOn3JVC35rLSm3erf5pXDDPAoGBAPG4 16 | +yeI+s1tXYp1/8etUn3WCJ8cMnQlU2CyuSwWWZ2Ncpp0b6Xm2eEIWWbhp2mzN4q+ 17 | F80ivnYBwyxPwXIuEiIoBn0pinyvqxHv+wgZfEFYWPVuSEWG9PsR4K0DYXrgkAJK 18 | 9s4Bste3JDXPp8qeBcSnOWY2N+savMuZV9RgUnnBAoGAfitapRKTwepsOGGvCfsL 19 | TLhSDgQbHCifqXhMD5d0oN4ulEr/SRMZm2hQZOjLXS29xEFnxgsrXrAO8HmCARlW 20 | pRSqhaJzXbYQ+VRM3Cs97Gu0l457swu2u9PeqMHRD0j3K41Gi9e3EgFbyuZadDi9 21 | kberExF8+nq9jqj6UMplmkkCgYA5DCoiE371eokoA19BVdNxJVFVk8cIiEKcQLHZ 22 | CNFPkLFqaG5tHgVvyZOn5zumg4hpMyHU5Q1ENnhEffIuq1bWPtIBOguYD7F1A0kg 23 | iTs9BMfB7Kwb1JT+qCJ5UqHv6Q2zrNOAnQADTxK5rG9yL0c3OSwfxk3+K4pBFsiW 24 | s8DjQQKBgHXDhkTgAujyurvYURLRzlAFQJ4hc8Shqpl5xXKj9vh0KmxySZIYBlBf 25 | VoFtZML/aPSwjPVNLDeIp9DCxGDxB5pEY2cBJucovlqTYSdI0qwnhoHEqHGTm2Vk 26 | Fo6kyaAnbb8/N7VRES0wHKc5yzaTD0m6BBl2pLm5saQvpjslP6aw 27 | -----END RSA PRIVATE 
KEY----- 28 | -------------------------------------------------------------------------------- /testdata/ssl/server/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAutagXJGYm9CdWhzOi2nf08mfDMzB84zijvrXWxKn9f/SPBrt 3 | KhbmcIWfkIKySBFwIhuaVXXox//lYlc2FYd3MemxLu3bh9AhQWMeawWl3VVD9L/u 4 | 9VQg5cs3CXRkmmWVrcMYqrH0AA/bpzK4jfjBrT0mcDc8X0GRoUMiYTlWO6OUi39L 5 | hAlducFk6fC47kOoMkp6V1fzTUYV/1a++f1LEYWgiUg+hMpkuFpPRFUeVzwejHCC 6 | XGT+0hVUsKF+p52NKeo6dSaAs3kHubL7gKNe66nv7gxTmoiC9hyZk3C4j76UHkTq 7 | 5SZfgEMmaq4MzhjLiV2bRCykoO0dxILbQ+CjDwIDAQABAoIBAQCg479V1162Fo/Z 8 | kGLgNLy00LNwv0XpJ5HVE/atC0Stlq0AayN9khjAHqEIPTOu0a1hAaoGG7Hnv9lU 9 | tMrzASNITPfx9IuNyVFJ0EPaXxO0o/8P3NO2WMq3dyYesycKuZ2J8y3jl7gI8Z7x 10 | vMCmKOcG+liGW1pWoMz6NCh/1nMGiN0OAwrY5MuO+K0lGxz2umI9fKTX3BSvd1SK 11 | pvQQK7tRj6Dqntz5j/OTZknFyoV2QNdw5Ng1ImODdgJmazZASLX4B2vETkZoyFrW 12 | gwfGL0Er88WgRvUbFmZMAaLnzDrW9XJssUF34cqITDzh/O9tLVjXj+PvanF6+gp9 13 | P1I0f6GBAoGBAMXfwcLFN+wwaOWlCQQBy/Ngtlp1uinT6kCxJ0e/IzcUQw9TNxY9 14 | heiiZBvkwKShA3emrkYNY2CxGuKGDjqJndMgthVmBNpYWw44cpYqQkIsKd+4wqyb 15 | a1oemNbcEBTweqYepMasSRVYnqtM8+9bPeOn3JVC35rLSm3erf5pXDDPAoGBAPG4 16 | +yeI+s1tXYp1/8etUn3WCJ8cMnQlU2CyuSwWWZ2Ncpp0b6Xm2eEIWWbhp2mzN4q+ 17 | F80ivnYBwyxPwXIuEiIoBn0pinyvqxHv+wgZfEFYWPVuSEWG9PsR4K0DYXrgkAJK 18 | 9s4Bste3JDXPp8qeBcSnOWY2N+savMuZV9RgUnnBAoGAfitapRKTwepsOGGvCfsL 19 | TLhSDgQbHCifqXhMD5d0oN4ulEr/SRMZm2hQZOjLXS29xEFnxgsrXrAO8HmCARlW 20 | pRSqhaJzXbYQ+VRM3Cs97Gu0l457swu2u9PeqMHRD0j3K41Gi9e3EgFbyuZadDi9 21 | kberExF8+nq9jqj6UMplmkkCgYA5DCoiE371eokoA19BVdNxJVFVk8cIiEKcQLHZ 22 | CNFPkLFqaG5tHgVvyZOn5zumg4hpMyHU5Q1ENnhEffIuq1bWPtIBOguYD7F1A0kg 23 | iTs9BMfB7Kwb1JT+qCJ5UqHv6Q2zrNOAnQADTxK5rG9yL0c3OSwfxk3+K4pBFsiW 24 | s8DjQQKBgHXDhkTgAujyurvYURLRzlAFQJ4hc8Shqpl5xXKj9vh0KmxySZIYBlBf 25 | VoFtZML/aPSwjPVNLDeIp9DCxGDxB5pEY2cBJucovlqTYSdI0qwnhoHEqHGTm2Vk 26 | Fo6kyaAnbb8/N7VRES0wHKc5yzaTD0m6BBl2pLm5saQvpjslP6aw 27 | -----END RSA 
PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /testdata/ssl/client/CA.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE2jCCAsKgAwIBAgIBATANBgkqhkiG9w0BAQsFADANMQswCQYDVQQDEwJDQTAe 3 | Fw0yMTA5MjkxMzUzMjZaFw0yMzAzMjkxNDAzMjRaMA0xCzAJBgNVBAMTAkNBMIIC 4 | IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAy3AfoZtrIsDx+qzJAaNcbSv7 5 | s6+gVhw/RYbAT2nJ8w8uRxhjhhkY+0KI16m8TeuJvSS082RQbVTnw7g3viqmSz+P 6 | rc5okrfpjDTT0ArM63SrYsKd53t3JUT0hX/mOGoetQD3pSQDsg/f/mNA0Ezosb6q 7 | 0iO3yIlLDzQ3igMMCBLsPYMYSfIv8+iKOiaWXmjH0sOszNNIvMKXi/u9E1LumjDl 8 | R1zpi05YrWscj1yplBgBVYH5aBxy9V8jU3NR6jPWuAVmOUOjeCS7zSS3Oc31qNpW 9 | t9/PosydAaOfkGVYYXytwHk0Xc7h25SSN6kS/KTxJb6oP9ASGoMXxUk9T0q6xtUZ 10 | gvY8GDGQmUH8+xUazd64jQxEjq3RzIThASgAtG6I1VuH1lWxSRq73cWx4XtDmR4i 11 | Acfv4y4s4LN564zcKjeLGTulJebXdDqnGyZWA6+LqkNMKKR5T9aHNFghSiol34pz 12 | icCe6Z4pt/nkoRTlPv30+cwcqgZF27QP31ozaif/lzxq686T40mdxEneRA0Wpr2P 13 | Zxpj1OdSZ7oqIX6/MFcHR4LLwv2VnLgZ4uBOPVeXBnQ/4LoOsBah09r0fyjvuhSJ 14 | dTqP4+VDBXQG+6l7buU0a1Wl5mehRes6CHMFlq2f0fOvvkW/NoB4RkIOnFdP+g7E 15 | RwefiIR+Y8PEYQPIjc8CAwEAAaNFMEMwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB 16 | /wQIMAYBAf8CAQAwHQYDVR0OBBYEFAVtr/8ICsWeABZqEp9Mzp1Mprk6MA0GCSqG 17 | SIb3DQEBCwUAA4ICAQCdSOYDXmpNdZcX7Vmn05p0szxBM8/PrtL9xwSlsnCWomLj 18 | KE8L1lP9G8qda80RFNp4lEXvF21Iflmc2Xf2wDkQj2//qk678labV2sZapje/JqJ 19 | UCxGbFAVV3V3HGs5lvSgC0mhGZkavGijI5LYgJkQ6C5bxwGA0AUeCmi2DTnWVeYb 20 | LHQyo+Nku/Ko6pVgJ3N7CbxJG6kd7A7i/rZzkXhpz5e8SSWLIDUIRtbytZ/tyU/Z 21 | oUgzDf13pUrt6I40VTFeUQKtaUkmiBNpC3ZhkuO+alGSJeDfa/KSn/CnvkJ398nF 22 | VUmi4ihLSwvC0vSO9VRmYBMwo+JEjLFT9+n5B1uUfzs27snJQ5q40zMKrfTfKYaw 23 | K/sXsVdrIfEjyClM9C4PWhPbSycc4HtVpLPJKCP05l4G+aO7HwfqV+SYbZd3ii+E 24 | yAcoH7UGQw92JCNK2JXhKE1vzZtuzcZNedd1cqYDo/vKCeBrPhR9qVVVOta9gFps 25 | OEWzdLL2YSunMnoFvy34OumeSzzAL5MMughbHfO+fmUwI9vtdKINtRyE9A7j8X3p 26 | H+Mm+avMEErcBbZ7u6LgI2aPfZfQbwoy8fv0VG5JN6bAKAh0n2QBGG99JC8OzI5q 27 | 
q6Ash0DqwDNzWkM7IQkECeXQ5PASkah2alBg7mewlS8d6R6NzQ3ILzCB6qCRgA== 28 | -----END CERTIFICATE----- 29 | -------------------------------------------------------------------------------- /testdata/ssl/server/CA.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE2jCCAsKgAwIBAgIBATANBgkqhkiG9w0BAQsFADANMQswCQYDVQQDEwJDQTAe 3 | Fw0yMTA5MjkxMzUzMjZaFw0yMzAzMjkxNDAzMjRaMA0xCzAJBgNVBAMTAkNBMIIC 4 | IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAy3AfoZtrIsDx+qzJAaNcbSv7 5 | s6+gVhw/RYbAT2nJ8w8uRxhjhhkY+0KI16m8TeuJvSS082RQbVTnw7g3viqmSz+P 6 | rc5okrfpjDTT0ArM63SrYsKd53t3JUT0hX/mOGoetQD3pSQDsg/f/mNA0Ezosb6q 7 | 0iO3yIlLDzQ3igMMCBLsPYMYSfIv8+iKOiaWXmjH0sOszNNIvMKXi/u9E1LumjDl 8 | R1zpi05YrWscj1yplBgBVYH5aBxy9V8jU3NR6jPWuAVmOUOjeCS7zSS3Oc31qNpW 9 | t9/PosydAaOfkGVYYXytwHk0Xc7h25SSN6kS/KTxJb6oP9ASGoMXxUk9T0q6xtUZ 10 | gvY8GDGQmUH8+xUazd64jQxEjq3RzIThASgAtG6I1VuH1lWxSRq73cWx4XtDmR4i 11 | Acfv4y4s4LN564zcKjeLGTulJebXdDqnGyZWA6+LqkNMKKR5T9aHNFghSiol34pz 12 | icCe6Z4pt/nkoRTlPv30+cwcqgZF27QP31ozaif/lzxq686T40mdxEneRA0Wpr2P 13 | Zxpj1OdSZ7oqIX6/MFcHR4LLwv2VnLgZ4uBOPVeXBnQ/4LoOsBah09r0fyjvuhSJ 14 | dTqP4+VDBXQG+6l7buU0a1Wl5mehRes6CHMFlq2f0fOvvkW/NoB4RkIOnFdP+g7E 15 | RwefiIR+Y8PEYQPIjc8CAwEAAaNFMEMwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB 16 | /wQIMAYBAf8CAQAwHQYDVR0OBBYEFAVtr/8ICsWeABZqEp9Mzp1Mprk6MA0GCSqG 17 | SIb3DQEBCwUAA4ICAQCdSOYDXmpNdZcX7Vmn05p0szxBM8/PrtL9xwSlsnCWomLj 18 | KE8L1lP9G8qda80RFNp4lEXvF21Iflmc2Xf2wDkQj2//qk678labV2sZapje/JqJ 19 | UCxGbFAVV3V3HGs5lvSgC0mhGZkavGijI5LYgJkQ6C5bxwGA0AUeCmi2DTnWVeYb 20 | LHQyo+Nku/Ko6pVgJ3N7CbxJG6kd7A7i/rZzkXhpz5e8SSWLIDUIRtbytZ/tyU/Z 21 | oUgzDf13pUrt6I40VTFeUQKtaUkmiBNpC3ZhkuO+alGSJeDfa/KSn/CnvkJ398nF 22 | VUmi4ihLSwvC0vSO9VRmYBMwo+JEjLFT9+n5B1uUfzs27snJQ5q40zMKrfTfKYaw 23 | K/sXsVdrIfEjyClM9C4PWhPbSycc4HtVpLPJKCP05l4G+aO7HwfqV+SYbZd3ii+E 24 | yAcoH7UGQw92JCNK2JXhKE1vzZtuzcZNedd1cqYDo/vKCeBrPhR9qVVVOta9gFps 25 | OEWzdLL2YSunMnoFvy34OumeSzzAL5MMughbHfO+fmUwI9vtdKINtRyE9A7j8X3p 26 
| H+Mm+avMEErcBbZ7u6LgI2aPfZfQbwoy8fv0VG5JN6bAKAh0n2QBGG99JC8OzI5q 27 | q6Ash0DqwDNzWkM7IQkECeXQ5PASkah2alBg7mewlS8d6R6NzQ3ILzCB6qCRgA== 28 | -----END CERTIFICATE----- 29 | -------------------------------------------------------------------------------- /config/config_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package config 15 | 16 | import ( 17 | "testing" 18 | ) 19 | 20 | func TestLoadConfig(t *testing.T) { 21 | ch := &Handler{ 22 | Config: &Config{}, 23 | } 24 | 25 | err := ch.ReloadConfig("testdata/config-good.yaml", nil) 26 | if err != nil { 27 | t.Errorf("Error loading config: %s", err) 28 | } 29 | } 30 | 31 | func TestLoadBadConfigs(t *testing.T) { 32 | ch := &Handler{ 33 | Config: &Config{}, 34 | } 35 | 36 | tests := []struct { 37 | input string 38 | want string 39 | }{ 40 | { 41 | input: "testdata/config-bad-auth-module.yaml", 42 | want: "error parsing config file \"testdata/config-bad-auth-module.yaml\": yaml: unmarshal errors:\n line 3: field pretendauth not found in type config.AuthModule", 43 | }, 44 | { 45 | input: "testdata/config-bad-extra-field.yaml", 46 | want: "error parsing config file \"testdata/config-bad-extra-field.yaml\": yaml: unmarshal errors:\n line 8: field doesNotExist not found in type config.AuthModule", 47 | }, 48 | } 49 | 50 | for _, test := range tests { 51 | t.Run(test.input, func(t *testing.T) { 52 | got := ch.ReloadConfig(test.input, nil) 53 | if got == nil || got.Error() != test.want { 54 | t.Fatalf("ReloadConfig(%q) = %v, want %s", test.input, got, test.want) 55 | } 56 | }) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/tests/user_queries_test.yaml: -------------------------------------------------------------------------------- 1 | random: 2 | query: | 3 | WITH data AS (SELECT floor(random()*10) AS d FROM generate_series(1,100)), 4 | metrics AS (SELECT SUM(d) AS sum, COUNT(*) AS count FROM data), 5 | buckets AS (SELECT le, SUM(CASE WHEN d <= le THEN 1 ELSE 0 END) AS d 6 | FROM data, UNNEST(ARRAY[1, 2, 4, 8]) AS le GROUP BY le) 7 | SELECT 8 | sum AS histogram_sum, 9 | count AS histogram_count, 10 | ARRAY_AGG(le) AS histogram, 11 | ARRAY_AGG(d) AS histogram_bucket, 12 | ARRAY_AGG(le) AS missing, 13 | ARRAY_AGG(le) AS missing_sum, 14 | ARRAY_AGG(d) 
AS missing_sum_bucket, 15 | ARRAY_AGG(le) AS missing_count, 16 | ARRAY_AGG(d) AS missing_count_bucket, 17 | sum AS missing_count_sum, 18 | ARRAY_AGG(le) AS unexpected_sum, 19 | ARRAY_AGG(d) AS unexpected_sum_bucket, 20 | 'data' AS unexpected_sum_sum, 21 | ARRAY_AGG(le) AS unexpected_count, 22 | ARRAY_AGG(d) AS unexpected_count_bucket, 23 | sum AS unexpected_count_sum, 24 | 'nan'::varchar AS unexpected_count_count, 25 | ARRAY_AGG(le) AS unexpected_bytes, 26 | ARRAY_AGG(d) AS unexpected_bytes_bucket, 27 | sum AS unexpected_bytes_sum, 28 | 'nan'::bytea AS unexpected_bytes_count 29 | FROM metrics, buckets GROUP BY 1,2 30 | metrics: 31 | - histogram: 32 | usage: "HISTOGRAM" 33 | description: "Random data" 34 | - missing: 35 | usage: "HISTOGRAM" 36 | description: "nonfatal error" 37 | - missing_sum: 38 | usage: "HISTOGRAM" 39 | description: "nonfatal error" 40 | - missing_count: 41 | usage: "HISTOGRAM" 42 | description: "nonfatal error" 43 | - unexpected_sum: 44 | usage: "HISTOGRAM" 45 | description: "nonfatal error" 46 | - unexpected_count: 47 | usage: "HISTOGRAM" 48 | description: "nonfatal error" 49 | - unexpected_bytes: 50 | usage: "HISTOGRAM" 51 | description: "nonfatal error" 52 | -------------------------------------------------------------------------------- /collector/pg_xlog_location_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPGXlogLocationCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | inst := &instance{db: db} 32 | columns := []string{ 33 | "bytes", 34 | } 35 | rows := sqlmock.NewRows(columns). 36 | AddRow(53401) 37 | 38 | mock.ExpectQuery(sanitizeQuery(xlogLocationQuery)).WillReturnRows(rows) 39 | 40 | ch := make(chan prometheus.Metric) 41 | go func() { 42 | defer close(ch) 43 | c := PGXlogLocationCollector{} 44 | 45 | if err := c.Update(context.Background(), inst, ch); err != nil { 46 | t.Errorf("Error calling PGXlogLocationCollector.Update: %s", err) 47 | } 48 | }() 49 | expected := []MetricResult{ 50 | {labels: labelMap{}, value: 53401, metricType: dto.MetricType_GAUGE}, 51 | } 52 | convey.Convey("Metrics comparison", t, func() { 53 | for _, expect := range expected { 54 | m := readMetric(<-ch) 55 | convey.So(expect, convey.ShouldResemble, m) 56 | } 57 | }) 58 | if err := mock.ExpectationsWereMet(); err != nil { 59 | t.Errorf("there were unfulfilled exceptions: %s", err) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /collector/pg_locks_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPGLocksCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | 32 | inst := &instance{db: db} 33 | 34 | rows := sqlmock.NewRows([]string{"datname", "mode", "count"}). 
35 | AddRow("test", "exclusivelock", 42) 36 | 37 | mock.ExpectQuery(sanitizeQuery(pgLocksQuery)).WillReturnRows(rows) 38 | 39 | ch := make(chan prometheus.Metric) 40 | go func() { 41 | defer close(ch) 42 | c := PGLocksCollector{} 43 | if err := c.Update(context.Background(), inst, ch); err != nil { 44 | t.Errorf("Error calling PGLocksCollector.Update: %s", err) 45 | } 46 | }() 47 | 48 | expected := []MetricResult{ 49 | {labels: labelMap{"datname": "test", "mode": "exclusivelock"}, value: 42, metricType: dto.MetricType_GAUGE}, 50 | } 51 | convey.Convey("Metrics comparison", t, func() { 52 | for _, expect := range expected { 53 | m := readMetric(<-ch) 54 | convey.So(expect, convey.ShouldResemble, m) 55 | } 56 | }) 57 | if err := mock.ExpectationsWereMet(); err != nil { 58 | t.Errorf("there were unfulfilled exceptions: %s", err) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /collector/pg_wal_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPgWALCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | 32 | inst := &instance{db: db} 33 | 34 | columns := []string{"segments", "size"} 35 | rows := sqlmock.NewRows(columns). 36 | AddRow(47, 788529152) 37 | mock.ExpectQuery(sanitizeQuery(pgWALQuery)).WillReturnRows(rows) 38 | 39 | ch := make(chan prometheus.Metric) 40 | go func() { 41 | defer close(ch) 42 | c := PGWALCollector{} 43 | 44 | if err := c.Update(context.Background(), inst, ch); err != nil { 45 | t.Errorf("Error calling PGWALCollector.Update: %s", err) 46 | } 47 | }() 48 | 49 | expected := []MetricResult{ 50 | {labels: labelMap{}, value: 47, metricType: dto.MetricType_GAUGE}, 51 | {labels: labelMap{}, value: 788529152, metricType: dto.MetricType_GAUGE}, 52 | } 53 | 54 | convey.Convey("Metrics comparison", t, func() { 55 | for _, expect := range expected { 56 | m := readMetric(<-ch) 57 | convey.So(expect, convey.ShouldResemble, m) 58 | } 59 | }) 60 | if err := mock.ExpectationsWereMet(); err != nil { 61 | t.Errorf("there were unfulfilled exceptions: %s", err) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /percona_tests/env_prepare_test.go: -------------------------------------------------------------------------------- 1 | package percona_tests 2 | 3 | import ( 4 | "archive/tar" 5 | "compress/gzip" 6 | "io" 7 | "log" 8 | "net/http" 9 | "os" 10 | "os/exec" 11 | "strings" 12 | "testing" 13 | ) 14 | 15 | // TestPrepareExporters extracts exporter from client binary's tar.gz 16 | func TestPrepareUpdatedExporter(t *testing.T) { 17 | if doRun == 
nil || !*doRun { 18 | t.Skip("For manual runs only through make") 19 | return 20 | } 21 | 22 | if url == nil || *url == "" { 23 | t.Error("URL not defined") 24 | return 25 | } 26 | 27 | prepareExporter(*url, updatedExporterFileName) 28 | } 29 | 30 | func extractExporter(gzipStream io.Reader, fileName string) { 31 | uncompressedStream, err := gzip.NewReader(gzipStream) 32 | if err != nil { 33 | log.Fatal("ExtractTarGz: NewReader failed") 34 | } 35 | 36 | tarReader := tar.NewReader(uncompressedStream) 37 | 38 | exporterFound := false 39 | for !exporterFound { 40 | header, err := tarReader.Next() 41 | 42 | if err == io.EOF { 43 | break 44 | } 45 | 46 | if err != nil { 47 | log.Fatalf("ExtractTarGz: Next() failed: %s", err.Error()) 48 | } 49 | 50 | switch header.Typeflag { 51 | case tar.TypeDir: 52 | continue 53 | case tar.TypeReg: 54 | if strings.HasSuffix(header.Name, "postgres_exporter") { 55 | outFile, err := os.Create(fileName) 56 | if err != nil { 57 | log.Fatalf("ExtractTarGz: Create() failed: %s", err.Error()) 58 | } 59 | defer outFile.Close() 60 | if _, err := io.Copy(outFile, tarReader); err != nil { 61 | log.Fatalf("ExtractTarGz: Copy() failed: %s", err.Error()) 62 | } 63 | 64 | exporterFound = true 65 | } 66 | default: 67 | log.Fatalf( 68 | "ExtractTarGz: uknown type: %d in %s", 69 | header.Typeflag, 70 | header.Name) 71 | } 72 | } 73 | } 74 | 75 | func prepareExporter(url, fileName string) { 76 | resp, err := http.Get(url) 77 | if err != nil { 78 | log.Fatal(err) 79 | } 80 | 81 | defer resp.Body.Close() 82 | 83 | extractExporter(resp.Body, fileName) 84 | 85 | err = exec.Command("chmod", "+x", fileName).Run() 86 | if err != nil { 87 | log.Fatal(err) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /collector/pg_replication_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 
2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPgReplicationCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | 32 | inst := &instance{db: db} 33 | 34 | columns := []string{"lag", "is_replica"} 35 | rows := sqlmock.NewRows(columns). 
36 | AddRow(1000, 1) 37 | mock.ExpectQuery(sanitizeQuery(pgReplicationQuery)).WillReturnRows(rows) 38 | 39 | ch := make(chan prometheus.Metric) 40 | go func() { 41 | defer close(ch) 42 | c := PGReplicationCollector{} 43 | 44 | if err := c.Update(context.Background(), inst, ch); err != nil { 45 | t.Errorf("Error calling PGReplicationCollector.Update: %s", err) 46 | } 47 | }() 48 | 49 | expected := []MetricResult{ 50 | {labels: labelMap{}, value: 1000, metricType: dto.MetricType_GAUGE}, 51 | {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, 52 | } 53 | 54 | convey.Convey("Metrics comparison", t, func() { 55 | for _, expect := range expected { 56 | m := readMetric(<-ch) 57 | convey.So(expect, convey.ShouldResemble, m) 58 | } 59 | }) 60 | if err := mock.ExpectationsWereMet(); err != nil { 61 | t.Errorf("there were unfulfilled exceptions: %s", err) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /collector/pg_postmaster.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | "database/sql" 19 | 20 | "github.com/prometheus/client_golang/prometheus" 21 | ) 22 | 23 | const postmasterSubsystem = "postmaster" 24 | 25 | func init() { 26 | registerCollector(postmasterSubsystem, defaultDisabled, NewPGPostmasterCollector) 27 | } 28 | 29 | type PGPostmasterCollector struct { 30 | } 31 | 32 | func NewPGPostmasterCollector(collectorConfig) (Collector, error) { 33 | return &PGPostmasterCollector{}, nil 34 | } 35 | 36 | var ( 37 | pgPostMasterStartTimeSeconds = prometheus.NewDesc( 38 | prometheus.BuildFQName( 39 | namespace, 40 | postmasterSubsystem, 41 | "start_time_seconds", 42 | ), 43 | "Time at which postmaster started", 44 | []string{}, nil, 45 | ) 46 | 47 | pgPostmasterQuery = "SELECT extract(epoch from pg_postmaster_start_time) from pg_postmaster_start_time();" 48 | ) 49 | 50 | func (c *PGPostmasterCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 51 | db := instance.getDB() 52 | row := db.QueryRowContext(ctx, 53 | pgPostmasterQuery) 54 | 55 | var startTimeSeconds sql.NullFloat64 56 | err := row.Scan(&startTimeSeconds) 57 | if err != nil { 58 | return err 59 | } 60 | startTimeSecondsMetric := 0.0 61 | if startTimeSeconds.Valid { 62 | startTimeSecondsMetric = startTimeSeconds.Float64 63 | } 64 | ch <- prometheus.MustNewConstMetric( 65 | pgPostMasterStartTimeSeconds, 66 | prometheus.GaugeValue, startTimeSecondsMetric, 67 | ) 68 | return nil 69 | } 70 | -------------------------------------------------------------------------------- /collector/pg_stat_activity_autovacuum_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPGStatActivityAutovacuumCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | inst := &instance{db: db} 32 | columns := []string{ 33 | "relname", 34 | "timestamp_seconds", 35 | } 36 | rows := sqlmock.NewRows(columns). 
37 | AddRow("test", 3600) 38 | 39 | mock.ExpectQuery(sanitizeQuery(statActivityAutovacuumQuery)).WillReturnRows(rows) 40 | 41 | ch := make(chan prometheus.Metric) 42 | go func() { 43 | defer close(ch) 44 | c := PGStatActivityAutovacuumCollector{} 45 | 46 | if err := c.Update(context.Background(), inst, ch); err != nil { 47 | t.Errorf("Error calling PGStatActivityAutovacuumCollector.Update: %s", err) 48 | } 49 | }() 50 | expected := []MetricResult{ 51 | {labels: labelMap{"relname": "test"}, value: 3600, metricType: dto.MetricType_GAUGE}, 52 | } 53 | convey.Convey("Metrics comparison", t, func() { 54 | for _, expect := range expected { 55 | m := readMetric(<-ch) 56 | convey.So(expect, convey.ShouldResemble, m) 57 | } 58 | }) 59 | if err := mock.ExpectationsWereMet(); err != nil { 60 | t.Errorf("there were unfulfilled exceptions: %s", err) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /collector/collector_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | package collector 14 | 15 | import ( 16 | "strings" 17 | 18 | "github.com/prometheus/client_golang/prometheus" 19 | dto "github.com/prometheus/client_model/go" 20 | ) 21 | 22 | type labelMap map[string]string 23 | 24 | type MetricResult struct { 25 | labels labelMap 26 | value float64 27 | metricType dto.MetricType 28 | } 29 | 30 | func readMetric(m prometheus.Metric) MetricResult { 31 | pb := &dto.Metric{} 32 | m.Write(pb) 33 | labels := make(labelMap, len(pb.Label)) 34 | for _, v := range pb.Label { 35 | labels[v.GetName()] = v.GetValue() 36 | } 37 | if pb.Gauge != nil { 38 | return MetricResult{labels: labels, value: pb.GetGauge().GetValue(), metricType: dto.MetricType_GAUGE} 39 | } 40 | if pb.Counter != nil { 41 | return MetricResult{labels: labels, value: pb.GetCounter().GetValue(), metricType: dto.MetricType_COUNTER} 42 | } 43 | if pb.Untyped != nil { 44 | return MetricResult{labels: labels, value: pb.GetUntyped().GetValue(), metricType: dto.MetricType_UNTYPED} 45 | } 46 | panic("Unsupported metric type") 47 | } 48 | 49 | func sanitizeQuery(q string) string { 50 | q = strings.Join(strings.Fields(q), " ") 51 | q = strings.ReplaceAll(q, "(", "\\(") 52 | q = strings.ReplaceAll(q, "?", "\\?") 53 | q = strings.ReplaceAll(q, ")", "\\)") 54 | q = strings.ReplaceAll(q, "[", "\\[") 55 | q = strings.ReplaceAll(q, "]", "\\]") 56 | q = strings.ReplaceAll(q, "{", "\\{") 57 | q = strings.ReplaceAll(q, "}", "\\}") 58 | q = strings.ReplaceAll(q, "*", "\\*") 59 | q = strings.ReplaceAll(q, "^", "\\^") 60 | q = strings.ReplaceAll(q, "$", "\\$") 61 | return q 62 | } 63 | -------------------------------------------------------------------------------- /collector/pg_long_running_transactions_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPGLongRunningTransactionsCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | inst := &instance{db: db} 32 | columns := []string{ 33 | "transactions", 34 | "age_in_seconds", 35 | } 36 | rows := sqlmock.NewRows(columns). 
37 | AddRow(20, 1200) 38 | 39 | mock.ExpectQuery(sanitizeQuery(longRunningTransactionsQuery)).WillReturnRows(rows) 40 | 41 | ch := make(chan prometheus.Metric) 42 | go func() { 43 | defer close(ch) 44 | c := PGLongRunningTransactionsCollector{} 45 | 46 | if err := c.Update(context.Background(), inst, ch); err != nil { 47 | t.Errorf("Error calling PGLongRunningTransactionsCollector.Update: %s", err) 48 | } 49 | }() 50 | expected := []MetricResult{ 51 | {labels: labelMap{}, value: 20, metricType: dto.MetricType_GAUGE}, 52 | {labels: labelMap{}, value: 1200, metricType: dto.MetricType_GAUGE}, 53 | } 54 | convey.Convey("Metrics comparison", t, func() { 55 | for _, expect := range expected { 56 | m := readMetric(<-ch) 57 | convey.So(expect, convey.ShouldResemble, m) 58 | } 59 | }) 60 | if err := mock.ExpectationsWereMet(); err != nil { 61 | t.Errorf("there were unfulfilled exceptions: %s", err) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /collector/pg_wal.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | 19 | "github.com/prometheus/client_golang/prometheus" 20 | ) 21 | 22 | const walSubsystem = "wal" 23 | 24 | func init() { 25 | registerCollector(walSubsystem, defaultDisabled, NewPGWALCollector) 26 | } 27 | 28 | type PGWALCollector struct { 29 | } 30 | 31 | func NewPGWALCollector(config collectorConfig) (Collector, error) { 32 | return &PGWALCollector{}, nil 33 | } 34 | 35 | var ( 36 | pgWALSegments = prometheus.NewDesc( 37 | prometheus.BuildFQName( 38 | namespace, 39 | walSubsystem, 40 | "segments", 41 | ), 42 | "Number of WAL segments", 43 | []string{}, nil, 44 | ) 45 | pgWALSize = prometheus.NewDesc( 46 | prometheus.BuildFQName( 47 | namespace, 48 | walSubsystem, 49 | "size_bytes", 50 | ), 51 | "Total size of WAL segments", 52 | []string{}, nil, 53 | ) 54 | 55 | pgWALQuery = ` 56 | SELECT 57 | COUNT(*) AS segments, 58 | SUM(size) AS size 59 | FROM pg_ls_waldir() 60 | WHERE name ~ '^[0-9A-F]{24}$'` 61 | ) 62 | 63 | func (c PGWALCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 64 | db := instance.getDB() 65 | row := db.QueryRowContext(ctx, 66 | pgWALQuery, 67 | ) 68 | 69 | var segments uint64 70 | var size uint64 71 | err := row.Scan(&segments, &size) 72 | if err != nil { 73 | return err 74 | } 75 | ch <- prometheus.MustNewConstMetric( 76 | pgWALSegments, 77 | prometheus.GaugeValue, float64(segments), 78 | ) 79 | ch <- prometheus.MustNewConstMetric( 80 | pgWALSize, 81 | prometheus.GaugeValue, float64(size), 82 | ) 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /collector/pg_database_wraparound_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPGDatabaseWraparoundCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | inst := &instance{db: db} 32 | columns := []string{ 33 | "datname", 34 | "age_datfrozenxid", 35 | "age_datminmxid", 36 | } 37 | rows := sqlmock.NewRows(columns). 
38 | AddRow("newreddit", 87126426, 0) 39 | 40 | mock.ExpectQuery(sanitizeQuery(databaseWraparoundQuery)).WillReturnRows(rows) 41 | 42 | ch := make(chan prometheus.Metric) 43 | go func() { 44 | defer close(ch) 45 | c := PGDatabaseWraparoundCollector{} 46 | 47 | if err := c.Update(context.Background(), inst, ch); err != nil { 48 | t.Errorf("Error calling PGDatabaseWraparoundCollector.Update: %s", err) 49 | } 50 | }() 51 | expected := []MetricResult{ 52 | {labels: labelMap{"datname": "newreddit"}, value: 87126426, metricType: dto.MetricType_GAUGE}, 53 | {labels: labelMap{"datname": "newreddit"}, value: 0, metricType: dto.MetricType_GAUGE}, 54 | } 55 | convey.Convey("Metrics comparison", t, func() { 56 | for _, expect := range expected { 57 | m := readMetric(<-ch) 58 | convey.So(expect, convey.ShouldResemble, m) 59 | } 60 | }) 61 | if err := mock.ExpectationsWereMet(); err != nil { 62 | t.Errorf("there were unfulfilled exceptions: %s", err) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/prometheus-community/postgres_exporter 2 | 3 | go 1.25.4 4 | 5 | require ( 6 | github.com/DATA-DOG/go-sqlmock v1.5.2 7 | github.com/alecthomas/kingpin/v2 v2.4.0 8 | github.com/blang/semver/v4 v4.0.0 9 | github.com/go-kit/log v0.2.1 10 | github.com/lib/pq v1.10.9 11 | github.com/montanaflynn/stats v0.7.1 12 | github.com/pkg/errors v0.9.1 13 | github.com/prometheus/client_golang v1.20.5 14 | github.com/prometheus/client_model v0.6.1 15 | github.com/prometheus/common v0.60.1 16 | github.com/prometheus/exporter-toolkit v0.13.1 17 | github.com/smartystreets/goconvey v1.8.1 18 | github.com/stretchr/testify v1.11.1 19 | github.com/tklauser/go-sysconf v0.3.16 20 | golang.org/x/sync v0.18.0 21 | golang.org/x/sys v0.38.0 22 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 23 | gopkg.in/yaml.v2 v2.4.0 24 | gopkg.in/yaml.v3 v3.0.1 
25 | ) 26 | 27 | require ( 28 | github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect 29 | github.com/beorn7/perks v1.0.1 // indirect 30 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 31 | github.com/coreos/go-systemd/v22 v22.6.0 // indirect 32 | github.com/davecgh/go-spew v1.1.1 // indirect 33 | github.com/go-logfmt/logfmt v0.6.1 // indirect 34 | github.com/gopherjs/gopherjs v1.17.2 // indirect 35 | github.com/jpillora/backoff v1.0.0 // indirect 36 | github.com/jtolds/gls v4.20.0+incompatible // indirect 37 | github.com/klauspost/compress v1.18.1 // indirect 38 | github.com/kr/pretty v0.3.1 // indirect 39 | github.com/kr/text v0.2.0 // indirect 40 | github.com/mdlayher/socket v0.5.1 // indirect 41 | github.com/mdlayher/vsock v1.2.1 // indirect 42 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 43 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect 44 | github.com/pmezard/go-difflib v1.0.0 // indirect 45 | github.com/prometheus/procfs v0.15.1 // indirect 46 | github.com/rogpeppe/go-internal v1.14.1 // indirect 47 | github.com/smarty/assertions v1.16.0 // indirect 48 | github.com/tklauser/numcpus v0.11.0 // indirect 49 | github.com/xhit/go-str2duration/v2 v2.1.0 // indirect 50 | golang.org/x/crypto v0.45.0 // indirect 51 | golang.org/x/net v0.47.0 // indirect 52 | golang.org/x/oauth2 v0.33.0 // indirect 53 | golang.org/x/text v0.31.0 // indirect 54 | google.golang.org/protobuf v1.36.10 // indirect 55 | ) 56 | -------------------------------------------------------------------------------- /collector/pg_replication.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | 19 | "github.com/prometheus/client_golang/prometheus" 20 | ) 21 | 22 | const replicationSubsystem = "replication" 23 | 24 | func init() { 25 | registerCollector(replicationSubsystem, defaultEnabled, NewPGReplicationCollector) 26 | } 27 | 28 | type PGReplicationCollector struct { 29 | } 30 | 31 | func NewPGReplicationCollector(collectorConfig) (Collector, error) { 32 | return &PGReplicationCollector{}, nil 33 | } 34 | 35 | var ( 36 | pgReplicationLag = prometheus.NewDesc( 37 | prometheus.BuildFQName( 38 | namespace, 39 | replicationSubsystem, 40 | "lag_seconds", 41 | ), 42 | "Replication lag behind master in seconds", 43 | []string{}, nil, 44 | ) 45 | pgReplicationIsReplica = prometheus.NewDesc( 46 | prometheus.BuildFQName( 47 | namespace, 48 | replicationSubsystem, 49 | "is_replica", 50 | ), 51 | "Indicates if the server is a replica", 52 | []string{}, nil, 53 | ) 54 | 55 | pgReplicationQuery = `SELECT 56 | CASE 57 | WHEN NOT pg_is_in_recovery() THEN 0 58 | WHEN pg_last_wal_receive_lsn () = pg_last_wal_replay_lsn () THEN 0 59 | ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) 60 | END AS lag, 61 | CASE 62 | WHEN pg_is_in_recovery() THEN 1 63 | ELSE 0 64 | END as is_replica` 65 | ) 66 | 67 | func (c *PGReplicationCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 68 | db := instance.getDB() 69 | row := db.QueryRowContext(ctx, 70 | pgReplicationQuery, 71 | ) 72 | 73 | var lag 
float64 74 | var isReplica int64 75 | err := row.Scan(&lag, &isReplica) 76 | if err != nil { 77 | return err 78 | } 79 | ch <- prometheus.MustNewConstMetric( 80 | pgReplicationLag, 81 | prometheus.GaugeValue, lag, 82 | ) 83 | ch <- prometheus.MustNewConstMetric( 84 | pgReplicationIsReplica, 85 | prometheus.GaugeValue, float64(isReplica), 86 | ) 87 | return nil 88 | } 89 | -------------------------------------------------------------------------------- /collector/pg_stat_activity_autovacuum.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | 19 | "github.com/go-kit/log" 20 | "github.com/prometheus/client_golang/prometheus" 21 | ) 22 | 23 | const statActivityAutovacuumSubsystem = "stat_activity_autovacuum" 24 | 25 | func init() { 26 | registerCollector(statActivityAutovacuumSubsystem, defaultDisabled, NewPGStatActivityAutovacuumCollector) 27 | } 28 | 29 | type PGStatActivityAutovacuumCollector struct { 30 | log log.Logger 31 | } 32 | 33 | func NewPGStatActivityAutovacuumCollector(config collectorConfig) (Collector, error) { 34 | return &PGStatActivityAutovacuumCollector{log: config.logger}, nil 35 | } 36 | 37 | var ( 38 | statActivityAutovacuumAgeInSeconds = prometheus.NewDesc( 39 | prometheus.BuildFQName(namespace, statActivityAutovacuumSubsystem, "timestamp_seconds"), 40 | "Start timestamp of the vacuum process in seconds", 41 | []string{"relname"}, 42 | prometheus.Labels{}, 43 | ) 44 | 45 | statActivityAutovacuumQuery = ` 46 | SELECT 47 | SPLIT_PART(query, '.', 2) AS relname, 48 | EXTRACT(EPOCH FROM xact_start) AS timestamp_seconds 49 | FROM 50 | pg_catalog.pg_stat_activity 51 | WHERE 52 | query LIKE 'autovacuum:%' 53 | ` 54 | ) 55 | 56 | func (PGStatActivityAutovacuumCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 57 | db := instance.getDB() 58 | rows, err := db.QueryContext(ctx, 59 | statActivityAutovacuumQuery) 60 | 61 | if err != nil { 62 | return err 63 | } 64 | defer rows.Close() 65 | 66 | for rows.Next() { 67 | var relname string 68 | var ageInSeconds float64 69 | 70 | if err := rows.Scan(&relname, &ageInSeconds); err != nil { 71 | return err 72 | } 73 | 74 | ch <- prometheus.MustNewConstMetric( 75 | statActivityAutovacuumAgeInSeconds, 76 | prometheus.GaugeValue, 77 | ageInSeconds, relname, 78 | ) 79 | } 80 | if err := rows.Err(); err != nil { 81 | return err 82 | } 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- 
#!/bin/sh

# PROVIDE: postgres_exporter
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# rc-script for postgres_exporter
#
#
# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# postgres_exporter_enable (bool): Set to NO by default.
#   Set it to YES to enable postgres_exporter.
# postgres_exporter_user (string): Set user that postgres_exporter will run under
#   Default is "nobody".
# postgres_exporter_group (string): Set group that postgres_exporter will run under
#   Default is "nobody".
# postgres_exporter_args (string): Set extra arguments to pass to postgres_exporter
#   Default is "".
# postgres_exporter_listen_address (string): Set ip:port to listen on for web interface and telemetry.
#   Defaults to ":9187"
# postgres_exporter_pg_user (string): Set the Postgres database user
#   Defaults to "postgres_exporter"
# postgres_exporter_pg_pass (string): Set the Postgres database password
#   Default is empty
# postgres_exporter_pg_host (string): Set the Postgres database server
#   Defaults to "localhost"
# postgres_exporter_pg_port (string): Set the Postgres database port
#   Defaults to "5432"

# Add extra arguments via "postgres_exporter_args"
# (see $ postgres_exporter --help)


. /etc/rc.subr

name=postgres_exporter
rcvar=postgres_exporter_enable

load_rc_config $name

: ${postgres_exporter_enable:="NO"}
: ${postgres_exporter_user:="nobody"}
: ${postgres_exporter_group:="nobody"}
: ${postgres_exporter_args:=""}
: ${postgres_exporter_listen_address:=":9187"}
: ${postgres_exporter_pg_user:="postgres_exporter"}
: ${postgres_exporter_pg_pass:=""}
: ${postgres_exporter_pg_host:="localhost"}
: ${postgres_exporter_pg_port:="5432"}

postgres_exporter_data_source_name="postgresql://${postgres_exporter_pg_user}:${postgres_exporter_pg_pass}@${postgres_exporter_pg_host}:${postgres_exporter_pg_port}/postgres?sslmode=disable"


pidfile=/var/run/postgres_exporter.pid
command="/usr/sbin/daemon"
procname="/usr/local/bin/postgres_exporter"
command_args="-f -p ${pidfile} -T ${name} \
	/usr/bin/env DATA_SOURCE_NAME="${postgres_exporter_data_source_name}" ${procname} \
	--web.listen-address=${postgres_exporter_listen_address} \
	${postgres_exporter_args}"

start_precmd=postgres_exporter_startprecmd

# Pre-create the pidfile owned by the service user so daemon(8), running
# unprivileged, can write to it.
postgres_exporter_startprecmd()
{
	if [ ! -e ${pidfile} ]; then
		install -o ${postgres_exporter_user} -g ${postgres_exporter_group} /dev/null ${pidfile};
	fi
}

# Note: load_rc_config is invoked exactly once (above, before the defaults);
# the script previously called it a second time here, which was redundant.
run_rc_command "$1"
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | 19 | "github.com/blang/semver/v4" 20 | "github.com/go-kit/log" 21 | "github.com/go-kit/log/level" 22 | "github.com/prometheus/client_golang/prometheus" 23 | ) 24 | 25 | const xlogLocationSubsystem = "xlog_location" 26 | 27 | func init() { 28 | registerCollector(xlogLocationSubsystem, defaultDisabled, NewPGXlogLocationCollector) 29 | } 30 | 31 | type PGXlogLocationCollector struct { 32 | log log.Logger 33 | } 34 | 35 | func NewPGXlogLocationCollector(config collectorConfig) (Collector, error) { 36 | return &PGXlogLocationCollector{log: config.logger}, nil 37 | } 38 | 39 | var ( 40 | xlogLocationBytes = prometheus.NewDesc( 41 | prometheus.BuildFQName(namespace, xlogLocationSubsystem, "bytes"), 42 | "Postgres LSN (log sequence number) being generated on primary or replayed on replica (truncated to low 52 bits)", 43 | []string{}, 44 | prometheus.Labels{}, 45 | ) 46 | 47 | xlogLocationQuery = ` 48 | SELECT CASE 49 | WHEN pg_is_in_recovery() THEN (pg_last_xlog_replay_location() - '0/0') % (2^52)::bigint 50 | ELSE (pg_current_xlog_location() - '0/0') % (2^52)::bigint 51 | END AS bytes 52 | ` 53 | ) 54 | 55 | func (c PGXlogLocationCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 56 | db := instance.getDB() 57 | 58 | // xlog was renmaed to WAL in PostgreSQL 10 59 | // https://wiki.postgresql.org/wiki/New_in_postgres_10#Renaming_of_.22xlog.22_to_.22wal.22_Globally_.28and_location.2Flsn.29 60 | after10 := 
instance.version.Compare(semver.MustParse("10.0.0")) 61 | if after10 >= 0 { 62 | level.Warn(c.log).Log("msg", "xlog_location collector is not available on PostgreSQL >= 10.0.0, skipping") 63 | return nil 64 | } 65 | 66 | rows, err := db.QueryContext(ctx, 67 | xlogLocationQuery) 68 | 69 | if err != nil { 70 | return err 71 | } 72 | defer rows.Close() 73 | 74 | for rows.Next() { 75 | var bytes float64 76 | 77 | if err := rows.Scan(&bytes); err != nil { 78 | return err 79 | } 80 | 81 | ch <- prometheus.MustNewConstMetric( 82 | xlogLocationBytes, 83 | prometheus.GaugeValue, 84 | bytes, 85 | ) 86 | } 87 | if err := rows.Err(); err != nil { 88 | return err 89 | } 90 | return nil 91 | } 92 | -------------------------------------------------------------------------------- /collector/pg_long_running_transactions.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | 19 | "github.com/go-kit/log" 20 | "github.com/prometheus/client_golang/prometheus" 21 | ) 22 | 23 | const longRunningTransactionsSubsystem = "long_running_transactions" 24 | 25 | func init() { 26 | registerCollector(longRunningTransactionsSubsystem, defaultDisabled, NewPGLongRunningTransactionsCollector) 27 | } 28 | 29 | type PGLongRunningTransactionsCollector struct { 30 | log log.Logger 31 | } 32 | 33 | func NewPGLongRunningTransactionsCollector(config collectorConfig) (Collector, error) { 34 | return &PGLongRunningTransactionsCollector{log: config.logger}, nil 35 | } 36 | 37 | var ( 38 | longRunningTransactionsCount = prometheus.NewDesc( 39 | "pg_long_running_transactions", 40 | "Current number of long running transactions", 41 | []string{}, 42 | prometheus.Labels{}, 43 | ) 44 | 45 | longRunningTransactionsAgeInSeconds = prometheus.NewDesc( 46 | prometheus.BuildFQName(namespace, longRunningTransactionsSubsystem, "oldest_timestamp_seconds"), 47 | "The current maximum transaction age in seconds", 48 | []string{}, 49 | prometheus.Labels{}, 50 | ) 51 | 52 | longRunningTransactionsQuery = ` 53 | SELECT 54 | COUNT(*) as transactions, 55 | MAX(EXTRACT(EPOCH FROM clock_timestamp())) AS oldest_timestamp_seconds 56 | FROM pg_catalog.pg_stat_activity 57 | WHERE state is distinct from 'idle' AND query not like 'autovacuum:%' 58 | ` 59 | ) 60 | 61 | func (PGLongRunningTransactionsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 62 | db := instance.getDB() 63 | rows, err := db.QueryContext(ctx, 64 | longRunningTransactionsQuery) 65 | 66 | if err != nil { 67 | return err 68 | } 69 | defer rows.Close() 70 | 71 | for rows.Next() { 72 | var transactions, ageInSeconds float64 73 | 74 | if err := rows.Scan(&transactions, &ageInSeconds); err != nil { 75 | return err 76 | } 77 | 78 | ch <- prometheus.MustNewConstMetric( 79 | longRunningTransactionsCount, 80 | 
prometheus.GaugeValue, 81 | transactions, 82 | ) 83 | ch <- prometheus.MustNewConstMetric( 84 | longRunningTransactionsAgeInSeconds, 85 | prometheus.GaugeValue, 86 | ageInSeconds, 87 | ) 88 | } 89 | if err := rows.Err(); err != nil { 90 | return err 91 | } 92 | return nil 93 | } 94 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2.1 3 | 4 | orbs: 5 | prometheus: prometheus/prometheus@0.17.1 6 | 7 | executors: 8 | # This must match .promu.yml. 9 | golang: 10 | docker: 11 | - image: cimg/go:1.21 12 | 13 | jobs: 14 | test: 15 | executor: golang 16 | 17 | steps: 18 | - prometheus/setup_environment 19 | - run: GOHOSTARCH=386 GOARCH=386 make test 20 | - run: make 21 | - prometheus/store_artifact: 22 | file: postgres_exporter 23 | 24 | integration: 25 | docker: 26 | - image: cimg/go:1.20 27 | - image: << parameters.postgres_image >> 28 | environment: 29 | POSTGRES_DB: circle_test 30 | POSTGRES_USER: postgres 31 | POSTGRES_PASSWORD: test 32 | 33 | parameters: 34 | postgres_image: 35 | type: string 36 | 37 | environment: 38 | DATA_SOURCE_NAME: 'postgresql://postgres:test@localhost:5432/circle_test?sslmode=disable' 39 | GOOPTS: '-v -tags integration' 40 | 41 | steps: 42 | - checkout 43 | - setup_remote_docker 44 | - run: docker version 45 | - run: make build 46 | - run: make test 47 | 48 | workflows: 49 | version: 2 50 | postgres_exporter: 51 | jobs: 52 | - test: 53 | filters: 54 | tags: 55 | only: /.*/ 56 | - integration: 57 | matrix: 58 | parameters: 59 | postgres_image: 60 | - circleci/postgres:11 61 | - circleci/postgres:12 62 | - circleci/postgres:13 63 | - cimg/postgres:14.9 64 | - cimg/postgres:15.4 65 | - cimg/postgres:16.0 66 | - prometheus/build: 67 | name: build 68 | parallelism: 3 69 | promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" 70 | filters: 71 
| tags: 72 | ignore: /^v.*/ 73 | branches: 74 | ignore: /^(main|master|release-.*|.*build-all.*)$/ 75 | - prometheus/build: 76 | name: build_all 77 | parallelism: 12 78 | filters: 79 | branches: 80 | only: /^(main|master|release-.*|.*build-all.*)$/ 81 | tags: 82 | only: /^v.*/ 83 | - prometheus/publish_master: 84 | context: org-context 85 | docker_hub_organization: prometheuscommunity 86 | quay_io_organization: prometheuscommunity 87 | requires: 88 | - test 89 | - build_all 90 | filters: 91 | branches: 92 | only: master 93 | - prometheus/publish_release: 94 | context: org-context 95 | docker_hub_organization: prometheuscommunity 96 | quay_io_organization: prometheuscommunity 97 | requires: 98 | - test 99 | - build_all 100 | filters: 101 | tags: 102 | only: /^v.*/ 103 | branches: 104 | ignore: /.*/ 105 | -------------------------------------------------------------------------------- /collector/pg_postmaster_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPgPostmasterCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | 32 | inst := &instance{db: db} 33 | 34 | mock.ExpectQuery(sanitizeQuery(pgPostmasterQuery)).WillReturnRows(sqlmock.NewRows([]string{"pg_postmaster_start_time"}). 35 | AddRow(1685739904)) 36 | 37 | ch := make(chan prometheus.Metric) 38 | go func() { 39 | defer close(ch) 40 | c := PGPostmasterCollector{} 41 | 42 | if err := c.Update(context.Background(), inst, ch); err != nil { 43 | t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) 44 | } 45 | }() 46 | 47 | expected := []MetricResult{ 48 | {labels: labelMap{}, value: 1685739904, metricType: dto.MetricType_GAUGE}, 49 | } 50 | convey.Convey("Metrics comparison", t, func() { 51 | for _, expect := range expected { 52 | m := readMetric(<-ch) 53 | convey.So(expect, convey.ShouldResemble, m) 54 | } 55 | }) 56 | if err := mock.ExpectationsWereMet(); err != nil { 57 | t.Errorf("there were unfulfilled exceptions: %s", err) 58 | } 59 | } 60 | 61 | func TestPgPostmasterCollectorNullTime(t *testing.T) { 62 | db, mock, err := sqlmock.New() 63 | if err != nil { 64 | t.Fatalf("Error opening a stub db connection: %s", err) 65 | } 66 | defer db.Close() 67 | 68 | inst := &instance{db: db} 69 | 70 | mock.ExpectQuery(sanitizeQuery(pgPostmasterQuery)).WillReturnRows(sqlmock.NewRows([]string{"pg_postmaster_start_time"}). 
71 | AddRow(nil)) 72 | 73 | ch := make(chan prometheus.Metric) 74 | go func() { 75 | defer close(ch) 76 | c := PGPostmasterCollector{} 77 | 78 | if err := c.Update(context.Background(), inst, ch); err != nil { 79 | t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) 80 | } 81 | }() 82 | 83 | expected := []MetricResult{ 84 | {labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE}, 85 | } 86 | convey.Convey("Metrics comparison", t, func() { 87 | for _, expect := range expected { 88 | m := readMetric(<-ch) 89 | convey.So(expect, convey.ShouldResemble, m) 90 | } 91 | }) 92 | if err := mock.ExpectationsWereMet(); err != nil { 93 | t.Errorf("there were unfulfilled exceptions: %s", err) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /collector/pg_extensions.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "github.com/go-kit/log" 7 | "github.com/prometheus/client_golang/prometheus" 8 | "strconv" 9 | ) 10 | 11 | func init() { 12 | registerCollector("extensions", defaultEnabled, NewExtensionsCollector) 13 | } 14 | 15 | var pgExtensions = map[string]*prometheus.Desc{ 16 | "pg_available_extensions": prometheus.NewDesc( 17 | "pg_available_extensions", 18 | "Extensions that are available for installation", 19 | []string{ 20 | "name", 21 | "default_version", 22 | "installed_version", 23 | }, 24 | prometheus.Labels{}, 25 | ), 26 | "pg_extensions": prometheus.NewDesc( 27 | "pg_extensions", 28 | "Installed extensions", 29 | []string{ 30 | "name", 31 | "relocatable", 32 | "version", 33 | }, 34 | prometheus.Labels{}, 35 | ), 36 | } 37 | 38 | type ExtensionsCollector struct { 39 | logger log.Logger 40 | } 41 | 42 | func NewExtensionsCollector(collectorConfig collectorConfig) (Collector, error) { 43 | return &ExtensionsCollector{logger: collectorConfig.logger}, nil 44 | } 45 | 46 | func (e 
*ExtensionsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 47 | err := e.scrapeAvailableExtensions(ctx, instance.db, ch) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | err = e.scrapeInstalledExtensions(ctx, instance.db, ch) 53 | if err != nil { 54 | return err 55 | } 56 | 57 | return nil 58 | } 59 | 60 | func (e *ExtensionsCollector) scrapeInstalledExtensions(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { 61 | rowsExtensions, err := db.QueryContext(ctx, `SELECT extname, extrelocatable, extversion FROM pg_extension`) 62 | 63 | if err != nil { 64 | return err 65 | } 66 | defer rowsExtensions.Close() 67 | 68 | for rowsExtensions.Next() { 69 | var extname string 70 | var extrelocatable bool 71 | var extversion string 72 | if err := rowsExtensions.Scan(&extname, &extrelocatable, &extversion); err != nil { 73 | return err 74 | } 75 | 76 | ch <- prometheus.MustNewConstMetric( 77 | pgExtensions["pg_extensions"], 78 | prometheus.GaugeValue, 79 | 1, 80 | extname, 81 | strconv.FormatBool(extrelocatable), 82 | extversion, 83 | ) 84 | } 85 | 86 | return nil 87 | } 88 | 89 | func (e *ExtensionsCollector) scrapeAvailableExtensions(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { 90 | rows, err := db.QueryContext(ctx, `SELECT name, default_version, installed_version FROM pg_available_extensions`) 91 | if err != nil { 92 | return err 93 | } 94 | defer rows.Close() 95 | 96 | for rows.Next() { 97 | var name sql.NullString 98 | var defaultVersion sql.NullString 99 | var installedVersion sql.NullString 100 | if err := rows.Scan(&name, &defaultVersion, &installedVersion); err != nil { 101 | return err 102 | } 103 | 104 | ch <- prometheus.MustNewConstMetric( 105 | pgExtensions["pg_available_extensions"], 106 | prometheus.GaugeValue, 107 | 1, 108 | name.String, 109 | defaultVersion.String, 110 | installedVersion.String, 111 | ) 112 | } 113 | 114 | return nil 115 | } 116 | 117 | var _ = 
(Collector)(&ExtensionsCollector{}) 118 | -------------------------------------------------------------------------------- /example.alerts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | groups: 3 | - name: PostgreSQL 4 | rules: 5 | - alert: PostgreSQLMaxConnectionsReached 6 | expr: sum(pg_stat_activity_count) by (instance) > sum(pg_settings_max_connections) by (instance) 7 | for: 1m 8 | labels: 9 | severity: email 10 | annotations: 11 | summary: "{{ $labels.instance }} has maxed out Postgres connections." 12 | description: "{{ $labels.instance }} is exceeding the currently configured maximum Postgres connection limit (current value: {{ $value }}s). Services may be degraded - please take immediate action (you probably need to increase max_connections in the Docker image and re-deploy." 13 | 14 | - alert: PostgreSQLHighConnections 15 | expr: sum(pg_stat_activity_count) by (instance) > sum(pg_settings_max_connections * 0.8) by (instance) 16 | for: 10m 17 | labels: 18 | severity: email 19 | annotations: 20 | summary: "{{ $labels.instance }} is over 80% of max Postgres connections." 21 | description: "{{ $labels.instance }} is exceeding 80% of the currently configured maximum Postgres connection limit (current value: {{ $value }}s). Please check utilization graphs and confirm if this is normal service growth, abuse or an otherwise temporary condition or if new resources need to be provisioned (or the limits increased, which is mostly likely)." 22 | 23 | - alert: PostgreSQLDown 24 | expr: pg_up != 1 25 | for: 1m 26 | labels: 27 | severity: email 28 | annotations: 29 | summary: "PostgreSQL is not processing queries: {{ $labels.instance }}" 30 | description: "{{ $labels.instance }} is rejecting query requests from the exporter, and thus probably not allowing DNS requests to work either. User services should not be effected provided at least 1 node is still alive." 
31 | 32 | - alert: PostgreSQLSlowQueries 33 | expr: avg(rate(pg_stat_activity_max_tx_duration{datname!~"template.*"}[2m])) by (datname) > 2 * 60 34 | for: 2m 35 | labels: 36 | severity: email 37 | annotations: 38 | summary: "PostgreSQL high number of slow on {{ $labels.cluster }} for database {{ $labels.datname }} " 39 | description: "PostgreSQL high number of slow queries {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }} " 40 | 41 | - alert: PostgreSQLQPS 42 | expr: avg(irate(pg_stat_database_xact_commit{datname!~"template.*"}[5m]) + irate(pg_stat_database_xact_rollback{datname!~"template.*"}[5m])) by (datname) > 10000 43 | for: 5m 44 | labels: 45 | severity: email 46 | annotations: 47 | summary: "PostgreSQL high number of queries per second {{ $labels.cluster }} for database {{ $labels.datname }}" 48 | description: "PostgreSQL high number of queries per second on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}" 49 | 50 | - alert: PostgreSQLCacheHitRatio 51 | expr: avg(rate(pg_stat_database_blks_hit{datname!~"template.*"}[5m]) / (rate(pg_stat_database_blks_hit{datname!~"template.*"}[5m]) + rate(pg_stat_database_blks_read{datname!~"template.*"}[5m]))) by (datname) < 0.98 52 | for: 5m 53 | labels: 54 | severity: email 55 | annotations: 56 | summary: "PostgreSQL low cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }}" 57 | description: "PostgreSQL low on cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}" 58 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package config 15 | 16 | import ( 17 | "fmt" 18 | "os" 19 | "sync" 20 | 21 | "github.com/go-kit/log" 22 | "github.com/prometheus/client_golang/prometheus" 23 | "github.com/prometheus/client_golang/prometheus/promauto" 24 | "gopkg.in/yaml.v3" 25 | ) 26 | 27 | var ( 28 | configReloadSuccess = promauto.NewGauge(prometheus.GaugeOpts{ 29 | Namespace: "postgres_exporter", 30 | Name: "config_last_reload_successful", 31 | Help: "Postgres exporter config loaded successfully.", 32 | }) 33 | 34 | configReloadSeconds = promauto.NewGauge(prometheus.GaugeOpts{ 35 | Namespace: "postgres_exporter", 36 | Name: "config_last_reload_success_timestamp_seconds", 37 | Help: "Timestamp of the last successful configuration reload.", 38 | }) 39 | ) 40 | 41 | type Config struct { 42 | AuthModules map[string]AuthModule `yaml:"auth_modules"` 43 | } 44 | 45 | type AuthModule struct { 46 | Type string `yaml:"type"` 47 | UserPass UserPass `yaml:"userpass,omitempty"` 48 | // Add alternative auth modules here 49 | Options map[string]string `yaml:"options"` 50 | } 51 | 52 | type UserPass struct { 53 | Username string `yaml:"username"` 54 | Password string `yaml:"password"` 55 | } 56 | 57 | type Handler struct { 58 | sync.RWMutex 59 | Config *Config 60 | } 61 | 62 | func (ch *Handler) GetConfig() *Config { 63 | ch.RLock() 64 | defer ch.RUnlock() 65 | return ch.Config 66 | } 67 | 68 | func (ch *Handler) ReloadConfig(f string, logger log.Logger) error { 69 | config := &Config{} 70 | var err error 71 | defer func() { 72 | if err != nil { 73 | 
configReloadSuccess.Set(0) 74 | } else { 75 | configReloadSuccess.Set(1) 76 | configReloadSeconds.SetToCurrentTime() 77 | } 78 | }() 79 | 80 | yamlReader, err := os.Open(f) 81 | if err != nil { 82 | return fmt.Errorf("error opening config file %q: %s", f, err) 83 | } 84 | defer yamlReader.Close() 85 | decoder := yaml.NewDecoder(yamlReader) 86 | decoder.KnownFields(true) 87 | 88 | if err = decoder.Decode(config); err != nil { 89 | return fmt.Errorf("error parsing config file %q: %s", f, err) 90 | } 91 | 92 | ch.Lock() 93 | ch.Config = config 94 | ch.Unlock() 95 | return nil 96 | } 97 | 98 | func (m AuthModule) ConfigureTarget(target string) (DSN, error) { 99 | dsn, err := dsnFromString(target) 100 | if err != nil { 101 | return DSN{}, err 102 | } 103 | 104 | // Set the credentials from the authentication module 105 | // TODO(@sysadmind): What should the order of precedence be? 106 | if m.Type == "userpass" { 107 | if m.UserPass.Username != "" { 108 | dsn.username = m.UserPass.Username 109 | } 110 | if m.UserPass.Password != "" { 111 | dsn.password = m.UserPass.Password 112 | } 113 | } 114 | 115 | for k, v := range m.Options { 116 | dsn.query.Set(k, v) 117 | } 118 | 119 | return dsn, nil 120 | } 121 | -------------------------------------------------------------------------------- /collector/probe.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// ProbeCollector bundles the enabled collectors for a single probe target
// behind one prometheus.Collector, sharing a connection semaphore so that
// concurrent probes do not exhaust database connections.
type ProbeCollector struct {
	registry   *prometheus.Registry
	collectors map[string]Collector
	logger     log.Logger
	instance   *instance
	connSema   *semaphore.Weighted
	// NOTE(review): storing a ctx in a struct goes against the usual Go
	// guidance; it is retained here because each ProbeCollector is scoped to
	// a single probe request — confirm before refactoring.
	ctx context.Context
}

// NewProbeCollector instantiates (or reuses cached) collectors for every
// enabled collector key and prepares — but does not open — a database
// instance for the given DSN.
func NewProbeCollector(ctx context.Context, logger log.Logger, excludeDatabases []string, registry *prometheus.Registry, dsn config.DSN, connSema *semaphore.Weighted) (*ProbeCollector, error) {
	collectors := make(map[string]Collector)
	// initiatedCollectors is package-level shared state; hold its mutex for
	// the whole read-or-create loop.
	initiatedCollectorsMtx.Lock()
	defer initiatedCollectorsMtx.Unlock()
	for key, enabled := range collectorState {
		// TODO: Handle filters
		// if !*enabled || (len(f) > 0 && !f[key]) {
		// 	continue
		// }
		if !*enabled {
			continue
		}
		// Reuse a previously constructed collector when available; otherwise
		// build one via its registered factory and cache it.
		if collector, ok := initiatedCollectors[key]; ok {
			collectors[key] = collector
		} else {
			collector, err := factories[key](
				collectorConfig{
					logger:           log.With(logger, "collector", key),
					excludeDatabases: excludeDatabases,
				})
			if err != nil {
				return nil, err
			}
			collectors[key] = collector
			initiatedCollectors[key] = collector
		}
	}

	instance, err := newInstance(dsn.GetConnectionString())
	if err != nil {
		return nil, err
	}

	return &ProbeCollector{
		registry:   registry,
		collectors: collectors,
		logger:     logger,
		instance:   instance,
		connSema:   connSema,
		ctx:        ctx,
	}, nil
}

// Describe is intentionally empty: leaving it unimplemented makes this an
// "unchecked" collector, so metric descriptors are not verified up front.
func (pc *ProbeCollector) Describe(ch chan<- *prometheus.Desc) {
}

// Collect acquires a slot on the shared connection semaphore, opens the
// database connection, and runs every configured collector concurrently,
// waiting for all of them before returning. Errors acquiring the semaphore
// or connecting are logged and result in no metrics for this probe.
func (pc *ProbeCollector) Collect(ch chan<- prometheus.Metric) {
	if err := pc.connSema.Acquire(pc.ctx, 1); err != nil {
		level.Warn(pc.logger).Log("msg", "Failed to acquire semaphore", "err", err)
		return
	}
	defer pc.connSema.Release(1)

	// Set up the database connection for the collector.
	err := pc.instance.setup()
	if err != nil {
		level.Error(pc.logger).Log("msg", "Error opening connection to database", "err", err)
		return
	}
	// Close the connection as soon as this scrape finishes so the semaphore
	// slot released above corresponds to a truly free connection.
	defer pc.instance.Close()

	// Fan out: one goroutine per collector, all writing to the same channel.
	wg := sync.WaitGroup{}
	wg.Add(len(pc.collectors))
	for name, c := range pc.collectors {
		go func(name string, c Collector) {
			execute(pc.ctx, name, c, pc.instance, ch, pc.logger)
			wg.Done()
		}(name, c)
	}
	wg.Wait()
}

// Close releases the underlying database instance.
func (pc *ProbeCollector) Close() error {
	return pc.instance.Close()
}
13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | "database/sql" 19 | 20 | "github.com/go-kit/log" 21 | "github.com/prometheus/client_golang/prometheus" 22 | ) 23 | 24 | const locksSubsystem = "locks" 25 | 26 | func init() { 27 | registerCollector(locksSubsystem, defaultEnabled, NewPGLocksCollector) 28 | } 29 | 30 | type PGLocksCollector struct { 31 | log log.Logger 32 | } 33 | 34 | func NewPGLocksCollector(config collectorConfig) (Collector, error) { 35 | return &PGLocksCollector{ 36 | log: config.logger, 37 | }, nil 38 | } 39 | 40 | var ( 41 | pgLocksDesc = prometheus.NewDesc( 42 | prometheus.BuildFQName( 43 | namespace, 44 | locksSubsystem, 45 | "count", 46 | ), 47 | "Number of locks", 48 | []string{"datname", "mode"}, nil, 49 | ) 50 | 51 | pgLocksQuery = ` 52 | SELECT 53 | pg_database.datname as datname, 54 | tmp.mode as mode, 55 | COALESCE(count, 0) as count 56 | FROM 57 | ( 58 | VALUES 59 | ('accesssharelock'), 60 | ('rowsharelock'), 61 | ('rowexclusivelock'), 62 | ('shareupdateexclusivelock'), 63 | ('sharelock'), 64 | ('sharerowexclusivelock'), 65 | ('exclusivelock'), 66 | ('accessexclusivelock'), 67 | ('sireadlock') 68 | ) AS tmp(mode) 69 | CROSS JOIN pg_database 70 | LEFT JOIN ( 71 | SELECT 72 | database, 73 | lower(mode) AS mode, 74 | count(*) AS count 75 | FROM 76 | pg_locks 77 | WHERE 78 | database IS NOT NULL 79 | GROUP BY 80 | database, 81 | lower(mode) 82 | ) AS tmp2 ON tmp.mode = tmp2.mode 83 | and pg_database.oid = tmp2.database 84 | ORDER BY 85 | 1 86 | ` 87 | ) 88 | 89 | // Update implements Collector and exposes database locks. 90 | // It is called by the Prometheus registry when collecting metrics. 
91 | func (c PGLocksCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 92 | db := instance.getDB() 93 | // Query the list of databases 94 | rows, err := db.QueryContext(ctx, 95 | pgLocksQuery, 96 | ) 97 | if err != nil { 98 | return err 99 | } 100 | defer rows.Close() 101 | 102 | var datname, mode sql.NullString 103 | var count sql.NullInt64 104 | 105 | for rows.Next() { 106 | if err := rows.Scan(&datname, &mode, &count); err != nil { 107 | return err 108 | } 109 | 110 | if !datname.Valid || !mode.Valid { 111 | continue 112 | } 113 | 114 | countMetric := 0.0 115 | if count.Valid { 116 | countMetric = float64(count.Int64) 117 | } 118 | 119 | ch <- prometheus.MustNewConstMetric( 120 | pgLocksDesc, 121 | prometheus.GaugeValue, countMetric, 122 | datname.String, mode.String, 123 | ) 124 | } 125 | if err := rows.Err(); err != nil { 126 | return err 127 | } 128 | return nil 129 | } 130 | -------------------------------------------------------------------------------- /collector/pg_database_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPGDatabaseCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | 32 | inst := &instance{db: db} 33 | 34 | mock.ExpectQuery(sanitizeQuery(pgDatabaseQuery)).WillReturnRows(sqlmock.NewRows([]string{"datname"}). 35 | AddRow("postgres")) 36 | 37 | mock.ExpectQuery(sanitizeQuery(pgDatabaseSizeQuery)).WithArgs("postgres").WillReturnRows(sqlmock.NewRows([]string{"pg_database_size"}). 38 | AddRow(1024)) 39 | 40 | ch := make(chan prometheus.Metric) 41 | go func() { 42 | defer close(ch) 43 | c := PGDatabaseCollector{} 44 | if err := c.Update(context.Background(), inst, ch); err != nil { 45 | t.Errorf("Error calling PGDatabaseCollector.Update: %s", err) 46 | } 47 | }() 48 | 49 | expected := []MetricResult{ 50 | {labels: labelMap{"datname": "postgres"}, value: 1024, metricType: dto.MetricType_GAUGE}, 51 | } 52 | convey.Convey("Metrics comparison", t, func() { 53 | for _, expect := range expected { 54 | m := readMetric(<-ch) 55 | convey.So(expect, convey.ShouldResemble, m) 56 | } 57 | }) 58 | if err := mock.ExpectationsWereMet(); err != nil { 59 | t.Errorf("there were unfulfilled exceptions: %s", err) 60 | } 61 | } 62 | 63 | // TODO add a null db test 64 | 65 | func TestPGDatabaseCollectorNullMetric(t *testing.T) { 66 | db, mock, err := sqlmock.New() 67 | if err != nil { 68 | t.Fatalf("Error opening a stub db connection: %s", err) 69 | } 70 | defer db.Close() 71 | 72 | inst := &instance{db: db} 73 | 74 | mock.ExpectQuery(sanitizeQuery(pgDatabaseQuery)).WillReturnRows(sqlmock.NewRows([]string{"datname"}). 
75 | AddRow("postgres")) 76 | 77 | mock.ExpectQuery(sanitizeQuery(pgDatabaseSizeQuery)).WithArgs("postgres").WillReturnRows(sqlmock.NewRows([]string{"pg_database_size"}). 78 | AddRow(nil)) 79 | 80 | ch := make(chan prometheus.Metric) 81 | go func() { 82 | defer close(ch) 83 | c := PGDatabaseCollector{} 84 | if err := c.Update(context.Background(), inst, ch); err != nil { 85 | t.Errorf("Error calling PGDatabaseCollector.Update: %s", err) 86 | } 87 | }() 88 | 89 | expected := []MetricResult{ 90 | {labels: labelMap{"datname": "postgres"}, value: 0, metricType: dto.MetricType_GAUGE}, 91 | } 92 | convey.Convey("Metrics comparison", t, func() { 93 | for _, expect := range expected { 94 | m := readMetric(<-ch) 95 | convey.So(expect, convey.ShouldResemble, m) 96 | } 97 | }) 98 | if err := mock.ExpectationsWereMet(); err != nil { 99 | t.Errorf("there were unfulfilled exceptions: %s", err) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /collector/pg_database_wraparound.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
const databaseWraparoundSubsystem = "database_wraparound"

func init() {
	registerCollector(databaseWraparoundSubsystem, defaultEnabled, NewPGDatabaseWraparoundCollector)
}

// PGDatabaseWraparoundCollector exposes transaction-ID age per database, used
// to monitor how close each database is to transaction-ID wraparound.
type PGDatabaseWraparoundCollector struct {
	log log.Logger
}

// NewPGDatabaseWraparoundCollector returns a collector wired to the shared
// collector logger.
func NewPGDatabaseWraparoundCollector(config collectorConfig) (Collector, error) {
	return &PGDatabaseWraparoundCollector{log: config.logger}, nil
}

var (
	// NOTE(review): the metric name ends in "_seconds" but age() returns a
	// count of transactions, not seconds — renaming would break dashboards,
	// so the name is kept; confirm units with consumers before changing.
	databaseWraparoundAgeDatfrozenxid = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, databaseWraparoundSubsystem, "age_datfrozenxid_seconds"),
		"Age of the oldest transaction ID that has not been frozen.",
		[]string{"datname"},
		prometheus.Labels{},
	)
	// Same unit caveat applies: mxid_age() counts multixacts, not seconds.
	databaseWraparoundAgeDatminmxid = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, databaseWraparoundSubsystem, "age_datminmxid_seconds"),
		"Age of the oldest multi-transaction ID that has been replaced with a transaction ID.",
		[]string{"datname"},
		prometheus.Labels{},
	)

	// Only databases that accept connections (datallowconn) are reported.
	databaseWraparoundQuery = `
	SELECT
		datname,
		age(d.datfrozenxid) as age_datfrozenxid,
		mxid_age(d.datminmxid) as age_datminmxid
	FROM
		pg_catalog.pg_database d
	WHERE
		d.datallowconn
	`
)

// Update implements Collector: it emits both wraparound-age gauges for every
// connectable database, skipping rows with NULL name or NULL ages.
func (c *PGDatabaseWraparoundCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
	db := instance.getDB()
	rows, err := db.QueryContext(ctx,
		databaseWraparoundQuery)

	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var datname sql.NullString
		var ageDatfrozenxid, ageDatminmxid sql.NullFloat64

		if err := rows.Scan(&datname, &ageDatfrozenxid, &ageDatminmxid); err != nil {
			return err
		}

		// NULL columns are skipped (with a debug log) rather than emitted as
		// zero, to avoid fabricating a misleading "age 0" data point.
		if !datname.Valid {
			level.Debug(c.log).Log("msg", "Skipping database with NULL name")
			continue
		}
		if !ageDatfrozenxid.Valid {
			level.Debug(c.log).Log("msg", "Skipping stat emission with NULL age_datfrozenxid")
			continue
		}
		if !ageDatminmxid.Valid {
			level.Debug(c.log).Log("msg", "Skipping stat emission with NULL age_datminmxid")
			continue
		}

		ageDatfrozenxidMetric := ageDatfrozenxid.Float64

		ch <- prometheus.MustNewConstMetric(
			databaseWraparoundAgeDatfrozenxid,
			prometheus.GaugeValue,
			ageDatfrozenxidMetric, datname.String,
		)

		ageDatminmxidMetric := ageDatminmxid.Float64
		ch <- prometheus.MustNewConstMetric(
			databaseWraparoundAgeDatminmxid,
			prometheus.GaugeValue,
			ageDatminmxidMetric, datname.String,
		)
	}
	if err := rows.Err(); err != nil {
		return err
	}
	return nil
}
13 | package collector 14 | 15 | import ( 16 | "context" 17 | "database/sql" 18 | 19 | "github.com/go-kit/log" 20 | "github.com/prometheus/client_golang/prometheus" 21 | ) 22 | 23 | func init() { 24 | registerCollector(statioUserIndexesSubsystem, defaultDisabled, NewPGStatioUserIndexesCollector) 25 | } 26 | 27 | type PGStatioUserIndexesCollector struct { 28 | log log.Logger 29 | } 30 | 31 | const statioUserIndexesSubsystem = "statio_user_indexes" 32 | 33 | func NewPGStatioUserIndexesCollector(config collectorConfig) (Collector, error) { 34 | return &PGStatioUserIndexesCollector{log: config.logger}, nil 35 | } 36 | 37 | var ( 38 | statioUserIndexesIdxBlksRead = prometheus.NewDesc( 39 | prometheus.BuildFQName(namespace, statioUserIndexesSubsystem, "idx_blks_read_total"), 40 | "Number of disk blocks read from this index", 41 | []string{"schemaname", "relname", "indexrelname"}, 42 | prometheus.Labels{}, 43 | ) 44 | statioUserIndexesIdxBlksHit = prometheus.NewDesc( 45 | prometheus.BuildFQName(namespace, statioUserIndexesSubsystem, "idx_blks_hit_total"), 46 | "Number of buffer hits in this index", 47 | []string{"schemaname", "relname", "indexrelname"}, 48 | prometheus.Labels{}, 49 | ) 50 | 51 | statioUserIndexesQuery = ` 52 | SELECT 53 | schemaname, 54 | relname, 55 | indexrelname, 56 | idx_blks_read, 57 | idx_blks_hit 58 | FROM pg_statio_user_indexes 59 | ` 60 | ) 61 | 62 | func (c *PGStatioUserIndexesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 63 | db := instance.getDB() 64 | rows, err := db.QueryContext(ctx, 65 | statioUserIndexesQuery) 66 | 67 | if err != nil { 68 | return err 69 | } 70 | defer rows.Close() 71 | for rows.Next() { 72 | var schemaname, relname, indexrelname sql.NullString 73 | var idxBlksRead, idxBlksHit sql.NullFloat64 74 | 75 | if err := rows.Scan(&schemaname, &relname, &indexrelname, &idxBlksRead, &idxBlksHit); err != nil { 76 | return err 77 | } 78 | schemanameLabel := "unknown" 79 | if 
schemaname.Valid { 80 | schemanameLabel = schemaname.String 81 | } 82 | relnameLabel := "unknown" 83 | if relname.Valid { 84 | relnameLabel = relname.String 85 | } 86 | indexrelnameLabel := "unknown" 87 | if indexrelname.Valid { 88 | indexrelnameLabel = indexrelname.String 89 | } 90 | labels := []string{schemanameLabel, relnameLabel, indexrelnameLabel} 91 | 92 | idxBlksReadMetric := 0.0 93 | if idxBlksRead.Valid { 94 | idxBlksReadMetric = idxBlksRead.Float64 95 | } 96 | ch <- prometheus.MustNewConstMetric( 97 | statioUserIndexesIdxBlksRead, 98 | prometheus.CounterValue, 99 | idxBlksReadMetric, 100 | labels..., 101 | ) 102 | 103 | idxBlksHitMetric := 0.0 104 | if idxBlksHit.Valid { 105 | idxBlksHitMetric = idxBlksHit.Float64 106 | } 107 | ch <- prometheus.MustNewConstMetric( 108 | statioUserIndexesIdxBlksHit, 109 | prometheus.CounterValue, 110 | idxBlksHitMetric, 111 | labels..., 112 | ) 113 | } 114 | if err := rows.Err(); err != nil { 115 | return err 116 | } 117 | return nil 118 | } 119 | -------------------------------------------------------------------------------- /collector/pg_database.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | "database/sql" 19 | 20 | "github.com/go-kit/log" 21 | "github.com/prometheus/client_golang/prometheus" 22 | ) 23 | 24 | const databaseSubsystem = "database" 25 | 26 | func init() { 27 | registerCollector(databaseSubsystem, defaultEnabled, NewPGDatabaseCollector) 28 | } 29 | 30 | type PGDatabaseCollector struct { 31 | log log.Logger 32 | excludedDatabases []string 33 | } 34 | 35 | func NewPGDatabaseCollector(config collectorConfig) (Collector, error) { 36 | exclude := config.excludeDatabases 37 | if exclude == nil { 38 | exclude = []string{} 39 | } 40 | return &PGDatabaseCollector{ 41 | log: config.logger, 42 | excludedDatabases: exclude, 43 | }, nil 44 | } 45 | 46 | var ( 47 | pgDatabaseSizeDesc = prometheus.NewDesc( 48 | prometheus.BuildFQName( 49 | namespace, 50 | databaseSubsystem, 51 | "size_bytes", 52 | ), 53 | "Disk space used by the database", 54 | []string{"datname"}, nil, 55 | ) 56 | 57 | pgDatabaseQuery = "SELECT pg_database.datname FROM pg_database;" 58 | pgDatabaseSizeQuery = "SELECT pg_database_size($1)" 59 | ) 60 | 61 | // Update implements Collector and exposes database size. 62 | // It is called by the Prometheus registry when collecting metrics. 63 | // The list of databases is retrieved from pg_database and filtered 64 | // by the excludeDatabase config parameter. The tradeoff here is that 65 | // we have to query the list of databases and then query the size of 66 | // each database individually. This is because we can't filter the 67 | // list of databases in the query because the list of excluded 68 | // databases is dynamic. 
69 | func (c PGDatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 70 | db := instance.getDB() 71 | // Query the list of databases 72 | rows, err := db.QueryContext(ctx, 73 | pgDatabaseQuery, 74 | ) 75 | if err != nil { 76 | return err 77 | } 78 | defer rows.Close() 79 | 80 | var databases []string 81 | 82 | for rows.Next() { 83 | var datname sql.NullString 84 | if err := rows.Scan(&datname); err != nil { 85 | return err 86 | } 87 | 88 | if !datname.Valid { 89 | continue 90 | } 91 | // Ignore excluded databases 92 | // Filtering is done here instead of in the query to avoid 93 | // a complicated NOT IN query with a variable number of parameters 94 | if sliceContains(c.excludedDatabases, datname.String) { 95 | continue 96 | } 97 | 98 | databases = append(databases, datname.String) 99 | } 100 | 101 | // Query the size of the databases 102 | for _, datname := range databases { 103 | var size sql.NullFloat64 104 | err = db.QueryRowContext(ctx, pgDatabaseSizeQuery, datname).Scan(&size) 105 | if err != nil { 106 | return err 107 | } 108 | 109 | sizeMetric := 0.0 110 | if size.Valid { 111 | sizeMetric = size.Float64 112 | } 113 | ch <- prometheus.MustNewConstMetric( 114 | pgDatabaseSizeDesc, 115 | prometheus.GaugeValue, sizeMetric, datname, 116 | ) 117 | } 118 | return rows.Err() 119 | } 120 | 121 | func sliceContains(slice []string, s string) bool { 122 | for _, item := range slice { 123 | if item == s { 124 | return true 125 | } 126 | } 127 | return false 128 | } 129 | -------------------------------------------------------------------------------- /collector/pg_statio_user_indexes_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPgStatioUserIndexesCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | inst := &instance{db: db} 32 | columns := []string{ 33 | "schemaname", 34 | "relname", 35 | "indexrelname", 36 | "idx_blks_read", 37 | "idx_blks_hit", 38 | } 39 | rows := sqlmock.NewRows(columns). 
40 | AddRow("public", "pgtest_accounts", "pgtest_accounts_pkey", 8, 9) 41 | 42 | mock.ExpectQuery(sanitizeQuery(statioUserIndexesQuery)).WillReturnRows(rows) 43 | 44 | ch := make(chan prometheus.Metric) 45 | go func() { 46 | defer close(ch) 47 | c := PGStatioUserIndexesCollector{} 48 | 49 | if err := c.Update(context.Background(), inst, ch); err != nil { 50 | t.Errorf("Error calling PGStatioUserIndexesCollector.Update: %s", err) 51 | } 52 | }() 53 | expected := []MetricResult{ 54 | {labels: labelMap{"schemaname": "public", "relname": "pgtest_accounts", "indexrelname": "pgtest_accounts_pkey"}, value: 8, metricType: dto.MetricType_COUNTER}, 55 | {labels: labelMap{"schemaname": "public", "relname": "pgtest_accounts", "indexrelname": "pgtest_accounts_pkey"}, value: 9, metricType: dto.MetricType_COUNTER}, 56 | } 57 | convey.Convey("Metrics comparison", t, func() { 58 | for _, expect := range expected { 59 | m := readMetric(<-ch) 60 | convey.So(expect, convey.ShouldResemble, m) 61 | } 62 | }) 63 | if err := mock.ExpectationsWereMet(); err != nil { 64 | t.Errorf("there were unfulfilled exceptions: %s", err) 65 | } 66 | } 67 | 68 | func TestPgStatioUserIndexesCollectorNull(t *testing.T) { 69 | db, mock, err := sqlmock.New() 70 | if err != nil { 71 | t.Fatalf("Error opening a stub db connection: %s", err) 72 | } 73 | defer db.Close() 74 | inst := &instance{db: db} 75 | columns := []string{ 76 | "schemaname", 77 | "relname", 78 | "indexrelname", 79 | "idx_blks_read", 80 | "idx_blks_hit", 81 | } 82 | rows := sqlmock.NewRows(columns). 
83 | AddRow(nil, nil, nil, nil, nil) 84 | 85 | mock.ExpectQuery(sanitizeQuery(statioUserIndexesQuery)).WillReturnRows(rows) 86 | 87 | ch := make(chan prometheus.Metric) 88 | go func() { 89 | defer close(ch) 90 | c := PGStatioUserIndexesCollector{} 91 | 92 | if err := c.Update(context.Background(), inst, ch); err != nil { 93 | t.Errorf("Error calling PGStatioUserIndexesCollector.Update: %s", err) 94 | } 95 | }() 96 | expected := []MetricResult{ 97 | {labels: labelMap{"schemaname": "unknown", "relname": "unknown", "indexrelname": "unknown"}, value: 0, metricType: dto.MetricType_COUNTER}, 98 | {labels: labelMap{"schemaname": "unknown", "relname": "unknown", "indexrelname": "unknown"}, value: 0, metricType: dto.MetricType_COUNTER}, 99 | } 100 | convey.Convey("Metrics comparison", t, func() { 101 | for _, expect := range expected { 102 | m := readMetric(<-ch) 103 | convey.So(expect, convey.ShouldResemble, m) 104 | } 105 | }) 106 | if err := mock.ExpectationsWereMet(); err != nil { 107 | t.Errorf("there were unfulfilled exceptions: %s", err) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/percona_compatibility_test.go: -------------------------------------------------------------------------------- 1 | //go:build manual 2 | 3 | package main 4 | 5 | import ( 6 | "bufio" 7 | _ "embed" 8 | "fmt" 9 | "net/http" 10 | "os" 11 | "regexp" 12 | "sort" 13 | "strings" 14 | "testing" 15 | "time" 16 | 17 | "github.com/stretchr/testify/assert" 18 | ) 19 | 20 | //go:embed percona-reference-metrics.txt 21 | var referenceMetrics string 22 | 23 | // TestReferenceCompatibility checks that exposed metrics are not missed. 24 | // 25 | // Used to make sure that metrics are present after updating from upstream. 26 | // You need you run exporter locally on port 42002. 
27 | func TestReferenceCompatibility(t *testing.T) { 28 | client := &http.Client{ 29 | Timeout: time.Second * 10, 30 | } 31 | req, err := http.NewRequest("GET", "http://localhost:42000/metrics", nil) 32 | assert.Nil(t, err) 33 | req.SetBasicAuth("pmm", "/agent_id/825dcdbf-af1c-4eb4-9e96-21699aa6ff7b") 34 | resp, err := client.Do(req) 35 | assert.Nil(t, err) 36 | defer resp.Body.Close() 37 | currentMetricsBytes, err := os.ReadAll(resp.Body) 38 | assert.Nil(t, err) 39 | 40 | currentMetrics := toMap(t, string(currentMetricsBytes)) 41 | referenceMetrics := toMap(t, referenceMetrics) 42 | 43 | //remove matches 44 | for m := range currentMetrics { 45 | _, found := referenceMetrics[m] 46 | if found { 47 | delete(referenceMetrics, m) 48 | delete(currentMetrics, m) 49 | } 50 | } 51 | 52 | fmt.Printf("Extra metrics [%d]:\n", len(currentMetrics)) 53 | for _, metric := range sortedKeys(currentMetrics) { 54 | fmt.Printf("\t%s\n", metric) 55 | } 56 | if len(referenceMetrics) != 0 { 57 | fmt.Printf("Not Supported metrics [%d]:\n", len(referenceMetrics)) 58 | for _, metric := range sortedKeys(referenceMetrics) { 59 | fmt.Printf("\t%s\n", metric) 60 | } 61 | assert.FailNowf(t, "Found not supported metrics", "Count: %d", len(referenceMetrics)) 62 | } 63 | } 64 | 65 | func sortedKeys(m map[string]string) []string { 66 | keys := make([]string, 0, len(m)) 67 | for k := range m { 68 | keys = append(keys, k) 69 | } 70 | sort.Strings(keys) 71 | return keys 72 | } 73 | 74 | func toMap(t *testing.T, rawMetrics string) map[string]string { 75 | result := make(map[string]string) 76 | 77 | scanner := bufio.NewScanner(strings.NewReader(rawMetrics)) 78 | scanner.Split(bufio.ScanLines) 79 | 80 | for scanner.Scan() { 81 | next := scanner.Text() 82 | isComment := strings.HasPrefix(next, "#") 83 | if isComment { 84 | continue 85 | } 86 | next = cleanKeyOrValue(next) 87 | if next != "" { 88 | items := strings.Split(next, " ") 89 | if len(items) > 1 { 90 | result[items[0]] = items[1] 91 | } else { 92 | 
fmt.Println("WARN: ") 93 | } 94 | } 95 | } 96 | 97 | return result 98 | } 99 | 100 | func cleanKeyOrValue(s string) (res string) { 101 | res = s 102 | 103 | itemsToIgnore := []string{ 104 | "example-queries", 105 | } 106 | 107 | for _, each := range itemsToIgnore { 108 | if strings.Contains(s, each) { 109 | return "" 110 | } 111 | } 112 | 113 | regexpsToRemove := []*regexp.Regexp{ 114 | regexp.MustCompile(`[+-]?(\d*[.])?\d+(e[+-]?\d*)?`), 115 | regexp.MustCompile(`\d*\.\d*\.\d*\.\d*:\d*`), 116 | regexp.MustCompile(`go1.\d*.\d*`), 117 | regexp.MustCompile(`filename=".*",`), 118 | regexp.MustCompile(`hashsum=".*"`), 119 | } 120 | for _, each := range regexpsToRemove { 121 | res = each.ReplaceAllString(res, "") 122 | } 123 | 124 | stringsToRemove := []string{ 125 | "PostgreSQL 11.15 (Debian 11.15-1.pgdg90+1) on x86_64-pc-linux-gnu, compiled by gcc (Debian 6.3.0-18+deb9u1) 6.3.0 20170516, 64-bit", 126 | "PostgreSQL 11.16 (Debian 11.16-1.pgdg90+1) on x86_64-pc-linux-gnu, compiled by gcc (Debian 6.3.0-18+deb9u1) 6.3.0 20170516, 64-bit", 127 | "collector=\"exporter\",", 128 | "fastpath function call", 129 | "idle in transaction (aborted)", 130 | "idle in transaction", 131 | "+Inf", 132 | "0.0.1", 133 | "collector=\"custom_query.mr\",", 134 | "datname=\"pmm-managed\"", 135 | "datname=\"pmm-agent\"", 136 | } 137 | for _, each := range stringsToRemove { 138 | res = strings.ReplaceAll(res, each, "") 139 | } 140 | 141 | return 142 | } 143 | -------------------------------------------------------------------------------- /collector/pg_process_idle.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | "database/sql" 19 | 20 | "github.com/go-kit/log" 21 | "github.com/lib/pq" 22 | "github.com/prometheus/client_golang/prometheus" 23 | ) 24 | 25 | func init() { 26 | // Making this default disabled because we have no tests for it 27 | registerCollector(processIdleSubsystem, defaultDisabled, NewPGProcessIdleCollector) 28 | } 29 | 30 | type PGProcessIdleCollector struct { 31 | log log.Logger 32 | } 33 | 34 | const processIdleSubsystem = "process_idle" 35 | 36 | func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) { 37 | return &PGProcessIdleCollector{log: config.logger}, nil 38 | } 39 | 40 | var pgProcessIdleSeconds = prometheus.NewDesc( 41 | prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"), 42 | "Idle time of server processes", 43 | []string{"state", "application_name"}, 44 | prometheus.Labels{}, 45 | ) 46 | 47 | func (PGProcessIdleCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 48 | db := instance.getDB() 49 | row := db.QueryRowContext(ctx, 50 | `WITH 51 | metrics AS ( 52 | SELECT 53 | state, 54 | application_name, 55 | SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_idle_seconds_sum, 56 | COUNT(*) AS process_idle_seconds_count 57 | FROM pg_stat_activity 58 | WHERE state ~ '^idle' 59 | GROUP BY state, application_name 60 | ), 61 | buckets AS ( 62 | SELECT 63 | state, 64 | application_name, 65 | le, 66 | SUM( 67 | CASE WHEN 
EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le 68 | THEN 1 69 | ELSE 0 70 | END 71 | )::bigint AS bucket 72 | FROM 73 | pg_stat_activity, 74 | UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le 75 | GROUP BY state, application_name, le 76 | ORDER BY state, application_name, le 77 | ) 78 | SELECT 79 | state, 80 | application_name, 81 | process_idle_seconds_sum as seconds_sum, 82 | process_idle_seconds_count as seconds_count, 83 | ARRAY_AGG(le) AS seconds, 84 | ARRAY_AGG(bucket) AS seconds_bucket 85 | FROM metrics JOIN buckets USING (state, application_name) 86 | GROUP BY 1, 2, 3, 4;`) 87 | 88 | var state sql.NullString 89 | var applicationName sql.NullString 90 | var secondsSum sql.NullFloat64 91 | var secondsCount sql.NullInt64 92 | var seconds []float64 93 | var secondsBucket []int64 94 | 95 | err := row.Scan(&state, &applicationName, &secondsSum, &secondsCount, pq.Array(&seconds), pq.Array(&secondsBucket)) 96 | if err != nil { 97 | return err 98 | } 99 | 100 | var buckets = make(map[float64]uint64, len(seconds)) 101 | for i, second := range seconds { 102 | if i >= len(secondsBucket) { 103 | break 104 | } 105 | buckets[second] = uint64(secondsBucket[i]) 106 | } 107 | 108 | stateLabel := "unknown" 109 | if state.Valid { 110 | stateLabel = state.String 111 | } 112 | 113 | applicationNameLabel := "unknown" 114 | if applicationName.Valid { 115 | applicationNameLabel = applicationName.String 116 | } 117 | 118 | var secondsCountMetric uint64 119 | if secondsCount.Valid { 120 | secondsCountMetric = uint64(secondsCount.Int64) 121 | } 122 | secondsSumMetric := 0.0 123 | if secondsSum.Valid { 124 | secondsSumMetric = secondsSum.Float64 125 | } 126 | ch <- prometheus.MustNewConstHistogram( 127 | pgProcessIdleSeconds, 128 | secondsCountMetric, secondsSumMetric, buckets, 129 | stateLabel, applicationNameLabel, 130 | ) 131 | return nil 132 | } 133 | -------------------------------------------------------------------------------- 
/cmd/postgres_exporter/probe.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package main 15 | 16 | import ( 17 | "fmt" 18 | "net/http" 19 | 20 | "github.com/go-kit/log" 21 | "github.com/go-kit/log/level" 22 | "github.com/prometheus-community/postgres_exporter/collector" 23 | "github.com/prometheus-community/postgres_exporter/config" 24 | "github.com/prometheus/client_golang/prometheus" 25 | "github.com/prometheus/client_golang/prometheus/promhttp" 26 | "golang.org/x/sync/semaphore" 27 | ) 28 | 29 | func handleProbe(logger log.Logger, excludeDatabases []string, connSema *semaphore.Weighted) http.HandlerFunc { 30 | return func(w http.ResponseWriter, r *http.Request) { 31 | ctx := r.Context() 32 | conf := c.GetConfig() 33 | params := r.URL.Query() 34 | target := params.Get("target") 35 | if target == "" { 36 | http.Error(w, "target is required", http.StatusBadRequest) 37 | return 38 | } 39 | var authModule config.AuthModule 40 | authModuleName := params.Get("auth_module") 41 | if authModuleName == "" { 42 | level.Info(logger).Log("msg", "no auth_module specified, using default") 43 | } else { 44 | var ok bool 45 | authModule, ok = conf.AuthModules[authModuleName] 46 | if !ok { 47 | http.Error(w, fmt.Sprintf("auth_module %s not found", authModuleName), http.StatusBadRequest) 48 | return 49 | 
} 50 | if authModule.UserPass.Username == "" || authModule.UserPass.Password == "" { 51 | http.Error(w, fmt.Sprintf("auth_module %s has no username or password", authModuleName), http.StatusBadRequest) 52 | return 53 | } 54 | } 55 | 56 | dsn, err := authModule.ConfigureTarget(target) 57 | if err != nil { 58 | level.Error(logger).Log("msg", "failed to configure target", "err", err) 59 | http.Error(w, fmt.Sprintf("could not configure dsn for target: %v", err), http.StatusBadRequest) 60 | return 61 | } 62 | 63 | // TODO(@sysadmind): Timeout 64 | 65 | tl := log.With(logger, "target", target) 66 | 67 | registry := prometheus.NewRegistry() 68 | 69 | opts := []ExporterOpt{ 70 | DisableDefaultMetrics(*disableDefaultMetrics), 71 | DisableSettingsMetrics(*disableSettingsMetrics), 72 | AutoDiscoverDatabases(*autoDiscoverDatabases), 73 | // WithUserQueriesPath(*queriesPath), 74 | WithConstantLabels(*constantLabelsList), 75 | ExcludeDatabases(excludeDatabases), 76 | IncludeDatabases(*includeDatabases), 77 | WithContext(ctx), 78 | WithConnectionsSemaphore(connSema), 79 | } 80 | 81 | dsns := []string{dsn.GetConnectionString()} 82 | exporter := NewExporter(dsns, opts...) 83 | 84 | // defer func() { 85 | // exporter.servers.Close() 86 | // }() 87 | registry.MustRegister(exporter) 88 | 89 | // Run the probe 90 | pc, err := collector.NewProbeCollector(ctx, tl, excludeDatabases, registry, dsn, connSema) 91 | if err != nil { 92 | level.Error(logger).Log("msg", "Error creating probe collector", "err", err) 93 | http.Error(w, err.Error(), http.StatusInternalServerError) 94 | return 95 | } 96 | 97 | // Cleanup underlying connections to prevent connection leaks 98 | defer pc.Close() 99 | 100 | // TODO(@sysadmind): Remove the registry.MustRegister() call below and instead handle the collection here. That will allow 101 | // for the passing of context, handling of timeouts, and more control over the collection. 
102 | // The current NewProbeCollector() implementation relies on the MustNewConstMetric() call to create the metrics which is not 103 | // ideal to use without the registry.MustRegister() call. 104 | _ = ctx 105 | 106 | registry.MustRegister(pc) 107 | 108 | // TODO check success, etc 109 | h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{}) 110 | h.ServeHTTP(w, r) 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /collector/instance.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collector 15 | 16 | import ( 17 | "database/sql" 18 | "fmt" 19 | "regexp" 20 | "strings" 21 | 22 | "github.com/blang/semver/v4" 23 | "github.com/lib/pq" 24 | ) 25 | 26 | type instance struct { 27 | dsn string 28 | name string 29 | db *sql.DB 30 | version semver.Version 31 | } 32 | 33 | func newInstance(dsn string) (*instance, error) { 34 | i := &instance{ 35 | dsn: dsn, 36 | } 37 | 38 | // "Create" a database handle to verify the DSN provided is valid. 39 | // Open is not guaranteed to create a connection. 
40 | db, err := sql.Open("postgres", dsn) 41 | if err != nil { 42 | return nil, err 43 | } 44 | db.Close() 45 | 46 | i.name, err = parseServerName(dsn) 47 | if err != nil { 48 | return nil, err 49 | } 50 | return i, nil 51 | } 52 | 53 | // copy returns a copy of the instance. 54 | func (i *instance) copy() *instance { 55 | return &instance{ 56 | dsn: i.dsn, 57 | name: i.name, 58 | } 59 | } 60 | 61 | func (i *instance) setup() error { 62 | db, err := sql.Open("postgres", i.dsn) 63 | if err != nil { 64 | return err 65 | } 66 | db.SetMaxOpenConns(1) 67 | db.SetMaxIdleConns(1) 68 | i.db = db 69 | 70 | version, err := queryVersion(i.db) 71 | if err != nil { 72 | return fmt.Errorf("error querying postgresql version: %w", err) 73 | } else { 74 | i.version = version 75 | } 76 | return nil 77 | } 78 | 79 | func (i *instance) getDB() *sql.DB { 80 | return i.db 81 | } 82 | 83 | func (i *instance) Close() error { 84 | return i.db.Close() 85 | } 86 | 87 | // Regex used to get the "short-version" from the postgres version field. 88 | // The result of SELECT version() is something like "PostgreSQL 9.6.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 6.2.1 20160830, 64-bit" 89 | var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`) 90 | var serverVersionRegex = regexp.MustCompile(`^((\d+)(\.\d+)?(\.\d+)?)`) 91 | 92 | func queryVersion(db *sql.DB) (semver.Version, error) { 93 | var version string 94 | err := db.QueryRow("SELECT version();").Scan(&version) 95 | if err != nil { 96 | return semver.Version{}, err 97 | } 98 | submatches := versionRegex.FindStringSubmatch(version) 99 | if len(submatches) > 1 { 100 | return semver.ParseTolerant(submatches[1]) 101 | } 102 | 103 | // We could also try to parse the version from the server_version field. 
104 | // This is of the format 13.3 (Debian 13.3-1.pgdg100+1) 105 | err = db.QueryRow("SHOW server_version;").Scan(&version) 106 | if err != nil { 107 | return semver.Version{}, err 108 | } 109 | submatches = serverVersionRegex.FindStringSubmatch(version) 110 | if len(submatches) > 1 { 111 | return semver.ParseTolerant(submatches[1]) 112 | } 113 | return semver.Version{}, fmt.Errorf("could not parse version from %q", version) 114 | } 115 | 116 | func parseServerName(url string) (string, error) { 117 | dsn, err := pq.ParseURL(url) 118 | if err != nil { 119 | dsn = url 120 | } 121 | 122 | pairs := strings.Split(dsn, " ") 123 | kv := make(map[string]string, len(pairs)) 124 | for _, pair := range pairs { 125 | splitted := strings.SplitN(pair, "=", 2) 126 | if len(splitted) != 2 { 127 | return "", fmt.Errorf("malformed dsn %q", dsn) 128 | } 129 | // Newer versions of pq.ParseURL quote values so trim them off if they exist 130 | key := strings.Trim(splitted[0], "'\"") 131 | value := strings.Trim(splitted[1], "'\"") 132 | kv[key] = value 133 | } 134 | 135 | var fingerprint string 136 | 137 | if host, ok := kv["host"]; ok { 138 | fingerprint += host 139 | } else { 140 | fingerprint += "localhost" 141 | } 142 | 143 | if port, ok := kv["port"]; ok { 144 | fingerprint += ":" + port 145 | } else { 146 | fingerprint += ":5432" 147 | } 148 | 149 | return fingerprint, nil 150 | } 151 | -------------------------------------------------------------------------------- /collector/pg_replication_slot.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collector 15 | 16 | import ( 17 | "context" 18 | "database/sql" 19 | 20 | "github.com/go-kit/log" 21 | "github.com/prometheus/client_golang/prometheus" 22 | ) 23 | 24 | const replicationSlotSubsystem = "replication_slot" 25 | 26 | func init() { 27 | registerCollector(replicationSlotSubsystem, defaultEnabled, NewPGReplicationSlotCollector) 28 | } 29 | 30 | type PGReplicationSlotCollector struct { 31 | log log.Logger 32 | } 33 | 34 | func NewPGReplicationSlotCollector(config collectorConfig) (Collector, error) { 35 | return &PGReplicationSlotCollector{log: config.logger}, nil 36 | } 37 | 38 | var ( 39 | pgReplicationSlotCurrentWalDesc = prometheus.NewDesc( 40 | prometheus.BuildFQName( 41 | namespace, 42 | replicationSlotSubsystem, 43 | "slot_current_wal_lsn", 44 | ), 45 | "current wal lsn value", 46 | []string{"slot_name", "plugin", "slot_type"}, nil, 47 | ) 48 | pgReplicationSlotCurrentFlushDesc = prometheus.NewDesc( 49 | prometheus.BuildFQName( 50 | namespace, 51 | replicationSlotSubsystem, 52 | "slot_confirmed_flush_lsn", 53 | ), 54 | "last lsn confirmed flushed to the replication slot", 55 | []string{"slot_name", "plugin", "slot_type"}, nil, 56 | ) 57 | pgReplicationSlotIsActiveDesc = prometheus.NewDesc( 58 | prometheus.BuildFQName( 59 | namespace, 60 | replicationSlotSubsystem, 61 | "slot_is_active", 62 | ), 63 | "whether the replication slot is active or not", 64 | []string{"slot_name", "plugin", "slot_type"}, nil, 65 | ) 66 | 67 | pgReplicationSlotQuery = `SELECT 68 | slot_name, 69 | plugin, 70 | 
slot_type, 71 | CASE WHEN pg_is_in_recovery() THEN 72 | pg_last_wal_receive_lsn() - '0/0' 73 | ELSE 74 | pg_current_wal_lsn() - '0/0' 75 | END AS current_wal_lsn, 76 | COALESCE(confirmed_flush_lsn, '0/0') - '0/0' AS confirmed_flush_lsn, 77 | active 78 | FROM pg_replication_slots;` 79 | ) 80 | 81 | func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { 82 | db := instance.getDB() 83 | rows, err := db.QueryContext(ctx, 84 | pgReplicationSlotQuery) 85 | if err != nil { 86 | return err 87 | } 88 | defer rows.Close() 89 | 90 | for rows.Next() { 91 | var slotName sql.NullString 92 | var plugin sql.NullString 93 | var slotType sql.NullString 94 | var walLSN sql.NullFloat64 95 | var flushLSN sql.NullFloat64 96 | var isActive sql.NullBool 97 | if err := rows.Scan(&slotName, &plugin, &slotType, &walLSN, &flushLSN, &isActive); err != nil { 98 | return err 99 | } 100 | 101 | isActiveValue := 0.0 102 | if isActive.Valid && isActive.Bool { 103 | isActiveValue = 1.0 104 | } 105 | slotNameLabel := "unknown" 106 | if slotName.Valid { 107 | slotNameLabel = slotName.String 108 | } 109 | pluginLabel := "unknown" 110 | if plugin.Valid { 111 | pluginLabel = plugin.String 112 | } 113 | slotTypeLabel := "unknown" 114 | if slotType.Valid { 115 | slotTypeLabel = slotType.String 116 | } 117 | 118 | var walLSNMetric float64 119 | if walLSN.Valid { 120 | walLSNMetric = walLSN.Float64 121 | } 122 | ch <- prometheus.MustNewConstMetric( 123 | pgReplicationSlotCurrentWalDesc, 124 | prometheus.GaugeValue, walLSNMetric, slotNameLabel, pluginLabel, slotTypeLabel, 125 | ) 126 | if isActive.Valid && isActive.Bool { 127 | var flushLSNMetric float64 128 | if flushLSN.Valid { 129 | flushLSNMetric = flushLSN.Float64 130 | } 131 | ch <- prometheus.MustNewConstMetric( 132 | pgReplicationSlotCurrentFlushDesc, 133 | prometheus.GaugeValue, flushLSNMetric, slotNameLabel, pluginLabel, slotTypeLabel, 134 | ) 135 | } 136 | ch <- 
#!/bin/bash

# Backwards compatibility for old variable names (deprecated)
if [ "x$PGUSER" != "x" ]; then
	POSTGRES_USER=$PGUSER
fi
if [ "x$PGPASSWORD" != "x" ]; then
	POSTGRES_PASSWORD=$PGPASSWORD
fi

# Forwards-compatibility for old variable names (pg_basebackup uses them)
if [ "x$PGPASSWORD" = "x" ]; then
	export PGPASSWORD=$POSTGRES_PASSWORD
fi

# Based on official postgres package's entrypoint script (https://hub.docker.com/_/postgres/)
# Modified to be able to set up a slave. The docker-entrypoint-initdb.d hook provided is inadequate.

set -e

# A leading dash means "flags for postgres"; prepend the binary name.
if [ "${1:0:1}" = '-' ]; then
	set -- postgres "$@"
fi

if [ "$1" = 'postgres' ]; then
	mkdir -p "$PGDATA"
	chmod 700 "$PGDATA"
	chown -R postgres "$PGDATA"

	mkdir -p /run/postgresql
	chmod g+s /run/postgresql
	chown -R postgres /run/postgresql

	# look specifically for PG_VERSION, as it is expected in the DB dir
	if [ ! -s "$PGDATA/PG_VERSION" ]; then
		# REPLICATE_FROM empty => this is the master: init a fresh cluster.
		# Otherwise: wait for the master and clone it with pg_basebackup.
		if [ "x$REPLICATE_FROM" == "x" ]; then
			eval "gosu postgres initdb $POSTGRES_INITDB_ARGS"
		else
			until /bin/ping -c 1 -W 1 ${REPLICATE_FROM}
			do
				echo "Waiting for master to ping..."
				sleep 1s
			done
			until gosu postgres pg_basebackup -h ${REPLICATE_FROM} -D ${PGDATA} -U ${POSTGRES_USER} -vP -w
			do
				echo "Waiting for master to connect..."
				sleep 1s
			done
		fi

		# check password first so we can output the warning before postgres
		# messes it up
		if [ ! -z "$POSTGRES_PASSWORD" ]; then
			pass="PASSWORD '$POSTGRES_PASSWORD'"
			authMethod=md5
		else
			# The - option suppresses leading tabs but *not* spaces. :)
			cat >&2 <<-'EOWARN'
				****************************************************
				WARNING: No password has been set for the database.
				         This will allow anyone with access to the
				         Postgres port to access your database. In
				         Docker's default configuration, this is
				         effectively any other container on the same
				         system.

				         Use "-e POSTGRES_PASSWORD=password" to set
				         it in "docker run".
				****************************************************
			EOWARN

			pass=
			authMethod=trust
		fi

		# Master-only initialization: open pg_hba for replication and client
		# access, then run the init SQL against a temporary local-only server.
		if [ "x$REPLICATE_FROM" == "x" ]; then

			{ echo; echo "host replication all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null
			{ echo; echo "host all all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null

			# internal start of server in order to allow set-up using psql-client
			# does not listen on external TCP/IP and waits until start finishes
			gosu postgres pg_ctl -D "$PGDATA" \
				-o "-c listen_addresses='localhost'" \
				-w start

			: ${POSTGRES_USER:=postgres}
			: ${POSTGRES_DB:=$POSTGRES_USER}
			export POSTGRES_USER POSTGRES_DB

			psql=( "psql" "-v" "ON_ERROR_STOP=1" )

			if [ "$POSTGRES_DB" != 'postgres' ]; then
				"${psql[@]}" --username postgres <<-EOSQL
					CREATE DATABASE "$POSTGRES_DB" ;
				EOSQL
				echo
			fi

			# ALTER the built-in postgres role, or CREATE a custom one.
			if [ "$POSTGRES_USER" = 'postgres' ]; then
				op='ALTER'
			else
				op='CREATE'
			fi
			"${psql[@]}" --username postgres <<-EOSQL
				$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
			EOSQL
			echo

		fi

		psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )

		# Run user-provided init hooks by extension.
		echo
		for f in /docker-entrypoint-initdb.d/*; do
			case "$f" in
				*.sh)     echo "$0: running $f"; . "$f" ;;
				*.sql)    echo "$0: running $f"; "${psql[@]}" < "$f"; echo ;;
				*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
				*)        echo "$0: ignoring $f" ;;
			esac
			echo
		done

		# Stop the temporary master server; the real one is exec'd below.
		if [ "x$REPLICATE_FROM" == "x" ]; then
			gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop
		fi

		echo
		echo 'PostgreSQL init process complete; ready for start up.'
		echo
	fi

	# We need this health check so we know when it's started up.
	touch /tmp/.postgres_init_complete

	exec gosu postgres "$@"
fi

exec "$@"
t2.rolname, t3.datname, queryid, calls, total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 JOIN pg_roles t2 ON (t1.userid=t2.oid) JOIN pg_database t3 ON (t1.dbid=t3.oid) WHERE t2.rolname != 'rdsadmin'" 32 | # master: true 33 | # metrics: 34 | # - rolname: 35 | # usage: "LABEL" 36 | # description: "Name of user" 37 | # - datname: 38 | # usage: "LABEL" 39 | # description: "Name of database" 40 | # - queryid: 41 | # usage: "LABEL" 42 | # description: "Query ID" 43 | # - calls: 44 | # usage: "COUNTER" 45 | # description: "Number of times executed" 46 | # - total_time_seconds: 47 | # usage: "COUNTER" 48 | # description: "Total time spent in the statement, in milliseconds" 49 | # - min_time_seconds: 50 | # usage: "GAUGE" 51 | # description: "Minimum time spent in the statement, in milliseconds" 52 | # - max_time_seconds: 53 | # usage: "GAUGE" 54 | # description: "Maximum time spent in the statement, in milliseconds" 55 | # - mean_time_seconds: 56 | # usage: "GAUGE" 57 | # description: "Mean time spent in the statement, in milliseconds" 58 | # - stddev_time_seconds: 59 | # usage: "GAUGE" 60 | # description: "Population standard deviation of time spent in the statement, in milliseconds" 61 | # - rows: 62 | # usage: "COUNTER" 63 | # description: "Total number of rows retrieved or affected by the statement" 64 | # - shared_blks_hit: 65 | # usage: "COUNTER" 66 | # description: "Total number of shared block cache hits by the statement" 67 | # - shared_blks_read: 68 | # usage: "COUNTER" 69 | # description: "Total number of 
shared blocks read by the statement" 70 | # - shared_blks_dirtied: 71 | # usage: "COUNTER" 72 | # description: "Total number of shared blocks dirtied by the statement" 73 | # - shared_blks_written: 74 | # usage: "COUNTER" 75 | # description: "Total number of shared blocks written by the statement" 76 | # - local_blks_hit: 77 | # usage: "COUNTER" 78 | # description: "Total number of local block cache hits by the statement" 79 | # - local_blks_read: 80 | # usage: "COUNTER" 81 | # description: "Total number of local blocks read by the statement" 82 | # - local_blks_dirtied: 83 | # usage: "COUNTER" 84 | # description: "Total number of local blocks dirtied by the statement" 85 | # - local_blks_written: 86 | # usage: "COUNTER" 87 | # description: "Total number of local blocks written by the statement" 88 | # - temp_blks_read: 89 | # usage: "COUNTER" 90 | # description: "Total number of temp blocks read by the statement" 91 | # - temp_blks_written: 92 | # usage: "COUNTER" 93 | # description: "Total number of temp blocks written by the statement" 94 | # - blk_read_time_seconds: 95 | # usage: "COUNTER" 96 | # description: "Total time the statement spent reading blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)" 97 | # - blk_write_time_seconds: 98 | # usage: "COUNTER" 99 | # description: "Total time the statement spent writing blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)" 100 | -------------------------------------------------------------------------------- /queries-lr.yaml: -------------------------------------------------------------------------------- 1 | pg_stat_user_tables: 2 | query: | 3 | SELECT 4 | current_database() datname, 5 | schemaname, 6 | relname, 7 | seq_scan, 8 | seq_tup_read, 9 | idx_scan, 10 | idx_tup_fetch, 11 | n_tup_ins, 12 | n_tup_upd, 13 | n_tup_del, 14 | n_tup_hot_upd, 15 | n_live_tup, 16 | n_dead_tup, 17 | n_mod_since_analyze, 18 | COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, 19 | 
COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, 20 | COALESCE(last_analyze, '1970-01-01Z') as last_analyze, 21 | COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, 22 | vacuum_count, 23 | autovacuum_count, 24 | analyze_count, 25 | autoanalyze_count 26 | FROM 27 | pg_stat_user_tables 28 | metrics: 29 | - datname: 30 | usage: "LABEL" 31 | description: "Name of current database" 32 | - schemaname: 33 | usage: "LABEL" 34 | description: "Name of the schema that this table is in" 35 | - relname: 36 | usage: "LABEL" 37 | description: "Name of this table" 38 | - seq_scan: 39 | usage: "COUNTER" 40 | description: "Number of sequential scans initiated on this table" 41 | - seq_tup_read: 42 | usage: "COUNTER" 43 | description: "Number of live rows fetched by sequential scans" 44 | - idx_scan: 45 | usage: "COUNTER" 46 | description: "Number of index scans initiated on this table" 47 | - idx_tup_fetch: 48 | usage: "COUNTER" 49 | description: "Number of live rows fetched by index scans" 50 | - n_tup_ins: 51 | usage: "COUNTER" 52 | description: "Number of rows inserted" 53 | - n_tup_upd: 54 | usage: "COUNTER" 55 | description: "Number of rows updated" 56 | - n_tup_del: 57 | usage: "COUNTER" 58 | description: "Number of rows deleted" 59 | - n_tup_hot_upd: 60 | usage: "COUNTER" 61 | description: "Number of rows HOT updated (i.e., with no separate index update required)" 62 | - n_live_tup: 63 | usage: "GAUGE" 64 | description: "Estimated number of live rows" 65 | - n_dead_tup: 66 | usage: "GAUGE" 67 | description: "Estimated number of dead rows" 68 | - n_mod_since_analyze: 69 | usage: "GAUGE" 70 | description: "Estimated number of rows changed since last analyze" 71 | - last_vacuum: 72 | usage: "GAUGE" 73 | description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)" 74 | - last_autovacuum: 75 | usage: "GAUGE" 76 | description: "Last time at which this table was vacuumed by the autovacuum daemon" 77 | - last_analyze: 78 | 
usage: "GAUGE" 79 | description: "Last time at which this table was manually analyzed" 80 | - last_autoanalyze: 81 | usage: "GAUGE" 82 | description: "Last time at which this table was analyzed by the autovacuum daemon" 83 | - vacuum_count: 84 | usage: "COUNTER" 85 | description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)" 86 | - autovacuum_count: 87 | usage: "COUNTER" 88 | description: "Number of times this table has been vacuumed by the autovacuum daemon" 89 | - analyze_count: 90 | usage: "COUNTER" 91 | description: "Number of times this table has been manually analyzed" 92 | - autoanalyze_count: 93 | usage: "COUNTER" 94 | description: "Number of times this table has been analyzed by the autovacuum daemon" 95 | 96 | pg_statio_user_tables: 97 | query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables" 98 | metrics: 99 | - datname: 100 | usage: "LABEL" 101 | description: "Name of current database" 102 | - schemaname: 103 | usage: "LABEL" 104 | description: "Name of the schema that this table is in" 105 | - relname: 106 | usage: "LABEL" 107 | description: "Name of this table" 108 | - heap_blks_read: 109 | usage: "COUNTER" 110 | description: "Number of disk blocks read from this table" 111 | - heap_blks_hit: 112 | usage: "COUNTER" 113 | description: "Number of buffer hits in this table" 114 | - idx_blks_read: 115 | usage: "COUNTER" 116 | description: "Number of disk blocks read from all indexes on this table" 117 | - idx_blks_hit: 118 | usage: "COUNTER" 119 | description: "Number of buffer hits in all indexes on this table" 120 | - toast_blks_read: 121 | usage: "COUNTER" 122 | description: "Number of disk blocks read from this table's TOAST table (if any)" 123 | - toast_blks_hit: 124 | usage: "COUNTER" 125 | description: "Number of buffer hits in this table's TOAST table (if 
any)" 126 | - tidx_blks_read: 127 | usage: "COUNTER" 128 | description: "Number of disk blocks read from this table's TOAST table indexes (if any)" 129 | - tidx_blks_hit: 130 | usage: "COUNTER" 131 | description: "Number of buffer hits in this table's TOAST table indexes (if any)" 132 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/postgres_exporter_integration_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // These are specialized integration tests. We only build them when we're doing 15 | // a lot of additional work to keep the external docker environment they require 16 | // working. 17 | //go:build integration 18 | 19 | package main 20 | 21 | import ( 22 | "fmt" 23 | "os" 24 | "strings" 25 | "testing" 26 | 27 | _ "github.com/lib/pq" 28 | "github.com/prometheus/client_golang/prometheus" 29 | . "gopkg.in/check.v1" 30 | ) 31 | 32 | // Hook up gocheck into the "go test" runner. 
33 | func Test(t *testing.T) { TestingT(t) } 34 | 35 | type IntegrationSuite struct { 36 | e *Exporter 37 | } 38 | 39 | var _ = Suite(&IntegrationSuite{}) 40 | 41 | func (s *IntegrationSuite) SetUpSuite(c *C) { 42 | dsn := os.Getenv("DATA_SOURCE_NAME") 43 | c.Assert(dsn, Not(Equals), "") 44 | 45 | exporter := NewExporter(strings.Split(dsn, ",")) 46 | c.Assert(exporter, NotNil) 47 | // Assign the exporter to the suite 48 | s.e = exporter 49 | 50 | prometheus.MustRegister(exporter) 51 | } 52 | 53 | // TODO: it would be nice if cu didn't mostly just recreate the scrape function 54 | func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) { 55 | // Setup a dummy channel to consume metrics 56 | ch := make(chan prometheus.Metric, 100) 57 | go func() { 58 | for range ch { 59 | } 60 | }() 61 | 62 | for _, dsn := range s.e.dsn { 63 | // Open a database connection 64 | server, err := NewServer(dsn) 65 | c.Assert(server, NotNil) 66 | c.Assert(err, IsNil) 67 | 68 | // Do a version update 69 | err = s.e.checkMapVersions(ch, server) 70 | c.Assert(err, IsNil) 71 | 72 | err = querySettings(ch, server) 73 | if !c.Check(err, Equals, nil) { 74 | fmt.Println("## ERRORS FOUND") 75 | fmt.Println(err) 76 | } 77 | 78 | // This should never happen in our test cases. 79 | errMap := queryNamespaceMappings(ch, server) 80 | if !c.Check(len(errMap), Equals, 0) { 81 | fmt.Println("## NAMESPACE ERRORS FOUND") 82 | for namespace, err := range errMap { 83 | fmt.Println(namespace, ":", err) 84 | } 85 | } 86 | server.Close() 87 | } 88 | } 89 | 90 | // TestInvalidDsnDoesntCrash tests that specifying an invalid DSN doesn't crash 91 | // the exporter. Related to https://github.com/prometheus-community/postgres_exporter/issues/93 92 | // although not a replication of the scenario. 
93 | func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) { 94 | // Setup a dummy channel to consume metrics 95 | ch := make(chan prometheus.Metric, 100) 96 | go func() { 97 | for range ch { 98 | } 99 | }() 100 | 101 | // Send a bad DSN 102 | exporter := NewExporter([]string{"invalid dsn"}) 103 | c.Assert(exporter, NotNil) 104 | exporter.scrape(ch) 105 | 106 | // Send a DSN to a non-listening port. 107 | exporter = NewExporter([]string{"postgresql://nothing:nothing@127.0.0.1:1/nothing"}) 108 | c.Assert(exporter, NotNil) 109 | exporter.scrape(ch) 110 | } 111 | 112 | // TestUnknownMetricParsingDoesntCrash deliberately deletes all the column maps out 113 | // of an exporter to test that the default metric handling code can cope with unknown columns. 114 | func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) { 115 | // Setup a dummy channel to consume metrics 116 | ch := make(chan prometheus.Metric, 100) 117 | go func() { 118 | for range ch { 119 | } 120 | }() 121 | 122 | dsn := os.Getenv("DATA_SOURCE_NAME") 123 | c.Assert(dsn, Not(Equals), "") 124 | 125 | exporter := NewExporter(strings.Split(dsn, ",")) 126 | c.Assert(exporter, NotNil) 127 | 128 | // Convert the default maps into a list of empty maps. 129 | emptyMaps := make(map[string]intermediateMetricMap, 0) 130 | for k := range exporter.builtinMetricMaps { 131 | emptyMaps[k] = intermediateMetricMap{ 132 | map[string]ColumnMapping{}, 133 | true, 134 | 0, 135 | } 136 | } 137 | exporter.builtinMetricMaps = emptyMaps 138 | 139 | // scrape the exporter and make sure it works 140 | exporter.scrape(ch) 141 | } 142 | 143 | // TestExtendQueriesDoesntCrash tests that specifying extend.query-path doesn't 144 | // crash. 
145 | func (s *IntegrationSuite) TestExtendQueriesDoesntCrash(c *C) { 146 | // Setup a dummy channel to consume metrics 147 | ch := make(chan prometheus.Metric, 100) 148 | go func() { 149 | for range ch { 150 | } 151 | }() 152 | 153 | dsn := os.Getenv("DATA_SOURCE_NAME") 154 | c.Assert(dsn, Not(Equals), "") 155 | 156 | exporter := NewExporter( 157 | strings.Split(dsn, ","), 158 | WithUserQueriesPath(map[MetricResolution]string{ 159 | HR: "../user_queries_test.yaml", 160 | MR: "../user_queries_test.yaml", 161 | LR: "../user_queries_test.yaml", 162 | }), 163 | ) 164 | c.Assert(exporter, NotNil) 165 | 166 | // scrape the exporter and make sure it works 167 | exporter.scrape(ch) 168 | } 169 | 170 | func (s *IntegrationSuite) TestAutoDiscoverDatabases(c *C) { 171 | dsn := os.Getenv("DATA_SOURCE_NAME") 172 | 173 | exporter := NewExporter( 174 | strings.Split(dsn, ","), 175 | ) 176 | c.Assert(exporter, NotNil) 177 | 178 | dsns := exporter.discoverDatabaseDSNs() 179 | 180 | c.Assert(len(dsns), Equals, 2) 181 | } 182 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/pg_setting.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package main 15 | 16 | import ( 17 | "fmt" 18 | "math" 19 | "strconv" 20 | "strings" 21 | 22 | "github.com/go-kit/log/level" 23 | "github.com/prometheus/client_golang/prometheus" 24 | ) 25 | 26 | var ( 27 | settingUnits = []string{ 28 | "ms", "s", "min", "h", "d", 29 | "B", "kB", "MB", "GB", "TB", 30 | } 31 | ) 32 | 33 | // Query the pg_settings view containing runtime variables 34 | func querySettings(ch chan<- prometheus.Metric, server *Server) error { 35 | level.Debug(logger).Log("msg", "Querying pg_setting view", "server", server) 36 | 37 | // pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html 38 | // 39 | // NOTE: If you add more vartypes here, you must update the supported 40 | // types in normaliseUnit() below 41 | query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real') AND name != 'sync_commit_cancel_wait';" 42 | 43 | rows, err := server.db.Query(query) 44 | if err != nil { 45 | return fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) 46 | } 47 | defer rows.Close() // nolint: errcheck 48 | 49 | for rows.Next() { 50 | s := &pgSetting{} 51 | err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype) 52 | if err != nil { 53 | return fmt.Errorf("Error retrieving rows on %q: %s %v", server, namespace, err) 54 | } 55 | 56 | ch <- s.metric(server.labels) 57 | } 58 | 59 | return nil 60 | } 61 | 62 | // pgSetting is represents a PostgreSQL runtime variable as returned by the 63 | // pg_settings view. 
64 | type pgSetting struct { 65 | name, setting, unit, shortDesc, vartype string 66 | } 67 | 68 | func (s *pgSetting) metric(labels prometheus.Labels) prometheus.Metric { 69 | var ( 70 | err error 71 | name = strings.ReplaceAll(s.name, ".", "_") 72 | unit = s.unit // nolint: ineffassign 73 | shortDesc = fmt.Sprintf("Server Parameter: %s", s.name) 74 | subsystem = "settings" 75 | val float64 76 | ) 77 | 78 | switch s.vartype { 79 | case "bool": 80 | if s.setting == "on" { 81 | val = 1 82 | } 83 | case "integer", "real": 84 | if val, unit, err = s.normaliseUnit(); err != nil { 85 | // Panic, since we should recognise all units 86 | // and don't want to silently exlude metrics 87 | panic(err) 88 | } 89 | 90 | if len(unit) > 0 { 91 | name = fmt.Sprintf("%s_%s", name, unit) 92 | shortDesc = fmt.Sprintf("%s [Units converted to %s.]", shortDesc, unit) 93 | } 94 | default: 95 | // Panic because we got a type we didn't ask for 96 | panic(fmt.Sprintf("Unsupported vartype %q", s.vartype)) 97 | } 98 | 99 | desc := newDesc(subsystem, name, shortDesc, labels) 100 | return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val) 101 | } 102 | 103 | // Removes units from any of the setting values. 
104 | // This is mostly because of a irregularity regarding AWS RDS Aurora 105 | // https://github.com/prometheus-community/postgres_exporter/issues/619 106 | func (s *pgSetting) sanitizeValue() { 107 | for _, unit := range settingUnits { 108 | if strings.HasSuffix(s.setting, unit) { 109 | endPos := len(s.setting) - len(unit) - 1 110 | s.setting = s.setting[:endPos] 111 | return 112 | } 113 | } 114 | } 115 | 116 | // TODO: fix linter override 117 | // nolint: nakedret 118 | func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) { 119 | s.sanitizeValue() 120 | 121 | val, err = strconv.ParseFloat(s.setting, 64) 122 | if err != nil { 123 | return val, unit, fmt.Errorf("Error converting setting %q value %q to float: %s", s.name, s.setting, err) 124 | } 125 | 126 | // Units defined in: https://www.postgresql.org/docs/current/static/config-setting.html 127 | switch s.unit { 128 | case "": 129 | return 130 | case "ms", "s", "min", "h", "d": 131 | unit = "seconds" 132 | case "B", "kB", "MB", "GB", "TB", "1kB", "2kB", "4kB", "8kB", "16kB", "32kB", "64kB", "16MB", "32MB", "64MB": 133 | unit = "bytes" 134 | default: 135 | err = fmt.Errorf("unknown unit for runtime variable: %q", s.unit) 136 | return 137 | } 138 | 139 | // -1 is special, don't modify the value 140 | if val == -1 { 141 | return 142 | } 143 | 144 | switch s.unit { 145 | case "ms": 146 | val /= 1000 147 | case "min": 148 | val *= 60 149 | case "h": 150 | val *= 60 * 60 151 | case "d": 152 | val *= 60 * 60 * 24 153 | case "kB": 154 | val *= math.Pow(2, 10) 155 | case "MB": 156 | val *= math.Pow(2, 20) 157 | case "GB": 158 | val *= math.Pow(2, 30) 159 | case "TB": 160 | val *= math.Pow(2, 40) 161 | case "1kB": 162 | val *= math.Pow(2, 10) 163 | case "2kB": 164 | val *= math.Pow(2, 11) 165 | case "4kB": 166 | val *= math.Pow(2, 12) 167 | case "8kB": 168 | val *= math.Pow(2, 13) 169 | case "16kB": 170 | val *= math.Pow(2, 14) 171 | case "32kB": 172 | val *= math.Pow(2, 15) 173 | case "64kB": 174 | 
val *= math.Pow(2, 16) 175 | case "16MB": 176 | val *= math.Pow(2, 24) 177 | case "32MB": 178 | val *= math.Pow(2, 25) 179 | case "64MB": 180 | val *= math.Pow(2, 26) 181 | } 182 | 183 | return 184 | } 185 | -------------------------------------------------------------------------------- /collector/pg_stat_bgwriter_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | "time" 19 | 20 | "github.com/DATA-DOG/go-sqlmock" 21 | "github.com/prometheus/client_golang/prometheus" 22 | dto "github.com/prometheus/client_model/go" 23 | "github.com/smartystreets/goconvey/convey" 24 | ) 25 | 26 | func TestPGStatBGWriterCollector(t *testing.T) { 27 | db, mock, err := sqlmock.New() 28 | if err != nil { 29 | t.Fatalf("Error opening a stub db connection: %s", err) 30 | } 31 | defer db.Close() 32 | 33 | inst := &instance{db: db, name: "test"} 34 | 35 | columns := []string{ 36 | "checkpoints_timed", 37 | "checkpoints_req", 38 | "checkpoint_write_time", 39 | "checkpoint_sync_time", 40 | "buffers_checkpoint", 41 | "buffers_clean", 42 | "maxwritten_clean", 43 | "buffers_backend", 44 | "buffers_backend_fsync", 45 | "buffers_alloc", 46 | "stats_reset"} 47 | 48 | srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") 49 | if err != nil { 50 | t.Fatalf("Error parsing time: %s", err) 51 | } 52 | 53 | rows := sqlmock.NewRows(columns). 
54 | AddRow(354, 4945, 289097744, 1242257, int64(3275602074), 89320867, 450139, 2034563757, 0, int64(2725688749), srT) 55 | mock.ExpectQuery(sanitizeQuery(statBGWriterQueryPrePG17)).WillReturnRows(rows) 56 | 57 | ch := make(chan prometheus.Metric) 58 | go func() { 59 | defer close(ch) 60 | c := PGStatBGWriterCollector{} 61 | 62 | if err := c.Update(context.Background(), inst, ch); err != nil { 63 | t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err) 64 | } 65 | }() 66 | 67 | labels := labelMap{"collector": "exporter", "server": "test"} 68 | expected := []MetricResult{ 69 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 354}, 70 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 4945}, 71 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 289097744}, 72 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 1242257}, 73 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 3275602074}, 74 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 89320867}, 75 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 450139}, 76 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 2034563757}, 77 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 78 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 2725688749}, 79 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 1685059842}, 80 | } 81 | 82 | convey.Convey("Metrics comparison", t, func() { 83 | for _, expect := range expected { 84 | m := readMetric(<-ch) 85 | convey.So(expect, convey.ShouldResemble, m) 86 | } 87 | }) 88 | if err := mock.ExpectationsWereMet(); err != nil { 89 | t.Errorf("there were unfulfilled exceptions: %s", err) 90 | } 91 | } 92 | 93 | func TestPGStatBGWriterCollectorNullValues(t *testing.T) { 94 | db, mock, err := sqlmock.New() 95 | if err != nil { 96 | t.Fatalf("Error opening a stub db connection: %s", err) 97 | } 98 | defer db.Close() 99 | 100 | inst := &instance{db: db, name: 
"test"} 101 | 102 | columns := []string{ 103 | "checkpoints_timed", 104 | "checkpoints_req", 105 | "checkpoint_write_time", 106 | "checkpoint_sync_time", 107 | "buffers_checkpoint", 108 | "buffers_clean", 109 | "maxwritten_clean", 110 | "buffers_backend", 111 | "buffers_backend_fsync", 112 | "buffers_alloc", 113 | "stats_reset"} 114 | 115 | rows := sqlmock.NewRows(columns). 116 | AddRow(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) 117 | mock.ExpectQuery(sanitizeQuery(statBGWriterQueryPrePG17)).WillReturnRows(rows) 118 | 119 | ch := make(chan prometheus.Metric) 120 | go func() { 121 | defer close(ch) 122 | c := PGStatBGWriterCollector{} 123 | 124 | if err := c.Update(context.Background(), inst, ch); err != nil { 125 | t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err) 126 | } 127 | }() 128 | 129 | labels := labelMap{"collector": "exporter", "server": "test"} 130 | expected := []MetricResult{ 131 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 132 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 133 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 134 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 135 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 136 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 137 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 138 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 139 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 140 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 141 | {labels: labels, metricType: dto.MetricType_COUNTER, value: 0}, 142 | } 143 | 144 | convey.Convey("Metrics comparison", t, func() { 145 | for _, expect := range expected { 146 | m := readMetric(<-ch) 147 | convey.So(expect, convey.ShouldResemble, m) 148 | } 149 | }) 150 | if err := mock.ExpectationsWereMet(); err != nil { 151 | t.Errorf("there were unfulfilled 
exceptions: %s", err) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/server.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package main 15 | 16 | import ( 17 | "database/sql" 18 | "fmt" 19 | "sync" 20 | "time" 21 | 22 | "github.com/blang/semver/v4" 23 | "github.com/go-kit/log/level" 24 | "github.com/prometheus/client_golang/prometheus" 25 | ) 26 | 27 | // Server describes a connection to Postgres. 28 | // Also it contains metrics map and query overrides. 29 | type Server struct { 30 | db *sql.DB 31 | labels prometheus.Labels 32 | master bool 33 | runonserver string 34 | 35 | // Last version used to calculate metric map. If mismatch on scrape, 36 | // then maps are recalculated. 37 | lastMapVersion semver.Version 38 | // Currently active metric map 39 | metricMap map[string]MetricMapNamespace 40 | // Currently active query overrides 41 | queryOverrides map[string]string 42 | mappingMtx sync.RWMutex 43 | // Currently cached metrics 44 | metricCache map[string]cachedMetrics 45 | cacheMtx sync.Mutex 46 | } 47 | 48 | // ServerOpt configures a server. 49 | type ServerOpt func(*Server) 50 | 51 | // ServerWithLabels configures a set of labels. 
52 | func ServerWithLabels(labels prometheus.Labels) ServerOpt { 53 | return func(s *Server) { 54 | for k, v := range labels { 55 | s.labels[k] = v 56 | } 57 | s.labels["collector"] = "exporter" 58 | } 59 | } 60 | 61 | // NewServer establishes a new connection using DSN. 62 | func NewServer(dsn string, opts ...ServerOpt) (*Server, error) { 63 | fingerprint, err := parseFingerprint(dsn) 64 | if err != nil { 65 | return nil, err 66 | } 67 | 68 | db, err := sql.Open("postgres", dsn) 69 | if err != nil { 70 | return nil, err 71 | } 72 | db.SetMaxOpenConns(1) 73 | db.SetMaxIdleConns(1) 74 | 75 | level.Debug(logger).Log("msg", "Established new database connection", "fingerprint", fingerprint) 76 | 77 | s := &Server{ 78 | db: db, 79 | master: false, 80 | labels: prometheus.Labels{ 81 | serverLabelName: fingerprint, 82 | }, 83 | metricCache: make(map[string]cachedMetrics), 84 | } 85 | 86 | for _, opt := range opts { 87 | opt(s) 88 | } 89 | 90 | return s, nil 91 | } 92 | 93 | // Close disconnects from Postgres. 94 | func (s *Server) Close() error { 95 | return s.db.Close() 96 | } 97 | 98 | // Ping checks connection availability and possibly invalidates the connection if it fails. 99 | func (s *Server) Ping() error { 100 | if err := s.db.Ping(); err != nil { 101 | if cerr := s.Close(); cerr != nil { 102 | level.Error(logger).Log("msg", "Error while closing non-pinging DB connection", "server", s, "err", cerr) 103 | } 104 | return err 105 | } 106 | return nil 107 | } 108 | 109 | // String returns server's fingerprint. 110 | func (s *Server) String() string { 111 | return s.labels[serverLabelName] 112 | } 113 | 114 | // Scrape loads metrics. 
115 | func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error { 116 | s.mappingMtx.RLock() 117 | defer s.mappingMtx.RUnlock() 118 | 119 | var err error 120 | 121 | if !disableSettingsMetrics && s.master { 122 | if err = querySettings(ch, s); err != nil { 123 | err = fmt.Errorf("error retrieving settings: %s", err) 124 | } 125 | } 126 | 127 | errMap := queryNamespaceMappings(ch, s) 128 | if len(errMap) > 0 { 129 | err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap)) 130 | level.Error(logger).Log("msg", "NAMESPACE ERRORS FOUND") 131 | for namespace, err := range errMap { 132 | level.Error(logger).Log("namespace", namespace, "msg", err) 133 | } 134 | } 135 | 136 | return err 137 | } 138 | 139 | // Servers contains a collection of servers to Postgres. 140 | type Servers struct { 141 | m sync.Mutex 142 | servers map[string]*Server 143 | opts []ServerOpt 144 | } 145 | 146 | // NewServers creates a collection of servers to Postgres. 147 | func NewServers(opts ...ServerOpt) *Servers { 148 | return &Servers{ 149 | servers: make(map[string]*Server), 150 | opts: opts, 151 | } 152 | } 153 | 154 | // GetServer returns established connection from a collection. 155 | func (s *Servers) GetServer(dsn string) (*Server, error) { 156 | s.m.Lock() 157 | defer s.m.Unlock() 158 | var err error 159 | var ok bool 160 | errCount := 0 // start at zero because we increment before doing work 161 | retries := 1 162 | var server *Server 163 | for { 164 | if errCount++; errCount > retries { 165 | return nil, err 166 | } 167 | server, ok = s.servers[dsn] 168 | if !ok { 169 | server, err = NewServer(dsn, s.opts...) 
170 | if err != nil { 171 | time.Sleep(time.Duration(errCount) * time.Second) 172 | continue 173 | } 174 | s.servers[dsn] = server 175 | } 176 | if err = server.Ping(); err != nil { 177 | server.Close() 178 | delete(s.servers, dsn) 179 | time.Sleep(time.Duration(errCount) * time.Second) 180 | continue 181 | } 182 | break 183 | } 184 | return server, nil 185 | } 186 | 187 | // Close disconnects from all known servers. 188 | func (s *Servers) Close() { 189 | s.m.Lock() 190 | defer s.m.Unlock() 191 | for _, server := range s.servers { 192 | if err := server.Close(); err != nil { 193 | level.Error(logger).Log("msg", "Failed to close connection", "server", server, "err", err) 194 | } 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /cmd/postgres_exporter/util.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package main 15 | 16 | import ( 17 | "fmt" 18 | "math" 19 | "net/url" 20 | "strconv" 21 | "strings" 22 | "time" 23 | 24 | "github.com/go-kit/log/level" 25 | "github.com/lib/pq" 26 | ) 27 | 28 | func contains(a []string, x string) bool { 29 | for _, n := range a { 30 | if x == n { 31 | return true 32 | } 33 | } 34 | return false 35 | } 36 | 37 | // convert a string to the corresponding ColumnUsage 38 | func stringToColumnUsage(s string) (ColumnUsage, error) { 39 | var u ColumnUsage 40 | var err error 41 | switch s { 42 | case "DISCARD": 43 | u = DISCARD 44 | 45 | case "LABEL": 46 | u = LABEL 47 | 48 | case "COUNTER": 49 | u = COUNTER 50 | 51 | case "GAUGE": 52 | u = GAUGE 53 | 54 | case "HISTOGRAM": 55 | u = HISTOGRAM 56 | 57 | case "MAPPEDMETRIC": 58 | u = MAPPEDMETRIC 59 | 60 | case "DURATION": 61 | u = DURATION 62 | 63 | default: 64 | err = fmt.Errorf("wrong ColumnUsage given : %s", s) 65 | } 66 | 67 | return u, err 68 | } 69 | 70 | // Convert database.sql types to float64s for Prometheus consumption. Null types are mapped to NaN. 
string and []byte 71 | // types are mapped as NaN and !ok 72 | func dbToFloat64(t interface{}) (float64, bool) { 73 | switch v := t.(type) { 74 | case int64: 75 | return float64(v), true 76 | case float64: 77 | return v, true 78 | case time.Time: 79 | return float64(v.Unix()), true 80 | case []byte: 81 | // Try and convert to string and then parse to a float64 82 | strV := string(v) 83 | result, err := strconv.ParseFloat(strV, 64) 84 | if err != nil { 85 | level.Info(logger).Log("msg", "Could not parse []byte", "err", err) 86 | return math.NaN(), false 87 | } 88 | return result, true 89 | case string: 90 | result, err := strconv.ParseFloat(v, 64) 91 | if err != nil { 92 | level.Info(logger).Log("msg", "Could not parse string", "err", err) 93 | return math.NaN(), false 94 | } 95 | return result, true 96 | case bool: 97 | if v { 98 | return 1.0, true 99 | } 100 | return 0.0, true 101 | case nil: 102 | return math.NaN(), true 103 | default: 104 | return math.NaN(), false 105 | } 106 | } 107 | 108 | // Convert database.sql types to uint64 for Prometheus consumption. Null types are mapped to 0. 
string and []byte 109 | // types are mapped as 0 and !ok 110 | func dbToUint64(t interface{}) (uint64, bool) { 111 | switch v := t.(type) { 112 | case uint64: 113 | return v, true 114 | case int64: 115 | return uint64(v), true 116 | case float64: 117 | return uint64(v), true 118 | case time.Time: 119 | return uint64(v.Unix()), true 120 | case []byte: 121 | // Try and convert to string and then parse to a uint64 122 | strV := string(v) 123 | result, err := strconv.ParseUint(strV, 10, 64) 124 | if err != nil { 125 | level.Info(logger).Log("msg", "Could not parse []byte", "err", err) 126 | return 0, false 127 | } 128 | return result, true 129 | case string: 130 | result, err := strconv.ParseUint(v, 10, 64) 131 | if err != nil { 132 | level.Info(logger).Log("msg", "Could not parse string", "err", err) 133 | return 0, false 134 | } 135 | return result, true 136 | case bool: 137 | if v { 138 | return 1, true 139 | } 140 | return 0, true 141 | case nil: 142 | return 0, true 143 | default: 144 | return 0, false 145 | } 146 | } 147 | 148 | // Convert database.sql to string for Prometheus labels. Null types are mapped to empty strings. 
149 | func dbToString(t interface{}) (string, bool) { 150 | switch v := t.(type) { 151 | case int64: 152 | return fmt.Sprintf("%v", v), true 153 | case float64: 154 | return fmt.Sprintf("%v", v), true 155 | case time.Time: 156 | return fmt.Sprintf("%v", v.Unix()), true 157 | case nil: 158 | return "", true 159 | case []byte: 160 | // Try and convert to string 161 | return string(v), true 162 | case string: 163 | return v, true 164 | case bool: 165 | if v { 166 | return "true", true 167 | } 168 | return "false", true 169 | default: 170 | return "", false 171 | } 172 | } 173 | 174 | func parseFingerprint(url string) (string, error) { 175 | dsn, err := pq.ParseURL(url) 176 | if err != nil { 177 | dsn = url 178 | } 179 | 180 | pairs := strings.Split(dsn, " ") 181 | kv := make(map[string]string, len(pairs)) 182 | for _, pair := range pairs { 183 | splitted := strings.SplitN(pair, "=", 2) 184 | if len(splitted) != 2 { 185 | return "", fmt.Errorf("malformed dsn %q", dsn) 186 | } 187 | // Newer versions of pq.ParseURL quote values so trim them off if they exist 188 | key := strings.Trim(splitted[0], "'\"") 189 | value := strings.Trim(splitted[1], "'\"") 190 | kv[key] = value 191 | } 192 | 193 | var fingerprint string 194 | 195 | if host, ok := kv["host"]; ok { 196 | fingerprint += host 197 | } else { 198 | fingerprint += "localhost" 199 | } 200 | 201 | if port, ok := kv["port"]; ok { 202 | fingerprint += ":" + port 203 | } else { 204 | fingerprint += ":5432" 205 | } 206 | 207 | return fingerprint, nil 208 | } 209 | 210 | func loggableDSN(dsn string) string { 211 | pDSN, err := url.Parse(dsn) 212 | if err != nil { 213 | return "could not parse DATA_SOURCE_NAME" 214 | } 215 | // Blank user info if not nil 216 | if pDSN.User != nil { 217 | pDSN.User = url.UserPassword(pDSN.User.Username(), "PASSWORD_REMOVED") 218 | } 219 | 220 | return pDSN.String() 221 | } 222 | -------------------------------------------------------------------------------- 
/postgres_mixin/alerts/postgres.libsonnet: --------------------------------------------------------------------------------
{
  prometheusAlerts+:: {
    groups+: [
      {
        name: 'PostgreSQL',
        rules: [
          {
            alert: 'PostgreSQLMaxConnectionsReached',
            annotations: {
              // Fixed: unbalanced parenthesis at the end of the description.
              description: '{{ $labels.instance }} is exceeding the currently configured maximum Postgres connection limit (current value: {{ $value }}s). Services may be degraded - please take immediate action (you probably need to increase max_connections in the Docker image and re-deploy).',
              summary: '{{ $labels.instance }} has maxed out Postgres connections.',
            },
            expr: |||
              sum by (instance) (pg_stat_activity_count{%(postgresExporterSelector)s})
              >=
              sum by (instance) (pg_settings_max_connections{%(postgresExporterSelector)s})
              -
              sum by (instance) (pg_settings_superuser_reserved_connections{%(postgresExporterSelector)s})
            ||| % $._config,
            'for': '1m',
            labels: {
              severity: 'warning',
            },
          },
          {
            alert: 'PostgreSQLHighConnections',
            annotations: {
              // Fixed typo: "mostly likely" -> "most likely".
              description: '{{ $labels.instance }} is exceeding 80% of the currently configured maximum Postgres connection limit (current value: {{ $value }}s). Please check utilization graphs and confirm if this is normal service growth, abuse or an otherwise temporary condition or if new resources need to be provisioned (or the limits increased, which is most likely).',
              summary: '{{ $labels.instance }} is over 80% of max Postgres connections.',
            },
            expr: |||
              sum by (instance) (pg_stat_activity_count{%(postgresExporterSelector)s})
              >
              (
                sum by (instance) (pg_settings_max_connections{%(postgresExporterSelector)s})
                -
                sum by (instance) (pg_settings_superuser_reserved_connections{%(postgresExporterSelector)s})
              ) * 0.8
            ||| % $._config,
            'for': '10m',
            labels: {
              severity: 'warning',
            },
          },
          {
            alert: 'PostgreSQLDown',
            annotations: {
              // Fixed typo: "effected" -> "affected".
              description: '{{ $labels.instance }} is rejecting query requests from the exporter, and thus probably not allowing DNS requests to work either. User services should not be affected provided at least 1 node is still alive.',
              summary: 'PostgreSQL is not processing queries: {{ $labels.instance }}',
            },
            expr: 'pg_up{%(postgresExporterSelector)s} != 1' % $._config,
            'for': '1m',
            labels: {
              severity: 'warning',
            },
          },
          {
            alert: 'PostgreSQLSlowQueries',
            annotations: {
              description: 'PostgreSQL high number of slow queries {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }} ',
              // Fixed: missing word "queries" in the summary.
              summary: 'PostgreSQL high number of slow queries on {{ $labels.cluster }} for database {{ $labels.datname }} ',
            },
            expr: |||
              avg by (datname) (
                rate (
                  pg_stat_activity_max_tx_duration{datname!~"template.*",%(postgresExporterSelector)s}[2m]
                )
              ) > 2 * 60
            ||| % $._config,
            'for': '2m',
            labels: {
              severity: 'warning',
            },
          },
          {
            alert: 'PostgreSQLQPS',
            annotations: {
              description: 'PostgreSQL high number of queries per second on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}',
              summary: 'PostgreSQL high number of queries per second {{ $labels.cluster }} for database {{ $labels.datname }}',
            },
            expr: |||
              avg by (datname) (
                irate(
                  pg_stat_database_xact_commit{datname!~"template.*",%(postgresExporterSelector)s}[5m]
                )
                +
                irate(
                  pg_stat_database_xact_rollback{datname!~"template.*",%(postgresExporterSelector)s}[5m]
                )
              ) > 10000
            ||| % $._config,
            'for': '5m',
            labels: {
              severity: 'warning',
            },
          },
          {
            alert: 'PostgreSQLCacheHitRatio',
            annotations: {
              description: 'PostgreSQL low on cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}',
              summary: 'PostgreSQL low cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }}',
            },
            expr: |||
              avg by (datname) (
                rate(pg_stat_database_blks_hit{datname!~"template.*",%(postgresExporterSelector)s}[5m])
                /
                (
                  rate(
                    pg_stat_database_blks_hit{datname!~"template.*",%(postgresExporterSelector)s}[5m]
                  )
                  +
                  rate(
                    pg_stat_database_blks_read{datname!~"template.*",%(postgresExporterSelector)s}[5m]
                  )
                )
              ) < 0.98
            ||| % $._config,
            'for': '5m',
            labels: {
              severity: 'warning',
            },
          },
        ],
      },
    ],
  },
}
-------------------------------------------------------------------------------- /collector/pg_statio_user_tables_test.go: --------------------------------------------------------------------------------
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | package collector 14 | 15 | import ( 16 | "context" 17 | "testing" 18 | 19 | "github.com/DATA-DOG/go-sqlmock" 20 | "github.com/prometheus/client_golang/prometheus" 21 | dto "github.com/prometheus/client_model/go" 22 | "github.com/smartystreets/goconvey/convey" 23 | ) 24 | 25 | func TestPGStatIOUserTablesCollector(t *testing.T) { 26 | db, mock, err := sqlmock.New() 27 | if err != nil { 28 | t.Fatalf("Error opening a stub db connection: %s", err) 29 | } 30 | defer db.Close() 31 | 32 | inst := &instance{db: db} 33 | 34 | columns := []string{ 35 | "datname", 36 | "schemaname", 37 | "relname", 38 | "heap_blks_read", 39 | "heap_blks_hit", 40 | "idx_blks_read", 41 | "idx_blks_hit", 42 | "toast_blks_read", 43 | "toast_blks_hit", 44 | "tidx_blks_read", 45 | "tidx_blks_hit", 46 | } 47 | rows := sqlmock.NewRows(columns). 
48 | AddRow("postgres", 49 | "public", 50 | "a_table", 51 | 1, 52 | 2, 53 | 3, 54 | 4, 55 | 5, 56 | 6, 57 | 7, 58 | 8) 59 | mock.ExpectQuery(sanitizeQuery(statioUserTablesQuery)).WillReturnRows(rows) 60 | ch := make(chan prometheus.Metric) 61 | go func() { 62 | defer close(ch) 63 | c := PGStatIOUserTablesCollector{} 64 | 65 | if err := c.Update(context.Background(), inst, ch); err != nil { 66 | t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err) 67 | } 68 | }() 69 | 70 | expected := []MetricResult{ 71 | {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 1}, 72 | {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 2}, 73 | {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 3}, 74 | {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 4}, 75 | {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 5}, 76 | {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 6}, 77 | {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 7}, 78 | {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 8}, 79 | } 80 | 81 | convey.Convey("Metrics comparison", t, func() { 82 | for _, expect := range expected { 83 | m := readMetric(<-ch) 84 | convey.So(expect, convey.ShouldResemble, m) 85 | } 86 | }) 87 | if err := mock.ExpectationsWereMet(); err != nil { 88 | t.Errorf("there were unfulfilled exceptions: %s", err) 89 | } 90 | } 91 | 92 | func 
TestPGStatIOUserTablesCollectorNullValues(t *testing.T) { 93 | db, mock, err := sqlmock.New() 94 | if err != nil { 95 | t.Fatalf("Error opening a stub db connection: %s", err) 96 | } 97 | defer db.Close() 98 | 99 | inst := &instance{db: db} 100 | 101 | columns := []string{ 102 | "datname", 103 | "schemaname", 104 | "relname", 105 | "heap_blks_read", 106 | "heap_blks_hit", 107 | "idx_blks_read", 108 | "idx_blks_hit", 109 | "toast_blks_read", 110 | "toast_blks_hit", 111 | "tidx_blks_read", 112 | "tidx_blks_hit", 113 | } 114 | rows := sqlmock.NewRows(columns). 115 | AddRow(nil, 116 | nil, 117 | nil, 118 | nil, 119 | nil, 120 | nil, 121 | nil, 122 | nil, 123 | nil, 124 | nil, 125 | nil) 126 | mock.ExpectQuery(sanitizeQuery(statioUserTablesQuery)).WillReturnRows(rows) 127 | ch := make(chan prometheus.Metric) 128 | go func() { 129 | defer close(ch) 130 | c := PGStatIOUserTablesCollector{} 131 | 132 | if err := c.Update(context.Background(), inst, ch); err != nil { 133 | t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err) 134 | } 135 | }() 136 | 137 | expected := []MetricResult{ 138 | {labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, 139 | {labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, 140 | {labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, 141 | {labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, 142 | {labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, 143 | {labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, 144 | {labels: labelMap{"datname": "unknown", 
"schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, 145 | {labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, 146 | } 147 | 148 | convey.Convey("Metrics comparison", t, func() { 149 | for _, expect := range expected { 150 | m := readMetric(<-ch) 151 | convey.So(expect, convey.ShouldResemble, m) 152 | } 153 | }) 154 | if err := mock.ExpectationsWereMet(); err != nil { 155 | t.Errorf("there were unfulfilled exceptions: %s", err) 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /percona_tests/utils_test.go: -------------------------------------------------------------------------------- 1 | package percona_tests 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "fmt" 7 | "io" 8 | "net" 9 | "net/http" 10 | "os" 11 | "os/exec" 12 | "path/filepath" 13 | "strings" 14 | "time" 15 | 16 | "github.com/pkg/errors" 17 | "golang.org/x/sys/unix" 18 | ) 19 | 20 | const ( 21 | postgresHost = "postgres" 22 | postgresPort = 5432 23 | postgresUser = "postgres" 24 | postgresPassword = "postgres" 25 | 26 | portRangeStart = 20000 // exporter web interface listening port 27 | portRangeEnd = 20100 // exporter web interface listening port 28 | 29 | exporterWaitTimeoutMs = 3000 // time to wait for exporter process start 30 | 31 | updatedExporterFileName = "/usr/src/myapp/percona_tests/assets/postgres_exporter" 32 | oldExporterFileName = "/usr/src/myapp/percona_tests/assets/postgres_exporter_percona" 33 | updatedExporterArgs = "/usr/src/myapp/percona_tests/assets/test.new-flags.txt" 34 | oldExporterArgs = "/usr/src/myapp/percona_tests/assets/test.old-flags.txt" 35 | updatedExporterMetrics = "/usr/src/myapp/percona_tests/assets/metrics.new" 36 | oldExporterMetrics = "/usr/src/myapp/percona_tests/assets/metrics.old" 37 | ) 38 | 39 | func getBool(val *bool) bool { 40 | return val != nil && *val 41 | } 42 | 43 | func 
launchExporter(fileName string, argsFile string) (cmd *exec.Cmd, port int, collectOutput func() string, _ error) { 44 | lines, err := os.ReadFile(argsFile) 45 | if err != nil { 46 | return nil, 0, nil, errors.Wrapf(err, "Unable to read exporter args file") 47 | } 48 | 49 | port = -1 50 | for i := portRangeStart; i < portRangeEnd; i++ { 51 | if checkPort(i) { 52 | port = i 53 | break 54 | } 55 | } 56 | 57 | if port == -1 { 58 | return nil, 0, nil, errors.Wrapf(err, "Failed to find free port in range [%d..%d]", portRangeStart, portRangeEnd) 59 | } 60 | 61 | linesStr := string(lines) 62 | linesStr += fmt.Sprintf("\n--web.listen-address=127.0.0.1:%d", port) 63 | 64 | absolutePath, _ := filepath.Abs("custom-queries") 65 | linesStr += fmt.Sprintf("\n--collect.custom_query.hr.directory=%s/high-resolution", absolutePath) 66 | linesStr += fmt.Sprintf("\n--collect.custom_query.mr.directory=%s/medium-resolution", absolutePath) 67 | linesStr += fmt.Sprintf("\n--collect.custom_query.lr.directory=%s/low-resolution", absolutePath) 68 | 69 | linesArr := strings.Split(linesStr, "\n") 70 | 71 | dsn := fmt.Sprintf("DATA_SOURCE_NAME=postgresql://%s:%s@%s:%d/postgres?sslmode=disable", postgresUser, postgresPassword, postgresHost, postgresPort) 72 | 73 | cmd = exec.Command(fileName, linesArr...) 74 | cmd.Env = os.Environ() 75 | cmd.Env = append(cmd.Env, dsn) 76 | 77 | var outBuffer, errorBuffer bytes.Buffer 78 | cmd.Stdout = &outBuffer 79 | cmd.Stderr = &errorBuffer 80 | 81 | collectOutput = func() string { 82 | result := "" 83 | outStr := outBuffer.String() 84 | if outStr == "" { 85 | result = "Process stdOut was empty. " 86 | } else { 87 | result = fmt.Sprintf("Process stdOut:\n%s\n", outStr) 88 | } 89 | errStr := errorBuffer.String() 90 | if errStr == "" { 91 | result += "Process stdErr was empty." 
92 | } else { 93 | result += fmt.Sprintf("Process stdErr:\n%s\n", errStr) 94 | } 95 | 96 | return result 97 | } 98 | 99 | err = cmd.Start() 100 | if err != nil { 101 | return nil, 0, nil, errors.Wrapf(err, "Failed to start exporter.%s", collectOutput()) 102 | } 103 | 104 | err = waitForExporter(port) 105 | if err != nil { 106 | return nil, 0, nil, errors.Wrapf(err, "Failed to wait for exporter.%s", collectOutput()) 107 | } 108 | 109 | return cmd, port, collectOutput, nil 110 | } 111 | 112 | func stopExporter(cmd *exec.Cmd, collectOutput func() string) error { 113 | err := cmd.Process.Signal(unix.SIGINT) 114 | if err != nil { 115 | return errors.Wrapf(err, "Failed to send SIGINT to exporter process.%s\n", collectOutput()) 116 | } 117 | 118 | err = cmd.Wait() 119 | if err != nil && err.Error() != "signal: interrupt" { 120 | return errors.Wrapf(err, "Failed to wait for exporter process termination.%s\n", collectOutput()) 121 | } 122 | 123 | fmt.Println(collectOutput()) 124 | 125 | return nil 126 | } 127 | func tryGetMetrics(port int) (string, error) { 128 | return tryGetMetricsFrom(port, "metrics") 129 | } 130 | 131 | func tryGetMetricsFrom(port int, endpoint string) (string, error) { 132 | uri := fmt.Sprintf("http://127.0.0.1:%d/%s", port, endpoint) 133 | client := new(http.Client) 134 | 135 | request, err := http.NewRequest("GET", uri, nil) 136 | if err != nil { 137 | return "", err 138 | } 139 | request.Header.Add("Accept-Encoding", "gzip") 140 | 141 | response, err := client.Do(request) 142 | 143 | if err != nil { 144 | return "", fmt.Errorf("failed to get response from exporters web interface: %w", err) 145 | } 146 | 147 | if response.StatusCode != http.StatusOK { 148 | return "", fmt.Errorf("failed to get response from exporters web interface: %w", err) 149 | } 150 | 151 | // Check that the server actually sent compressed data 152 | var reader io.ReadCloser 153 | enc := response.Header.Get("Content-Encoding") 154 | switch enc { 155 | case "gzip": 156 | reader, 
err = gzip.NewReader(response.Body) 157 | if err != nil { 158 | return "", fmt.Errorf("failed to create gzip reader: %w", err) 159 | } 160 | defer reader.Close() 161 | default: 162 | reader = response.Body 163 | } 164 | 165 | buf := new(strings.Builder) 166 | _, err = io.Copy(buf, reader) 167 | if err != nil { 168 | return "", err 169 | } 170 | 171 | rr := buf.String() 172 | if rr == "" { 173 | return "", fmt.Errorf("failed to read response") 174 | } 175 | 176 | err = response.Body.Close() 177 | if err != nil { 178 | return "", fmt.Errorf("failed to close response: %w", err) 179 | } 180 | 181 | return rr, nil 182 | } 183 | 184 | func checkPort(port int) bool { 185 | ln, err := net.Listen("tcp", ":"+fmt.Sprint(port)) 186 | if err != nil { 187 | return false 188 | } 189 | 190 | _ = ln.Close() 191 | return true 192 | } 193 | 194 | func waitForExporter(port int) error { 195 | watchdog := exporterWaitTimeoutMs 196 | 197 | _, e := tryGetMetrics(port) 198 | for ; e != nil && watchdog > 0; watchdog-- { 199 | time.Sleep(1 * time.Millisecond) 200 | _, e = tryGetMetrics(port) 201 | } 202 | 203 | if watchdog == 0 { 204 | return fmt.Errorf("failed to wait for exporter (on port %d)", port) 205 | } 206 | 207 | return nil 208 | } 209 | -------------------------------------------------------------------------------- /percona_tests/performance_test.go: -------------------------------------------------------------------------------- 1 | package percona_tests 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | "strconv" 8 | "strings" 9 | "testing" 10 | "time" 11 | 12 | "github.com/montanaflynn/stats" 13 | "github.com/pkg/errors" 14 | "github.com/stretchr/testify/assert" 15 | "github.com/tklauser/go-sysconf" 16 | ) 17 | 18 | const ( 19 | repeatCount = 5 20 | scrapesCount = 50 21 | ) 22 | 23 | var doRun = flag.Bool("doRun", false, "") 24 | var url = flag.String("url", "", "") 25 | 26 | type StatsData struct { 27 | meanMs float64 28 | stdDevMs float64 29 | stdDevPerc float64 30 | 31 | 
	meanHwm        float64
	stdDevHwmBytes float64
	stdDevHwmPerc  float64

	meanData        float64
	stdDevDataBytes float64
	stdDevDataPerc  float64
}

// TestPerformance benchmarks the upstream and percona exporter binaries by
// scraping each repeatedly and printing a CPU/memory comparison. Skipped
// unless -doRun is set (manual runs through make only).
func TestPerformance(t *testing.T) {
	// put postgres_exporter and postgres_exporter_percona files in 'percona' folder
	// or use TestPrepareExporters to download exporters from feature build
	if !getBool(doRun) {
		t.Skip("For manual runs only through make")
		return
	}

	var updated, original *StatsData
	t.Run("upstream exporter", func(t *testing.T) {
		updated = doTestStats(t, repeatCount, scrapesCount, updatedExporterFileName, updatedExporterArgs)
	})

	t.Run("percona exporter", func(t *testing.T) {
		original = doTestStats(t, repeatCount, scrapesCount, oldExporterFileName, oldExporterArgs)
	})

	printStats(original, updated)
}

// calculatePerc returns the relative change from base to updated as a signed
// percentage (negative when updated is smaller than base).
func calculatePerc(base, updated float64) float64 {
	diff := base - updated
	diffPerc := float64(100) / base * diff
	diffPerc = diffPerc * -1

	return diffPerc
}

// doTestStats runs cnt measurement rounds of size scrapes each against the
// given exporter binary, then prints and returns mean/stddev statistics for
// CPU time (per scrape, in ms), HWM and Data memory (kB).
func doTestStats(t *testing.T, cnt, size int, fileName, argsFile string) *StatsData {
	var durations []float64
	var hwms []float64
	var datas []float64

	for i := 0; i < cnt; i++ {
		d, hwm, data, err := doTest(size, fileName, argsFile)
		if !assert.NoError(t, err) {
			return nil
		}

		durations = append(durations, float64(d))
		hwms = append(hwms, float64(hwm))
		datas = append(datas, float64(data))
	}

	mean, _ := stats.Mean(durations)
	stdDev, _ := stats.StandardDeviation(durations)
	stdDev = float64(100) / mean * stdDev

	// Clock ticks per second, needed to convert /proc stat ticks to ms.
	clockTicks, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
	if err != nil {
		panic(err)
	}

	// ticks -> milliseconds, then averaged per individual scrape.
	mean = mean * float64(1000) / float64(clockTicks) / float64(size)
	stdDevMs := stdDev / float64(100) * mean

	meanHwm, _ := stats.Mean(hwms)
	stdDevHwm, _ := stats.StandardDeviation(hwms)
	stdDevHwmPerc := float64(100) / meanHwm * stdDevHwm

	meanData, _ := stats.Mean(datas)
	stdDevData, _ := stats.StandardDeviation(datas)
	stdDevDataPerc := float64(100) / meanData * stdDevData

	st := StatsData{
		meanMs:     mean,
		stdDevMs:   stdDevMs,
		stdDevPerc: stdDev,

		meanHwm:        meanHwm,
		stdDevHwmBytes: stdDevHwm,
		stdDevHwmPerc:  stdDevHwmPerc,

		meanData:        meanData,
		stdDevDataBytes: stdDevData,
		stdDevDataPerc:  stdDevDataPerc,
	}

	//fmt.Printf("loop %dx%d: sample time: %.2fms [deviation ±%.2fms, %.1f%%]\n", cnt, scrapesCount, st.meanMs, st.stdDevMs, st.stdDevPerc)
	fmt.Printf("running %d scrapes %d times\n", size, cnt)
	fmt.Printf("CPU\t%.1fms [±%.1fms, %.1f%%]\n", st.meanMs, st.stdDevMs, st.stdDevPerc)
	fmt.Printf("HWM\t%.1fkB [±%.1f kB, %.1f%%]\n", st.meanHwm, st.stdDevHwmBytes, st.stdDevHwmPerc)
	fmt.Printf("Data\t%.1fkB [±%.1f kB, %.1f%%]\n", st.meanData, st.stdDevDataBytes, st.stdDevDataPerc)

	return &st
}

// doTest launches the exporter, scrapes it `iterations` times and returns the
// CPU time consumed (in clock ticks) plus the process's HWM/Data memory.
func doTest(iterations int, fileName, argsFile string) (cpu, hwm, data int64, _ error) {
	cmd, port, collectOutput, err := launchExporter(fileName, argsFile)
	if err != nil {
		return 0, 0, 0, err
	}

	total1 := getCPUTime(cmd.Process.Pid)

	for i := 0; i < iterations; i++ {
		_, err = tryGetMetrics(port)
		if err != nil {
			return 0, 0, 0, errors.Wrapf(err, "Failed to perform test iteration %d.%s", i, collectOutput())
		}

		time.Sleep(1 * time.Millisecond)
	}

	total2 := getCPUTime(cmd.Process.Pid)

	hwm, data = getCPUMem(cmd.Process.Pid)

	err = stopExporter(cmd, collectOutput)
	if err != nil {
		return 0, 0, 0, err
	}

	return total2 - total1, hwm, data, nil
}

// getCPUMem reads VmHWM and VmData (in kB) from /proc/<pid>/status.
// Returns zeros on any read/parse problem (best-effort measurement).
func getCPUMem(pid int) (hwm, data int64) {
	contents, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
	if err != nil {
		return 0, 0
	}

	lines := strings.Split(string(contents), "\n")

	for _, v := range lines {
		if strings.HasPrefix(v, "VmHWM") {
			val := strings.ReplaceAll(strings.ReplaceAll(strings.Split(v, ":\t")[1], " kB", ""), " ", "")
			hwm, _ = strconv.ParseInt(val, 10, 64)
			continue
		}
		if strings.HasPrefix(v, "VmData") {
			val := strings.ReplaceAll(strings.ReplaceAll(strings.Split(v, ":\t")[1], " kB", ""), " ", "")
			data, _ = strconv.ParseInt(val, 10, 64)
			continue
		}
	}

	return hwm, data
}

// getCPUTime returns the sum of fields 14 and 15 of /proc/<pid>/stat
// (presumably utime+stime in clock ticks — confirm against proc(5)).
// NOTE(review): whitespace-based field indexing assumes the comm field
// contains no spaces. Returns 0 if the file cannot be read.
func getCPUTime(pid int) (total int64) {
	contents, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
	if err != nil {
		return
	}
	lines := strings.Split(string(contents), "\n")
	for _, line := range lines {
		fields := strings.Fields(line)
		numFields := len(fields)
		if numFields > 3 {
			i, err := strconv.ParseInt(fields[13], 10, 64)
			if err != nil {
				panic(err)
			}

			totalTime := i

			i, err = strconv.ParseInt(fields[14], 10, 64)
			if err != nil {
				panic(err)
			}

			totalTime += i

			total = totalTime

			// Only the first (and normally only) line is consulted.
			return
		}
	}
	return
}

// printStats prints a three-column old/new/diff summary of the two runs.
func printStats(original, updated *StatsData) {
	fmt.Println()
	fmt.Println("    \told\tnew\tdiff")
	fmt.Printf("CPU, ms \t%.1f\t%.1f\t%+.0f%%\n", original.meanMs, updated.meanMs, calculatePerc(original.meanMs, updated.meanMs))
	fmt.Printf("HWM, kB \t%.1f\t%.1f\t%+.0f%%\n", original.meanHwm, updated.meanHwm, calculatePerc(original.meanHwm, updated.meanHwm))
	fmt.Printf("DATA, kB\t%.1f\t%.1f\t%+.0f%%\n", original.meanData, updated.meanData, calculatePerc(original.meanData, updated.meanData))
	fmt.Println()
}
--------------------------------------------------------------------------------