├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── config.yml │ └── feature_request.md ├── dependabot.yml └── workflows │ ├── build.yml │ ├── codeql-analysis.yml │ ├── helm-workflow.yaml │ └── release.yml ├── .gitignore ├── .promu.yml ├── Dockerfile ├── Dockerfile.multi-arch ├── LICENSE ├── Makefile ├── README.md ├── VERSION ├── cmd └── sql_exporter │ ├── content.go │ ├── log.go │ ├── main.go │ ├── promhttp.go │ └── util.go ├── collector.go ├── config ├── collector_config.go ├── config.go ├── config_test.go ├── global_config.go ├── job_config.go ├── metric_config.go ├── query_config.go ├── secret_config.go ├── target_config.go └── util.go ├── documentation └── sql_exporter.yml ├── drivers.go ├── drivers_gen.go ├── errors └── errors.go ├── examples ├── azure-sql-mi │ ├── grafana-dashboard │ │ ├── azure-sql-mi.json │ │ ├── cpu-and-queuing.png │ │ ├── log-activity.png │ │ ├── memory.png │ │ ├── overview.png │ │ ├── sql-activity.png │ │ └── waits-and-queues.png │ ├── mssql_mi_clerk.collector.yml │ ├── mssql_mi_perf.collector.yml │ ├── mssql_mi_properties.collector.yml │ ├── mssql_mi_size.collector.yml │ ├── mssql_mi_wait.collector.yml │ └── sql_exporter.yml ├── mssql_standard.collector.yml ├── postgres-16.yml └── sql_exporter.yml ├── exporter.go ├── go.mod ├── go.sum ├── helm ├── .gitignore ├── .helmignore ├── Chart.yaml ├── Makefile ├── README.md ├── README.md.gotmpl ├── ci │ ├── helmfile.yaml │ └── postgresql-values.yaml ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── configmap.collectors.yaml │ ├── deployment.yaml │ ├── ingress.yaml │ ├── secret.configuration.yaml │ ├── secret.tls.yaml │ ├── service.yaml │ ├── serviceaccount.yaml │ ├── servicemonitor.yaml │ └── tests │ │ ├── test-connection.yaml │ │ └── test-servicemonitor.yaml └── values.yaml ├── job.go ├── metric.go ├── packaging ├── conf │ ├── nfpm.yaml │ └── sql_exporter.default ├── deb │ ├── postinstall │ └── sql_exporter.service └── rpm │ ├── postinstall │ └── sql_exporter.service ├── query.go ├── reload.go ├── sql.go └── target.go /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Configuration** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Additional context** 27 | Add any other context about the problem here. 28 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Questions and Answers 4 | url: https://github.com/burningalchemist/sql_exporter/discussions 5 | about: Please ask your questions on installation and usage here. 
6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an improvement for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | schedule: 6 | interval: "monthly" 7 | labels: 8 | - "dependencies" 9 | commit-message: 10 | prefix: "deps" 11 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | build: 13 | if: ${{ !startsWith(github.event.head_commit.message, 'docs:') }} 14 | name: Build 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Set up Go 1.x 18 | uses: actions/setup-go@v5 19 | with: 20 | go-version: ^1.24 21 | check-latest: true 22 | id: go 23 | - name: Check out code into the Go module directory 24 | uses: actions/checkout@v4 25 | - name: Style 26 | run: make style 27 | - name: Vet 28 | run: make vet 29 | - name: Test 30 | run: make test 31 | - name: Build 32 | run: make build 33 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | # The branches below must be a subset of the branches above 8 | branches: [master] 9 | schedule: 10 | - cron: '0 23 * * 5' 11 | 12 | jobs: 13 | analyze: 14 | if: | 15 | ${{ !startsWith(github.event.head_commit.message, 'docs:') }} || 16 | ${{ !startsWith(github.event.head_commit.message, 'build:') }} 17 | name: Analyze 18 | runs-on: ubuntu-latest 19 | 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | # Override automatic language detection by changing the below list 24 | # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] 25 | language: ['go'] 26 | # Learn more... 27 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 28 | 29 | steps: 30 | - name: Checkout repository 31 | uses: actions/checkout@v4 32 | with: 33 | # We must fetch at least the immediate parents so that if this is 34 | # a pull request then we can checkout the head. 
35 | fetch-depth: 2 36 | 37 | - name: Set up Go 38 | uses: actions/setup-go@v5 39 | with: 40 | go-version: ^1.20 41 | 42 | # Initializes the CodeQL tools for scanning. 43 | - name: Initialize CodeQL 44 | uses: github/codeql-action/init@v3 45 | with: 46 | languages: ${{ matrix.language }} 47 | 48 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 49 | # If this step fails, then you should remove it and run the build manually (see below) 50 | - name: Autobuild 51 | uses: github/codeql-action/autobuild@v3 52 | 53 | # ℹ️ Command-line programs to run using the OS shell. 54 | # 📚 https://git.io/JvXDl 55 | 56 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 57 | # and modify them (or add more) to build your code if your project 58 | # uses a compiled language 59 | 60 | #- run: | 61 | # make bootstrap 62 | # make release 63 | 64 | - name: Perform CodeQL Analysis 65 | uses: github/codeql-action/analyze@v3 66 | -------------------------------------------------------------------------------- /.github/workflows/helm-workflow.yaml: -------------------------------------------------------------------------------- 1 | name: Helm 2 | on: 3 | push: 4 | branches: 5 | - master 6 | pull_request: 7 | branches: 8 | - master 9 | env: 10 | HELM_VERSION: 3.12.1 11 | PYTHON_VERSION: 3.9 12 | TARGET_BRANCH: chart-testing-target-branch 13 | TARGET_REMOTE: test 14 | jobs: 15 | helm-jobs: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | - name: Set up Helm 23 | uses: azure/setup-helm@v4 24 | with: 25 | version: "v${{ env.HELM_VERSION }}" 26 | - uses: actions/setup-python@v5 27 | with: 28 | python-version: ${{ env.PYTHON_VERSION }} 29 | check-latest: true 30 | # --------------------------------------------------------------- 31 | # -- Instead of comparing to the master branch, I'm getting 32 | # -- the commit hash set in the previous step from a 33 | # -- currently released chart. If it doesn't exists, then 34 | # -- I assume that chart is not released and compare to the 35 | # -- previous commit 36 | # -- 37 | # -- Also, I'm setting the RepoURL here. Since we plan to support 38 | # -- the official chart in this git repository, the helm 39 | # -- repository is expected to belong to this repo as well. 40 | # --------------------------------------------------------------- 41 | - name: Retrieve the latest commit sha from the helm chart 42 | run: | 43 | HELM_REPO_URL="https://${GITHUB_REPOSITORY_OWNER}.github.io/${GITHUB_REPOSITORY#*/}" 44 | if helm repo add sql-exporter $HELM_REPO_URL 45 | then 46 | helm repo update 47 | echo "TARGET_COMMIT=$(helm show chart sql-exporter/sql-exporter | yq '.annotations.git/commit-sha')" >> "${GITHUB_ENV}" 48 | else 49 | echo "TARGET_COMMIT=$(git show HEAD^1 --pretty=format:%H --no-patch)" >> "${GITHUB_ENV}" 50 | fi 51 | # --------------------------------------------------------------- 52 | # -- As I could find CT doesn't support testing against commits 53 | # -- directly, so I'm creating a new fake remote from a commit 54 | # -- and testing the chart against it. 
This workaround doesn't 55 | # -- support maintainers validation, but we have it disabled 56 | # -- anyway 57 | # --------------------------------------------------------------- 58 | - name: Prepare a dummy remote to test the chart 59 | run: | 60 | DUMMY_REMOTE=$(mktemp -d) 61 | git init "${DUMMY_REMOTE}" 62 | git remote add "${TARGET_REMOTE}" "${DUMMY_REMOTE}" 63 | git checkout -b "${TARGET_BRANCH}" "${TARGET_COMMIT}" 64 | git push --set-upstream "${TARGET_REMOTE}" "${TARGET_BRANCH}" 65 | git checkout "${GITHUB_SHA}" 66 | - name: Set up chart-testing 67 | uses: helm/chart-testing-action@v2.6.0 68 | - name: Run chart-testing (list-changed) 69 | id: list-changed 70 | run: | 71 | changed=$(ct list-changed --chart-dirs . --target-branch "${TARGET_BRANCH}" --remote "${TARGET_REMOTE}") 72 | if [[ -n "$changed" ]]; then 73 | echo "changed=true" >> "$GITHUB_OUTPUT" 74 | fi 75 | - name: Run chart-testing (lint) 76 | if: steps.list-changed.outputs.changed == 'true' 77 | run: ct lint --target-branch "${TARGET_BRANCH}" --remote "${TARGET_REMOTE}" --validate-maintainers=false --chart-dirs . 78 | - name: Setup helmfile 79 | if: steps.list-changed.outputs.changed == 'true' 80 | uses: mamezou-tech/setup-helmfile@v2.0.0 81 | - name: Create kind cluster 82 | if: steps.list-changed.outputs.changed == 'true' 83 | uses: helm/kind-action@v1.9.0 84 | - name: Init postgres server 85 | if: steps.list-changed.outputs.changed == 'true' 86 | run: | 87 | helmfile -f helm/ci/helmfile.yaml sync 88 | - name: Run chart-testing (install) 89 | if: steps.list-changed.outputs.changed == 'true' 90 | run: ct install --target-branch "${TARGET_BRANCH}" --remote "${TARGET_REMOTE}" --chart-dirs . 91 | - name: Run chart-testing (upgrade) 92 | if: steps.list-changed.outputs.changed == 'true' 93 | run: ct install --target-branch "${TARGET_BRANCH}" --remote "${TARGET_REMOTE}" --chart-dirs . --upgrade 94 | - name: Configure Git 95 | run: | 96 | git config user.name "$GITHUB_ACTOR" 97 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 98 | # --------------------------------------------------------------- 99 | # -- On each run we're setting an annotation with the current 100 | # -- commit hash, so in case it's released, we will see it 101 | # -- running `$ helm show sql-exporter/sql-exporter` 102 | # --------------------------------------------------------------- 103 | - name: Set the git sha annotations in the helm chart 104 | run: yq -i ".annotations.git/commit-sha = \"${GITHUB_SHA}\"" ./helm/Chart.yaml 105 | 106 | - name: Release charts 107 | if: ${{ github.event.repository.default_branch && github.event_name == 'push' }} 108 | uses: helm/chart-releaser-action@main 109 | with: 110 | charts_dir: . 
111 | mark_as_latest: false 112 | packages_with_index: true 113 | env: 114 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 115 | CR_RELEASE_NAME_TEMPLATE: "chart-{{ .Name }}-{{ .Version }}" 116 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Build and Publish artifacts 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - '*.*.*' 8 | 9 | jobs: 10 | build: 11 | name: Build and upload artifacts 12 | runs-on: ubuntu-latest 13 | env: 14 | VERSION: ${{ github.ref_name }} 15 | steps: 16 | - name: Set up Go 1.x 17 | uses: actions/setup-go@v5 18 | with: 19 | go-version: ^1.24 20 | check-latest: true 21 | - name: Check out code into the Go module directory 22 | uses: actions/checkout@v4 23 | 24 | - name: Promu - Crossbuild 25 | run: make crossbuild 26 | 27 | - name: Promu - Prepare packages 28 | run: make crossbuild-tarballs 29 | 30 | - name: Create deb package 31 | uses: burningalchemist/action-gh-nfpm@v1 32 | with: 33 | packager: deb 34 | config: packaging/conf/nfpm.yaml 35 | target: .tarballs/ 36 | - name: Create rpm package 37 | uses: burningalchemist/action-gh-nfpm@v1 38 | with: 39 | packager: rpm 40 | config: packaging/conf/nfpm.yaml 41 | target: .tarballs/ 42 | 43 | - name: Calculate checksums 44 | run: make crossbuild-checksum 45 | 46 | - name: Upload artifacts 47 | uses: softprops/action-gh-release@v2 48 | with: 49 | files: | 50 | .tarballs/* 51 | docker: 52 | name: Push Docker image to Docker Hub 53 | runs-on: ubuntu-latest 54 | needs: build 55 | steps: 56 | - name: Check out the repo 57 | uses: actions/checkout@v4 58 | 59 | - name: Get Go version from the '.promu.yml' config 60 | id: promu-go-version 61 | run: printf "version=%s" "$(yq '.go.version' .promu.yml)" >> $GITHUB_OUTPUT 62 | 63 | - name: Set up QEMU 64 | uses: docker/setup-qemu-action@v3 65 | 66 | - name: Set up Docker Buildx 67 | uses: docker/setup-buildx-action@v3 68 | 69 | - name: Log in to Docker Hub 70 | uses: docker/login-action@v3 71 | with: 72 | username: ${{ secrets.DOCKER_USERNAME }} 73 | password: ${{ secrets.DOCKER_TOKEN }} 74 | 75 | - name: Extract metadata (tags, labels) for Docker 76 | id: meta 77 | uses: docker/metadata-action@v5 78 | with: 79 | images: burningalchemist/sql_exporter 80 | tags: | 81 | type=semver,pattern={{version}} 82 | type=semver,pattern={{major}}.{{minor}} 83 | 84 | - name: Build and push Docker image 85 | uses: docker/build-push-action@v5 86 | with: 87 | context: . 
88 | file: Dockerfile.multi-arch 89 | push: true 90 | tags: ${{ steps.meta.outputs.tags }} 91 | labels: ${{ steps.meta.outputs.labels }} 92 | platforms: linux/amd64,linux/arm64 93 | provenance: false 94 | build-args: | 95 | GOVERSION=${{ steps.promu-go-version.outputs.version }} 96 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.project 2 | /.settings 3 | /.vscode 4 | /sql_exporter 5 | /sql_exporter.yml 6 | .idea/* 7 | -------------------------------------------------------------------------------- /.promu.yml: -------------------------------------------------------------------------------- 1 | go: 2 | cgo: false 3 | version: 1.24 4 | repository: 5 | path: github.com/burningalchemist/sql_exporter 6 | build: 7 | binaries: 8 | - name: sql_exporter 9 | path: ./cmd/sql_exporter 10 | flags: -a -tags netgo 11 | ldflags: | 12 | -X github.com/prometheus/common/version.Version={{.Version}} 13 | -X github.com/prometheus/common/version.Revision={{.Revision}} 14 | -X github.com/prometheus/common/version.Branch={{.Branch}} 15 | -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} 16 | -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} 17 | -s 18 | -w 19 | tarball: 20 | files: 21 | - LICENSE 22 | - README.md 23 | - examples/sql_exporter.yml 24 | - examples/mssql_standard.collector.yml 25 | crossbuild: 26 | platforms: 27 | - linux/amd64 28 | - darwin/amd64 29 | - windows/amd64 30 | - linux/armv7 31 | - linux/arm64 32 | - darwin/arm64 33 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ARCH="amd64" 2 | ARG OS="linux" 3 | FROM quay.io/prometheus/golang-builder AS builder 4 | 5 | # Get sql_exporter 6 | ADD . /go/src/github.com/burningalchemist/sql_exporter 7 | WORKDIR /go/src/github.com/burningalchemist/sql_exporter 8 | 9 | # Do makefile 10 | RUN make 11 | 12 | # Make image and copy build sql_exporter 13 | FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest 14 | LABEL maintainer="The Prometheus Authors " 15 | COPY --from=builder /go/src/github.com/burningalchemist/sql_exporter/sql_exporter /bin/sql_exporter 16 | 17 | EXPOSE 9399 18 | USER nobody 19 | ENTRYPOINT [ "/bin/sql_exporter" ] 20 | -------------------------------------------------------------------------------- /Dockerfile.multi-arch: -------------------------------------------------------------------------------- 1 | ARG GOVERSION=latest 2 | 3 | FROM --platform=$BUILDPLATFORM quay.io/prometheus/golang-builder:${GOVERSION}-main AS builder 4 | 5 | # Get sql_exporter 6 | ADD . 
/go/src/github.com/burningalchemist/sql_exporter 7 | WORKDIR /go/src/github.com/burningalchemist/sql_exporter 8 | 9 | # Do makefile 10 | ARG TARGETOS 11 | ARG TARGETARCH 12 | 13 | RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make 14 | 15 | # Make image and copy build sql_exporter 16 | FROM --platform=$TARGETPLATFORM quay.io/prometheus/busybox:latest 17 | LABEL maintainer="The Prometheus Authors " 18 | COPY --from=builder /go/src/github.com/burningalchemist/sql_exporter/sql_exporter /bin/sql_exporter 19 | 20 | EXPOSE 9399 21 | USER nobody 22 | ENTRYPOINT [ "/bin/sql_exporter" ] 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Sergei Zyubin 4 | Copyright (c) 2017 Alin Sinpalean 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2015 The Prometheus Authors 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | # To distinguish between native Windows and Windows Subsystem for Linux (WSL), 15 | # we have to check how PATH is separated. For WSL and Unix-based systems it's 16 | # a colon; for native Windows it's a semicolon. 17 | ifeq '$(findstring ;,$(PATH))' ';' 18 | GOPATH = $(firstword $(subst ;, ,$(shell $(GO) env GOPATH))) 19 | PREFIX = $(shell cd) 20 | endif 21 | 22 | GO := go 23 | GOPATH ?= $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) 24 | PROMU := $(GOPATH)/bin/promu 25 | PROMU_VERSION := v0.17.0 26 | pkgs = $(shell $(GO) list ./... 
| grep -v /vendor/) 27 | 28 | PREFIX ?= $(shell pwd) 29 | BIN_DIR ?= $(shell pwd) 30 | DOCKER_IMAGE_NAME ?= sql-exporter 31 | DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) 32 | 33 | 34 | all: format build test 35 | 36 | style: 37 | @echo ">> checking code style" 38 | @! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' 39 | 40 | test: 41 | @echo ">> running tests" 42 | @$(GO) test -short $(pkgs) 43 | 44 | format: 45 | @echo ">> formatting code" 46 | @$(GO) fmt $(pkgs) 47 | 48 | vet: 49 | @echo ">> vetting code" 50 | @$(GO) vet $(pkgs) 51 | 52 | build: promu 53 | @echo ">> building binaries" 54 | @$(PROMU) build --prefix $(PREFIX) 55 | 56 | drivers-%: 57 | @echo ">> generating drivers.go with selected drivers" 58 | @$(GO) get github.com/dave/jennifer/jen 59 | @$(GO) run drivers_gen.go -- $* 60 | @$(GO) get ./... 61 | @$(GO) mod tidy 62 | 63 | tarball: promu 64 | @echo ">> building release tarball" 65 | @$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) 66 | 67 | crossbuild: promu 68 | @echo ">> building crossbuild release" 69 | @$(PROMU) crossbuild 70 | 71 | crossbuild-tarballs: promu 72 | @echo ">> building crossbuild release tarballs" 73 | @$(PROMU) crossbuild tarballs 74 | 75 | crossbuild-checksum: promu 76 | @echo ">> calculating checksums for released packages" 77 | @$(PROMU) checksum .tarballs 78 | 79 | crossbuild-release: promu crossbuild crossbuild-tarballs crossbuild-checksum 80 | 81 | docker: 82 | @echo ">> building docker image" 83 | @docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . 84 | 85 | # Override for native Windows, where the path separator is a semicolon. 86 | ifeq '$(findstring ;,$(PATH))' ';' 87 | promu: 88 | @set GOOS=windows 89 | @set GOARCH=$(subst AMD64,amd64,$(patsubst i%86,386,$(shell echo %PROCESSOR_ARCHITECTURE%))) 90 | @$(GO) install github.com/prometheus/promu@$(PROMU_VERSION) 91 | else 92 | promu: 93 | @GOOS=$(shell uname -s | tr A-Z a-z) \ 94 | GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \ 95 | $(GO) install github.com/prometheus/promu@$(PROMU_VERSION) 96 | endif 97 | 98 | .PHONY: all style format build test vet tarball docker promu 99 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 0.17.3 2 | -------------------------------------------------------------------------------- /cmd/sql_exporter/content.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "html/template" 6 | "net/http" 7 | 8 | "github.com/burningalchemist/sql_exporter" 9 | "github.com/prometheus/common/version" 10 | ) 11 | 12 | const ( 13 | docsURL = "https://github.com/burningalchemist/sql_exporter#readme" 14 | templates = ` 15 | {{ define "page" -}} 16 | 17 | 18 | Prometheus SQL Exporter 19 | 33 | 34 | 35 | 41 | {{template "content" .}} 42 | 43 | 44 | {{- end }} 45 | 46 | {{ define "content.home" -}} 47 |

<p>This is a <a href="{{ .DocsURL }}">Prometheus SQL Exporter</a> instance. 
 48 | You are probably looking for its <a href="{{ .MetricsPath }}">metrics handler</a>.</p>

49 | {{- end }} 50 | 51 | {{ define "content.config" -}} 52 |

<h2>Configuration</h2> 
 53 | <pre>{{ .Config }}</pre>
54 | {{- end }} 55 | 56 | {{ define "content.error" -}} 57 |

<h2>Error</h2> 
 58 | <pre>{{ .Err }}</pre>
59 | {{- end }} 60 | ` 61 | ) 62 | 63 | type tdata struct { 64 | MetricsPath string 65 | DocsURL string 66 | Version string 67 | 68 | // `/config` only 69 | Config string 70 | 71 | // `/error` only 72 | Err error 73 | } 74 | 75 | var ( 76 | allTemplates = template.Must(template.New("").Parse(templates)) 77 | homeTemplate = pageTemplate("home") 78 | configTemplate = pageTemplate("config") 79 | errorTemplate = pageTemplate("error") 80 | ) 81 | 82 | func pageTemplate(name string) *template.Template { 83 | pageTemplate := fmt.Sprintf(`{{define "content"}}{{template "content.%s" .}}{{end}}{{template "page" .}}`, name) 84 | return template.Must(template.Must(allTemplates.Clone()).Parse(pageTemplate)) 85 | } 86 | 87 | // HomeHandlerFunc is the HTTP handler for the home page (`/`). 88 | func HomeHandlerFunc(metricsPath string) func(http.ResponseWriter, *http.Request) { 89 | return func(w http.ResponseWriter, r *http.Request) { 90 | _ = homeTemplate.Execute(w, &tdata{ 91 | MetricsPath: metricsPath, 92 | DocsURL: docsURL, 93 | Version: version.Version, 94 | }) 95 | } 96 | } 97 | 98 | // ConfigHandlerFunc is the HTTP handler for the `/config` page. It outputs the configuration marshaled in YAML format. 99 | func ConfigHandlerFunc(metricsPath string, exporter sql_exporter.Exporter) func(http.ResponseWriter, *http.Request) { 100 | return func(w http.ResponseWriter, r *http.Request) { 101 | config, err := exporter.Config().YAML() 102 | if err != nil { 103 | HandleError(err, metricsPath, w, r) 104 | return 105 | } 106 | _ = configTemplate.Execute(w, &tdata{ 107 | MetricsPath: metricsPath, 108 | DocsURL: docsURL, 109 | Version: version.Version, 110 | Config: string(config), 111 | }) 112 | } 113 | } 114 | 115 | // HandleError is an error handler that other handlers defer to in case of error. It is important to not have written 116 | // anything to w before calling HandleError(), or the 500 status code won't be set (and the content might be mixed up). 117 | func HandleError(err error, metricsPath string, w http.ResponseWriter, r *http.Request) { 118 | w.WriteHeader(http.StatusInternalServerError) 119 | _ = errorTemplate.Execute(w, &tdata{ 120 | MetricsPath: metricsPath, 121 | DocsURL: docsURL, 122 | Version: version.Version, 123 | Err: err, 124 | }) 125 | } 126 | -------------------------------------------------------------------------------- /cmd/sql_exporter/log.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "os" 7 | 8 | "github.com/prometheus/common/promslog" 9 | ) 10 | 11 | type logConfig struct { 12 | logger *slog.Logger 13 | logFileHandler *os.File 14 | } 15 | 16 | // initLogFile opens the log file for writing if a log file is specified. 17 | func initLogFile(logFile string) (*os.File, error) { 18 | if logFile == "" { 19 | return nil, nil 20 | } 21 | logFileHandler, err := os.OpenFile(logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644) 22 | if err != nil { 23 | return nil, fmt.Errorf("error opening log file: %w", err) 24 | } 25 | return logFileHandler, nil 26 | } 27 | 28 | // initLogConfig configures and initializes the logging system. 
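// When logFile is empty the logger writes to stderr; otherwise the opened file
// handle is returned inside logConfig so the caller can close it on shutdown.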
29 | func initLogConfig(logLevel, logFormat string, logFile string) (*logConfig, error) { 30 | logFileHandler, err := initLogFile(logFile) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | if logFileHandler == nil { 36 | logFileHandler = os.Stderr 37 | } 38 | 39 | promslogConfig := &promslog.Config{ 40 | Level: promslog.NewLevel(), 41 | Format: promslog.NewFormat(), 42 | Style: promslog.SlogStyle, 43 | Writer: logFileHandler, 44 | } 45 | 46 | if err := promslogConfig.Level.Set(logLevel); err != nil { 47 | return nil, err 48 | } 49 | 50 | if err := promslogConfig.Format.Set(logFormat); err != nil { 51 | return nil, err 52 | } 53 | // Initialize logger. 54 | logger := promslog.New(promslogConfig) 55 | 56 | return &logConfig{ 57 | logger: logger, 58 | logFileHandler: logFileHandler, 59 | }, nil 60 | } 61 | -------------------------------------------------------------------------------- /cmd/sql_exporter/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log/slog" 7 | "net/http" 8 | "os" 9 | "os/signal" 10 | "runtime" 11 | "syscall" 12 | "time" 13 | 14 | "github.com/burningalchemist/sql_exporter" 15 | cfg "github.com/burningalchemist/sql_exporter/config" 16 | _ "github.com/kardianos/minwinsvc" 17 | "github.com/prometheus/client_golang/prometheus" 18 | info "github.com/prometheus/client_golang/prometheus/collectors/version" 19 | "github.com/prometheus/client_golang/prometheus/promhttp" 20 | "github.com/prometheus/common/model" 21 | "github.com/prometheus/common/version" 22 | "github.com/prometheus/exporter-toolkit/web" 23 | ) 24 | 25 | const ( 26 | appName string = "sql_exporter" 27 | 28 | httpReadHeaderTimeout time.Duration = time.Duration(time.Second * 60) 29 | ) 30 | 31 | var ( 32 | showVersion = flag.Bool("version", false, "Print version information") 33 | listenAddress = flag.String("web.listen-address", ":9399", "Address to listen on for web interface and telemetry") 34 | metricsPath = flag.String("web.metrics-path", "/metrics", "Path under which to expose metrics") 35 | enableReload = flag.Bool("web.enable-reload", false, "Enable reload collector data handler") 36 | webConfigFile = flag.String("web.config.file", "", "[EXPERIMENTAL] TLS/BasicAuth configuration file path") 37 | configFile = flag.String("config.file", "sql_exporter.yml", "SQL Exporter configuration file path") 38 | configCheck = flag.Bool("config.check", false, "Check configuration and exit") 39 | logFormat = flag.String("log.format", "logfmt", "Set log output format") 40 | logLevel = flag.String("log.level", "info", "Set log level") 41 | logFile = flag.String("log.file", "", "Log file to write to, leave empty to write to stderr") 42 | ) 43 | 44 | func init() { 45 | prometheus.MustRegister(info.NewCollector("sql_exporter")) 46 | flag.BoolVar(&cfg.EnablePing, "config.enable-ping", true, "Enable ping for targets") 47 | flag.BoolVar(&cfg.IgnoreMissingVals, "config.ignore-missing-values", false, "[EXPERIMENTAL] Ignore results with missing values for the requested columns") 48 | flag.StringVar(&cfg.DsnOverride, "config.data-source-name", "", "Data source name to override the value in the configuration file with") 49 | flag.StringVar(&cfg.TargetLabel, "config.target-label", "target", "Target label name") 50 | } 51 | 52 | func main() { 53 | if os.Getenv(cfg.EnvDebug) != "" { 54 | runtime.SetBlockProfileRate(1) 55 | runtime.SetMutexProfileFraction(1) 56 | } 57 | 58 | flag.Parse() 59 | 60 | // Show version and exit. 
61 | if *showVersion { 62 | fmt.Println(version.Print(appName)) 63 | os.Exit(0) 64 | } 65 | 66 | // Setup logging. 67 | logConfig, err := initLogConfig(*logLevel, *logFormat, *logFile) 68 | if err != nil { 69 | fmt.Printf("Error initializing exporter: %s\n", err) 70 | os.Exit(1) 71 | } 72 | 73 | defer func() { 74 | if logConfig.logFileHandler != nil { 75 | logConfig.logFileHandler.Close() 76 | } 77 | }() 78 | 79 | slog.SetDefault(logConfig.logger) 80 | 81 | // Override the config.file default with the SQLEXPORTER_CONFIG environment variable if set. 82 | if val, ok := os.LookupEnv(cfg.EnvConfigFile); ok { 83 | *configFile = val 84 | } 85 | 86 | if *configCheck { 87 | slog.Info("Checking configuration file", "configFile", *configFile) 88 | if _, err := cfg.Load(*configFile); err != nil { 89 | slog.Error("Configuration check failed", "error", err) 90 | os.Exit(1) 91 | } 92 | slog.Info("Configuration check successful") 93 | os.Exit(0) 94 | } 95 | 96 | slog.Warn("Starting SQL exporter", "versionInfo", version.Info(), "buildContext", version.BuildContext()) 97 | exporter, err := sql_exporter.NewExporter(*configFile) 98 | if err != nil { 99 | slog.Error("Error creating exporter", "error", err) 100 | os.Exit(1) 101 | } 102 | 103 | // Start the scrape_errors_total metric drop ticker if configured. 104 | startScrapeErrorsDropTicker(exporter, exporter.Config().Globals.ScrapeErrorDropInterval) 105 | 106 | // Start signal handler to reload collector and target data. 107 | signalHandler(exporter, *configFile) 108 | 109 | // Setup and start webserver. 110 | http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { http.Error(w, "OK", http.StatusOK) }) 111 | http.HandleFunc("/", HomeHandlerFunc(*metricsPath)) 112 | http.HandleFunc("/config", ConfigHandlerFunc(*metricsPath, exporter)) 113 | http.Handle(*metricsPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, ExporterHandlerFor(exporter))) 114 | // Expose exporter metrics separately, for debugging purposes. 115 | http.Handle("/sql_exporter_metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{})) 116 | // Expose refresh handler to reload collectors and targets 117 | if *enableReload { 118 | http.HandleFunc("/reload", reloadHandler(exporter, *configFile)) 119 | } 120 | 121 | server := &http.Server{Addr: *listenAddress, ReadHeaderTimeout: httpReadHeaderTimeout} 122 | if err := web.ListenAndServe(server, &web.FlagConfig{ 123 | WebListenAddresses: &([]string{*listenAddress}), 124 | WebConfigFile: webConfigFile, WebSystemdSocket: OfBool(false), 125 | }, logConfig.logger); err != nil { 126 | slog.Error("Error starting web server", "error", err) 127 | os.Exit(1) 128 | 129 | } 130 | } 131 | 132 | // reloadHandler returns a handler that reloads collector and target data. 133 | func reloadHandler(e sql_exporter.Exporter, configFile string) http.HandlerFunc { 134 | return func(w http.ResponseWriter, r *http.Request) { 135 | if err := sql_exporter.Reload(e, &configFile); err != nil { 136 | slog.Error("Error reloading collector and target data", "error", err) 137 | http.Error(w, err.Error(), http.StatusInternalServerError) 138 | return 139 | } 140 | w.WriteHeader(http.StatusOK) 141 | } 142 | } 143 | 144 | // signalHandler listens for SIGHUP signals and reloads the collector and target data. 
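// A reload can be triggered manually with `kill -HUP <pid>`; reload errors are
// logged but do not terminate the exporter.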
145 | func signalHandler(e sql_exporter.Exporter, configFile string) { 146 | c := make(chan os.Signal, 1) 147 | signal.Notify(c, syscall.SIGHUP) 148 | go func() { 149 | for range c { 150 | if err := sql_exporter.Reload(e, &configFile); err != nil { 151 | slog.Error("Error reloading collector and target data", "error", err) 152 | } 153 | } 154 | }() 155 | } 156 | 157 | // startScrapeErrorsDropTicker starts a ticker that periodically drops scrape error metrics. 158 | func startScrapeErrorsDropTicker(exporter sql_exporter.Exporter, interval model.Duration) { 159 | if interval <= 0 { 160 | return 161 | } 162 | 163 | ticker := time.NewTicker(time.Duration(interval)) 164 | slog.Warn("Started scrape_errors_total metrics drop ticker", "interval", interval) 165 | go func() { 166 | defer ticker.Stop() 167 | for range ticker.C { 168 | exporter.DropErrorMetrics() 169 | } 170 | }() 171 | } 172 | -------------------------------------------------------------------------------- /cmd/sql_exporter/promhttp.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "log/slog" 8 | "net/http" 9 | "strconv" 10 | "time" 11 | 12 | "github.com/burningalchemist/sql_exporter" 13 | "github.com/prometheus/client_golang/prometheus" 14 | "github.com/prometheus/common/expfmt" 15 | ) 16 | 17 | const ( 18 | contentTypeHeader string = "Content-Type" 19 | contentLengthHeader string = "Content-Length" 20 | contentEncodingHeader string = "Content-Encoding" 21 | acceptEncodingHeader string = "Accept-Encoding" 22 | scrapeTimeoutHeader string = "X-Prometheus-Scrape-Timeout-Seconds" 23 | ) 24 | 25 | const ( 26 | prometheusHeaderErr = "Failed to parse timeout from Prometheus header" 27 | noMetricsGathered = "No metrics gathered" 28 | noMetricsEncoded = "No metrics encoded" 29 | ) 30 | 31 | // ExporterHandlerFor returns an http.Handler for the provided Exporter. 32 | func ExporterHandlerFor(exporter sql_exporter.Exporter) http.Handler { 33 | return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { 34 | ctx, cancel := contextFor(req, exporter) 35 | defer cancel() 36 | 37 | // Parse the query params and set the job filters if any 38 | jobFilters := req.URL.Query()["jobs[]"] 39 | exporter.SetJobFilters(jobFilters) 40 | 41 | // Go through prometheus.Gatherers to sanitize and sort metrics. 
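// The context-bound exporter gatherer is combined with SvcRegistry, so metrics
// registered on the exporter's service registry are returned in the same scrape.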
42 | gatherer := prometheus.Gatherers{exporter.WithContext(ctx), sql_exporter.SvcRegistry} 43 | mfs, err := gatherer.Gather() 44 | if err != nil { 45 | switch t := err.(type) { 46 | case prometheus.MultiError: 47 | for _, err := range t { 48 | if errors.Is(err, context.DeadlineExceeded) { 49 | slog.Error("Timeout while collecting metrics", "error", err) 50 | 51 | } else { 52 | slog.Error("Error gathering metrics", "error", err) 53 | } 54 | } 55 | default: 56 | slog.Error("Error gathering metrics", "error", err) 57 | } 58 | if len(mfs) == 0 { 59 | slog.Error("No metrics gathered", "error", err) 60 | http.Error(w, noMetricsGathered+", "+err.Error(), http.StatusInternalServerError) 61 | return 62 | } 63 | } 64 | 65 | contentType := expfmt.Negotiate(req.Header) 66 | buf := getBuf() 67 | defer giveBuf(buf) 68 | writer, encoding := decorateWriter(req, buf) 69 | enc := expfmt.NewEncoder(writer, contentType) 70 | var errs prometheus.MultiError 71 | for _, mf := range mfs { 72 | if err := enc.Encode(mf); err != nil { 73 | errs = append(errs, err) 74 | slog.Error("Error encoding metric family", "name", mf.GetName(), "error", err) 75 | 76 | } 77 | } 78 | if closer, ok := writer.(io.Closer); ok { 79 | closer.Close() 80 | } 81 | if errs.MaybeUnwrap() != nil && buf.Len() == 0 { 82 | slog.Error("No metrics encoded", "error", errs) 83 | http.Error(w, noMetricsEncoded+", "+errs.Error(), http.StatusInternalServerError) 84 | return 85 | } 86 | header := w.Header() 87 | header.Set(contentTypeHeader, string(contentType)) 88 | header.Set(contentLengthHeader, strconv.Itoa(buf.Len())) 89 | if encoding != "" { 90 | header.Set(contentEncodingHeader, encoding) 91 | } 92 | _, _ = w.Write(buf.Bytes()) 93 | }) 94 | } 95 | 96 | func contextFor(req *http.Request, exporter sql_exporter.Exporter) (context.Context, context.CancelFunc) { 97 | timeout := time.Duration(0) 98 | configTimeout := time.Duration(exporter.Config().Globals.ScrapeTimeout) 99 | // If a timeout is provided in the Prometheus header, use it. 100 | if v := req.Header.Get(scrapeTimeoutHeader); v != "" { 101 | timeoutSeconds, err := strconv.ParseFloat(v, 64) 102 | if err != nil { 103 | switch { 104 | case errors.Is(err, strconv.ErrSyntax): 105 | slog.Error("Failed to parse timeout from Prometheus header", "error", err) 106 | case errors.Is(err, strconv.ErrRange): 107 | slog.Error(prometheusHeaderErr, "error", err) 108 | } 109 | } else { 110 | timeout = time.Duration(timeoutSeconds * float64(time.Second)) 111 | 112 | // Subtract the timeout offset, unless the result would be negative or zero. 113 | timeoutOffset := time.Duration(exporter.Config().Globals.TimeoutOffset) 114 | if timeoutOffset > timeout { 115 | slog.Error("global.scrape_timeout_offset is greater than Prometheus' scraping timeout, ignoring", "timeout", timeout, "timeoutOffset", timeoutOffset) 116 | } else { 117 | timeout -= timeoutOffset 118 | } 119 | } 120 | } 121 | 122 | // If the configured scrape timeout is more restrictive, use that instead. 
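// For example, a 15s Prometheus header timeout with the default 0.5s offset yields
// 14.5s; with scrape_timeout: 10s configured, the effective timeout becomes 10s.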
123 | if configTimeout > 0 && (timeout <= 0 || configTimeout < timeout) { 124 | timeout = configTimeout 125 | } 126 | 127 | if timeout <= 0 { 128 | return context.Background(), func() {} 129 | } 130 | return context.WithTimeout(context.Background(), timeout) 131 | } 132 | -------------------------------------------------------------------------------- /cmd/sql_exporter/util.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "io" 7 | "net/http" 8 | "strings" 9 | "sync" 10 | ) 11 | 12 | var bufPool sync.Pool 13 | 14 | func getBuf() *bytes.Buffer { 15 | buf := bufPool.Get() 16 | if buf == nil { 17 | return &bytes.Buffer{} 18 | } 19 | return buf.(*bytes.Buffer) 20 | } 21 | 22 | func giveBuf(buf *bytes.Buffer) { 23 | buf.Reset() 24 | bufPool.Put(buf) 25 | } 26 | 27 | // decorateWriter wraps a writer to handle gzip compression if requested. It 28 | // returns the decorated writer and the appropriate "Content-Encoding" header 29 | // (which is empty if no compression is enabled). 30 | func decorateWriter(request *http.Request, writer io.Writer) (w io.Writer, encoding string) { 31 | header := request.Header.Get(acceptEncodingHeader) 32 | parts := strings.Split(header, ",") 33 | for _, part := range parts { 34 | part := strings.TrimSpace(part) 35 | if part == "gzip" || strings.HasPrefix(part, "gzip;") { 36 | return gzip.NewWriter(writer), "gzip" 37 | } 38 | } 39 | return writer, "" 40 | } 41 | 42 | // LogFunc is an adapter to allow the use of any function as a promhttp.Logger. If f is a function, LogFunc(f) is a 43 | // promhttp.Logger that calls f. 44 | type LogFunc func(args ...interface{}) 45 | 46 | // Println implements promhttp.Logger. 47 | func (log LogFunc) Println(args ...interface{}) { 48 | log(args) 49 | } 50 | 51 | // OfBool returns bool address. 52 | func OfBool(i bool) *bool { 53 | return &i 54 | } 55 | -------------------------------------------------------------------------------- /collector.go: -------------------------------------------------------------------------------- 1 | package sql_exporter 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "fmt" 7 | "log/slog" 8 | "sync" 9 | "time" 10 | 11 | "github.com/burningalchemist/sql_exporter/config" 12 | "github.com/burningalchemist/sql_exporter/errors" 13 | dto "github.com/prometheus/client_model/go" 14 | ) 15 | 16 | // Collector is a self-contained group of SQL queries and metric families to collect from a specific database. It is 17 | // conceptually similar to a prometheus.Collector. 18 | type Collector interface { 19 | // Collect is the equivalent of prometheus.Collector.Collect() but takes a context to run in and a database to run on. 20 | Collect(context.Context, *sql.DB, chan<- Metric) 21 | } 22 | 23 | // collector implements Collector. It wraps a collection of queries, metrics and the database to collect them from. 24 | type collector struct { 25 | config *config.CollectorConfig 26 | queries []*Query 27 | logContext string 28 | } 29 | 30 | // NewCollector returns a new Collector with the given configuration and database. The metrics it creates will all have 31 | // the provided const labels applied. 32 | func NewCollector(logContext string, cc *config.CollectorConfig, constLabels []*dto.LabelPair) (Collector, errors.WithContext) { 33 | logContext = TrimMissingCtx(fmt.Sprintf(`%s,collector=%s`, logContext, cc.Name)) 34 | 35 | // Maps each query to the list of metric families it populates. 
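// Metrics that share a query_ref resolve to the same QueryConfig key, so each
// query is executed once per collection and feeds all of its metric families.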
36 | queryMFs := make(map[*config.QueryConfig][]*MetricFamily, len(cc.Metrics)) 37 | 38 | // Instantiate metric families. 39 | for _, mc := range cc.Metrics { 40 | mf, err := NewMetricFamily(logContext, mc, constLabels) 41 | if err != nil { 42 | return nil, err 43 | } 44 | mfs, found := queryMFs[mc.Query()] 45 | if !found { 46 | mfs = make([]*MetricFamily, 0, 2) 47 | } 48 | queryMFs[mc.Query()] = append(mfs, mf) 49 | } 50 | 51 | // Instantiate queries. 52 | queries := make([]*Query, 0, len(cc.Metrics)) 53 | for qc, mfs := range queryMFs { 54 | q, err := NewQuery(logContext, qc, mfs...) 55 | if err != nil { 56 | return nil, err 57 | } 58 | queries = append(queries, q) 59 | } 60 | 61 | c := collector{ 62 | config: cc, 63 | queries: queries, 64 | logContext: logContext, 65 | } 66 | if c.config.MinInterval > 0 { 67 | slog.Warn("Non-zero min_interval, using cached collector.", "logContext", logContext, "min_interval", c.config.MinInterval) 68 | return newCachingCollector(&c), nil 69 | } 70 | return &c, nil 71 | } 72 | 73 | // Collect implements Collector. 74 | func (c *collector) Collect(ctx context.Context, conn *sql.DB, ch chan<- Metric) { 75 | var wg sync.WaitGroup 76 | wg.Add(len(c.queries)) 77 | for _, q := range c.queries { 78 | go func(q *Query) { 79 | defer wg.Done() 80 | q.Collect(ctx, conn, ch) 81 | }(q) 82 | } 83 | // Only return once all queries have been processed 84 | wg.Wait() 85 | } 86 | 87 | // newCachingCollector returns a new Collector wrapping the provided raw Collector. 88 | func newCachingCollector(rawColl *collector) Collector { 89 | cc := &cachingCollector{ 90 | rawColl: rawColl, 91 | minInterval: time.Duration(rawColl.config.MinInterval), 92 | cacheSem: make(chan time.Time, 1), 93 | } 94 | cc.cacheSem <- time.Time{} 95 | return cc 96 | } 97 | 98 | // Collector with a cache for collected metrics. Only used when min_interval is non-zero. 99 | type cachingCollector struct { 100 | // Underlying collector, which is being cached. 101 | rawColl *collector 102 | // Convenience copy of rawColl.config.MinInterval. 103 | minInterval time.Duration 104 | 105 | // Used as a non=blocking semaphore protecting the cache. The value in the channel is the time of the cached metrics. 106 | cacheSem chan time.Time 107 | // Metrics saved from the last Collect() call. 108 | cache []Metric 109 | } 110 | 111 | // Collect implements Collector. 112 | func (cc *cachingCollector) Collect(ctx context.Context, conn *sql.DB, ch chan<- Metric) { 113 | if ctx.Err() != nil { 114 | ch <- NewInvalidMetric(errors.Wrap(cc.rawColl.logContext, ctx.Err())) 115 | return 116 | } 117 | slog.Debug("Cache size", "length", len(cc.cache)) 118 | collTime := time.Now() 119 | select { 120 | case cacheTime := <-cc.cacheSem: 121 | // Have the lock. 122 | if age := collTime.Sub(cacheTime); age > cc.minInterval || len(cc.cache) == 0 { 123 | // Cache contents are older than minInterval, collect fresh metrics, cache them and pipe them through. 
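// Invalid metrics produced after the context is cancelled are forwarded to the
// caller but never cached.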
124 | slog.Debug("Collecting fresh metrics", "logContext", cc.rawColl.logContext, "min_interval", cc.minInterval.Seconds(), "cache_age", age.Seconds()) 125 | cacheChan := make(chan Metric, capMetricChan) 126 | cc.cache = make([]Metric, 0, len(cc.cache)) 127 | go func() { 128 | cc.rawColl.Collect(ctx, conn, cacheChan) 129 | close(cacheChan) 130 | }() 131 | for metric := range cacheChan { 132 | // catch invalid metrics and return them immediately, don't cache them 133 | if ctx.Err() != nil { 134 | slog.Debug("Context closed, returning invalid metric", "logContext", cc.rawColl.logContext) 135 | ch <- NewInvalidMetric(errors.Wrap(cc.rawColl.logContext, ctx.Err())) 136 | continue 137 | } 138 | 139 | cc.cache = append(cc.cache, metric) 140 | ch <- metric 141 | } 142 | cacheTime = collTime 143 | } else { 144 | slog.Debug("Returning cached metrics", "logContext", cc.rawColl.logContext, "min_interval", cc.minInterval.Seconds(), "cache_age", age.Seconds()) 145 | for _, metric := range cc.cache { 146 | ch <- metric 147 | } 148 | } 149 | // Always replace the value in the semaphore channel. 150 | cc.cacheSem <- cacheTime 151 | 152 | case <-ctx.Done(): 153 | // Context closed, record an error and return 154 | ch <- NewInvalidMetric(errors.Wrap(cc.rawColl.logContext, ctx.Err())) 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /config/collector_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/prometheus/common/model" 7 | ) 8 | 9 | // 10 | // Collectors 11 | // 12 | 13 | // CollectorConfig defines a set of metrics and how they are collected. 14 | type CollectorConfig struct { 15 | Name string `yaml:"collector_name"` // name of this collector 16 | MinInterval model.Duration `yaml:"min_interval,omitempty"` // minimum interval between query executions 17 | Metrics []*MetricConfig `yaml:"metrics"` // metrics/queries defined by this collector 18 | Queries []*QueryConfig `yaml:"queries,omitempty"` // named queries defined by this collector 19 | 20 | // Catches all undefined fields and must be empty after parsing. 21 | XXX map[string]any `yaml:",inline" json:"-"` 22 | } 23 | 24 | // UnmarshalYAML implements the yaml.Unmarshaler interface for CollectorConfig. 25 | func (c *CollectorConfig) UnmarshalYAML(unmarshal func(any) error) error { 26 | // Default to undefined (a negative value) so it can be overridden by the global default when not explicitly set. 27 | c.MinInterval = -1 28 | 29 | type plain CollectorConfig 30 | if err := unmarshal((*plain)(c)); err != nil { 31 | return err 32 | } 33 | 34 | if len(c.Metrics) == 0 { 35 | return fmt.Errorf("no metrics defined for collector %q", c.Name) 36 | } 37 | 38 | // Set metric.query for all metrics: resolve query references (if any) and generate QueryConfigs for literal queries. 39 | queries := make(map[string]*QueryConfig, len(c.Queries)) 40 | for _, query := range c.Queries { 41 | queries[query.Name] = query 42 | } 43 | for _, metric := range c.Metrics { 44 | if metric.QueryRef != "" { 45 | query, found := queries[metric.QueryRef] 46 | if !found { 47 | return fmt.Errorf("unresolved query_ref %q in metric %q of collector %q", metric.QueryRef, metric.Name, c.Name) 48 | } 49 | metric.query = query 50 | query.metrics = append(query.metrics, metric) 51 | } else { 52 | // For literal queries generate a QueryConfig with a name based off collector and metric name. 
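// In the current implementation the generated QueryConfig simply reuses the metric
// name and inherits the metric's NoPreparedStatement setting.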
53 | metric.query = &QueryConfig{ 54 | Name: metric.Name, 55 | Query: metric.QueryLiteral, 56 | NoPreparedStatement: metric.NoPreparedStatement, 57 | } 58 | } 59 | } 60 | 61 | return checkOverflow(c.XXX, "collector") 62 | } 63 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | "os" 8 | "path/filepath" 9 | 10 | "github.com/sethvargo/go-envconfig" 11 | "gopkg.in/yaml.v3" 12 | ) 13 | 14 | // MaxInt32 defines the maximum value of allowed integers 15 | // and serves to help us avoid overflow/wraparound issues. 16 | const MaxInt32 int = 1<<31 - 1 17 | 18 | // EnvPrefix is the prefix for environment variables. 19 | const ( 20 | EnvPrefix string = "SQLEXPORTER_" 21 | 22 | EnvConfigFile string = EnvPrefix + "CONFIG" 23 | EnvDebug string = EnvPrefix + "DEBUG" 24 | ) 25 | 26 | var ( 27 | EnablePing bool 28 | IgnoreMissingVals bool 29 | DsnOverride string 30 | TargetLabel string 31 | ) 32 | 33 | // Load attempts to parse the given config file and return a Config object. 34 | func Load(configFile string) (*Config, error) { 35 | slog.Debug("Loading configuration", "file", configFile) 36 | buf, err := os.ReadFile(configFile) 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | c := Config{configFile: configFile} 42 | err = yaml.Unmarshal(buf, &c) 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | if c.Globals == nil { 48 | return nil, fmt.Errorf("empty or no configuration provided") 49 | } 50 | 51 | return &c, nil 52 | } 53 | 54 | // 55 | // Top-level config 56 | // 57 | 58 | // Config is a collection of jobs and collectors. 59 | type Config struct { 60 | Globals *GlobalConfig `yaml:"global,omitempty" env:", prefix=GLOBAL_"` 61 | CollectorFiles []string `yaml:"collector_files,omitempty" env:"COLLECTOR_FILES"` 62 | Target *TargetConfig `yaml:"target,omitempty" env:", prefix=TARGET_"` 63 | Jobs []*JobConfig `yaml:"jobs,omitempty"` 64 | Collectors []*CollectorConfig `yaml:"collectors,omitempty"` 65 | 66 | configFile string 67 | 68 | // Catches all undefined fields and must be empty after parsing. 69 | XXX map[string]any `yaml:",inline" json:"-"` 70 | } 71 | 72 | // UnmarshalYAML implements the yaml.Unmarshaler interface for Config. 73 | func (c *Config) UnmarshalYAML(unmarshal func(any) error) error { 74 | // unmarshalConfig does the actual unmarshalling 75 | if err := c.unmarshalConfig(unmarshal); err != nil { 76 | return err 77 | } 78 | // Populate global defaults. 79 | if err := c.populateGlobalDefaults(); err != nil { 80 | return err 81 | } 82 | 83 | // Load any externally defined collectors. 84 | if err := c.loadCollectorFiles(); err != nil { 85 | return err 86 | } 87 | 88 | // Process environment variables. 89 | if err := c.processEnvConfig(); err != nil { 90 | return err 91 | } 92 | 93 | // Check required fields 94 | if err := c.checkRequiredFields(); err != nil { 95 | return err 96 | } 97 | 98 | // Populate collector references for the target/jobs. 99 | if err := c.populateCollectorReferences(); err != nil { 100 | return err 101 | } 102 | 103 | return checkOverflow(c.XXX, "config") 104 | } 105 | 106 | // unmarshalConfig unmarshals the config, but does not populate global defaults, process environment variables, or check required fields. 
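// Those steps are applied afterwards by Config.UnmarshalYAML, which calls this first.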
107 | func (c *Config) unmarshalConfig(unmarshal func(any) error) error { 108 | type plain Config 109 | return unmarshal((*plain)(c)) 110 | } 111 | 112 | // populateGlobalDefaults populates any unset global defaults. 113 | func (c *Config) populateGlobalDefaults() error { 114 | if c.Globals == nil { 115 | c.Globals = &GlobalConfig{} 116 | // Force a dummy unmarshall to populate global defaults 117 | return c.Globals.UnmarshalYAML(func(any) error { return nil }) 118 | } 119 | return nil 120 | } 121 | 122 | // processEnvConfig processes environment variables. 123 | func (c *Config) processEnvConfig() error { 124 | return envconfig.ProcessWith(context.Background(), &envconfig.Config{ 125 | Target: c, 126 | Lookuper: envconfig.PrefixLookuper(EnvPrefix, envconfig.OsLookuper()), 127 | DefaultNoInit: true, 128 | DefaultOverwrite: true, 129 | DefaultDelimiter: ";", 130 | }) 131 | } 132 | 133 | // checkRequiredFields checks that all required fields are present. 134 | func (c *Config) checkRequiredFields() error { 135 | if (len(c.Jobs) == 0) == (c.Target == nil) { 136 | return fmt.Errorf("exactly one of `jobs` and `target` must be defined") 137 | } 138 | return nil 139 | } 140 | 141 | // populateCollectorReferences populates collector references for the target/jobs. 142 | func (c *Config) populateCollectorReferences() error { 143 | colls := make(map[string]*CollectorConfig) 144 | for _, coll := range c.Collectors { 145 | if coll.MinInterval < 0 { 146 | coll.MinInterval = c.Globals.MinInterval 147 | } 148 | if _, found := colls[coll.Name]; found { 149 | return fmt.Errorf("duplicate collector name: %s", coll.Name) 150 | } 151 | colls[coll.Name] = coll 152 | } 153 | 154 | if c.Target != nil { 155 | cs, err := resolveCollectorRefs(c.Target.CollectorRefs, colls, "target") 156 | if err != nil { 157 | return err 158 | } 159 | c.Target.collectors = cs 160 | } 161 | 162 | for _, j := range c.Jobs { 163 | cs, err := resolveCollectorRefs(j.CollectorRefs, colls, fmt.Sprintf("job %q", j.Name)) 164 | if err != nil { 165 | return err 166 | } 167 | j.collectors = cs 168 | } 169 | return nil 170 | } 171 | 172 | // YAML marshals the config into YAML format. 173 | func (c *Config) YAML() ([]byte, error) { 174 | return yaml.Marshal(c) 175 | } 176 | 177 | // loadCollectorFiles resolves all collector file globs to files and loads the collectors they define. 178 | func (c *Config) loadCollectorFiles() error { 179 | baseDir := filepath.Dir(c.configFile) 180 | for _, cfglob := range c.CollectorFiles { 181 | // Resolve relative paths by joining them to the configuration file's directory. 182 | if len(cfglob) > 0 && !filepath.IsAbs(cfglob) { 183 | cfglob = filepath.Join(baseDir, cfglob) 184 | } 185 | 186 | // Resolve the glob to actual filenames. 187 | cfs, err := filepath.Glob(cfglob) 188 | slog.Debug("External collector files found", "count", len(cfs), "glob", cfglob) 189 | if err != nil { 190 | // The only error can be a bad pattern. 191 | return fmt.Errorf("error resolving collector files for %s: %w", cfglob, err) 192 | } 193 | 194 | // And load the CollectorConfig defined in each file. 
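// Each file is expected to define a single collector at the top level; a minimal,
// purely illustrative example (field values are hypothetical, not from this repo):
//
//	collector_name: app_stats
//	metrics:
//	  - metric_name: app_rows_total
//	    type: gauge
//	    help: 'Number of rows in mytable.'
//	    values: [count]
//	    query: SELECT COUNT(*) AS count FROM mytable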
195 | for _, cf := range cfs { 196 | buf, err := os.ReadFile(cf) 197 | if err != nil { 198 | return err 199 | } 200 | 201 | cc := CollectorConfig{} 202 | err = yaml.Unmarshal(buf, &cc) 203 | if err != nil { 204 | return err 205 | } 206 | 207 | c.Collectors = append(c.Collectors, &cc) 208 | slog.Debug("Loaded collector", "name", cc.Name, "file", cf) 209 | } 210 | } 211 | 212 | return nil 213 | } 214 | -------------------------------------------------------------------------------- /config/config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestResolveCollectorRefs(t *testing.T) { 9 | colls := map[string]*CollectorConfig{ 10 | "a": {Name: "a"}, 11 | "b": {Name: "b"}, 12 | "c": {Name: "b"}, 13 | "aa": {Name: "aa"}, 14 | } 15 | 16 | t.Run("NoGlobbing", func(t *testing.T) { 17 | crefs := []string{ 18 | "a", 19 | "b", 20 | } 21 | cs, err := resolveCollectorRefs(crefs, colls, "target") 22 | if err != nil { 23 | t.Fatalf("expected no error but got: %v", err) 24 | } 25 | if len(cs) != 2 { 26 | t.Fatalf("expected len(cs)=2 but got len(cs)=%d", len(cs)) 27 | } 28 | expected := []*CollectorConfig{ 29 | colls["a"], 30 | colls["b"], 31 | } 32 | if !reflect.DeepEqual(cs, expected) { 33 | t.Fatalf("expected cs=%v but got cs=%v", expected, cs) 34 | } 35 | }) 36 | 37 | t.Run("Globbing", func(t *testing.T) { 38 | crefs := []string{ 39 | "a*", 40 | "b", 41 | } 42 | cs, err := resolveCollectorRefs(crefs, colls, "target") 43 | if err != nil { 44 | t.Fatalf("expected no error but got: %v", err) 45 | } 46 | if len(cs) != 3 { 47 | t.Fatalf("expected len(cs)=3 but got len(cs)=%d", len(cs)) 48 | } 49 | expected1 := []*CollectorConfig{ 50 | colls["a"], 51 | colls["aa"], 52 | colls["b"], 53 | } 54 | expected2 := []*CollectorConfig{ // filepath.Match() is non-deterministic 55 | colls["aa"], 56 | colls["a"], 57 | colls["b"], 58 | } 59 | if !reflect.DeepEqual(cs, expected1) && !reflect.DeepEqual(cs, expected2) { 60 | t.Fatalf("expected cs=%v or cs=%v but got cs=%v", expected1, expected2, cs) 61 | } 62 | }) 63 | 64 | t.Run("NoCollectorRefs", func(t *testing.T) { 65 | crefs := []string{} 66 | cs, err := resolveCollectorRefs(crefs, colls, "target") 67 | if err != nil { 68 | t.Fatalf("expected no error but got: %v", err) 69 | } 70 | if len(cs) != 0 { 71 | t.Fatalf("expected len(cs)=0 but got len(cs)=%d", len(cs)) 72 | } 73 | }) 74 | 75 | t.Run("UnknownCollector", func(t *testing.T) { 76 | crefs := []string{ 77 | "a", 78 | "x", 79 | } 80 | _, err := resolveCollectorRefs(crefs, colls, "target") 81 | if err == nil { 82 | t.Fatalf("expected error but got none") 83 | } 84 | // TODO: Code should use error types and check with 'errors.Is(err1, err2)'. 85 | expected := "unknown collector \"x\" referenced in target" 86 | if err.Error() != expected { 87 | t.Fatalf("expected err=%q but got err=%q", expected, err.Error()) 88 | } 89 | }) 90 | } 91 | -------------------------------------------------------------------------------- /config/global_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/prometheus/common/model" 8 | ) 9 | 10 | // GlobalConfig contains globally applicable defaults. 
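// A typical global block in sql_exporter.yml (values shown are illustrative):
//
//	global:
//	  scrape_timeout: 10s
//	  scrape_timeout_offset: 500ms
//	  min_interval: 0s
//	  max_connections: 3
//	  max_idle_connections: 3
//	  max_connection_lifetime: 10m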
11 | type GlobalConfig struct { 12 | MinInterval model.Duration `yaml:"min_interval" env:"MIN_INTERVAL"` // minimum interval between query executions, default is 0 13 | ScrapeTimeout model.Duration `yaml:"scrape_timeout" env:"SCRAPE_TIMEOUT"` // per-scrape timeout, global 14 | TimeoutOffset model.Duration `yaml:"scrape_timeout_offset" env:"SCRAPE_TIMEOUT_OFFSET"` // offset to subtract from timeout in seconds 15 | ScrapeErrorDropInterval model.Duration `yaml:"scrape_error_drop_interval" env:"SCRAPE_ERROR_DROP_INTERVAL"` // interval to drop scrape errors from the error counter, default is 0 16 | MaxConnLifetime time.Duration `yaml:"max_connection_lifetime" env:"MAX_CONNECTION_LIFETIME"` // maximum amount of time a connection may be reused to any one target 17 | 18 | MaxConns int `yaml:"max_connections" env:"MAX_CONNECTIONS"` // maximum number of open connections to any one target 19 | MaxIdleConns int `yaml:"max_idle_connections" env:"MAX_IDLE_CONNECTIONS"` // maximum number of idle connections to any one target 20 | 21 | // Catches all undefined fields and must be empty after parsing. 22 | XXX map[string]any `yaml:",inline" json:"-"` 23 | } 24 | 25 | // UnmarshalYAML implements the yaml.Unmarshaler interface for GlobalConfig. 26 | func (g *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error { 27 | // Default to running the queries on every scrape. 28 | g.MinInterval = model.Duration(0) 29 | // Default to 10 seconds, since Prometheus has a 10 second scrape timeout default. 30 | g.ScrapeTimeout = model.Duration(10 * time.Second) 31 | // Default to 0 for scrape error drop interval. 32 | g.ScrapeErrorDropInterval = model.Duration(0) 33 | // Default to .5 seconds. 34 | g.TimeoutOffset = model.Duration(500 * time.Millisecond) 35 | g.MaxConns = 3 36 | g.MaxIdleConns = 3 37 | g.MaxConnLifetime = time.Duration(0) 38 | 39 | type plain GlobalConfig 40 | if err := unmarshal((*plain)(g)); err != nil { 41 | return err 42 | } 43 | 44 | if g.TimeoutOffset <= 0 { 45 | return fmt.Errorf("global.scrape_timeout_offset must be strictly positive, have %s", g.TimeoutOffset) 46 | } 47 | 48 | return checkOverflow(g.XXX, "global") 49 | } 50 | -------------------------------------------------------------------------------- /config/job_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "fmt" 4 | 5 | // 6 | // Jobs 7 | // 8 | 9 | // JobConfig defines a set of collectors to be executed on a set of targets. 10 | type JobConfig struct { 11 | Name string `yaml:"job_name"` // name of this job 12 | CollectorRefs []string `yaml:"collectors"` // names of collectors to apply to all targets in this job 13 | StaticConfigs []*StaticConfig `yaml:"static_configs"` // collections of statically defined targets 14 | 15 | collectors []*CollectorConfig // resolved collector references 16 | 17 | EnablePing *bool `yaml:"enable_ping,omitempty"` // ping the target before executing the collectors 18 | 19 | // Catches all undefined fields and must be empty after parsing. 20 | XXX map[string]any `yaml:",inline" json:"-"` 21 | } 22 | 23 | // Collectors returns the collectors referenced by the job, resolved. 24 | func (j *JobConfig) Collectors() []*CollectorConfig { 25 | return j.collectors 26 | } 27 | 28 | // UnmarshalYAML implements the yaml.Unmarshaler interface for JobConfig. 
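// As a rough sketch (field names taken from JobConfig above and StaticConfig below; the
// job name, collector and DSN are made up for illustration), the YAML it validates looks like:
//
//   jobs:
//     - job_name: db_targets
//       collectors: [mssql_standard]
//       static_configs:
//         - targets:
//             dbserver1: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433'
//           labels:
//             env: dev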
29 | func (j *JobConfig) UnmarshalYAML(unmarshal func(any) error) error { 30 | type plain JobConfig 31 | if err := unmarshal((*plain)(j)); err != nil { 32 | return err 33 | } 34 | 35 | // Check required fields 36 | if j.Name == "" { 37 | return fmt.Errorf("missing name for job %+v", j) 38 | } 39 | if err := checkCollectorRefs(j.CollectorRefs, fmt.Sprintf("job %q", j.Name)); err != nil { 40 | return err 41 | } 42 | 43 | if len(j.StaticConfigs) == 0 { 44 | return fmt.Errorf("no targets defined for job %q", j.Name) 45 | } 46 | 47 | return checkOverflow(j.XXX, "job") 48 | } 49 | 50 | // checkLabelCollisions checks for label collisions between StaticConfig labels and Metric labels. 51 | // 52 | //lint:ignore U1000 - it's unused so far 53 | func (j *JobConfig) checkLabelCollisions() error { 54 | sclabels := make(map[string]any) 55 | for _, s := range j.StaticConfigs { 56 | for _, l := range s.Labels { 57 | sclabels[l] = nil 58 | } 59 | } 60 | 61 | for _, c := range j.collectors { 62 | for _, m := range c.Metrics { 63 | for _, l := range m.KeyLabels { 64 | if _, ok := sclabels[l]; ok { 65 | return fmt.Errorf( 66 | "label collision in job %q: label %q is defined both by a static_config and by metric %q of collector %q", 67 | j.Name, l, m.Name, c.Name) 68 | } 69 | } 70 | } 71 | } 72 | return nil 73 | } 74 | 75 | // StaticConfig defines a set of targets and optional labels to apply to the metrics collected from them. 76 | type StaticConfig struct { 77 | Targets map[string]Secret `yaml:"targets"` // map of target names to data source names 78 | Labels map[string]string `yaml:"labels,omitempty"` // labels to apply to all metrics collected from the targets 79 | 80 | // Catches all undefined fields and must be empty after parsing. 81 | XXX map[string]any `yaml:",inline" json:"-"` 82 | } 83 | 84 | // UnmarshalYAML implements the yaml.Unmarshaler interface for StaticConfig. 85 | func (s *StaticConfig) UnmarshalYAML(unmarshal func(any) error) error { 86 | type plain StaticConfig 87 | if err := unmarshal((*plain)(s)); err != nil { 88 | return err 89 | } 90 | 91 | // Check for empty/duplicate target names/data source names 92 | tnames := make(map[string]any) 93 | dsns := make(map[string]any) 94 | for tname, dsn := range s.Targets { 95 | if tname == "" { 96 | return fmt.Errorf("empty target name in static config %+v", s) 97 | } 98 | if _, ok := tnames[tname]; ok { 99 | return fmt.Errorf("duplicate target name %q in static_config %+v", tname, s) 100 | } 101 | tnames[tname] = nil 102 | if dsn == "" { 103 | return fmt.Errorf("empty data source name in static config %+v", s) 104 | } 105 | if _, ok := dsns[string(dsn)]; ok { 106 | return fmt.Errorf("duplicate data source name %q in static_config %+v", tname, s) 107 | } 108 | dsns[string(dsn)] = nil 109 | } 110 | 111 | return checkOverflow(s.XXX, "static_config") 112 | } 113 | -------------------------------------------------------------------------------- /config/metric_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/prometheus/client_golang/prometheus" 8 | ) 9 | 10 | // MetricConfig defines a Prometheus metric, the SQL query to populate it and the mapping of columns to metric 11 | // keys/values. 
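// For example, the following collector snippet (condensed from documentation/sql_exporter.yml)
// maps onto this struct:
//
//   - metric_name: mssql_log_growths
//     type: counter
//     help: 'Total number of times the transaction log has been expanded since last restart, per database.'
//     key_labels: [db]
//     values: [counter]
//     query: |
//       SELECT rtrim(instance_name) AS db, cntr_value AS counter
//       FROM sys.dm_os_performance_counters
//       WHERE counter_name = 'Log Growths' AND instance_name <> '_Total'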
12 | type MetricConfig struct { 13 | Name string `yaml:"metric_name"` // the Prometheus metric name 14 | TypeString string `yaml:"type"` // the Prometheus metric type 15 | Help string `yaml:"help"` // the Prometheus metric help text 16 | KeyLabels []string `yaml:"key_labels,omitempty"` // expose these columns as labels from SQL 17 | StaticLabels map[string]string `yaml:"static_labels,omitempty"` // fixed key/value pairs as static labels 18 | ValueLabel string `yaml:"value_label,omitempty"` // with multiple value columns, map their names under this label 19 | Values []string `yaml:"values"` // expose each of these columns as a value, keyed by column name 20 | QueryLiteral string `yaml:"query,omitempty"` // a literal query 21 | QueryRef string `yaml:"query_ref,omitempty"` // references a query in the query map 22 | 23 | NoPreparedStatement bool `yaml:"no_prepared_statement,omitempty"` // do not prepare statement 24 | StaticValue *float64 `yaml:"static_value,omitempty"` 25 | TimestampValue string `yaml:"timestamp_value,omitempty"` // optional column name containing a valid timestamp value 26 | 27 | valueType prometheus.ValueType // TypeString converted to prometheus.ValueType 28 | query *QueryConfig // QueryConfig resolved from QueryRef or generated from Query 29 | 30 | // Catches all undefined fields and must be empty after parsing. 31 | XXX map[string]any `yaml:",inline" json:"-"` 32 | } 33 | 34 | // ValueType returns the metric type, converted to a prometheus.ValueType. 35 | func (m *MetricConfig) ValueType() prometheus.ValueType { 36 | return m.valueType 37 | } 38 | 39 | // Query returns the query defined (as a literal) or referenced by the metric. 40 | func (m *MetricConfig) Query() *QueryConfig { 41 | return m.query 42 | } 43 | 44 | // UnmarshalYAML implements the yaml.Unmarshaler interface for MetricConfig. 
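// When several columns are listed under `values`, a `value_label` is required so that each
// column name can be attached as a label; for example (condensed from
// documentation/sql_exporter.yml):
//
//   - metric_name: mssql_io_stall_seconds
//     type: counter
//     help: 'Stall time in seconds per database and I/O operation.'
//     key_labels: [db]
//     value_label: operation
//     values: [io_stall_read, io_stall_write]
//     query_ref: io_stall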
45 | func (m *MetricConfig) UnmarshalYAML(unmarshal func(any) error) error { 46 | type plain MetricConfig 47 | if err := unmarshal((*plain)(m)); err != nil { 48 | return err 49 | } 50 | 51 | if err := m.validateRequiredFields(); err != nil { 52 | return err 53 | } 54 | if err := m.setValueType(); err != nil { 55 | return err 56 | } 57 | if err := m.validateKeyLabels(); err != nil { 58 | return err 59 | } 60 | if err := m.validateValues(); err != nil { 61 | return err 62 | } 63 | 64 | return checkOverflow(m.XXX, "metric") 65 | } 66 | 67 | // Check required fields 68 | func (m *MetricConfig) validateRequiredFields() error { 69 | if m.Name == "" { 70 | return fmt.Errorf("missing name for metric %+v", m) 71 | } 72 | if m.TypeString == "" { 73 | return fmt.Errorf("missing type for metric %q", m.Name) 74 | } 75 | if m.Help == "" { 76 | return fmt.Errorf("missing help for metric %q", m.Name) 77 | } 78 | if (m.QueryLiteral == "") == (m.QueryRef == "") { 79 | return fmt.Errorf("exactly one of query and query_ref must be specified for metric %q", m.Name) 80 | } 81 | 82 | return nil 83 | } 84 | 85 | // Set the metric type 86 | func (m *MetricConfig) setValueType() error { 87 | switch strings.ToLower(m.TypeString) { 88 | case "counter": 89 | m.valueType = prometheus.CounterValue 90 | case "gauge": 91 | m.valueType = prometheus.GaugeValue 92 | default: 93 | return fmt.Errorf("unsupported metric type: %s", m.TypeString) 94 | } 95 | 96 | return nil 97 | } 98 | 99 | // Check for duplicate key labels 100 | func (m *MetricConfig) validateKeyLabels() error { 101 | for i, li := range m.KeyLabels { 102 | if err := checkLabel(li, "metric", m.Name); err != nil { 103 | return err 104 | } 105 | for _, lj := range m.KeyLabels[i+1:] { 106 | if li == lj { 107 | return fmt.Errorf("duplicate key label %q for metric %q", li, m.Name) 108 | } 109 | } 110 | if m.ValueLabel == li { 111 | return fmt.Errorf("duplicate label %q (defined in both key_labels and value_label) for metric %q", li, m.Name) 112 | } 113 | } 114 | 115 | return nil 116 | } 117 | 118 | // Check for duplicate values 119 | func (m *MetricConfig) validateValues() error { 120 | if len(m.Values) == 0 && m.StaticValue == nil { 121 | return fmt.Errorf("no values defined for metric %q", m.Name) 122 | } 123 | 124 | if len(m.Values) > 0 && m.StaticValue != nil { 125 | return fmt.Errorf("metric %q cannot have both static_value and values defined", m.Name) 126 | } 127 | 128 | if len(m.Values) > 1 { 129 | // Multiple value columns but no value label to identify them 130 | if m.ValueLabel == "" { 131 | return fmt.Errorf("value_label must be defined for metric with multiple values %q", m.Name) 132 | } 133 | if err := checkLabel(m.ValueLabel, "value_label for metric", m.Name); err != nil { 134 | return err 135 | } 136 | } 137 | 138 | return nil 139 | } 140 | -------------------------------------------------------------------------------- /config/query_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "fmt" 4 | 5 | // QueryConfig defines a named query, to be referenced by one or multiple metrics. 
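// For example (condensed from documentation/sql_exporter.yml), a named query that two
// metrics reference through `query_ref`:
//
//   queries:
//     - query_name: io_stall
//       query: |
//         SELECT
//           cast(DB_Name(a.database_id) as varchar) AS db,
//           sum(io_stall_read_ms) / 1000.0 AS io_stall_read,
//           sum(io_stall_write_ms) / 1000.0 AS io_stall_write,
//           sum(io_stall) / 1000.0 AS io_stall
//         FROM sys.dm_io_virtual_file_stats(null, null) a
//           INNER JOIN sys.master_files b ON a.database_id = b.database_id AND a.file_id = b.file_id
//         GROUP BY a.database_id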
6 | type QueryConfig struct { 7 | Name string `yaml:"query_name"` // the query name, to be referenced via `query_ref` 8 | Query string `yaml:"query"` // the named query 9 | 10 | NoPreparedStatement bool `yaml:"no_prepared_statement,omitempty"` // do not prepare statement 11 | 12 | metrics []*MetricConfig // metrics referencing this query 13 | 14 | // Catches all undefined fields and must be empty after parsing. 15 | XXX map[string]any `yaml:",inline" json:"-"` 16 | } 17 | 18 | // UnmarshalYAML implements the yaml.Unmarshaler interface for QueryConfig. 19 | func (q *QueryConfig) UnmarshalYAML(unmarshal func(any) error) error { 20 | type plain QueryConfig 21 | if err := unmarshal((*plain)(q)); err != nil { 22 | return err 23 | } 24 | 25 | // Check required fields 26 | if q.Name == "" { 27 | return fmt.Errorf("missing name for query %+v", *q) 28 | } 29 | if q.Query == "" { 30 | return fmt.Errorf("missing query literal for query %q", q.Name) 31 | } 32 | 33 | q.metrics = make([]*MetricConfig, 0, 2) 34 | 35 | return checkOverflow(q.XXX, "metric") 36 | } 37 | -------------------------------------------------------------------------------- /config/secret_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | // Secret special type for storing secrets. 4 | type Secret string 5 | 6 | // UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. 7 | func (s *Secret) UnmarshalYAML(unmarshal func(any) error) error { 8 | type plain Secret 9 | return unmarshal((*plain)(s)) 10 | } 11 | 12 | // MarshalYAML implements the yaml.Marshaler interface for Secrets. 13 | func (s Secret) MarshalYAML() (any, error) { 14 | if s != "" { 15 | return "", nil 16 | } 17 | return nil, nil 18 | } 19 | -------------------------------------------------------------------------------- /config/target_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "log/slog" 8 | "os" 9 | 10 | "github.com/aws/aws-sdk-go-v2/aws" 11 | awsConfig "github.com/aws/aws-sdk-go-v2/config" 12 | "github.com/aws/aws-sdk-go-v2/service/secretsmanager" 13 | ) 14 | 15 | // 16 | // Target 17 | // 18 | 19 | // TargetConfig defines a DSN and a set of collectors to be executed on it. 20 | type TargetConfig struct { 21 | Name string `yaml:"name,omitempty" env:"NAME"` // name of the target 22 | DSN Secret `yaml:"data_source_name" env:"DSN"` // data source name to connect to 23 | AwsSecretName string `yaml:"aws_secret_name" env:"AWS_SECRET_NAME"` // AWS secret name 24 | CollectorRefs []string `yaml:"collectors" env:"COLLECTORS"` // names of collectors to execute on the target 25 | EnablePing *bool `yaml:"enable_ping,omitempty" env:"ENABLE_PING"` // ping the target before executing the collectors 26 | 27 | collectors []*CollectorConfig // resolved collector references 28 | 29 | // Catches all undefined fields and must be empty after parsing. 30 | XXX map[string]any `yaml:",inline" json:"-"` 31 | } 32 | 33 | // Collectors returns the collectors referenced by the target, resolved. 34 | func (t *TargetConfig) Collectors() []*CollectorConfig { 35 | return t.collectors 36 | } 37 | 38 | // UnmarshalYAML implements the yaml.Unmarshaler interface for TargetConfig. 
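// When `aws_secret_name` is set, the DSN is looked up in AWS Secrets Manager rather than
// taken from `data_source_name`; the secret is expected to be a JSON document with a
// `data_source_name` field (see AwsSecret and readDSNFromAwsSecretManager below).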
39 | func (t *TargetConfig) UnmarshalYAML(unmarshal func(any) error) error { 40 | type plain TargetConfig 41 | if err := unmarshal((*plain)(t)); err != nil { 42 | return err 43 | } 44 | 45 | if t.AwsSecretName != "" { 46 | t.DSN = readDSNFromAwsSecretManager(t.AwsSecretName) 47 | } 48 | 49 | // Check required fields 50 | if t.DSN == "" { 51 | return fmt.Errorf("missing data_source_name for target %+v", t) 52 | } 53 | if err := checkCollectorRefs(t.CollectorRefs, "target"); err != nil { 54 | return err 55 | } 56 | 57 | return checkOverflow(t.XXX, "target") 58 | } 59 | 60 | // AWS Secret 61 | type AwsSecret struct { 62 | DSN Secret `json:"data_source_name"` 63 | } 64 | 65 | func readDSNFromAwsSecretManager(secretName string) Secret { 66 | config, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithEC2IMDSRegion()) 67 | if err != nil { 68 | slog.Error("unable to load AWS config", "error", err) 69 | os.Exit(1) 70 | } 71 | 72 | // Create Secrets Manager client 73 | svc := secretsmanager.NewFromConfig(config) 74 | 75 | input := &secretsmanager.GetSecretValueInput{ 76 | SecretId: aws.String(secretName), 77 | VersionStage: aws.String("AWSCURRENT"), // VersionStage defaults to AWSCURRENT if unspecified 78 | } 79 | 80 | slog.Debug("reading AWS Secret", "name", secretName) 81 | result, err := svc.GetSecretValue(context.TODO(), input) 82 | if err != nil { 83 | // For a list of exceptions thrown, see 84 | // https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html 85 | slog.Error("unable to read AWS Secret", "error", err) 86 | os.Exit(1) 87 | } 88 | 89 | // Decrypts secret using the associated KMS key. 90 | var secretString string = *result.SecretString 91 | 92 | var awsSecret AwsSecret 93 | jsonErr := json.Unmarshal([]byte(secretString), &awsSecret) 94 | 95 | if jsonErr != nil { 96 | slog.Error("unable to unmarshal AWS Secret") 97 | os.Exit(1) 98 | } 99 | return Secret(awsSecret.DSN) 100 | } 101 | -------------------------------------------------------------------------------- /config/util.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "path/filepath" 7 | "strings" 8 | ) 9 | 10 | func checkCollectorRefs(collectorRefs []string, ctx string) error { 11 | // At least one collector, no duplicates 12 | if len(collectorRefs) == 0 { 13 | return fmt.Errorf("no collectors defined for %s", ctx) 14 | } 15 | for i, ci := range collectorRefs { 16 | for _, cj := range collectorRefs[i+1:] { 17 | if ci == cj { 18 | return fmt.Errorf("duplicate collector reference %q in %s", ci, ctx) 19 | } 20 | } 21 | } 22 | return nil 23 | } 24 | 25 | func resolveCollectorRefs( 26 | collectorRefs []string, collectors map[string]*CollectorConfig, ctx string, 27 | ) ([]*CollectorConfig, error) { 28 | resolved := make([]*CollectorConfig, 0, len(collectorRefs)) 29 | found := make(map[*CollectorConfig]bool) 30 | for _, cref := range collectorRefs { 31 | cref_resolved := false 32 | for k, c := range collectors { 33 | matched, err := filepath.Match(cref, k) 34 | if err != nil { 35 | return nil, fmt.Errorf("bad collector %q referenced in %s: %w", cref, ctx, err) 36 | } 37 | if matched && !found[c] { 38 | resolved = append(resolved, c) 39 | found[c] = true 40 | cref_resolved = true 41 | } 42 | } 43 | if !cref_resolved { 44 | return nil, fmt.Errorf("unknown collector %q referenced in %s", cref, ctx) 45 | } 46 | } 47 | slog.Debug("Resolved collectors", "context", ctx, "count", len(resolved)) 48 | 
return resolved, nil 49 | } 50 | 51 | func checkLabel(label string, ctx ...string) error { 52 | if label == "" { 53 | return fmt.Errorf("empty label defined in %s", strings.Join(ctx, " ")) 54 | } 55 | if label == "job" || label == TargetLabel { 56 | return fmt.Errorf("reserved label %q redefined in %s", label, strings.Join(ctx, " ")) 57 | } 58 | return nil 59 | } 60 | 61 | func checkOverflow(m map[string]any, ctx string) error { 62 | if len(m) > 0 { 63 | var keys []string 64 | for k := range m { 65 | keys = append(keys, k) 66 | } 67 | return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", ")) 68 | } 69 | return nil 70 | } 71 | -------------------------------------------------------------------------------- /documentation/sql_exporter.yml: -------------------------------------------------------------------------------- 1 | # Global settings and defaults. 2 | global: 3 | # Scrape timeouts ensure that: 4 | # (i) scraping completes in reasonable time and 5 | # (ii) slow queries are canceled early when the database is already under heavy load 6 | # Prometheus informs targets of its own scrape timeout (via the "X-Prometheus-Scrape-Timeout-Seconds" request header) 7 | # so the actual timeout is computed as: 8 | # min(scrape_timeout, X-Prometheus-Scrape-Timeout-Seconds - scrape_timeout_offset) 9 | # 10 | # If scrape_timeout <= 0, no timeout is set unless Prometheus provides one. The default is 10s. 11 | scrape_timeout: 10s 12 | # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. 13 | # 14 | # Must be strictly positive. The default is 500ms. 15 | scrape_timeout_offset: 500ms 16 | # Interval between dropping scrape_errors_total metric: by default (0s) metrics are persistent. 17 | scrape_error_drop_interval: 0s 18 | # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. 19 | min_interval: 0s 20 | # Maximum number of open connections to any one target. Metric queries will run concurrently on multiple connections, 21 | # as will concurrent scrapes. 22 | # 23 | # If max_connections <= 0, then there is no limit on the number of open connections. The default is 3. 24 | max_connections: 3 25 | # Maximum number of idle connections to any one target. Unless you use very long collection intervals, this should 26 | # always be the same as max_connections. 27 | # 28 | # If max_idle_connections <= 0, no idle connections are retained. The default is 3. 29 | max_idle_connections: 3 30 | 31 | # The target to monitor and the collectors to execute on it. 32 | target: 33 | # Target name (optional). Setting this field enables extra metrics e.g. `up` and `scrape_duration` with the `target` 34 | # label that are always returned on a scrape. If set, sql_exporter always returns HTTP 200 with these metrics populated 35 | name: mssql_database 36 | # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) 37 | # the schema gets dropped or replaced to match the driver expected DSN format. 38 | data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433/dbname' 39 | 40 | # Collectors (referenced by name) to execute on the target. 41 | collectors: [mssql_standard] 42 | 43 | # In case you need to connect to a backend that only responds to a limited set of commands (e.g. 
pgbouncer) or
44 |   # a data warehouse you don't want to keep online all the time (due to the extra cost), you might want to disable `ping`.
45 |   enable_ping: true
46 | 
47 | # A collector is a named set of related metrics that are collected together. It can be referenced by name, possibly
48 | # along with other collectors.
49 | #
50 | # Collectors may be defined inline (under `collectors`) or loaded from `collector_files` (one collector per file).
51 | collectors:
52 |   # A collector defining standard metrics for Microsoft SQL Server.
53 |   - collector_name: mssql_standard
54 | 
55 |     # Similar to global.min_interval, but applies to this collector only.
56 |     #min_interval: 0s
57 | 
58 |     # A metric is a Prometheus metric with name, type, help text and (optional) additional labels, paired with exactly
59 |     # one query to populate the metric labels and values from.
60 |     #
61 |     # The result columns conceptually fall into two categories:
62 |     #   * zero or more key columns: their values will be directly mapped to labels of the same name;
63 |     #   * one or more value columns:
64 |     #     * if exactly one value column, the column name is ignored and its value becomes the metric value
65 |     #     * with multiple value columns, a `value_label` must be defined; the column name will populate this label and
66 |     #       the column value will populate the metric value.
67 |     metrics:
68 |       # The metric name, type and help text, as exported to /metrics.
69 |       - metric_name: mssql_log_growths
70 |         # This is a Prometheus counter (monotonically increasing value).
71 |         type: counter
72 |         help: 'Total number of times the transaction log has been expanded since last restart, per database.'
73 |         # Optional set of labels derived from key columns.
74 |         key_labels:
75 |           # Populated from the `db` column of each row.
76 |           - db
77 |         static_labels:
78 |           # Arbitrary key/value pair
79 |           env: dev
80 |           region: europe
81 |         # Optional timestamp_value to point at the existing timestamp column to return a metric with an explicit
82 |         # timestamp.
83 |         # timestamp_value: CreatedAt
84 |         # This query returns exactly one value per row, in the `counter` column.
85 |         values: [counter]
86 |         query: |
87 |           SELECT rtrim(instance_name) AS db, cntr_value AS counter
88 |           FROM sys.dm_os_performance_counters
89 |           WHERE counter_name = 'Log Growths' AND instance_name <> '_Total'
90 | 
91 |       # A different metric, with multiple values produced from each result row.
92 |       - metric_name: mssql_io_stall_seconds
93 |         type: counter
94 |         help: 'Stall time in seconds per database and I/O operation.'
95 |         key_labels:
96 |           # Populated from the `db` column of the result.
97 |           - db
98 |         # Label populated with the value column name, configured via `values` (e.g. `operation="io_stall_read"`).
99 |         #
100 |         # Required when multiple value columns are configured.
101 |         value_label: operation
102 |         # Multiple value columns: their name is recorded in the label defined by `value_label` (e.g.
103 |         # `operation="io_stall_read"`).
104 |         values:
105 |           - io_stall_read
106 |           - io_stall_write
107 |         query_ref: io_stall
108 | 
109 |       # Another metric, using the same named query (referenced through query_ref) as mssql_io_stall_seconds.
110 |       - metric_name: mssql_io_stall_total_seconds
111 |         type: counter
112 |         help: 'Total stall time in seconds per database.'
113 |         key_labels:
114 |           # Populated from the `db` column of the result.
115 |           - db
116 |         # Only one value, populated from the `io_stall` column.
117 | values: 118 | - io_stall 119 | query_ref: io_stall 120 | 121 | # Metric with a static value to retrieve string data. 122 | - metric_name: mssql_hostname 123 | type: gauge 124 | help: 'Database server hostname' 125 | key_labels: 126 | # Populated from the `hostname` column of the result. 127 | - hostname 128 | # Static value, always set to `1`. 129 | static_value: 1 130 | query: | 131 | SELECT @@SERVERNAME AS hostname 132 | 133 | 134 | # Named queries, referenced by one or more metrics, through query_ref. 135 | queries: 136 | # Populates `mssql_io_stall` and `mssql_io_stall_total` 137 | - query_name: io_stall 138 | query: | 139 | SELECT 140 | cast(DB_Name(a.database_id) as varchar) AS db, 141 | sum(io_stall_read_ms) / 1000.0 AS io_stall_read, 142 | sum(io_stall_write_ms) / 1000.0 AS io_stall_write, 143 | sum(io_stall) / 1000.0 AS io_stall 144 | FROM 145 | sys.dm_io_virtual_file_stats(null, null) a 146 | INNER JOIN sys.master_files b ON a.database_id = b.database_id AND a.file_id = b.file_id 147 | GROUP BY a.database_id 148 | 149 | # Collector files specifies a list of globs. One collector definition per file. 150 | collector_files: 151 | - "*.collector.yml" 152 | -------------------------------------------------------------------------------- /drivers.go: -------------------------------------------------------------------------------- 1 | // Code generated by "drivers_gen.go" 2 | 3 | package sql_exporter 4 | 5 | import ( 6 | _ "github.com/ClickHouse/clickhouse-go/v2" 7 | _ "github.com/go-sql-driver/mysql" 8 | _ "github.com/jackc/pgx/v5/stdlib" 9 | _ "github.com/lib/pq" 10 | _ "github.com/microsoft/go-mssqldb/azuread" 11 | _ "github.com/sijms/go-ora/v2" 12 | _ "github.com/snowflakedb/gosnowflake" 13 | _ "github.com/vertica/vertica-sql-go" 14 | ) 15 | -------------------------------------------------------------------------------- /drivers_gen.go: -------------------------------------------------------------------------------- 1 | //go:build ignore 2 | 3 | package main 4 | 5 | import ( 6 | "fmt" 7 | "os" 8 | 9 | . "github.com/dave/jennifer/jen" 10 | ) 11 | 12 | const ( 13 | packageName string = "sql_exporter" 14 | filename string = "drivers.go" 15 | ) 16 | 17 | var driverList = map[string][]string{ 18 | "minimal": { 19 | "github.com/go-sql-driver/mysql", 20 | "github.com/lib/pq", 21 | "github.com/microsoft/go-mssqldb/azuread", 22 | }, 23 | "extra": { 24 | "github.com/ClickHouse/clickhouse-go/v2", 25 | "github.com/jackc/pgx/v5/stdlib", 26 | "github.com/snowflakedb/gosnowflake", 27 | "github.com/vertica/vertica-sql-go", 28 | "github.com/sijms/go-ora/v2", 29 | }, 30 | "custom": { 31 | "github.com/mithrandie/csvq-driver", 32 | }, 33 | } 34 | 35 | func main() { 36 | var enabledDrivers []string 37 | 38 | args := os.Args[2:] 39 | 40 | if args[0] == "all" { 41 | for k := range driverList { 42 | if k != "custom" { 43 | enabledDrivers = append(enabledDrivers, driverList[k]...) 44 | } 45 | } 46 | } else { 47 | var ok bool 48 | enabledDrivers, ok = driverList[args[0]] 49 | if !ok { 50 | fmt.Printf("Nonexistent key. Do nothing.\n") 51 | os.Exit(0) 52 | } 53 | } 54 | 55 | f := NewFile(packageName) 56 | f.HeaderComment("// Code generated by \"drivers_gen.go\"") 57 | f.Anon(enabledDrivers...) 
58 | fmt.Println("Following drivers are to be added:") 59 | 60 | for _, v := range enabledDrivers { 61 | fmt.Printf("> %s\n", v) 62 | } 63 | 64 | fmt.Printf("Save to '%s'\n", filename) 65 | if err := f.Save(filename); err != nil { 66 | fmt.Println(err.Error()) 67 | os.Exit(1) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /errors/errors.go: -------------------------------------------------------------------------------- 1 | package errors 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // WithContext is an error associated with a logging context string (e.g. `job="foo", instance="bar"`). It is formatted 8 | // as: 9 | // 10 | // fmt.Sprintf("[%s] %s", Context(), RawError()) 11 | type WithContext interface { 12 | error 13 | 14 | Context() string 15 | RawError() error 16 | Unwrap() error 17 | } 18 | 19 | // withContext implements WithContext. 20 | type withContext struct { 21 | context string 22 | err error 23 | } 24 | 25 | // New creates a new WithContext. 26 | func New(context string, err string) WithContext { 27 | return &withContext{context, fmt.Errorf(err)} 28 | } 29 | 30 | // Errorf formats according to a format specifier and returns a new WithContext. 31 | func Errorf(context, format string, a ...any) WithContext { 32 | return &withContext{context, fmt.Errorf(format, a...)} 33 | } 34 | 35 | // Wrap returns a WithContext wrapping err. If err is nil, it returns nil. If err is a WithContext, it is returned 36 | // unchanged. 37 | func Wrap(context string, err error) WithContext { 38 | if err == nil { 39 | return nil 40 | } 41 | if w, ok := err.(WithContext); ok { 42 | return w 43 | } 44 | return &withContext{context, err} 45 | } 46 | 47 | // Wrapf returns a WithContext that prepends a formatted message to err.Error(). If err is nil, it returns nil. If err 48 | // is a WithContext, the returned WithContext will have the message prepended but the same context as err (presumed to 49 | // be more specific). 50 | func Wrapf(context string, err error, format string, a ...any) WithContext { 51 | if err == nil { 52 | return nil 53 | } 54 | prefix := format 55 | if len(a) > 0 { 56 | prefix = fmt.Sprintf(format, a...) 57 | } 58 | if w, ok := err.(WithContext); ok { 59 | return &withContext{w.Context(), fmt.Errorf("%s: %w", prefix, w.RawError())} 60 | } 61 | return &withContext{context, err} 62 | } 63 | 64 | // Error implements error. 65 | func (w *withContext) Error() string { 66 | if len(w.context) == 0 { 67 | return w.err.Error() 68 | } 69 | return "[" + w.context + "] " + w.err.Error() 70 | } 71 | 72 | // Context implements WithContext. 73 | func (w *withContext) Context() string { 74 | return w.context 75 | } 76 | 77 | // RawError implements WithContext. 78 | func (w *withContext) RawError() error { 79 | return w.err 80 | } 81 | 82 | // Unwrap implements WithContext. 
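// The returned error still wraps the original one, so callers can keep matching it with the
// standard library's errors.Is and errors.As while the context stays visible in the message.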
83 | func (w *withContext) Unwrap() error { 84 | return fmt.Errorf("[%s] %w", w.context, w.err) 85 | } 86 | -------------------------------------------------------------------------------- /examples/azure-sql-mi/grafana-dashboard/cpu-and-queuing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/burningalchemist/sql_exporter/7862cae19fe053e1de8e81003cba660a42f39a63/examples/azure-sql-mi/grafana-dashboard/cpu-and-queuing.png -------------------------------------------------------------------------------- /examples/azure-sql-mi/grafana-dashboard/log-activity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/burningalchemist/sql_exporter/7862cae19fe053e1de8e81003cba660a42f39a63/examples/azure-sql-mi/grafana-dashboard/log-activity.png -------------------------------------------------------------------------------- /examples/azure-sql-mi/grafana-dashboard/memory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/burningalchemist/sql_exporter/7862cae19fe053e1de8e81003cba660a42f39a63/examples/azure-sql-mi/grafana-dashboard/memory.png -------------------------------------------------------------------------------- /examples/azure-sql-mi/grafana-dashboard/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/burningalchemist/sql_exporter/7862cae19fe053e1de8e81003cba660a42f39a63/examples/azure-sql-mi/grafana-dashboard/overview.png -------------------------------------------------------------------------------- /examples/azure-sql-mi/grafana-dashboard/sql-activity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/burningalchemist/sql_exporter/7862cae19fe053e1de8e81003cba660a42f39a63/examples/azure-sql-mi/grafana-dashboard/sql-activity.png -------------------------------------------------------------------------------- /examples/azure-sql-mi/grafana-dashboard/waits-and-queues.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/burningalchemist/sql_exporter/7862cae19fe053e1de8e81003cba660a42f39a63/examples/azure-sql-mi/grafana-dashboard/waits-and-queues.png -------------------------------------------------------------------------------- /examples/azure-sql-mi/mssql_mi_clerk.collector.yml: -------------------------------------------------------------------------------- 1 | # A collector defining memory clerk metrics for Microsoft SQL Server (Managed Instance). 2 | # 3 | # It is required that the SQL Server user has the following permissions: 4 | # 5 | # GRANT VIEW ANY DEFINITION TO 6 | # GRANT VIEW SERVER STATE TO 7 | # 8 | collector_name: mssqlmi_clerk 9 | 10 | # Similar to global.min_interval, but applies to the queries defined by this collector only. 
11 | #min_interval: 0s 12 | 13 | metrics: 14 | # 15 | # Collected from sys.dm_os_memory_clerks 16 | # 17 | - metric_name: mssqlmi_clerk_size_kilobytes 18 | type: gauge 19 | help: 'Memory Clerk' 20 | key_labels: 21 | - clerk_type 22 | values: [size_kb] 23 | query_ref: mssqlmi_clerk 24 | 25 | queries: 26 | - query_name: mssqlmi_clerk 27 | query: | 28 | SELECT 29 | mc.[type] AS [clerk_type] 30 | ,SUM(mc.[pages_kb]) AS [size_kb] 31 | FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) 32 | GROUP BY 33 | mc.[type] 34 | HAVING 35 | SUM(mc.[pages_kb]) >= 1024 36 | OPTION(RECOMPILE); 37 | -------------------------------------------------------------------------------- /examples/azure-sql-mi/mssql_mi_properties.collector.yml: -------------------------------------------------------------------------------- 1 | # A collector defining standard metrics for Microsoft SQL Server (Managed Instance). 2 | # 3 | # It is required that the SQL Server user has the following permissions: 4 | # 5 | # GRANT VIEW ANY DEFINITION TO 6 | # GRANT VIEW SERVER STATE TO 7 | # 8 | collector_name: mssqlmi_properties 9 | 10 | # Similar to global.min_interval, but applies to the queries defined by this collector only. 11 | #min_interval: 0s 12 | 13 | metrics: 14 | # 15 | # Collected from sys.server_resource_stats 16 | # 17 | - metric_name: mssqlmi_cpu_count 18 | type: gauge 19 | help: 'Virtual Cores' 20 | values: [cpu_count] 21 | query_ref: mssqlmi_properties 22 | 23 | - metric_name: mssqlmi_server_memory_bytes 24 | type: gauge 25 | help: 'Server Memory in bytes' 26 | values: [server_memory] 27 | query_ref: mssqlmi_properties 28 | 29 | - metric_name: mssqlmi_total_storage_bytes 30 | type: gauge 31 | help: 'Total Storage in bytes' 32 | key_labels: 33 | # populated from sku column 34 | - sku 35 | - hardware_type 36 | values: [total_storage] 37 | query_ref: mssqlmi_properties 38 | 39 | - metric_name: mssqlmi_available_storage_bytes 40 | type: gauge 41 | help: 'Available Storage in bytes' 42 | values: [available_storage] 43 | query_ref: mssqlmi_properties 44 | 45 | - metric_name: mssqlmi_db_online 46 | type: gauge 47 | help: '# of Online Databases' 48 | values: [db_online] 49 | query_ref: mssqlmi_properties 50 | 51 | queries: 52 | - query_name: mssqlmi_properties 53 | query: | 54 | IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ 55 | DECLARE @ErrorMessage AS nvarchar(500) = 'sql_exporter - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.'; 56 | RAISERROR (@ErrorMessage,11,1) 57 | RETURN 58 | END 59 | SELECT TOP 1 60 | [virtual_core_count] AS [cpu_count] 61 | ,(SELECT [process_memory_limit_mb]*1000000 FROM sys.dm_os_job_object) AS [server_memory] 62 | ,[sku] 63 | ,[hardware_generation] AS [hardware_type] 64 | ,cast([reserved_storage_mb]*1000000 as bigint) AS [total_storage] 65 | ,cast(([reserved_storage_mb] - [storage_space_used_mb])*1000000 as bigint) AS [available_storage] 66 | ,[db_online] 67 | FROM sys.server_resource_stats 68 | CROSS APPLY ( 69 | SELECT 70 | SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END ) AS [db_online] 71 | FROM sys.databases 72 | ) AS dbs 73 | ORDER BY 74 | [start_time] DESC; 75 | -------------------------------------------------------------------------------- /examples/azure-sql-mi/mssql_mi_size.collector.yml: -------------------------------------------------------------------------------- 1 | # A collector defining database size metrics for Microsoft SQL Server (Managed Instance). 
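# Sizes are reported per database and file type (data vs. log) from
# sys.dm_io_virtual_file_stats joined with sys.master_files; see the query below.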
2 | # 3 | # It is required that the SQL Server user has the following permissions: 4 | # 5 | # GRANT VIEW ANY DEFINITION TO 6 | # GRANT VIEW SERVER STATE TO 7 | # 8 | collector_name: mssqlmi_size 9 | 10 | # Similar to global.min_interval, but applies to the queries defined by this collector only. 11 | #min_interval: 0s 12 | 13 | metrics: 14 | # 15 | # Collected from sys.dm_io_virtual_file_stats 16 | # 17 | - metric_name: mssqlmi_database_size_bytes 18 | type: gauge 19 | help: 'Database Size in Bytes' 20 | key_labels: 21 | - database 22 | - file_type 23 | values: [size_on_disk_bytes] 24 | query_ref: mssqlmi_size 25 | 26 | queries: 27 | - query_name: mssqlmi_size 28 | query: | 29 | IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ 30 | DECLARE @ErrorMessage AS nvarchar(500) = 'sql_exporter - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.'; 31 | RAISERROR (@ErrorMessage,11,1) 32 | RETURN 33 | END 34 | SELECT 35 | DB_NAME(mf.database_id) AS [database] 36 | -- ,name AS 'File Logical Name' 37 | ,'file_type' = CASE WHEN type_desc = 'LOG' THEN 'Log File' WHEN type_desc = 'ROWS' THEN 'Data File' ELSE type_desc END 38 | -- ,mf.physical_name AS 'File Physical Name' 39 | ,size_on_disk_bytes 40 | -- ,size_on_disk_bytes/ 1024 AS 'Size(KB)' 41 | -- size_on_disk_bytes/ 1024 / 1024 AS 'Size(MB)', 42 | -- size_on_disk_bytes/ 1024 / 1024 / 1024 AS 'Size(GB)' 43 | FROM 44 | sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs 45 | JOIN sys.master_files AS mf 46 | ON mf.database_id = divfs.database_id 47 | AND mf.file_id = divfs.file_id 48 | WHERE DB_NAME(mf.database_id) NOT IN ('master', 'model','tempdb', 'msdb') 49 | AND name in ('data_0', 'log') 50 | -- ORDER BY size_on_disk_bytes DESC 51 | -------------------------------------------------------------------------------- /examples/azure-sql-mi/mssql_mi_wait.collector.yml: -------------------------------------------------------------------------------- 1 | # A collector defining wait metrics for Microsoft SQL Server (Managed Instance). 2 | # 3 | # It is required that the SQL Server user has the following permissions: 4 | # 5 | # GRANT VIEW ANY DEFINITION TO 6 | # GRANT VIEW SERVER STATE TO 7 | # 8 | collector_name: mssqlmi_wait 9 | 10 | # Similar to global.min_interval, but applies to the queries defined by this collector only. 
11 | #min_interval: 0s 12 | 13 | metrics: 14 | # 15 | # Collected from sys.dm_os_wait_stats 16 | # 17 | - metric_name: mssqlmi_wait_time_seconds 18 | type: gauge 19 | help: 'Wait Time in Seconds' 20 | key_labels: 21 | - wait_type 22 | - wait_category 23 | values: [wait_time_seconds] 24 | query_ref: mssqlmi_wait 25 | - metric_name: mssqlmi_signal_wait_time_seconds 26 | type: gauge 27 | help: 'Signal Wait Time in Seconds' 28 | key_labels: 29 | - wait_type 30 | - wait_category 31 | values: [signal_wait_time_seconds] 32 | query_ref: mssqlmi_wait 33 | - metric_name: mssqlmi_waiting_tasks_count 34 | type: gauge 35 | help: 'Wait Tasks Count' 36 | key_labels: 37 | - wait_type 38 | - wait_category 39 | values: [waiting_tasks_count] 40 | query_ref: mssqlmi_wait 41 | 42 | queries: 43 | - query_name: mssqlmi_wait 44 | query: | 45 | IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ 46 | DECLARE @ErrorMessage AS nvarchar(500) = 'sql_exporter - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.'; 47 | RAISERROR (@ErrorMessage,11,1) 48 | RETURN 49 | END 50 | SELECT 51 | ws.[wait_type] 52 | ,CAST([wait_time_ms] / 1000.0 AS FLOAT(10)) AS [wait_time_seconds] 53 | --,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] 54 | ,CAST([signal_wait_time_ms] / 1000.0 AS FLOAT(10)) AS [signal_wait_time_seconds] 55 | -- ,[max_wait_time_ms] 56 | ,[waiting_tasks_count] 57 | ,CASE 58 | WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' 59 | WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' 60 | WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' 61 | WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' 62 | WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' 63 | WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' 64 | WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' 65 | WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' 66 | WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' 67 | WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' 68 | or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' 69 | WHEN ws.[wait_type] LIKE 'SLEEP[_]%' 70 | or ws.[wait_type] IN ( 71 | 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', 72 | 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', 73 | 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', 74 | 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' 75 | WHEN ws.[wait_type] IN( 76 | 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', 77 | 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' 78 | WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' 79 | WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' 80 | WHEN ws.[wait_type] IN ( 81 | 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', 82 | 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' 83 | WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' 84 | WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' 85 | or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' 86 | WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' 87 | WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' 88 | WHEN ws.[wait_type] IN( 89 | 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', 90 | 
'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' 91 | WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') 92 | or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' 93 | or ws.[wait_type] like 'BP%' THEN 'Parallelism' 94 | WHEN ws.[wait_type] IN( 95 | 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', 96 | 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', 97 | 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' 98 | WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' 99 | WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' 100 | or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' 101 | or ws.[wait_type] LIKE 'SE_REPL[_]%' 102 | or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' 103 | WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' 104 | or ws.[wait_type] IN ( 105 | 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', 106 | 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', 107 | 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' 108 | WHEN ws.[wait_type] IN ( 109 | 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', 110 | 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', 111 | 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', 112 | 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' 113 | ELSE 'Other' 114 | END as [wait_category] 115 | FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) 116 | WHERE 117 | ws.[wait_type] NOT IN ( 118 | N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', 119 | N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', 120 | N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', 121 | N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', 122 | N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', 123 | N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', 124 | N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', 125 | N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', 126 | N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', 127 | N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', 128 | N'PARALLEL_REDO_WORKER_WAIT_WORK', 129 | N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', 130 | N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', 131 | N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', 132 | N'PREEMPTIVE_OS_DEVICEOPS', 133 | N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', 134 | N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', 135 | N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', 136 | N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', 137 | N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', 138 | N'QDS_ASYNC_QUEUE', 139 | N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', 140 | N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', 141 | N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', 142 | N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', 143 | N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', 144 | N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', 145 | 
N'SQLTRACE_WAIT_ENTRIES', 146 | N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', 147 | N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', 148 | N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', 149 | N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', 150 | N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', 151 | N'RBIO_COMM_RETRY') 152 | AND [waiting_tasks_count] > 10 153 | AND [wait_time_ms] > 100; 154 | -------------------------------------------------------------------------------- /examples/azure-sql-mi/sql_exporter.yml: -------------------------------------------------------------------------------- 1 | # Global defaults. 2 | global: 3 | # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. 4 | scrape_timeout_offset: 500ms 5 | # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. 6 | min_interval: 0s 7 | # Maximum number of open connections to any one target. Metric queries will run concurrently on multiple connections, 8 | # as will concurrent scrapes. 9 | max_connections: 3 10 | # Maximum number of idle connections to any one target. Unless you use very long collection intervals, this should 11 | # always be the same as max_connections. 12 | max_idle_connections: 3 13 | # Maximum number of maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. 14 | # If 0, connections are not closed due to a connection's age. 15 | max_connection_lifetime: 5m 16 | 17 | # The target to monitor and the collectors to execute on it. 18 | target: 19 | # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) 20 | # the schema gets dropped or replaced to match the driver expected DSN format. 21 | data_source_name: 'sqlserver://USERNAME_HERE:PASSWORD_HERE@SQLMI_HERE_ENDPOINT.database.windows.net:1433?encrypt=true&hostNameInCertificate=%2A.SQL_MI_DOMAIN_HERE.database.windows.net&trustservercertificate=true' 22 | 23 | # Collectors (referenced by name) to execute on the target. 24 | collectors: [mssqlmi_*] 25 | 26 | # Collector files specifies a list of globs. One collector definition is read from each matching file. 27 | collector_files: 28 | - "*.collector.yml" 29 | -------------------------------------------------------------------------------- /examples/mssql_standard.collector.yml: -------------------------------------------------------------------------------- 1 | # A collector defining standard metrics for Microsoft SQL Server. 2 | # 3 | # It is required that the SQL Server user has the following permissions: 4 | # 5 | # GRANT VIEW ANY DEFINITION TO 6 | # GRANT VIEW SERVER STATE TO 7 | # 8 | collector_name: mssql_standard 9 | 10 | # Similar to global.min_interval, but applies to the queries defined by this collector only. 11 | #min_interval: 0s 12 | 13 | metrics: 14 | - metric_name: mssql_local_time_seconds 15 | type: gauge 16 | help: 'Local time in seconds since epoch (Unix time).' 17 | values: [unix_time] 18 | query: | 19 | SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time 20 | 21 | - metric_name: mssql_connections 22 | type: gauge 23 | help: 'Number of active connections.' 
24 | key_labels: 25 | - db 26 | values: [count] 27 | query: | 28 | SELECT DB_NAME(sp.dbid) AS db, COUNT(sp.spid) AS count 29 | FROM sys.sysprocesses sp 30 | GROUP BY DB_NAME(sp.dbid) 31 | 32 | # 33 | # Collected from sys.dm_os_performance_counters 34 | # 35 | - metric_name: mssql_deadlocks 36 | type: counter 37 | help: 'Number of lock requests that resulted in a deadlock.' 38 | values: [cntr_value] 39 | query: | 40 | SELECT cntr_value 41 | FROM sys.dm_os_performance_counters WITH (NOLOCK) 42 | WHERE counter_name = 'Number of Deadlocks/sec' AND instance_name = '_Total' 43 | 44 | - metric_name: mssql_user_errors 45 | type: counter 46 | help: 'Number of user errors.' 47 | values: [cntr_value] 48 | query: | 49 | SELECT cntr_value 50 | FROM sys.dm_os_performance_counters WITH (NOLOCK) 51 | WHERE counter_name = 'Errors/sec' AND instance_name = 'User Errors' 52 | 53 | - metric_name: mssql_kill_connection_errors 54 | type: counter 55 | help: 'Number of severe errors that caused SQL Server to kill the connection.' 56 | values: [cntr_value] 57 | query: | 58 | SELECT cntr_value 59 | FROM sys.dm_os_performance_counters WITH (NOLOCK) 60 | WHERE counter_name = 'Errors/sec' AND instance_name = 'Kill Connection Errors' 61 | 62 | - metric_name: mssql_page_life_expectancy_seconds 63 | type: gauge 64 | help: 'The minimum number of seconds a page will stay in the buffer pool on this node without references.' 65 | values: [cntr_value] 66 | query: | 67 | SELECT top(1) cntr_value 68 | FROM sys.dm_os_performance_counters WITH (NOLOCK) 69 | WHERE counter_name = 'Page life expectancy' 70 | 71 | - metric_name: mssql_batch_requests 72 | type: counter 73 | help: 'Number of command batches received.' 74 | values: [cntr_value] 75 | query: | 76 | SELECT cntr_value 77 | FROM sys.dm_os_performance_counters WITH (NOLOCK) 78 | WHERE counter_name = 'Batch Requests/sec' 79 | 80 | - metric_name: mssql_log_growths 81 | type: counter 82 | help: 'Number of times the transaction log has been expanded, per database.' 83 | key_labels: 84 | - db 85 | values: [cntr_value] 86 | query: | 87 | SELECT rtrim(instance_name) AS db, cntr_value 88 | FROM sys.dm_os_performance_counters WITH (NOLOCK) 89 | WHERE counter_name = 'Log Growths' AND instance_name <> '_Total' 90 | 91 | - metric_name: mssql_buffer_cache_hit_ratio 92 | type: gauge 93 | help: 'Ratio of requests that hit the buffer cache' 94 | values: [cntr_value] 95 | query: | 96 | SELECT cntr_value 97 | FROM sys.dm_os_performance_counters 98 | WHERE [counter_name] = 'Buffer cache hit ratio' 99 | 100 | - metric_name: mssql_checkpoint_pages_sec 101 | type: gauge 102 | help: 'Checkpoint Pages Per Second' 103 | values: [cntr_value] 104 | query: | 105 | SELECT cntr_value 106 | FROM sys.dm_os_performance_counters 107 | WHERE [counter_name] = 'Checkpoint pages/sec' 108 | 109 | # 110 | # Collected from sys.dm_io_virtual_file_stats 111 | # 112 | - metric_name: mssql_io_stall_seconds 113 | type: counter 114 | help: 'Stall time in seconds per database and I/O operation.' 115 | key_labels: 116 | - db 117 | value_label: operation 118 | values: 119 | - read 120 | - write 121 | query_ref: mssql_io_stall 122 | - metric_name: mssql_io_stall_total_seconds 123 | type: counter 124 | help: 'Total stall time in seconds per database.' 
125 | key_labels: 126 | - db 127 | values: 128 | - io_stall 129 | query_ref: mssql_io_stall 130 | 131 | # 132 | # Collected from sys.dm_os_process_memory 133 | # 134 | - metric_name: mssql_resident_memory_bytes 135 | type: gauge 136 | help: 'SQL Server resident memory size (AKA working set).' 137 | values: [resident_memory_bytes] 138 | query_ref: mssql_process_memory 139 | 140 | - metric_name: mssql_virtual_memory_bytes 141 | type: gauge 142 | help: 'SQL Server committed virtual memory size.' 143 | values: [virtual_memory_bytes] 144 | query_ref: mssql_process_memory 145 | 146 | - metric_name: mssql_memory_utilization_percentage 147 | type: gauge 148 | help: 'The percentage of committed memory that is in the working set.' 149 | values: [memory_utilization_percentage] 150 | query_ref: mssql_process_memory 151 | 152 | - metric_name: mssql_page_fault_count 153 | type: counter 154 | help: 'The number of page faults that were incurred by the SQL Server process.' 155 | values: [page_fault_count] 156 | query_ref: mssql_process_memory 157 | 158 | # 159 | # Collected from sys.dm_os_sys_memory 160 | # 161 | - metric_name: mssql_os_memory 162 | type: gauge 163 | help: 'OS physical memory, used and available.' 164 | value_label: 'state' 165 | values: [used, available] 166 | query: | 167 | SELECT 168 | (total_physical_memory_kb - available_physical_memory_kb) * 1024 AS used, 169 | available_physical_memory_kb * 1024 AS available 170 | FROM sys.dm_os_sys_memory 171 | 172 | - metric_name: mssql_os_page_file 173 | type: gauge 174 | help: 'OS page file, used and available.' 175 | value_label: 'state' 176 | values: [used, available] 177 | query: | 178 | SELECT 179 | (total_page_file_kb - available_page_file_kb) * 1024 AS used, 180 | available_page_file_kb * 1024 AS available 181 | FROM sys.dm_os_sys_memory 182 | 183 | queries: 184 | # Populates `mssql_io_stall` and `mssql_io_stall_total` 185 | - query_name: mssql_io_stall 186 | query: | 187 | SELECT 188 | cast(DB_Name(a.database_id) as varchar) AS [db], 189 | sum(io_stall_read_ms) / 1000.0 AS [read], 190 | sum(io_stall_write_ms) / 1000.0 AS [write], 191 | sum(io_stall) / 1000.0 AS io_stall 192 | FROM 193 | sys.dm_io_virtual_file_stats(null, null) a 194 | INNER JOIN sys.master_files b ON a.database_id = b.database_id AND a.file_id = b.file_id 195 | GROUP BY a.database_id 196 | 197 | # Populates `mssql_resident_memory_bytes`, `mssql_virtual_memory_bytes`, `mssql_memory_utilization_percentage` and 198 | # `mssql_page_fault_count`. 199 | - query_name: mssql_process_memory 200 | query: | 201 | SELECT 202 | physical_memory_in_use_kb * 1024 AS resident_memory_bytes, 203 | virtual_address_space_committed_kb * 1024 AS virtual_memory_bytes, 204 | memory_utilization_percentage, 205 | page_fault_count 206 | FROM sys.dm_os_process_memory 207 | 208 | -------------------------------------------------------------------------------- /examples/sql_exporter.yml: -------------------------------------------------------------------------------- 1 | # Global defaults. 2 | global: 3 | # If scrape_timeout <= 0, no timeout is set unless Prometheus provides one. The default is 10s. 4 | scrape_timeout: 10s 5 | # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. 6 | scrape_timeout_offset: 500ms 7 | # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. 8 | min_interval: 0s 9 | # Maximum number of open connections to any one target. 
Metric queries will run concurrently on multiple connections, 10 | # as will concurrent scrapes. 11 | max_connections: 3 12 | # Maximum number of idle connections to any one target. Unless you use very long collection intervals, this should 13 | # always be the same as max_connections. 14 | max_idle_connections: 3 15 | # Maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. 16 | # If 0, connections are not closed due to a connection's age. 17 | max_connection_lifetime: 5m 18 | 19 | # The target to monitor and the collectors to execute on it. 20 | target: 21 | # Data source name always has a URI scheme that matches the driver name. In some cases (e.g. MySQL) 22 | # the scheme gets dropped or replaced to match the driver's expected DSN format. 23 | data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433/dbname' 24 | 25 | # Collectors (referenced by name) to execute on the target. 26 | # Glob patterns are supported (see  for syntax). 27 | collectors: [mssql_*] 28 | 29 | # Collector files are specified as a list of globs. One collector definition is read from each matching file. 30 | # Glob patterns are supported (see  for syntax). 31 | collector_files: 32 | - "*.collector.yml" 33 | -------------------------------------------------------------------------------- /exporter.go: -------------------------------------------------------------------------------- 1 | package sql_exporter 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log/slog" 8 | "strings" 9 | "sync" 10 | 11 | "github.com/burningalchemist/sql_exporter/config" 12 | "github.com/prometheus/client_golang/prometheus" 13 | dto "github.com/prometheus/client_model/go" 14 | 15 | "google.golang.org/protobuf/proto" 16 | ) 17 | 18 | var ( 19 | SvcRegistry = prometheus.NewRegistry() 20 | svcMetricLabels = []string{"job", "target", "collector", "query"} 21 | scrapeErrorsMetric *prometheus.CounterVec 22 | ) 23 | 24 | // Exporter is a prometheus.Gatherer that gathers SQL metrics from targets and merges them with the default registry. 25 | type Exporter interface { 26 | prometheus.Gatherer 27 | 28 | // WithContext returns a (single use) copy of the Exporter, which will use the provided context for Gather() calls. 29 | WithContext(context.Context) Exporter 30 | // Config returns the Exporter's underlying Config object. 31 | Config() *config.Config 32 | // UpdateTarget updates the targets field 33 | UpdateTarget([]Target) 34 | // SetJobFilters sets the jobFilters field 35 | SetJobFilters([]string) 36 | // DropErrorMetrics resets the scrape_errors_total metric 37 | DropErrorMetrics() 38 | } 39 | 40 | type exporter struct { 41 | config *config.Config 42 | targets []Target 43 | jobFilters []string 44 | 45 | ctx context.Context 46 | } 47 | 48 | // NewExporter returns a new Exporter with the provided config. 49 | func NewExporter(configFile string) (Exporter, error) { 50 | c, err := config.Load(configFile) 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | // Override the DSN if requested (and in single target mode). 
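// config.DsnOverride carries the value of the config.data-source-name command-line flag; when set, it
// replaces the DSN of the single configured target. The check below rejects the override when jobs are
// defined, because the flag only applies in single target mode.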
56 | if config.DsnOverride != "" { 57 | if len(c.Jobs) > 0 { 58 | return nil, errors.New("the config.data-source-name flag only applies in single target mode") 59 | } 60 | c.Target.DSN = config.Secret(config.DsnOverride) 61 | } 62 | 63 | var targets []Target 64 | if c.Target != nil { 65 | target, err := NewTarget("", c.Target.Name, "", string(c.Target.DSN), c.Target.Collectors(), nil, c.Globals, c.Target.EnablePing) 66 | if err != nil { 67 | return nil, err 68 | } 69 | targets = []Target{target} 70 | } else { 71 | if len(c.Jobs) > (config.MaxInt32 / 3) { 72 | return nil, errors.New("'jobs' list is too large") 73 | } 74 | targets = make([]Target, 0, len(c.Jobs)*3) 75 | for _, jc := range c.Jobs { 76 | job, err := NewJob(jc, c.Globals) 77 | if err != nil { 78 | return nil, err 79 | } 80 | targets = append(targets, job.Targets()...) 81 | } 82 | } 83 | 84 | scrapeErrorsMetric = registerScrapeErrorMetric() 85 | 86 | return &exporter{ 87 | config: c, 88 | targets: targets, 89 | jobFilters: []string{}, 90 | ctx: context.Background(), 91 | }, nil 92 | } 93 | 94 | func (e *exporter) WithContext(ctx context.Context) Exporter { 95 | return &exporter{ 96 | config: e.config, 97 | targets: e.targets, 98 | jobFilters: e.jobFilters, 99 | ctx: ctx, 100 | } 101 | } 102 | 103 | // Gather implements prometheus.Gatherer. 104 | func (e *exporter) Gather() ([]*dto.MetricFamily, error) { 105 | var ( 106 | metricChan = make(chan Metric, capMetricChan) 107 | errs prometheus.MultiError 108 | ) 109 | 110 | // Filter out jobs that are not in the jobFilters list 111 | e.filterTargets(e.jobFilters) 112 | 113 | if len(e.targets) == 0 { 114 | return nil, errors.New("no targets found") 115 | } 116 | 117 | var wg sync.WaitGroup 118 | wg.Add(len(e.targets)) 119 | for _, t := range e.targets { 120 | go func(target Target) { 121 | defer wg.Done() 122 | target.Collect(e.ctx, metricChan) 123 | }(t) 124 | } 125 | 126 | // Wait for all collectors to complete, then close the channel. 127 | go func() { 128 | wg.Wait() 129 | close(metricChan) 130 | }() 131 | 132 | // Drain metricChan in case of premature return. 133 | defer func() { 134 | for range metricChan { 135 | } 136 | }() 137 | 138 | // Gather. 
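// Each collected Metric is converted into a dto.Metric and grouped into a dto.MetricFamily keyed by
// metric name; metrics other than gauges and counters are reported as errors. Metric write errors also
// increment the scrape_errors_total counter, using the job/target/collector/query labels parsed from
// the error's log context.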
139 | dtoMetricFamilies := make(map[string]*dto.MetricFamily, 10) 140 | for metric := range metricChan { 141 | dtoMetric := &dto.Metric{} 142 | if err := metric.Write(dtoMetric); err != nil { 143 | errs = append(errs, err) 144 | if err.Context() != "" { 145 | ctxLabels := parseContextLog(err.Context()) 146 | values := make([]string, len(svcMetricLabels)) 147 | for i, label := range svcMetricLabels { 148 | values[i] = ctxLabels[label] 149 | } 150 | scrapeErrorsMetric.WithLabelValues(values...).Inc() 151 | } 152 | continue 153 | } 154 | metricDesc := metric.Desc() 155 | dtoMetricFamily, ok := dtoMetricFamilies[metricDesc.Name()] 156 | if !ok { 157 | dtoMetricFamily = &dto.MetricFamily{} 158 | dtoMetricFamily.Name = proto.String(metricDesc.Name()) 159 | dtoMetricFamily.Help = proto.String(metricDesc.Help()) 160 | switch { 161 | case dtoMetric.Gauge != nil: 162 | dtoMetricFamily.Type = dto.MetricType_GAUGE.Enum() 163 | case dtoMetric.Counter != nil: 164 | dtoMetricFamily.Type = dto.MetricType_COUNTER.Enum() 165 | default: 166 | errs = append(errs, fmt.Errorf("don't know how to handle metric %v", dtoMetric)) 167 | continue 168 | } 169 | dtoMetricFamilies[metricDesc.Name()] = dtoMetricFamily 170 | } 171 | dtoMetricFamily.Metric = append(dtoMetricFamily.Metric, dtoMetric) 172 | } 173 | 174 | // No need to sort metric families, prometheus.Gatherers will do that for us when merging. 175 | result := make([]*dto.MetricFamily, 0, len(dtoMetricFamilies)) 176 | for _, mf := range dtoMetricFamilies { 177 | result = append(result, mf) 178 | } 179 | return result, errs 180 | } 181 | 182 | func (e *exporter) filterTargets(jf []string) { 183 | if len(jf) > 0 { 184 | var filteredTargets []Target 185 | for _, target := range e.targets { 186 | for _, jobFilter := range jf { 187 | if jobFilter == target.JobGroup() { 188 | filteredTargets = append(filteredTargets, target) 189 | break 190 | } 191 | } 192 | } 193 | if len(filteredTargets) == 0 { 194 | slog.Error("No targets found for job filters. Nothing to scrape.") 195 | } 196 | e.targets = filteredTargets 197 | } 198 | } 199 | 200 | // Config implements Exporter. 201 | func (e *exporter) Config() *config.Config { 202 | return e.config 203 | } 204 | 205 | // UpdateTarget implements Exporter. 206 | func (e *exporter) UpdateTarget(target []Target) { 207 | e.targets = target 208 | } 209 | 210 | // SetJobFilters implements Exporter. 211 | func (e *exporter) SetJobFilters(filters []string) { 212 | e.jobFilters = filters 213 | } 214 | 215 | // DropErrorMetrics implements Exporter. 216 | func (e *exporter) DropErrorMetrics() { 217 | scrapeErrorsMetric.Reset() 218 | slog.Debug("Dropped scrape_errors_total metric") 219 | } 220 | 221 | // registerScrapeErrorMetric registers the metrics for the exporter itself. 
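// The counter is registered on SvcRegistry (not the default registry) and carries the svcMetricLabels
// defined above: job, target, collector and query.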
222 | func registerScrapeErrorMetric() *prometheus.CounterVec { 223 | scrapeErrors := prometheus.NewCounterVec(prometheus.CounterOpts{ 224 | Name: "scrape_errors_total", 225 | Help: "Total number of scrape errors per job, target, collector and query", 226 | }, svcMetricLabels) 227 | SvcRegistry.MustRegister(scrapeErrors) 228 | return scrapeErrors 229 | } 230 | 231 | // split comma separated list of key=value pairs and return a map of key value pairs 232 | func parseContextLog(list string) map[string]string { 233 | m := make(map[string]string) 234 | for _, item := range strings.Split(list, ",") { 235 | parts := strings.SplitN(item, "=", 2) 236 | m[parts[0]] = parts[1] 237 | } 238 | return m 239 | } 240 | 241 | // Leading comma appears when previous parameter is undefined, which is a side-effect of running in single target mode. 242 | // Let's trim to avoid confusions. 243 | func TrimMissingCtx(logContext string) string { 244 | if strings.HasPrefix(logContext, ",") { 245 | logContext = strings.TrimLeft(logContext, ", ") 246 | } 247 | return logContext 248 | } 249 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/burningalchemist/sql_exporter 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/ClickHouse/clickhouse-go/v2 v2.35.0 7 | github.com/aws/aws-sdk-go-v2 v1.36.3 8 | github.com/aws/aws-sdk-go-v2/config v1.29.14 9 | github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.4 10 | github.com/go-sql-driver/mysql v1.9.2 11 | github.com/jackc/pgx/v5 v5.7.5 12 | github.com/kardianos/minwinsvc v1.0.2 13 | github.com/lib/pq v1.10.9 14 | github.com/microsoft/go-mssqldb v1.8.2 15 | github.com/prometheus/client_golang v1.22.0 16 | github.com/prometheus/client_model v0.6.2 17 | github.com/prometheus/common v0.64.0 18 | github.com/prometheus/exporter-toolkit v0.14.0 19 | github.com/sethvargo/go-envconfig v1.3.0 20 | github.com/sijms/go-ora/v2 v2.8.24 21 | github.com/snowflakedb/gosnowflake v1.14.1 22 | github.com/vertica/vertica-sql-go v1.3.3 23 | github.com/xo/dburl v0.23.8 24 | google.golang.org/protobuf v1.36.6 25 | gopkg.in/yaml.v3 v3.0.1 26 | ) 27 | 28 | require ( 29 | filippo.io/edwards25519 v1.1.0 // indirect 30 | github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect 31 | github.com/99designs/keyring v1.2.2 // indirect 32 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect 33 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect 34 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect 35 | github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect 36 | github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect 37 | github.com/BurntSushi/toml v1.4.0 // indirect 38 | github.com/ClickHouse/ch-go v0.66.0 // indirect 39 | github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect 40 | github.com/andybalholm/brotli v1.1.1 // indirect 41 | github.com/apache/arrow-go/v18 v18.0.0 // indirect 42 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect 43 | github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect 44 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect 45 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect 46 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect 47 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect 48 | 
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect 49 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect 50 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect 51 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect 52 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect 53 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect 54 | github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect 55 | github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect 56 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect 57 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect 58 | github.com/aws/smithy-go v1.22.2 // indirect 59 | github.com/beorn7/perks v1.0.1 // indirect 60 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 61 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 62 | github.com/danieljoos/wincred v1.2.2 // indirect 63 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 64 | github.com/dvsekhvalnov/jose2go v1.6.0 // indirect 65 | github.com/elastic/go-sysinfo v1.8.1 // indirect 66 | github.com/elastic/go-windows v1.0.0 // indirect 67 | github.com/gabriel-vasile/mimetype v1.4.7 // indirect 68 | github.com/go-faster/city v1.0.1 // indirect 69 | github.com/go-faster/errors v0.7.1 // indirect 70 | github.com/goccy/go-json v0.10.4 // indirect 71 | github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect 72 | github.com/golang-jwt/jwt/v5 v5.2.2 // indirect 73 | github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect 74 | github.com/golang-sql/sqlexp v0.1.0 // indirect 75 | github.com/google/flatbuffers v24.12.23+incompatible // indirect 76 | github.com/google/uuid v1.6.0 // indirect 77 | github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect 78 | github.com/jackc/pgpassfile v1.0.0 // indirect 79 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 80 | github.com/jackc/puddle/v2 v2.2.2 // indirect 81 | github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect 82 | github.com/jpillora/backoff v1.0.0 // indirect 83 | github.com/klauspost/compress v1.18.0 // indirect 84 | github.com/klauspost/cpuid/v2 v2.2.9 // indirect 85 | github.com/kylelemons/godebug v1.1.0 // indirect 86 | github.com/mdlayher/socket v0.4.1 // indirect 87 | github.com/mdlayher/vsock v1.2.1 // indirect 88 | github.com/mtibben/percent v0.2.1 // indirect 89 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 90 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect 91 | github.com/paulmach/orb v0.11.1 // indirect 92 | github.com/pierrec/lz4/v4 v4.1.22 // indirect 93 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect 94 | github.com/pkg/errors v0.9.1 // indirect 95 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 96 | github.com/prometheus/procfs v0.15.1 // indirect 97 | github.com/segmentio/asm v1.2.0 // indirect 98 | github.com/shopspring/decimal v1.4.0 // indirect 99 | github.com/sirupsen/logrus v1.9.3 // indirect 100 | github.com/zeebo/xxh3 v1.0.2 // indirect 101 | go.opentelemetry.io/otel v1.35.0 // indirect 102 | go.opentelemetry.io/otel/trace v1.35.0 // indirect 103 | golang.org/x/crypto v0.38.0 // indirect 104 | golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect 105 | golang.org/x/mod v0.23.0 // indirect 106 | 
golang.org/x/net v0.40.0 // indirect 107 | golang.org/x/oauth2 v0.30.0 // indirect 108 | golang.org/x/sync v0.14.0 // indirect 109 | golang.org/x/sys v0.33.0 // indirect 110 | golang.org/x/term v0.32.0 // indirect 111 | golang.org/x/text v0.25.0 // indirect 112 | golang.org/x/tools v0.30.0 // indirect 113 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect 114 | gopkg.in/yaml.v2 v2.4.0 // indirect 115 | howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect 116 | ) 117 | -------------------------------------------------------------------------------- /helm/.gitignore: -------------------------------------------------------------------------------- 1 | bin 2 | -------------------------------------------------------------------------------- /helm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | 5 | .DS_Store 6 | 7 | # Common VCS dirs 8 | .git/ 9 | .gitignore 10 | .bzr/ 11 | .bzrignore 12 | .hg/ 13 | .hgignore 14 | .svn/ 15 | 16 | # Common backup files 17 | *.swp 18 | *.bak 19 | *.tmp 20 | *.orig 21 | *~ 22 | 23 | # Various IDEs 24 | .project 25 | .idea/ 26 | *.tmproj 27 | .vscode/ 28 | 29 | # Dir with local binaries 30 | bin 31 | 32 | # Development files (including CI) 33 | ci/ 34 | README.md.gotmpl 35 | Makefile 36 | -------------------------------------------------------------------------------- /helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: sql-exporter 3 | description: Database-agnostic SQL exporter for Prometheus 4 | type: application 5 | version: 0.12.3 6 | appVersion: 0.17.3 7 | keywords: 8 | - exporter 9 | - servicemonitor 10 | - sql 11 | - metrics 12 | home: https://github.com/burningalchemist/sql_exporter 13 | sources: 14 | - https://github.com/burningalchemist/sql_exporter 15 | maintainers: 16 | - name: Nikolai Rodionov 17 | email: allanger@zohomail.com 18 | url: https://badhouseplants.net 19 | -------------------------------------------------------------------------------- /helm/Makefile: -------------------------------------------------------------------------------- 1 | LOCALBIN ?= $(shell pwd)/bin 2 | $(LOCALBIN): 3 | mkdir -p $(LOCALBIN) 4 | 5 | .PHONY: gen_docs 6 | gen_docs: ## Generate helm documentation 7 | test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install github.com/norwoodj/helm-docs/cmd/helm-docs@latest 8 | ./bin/helm-docs --template-files=./README.md.gotmpl --sort-values-order file 9 | -------------------------------------------------------------------------------- /helm/README.md: -------------------------------------------------------------------------------- 1 | # sql-exporter 2 | 3 | ![Version: 0.12.3](https://img.shields.io/badge/Version-0.12.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.17.3](https://img.shields.io/badge/AppVersion-0.17.3-informational?style=flat-square) 4 | 5 | Database-agnostic SQL exporter for Prometheus 6 | 7 | ## Source Code 8 | 9 | * 10 | 11 | ## Maintainers 12 | 13 | | Name | Email | Url | 14 | | ---- | ------ | --- | 15 | | Nikolai Rodionov | | | 16 | 17 | ## Installing the Chart 18 | 19 | To install the chart with the release name `sql-exporter`: 20 | 21 | ```console 22 | helm repo add 
sql_exporter https://burningalchemist.github.io/sql_exporter/ 23 | helm install sql_exporter/sql-exporter 24 | ``` 25 | 26 | ### Ingress support 27 | 28 | It's possible to enable the ingress creation by setting 29 | 30 | ```yaml 31 | #Values 32 | ingress: 33 | enabled: true 34 | ``` 35 | 36 | But as the sql_operator has a direct connection to databases, 37 | it might expose the database servers to possible DDoS attacks. 38 | It's not recommended by maintainers to use ingress for accessing the exporter, 39 | but if there are no other options, 40 | security measures should be taken. 41 | 42 | For example, a user might enable the basic auth on the ingress level. 43 | Take a look on how it's done at the 44 | [nginx ingress controller](https://kubernetes.github.io/ingress-nginx/examples/auth/basic/) 45 | as an example. 46 | 47 | ## Chart Values 48 | 49 | ### General parameters 50 | 51 | | Key | Type | Default | Description | 52 | |-----|------|---------|-------------| 53 | | nameOverride | string | `""` | Provide a name in place of `sql-exporter` | 54 | | fullnameOverride | string | `""` | String to fully override "sql-exporter.fullname" | 55 | | image.repository | string | `"burningalchemist/sql_exporter"` | Image repository | 56 | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | 57 | | image.tag | string | `appVersion` value from `Chart.yaml` | Image tag | 58 | | imagePullSecrets | list | `[]` | Secrets with credentials to pull images from a private registry | 59 | | service.type | string | `"ClusterIP"` | Service type | 60 | | service.port | int | `80` | Service port | 61 | | service.labels | object | `{}` | Service labels | 62 | | service.annotations | object | `{}` | Service annotations | 63 | | ingress.enabled | bool | `false` | | 64 | | ingress.labels | object | `{}` | Ingress labels | 65 | | ingress.annotations | object | `{}` | Ingress annotations | 66 | | ingress.ingressClassName | string | `""` | Ingress class name | 67 | | ingress.host | string | `""` | Ingress host | 68 | | ingress.path | string | `"/"` | Ingress path | 69 | | ingress.tls | object | `{"crt":"","enabled":false,"key":"","secretName":""}` | Ingress TLS, can be defined by cert secret, or by key and cert. | 70 | | ingress.tls.secretName | string | `""` | Ingress tls secret if already exists. | 71 | | ingress.tls.crt | string | `""` | Ingress tls.crt, required if you don't have secret name. | 72 | | ingress.tls.key | string | `""` | Ingress tls.key, required if you don't have secret name. | 73 | | extraContainers | object | `{}` | Arbitrary sidecar containers list | 74 | | initContainers | object | `{}` | Arbitrary sidecar containers list for 1.29+ kubernetes | 75 | | serviceAccount.create | bool | `true` | Specifies whether a Service Account should be created, creates "sql-exporter" service account if true, unless overriden. Otherwise, set to `default` if false, and custom service account name is not provided. Check all the available parameters. 
| 76 | | serviceAccount.annotations | object | `{}` | Annotations to add to the Service Account | 77 | | livenessProbe.initialDelaySeconds | int | `5` | | 78 | | livenessProbe.timeoutSeconds | int | `30` | | 79 | | readinessProbe.initialDelaySeconds | int | `5` | | 80 | | readinessProbe.timeoutSeconds | int | `30` | | 81 | | resources | object | `{}` | Resource limits and requests for the application controller pods | 82 | | podLabels | object | `{}` | Pod labels | 83 | | podAnnotations | object | `{}` | Pod annotations | 84 | | podSecurityContext | object | `{}` | Pod security context | 85 | | createConfig | bool | `true` | Set to true to create a config as a part of the helm chart | 86 | | logLevel | string | `"debug"` | Set log level (info if unset) | 87 | | logFormat | string | `"logfmt"` | Set log format (logfmt if unset) | 88 | | reloadEnabled | bool | `false` | Enable reload collector data handler (endpoint /reload) | 89 | 90 | ### Prometheus ServiceMonitor 91 | 92 | | Key | Type | Default | Description | 93 | |-----|------|---------|-------------| 94 | | serviceMonitor.enabled | bool | `true` | Enable ServiceMonitor | 95 | | serviceMonitor.interval | string | `"15s"` | ServiceMonitor interval | 96 | | serviceMonitor.path | string | `"/metrics"` | ServiceMonitor path | 97 | | serviceMonitor.metricRelabelings | object | `{}` | ServiceMonitor metric relabelings | 98 | | serviceMonitor.relabelings | object | `{}` | ServiceMonitor relabelings | 99 | | serviceMonitor.namespace | string | `nil` | ServiceMonitor namespace override (default is .Release.Namespace) | 100 | | serviceMonitor.scrapeTimeout | string | `nil` | ServiceMonitor scrape timeout | 101 | 102 | ### Configuration 103 | 104 | | Key | Type | Default | Description | 105 | |-----|------|---------|-------------| 106 | | config | object | `{"global":{"max_connections":3,"max_idle_connections":3,"min_interval":"0s","scrape_error_drop_interval":"0s","scrape_timeout":"10s","scrape_timeout_offset":"500ms"}}` | SQL Exporter configuration, can be a dictionary, or a template yaml string. | 107 | | config.global.scrape_timeout | string | `"10s"` | Scrape timeout | 108 | | config.global.scrape_timeout_offset | string | `"500ms"` | Scrape timeout offset. Must be strictly positive. | 109 | | config.global.scrape_error_drop_interval | string | `"0s"` | Interval between dropping scrape_errors_total metric: by default the metric is persistent. | 110 | | config.global.min_interval | string | `"0s"` | Minimum interval between collector runs. | 111 | | config.global.max_connections | int | `3` | Number of open connections. | 112 | | config.global.max_idle_connections | int | `3` | Number of idle connections. | 113 | | target | object | `nil` | Check documentation. Mutually exclusive with `jobs` | 114 | | jobs | list | `nil` | Check documentation. Mutually exclusive with `target` | 115 | | collector_files | list | `[]` | Check documentation | 116 | 117 | To generate the config as a part of a helm release, please set the `.Values.createConfig` to true, and define a config under the `.Values.config` property. 118 | 119 | To configure `target`, `jobs`, `collector_files` please refer to the [documentation](https://github.com/burningalchemist/sql_exporter/blob/master/documentation/sql_exporter.yml) in the source repository. These values are not set by default. 120 | 121 | It's also possible to define collectors (i.e. metrics and queries) in separate files, and specify the filenames in the `collector_files` list. 
For that we can use `CollectorFiles` field (check `values.yaml` for the available example). 122 | 123 | ## Dev Notes 124 | 125 | After changing default `Values`, please execute `make gen_docs` to update the `README.md` file. Readme file is generated by the `helm-docs` tool, so make sure not to edit it manually. 126 | -------------------------------------------------------------------------------- /helm/README.md.gotmpl: -------------------------------------------------------------------------------- 1 | {{ template "chart.header" . }} 2 | {{ template "chart.deprecationWarning" . }} 3 | 4 | {{ template "chart.badgesSection" . }} 5 | 6 | {{ template "chart.description" . }} 7 | 8 | {{ template "chart.sourcesSection" . }} 9 | 10 | {{ template "chart.maintainersSection" . }} 11 | 12 | {{ template "chart.requirementsSection" . }} 13 | 14 | 15 | ## Installing the Chart 16 | 17 | To install the chart with the release name `sql-exporter`: 18 | 19 | ```console 20 | helm repo add sql_exporter https://burningalchemist.github.io/sql_exporter/ 21 | helm install sql_exporter/sql-exporter 22 | ``` 23 | 24 | ### Ingress support 25 | 26 | It's possible to enable the ingress creation by setting 27 | 28 | ```yaml 29 | #Values 30 | ingress: 31 | enabled: true 32 | ``` 33 | 34 | But as the sql_operator has a direct connection to databases, 35 | it might expose the database servers to possible DDoS attacks. 36 | It's not recommended by maintainers to use ingress for accessing the exporter, 37 | but if there are no other options, 38 | security measures should be taken. 39 | 40 | For example, a user might enable the basic auth on the ingress level. 41 | Take a look on how it's done at the 42 | [nginx ingress controller](https://kubernetes.github.io/ingress-nginx/examples/auth/basic/) 43 | as an example. 44 | 45 | ## Chart Values 46 | 47 | ### General parameters 48 | 49 | | Key | Type | Default | Description | 50 | |-----|------|---------|-------------| 51 | {{- range .Values }} 52 | {{- if not (or (hasPrefix "serviceMonitor" .Key) (hasPrefix "config" .Key) (hasPrefix "collectorFiles" .Key))}} 53 | | {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | 54 | {{- end }} 55 | {{- end }} 56 | 57 | 58 | ### Prometheus ServiceMonitor 59 | 60 | | Key | Type | Default | Description | 61 | |-----|------|---------|-------------| 62 | {{- range .Values }} 63 | {{- if hasPrefix "serviceMonitor" .Key }} 64 | | {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | 65 | {{- end }} 66 | {{- end }} 67 | | serviceMonitor.scrapeTimeout | string | `nil` | ServiceMonitor scrape timeout | 68 | 69 | ### Configuration 70 | 71 | | Key | Type | Default | Description | 72 | |-----|------|---------|-------------| 73 | {{- range .Values }} 74 | {{- if or (hasPrefix "config" .Key) }} 75 | | {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | 76 | {{- end }} 77 | {{- end }} 78 | | target | object | `nil` | Check documentation. Mutually exclusive with `jobs` | 79 | | jobs | list | `nil` | Check documentation. 
Mutually exclusive with `target` | 80 | | collector_files | list | `[]` | Check documentation | 81 | 82 | To generate the config as a part of a helm release, please set the `.Values.createConfig` to true, and define a config under the `.Values.config` property. 83 | 84 | To configure `target`, `jobs`, `collector_files` please refer to the [documentation](https://github.com/burningalchemist/sql_exporter/blob/master/documentation/sql_exporter.yml) in the source repository. These values are not set by default. 85 | 86 | It's also possible to define collectors (i.e. metrics and queries) in separate files, and specify the filenames in the `collector_files` list. For that we can use `CollectorFiles` field (check `values.yaml` for the available example). 87 | 88 | ## Dev Notes 89 | 90 | After changing default `Values`, please execute `make gen_docs` to update the `README.md` file. Readme file is generated by the `helm-docs` tool, so make sure not to edit it manually. 91 | -------------------------------------------------------------------------------- /helm/ci/helmfile.yaml: -------------------------------------------------------------------------------- 1 | repositories: 2 | - name: bitnami 3 | url: https://charts.bitnami.com/bitnami 4 | - name: prometheus-community 5 | url: https://prometheus-community.github.io/helm-charts 6 | 7 | releases: 8 | - name: postgres-instance 9 | installed: true 10 | namespace: postgres 11 | createNamespace: true 12 | chart: bitnami/postgresql 13 | values: 14 | - global: 15 | postgresql: 16 | auth: 17 | postgresPassword: 123123!! 18 | - name: prometheus-stack 19 | namespace: monitoring 20 | createNamespace: true 21 | chart: prometheus-community/kube-prometheus-stack 22 | values: 23 | - prometheus: 24 | prometheusSpec: 25 | enableAdminAPI: true 26 | podMonitorNamespaceSelector: 27 | any: true 28 | podMonitorSelector: {} 29 | podMonitorSelectorNilUsesHelmValues: false 30 | ruleNamespaceSelector: 31 | any: true 32 | ruleSelector: {} 33 | ruleSelectorNilUsesHelmValues: false 34 | serviceMonitorNamespaceSelector: 35 | any: true 36 | serviceMonitorSelector: {} 37 | serviceMonitorSelectorNilUsesHelmValues: false 38 | -------------------------------------------------------------------------------- /helm/ci/postgresql-values.yaml: -------------------------------------------------------------------------------- 1 | tests: 2 | serviceMonitor: 3 | enabled: true 4 | prom: 5 | service: prometheus-operated 6 | namespace: monitoring 7 | metricsEndpoint: 8 | enabled: true 9 | service: 10 | labels: 11 | deployment: ci 12 | annotations: 13 | prometheus.io/scrape: "true" 14 | 15 | podLabels: 16 | test-label: test-value 17 | podAnnotations: 18 | test/annotation: test-value 19 | 20 | config: 21 | target: 22 | data_source_name: 'postgresql://postgres:123123!!@postgres-instance-postgresql.postgres.svc.cluster.local:5432?sslmode=disable' 23 | collectors: [active_connections] 24 | collectors: 25 | - collector_name: active_connections 26 | metrics: 27 | - metric_name: active_connections 28 | type: gauge 29 | help: 'Active connections' 30 | key_labels: 31 | - "datname" 32 | - "usename" 33 | - "state" 34 | values: 35 | - "count" 36 | query_ref: active_connections 37 | queries: 38 | - query_name: active_connections 39 | query: | 40 | SELECT 41 | datname::text, 42 | usename::text, 43 | state::text, 44 | COUNT(state)::float AS count 45 | FROM pg_stat_activity 46 | GROUP BY datname, usename, state; 47 | collector_files: 48 | - "*.collector.yml" 49 | collectorFiles: 50 | 
pricing_data_freshness.collector.yml: 51 | collector_name: pricing_data_freshness 52 | metrics: 53 | - metric_name: pricing_update_time 54 | type: gauge 55 | help: 'Time when prices for a market were last updated.' 56 | key_labels: 57 | # Populated from the `market` column of each row. 58 | - Market 59 | static_labels: 60 | # Arbitrary key/value pair 61 | portfolio: income 62 | values: [LastUpdateTime] 63 | query: | 64 | SELECT Market, max(UpdateTime) AS LastUpdateTime 65 | FROM MarketPrices 66 | GROUP BY Market 67 | -------------------------------------------------------------------------------- /helm/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | {{- $conf := include "sql_exporter.config.yaml" . | fromYaml -}} 2 | ------------------------------ 3 | Hello there! 4 | 5 | {{- if and (not $conf.target ) (not $conf.jobs)}} 6 | ------------------------------ 7 | 8 | It seems like you haven't configured the target, please check the example here: 9 | 10 | https://github.com/burningalchemist/sql_exporter/blob/master/documentation/sql_exporter.yml#L30 11 | 12 | In case you need to have multiple targets, you can confiure jobs instead, have a look here 13 | 14 | https://github.com/burningalchemist/sql_exporter#multiple-database-connections 15 | 16 | {{- end}} 17 | 18 | {{- if and (not $conf.collectors) (not $conf.collectorFiles)}} 19 | 20 | ------------------------------ 21 | You need to configure either collectors or collectorFiles (or both), please have a look at the example here: 22 | 23 | https://github.com/burningalchemist/sql_exporter#multiple-database-connections 24 | 25 | {{- end }} 26 | 27 | -------------------------------------------------------------------------------- /helm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "sql-exporter.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "sql-exporter.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "sql-exporter.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Create tls secret name based on the chart name 35 | */}} 36 | {{- define "sql-exporter.tls.name" -}} 37 | {{- if ((.Values.ingress).tls).secretName -}} 38 | {{- .Values.ingress.tls.secretName }} 39 | {{- else -}} 40 | {{- printf "%s-%s" (include "sql-exporter.fullname" .) "tls" }} 41 | {{- end -}} 42 | {{- end -}} 43 | 44 | {{/* 45 | Common labels 46 | */}} 47 | {{- define "sql-exporter.labels" -}} 48 | helm.sh/chart: {{ include "sql-exporter.chart" . }} 49 | {{ include "sql-exporter.selectorLabels" . 
}} 50 | {{- if .Chart.AppVersion }} 51 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 52 | {{- end }} 53 | app.kubernetes.io/managed-by: {{ .Release.Service }} 54 | {{- end }} 55 | 56 | {{/* 57 | Selector labels 58 | */}} 59 | {{- define "sql-exporter.selectorLabels" -}} 60 | app.kubernetes.io/name: {{ include "sql-exporter.name" . }} 61 | app.kubernetes.io/instance: {{ .Release.Name }} 62 | {{- end }} 63 | 64 | {{/* 65 | Create the name of the service account to use 66 | */}} 67 | {{- define "sql-exporter.serviceAccountName" -}} 68 | {{- default "default" .Values.serviceAccount.name }} 69 | {{- end }} 70 | 71 | {{- define "sql-exporter.volumes" -}} 72 | {{- if or .Values.createConfig .Values.collectorFiles -}} 73 | {{- true | quote -}} 74 | {{- else if .Values.extraVolumes -}} 75 | {{- true | quote -}} 76 | {{- else -}} 77 | {{- false | quote -}} 78 | {{- end -}} 79 | {{- end -}} 80 | 81 | {{- define "sql_exporter.config.yaml" -}} 82 | {{- $conf := "" -}} 83 | {{- if typeIsLike "string" .Values.config -}} 84 | {{- $conf = (tpl .Values.config .) | fromYaml -}} 85 | {{- else -}} 86 | {{- $conf = .Values.config -}} 87 | {{- end -}} 88 | {{- /* 89 | Do the wired "fromYaml | toYaml" to reformat the config. 90 | Reformat '100s' to 100s for example. 91 | */ -}} 92 | {{- tpl ($conf | toYaml ) . | fromYaml | toYaml -}} 93 | {{- end -}} 94 | -------------------------------------------------------------------------------- /helm/templates/configmap.collectors.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.collectorFiles }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ include "sql-exporter.fullname" . }} 6 | labels: 7 | {{- include "sql-exporter.labels" . | nindent 4 }} 8 | data: 9 | {{- range $k, $v := .Values.collectorFiles }} 10 | {{ $k }}: |- 11 | {{- if typeIsLike "string" $v -}} 12 | {{- $v = (tpl $v $ | fromYaml) -}} 13 | {{- end -}} 14 | {{- /* 15 | Do the wired "fromYaml | toYaml" to reformat the config. 16 | Reformat '100s' to 100s for example. 17 | */ -}} 18 | {{- tpl (toYaml $v) $ | fromYaml | toYaml | nindent 4}} 19 | {{- end}} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "sql-exporter.fullname" . }} 5 | labels: 6 | {{- include "sql-exporter.labels" . | nindent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | {{- include "sql-exporter.selectorLabels" . | nindent 6 }} 12 | template: 13 | metadata: 14 | annotations: 15 | checksum/config: {{ include (print $.Template.BasePath "/secret.configuration.yaml") . | sha256sum }} 16 | checksum/collectors: {{ include (print $.Template.BasePath "/configmap.collectors.yaml") . | sha256sum }} 17 | {{- with .Values.podAnnotations }} 18 | {{- toYaml . | nindent 8 }} 19 | {{- end }} 20 | labels: 21 | {{- include "sql-exporter.selectorLabels" . | nindent 8 }} 22 | {{- with .Values.podLabels }} 23 | {{- toYaml . | nindent 8 }} 24 | {{- end }} 25 | spec: 26 | {{- with .Values.imagePullSecrets }} 27 | imagePullSecrets: 28 | {{- toYaml . | nindent 8 }} 29 | {{- end }} 30 | securityContext: 31 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 32 | serviceAccountName: {{ if .Values.serviceAccount.create }}{{ template "sql-exporter.fullname" . 
}}{{ else }}{{ include "sql-exporter.serviceAccountName" . }}{{end}} 33 | {{- if eq (include "sql-exporter.volumes" .) "\"true\"" }} 34 | volumes: 35 | {{- if .Values.createConfig }} 36 | - name: sql-exporter 37 | secret: 38 | secretName: {{ include "sql-exporter.fullname" . }} 39 | {{- end }} 40 | {{- if .Values.collectorFiles }} 41 | - name: sql-collector 42 | configMap: 43 | name: {{ include "sql-exporter.fullname" . }} 44 | {{- end }} 45 | {{- end }} 46 | {{- range $v := .Values.extraVolumes }} 47 | - name: {{ $v.name }} 48 | {{- toYaml $v.volume | nindent 10 }} 49 | {{- end }} 50 | {{- if .Values.initContainers }} 51 | initContainers: 52 | {{ toYaml .Values.initContainers | nindent 8 }} 53 | {{- end }} 54 | containers: 55 | - name: {{ .Chart.Name }} 56 | securityContext: 57 | {{- toYaml .Values.securityContext | nindent 12 }} 58 | image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" 59 | imagePullPolicy: {{ .Values.image.pullPolicy }} 60 | args: 61 | - "-config.file=/etc/sql_exporter/sql_exporter.yml" 62 | - "-log.level={{ .Values.logLevel | default "info" }}" 63 | - "-log.format={{ .Values.logFormat | default "logfmt" }}" 64 | {{- if .Values.reloadEnabled }} 65 | - "-web.enable-reload" 66 | {{- end }} 67 | {{- if eq (include "sql-exporter.volumes" .) "\"true\"" }} 68 | volumeMounts: 69 | {{- if .Values.createConfig }} 70 | - name: sql-exporter 71 | readOnly: true 72 | mountPath: /etc/sql_exporter/ 73 | {{- end }} 74 | {{- if .Values.collectorFiles }} 75 | - name: sql-collector 76 | readOnly: true 77 | mountPath: /etc/sql_exporter/collectors/ 78 | {{- end }} 79 | {{- range $v := .Values.extraVolumes }} 80 | - name: {{ $v.name }} 81 | {{- toYaml $v.mount | nindent 12 }} 82 | {{- end }} 83 | {{- end }} 84 | {{- with .Values.envFrom }} 85 | envFrom: 86 | {{- toYaml . | nindent 12 }} 87 | {{- end }} 88 | {{- if .Values.env }} 89 | env: 90 | {{- range $key, $value := .Values.env }} 91 | - name: {{ $key }} 92 | {{- if $value.value }} 93 | value: {{ $value.value }} 94 | {{- else }} 95 | valueFrom: 96 | {{- if eq $value.from.kind "Secret" }} 97 | secretKeyRef: 98 | {{- else if eq $value.from.kind "ConfigMap" }} 99 | configMapKeyRef: 100 | {{- else }} 101 | {{- fail "Values.env[].from.kind should be either Secret or ConfigMap" }} 102 | {{- end }} 103 | name: {{ $value.from.name }} 104 | key: {{ $value.from.key }} 105 | {{- end }} 106 | {{- end }} 107 | {{- end }} 108 | livenessProbe: 109 | initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} 110 | timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} 111 | httpGet: 112 | path: /healthz 113 | port: 9399 114 | readinessProbe: 115 | initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} 116 | timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} 117 | httpGet: 118 | path: /healthz 119 | port: 9399 120 | ports: 121 | - name: http 122 | containerPort: 9399 123 | protocol: TCP 124 | resources: 125 | {{- toYaml .Values.resources | nindent 12 }} 126 | {{- with .Values.extraContainers }} 127 | {{- toYaml . | nindent 8 }} 128 | {{- end }} 129 | {{- with .Values.nodeSelector }} 130 | nodeSelector: 131 | {{- toYaml . | nindent 8 }} 132 | {{- end }} 133 | {{- with .Values.affinity }} 134 | affinity: 135 | {{- toYaml . | nindent 8 }} 136 | {{- end }} 137 | {{- with .Values.tolerations }} 138 | tolerations: 139 | {{- toYaml . 
| nindent 8 }} 140 | {{- end }} 141 | -------------------------------------------------------------------------------- /helm/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if (.Values.ingress).enabled -}} 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: {{ include "sql-exporter.fullname" . }} 6 | labels: 7 | {{- include "sql-exporter.labels" . | nindent 4 }} 8 | {{- with .Values.ingress.labels }} 9 | {{- toYaml . | nindent 4 }} 10 | {{- end }} 11 | {{- with .Values.ingress.annotations }} 12 | annotations: 13 | {{- toYaml . | nindent 4 }} 14 | {{- end }} 15 | spec: 16 | {{- if .Values.ingress.ingressClassName }} 17 | ingressClassName: {{ .Values.ingress.ingressClassName }} 18 | {{- end }} 19 | {{- if (.Values.ingress.tls).enabled }} 20 | tls: 21 | - hosts: 22 | - {{ .Values.ingress.host | required "Ingress host is required if tls is enabled!" }} 23 | secretName: {{ include "sql-exporter.tls.name" . }} 24 | {{- end }} 25 | rules: 26 | - http: 27 | paths: 28 | - path: {{ .Values.ingress.path }} 29 | pathType: Prefix 30 | backend: 31 | service: 32 | name: {{ include "sql-exporter.fullname" . }} 33 | port: 34 | number: {{ .Values.service.port }} 35 | {{- if .Values.ingress.host }} 36 | host: {{ .Values.ingress.host }} 37 | {{- end }} 38 | {{- end -}} 39 | -------------------------------------------------------------------------------- /helm/templates/secret.configuration.yaml: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # -- This secret holds the config file of sql_exporter 3 | # --------------------------------------------------------------------- 4 | {{- if .Values.createConfig }} 5 | apiVersion: v1 6 | kind: Secret 7 | metadata: 8 | name: {{ include "sql-exporter.fullname" . }} 9 | labels: 10 | {{- include "sql-exporter.labels" . | nindent 4 }} 11 | type: Opaque 12 | stringData: 13 | sql_exporter.yml: |- 14 | {{- include "sql_exporter.config.yaml" . | nindent 4 }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/templates/secret.tls.yaml: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------- 2 | # -- This secret holds the tls key and cert of sql_exporter's ingress 3 | # --------------------------------------------------------------------- 4 | {{- if and (((.Values.ingress).tls).enabled) (not ((.Values.ingress).tls).secretName) -}} 5 | apiVersion: v1 6 | kind: Secret 7 | metadata: 8 | name: {{ include "sql-exporter.tls.name" . }} 9 | labels: 10 | {{- include "sql-exporter.labels" . | nindent 4 }} 11 | type: Opaque 12 | data: 13 | tls.crt: {{ (tpl (.Values.ingress.tls.crt | required "crt is required if you want to create tls secret.") .) | required "crt is required if you want to create tls secret." | b64enc | quote }} 14 | tls.key: {{ (tpl (.Values.ingress.tls.key | required "private key is required if you want to create tls secret.") .) | required "private key is required if you want to create tls secret." 
| b64enc | quote }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "sql-exporter.fullname" . }} 5 | labels: 6 | {{- include "sql-exporter.labels" . | nindent 4 }} 7 | {{- with .Values.service.labels }} 8 | {{- toYaml . | nindent 4 }} 9 | {{- end }} 10 | {{- with .Values.service.annotations }} 11 | annotations: 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | spec: 15 | type: {{ .Values.service.type }} 16 | ports: 17 | - port: {{ .Values.service.port }} 18 | targetPort: http 19 | protocol: TCP 20 | name: http 21 | selector: 22 | {{- include "sql-exporter.selectorLabels" . | nindent 4 }} 23 | -------------------------------------------------------------------------------- /helm/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "sql-exporter.fullname" . }} 6 | {{- with .Values.serviceAccount.annotations}} 7 | annotations: 8 | {{- toYaml . | nindent 4 }} 9 | {{- end }} 10 | labels: 11 | {{- with .Values.serviceAccount.labels }} 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | {{- include "sql-exporter.labels" . | nindent 4 }} 15 | automountServiceAccountToken: {{ default "false" .Values.serviceAccount.automountServiceAccountToken }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "sql-exporter.fullname" . }} 6 | {{- if .Values.serviceMonitor.namespace }} 7 | namespace: {{ .Values.serviceMonitor.namespace }} 8 | {{- else }} 9 | namespace: {{ .Release.Namespace }} 10 | {{- end }} 11 | labels: 12 | {{- include "sql-exporter.labels" . | nindent 4 }} 13 | {{- range $key, $value := .Values.serviceMonitor.selector }} 14 | {{ $key }}: {{ $value | quote }} 15 | {{- end }} 16 | spec: 17 | selector: 18 | matchLabels: 19 | {{- include "sql-exporter.selectorLabels" . 
| nindent 6 }} 20 | endpoints: 21 | - port: http 22 | {{- if .Values.serviceMonitor.path }} 23 | path: {{ .Values.serviceMonitor.path }} 24 | {{- end }} 25 | {{- if .Values.serviceMonitor.interval }} 26 | interval: {{ .Values.serviceMonitor.interval }} 27 | {{- end }} 28 | {{- if .Values.serviceMonitor.scrapeTimeout }} 29 | scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} 30 | {{- end }} 31 | {{- if .Values.serviceMonitor.metricRelabelings }} 32 | metricRelabelings: 33 | {{- toYaml .Values.serviceMonitor.metricRelabelings | nindent 8 }} 34 | {{- end }} 35 | {{- if .Values.serviceMonitor.relabelings }} 36 | relabelings: 37 | {{- toYaml .Values.serviceMonitor.relabelings | nindent 8 }} 38 | {{- end }} 39 | namespaceSelector: 40 | matchNames: 41 | {{- if .Values.serviceMonitor.namespace }} 42 | - {{ .Values.serviceMonitor.namespace }} 43 | {{- else }} 44 | - {{ .Release.Namespace }} 45 | {{- end }} 46 | {{- end }} 47 | -------------------------------------------------------------------------------- /helm/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | {{- if (((.Values.tests).metricsEndpoint).enabled) }} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: {{ include "sql-exporter.fullname" . }}-test-script 7 | labels: 8 | {{- include "sql-exporter.labels" . | nindent 4 }} 9 | annotations: 10 | "helm.sh/hook": test 11 | "helm.sh/hook-weight": "1" 12 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 13 | data: 14 | test.sh: |- 15 | #! /bin/sh 16 | STATUS=$(curl {{ include "sql-exporter.fullname" . }}:80/metrics --head -s | awk '/^HTTP/{print $2}') 17 | if [ "$STATUS" != 200 ]; then 18 | echo "sql-exporter didn't return code 200, probably something is broken" 19 | exit 1; 20 | fi 21 | echo "metrics endpoint returned 200" 22 | --- 23 | apiVersion: v1 24 | kind: Pod 25 | metadata: 26 | name: "{{ include "sql-exporter.fullname" . }}-test-connection" 27 | labels: 28 | {{- include "sql-exporter.labels" . | nindent 4 }} 29 | annotations: 30 | "helm.sh/hook": test 31 | "helm.sh/hook-weight": "2" 32 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 33 | spec: 34 | volumes: 35 | - name: test-script 36 | configMap: 37 | name: "{{ include "sql-exporter.fullname" . }}-test-script" 38 | containers: 39 | - name: check-metrics-endpoint 40 | image: alpine/curl 41 | command: ['sh'] 42 | volumeMounts: 43 | - name: test-script 44 | readOnly: true 45 | mountPath: /test.sh 46 | subPath: test.sh 47 | args: 48 | - /test.sh 49 | restartPolicy: Never 50 | {{- end }} 51 | -------------------------------------------------------------------------------- /helm/templates/tests/test-servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if (((.Values.tests).serviceMonitor).enabled) }} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: {{ include "sql-exporter.fullname" . }}-test-prom-script 7 | labels: 8 | {{- include "sql-exporter.labels" . | nindent 4 }} 9 | annotations: 10 | "helm.sh/hook": test 11 | "helm.sh/hook-weight": "1" 12 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 13 | data: 14 | test.sh: |- 15 | #! /bin/sh 16 | # ----------------------------------------------------------------- 17 | # -- JQ is required for this test, but since the jq image doesn't 18 | # -- have curl installed, it's not an option to use it. 
19 | # -- Also, it doesn't have any shell installed, so we can't use 20 | # -- it to copy the binary to an emptydir. 21 | # -- That's why I'm using apk add here 22 | # ----------------------------------------------------------------- 23 | apk update && apk add jq 24 | sleep {{ .Values.serviceMonitor.interval }} 25 | URL="{{.Values.tests.serviceMonitor.prom.service}}.{{.Values.tests.serviceMonitor.prom.namespace}}.svc.cluster.local" 26 | CURL_RES=$(curl -q "http://${URL}:9090/api/v1/query?query=active_connections") 27 | STATUS=$(echo "$CURL_RES" | jq -r '.status') 28 | if [ "$STATUS" != "success" ]; then 29 | echo "metric doesn't have a status 'success' in the prometheus" 30 | echo "curl output is: $CURL_RES" 31 | exit 1; 32 | fi 33 | echo "Prometheus rerturns success for the sql-exporter metric" 34 | 35 | --- 36 | apiVersion: v1 37 | kind: Pod 38 | metadata: 39 | name: "{{ include "sql-exporter.fullname" . }}-test-prom" 40 | labels: 41 | {{- include "sql-exporter.labels" . | nindent 4 }} 42 | annotations: 43 | "helm.sh/hook": test 44 | "helm.sh/hook-weight": "2" 45 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 46 | spec: 47 | volumes: 48 | - name: test-script 49 | configMap: 50 | name: "{{ include "sql-exporter.fullname" . }}-test-prom-script" 51 | containers: 52 | - name: check-metrics-endpoint 53 | image: alpine/curl 54 | command: 55 | - sh 56 | volumeMounts: 57 | - name: test-script 58 | readOnly: true 59 | mountPath: /test.sh 60 | subPath: test.sh 61 | args: 62 | - /test.sh 63 | restartPolicy: Never 64 | {{- end}} 65 | -------------------------------------------------------------------------------- /helm/values.yaml: -------------------------------------------------------------------------------- 1 | # -- Provide a name in place of `sql-exporter` 2 | nameOverride: "" 3 | # -- String to fully override "sql-exporter.fullname" 4 | fullnameOverride: "" 5 | image: 6 | # -- Image repository 7 | repository: burningalchemist/sql_exporter 8 | # -- Image pull policy 9 | pullPolicy: IfNotPresent 10 | # -- Image tag 11 | # @default -- `appVersion` value from `Chart.yaml` 12 | tag: "" 13 | # -- Secrets with credentials to pull images from a private registry 14 | imagePullSecrets: [] 15 | service: 16 | # -- Service type 17 | type: ClusterIP 18 | # -- Service port 19 | port: 80 20 | # -- Service labels 21 | labels: {} 22 | # -- Service annotations 23 | annotations: {} 24 | # example of prometheus usage 25 | # prometheus.io/scrape: "true" 26 | # prometheus.io/path: "/metrics" 27 | ingress: 28 | enabled: false 29 | # -- Ingress labels 30 | labels: {} 31 | # -- Ingress annotations 32 | annotations: {} 33 | # -- Ingress class name 34 | ingressClassName: "" 35 | # -- Ingress host 36 | host: "" 37 | # -- Ingress path 38 | path: "/" 39 | # -- Ingress TLS, can be defined by cert secret, or by key and cert. 40 | tls: 41 | enabled: false 42 | # -- Ingress tls secret if already exists. 43 | secretName: "" 44 | # -- Ingress tls.crt, required if you don't have secret name. 45 | crt: "" 46 | # crt: "{{- .Files.Get \"tls.crt\" -}}" 47 | # -- Ingress tls.key, required if you don't have secret name. 
48 | key: "" 49 | # key: "{{- .Files.Get \"tls.key\" -}}" 50 | 51 | # -- Arbitrary sidecar containers list 52 | extraContainers: {} 53 | # - name: your_sidecar 54 | # image: gcr.io/your_image:your_tag 55 | # args: 56 | # resources: 57 | # requests:{} 58 | 59 | # -- Arbitrary sidecar containers list for 1.29+ kubernetes 60 | initContainers: {} 61 | 62 | serviceAccount: 63 | # -- Specifies whether a Service Account should be created, creates "sql-exporter" service account if true, unless 64 | # overriden. Otherwise, set to `default` if false, and custom service account name is not provided. Check all the 65 | # available parameters. 66 | create: true 67 | # -- References a custom Service Account if it already exists 68 | # name: "sql-exporter-custom-sa" 69 | # -- Annotations to add to the Service Account 70 | annotations: {} 71 | ## example annotations ## 72 | # annotations: 73 | # iam.gke.io/gcp-service-account: my-service-account@gke.url 74 | # -- Defines if token is automatically mounted to the pod after it has been created 75 | # automountServiceAccountToken: false 76 | # Liveness and readiness probes for the application controller pods 77 | livenessProbe: 78 | initialDelaySeconds: 5 79 | timeoutSeconds: 30 80 | 81 | readinessProbe: 82 | initialDelaySeconds: 5 83 | timeoutSeconds: 30 84 | # -- Resource limits and requests for the application controller pods 85 | resources: {} 86 | # limits: 87 | # cpu: 100m 88 | # memory: 128Mi 89 | # requests: 90 | # cpu: 100m 91 | # memory: 128Mi 92 | # -- Pod labels 93 | podLabels: {} 94 | # -- Pod annotations 95 | podAnnotations: {} 96 | # -- Pod security context 97 | podSecurityContext: {} 98 | # capabilities: 99 | # drop: 100 | # - ALL 101 | # readOnlyRootFilesystem: true 102 | # runAsNonRoot: true 103 | # runAsUser: 1000 104 | # @ignored 105 | securityContext: {} 106 | # Prometheus Operator values 107 | serviceMonitor: 108 | # -- Enable ServiceMonitor 109 | enabled: true 110 | # -- ServiceMonitor interval 111 | interval: 15s 112 | # -- ServiceMonitor path 113 | path: /metrics 114 | # -- ServiceMonitor scrape timeout 115 | # scrapeTimeout: 10s 116 | # -- ServiceMonitor metric relabelings 117 | metricRelabelings: {} 118 | # -- ServiceMonitor relabelings 119 | relabelings: {} 120 | # -- ServiceMonitor namespace override (default is .Release.Namespace) 121 | namespace: ~ 122 | # Additional env variables 123 | # - kind should be either Secret or ConfigMap 124 | # - name is the name of the Secret or ConfigMap that should be used 125 | # - key is the key of the object inside of a Secret or ConfigMap 126 | # env: 127 | # SQLEXPORTER_TARGET_DSN: 128 | # from: 129 | # kind: Secret 130 | # name: sql_exporter_secret 131 | # key: CONNECTION_STRING 132 | # envFrom: 133 | # - configMapRef: 134 | # name: env-configmap 135 | # - secretRef: 136 | # name: env-secrets 137 | # extraVolumes: 138 | # - name: configmap-mount 139 | # volume: 140 | # configMap: 141 | # name: log-config 142 | # items: 143 | # - key: log_level 144 | # path: log_level 145 | # mount: 146 | # readOnly: true 147 | # mountPath: /etc/config 148 | # -- Set to true to create a config as a part of the helm chart 149 | createConfig: true 150 | # -- Set log level (info if unset) 151 | logLevel: debug 152 | # -- Set log format (logfmt if unset) 153 | logFormat: logfmt 154 | # -- Enable reload collector data handler (endpoint /reload) 155 | reloadEnabled: false 156 | # -- SQL Exporter configuration, can be a dictionary, or a template yaml string. 
157 | config: 158 | global: 159 | # -- Scrape timeout 160 | scrape_timeout: 10s 161 | # -- Scrape timeout offset. Must be strictly positive. 162 | scrape_timeout_offset: 500ms 163 | # -- Interval between dropping scrape_errors_total metric: by default the metric is persistent. 164 | scrape_error_drop_interval: 0s 165 | # -- Minimum interval between collector runs. 166 | min_interval: 0s 167 | # -- Number of open connections. 168 | max_connections: 3 169 | # -- Number of idle connections. 170 | max_idle_connections: 3 171 | # Target and collectors are not set so the chart is more flexible. Please configure it yourself. 172 | # target: 173 | # data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433' 174 | # collectors: [active_connections] 175 | # collectors: 176 | # - collector_name: active_connections 177 | # metrics: 178 | # - metric_name: active_connections 179 | # type: gauge 180 | # help: 'Active connections' 181 | # key_labels: 182 | # - "datname" 183 | # - "usename" 184 | # - "state" 185 | # values: 186 | # - "count" 187 | # query_ref: active_connections 188 | # queries: 189 | # - query_name: active_connections 190 | # query: | 191 | # SELECT 192 | # datname::text, 193 | # usename::text, 194 | # state::text, 195 | # COUNT(state)::float AS count 196 | # FROM pg_stat_activity 197 | # GROUP BY datname, usename, state; 198 | # collector_files: 199 | # - "*.collector.yml" 200 | # --------------------------------------------------------------------- 201 | # -- Collector Files 202 | # (can be dictionaries or yaml string templates) 203 | # --------------------------------------------------------------------- 204 | # -- Collector files are mounted to /etc/sql_exporter/collectors dir 205 | # --------------------------------------------------------------------- 206 | # collectorFiles: 207 | # pricing_data_freshness.collector.yml: 208 | # collector_name: pricing_data_freshness 209 | # metrics: 210 | # - metric_name: pricing_update_time 211 | # type: gauge 212 | # help: 'Time when prices for a market were last updated.' 213 | # key_labels: 214 | # # Populated from the `market` column of each row. 215 | # - Market 216 | # static_labels: 217 | # # Arbitrary key/value pair 218 | # portfolio: income 219 | # values: [LastUpdateTime] 220 | # query: | 221 | # SELECT Market, max(UpdateTime) AS LastUpdateTime 222 | # FROM MarketPrices 223 | # GROUP BY Market 224 | -------------------------------------------------------------------------------- /job.go: -------------------------------------------------------------------------------- 1 | package sql_exporter 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/burningalchemist/sql_exporter/config" 7 | "github.com/burningalchemist/sql_exporter/errors" 8 | "github.com/prometheus/client_golang/prometheus" 9 | ) 10 | 11 | // Job is a collection of targets with the same collectors applied. 12 | type Job interface { 13 | Targets() []Target 14 | } 15 | 16 | // job implements Job. It wraps the corresponding JobConfig and a set of Targets. 17 | type job struct { 18 | config *config.JobConfig 19 | targets []Target 20 | logContext string 21 | } 22 | 23 | // NewJob returns a new Job with the given configuration. 
24 | func NewJob(jc *config.JobConfig, gc *config.GlobalConfig) (Job, errors.WithContext) { 25 | j := job{ 26 | config: jc, 27 | targets: make([]Target, 0, 10), 28 | logContext: fmt.Sprintf(`job=%s`, jc.Name), 29 | } 30 | 31 | if jc.EnablePing == nil { 32 | jc.EnablePing = &config.EnablePing 33 | } 34 | 35 | for _, sc := range jc.StaticConfigs { 36 | for tname, dsn := range sc.Targets { 37 | constLabels := prometheus.Labels{ 38 | "job": jc.Name, 39 | config.TargetLabel: tname, 40 | } 41 | for name, value := range sc.Labels { 42 | // Shouldn't happen as there are sanity checks in config, but check nonetheless. 43 | if _, found := constLabels[name]; found { 44 | return nil, errors.Errorf(j.logContext, "duplicate label %q", name) 45 | } 46 | constLabels[name] = value 47 | } 48 | t, err := NewTarget(j.logContext, tname, jc.Name, string(dsn), jc.Collectors(), constLabels, gc, jc.EnablePing) 49 | if err != nil { 50 | return nil, err 51 | } 52 | j.targets = append(j.targets, t) 53 | } 54 | } 55 | 56 | return &j, nil 57 | } 58 | 59 | func (j *job) Targets() []Target { 60 | return j.targets 61 | } 62 | -------------------------------------------------------------------------------- /metric.go: -------------------------------------------------------------------------------- 1 | package sql_exporter 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "sort" 7 | "time" 8 | 9 | "github.com/burningalchemist/sql_exporter/config" 10 | "github.com/burningalchemist/sql_exporter/errors" 11 | "github.com/prometheus/client_golang/prometheus" 12 | dto "github.com/prometheus/client_model/go" 13 | "google.golang.org/protobuf/proto" 14 | ) 15 | 16 | // MetricDesc is a descriptor for a family of metrics, sharing the same name, help, labels and type. 17 | type MetricDesc interface { 18 | Name() string 19 | Help() string 20 | ValueType() prometheus.ValueType 21 | ConstLabels() []*dto.LabelPair 22 | Labels() []string 23 | LogContext() string 24 | } 25 | 26 | // 27 | // MetricFamily 28 | // 29 | 30 | // MetricFamily implements MetricDesc for SQL metrics, with logic for populating its labels and values from sql.Rows. 31 | type MetricFamily struct { 32 | config *config.MetricConfig 33 | constLabels []*dto.LabelPair 34 | labels []string 35 | logContext string 36 | } 37 | 38 | // NewMetricFamily creates a new MetricFamily with the given metric config and const labels (e.g. job and instance). 39 | func NewMetricFamily(logContext string, mc *config.MetricConfig, constLabels []*dto.LabelPair) (*MetricFamily, errors.WithContext) { 40 | logContext = TrimMissingCtx(fmt.Sprintf(`%s,metric=%s`, logContext, mc.Name)) 41 | 42 | if len(mc.Values) == 0 && mc.StaticValue == nil { 43 | return nil, errors.New(logContext, "no value column defined") 44 | } 45 | if len(mc.Values) > 1 && mc.ValueLabel == "" { 46 | return nil, errors.New(logContext, "multiple values but no value label") 47 | } 48 | if len(mc.KeyLabels) > config.MaxInt32 { 49 | return nil, errors.New(logContext, "key_labels list is too large") 50 | } 51 | 52 | labels := make([]string, 0, len(mc.KeyLabels)+1) 53 | labels = append(labels, mc.KeyLabels...) 54 | if mc.ValueLabel != "" { 55 | labels = append(labels, mc.ValueLabel) 56 | } 57 | 58 | // Create a copy of the original slice to avoid modifying constLabels 59 | sortedLabels := append(constLabels[:0:0], constLabels...) 
60 | 61 | for k, v := range mc.StaticLabels { 62 | sortedLabels = append(sortedLabels, &dto.LabelPair{ 63 | Name: proto.String(k), 64 | Value: proto.String(v), 65 | }) 66 | } 67 | sort.Sort(labelPairSorter(sortedLabels)) 68 | 69 | return &MetricFamily{ 70 | config: mc, 71 | constLabels: sortedLabels, 72 | labels: labels, 73 | logContext: logContext, 74 | }, nil 75 | } 76 | 77 | // Collect is the equivalent of prometheus.Collector.Collect() but takes a Query output map to populate values from. 78 | func (mf MetricFamily) Collect(row map[string]any, ch chan<- Metric) { 79 | labelValues := make([]string, len(mf.labels)) 80 | for i, label := range mf.config.KeyLabels { 81 | labelValues[i] = row[label].(sql.NullString).String 82 | } 83 | for _, v := range mf.config.Values { 84 | if mf.config.ValueLabel != "" { 85 | labelValues[len(labelValues)-1] = v 86 | } 87 | value := row[v].(sql.NullFloat64) 88 | if value.Valid { 89 | metric := NewMetric(&mf, value.Float64, labelValues...) 90 | if mf.config.TimestampValue == "" { 91 | ch <- metric 92 | } else { 93 | ts := row[mf.config.TimestampValue].(sql.NullTime) 94 | if ts.Valid { 95 | ch <- NewMetricWithTimestamp(ts.Time, metric) 96 | } 97 | } 98 | } 99 | } 100 | if mf.config.StaticValue != nil { 101 | value := *mf.config.StaticValue 102 | ch <- NewMetric(&mf, value, labelValues...) 103 | } 104 | } 105 | 106 | // Name implements MetricDesc. 107 | func (mf MetricFamily) Name() string { 108 | return mf.config.Name 109 | } 110 | 111 | // Help implements MetricDesc. 112 | func (mf MetricFamily) Help() string { 113 | return mf.config.Help 114 | } 115 | 116 | // ValueType implements MetricDesc. 117 | func (mf MetricFamily) ValueType() prometheus.ValueType { 118 | return mf.config.ValueType() 119 | } 120 | 121 | // ConstLabels implements MetricDesc. 122 | func (mf MetricFamily) ConstLabels() []*dto.LabelPair { 123 | return mf.constLabels 124 | } 125 | 126 | // Labels implements MetricDesc. 127 | func (mf MetricFamily) Labels() []string { 128 | return mf.labels 129 | } 130 | 131 | // LogContext implements MetricDesc. 132 | func (mf MetricFamily) LogContext() string { 133 | return mf.logContext 134 | } 135 | 136 | // 137 | // automaticMetricDesc 138 | // 139 | 140 | // automaticMetric is a MetricDesc for automatically generated metrics (e.g. `up` and `scrape_duration`). 141 | type automaticMetricDesc struct { 142 | name string 143 | help string 144 | valueType prometheus.ValueType 145 | labels []string 146 | constLabels []*dto.LabelPair 147 | logContext string 148 | } 149 | 150 | // NewAutomaticMetricDesc creates a MetricDesc for automatically generated metrics. 151 | func NewAutomaticMetricDesc( 152 | logContext, name, help string, valueType prometheus.ValueType, constLabels []*dto.LabelPair, labels ...string, 153 | ) MetricDesc { 154 | return &automaticMetricDesc{ 155 | name: name, 156 | help: help, 157 | valueType: valueType, 158 | constLabels: constLabels, 159 | labels: labels, 160 | logContext: logContext, 161 | } 162 | } 163 | 164 | // Name implements MetricDesc. 165 | func (a automaticMetricDesc) Name() string { 166 | return a.name 167 | } 168 | 169 | // Help implements MetricDesc. 170 | func (a automaticMetricDesc) Help() string { 171 | return a.help 172 | } 173 | 174 | // ValueType implements MetricDesc. 175 | func (a automaticMetricDesc) ValueType() prometheus.ValueType { 176 | return a.valueType 177 | } 178 | 179 | // ConstLabels implements MetricDesc. 
180 | func (a automaticMetricDesc) ConstLabels() []*dto.LabelPair { 181 | return a.constLabels 182 | } 183 | 184 | // Labels implements MetricDesc. 185 | func (a automaticMetricDesc) Labels() []string { 186 | return a.labels 187 | } 188 | 189 | // LogContext implements MetricDesc. 190 | func (a automaticMetricDesc) LogContext() string { 191 | return a.logContext 192 | } 193 | 194 | // 195 | // Metric 196 | // 197 | 198 | // A Metric models a single sample value with its meta data being exported to Prometheus. 199 | type Metric interface { 200 | Desc() MetricDesc 201 | Write(out *dto.Metric) errors.WithContext 202 | } 203 | 204 | // NewMetric returns a metric with one fixed value that cannot be changed. 205 | // 206 | // NewMetric panics if the length of labelValues is not consistent with desc.labels(). 207 | func NewMetric(desc MetricDesc, value float64, labelValues ...string) Metric { 208 | if len(desc.Labels()) != len(labelValues) { 209 | panic(fmt.Sprintf("[%s] expected %d labels, got %d", desc.LogContext(), len(desc.Labels()), len(labelValues))) 210 | } 211 | return &constMetric{ 212 | desc: desc, 213 | val: value, 214 | labelPairs: makeLabelPairs(desc, labelValues), 215 | } 216 | } 217 | 218 | // constMetric is a metric with one fixed value that cannot be changed. 219 | type constMetric struct { 220 | desc MetricDesc 221 | val float64 222 | labelPairs []*dto.LabelPair 223 | } 224 | 225 | // Desc implements Metric. 226 | func (m *constMetric) Desc() MetricDesc { 227 | return m.desc 228 | } 229 | 230 | // Write implements Metric. 231 | func (m *constMetric) Write(out *dto.Metric) errors.WithContext { 232 | out.Label = m.labelPairs 233 | switch t := m.desc.ValueType(); t { 234 | case prometheus.CounterValue: 235 | out.Counter = &dto.Counter{Value: proto.Float64(m.val)} 236 | case prometheus.GaugeValue: 237 | out.Gauge = &dto.Gauge{Value: proto.Float64(m.val)} 238 | default: 239 | return errors.Errorf(m.desc.LogContext(), "encountered unknown type %v", t) 240 | } 241 | return nil 242 | } 243 | 244 | func makeLabelPairs(desc MetricDesc, labelValues []string) []*dto.LabelPair { 245 | labels := desc.Labels() 246 | constLabels := desc.ConstLabels() 247 | 248 | totalLen := len(labels) + len(constLabels) 249 | if totalLen == 0 { 250 | // Super fast path. 251 | return nil 252 | } 253 | if len(labels) == 0 { 254 | // Moderately fast path. 255 | return constLabels 256 | } 257 | labelPairs := make([]*dto.LabelPair, 0, totalLen) 258 | for i, label := range labels { 259 | labelPairs = append(labelPairs, &dto.LabelPair{ 260 | Name: proto.String(label), 261 | Value: proto.String(labelValues[i]), 262 | }) 263 | } 264 | labelPairs = append(labelPairs, constLabels...) 265 | sort.Sort(labelPairSorter(labelPairs)) 266 | return labelPairs 267 | } 268 | 269 | // labelPairSorter implements sort.Interface. 270 | // It provides a sortable version of a slice of dto.LabelPair pointers. 271 | 272 | type labelPairSorter []*dto.LabelPair 273 | 274 | func (s labelPairSorter) Len() int { 275 | return len(s) 276 | } 277 | 278 | func (s labelPairSorter) Swap(i, j int) { 279 | s[i], s[j] = s[j], s[i] 280 | } 281 | 282 | func (s labelPairSorter) Less(i, j int) bool { 283 | return s[i].GetName() < s[j].GetName() 284 | } 285 | 286 | type invalidMetric struct { 287 | err errors.WithContext 288 | } 289 | 290 | // NewInvalidMetric returns a metric whose Write method always returns the provided error. 
291 | func NewInvalidMetric(err errors.WithContext) Metric { 292 | return invalidMetric{err} 293 | } 294 | 295 | func (m invalidMetric) Desc() MetricDesc { return nil } 296 | 297 | func (m invalidMetric) Write(*dto.Metric) errors.WithContext { return m.err } 298 | 299 | type timestampedMetric struct { 300 | Metric 301 | t time.Time 302 | } 303 | 304 | func (m timestampedMetric) Write(pb *dto.Metric) errors.WithContext { 305 | e := m.Metric.Write(pb) 306 | pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) 307 | return e 308 | } 309 | 310 | func NewMetricWithTimestamp(t time.Time, m Metric) Metric { 311 | return timestampedMetric{Metric: m, t: t} 312 | } 313 | -------------------------------------------------------------------------------- /packaging/conf/nfpm.yaml: -------------------------------------------------------------------------------- 1 | name: "sql_exporter" 2 | arch: "amd64" 3 | platform: "linux" 4 | version: "${VERSION}" 5 | section: "default" 6 | priority: "extra" 7 | replaces: 8 | - sql_exporter 9 | maintainer: "Sergei Zyubin " 10 | description: | 11 | SQL Exporter for Prometheus 12 | homepage: "https://github.com/burningalchemist/sql_exporter" 13 | license: "MIT" 14 | contents: 15 | - src: .build/linux-amd64/sql_exporter 16 | dst: /usr/bin/sql_exporter 17 | 18 | - src: ./examples/sql_exporter.yml 19 | dst: /usr/share/sql_exporter/sql_exporter.yml 20 | type: config 21 | - src: ./examples/mssql_standard.collector.yml 22 | dst: /usr/share/sql_exporter/mssql_example.collector.yml 23 | type: config 24 | 25 | - src: ./packaging/conf/sql_exporter.default 26 | dst: /etc/default/sql_exporter 27 | type: config 28 | packager: deb 29 | - src: ./packaging/deb/sql_exporter.service 30 | dst: /usr/lib/systemd/system/sql_exporter.service 31 | type: config 32 | packager: deb 33 | 34 | - src: ./packaging/conf/sql_exporter.default 35 | dst: /etc/sysconfig/sql_exporter 36 | type: config 37 | packager: rpm 38 | - src: ./packaging/rpm/sql_exporter.service 39 | dst: /usr/lib/systemd/system/sql_exporter.service 40 | type: config 41 | packager: rpm 42 | 43 | overrides: 44 | deb: 45 | scripts: 46 | postinstall: ./packaging/deb/postinstall 47 | rpm: 48 | scripts: 49 | postinstall: ./packaging/rpm/postinstall 50 | -------------------------------------------------------------------------------- /packaging/conf/sql_exporter.default: -------------------------------------------------------------------------------- 1 | CONF_FILE=/etc/sql_exporter/sql_exporter.yml 2 | LISTEN_ADDRESS=0.0.0.0:9399 3 | LOG_FORMAT=logfmt 4 | LOG_LEVEL=debug 5 | ENABLE_RELOAD=false 6 | METRICS_PATH=/metrics 7 | WEB_CONFIG_FILE= 8 | RESTART_ON_UPGRADE=true 9 | -------------------------------------------------------------------------------- /packaging/deb/postinstall: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | [ -f /etc/default/sql_exporter ] && . /etc/default/sql_exporter 6 | 7 | IS_UPGRADE=false 8 | 9 | case "$1" in 10 | configure) 11 | [ -z "$SQL_EXPORTER_USER" ] && SQL_EXPORTER_USER="sql_exporter" 12 | [ -z "$SQL_EXPORTER_GROUP" ] && SQL_EXPORTER_GROUP="sql_exporter" 13 | if ! getent group "$SQL_EXPORTER_GROUP" >/dev/null 2>&1; then 14 | addgroup --system "$SQL_EXPORTER_GROUP" --quiet 15 | fi 16 | if ! 
id $SQL_EXPORTER_USER >/dev/null 2>&1; then 17 | adduser --system --home /usr/share/sql_exporter \ 18 | --ingroup "$SQL_EXPORTER_GROUP" --disabled-password --shell /bin/false \ 19 | "$SQL_EXPORTER_USER" 20 | fi 21 | 22 | # copy user config files 23 | if [ ! -f $CONF_FILE ]; then 24 | CONF_PATH=$(dirname "$CONF_FILE") 25 | mkdir -p $CONF_PATH 26 | cp /usr/share/sql_exporter/sql_exporter.yml $CONF_FILE 27 | cp /usr/share/sql_exporter/mssql_example.collector.yml $CONF_PATH 28 | fi 29 | 30 | # configuration files should not be modifiable by sql_exporter user, as this can be a security issue 31 | chown -Rh root:$SQL_EXPORTER_GROUP /etc/sql_exporter/ 32 | chmod 755 /etc/sql_exporter 33 | find /etc/sql_exporter -type f -exec chmod 640 {} ';' 34 | find /etc/sql_exporter -type d -exec chmod 755 {} ';' 35 | 36 | # If $1=configure and $2 is set, this is an upgrade 37 | if [ "$2" != "" ]; then 38 | IS_UPGRADE=true 39 | fi 40 | 41 | if [ "x$IS_UPGRADE" != "xtrue" ]; then 42 | if command -v systemctl >/dev/null; then 43 | echo "### NOT starting on installation, please execute the following statements to configure sql_exporter to start automatically using systemd" 44 | echo " sudo /bin/systemctl daemon-reload" 45 | echo " sudo /bin/systemctl enable sql_exporter" 46 | echo "### You can start sql_exporter by executing" 47 | echo " sudo /bin/systemctl start sql_exporter" 48 | fi 49 | elif [ "$RESTART_ON_UPGRADE" = "true" ]; then 50 | 51 | echo -n "Restarting sql_exporter service..." 52 | 53 | if command -v systemctl >/dev/null; then 54 | systemctl daemon-reload 55 | systemctl restart sql_exporter || true 56 | fi 57 | echo " OK" 58 | 59 | fi 60 | ;; 61 | esac 62 | -------------------------------------------------------------------------------- /packaging/deb/sql_exporter.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=SQL Exporter for Prometheus 3 | Documentation=https://github.com/burningalchemist/sql_exporter 4 | Wants=network-online.target 5 | After=network-online.target 6 | 7 | [Service] 8 | EnvironmentFile=/etc/default/sql_exporter 9 | User=sql_exporter 10 | Group=sql_exporter 11 | Type=simple 12 | Restart=on-failure 13 | WorkingDirectory=/usr/share/sql_exporter 14 | RuntimeDirectory=sql_exporter 15 | RuntimeDirectoryMode=0750 16 | ExecStart=/usr/bin/sql_exporter -config.file=${CONF_FILE} -web.listen-address=${LISTEN_ADDRESS} -log.format=${LOG_FORMAT} -log.level=${LOG_LEVEL} -web.enable-reload=${ENABLE_RELOAD} -web.metrics-path=${METRICS_PATH} -web.config.file=${WEB_CONFIG_FILE} 17 | LimitNOFILE=10000 18 | TimeoutStopSec=20 19 | CapabilityBoundingSet= 20 | DeviceAllow= 21 | LockPersonality=true 22 | MemoryDenyWriteExecute=false 23 | NoNewPrivileges=true 24 | PrivateDevices=true 25 | PrivateTmp=true 26 | ProtectClock=true 27 | ProtectControlGroups=true 28 | ProtectHome=true 29 | ProtectHostname=true 30 | ProtectKernelLogs=true 31 | ProtectKernelModules=true 32 | ProtectKernelTunables=true 33 | ProtectProc=invisible 34 | ProtectSystem=full 35 | RemoveIPC=true 36 | RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX 37 | RestrictNamespaces=true 38 | RestrictRealtime=true 39 | RestrictSUIDSGID=true 40 | SystemCallArchitectures=native 41 | UMask=0027 42 | 43 | [Install] 44 | WantedBy=multi-user.target 45 | -------------------------------------------------------------------------------- /packaging/rpm/postinstall: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | [ -f 
/etc/sysconfig/sql_exporter ] && . /etc/sysconfig/sql_exporter 6 | 7 | IS_UPGRADE=false 8 | 9 | case "$1" in 10 | configure) 11 | [ -z "$SQL_EXPORTER_USER" ] && SQL_EXPORTER_USER="sql_exporter" 12 | [ -z "$SQL_EXPORTER_GROUP" ] && SQL_EXPORTER_GROUP="sql_exporter" 13 | if ! getent group "$SQL_EXPORTER_GROUP" >/dev/null 2>&1; then 14 | groupadd -r "$SQL_EXPORTER_GROUP" 15 | fi 16 | if ! getent passwd $SQL_EXPORTER_USER >/dev/null 2>&1; then 17 | useradd -r -d /usr/share/sql_exporter \ 18 | -g "$SQL_EXPORTER_GROUP" -s /sbin/nologin \ 19 | "$SQL_EXPORTER_USER" 20 | fi 21 | 22 | # copy user config files 23 | if [ ! -f $CONF_FILE ]; then 24 | CONF_PATH=$(dirname "$CONF_FILE") 25 | mkdir -p $CONF_PATH 26 | cp /usr/share/sql_exporter/sql_exporter.yml "$CONF_FILE" 27 | cp /usr/share/sql_exporter/mssql_example.collector.yml "$CONF_PATH" 28 | fi 29 | 30 | # configuration files should not be modifiable by sql_exporter user, as this can be a security issue 31 | chown -Rh root:$SQL_EXPORTER_GROUP /etc/sql_exporter/ 32 | chmod 755 /etc/sql_exporter 33 | find /etc/sql_exporter -type f -exec chmod 640 {} ';' 34 | find /etc/sql_exporter -type d -exec chmod 755 {} ';' 35 | 36 | # If $1=configure and $2 is set, this is an upgrade 37 | if [ "$2" != "" ]; then 38 | IS_UPGRADE=true 39 | fi 40 | 41 | if [ "x$IS_UPGRADE" != "xtrue" ]; then 42 | if command -v systemctl >/dev/null; then 43 | echo "### NOT starting on installation, please execute the following statements to configure sql_exporter to start automatically using systemd" 44 | echo " sudo /bin/systemctl daemon-reload" 45 | echo " sudo /bin/systemctl enable sql_exporter" 46 | echo "### You can start sql_exporter by executing" 47 | echo " sudo /bin/systemctl start sql_exporter" 48 | fi 49 | elif [ "$RESTART_ON_UPGRADE" = "true" ]; then 50 | 51 | echo -n "Restarting sql_exporter service..." 
52 | 53 | if command -v systemctl >/dev/null; then 54 | systemctl daemon-reload 55 | systemctl restart sql_exporter || true 56 | fi 57 | echo " OK" 58 | 59 | fi 60 | ;; 61 | esac 62 | -------------------------------------------------------------------------------- /packaging/rpm/sql_exporter.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=SQL Exporter for Prometheus 3 | Documentation=https://github.com/burningalchemist/sql_exporter 4 | Wants=network-online.target 5 | After=network-online.target 6 | 7 | [Service] 8 | EnvironmentFile=/etc/sysconfig/sql_exporter 9 | User=sql_exporter 10 | Group=sql_exporter 11 | Type=simple 12 | Restart=on-failure 13 | WorkingDirectory=/usr/share/sql_exporter 14 | RuntimeDirectory=sql_exporter 15 | RuntimeDirectoryMode=0750 16 | ExecStart=/usr/bin/sql_exporter -config.file=${CONF_FILE} -web.listen-address=${LISTEN_ADDRESS} -log.format=${LOG_FORMAT} -log.level=${LOG_LEVEL} -web.enable-reload=${ENABLE_RELOAD} -web.metrics-path=${METRICS_PATH} -web.config.file=${WEB_CONFIG_FILE} 17 | LimitNOFILE=10000 18 | TimeoutStopSec=20 19 | CapabilityBoundingSet= 20 | DeviceAllow= 21 | LockPersonality=true 22 | MemoryDenyWriteExecute=false 23 | NoNewPrivileges=true 24 | PrivateDevices=true 25 | PrivateTmp=true 26 | ProtectClock=true 27 | ProtectControlGroups=true 28 | ProtectHome=true 29 | ProtectHostname=true 30 | ProtectKernelLogs=true 31 | ProtectKernelModules=true 32 | ProtectKernelTunables=true 33 | ProtectProc=invisible 34 | ProtectSystem=full 35 | RemoveIPC=true 36 | RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX 37 | RestrictNamespaces=true 38 | RestrictRealtime=true 39 | RestrictSUIDSGID=true 40 | SystemCallArchitectures=native 41 | UMask=0027 42 | 43 | [Install] 44 | WantedBy=multi-user.target 45 | -------------------------------------------------------------------------------- /query.go: -------------------------------------------------------------------------------- 1 | package sql_exporter 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "fmt" 7 | "log/slog" 8 | "time" 9 | 10 | "github.com/burningalchemist/sql_exporter/config" 11 | "github.com/burningalchemist/sql_exporter/errors" 12 | ) 13 | 14 | // Query wraps a sql.Stmt and all the metrics populated from it. It helps extract keys and values from result rows. 15 | type Query struct { 16 | config *config.QueryConfig 17 | metricFamilies []*MetricFamily 18 | // columnTypes maps column names to the column type expected by metrics: key (string) or value (float64). 19 | columnTypes columnTypeMap 20 | logContext string 21 | 22 | conn *sql.DB 23 | stmt *sql.Stmt 24 | } 25 | 26 | type ( 27 | columnType int 28 | columnTypeMap map[string]columnType 29 | ) 30 | 31 | const ( 32 | columnTypeKey columnType = 1 33 | columnTypeValue columnType = 2 34 | columnTypeTime columnType = 3 35 | ) 36 | 37 | // NewQuery returns a new Query that will populate the given metric families. 
38 | func NewQuery(logContext string, qc *config.QueryConfig, metricFamilies ...*MetricFamily) (*Query, errors.WithContext) { 39 | logContext = TrimMissingCtx(fmt.Sprintf(`%s,query=%s`, logContext, qc.Name)) 40 | 41 | columnTypes := make(columnTypeMap) 42 | 43 | for _, mf := range metricFamilies { 44 | for _, kcol := range mf.config.KeyLabels { 45 | if err := setColumnType(logContext, kcol, columnTypeKey, columnTypes); err != nil { 46 | return nil, err 47 | } 48 | } 49 | for _, vcol := range mf.config.Values { 50 | if err := setColumnType(logContext, vcol, columnTypeValue, columnTypes); err != nil { 51 | return nil, err 52 | } 53 | } 54 | if mf.config.TimestampValue != "" { 55 | if err := setColumnType(logContext, mf.config.TimestampValue, columnTypeTime, columnTypes); err != nil { 56 | return nil, err 57 | } 58 | } 59 | } 60 | 61 | q := Query{ 62 | config: qc, 63 | metricFamilies: metricFamilies, 64 | columnTypes: columnTypes, 65 | logContext: logContext, 66 | } 67 | return &q, nil 68 | } 69 | 70 | // setColumnType stores the provided type for a given column, checking for conflicts in the process. 71 | func setColumnType(logContext, columnName string, ctype columnType, columnTypes columnTypeMap) errors.WithContext { 72 | previousType, found := columnTypes[columnName] 73 | if found { 74 | if previousType != ctype { 75 | return errors.Errorf(logContext, "column %q used both as key and value", columnName) 76 | } 77 | } else { 78 | columnTypes[columnName] = ctype 79 | } 80 | return nil 81 | } 82 | 83 | // Collect is the equivalent of prometheus.Collector.Collect() but takes a context to run in and a database to run on. 84 | func (q *Query) Collect(ctx context.Context, conn *sql.DB, ch chan<- Metric) { 85 | if ctx.Err() != nil { 86 | ch <- NewInvalidMetric(errors.Wrap(q.logContext, ctx.Err())) 87 | 88 | return 89 | } 90 | rows, err := q.run(ctx, conn) 91 | if err != nil { 92 | ch <- NewInvalidMetric(err) 93 | return 94 | } 95 | defer rows.Close() 96 | 97 | dest, err := q.scanDest(rows) 98 | if err != nil { 99 | if config.IgnoreMissingVals { 100 | slog.Warn("Ignoring missing values", "logContext", q.logContext) 101 | return 102 | } 103 | ch <- NewInvalidMetric(err) 104 | return 105 | } 106 | for rows.Next() { 107 | row, err := q.scanRow(rows, dest) 108 | if err != nil { 109 | ch <- NewInvalidMetric(err) 110 | continue 111 | } 112 | for _, mf := range q.metricFamilies { 113 | mf.Collect(row, ch) 114 | } 115 | } 116 | if err1 := rows.Err(); err1 != nil { 117 | ch <- NewInvalidMetric(errors.Wrap(q.logContext, err1)) 118 | } 119 | } 120 | 121 | // run executes the query on the provided database, in the provided context. 
122 | func (q *Query) run(ctx context.Context, conn *sql.DB) (*sql.Rows, errors.WithContext) { 123 | if slog.Default().Enabled(ctx, slog.LevelDebug) { 124 | start := time.Now() 125 | defer func() { 126 | slog.Debug("Query execution time", "logContext", q.logContext, "duration", time.Since(start)) 127 | }() 128 | } 129 | 130 | if q.conn != nil && q.conn != conn { 131 | panic(fmt.Sprintf("[%s] Expecting to always run on the same database handle", q.logContext)) 132 | } 133 | 134 | if q.config.NoPreparedStatement { 135 | rows, err := conn.QueryContext(ctx, q.config.Query) 136 | return rows, errors.Wrap(q.logContext, err) 137 | } 138 | 139 | if q.stmt == nil { 140 | stmt, err := conn.PrepareContext(ctx, q.config.Query) 141 | if err != nil { 142 | return nil, errors.Wrapf(q.logContext, err, "prepare query failed") 143 | } 144 | q.conn = conn 145 | q.stmt = stmt 146 | } 147 | rows, err := q.stmt.QueryContext(ctx) 148 | return rows, errors.Wrap(q.logContext, err) 149 | } 150 | 151 | // scanDest creates a slice to scan the provided rows into, with strings for keys, float64s for values and interface{} 152 | // for any extra columns. 153 | func (q *Query) scanDest(rows *sql.Rows) ([]any, errors.WithContext) { 154 | columns, err := rows.Columns() 155 | if err != nil { 156 | return nil, errors.Wrap(q.logContext, err) 157 | } 158 | slog.Debug("Returned columns", "logContext", q.logContext, "columns", columns) 159 | // Create the slice to scan the row into, with strings for keys and float64s for values. 160 | dest := make([]any, 0, len(columns)) 161 | have := make(map[string]bool, len(q.columnTypes)) 162 | for i, column := range columns { 163 | switch q.columnTypes[column] { 164 | case columnTypeKey: 165 | dest = append(dest, new(sql.NullString)) 166 | have[column] = true 167 | case columnTypeValue: 168 | dest = append(dest, new(sql.NullFloat64)) 169 | have[column] = true 170 | case columnTypeTime: 171 | dest = append(dest, new(sql.NullTime)) 172 | have[column] = true 173 | default: 174 | if column == "" { 175 | slog.Debug("Unnamed column", "logContext", q.logContext, "column", i) 176 | } else { 177 | slog.Debug("Extra column returned by query", "logContext", q.logContext, "column", column) 178 | } 179 | dest = append(dest, new(any)) 180 | } 181 | } 182 | 183 | // Not all requested columns could be mapped, fail. 184 | if len(have) != len(q.columnTypes) { 185 | missing := make([]string, 0, len(q.columnTypes)-len(have)) 186 | for c := range q.columnTypes { 187 | if !have[c] { 188 | missing = append(missing, c) 189 | } 190 | } 191 | return nil, errors.Errorf(q.logContext, "Missing values for the requested columns: %q", missing) 192 | } 193 | 194 | return dest, nil 195 | } 196 | 197 | // scanRow scans the current row into a map of column name to value, with string values for key columns and float64 198 | // values for value columns, using dest as a buffer. 199 | func (q *Query) scanRow(rows *sql.Rows, dest []any) (map[string]any, errors.WithContext) { 200 | columns, err := rows.Columns() 201 | if err != nil { 202 | return nil, errors.Wrap(q.logContext, err) 203 | } 204 | 205 | // Scan the row content into dest. 206 | if err := rows.Scan(dest...); err != nil { 207 | return nil, errors.Wrapf(q.logContext, err, "scanning of query result failed") 208 | } 209 | 210 | // Pick all values we're interested in into a map. 
211 | result := make(map[string]any, len(q.columnTypes)) 212 | for i, column := range columns { 213 | switch q.columnTypes[column] { 214 | case columnTypeKey: 215 | if !dest[i].(*sql.NullString).Valid { 216 | slog.Warn("Key column is NULL", "logContext", q.logContext, "column", column) 217 | } 218 | result[column] = *dest[i].(*sql.NullString) 219 | case columnTypeTime: 220 | if !dest[i].(*sql.NullTime).Valid { 221 | slog.Warn("Time column is NULL", "logContext", q.logContext, "column", column) 222 | } 223 | result[column] = *dest[i].(*sql.NullTime) 224 | case columnTypeValue: 225 | if !dest[i].(*sql.NullFloat64).Valid { 226 | slog.Warn("Value column is NULL", "logContext", q.logContext, "column", column) 227 | } 228 | result[column] = *dest[i].(*sql.NullFloat64) 229 | } 230 | } 231 | return result, nil 232 | } 233 | -------------------------------------------------------------------------------- /reload.go: -------------------------------------------------------------------------------- 1 | package sql_exporter 2 | 3 | import ( 4 | "errors" 5 | "log/slog" 6 | 7 | cfg "github.com/burningalchemist/sql_exporter/config" 8 | ) 9 | 10 | // Reload function is used to reload the exporter configuration without restarting the exporter 11 | func Reload(e Exporter, configFile *string) error { 12 | slog.Warn("Reloading collectors has started...") 13 | slog.Warn("Connections will not be changed upon the restart of the exporter") 14 | configNext, err := cfg.Load(*configFile) 15 | if err != nil { 16 | slog.Error("Error reading config file", "error", err) 17 | return err 18 | } 19 | 20 | configCurrent := e.Config() 21 | 22 | // Clear current collectors and replace with new ones 23 | if len(configCurrent.Collectors) > 0 { 24 | configCurrent.Collectors = configCurrent.Collectors[:0] 25 | } 26 | configCurrent.Collectors = configNext.Collectors 27 | slog.Debug("Total collector size change", "from", len(configCurrent.Collectors), "to", len(configNext.Collectors)) 28 | 29 | // Reload targets 30 | switch { 31 | case configCurrent.Target != nil && configNext.Target != nil: 32 | if err = reloadTarget(e, configNext, configCurrent); err != nil { 33 | return err 34 | } 35 | case len(configCurrent.Jobs) > 0 && len(configNext.Jobs) > 0: 36 | if err = reloadJobs(e, configNext, configCurrent); err != nil { 37 | return err 38 | } 39 | case configCurrent.Target != nil && len(configNext.Jobs) > 0: 40 | case len(configCurrent.Jobs) > 0 && configNext.Target != nil: 41 | return errors.New("changing scrape mode is not allowed. 
Please restart the exporter") 42 | default: 43 | slog.Warn("No target or jobs have been found - nothing to reload") 44 | } 45 | return nil 46 | } 47 | 48 | func reloadTarget(e Exporter, nc, cc *cfg.Config) error { 49 | slog.Warn("Recreating target...") 50 | 51 | // We want to preserve DSN from the previous config revision to avoid any connection changes 52 | nc.Target.DSN = cc.Target.DSN 53 | // Apply the new target configuration 54 | cc.Target = nc.Target 55 | // Recreate the target object 56 | target, err := NewTarget("", cc.Target.Name, "", string(cc.Target.DSN), 57 | cc.Target.Collectors(), nil, cc.Globals, cc.Target.EnablePing) 58 | if err != nil { 59 | slog.Error("Error recreating a target", "error", err) 60 | return err 61 | } 62 | 63 | // Populate the target list 64 | e.UpdateTarget([]Target{target}) 65 | slog.Warn("Collectors have been successfully updated for the target") 66 | return nil 67 | } 68 | 69 | func reloadJobs(e Exporter, nc, cc *cfg.Config) error { 70 | slog.Warn("Recreating jobs...") 71 | // We want to preserve `static_configs`` from the previous config revision to avoid any connection changes 72 | for _, currentJob := range cc.Jobs { 73 | for _, newJob := range nc.Jobs { 74 | if newJob.Name == currentJob.Name { 75 | newJob.StaticConfigs = currentJob.StaticConfigs 76 | } 77 | } 78 | } 79 | cc.Jobs = nc.Jobs 80 | var updateErr error 81 | targets := make([]Target, 0, len(cc.Jobs)) 82 | 83 | for _, jobConfigItem := range cc.Jobs { 84 | job, err := NewJob(jobConfigItem, cc.Globals) 85 | if err != nil { 86 | updateErr = err 87 | break 88 | } 89 | targets = append(targets, job.Targets()...) 90 | slog.Debug("Recreated Job", "name", jobConfigItem.Name) 91 | } 92 | 93 | if updateErr != nil { 94 | slog.Error("Error recreating jobs", "error", updateErr) 95 | return updateErr 96 | } 97 | 98 | e.UpdateTarget(targets) 99 | slog.Warn("Collectors have been successfully updated for the jobs") 100 | return nil 101 | } 102 | -------------------------------------------------------------------------------- /sql.go: -------------------------------------------------------------------------------- 1 | package sql_exporter 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "errors" 7 | "fmt" 8 | "log/slog" 9 | "net/url" 10 | "os" 11 | "time" 12 | 13 | "github.com/xo/dburl" 14 | ) 15 | 16 | // OpenConnection parses a provided DSN, and opens a DB handle ensuring early termination if the context is closed 17 | // (this is actually prevented by `database/sql` implementation), sets connection limits and returns the handle. 18 | func OpenConnection(ctx context.Context, logContext, dsn string, maxConns, maxIdleConns int, maxConnLifetime time.Duration) (*sql.DB, error) { 19 | var ( 20 | url *dburl.URL 21 | conn *sql.DB 22 | err error 23 | ch = make(chan error) 24 | ) 25 | 26 | url, err = safeParse(dsn) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | driver := url.Driver 32 | if url.GoDriver != "" { 33 | driver = url.GoDriver 34 | } 35 | 36 | // Open the DB handle in a separate goroutine so we can terminate early if the context closes. 
37 | go func() { 38 | conn, err = sql.Open(driver, url.DSN) 39 | close(ch) 40 | }() 41 | 42 | select { 43 | case <-ctx.Done(): 44 | return nil, ctx.Err() 45 | case <-ch: 46 | if err != nil { 47 | return nil, err 48 | } 49 | } 50 | 51 | conn.SetMaxIdleConns(maxIdleConns) 52 | conn.SetMaxOpenConns(maxConns) 53 | conn.SetConnMaxLifetime(maxConnLifetime) 54 | 55 | slog.Debug("Database handle successfully opened", "logContext", logContext, "driver", driver) 56 | return conn, nil 57 | } 58 | 59 | // PingDB is a wrapper around sql.DB.PingContext() that terminates as soon as the context is closed. 60 | // 61 | // sql.DB does not actually pass along the context to the driver when opening a connection (which always happens if the 62 | // database is down) and the driver uses an arbitrary timeout which may well be longer than ours. So we run the ping 63 | // call in a goroutine and terminate immediately if the context is closed. 64 | func PingDB(ctx context.Context, conn *sql.DB) error { 65 | ch := make(chan error, 1) 66 | 67 | go func() { 68 | ch <- conn.PingContext(ctx) 69 | close(ch) 70 | }() 71 | 72 | select { 73 | case <-ctx.Done(): 74 | return ctx.Err() 75 | case err := <-ch: 76 | return err 77 | } 78 | } 79 | 80 | // safeParse wraps dburl.Parse method in order to prevent leaking credentials 81 | // if underlying url parse failed. By default it returns a raw url string in error message, 82 | // which most likely contains a password. It's undesired here. 83 | func safeParse(rawURL string) (*dburl.URL, error) { 84 | 85 | parsed, err := dburl.Parse(expandEnv(rawURL)) 86 | if err != nil { 87 | if uerr := new(url.Error); errors.As(err, &uerr) { 88 | return nil, uerr.Err 89 | } 90 | return nil, errors.New("invalid URL") 91 | } 92 | return parsed, nil 93 | } 94 | 95 | // expandEnv falls back to the original env variable if not found for better readability 96 | func expandEnv(env string) string { 97 | lookupFunc := func(env string) string { 98 | if value, ok := os.LookupEnv(env); ok { 99 | return value 100 | } 101 | slog.Error("Environment variable is not found, cannot expand", "env", env) 102 | return fmt.Sprintf("$%s", env) 103 | } 104 | return os.Expand(env, lookupFunc) 105 | } 106 | -------------------------------------------------------------------------------- /target.go: -------------------------------------------------------------------------------- 1 | package sql_exporter 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "database/sql/driver" 7 | "fmt" 8 | "log/slog" 9 | "sort" 10 | "sync" 11 | "time" 12 | 13 | "github.com/burningalchemist/sql_exporter/config" 14 | "github.com/burningalchemist/sql_exporter/errors" 15 | "github.com/prometheus/client_golang/prometheus" 16 | dto "github.com/prometheus/client_model/go" 17 | "google.golang.org/protobuf/proto" 18 | ) 19 | 20 | const ( 21 | // Capacity for the channel to collect metrics. 22 | capMetricChan = 1000 23 | 24 | upMetricName = "up" 25 | upMetricHelp = "1 if the target is reachable, or 0 if the scrape failed" 26 | scrapeDurationName = "scrape_duration_seconds" 27 | scrapeDurationHelp = "How long it took to scrape the target in seconds" 28 | ) 29 | 30 | // Target collects SQL metrics from a single sql.DB instance. It aggregates one or more Collectors and it looks much 31 | // like a prometheus.Collector, except its Collect() method takes a Context to run in. 32 | type Target interface { 33 | // Collect is the equivalent of prometheus.Collector.Collect(), but takes a context to run in. 
34 | Collect(ctx context.Context, ch chan<- Metric) 35 | JobGroup() string 36 | } 37 | 38 | // target implements Target. It wraps a sql.DB, which is initially nil but never changes once instantianted. 39 | type target struct { 40 | name string 41 | jobGroup string 42 | dsn string 43 | collectors []Collector 44 | constLabels prometheus.Labels 45 | globalConfig *config.GlobalConfig 46 | upDesc MetricDesc 47 | scrapeDurationDesc MetricDesc 48 | logContext string 49 | enablePing *bool 50 | 51 | conn *sql.DB 52 | } 53 | 54 | // NewTarget returns a new Target with the given target name, data source name, collectors and constant labels. 55 | // An empty target name means the exporter is running in single target mode: no synthetic metrics will be exported. 56 | func NewTarget( 57 | logContext, tname, jg, dsn string, ccs []*config.CollectorConfig, constLabels prometheus.Labels, gc *config.GlobalConfig, ep *bool) ( 58 | Target, errors.WithContext, 59 | ) { 60 | if tname != "" { 61 | logContext = TrimMissingCtx(fmt.Sprintf(`%s,target=%s`, logContext, tname)) 62 | if constLabels == nil { 63 | constLabels = prometheus.Labels{config.TargetLabel: tname} 64 | } 65 | } 66 | 67 | if ep == nil { 68 | ep = &config.EnablePing 69 | } 70 | slog.Debug("target ping enabled", "logContext", logContext, "enabled", *ep) 71 | 72 | // Sort const labels by name to ensure consistent ordering. 73 | constLabelPairs := make([]*dto.LabelPair, 0, len(constLabels)) 74 | for n, v := range constLabels { 75 | constLabelPairs = append(constLabelPairs, &dto.LabelPair{ 76 | Name: proto.String(n), 77 | Value: proto.String(v), 78 | }) 79 | } 80 | sort.Sort(labelPairSorter(constLabelPairs)) 81 | 82 | collectors := make([]Collector, 0, len(ccs)) 83 | for _, cc := range ccs { 84 | c, err := NewCollector(logContext, cc, constLabelPairs) 85 | if err != nil { 86 | return nil, err 87 | } 88 | collectors = append(collectors, c) 89 | } 90 | 91 | upDesc := NewAutomaticMetricDesc(logContext, upMetricName, upMetricHelp, prometheus.GaugeValue, constLabelPairs) 92 | scrapeDurationDesc := NewAutomaticMetricDesc(logContext, scrapeDurationName, scrapeDurationHelp, prometheus.GaugeValue, constLabelPairs) 93 | t := target{ 94 | name: tname, 95 | jobGroup: jg, 96 | dsn: dsn, 97 | collectors: collectors, 98 | constLabels: constLabels, 99 | globalConfig: gc, 100 | upDesc: upDesc, 101 | scrapeDurationDesc: scrapeDurationDesc, 102 | logContext: logContext, 103 | enablePing: ep, 104 | } 105 | return &t, nil 106 | } 107 | 108 | // Collect implements Target. 109 | func (t *target) Collect(ctx context.Context, ch chan<- Metric) { 110 | var ( 111 | scrapeStart = time.Now() 112 | targetUp = true 113 | ) 114 | 115 | err := t.ping(ctx) 116 | if err != nil { 117 | ch <- NewInvalidMetric(errors.Wrap(t.logContext, err)) 118 | targetUp = false 119 | } 120 | if t.name != "" { 121 | // Export the target's `up` metric as early as we know what it should be. 122 | ch <- NewMetric(t.upDesc, boolToFloat64(targetUp)) 123 | } 124 | 125 | var wg sync.WaitGroup 126 | // Don't bother with the collectors if target is down. 127 | if targetUp { 128 | wg.Add(len(t.collectors)) 129 | for _, c := range t.collectors { 130 | // If using a single DB connection, collectors will likely run sequentially anyway. But we might have more. 131 | go func(collector Collector) { 132 | defer wg.Done() 133 | collector.Collect(ctx, t.conn, ch) 134 | }(c) 135 | } 136 | } 137 | // Wait for all collectors (if any) to complete. 
138 | wg.Wait() 139 | 140 | if t.name != "" { 141 | // And export a `scrape duration` metric once we're done scraping. 142 | ch <- NewMetric(t.scrapeDurationDesc, float64(time.Since(scrapeStart))*1e-9) 143 | } 144 | } 145 | 146 | func (t *target) ping(ctx context.Context) errors.WithContext { 147 | // Create the DB handle, if necessary. It won't usually open an actual connection, so we'll need to ping afterwards. 148 | // We cannot do this only once at creation time because the sql.Open() documentation says it "may" open an actual 149 | // connection, so it "may" actually fail to open a handle to a DB that's initially down. 150 | if t.conn == nil { 151 | conn, err := OpenConnection(ctx, t.logContext, t.dsn, t.globalConfig.MaxConns, t.globalConfig.MaxIdleConns, t.globalConfig.MaxConnLifetime) 152 | if err != nil { 153 | if err != ctx.Err() { 154 | return errors.Wrap(t.logContext, err) 155 | } 156 | // if err == ctx.Err() fall through 157 | } else { 158 | t.conn = conn 159 | } 160 | } 161 | 162 | // If we have a handle and the context is not closed, test whether the database is up. 163 | // FIXME: we ping the database during each request even with cacheCollector. It leads 164 | // to additional charges for paid database services. 165 | if t.conn != nil && ctx.Err() == nil && *t.enablePing { 166 | var err error 167 | // Ping up to max_connections + 1 times as long as the returned error is driver.ErrBadConn, to purge the connection 168 | // pool of bad connections. This might happen if the previous scrape timed out and in-flight queries got canceled. 169 | for i := 0; i <= t.globalConfig.MaxConns; i++ { 170 | if err = PingDB(ctx, t.conn); err != driver.ErrBadConn { 171 | break 172 | } 173 | } 174 | if err != nil { 175 | return errors.Wrap(t.logContext, err) 176 | } 177 | } 178 | 179 | if ctx.Err() != nil { 180 | return errors.Wrap(t.logContext, ctx.Err()) 181 | } 182 | return nil 183 | } 184 | 185 | // boolToFloat64 converts a boolean flag to a float64 value (0.0 or 1.0). 186 | func boolToFloat64(value bool) float64 { 187 | if value { 188 | return 1.0 189 | } 190 | return 0.0 191 | } 192 | 193 | // OfBool returns bool address. 194 | func OfBool(i bool) *bool { 195 | return &i 196 | } 197 | 198 | func (t *target) JobGroup() string { 199 | return t.jobGroup 200 | } 201 | --------------------------------------------------------------------------------
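
A minimal standalone sketch (not a file in this repository) of how the exported OpenConnection and PingDB helpers from sql.go above can be driven on their own. The DSN, the lib/pq driver import, and the connection limits here are illustrative assumptions; the exporter binary wires up its own drivers via drivers.go and takes these limits from the global config shown in values.yaml above.

package main

import (
    "context"
    "log"
    "time"

    sqlexporter "github.com/burningalchemist/sql_exporter"
    _ "github.com/lib/pq" // assumed driver for the example DSN; pick one matching yours
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // $PG_PASSWORD is expanded from the environment by expandEnv/safeParse before dburl parses the DSN.
    dsn := "postgres://prom_user:$PG_PASSWORD@localhost:5432/postgres?sslmode=disable"

    // Limits mirror the chart defaults above: max_connections=3, max_idle_connections=3, no lifetime cap.
    conn, err := sqlexporter.OpenConnection(ctx, "job=example", dsn, 3, 3, 0)
    if err != nil {
        log.Fatalf("open: %v", err)
    }
    defer conn.Close()

    // PingDB returns as soon as the context expires, even if the driver's own dial timeout is longer.
    if err := sqlexporter.PingDB(ctx, conn); err != nil {
        log.Fatalf("ping: %v", err)
    }
    log.Println("target is reachable")
}

Inside the exporter itself the same calls are made from target.ping(), which additionally retries the ping up to max_connections + 1 times on driver.ErrBadConn to purge bad connections from the pool.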