├── .dockerignore ├── .github ├── dependabot.yml └── workflows │ ├── build.yml │ ├── ci.yml │ └── release.yml ├── .gitignore ├── .promu.yml ├── .travis.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── VERSION ├── alipay.jpg ├── charts └── kafka-exporter │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── deployment.yaml │ ├── secret.yaml │ ├── service.yaml │ └── servicemonitor.yaml │ └── values.yaml ├── deploy └── base │ ├── deployment.yaml │ ├── kustomization.yaml │ └── service.yaml ├── dev └── docker-compose.yml ├── go.mod ├── go.sum ├── kafka_exporter.go ├── kafka_exporter_overview.json ├── kafka_exporter_overview.png ├── scram_client.go └── simple_test.go /.dockerignore: -------------------------------------------------------------------------------- 1 | vendor 2 | Dockerfile 3 | Makefile 4 | README.md 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | assignees: 13 | - "danielqsj" 14 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build 3 | on: 4 | push: 5 | branches: 6 | - 'master' 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v3 14 | - name: Set up Go 15 | uses: actions/setup-go@v4 16 | with: 17 | go-version: 1.24.0 18 | - name: Build Docker Image 19 | run: make push 20 | env: 21 | DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} 22 | DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} 23 | GIT_TAG_NAME: latest -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: CI 3 | on: 4 | # Run this workflow every time a new commit is pushed to the upstream or a fork repository. 5 | # Running the workflow on a fork helps contributors find and resolve issues before sending a PR.
6 | push: 7 | pull_request: 8 | jobs: 9 | golangci: 10 | name: pull-kafka-exporter-golang-ci 11 | env: 12 | GOPATH: ${{ github.workspace }} 13 | GO111MODULE: on 14 | defaults: 15 | run: 16 | working-directory: ${{ env.GOPATH }}/src/github.com/${{ github.repository }} 17 | strategy: 18 | max-parallel: 3 19 | ## this will contain a matrix of all of the combinations 20 | ## we wish to test against: 21 | matrix: 22 | go-version: [ 1.24.x ] 23 | os: [ ubuntu-latest ] 24 | runs-on: ${{ matrix.os }} 25 | steps: 26 | - name: Install Go 27 | uses: actions/setup-go@v4 28 | with: 29 | go-version: ${{ matrix.go-version }} 30 | - name: Checkout code 31 | uses: actions/checkout@v3 32 | with: 33 | fetch-depth: 1 34 | path: ${{ env.GOPATH }}/src/github.com/${{ github.repository }} 35 | - name: Cache go modules and build cache 36 | uses: actions/cache@v3.0.0 37 | with: 38 | # In order: 39 | # * Module download cache 40 | # * Build cache (Linux) 41 | # * Build cache (Mac) 42 | # * Build cache (Windows) 43 | path: | 44 | ${{ env.GOPATH }}/pkg/mod 45 | ${{ env.GOPATH }}/pkg/sumdb 46 | ~/.cache/go-build 47 | ~/Library/Caches/go-build 48 | # %LocalAppData%\go-build 49 | key: ${{ matrix.os }}-go-${{ hashFiles('**/go.sum') }} 50 | restore-keys: | 51 | ${{ matrix.os }}-go- 52 | - name: Golang Format 53 | run: | 54 | make fmt 55 | STATUS=$(git status --porcelain) 56 | if [ ! -z "$STATUS" ]; then 57 | echo "Please run 'make fmt' to format your code" 58 | exit 1 59 | fi 60 | - name: Golang Lint 61 | run: make lint 62 | - name: Golang Vet 63 | run: make vet 64 | - name: Tidy Go Module 65 | run: | 66 | echo "verifying that there are no unused dependencies in the go module" 67 | make tidy 68 | STATUS=$(git status --porcelain go.mod go.sum) 69 | if [ ! -z "$STATUS" ]; then 70 | echo "Please run 'make tidy' to fix your 'go.mod' and/or 'go.sum'" 71 | exit 1 72 | fi 73 | echo "go module is tidy." 74 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release 3 | on: 4 | push: 5 | # Sequence of patterns matched against refs/tags 6 | tags: 7 | - '*' # Push events to any tag, e.g.
v1.0, v20.15.10 8 | 9 | jobs: 10 | release: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | - name: Set up Go 16 | uses: actions/setup-go@v4 17 | with: 18 | go-version: 1.24.0 19 | - uses: olegtarasov/get-tag@v2.1 20 | id: tagName 21 | - name: Release Docker Image 22 | run: make push 23 | env: 24 | DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} 25 | DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} 26 | - name: Release Binary 27 | run: make release 28 | env: 29 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Vendor dir is pulled in by 'go mod vendor' 2 | vendor/ 3 | go.sum 4 | 5 | # Binaries for programs and plugins 6 | *.exe 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, build with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | .build/ 17 | 18 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 19 | .glide/ 20 | 21 | # Binary 22 | kafka_exporter 23 | 24 | # Project IDE 25 | .idea/ 26 | *.iml 27 | 28 | # Test configuration 29 | test/ 30 | .DS_Store 31 | 32 | 33 | -------------------------------------------------------------------------------- /.promu.yml: -------------------------------------------------------------------------------- 1 | repository: 2 | path: github.com/danielqsj/kafka_exporter 3 | build: 4 | flags: -a -tags netgo 5 | ldflags: | 6 | -X github.com/prometheus/common/version.Version={{.Version}} 7 | -X github.com/prometheus/common/version.Revision={{.Revision}} 8 | -X github.com/prometheus/common/version.Branch={{.Branch}} 9 | -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} 10 | -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} 11 | tarball: 12 | files: 13 | - LICENSE 14 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | #sudo: required 2 | # 3 | #language: go 4 | # 5 | #go: 6 | #- 1.19 7 | # 8 | #env: 9 | #- GOFLAGS="-mod=readonly" 10 | # 11 | #before_install: 12 | # - sudo rm -rf /var/lib/apt/lists/* 13 | # - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 14 | # - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) edge" 15 | # - sudo apt-get update 16 | # - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce 17 | # - mkdir -vp ~/.docker/cli-plugins/ 18 | # - curl --silent -L "https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-amd64" > ~/.docker/cli-plugins/docker-buildx 19 | # - chmod a+x ~/.docker/cli-plugins/docker-buildx 20 | # 21 | #after_success: 22 | #- if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then 23 | # make push; 24 | # fi 25 | #- if [[ -n "$TRAVIS_TAG" ]]; then 26 | # make crossbuild release; 27 | # fi 28 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/prometheus/busybox:latest 2 | LABEL maintainer="Daniel Qian" 3 | 4 | ARG TARGETARCH 5 | ARG BIN_DIR=.build/linux-${TARGETARCH}/ 6 | 7 | COPY ${BIN_DIR}/kafka_exporter /bin/kafka_exporter 8 | 9 | EXPOSE 9308 10 | USER nobody 11 | ENTRYPOINT
[ "/bin/kafka_exporter" ] 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GO := GO111MODULE=on go 2 | PROMU := $(GOPATH)/bin/promu 3 | pkgs = $(shell $(GO) list ./... | grep -v /vendor/) 4 | UNAME_S := $(shell uname -s | tr A-Z a-z) 5 | UNAME_M := $(shell uname -m) 6 | 7 | ifeq ($(findstring aarch64,$(UNAME_M)),aarch64) 8 | ARCH := arm64 9 | else 10 | ARCH := $(subst x86_64,amd64,$(patsubst i%86,386,$(UNAME_M))) 11 | endif 12 | 13 | PREFIX ?= $(shell pwd) 14 | BIN_DIR ?= $(shell pwd) 15 | DOCKER_IMAGE_NAME ?= kafka-exporter 16 | DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) 17 | #TAG := $(shell echo `if [ "$(TRAVIS_BRANCH)" = "master" ] || [ "$(TRAVIS_BRANCH)" = "" ] ; then echo "latest"; else echo $(TRAVIS_BRANCH) ; fi`) 18 | 19 | PUSHTAG ?= type=registry,push=true 20 | DOCKER_PLATFORMS ?= linux/amd64,linux/s390x,linux/arm64,linux/ppc64le 21 | 22 | all: format build test 23 | 24 | style: 25 | @echo ">> checking code style" 26 | @! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' 27 | 28 | test: 29 | @echo ">> running tests" 30 | @$(GO) test -short $(pkgs) 31 | 32 | format: 33 | @echo ">> formatting code" 34 | @$(GO) fmt $(pkgs) 35 | 36 | vet: 37 | @echo ">> vetting code" 38 | @$(GO) vet $(pkgs) 39 | 40 | build: promu 41 | @echo ">> building binaries" 42 | @$(GO) mod vendor 43 | @$(PROMU) build --prefix $(PREFIX) 44 | 45 | 46 | crossbuild: promu 47 | @echo ">> crossbuilding binaries" 48 | @$(PROMU) crossbuild --go=1.24 49 | 50 | tarball: promu 51 | @echo ">> building release tarball" 52 | @$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) 53 | 54 | docker: build 55 | @echo ">> building docker image" 56 | @docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" --build-arg BIN_DIR=. . 57 | 58 | push: crossbuild 59 | @echo ">> building and pushing multi-arch docker images, $(DOCKER_USERNAME),$(DOCKER_IMAGE_NAME),$(GIT_TAG_NAME)" 60 | @echo "$(DOCKER_PASSWORD)" | docker login -u $(DOCKER_USERNAME) --password-stdin 61 | @docker buildx create --use 62 | @docker buildx build -t "$(DOCKER_USERNAME)/$(DOCKER_IMAGE_NAME):$(GIT_TAG_NAME)" \ 63 | --output "$(PUSHTAG)" \ 64 | --platform "$(DOCKER_PLATFORMS)" \ 65 | .
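# Hedged usage sketch (not part of the original Makefile): 'make push' expects
# Docker Hub credentials and a tag name in the environment, which is how the
# build.yml and release.yml workflows invoke it, e.g.:
#   DOCKER_USERNAME=you DOCKER_PASSWORD=... GIT_TAG_NAME=v1.9.0 make push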
66 | 67 | release: promu github-release 68 | @echo ">> building release tarballs and pushing them to GitHub" 69 | @$(PROMU) crossbuild tarballs 70 | @$(PROMU) release .tarballs 71 | 72 | promu: 73 | @GOOS=$(UNAME_S) GOARCH=$(ARCH) $(GO) install github.com/prometheus/promu@v0.14.0 74 | PROMU=$(shell go env GOPATH)/bin/promu 75 | 76 | github-release: 77 | @GOOS=$(shell uname -s | tr A-Z a-z) \ 78 | GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \ 79 | $(GO) install github.com/github-release/github-release@v0.10.0 80 | $(GO) mod tidy 81 | 82 | # Run go fmt against code 83 | .PHONY: fmt 84 | fmt: 85 | @find . -type f -name '*.go'| grep -v "/vendor/" | xargs gofmt -w -s 86 | 87 | # Run go mod tidy against code 88 | .PHONY: tidy 89 | tidy: 90 | @go mod tidy 91 | 92 | # Run golangci-lint against code 93 | .PHONY: lint 94 | lint: golangci-lint 95 | @$(GOLANG_LINT) run \ 96 | --timeout 30m \ 97 | --disable-all \ 98 | -E unused \ 99 | -E ineffassign \ 100 | -E goimports \ 101 | -E gofmt \ 102 | -E misspell \ 103 | -E unparam \ 104 | -E unconvert \ 105 | -E govet \ 106 | -E errcheck 107 | 108 | # Run gosec security checks 109 | .PHONY: sec 110 | sec: gosec 111 | @$(GOSEC) ./... 112 | 113 | # Run staticcheck 114 | .PHONY: staticcheck 115 | staticcheck: staticcheck-bin 116 | @$(STATICCHECK) ./... 117 | 118 | # find or download golangci-lint 119 | # download golangci-lint if necessary 120 | golangci-lint: 121 | ifeq (, $(shell which golangci-lint)) 122 | @GOOS=$(shell uname -s | tr A-Z a-z) \ 123 | GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \ 124 | $(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.64.5 125 | GOLANG_LINT=$(shell go env GOPATH)/bin/golangci-lint 126 | else 127 | GOLANG_LINT=$(shell which golangci-lint) 128 | endif 129 | 130 | # Ensure gosec is installed 131 | gosec: 132 | ifeq (, $(shell which gosec)) 133 | @GOOS=$(shell uname -s | tr A-Z a-z) \ 134 | GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \ 135 | $(GO) install github.com/securego/gosec/v2/cmd/gosec@latest 136 | GOSEC=$(shell go env GOPATH)/bin/gosec 137 | else 138 | GOSEC=$(shell which gosec) 139 | endif 140 | 141 | # Ensure staticcheck is installed 142 | staticcheck-bin: 143 | ifeq (, $(shell which staticcheck)) 144 | @GOOS=$(shell uname -s | tr A-Z a-z) \ 145 | GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \ 146 | $(GO) install honnef.co/go/tools/cmd/staticcheck@latest 147 | STATICCHECK=$(shell go env GOPATH)/bin/staticcheck 148 | else 149 | STATICCHECK=$(shell which staticcheck) 150 | endif 151 | 152 | 153 | .PHONY: all style format build test vet tarball docker promu crossbuild push release github-release sec staticcheck 154 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![kafka_exporter](https://socialify.git.ci/danielqsj/kafka_exporter/image?description=1&font=Inter&forks=1&pattern=Signal&stargazers=1&theme=Light) 2 | 3 | kafka_exporter 4 | ============== 5 | 6 | [![CI](https://github.com/danielqsj/kafka_exporter/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/danielqsj/kafka_exporter/actions/workflows/ci.yml)[![Docker Pulls](https://img.shields.io/docker/pulls/danielqsj/kafka-exporter.svg)](https://hub.docker.com/r/danielqsj/kafka-exporter)[![Go Report
Card](https://goreportcard.com/badge/github.com/danielqsj/kafka_exporter)](https://goreportcard.com/report/github.com/danielqsj/kafka_exporter)[![Language](https://img.shields.io/badge/language-Go-red.svg)](https://github.com/danielqsj/kafka-exporter)[![GitHub release](https://img.shields.io/badge/release-1.9.0-green.svg)](https://github.com/danielqsj/kafka-exporter/releases)[![License](https://img.shields.io/badge/license-Apache%202-4EB1BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html) 7 | 8 | Kafka exporter for Prometheus. For other metrics from Kafka, have a look at the [JMX exporter](https://github.com/prometheus/jmx_exporter). 9 | 10 | Table of Contents 11 | ----------------- 12 | 13 | - [Compatibility](#compatibility) 14 | - [Dependency](#dependency) 15 | - [Download](#download) 16 | - [Compile](#compile) 17 | - [Build Binary](#build-binary) 18 | - [Build Docker Image](#build-docker-image) - [Docker Hub Image](#docker-hub-image) 19 | - [Run](#run) 20 | - [Run Binary](#run-binary) 21 | - [Run Docker Image](#run-docker-image) 22 | - [Run Docker Compose](#run-docker-compose) 23 | - [Flags](#flags) 24 | - [Notes](#notes) 25 | - [Metrics](#metrics) 26 | - [Brokers](#brokers) 27 | - [Topics](#topics) 28 | - [Consumer Groups](#consumer-groups) 29 | - [Grafana Dashboard](#grafana-dashboard) 30 | - [Contribute](#contribute) 31 | - [Donation](#donation) 32 | - [License](#license) 33 | 34 | Compatibility 35 | ------------- 36 | 37 | Supports [Apache Kafka](https://kafka.apache.org) version 0.10.1.0 and later. 38 | 39 | Dependency 40 | ---------- 41 | 42 | - [Prometheus](https://prometheus.io) 43 | - [Sarama](https://shopify.github.io/sarama) 44 | - [Golang](https://golang.org) 45 | 46 | Download 47 | -------- 48 | 49 | Binaries can be downloaded from the [Releases](https://github.com/danielqsj/kafka_exporter/releases) page. 50 | 51 | Compile 52 | ------- 53 | 54 | ### Build Binary 55 | 56 | ```shell 57 | make 58 | ``` 59 | 60 | ### Build Docker Image 61 | 62 | ```shell 63 | make docker 64 | ``` 65 | 66 | Docker Hub Image 67 | ---------------- 68 | 69 | ```shell 70 | docker pull danielqsj/kafka-exporter:latest 71 | ``` 72 | 73 | It can be used directly instead of having to build the image yourself ([Docker Hub: danielqsj/kafka-exporter](https://hub.docker.com/r/danielqsj/kafka-exporter)). 74 | 75 | Run 76 | --- 77 | 78 | ### Run Binary 79 | 80 | ```shell 81 | kafka_exporter --kafka.server=kafka:9092 [--kafka.server=another-server ...] 82 | ``` 83 | 84 | ### Run Docker Image 85 | 86 | ```shell 87 | docker run -ti --rm -p 9308:9308 danielqsj/kafka-exporter --kafka.server=kafka:9092 [--kafka.server=another-server ...]
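# Hedged illustration (not from the original README): the SASL and TLS flags
# documented under "Flags" below can be passed the same way; the credentials
# here are placeholders, not real values.
docker run -ti --rm -p 9308:9308 danielqsj/kafka-exporter \
    --kafka.server=kafka:9092 \
    --sasl.enabled --sasl.username=myuser --sasl.password=mypassword \
    --tls.enabled --tls.insecure-skip-tls-verify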
88 | ``` 89 | 90 | ### Run Docker Compose 91 | Make a `docker-compose.yml` file 92 | ```yaml 93 | services: 94 | kafka-exporter: 95 | image: danielqsj/kafka-exporter 96 | command: ["--kafka.server=kafka:9092", "[--kafka.server=another-server ...]"] 97 | ports: 98 | - 9308:9308 99 | ``` 100 | Then run it: 101 | ```shell 102 | docker-compose up -d 103 | ``` 104 | 105 | Flags 106 | ----- 107 | 108 | This exporter is configurable using the following flags: 109 | 110 | | Flag name | Default | Description | 111 | |-----------|---------|-------------| 112 | | kafka.server | kafka:9092 | Address (host:port) of Kafka server; may be repeated | 113 | | kafka.version | 2.0.0 | Kafka broker version | 114 | | sasl.enabled | false | Connect using SASL/PLAIN | 115 | | sasl.handshake | true | Only set this to false if using a non-Kafka SASL proxy | 116 | | sasl.username | | SASL user name | 117 | | sasl.password | | SASL user password | 118 | | sasl.mechanism | plain | SASL SCRAM SHA algorithm: sha256 or sha512, or SASL mechanism: gssapi or awsiam | 119 | | sasl.aws-region | AWS_REGION env | The AWS region for IAM SASL authentication | 120 | | sasl.service-name | | Service name when using Kerberos Auth | 121 | | sasl.kerberos-config-path | | Kerberos config path | 122 | | sasl.realm | | Kerberos realm | 123 | | sasl.keytab-path | | Kerberos keytab file path | 124 | | sasl.kerberos-auth-type | | Kerberos auth type. Either 'keytabAuth' or 'userAuth' | 125 | | tls.enabled | false | Connect to Kafka using TLS | 126 | | tls.server-name | | Used to verify the hostname on the returned certificates unless tls.insecure-skip-tls-verify is given. The Kafka server's name should be given | 127 | | tls.ca-file | | The optional certificate authority file for Kafka TLS client authentication | 128 | | tls.cert-file | | The optional certificate file for Kafka client authentication | 129 | | tls.key-file | | The optional key file for Kafka client authentication | 130 | | tls.insecure-skip-tls-verify | false | If true, the server's certificate will not be checked for validity | 131 | | server.tls.enabled | false | Enable TLS for the web server | 132 | | server.tls.mutual-auth-enabled | false | Enable TLS client mutual authentication | 133 | | server.tls.ca-file | | The certificate authority file for the web server | 134 | | server.tls.cert-file | | The certificate file for the web server | 135 | | server.tls.key-file | | The key file for the web server | 136 | | topic.filter | .* | Regex that determines which topics to collect | 137 | | topic.exclude | ^$ | Regex that determines which topics to exclude | 138 | | group.filter | .* | Regex that determines which consumer groups to collect | 139 | | group.exclude | ^$ | Regex that determines which consumer groups to exclude | 140 | | web.listen-address | :9308 | Address to listen on for the web interface and telemetry | 141 | | web.telemetry-path | /metrics | Path under which to expose metrics | 142 | | log.enable-sarama | false | Turn on Sarama logging | 143 | | use.consumelag.zookeeper | false | Whether to collect consumer group lag from ZooKeeper | 144 | | zookeeper.server | localhost:2181 | Address (hosts) of the ZooKeeper server | 145 | | kafka.labels | | Kafka cluster name | 146 | | refresh.metadata | 30s | Metadata refresh interval | 147 | | offset.show-all | true | Whether to show the offset/lag for all consumer groups; otherwise, only connected consumer groups are shown | 148 | |
concurrent.enable | false | If true, all scrapes will trigger Kafka operations; otherwise, they will share results. WARNING: this should be disabled on large clusters | 149 | | topic.workers | 100 | Number of topic workers | 150 | | verbosity | 0 | Verbosity log level | 151 | 152 | ### Notes 153 | 154 | Boolean values are uniquely managed by [Kingpin](https://github.com/alecthomas/kingpin/blob/master/README.md#boolean-values). Each boolean flag will have a negative complement: 155 | `--<flag-name>` and `--no-<flag-name>`. 156 | 157 | For example: 158 | 159 | If you need to disable `sasl.handshake`, you can add the flag `--no-sasl.handshake`. 160 | 161 | Metrics 162 | ------- 163 | 164 | Documentation about the exposed Prometheus metrics. 165 | 166 | For details on the underlying metrics please see [Apache Kafka](https://kafka.apache.org/documentation). 167 | 168 | ### Brokers 169 | 170 | **Metrics details** 171 | 172 | | Name | Exposed information | 173 | |------|---------------------| 174 | | `kafka_brokers` | Number of Brokers in the Kafka Cluster | 175 | | `kafka_broker_info` | Information about the Kafka Broker | 176 | 177 | **Metrics output example** 178 | 179 | ```txt 180 | # HELP kafka_brokers Number of Brokers in the Kafka Cluster. 181 | # TYPE kafka_brokers gauge 182 | kafka_brokers 3 183 | # HELP kafka_broker_info Information about the Kafka Broker. 184 | # TYPE kafka_broker_info gauge 185 | kafka_broker_info{address="b-1.kafka-example.org:9092",id="1"} 1 186 | kafka_broker_info{address="b-2.kafka-example.org:9092",id="2"} 2 187 | kafka_broker_info{address="b-3.kafka-example.org:9092",id="3"} 3 188 | ``` 189 | 190 | ### Topics 191 | 192 | **Required permissions** 193 | 194 | Describe all topics. 195 | 196 | **Metrics details** 197 | 198 | | Name | Exposed information | 199 | |------|---------------------| 200 | | `kafka_topic_partitions` | Number of partitions for this Topic | 201 | | `kafka_topic_partition_current_offset` | Current Offset of a Broker at Topic/Partition | 202 | | `kafka_topic_partition_oldest_offset` | Oldest Offset of a Broker at Topic/Partition | 203 | | `kafka_topic_partition_in_sync_replica` | Number of In-Sync Replicas for this Topic/Partition | 204 | | `kafka_topic_partition_leader` | Leader Broker ID of this Topic/Partition | 205 | | `kafka_topic_partition_leader_is_preferred` | 1 if Topic/Partition is using the Preferred Broker | 206 | | `kafka_topic_partition_replicas` | Number of Replicas for this Topic/Partition | 207 | | `kafka_topic_partition_under_replicated_partition` | 1 if Topic/Partition is under Replicated | 208 | 209 | **Metrics output example** 210 | 211 | ```txt 212 | # HELP kafka_topic_partitions Number of partitions for this Topic 213 | # TYPE kafka_topic_partitions gauge 214 | kafka_topic_partitions{topic="__consumer_offsets"} 50 215 | 216 | # HELP kafka_topic_partition_current_offset Current Offset of a Broker at Topic/Partition 217 | # TYPE kafka_topic_partition_current_offset gauge 218 | kafka_topic_partition_current_offset{partition="0",topic="__consumer_offsets"} 0 219 | 220 | # HELP kafka_topic_partition_oldest_offset Oldest Offset of a Broker at Topic/Partition 221 | # TYPE kafka_topic_partition_oldest_offset gauge 222 | kafka_topic_partition_oldest_offset{partition="0",topic="__consumer_offsets"} 0 223 | 224 | # HELP kafka_topic_partition_in_sync_replica Number of In-Sync Replicas for this Topic/Partition 225 | # TYPE kafka_topic_partition_in_sync_replica
gauge 226 | kafka_topic_partition_in_sync_replica{partition="0",topic="__consumer_offsets"} 3 227 | 228 | # HELP kafka_topic_partition_leader Leader Broker ID of this Topic/Partition 229 | # TYPE kafka_topic_partition_leader gauge 230 | kafka_topic_partition_leader{partition="0",topic="__consumer_offsets"} 0 231 | 232 | # HELP kafka_topic_partition_leader_is_preferred 1 if Topic/Partition is using the Preferred Broker 233 | # TYPE kafka_topic_partition_leader_is_preferred gauge 234 | kafka_topic_partition_leader_is_preferred{partition="0",topic="__consumer_offsets"} 1 235 | 236 | # HELP kafka_topic_partition_replicas Number of Replicas for this Topic/Partition 237 | # TYPE kafka_topic_partition_replicas gauge 238 | kafka_topic_partition_replicas{partition="0",topic="__consumer_offsets"} 3 239 | 240 | # HELP kafka_topic_partition_under_replicated_partition 1 if Topic/Partition is under Replicated 241 | # TYPE kafka_topic_partition_under_replicated_partition gauge 242 | kafka_topic_partition_under_replicated_partition{partition="0",topic="__consumer_offsets"} 0 243 | ``` 244 | 245 | ### Consumer Groups 246 | 247 | **Required permissions** 248 | 249 | Describe all groups. 250 | 251 | **Metrics details** 252 | 253 | | Name | Exposed information | 254 | |------|---------------------| 255 | | `kafka_consumergroup_current_offset` | Current Offset of a ConsumerGroup at Topic/Partition | 256 | | `kafka_consumergroup_current_offset_sum` | Current Offset of a ConsumerGroup at Topic for all partitions | 257 | | `kafka_consumergroup_lag` | Current Approximate Lag of a ConsumerGroup at Topic/Partition | 258 | | `kafka_consumergroup_lag_sum` | Current Approximate Lag of a ConsumerGroup at Topic for all partitions | 259 | | `kafka_consumergroupzookeeper_lag_zookeeper` | Current Approximate Lag (ZooKeeper) of a ConsumerGroup at Topic/Partition | 260 | | `kafka_consumergroup_members` | Amount of members in a consumer group | 261 | 262 | #### Important Note 263 | 264 | To collect the metric `kafka_consumergroupzookeeper_lag_zookeeper`, you must set the following flags: 265 | 266 | * `use.consumelag.zookeeper`: enables collecting consumer lag from ZooKeeper 267 | * `zookeeper.server`: address of the ZooKeeper server 268 | 269 | **Metrics output example** 270 | 271 | ```txt 272 | # HELP kafka_consumergroup_current_offset Current Offset of a ConsumerGroup at Topic/Partition 273 | # TYPE kafka_consumergroup_current_offset gauge 274 | kafka_consumergroup_current_offset{consumergroup="KMOffsetCache-kafka-manager-3806276532-ml44w",partition="0",topic="__consumer_offsets"} -1 275 | 276 | # HELP kafka_consumergroup_current_offset_sum Current Offset of a ConsumerGroup at Topic for all partitions 277 | # TYPE kafka_consumergroup_current_offset_sum gauge 278 | kafka_consumergroup_current_offset_sum{consumergroup="KMOffsetCache-kafka-manager-3806276532-ml44w",topic="__consumer_offsets"} -1 279 | 280 | # HELP kafka_consumergroup_lag Current Approximate Lag of a ConsumerGroup at Topic/Partition 281 | # TYPE kafka_consumergroup_lag gauge 282 | kafka_consumergroup_lag{consumergroup="KMOffsetCache-kafka-manager-3806276532-ml44w",partition="0",topic="__consumer_offsets"} 1 283 | 284 | # HELP kafka_consumergroup_lag_sum Current Approximate Lag of a ConsumerGroup at Topic for all partitions 285 | # TYPE kafka_consumergroup_lag_sum gauge 286 |
kafka_consumergroup_lag_sum{consumergroup="KMOffsetCache-kafka-manager-3806276532-ml44w",topic="__consumer_offsets"} 1 287 | 288 | # HELP kafka_consumergroup_members Amount of members in a consumer group 289 | # TYPE kafka_consumergroup_members gauge 290 | kafka_consumergroup_members{consumergroup="KMOffsetCache-kafka-manager-3806276532-ml44w"} 1 291 | 292 | ``` 293 | 294 | #### Not seeing any consumer group or lag information? 295 | 296 | Consumer group metrics are only available when at least one consumer is connected with a consumer group. 297 | 298 | Run a consumer with a consumer group using the command line tool: 299 | ```bash 300 | kafka-console-consumer.sh \ 301 | --consumer.config /path/to/client.properties \ 302 | --bootstrap-server localhost:9092 \ 303 | --topic test \ 304 | --group test-consumer-group \ 305 | --from-beginning 306 | ``` 307 | 308 | Grafana Dashboard 309 | ------- 310 | 311 | Grafana Dashboard ID: 7589, name: Kafka Exporter Overview. 312 | 313 | For details of the dashboard please see [Kafka Exporter Overview](https://grafana.com/grafana/dashboards/7589-kafka-exporter-overview/). 314 | 315 | Contribute 316 | ---------- 317 | 318 | If you like Kafka Exporter, please give it a star. This will help more people discover Kafka Exporter. 319 | 320 | Please feel free to send me [pull requests](https://github.com/danielqsj/kafka_exporter/pulls). 321 | 322 | Contributors ✨ 323 | ---------- 324 | 325 | Thanks goes to these wonderful people: 326 | 327 | 328 | 329 | 330 | 331 | Star ⭐ 332 | ---------- 333 | 334 | [![Stargazers over time](https://starchart.cc/danielqsj/kafka_exporter.svg)](https://starchart.cc/danielqsj/kafka_exporter) 335 | 336 | Donation 337 | -------- 338 | 339 | Your donation will encourage me to continue to improve Kafka Exporter. Alipay donations are supported. 340 | 341 | ![](https://github.com/danielqsj/kafka_exporter/raw/master/alipay.jpg) 342 | 343 | License 344 | ------- 345 | 346 | Code is licensed under the [Apache License 2.0](https://github.com/danielqsj/kafka_exporter/blob/master/LICENSE). 347 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 1.9.0 2 | -------------------------------------------------------------------------------- /alipay.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielqsj/kafka_exporter/8ec24078707d4d3b349e71522ebd7f179746072d/alipay.jpg -------------------------------------------------------------------------------- /charts/kafka-exporter/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line.
.DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /charts/kafka-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: kafka-exporter 5 | version: 1.2.0 6 | home: https://github.com/abhishekjiitr/kafka-exporter-helm 7 | maintainers: 8 | - name: abhishekjiitr 9 | email: abhi2254015@gmail.com 10 | -------------------------------------------------------------------------------- /charts/kafka-exporter/README.md: -------------------------------------------------------------------------------- 1 | HELM INSTALL 2 | ============== 3 | 4 | ### Install Basic 5 | 6 | ```shell 7 | helm upgrade -i kafka-exporter kafka_exporter/charts/kafka-exporter --namespace=kafka-exporter --create-namespace \ 8 | --set kafkaExporter.kafka.servers="{kafka1:9092,kafka2:9092,.....}" 9 | ``` 10 | 11 | ### Install with Datadog support 12 | 13 | ```shell 14 | helm upgrade -i kafka-exporter kafka_exporter/charts/kafka-exporter --namespace=kafka-exporter --create-namespace \ 15 | --set kafkaExporter.kafka.servers="{kafka1:9092,kafka2:9092,.....}" \ 16 | --set datadog.prefix=testing-kafka-cluster \ 17 | --set datadog.use_datadog=true \ 18 | --set prometheus.serviceMonitor.enabled=false 19 | ``` 20 | 21 | Sample Datadog collector installation: 22 | ```shell 23 | helm repo add datadog https://helm.datadoghq.com 24 | helm repo update 25 | 26 | helm upgrade -i datadog datadog/datadog --namespace=kafka-exporter \ 27 | --set datadog.apiKey= \ 28 | --set targetSystem=linux \ 29 | --set datadog.prometheusScrape.enabled=true \ 30 | --set datadog.prometheusScrape.serviceEndpoints=true 31 | ``` 32 | 33 | 34 | ### Install with Azure Managed Prometheus support 35 | 36 | ```shell 37 | helm upgrade -i kafka-exporter kafka_exporter/charts/kafka-exporter --namespace=kafka-exporter --create-namespace \ 38 | --set kafkaExporter.kafka.servers="{kafka1:9092,kafka2:9092,.....}" --set prometheus.serviceMonitor.enabled=true --set azuremanagedprometheus.use_azuremanagedprometheus=true 39 | ``` 40 | -------------------------------------------------------------------------------- /charts/kafka-exporter/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. To see the metrics 2 | {{- if contains "ClusterIP" .Values.service.type }} 3 | kubectl port-forward svc/{{ include "kafka-exporter.fullname" . }} {{ .Values.service.port }} 4 | echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application" 5 | {{- end }} 6 | -------------------------------------------------------------------------------- /charts/kafka-exporter/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "kafka-exporter.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "kafka-exporter.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "kafka-exporter.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /charts/kafka-exporter/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "kafka-exporter.fullname" . }} 5 | labels: 6 | app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} 7 | helm.sh/chart: {{ include "kafka-exporter.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | {{- if .Values.labels -}} 11 | {{ .Values.labels | toYaml | nindent 4 -}} 12 | {{- end }} 13 | spec: 14 | replicas: {{ .Values.replicaCount }} 15 | selector: 16 | matchLabels: 17 | app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} 18 | app.kubernetes.io/instance: {{ .Release.Name }} 19 | template: 20 | metadata: 21 | {{- if eq .Values.datadog.use_datadog true }} 22 | annotations: 23 | ad.datadoghq.com/{{ .Chart.Name }}.check_names: | 24 | ["openmetrics"] 25 | ad.datadoghq.com/{{ .Chart.Name }}.init_configs: | 26 | [{}] 27 | ad.datadoghq.com/{{ .Chart.Name }}.instances: | 28 | [ 29 | { 30 | "openmetrics_endpoint": "http://{{ include "kafka-exporter.fullname" . }}:{{ .Values.service.port }}/metrics", 31 | "namespace": "{{ .Values.datadog.prefix }}", 32 | "metrics": {{ .Values.datadog.metrics | toJson | nindent 16 -}} 33 | } 34 | ] 35 | {{- end }} 36 | labels: 37 | app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} 38 | app.kubernetes.io/instance: {{ .Release.Name }} 39 | {{- if .Values.podLabels -}} 40 | {{ .Values.podLabels | toYaml | nindent 8 -}} 41 | {{- end }} 42 | spec: 43 | containers: 44 | - name: {{ .Chart.Name }} 45 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 46 | imagePullPolicy: {{ .Values.image.pullPolicy }} 47 | args: 48 | {{- if .Values.kafkaExporter}} 49 | {{- range .Values.kafkaExporter.kafka.servers }} 50 | - "--kafka.server={{ . 
}}" 51 | {{- end }} 52 | {{- if .Values.kafkaExporter.kafka.version }} 53 | - --kafka.version={{ .Values.kafkaExporter.kafka.version }} 54 | {{- end }} 55 | {{- end}} 56 | {{- if .Values.kafkaExporter.sasl.enabled }} 57 | - --sasl.enabled 58 | {{- if not .Values.kafkaExporter.sasl.handshake }} 59 | - --sasl.handshake=false 60 | {{- end }} 61 | - --sasl.username={{ .Values.kafkaExporter.sasl.username }} 62 | - --sasl.password={{ .Values.kafkaExporter.sasl.password }} 63 | - --sasl.mechanism={{ .Values.kafkaExporter.sasl.mechanism }} 64 | {{- end }} 65 | {{- if .Values.kafkaExporter.tls.enabled}} 66 | - --tls.enabled 67 | {{- if .Values.kafkaExporter.tls.insecureSkipTlsVerify}} 68 | - --tls.insecure-skip-tls-verify 69 | {{- else }} 70 | - --tls.ca-file=/etc/tls-certs/ca-file 71 | - --tls.cert-file=/etc/tls-certs/cert-file 72 | - --tls.key-file=/etc/tls-certs/key-file 73 | {{- end }} 74 | {{- end }} 75 | {{- if .Values.kafkaExporter.log }} 76 | - --verbosity={{ .Values.kafkaExporter.log.verbosity }} 77 | {{- end }} 78 | {{- if .Values.kafkaExporter.log.enableSarama }} 79 | - --log.enable-sarama 80 | {{- end }} 81 | ports: 82 | - name: metrics 83 | containerPort: 9308 84 | protocol: TCP 85 | livenessProbe: 86 | failureThreshold: 1 87 | httpGet: 88 | path: /healthz 89 | port: metrics 90 | scheme: HTTP 91 | initialDelaySeconds: 3 92 | periodSeconds: 30 93 | successThreshold: 1 94 | timeoutSeconds: 9 95 | readinessProbe: 96 | failureThreshold: 1 97 | httpGet: 98 | path: /healthz 99 | port: metrics 100 | scheme: HTTP 101 | initialDelaySeconds: 3 102 | periodSeconds: 15 103 | successThreshold: 1 104 | timeoutSeconds: 9 105 | 106 | {{- if and .Values.kafkaExporter.tls.enabled (not .Values.kafkaExporter.tls.insecureSkipTlsVerify) }} 107 | volumeMounts: 108 | - name: tls-certs 109 | mountPath: "/etc/tls-certs/" 110 | readOnly: true 111 | {{- end }} 112 | resources: 113 | {{- toYaml .Values.resources | nindent 12 }} 114 | {{- with .Values.nodeSelector }} 115 | 116 | nodeSelector: 117 | {{- toYaml . | nindent 8 }} 118 | {{- end }} 119 | {{- with .Values.affinity }} 120 | affinity: 121 | {{- toYaml . | nindent 8 }} 122 | {{- end }} 123 | {{- with .Values.tolerations }} 124 | tolerations: 125 | {{- toYaml . | nindent 8 }} 126 | {{- end }} 127 | {{- if and .Values.kafkaExporter.tls.enabled (not .Values.kafkaExporter.tls.insecureSkipTlsVerify) }} 128 | volumes: 129 | - name: tls-certs 130 | secret: 131 | secretName: {{ include "kafka-exporter.fullname" . }} 132 | {{- end }} 133 | -------------------------------------------------------------------------------- /charts/kafka-exporter/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kafkaExporter.tls.enabled (not .Values.kafkaExporter.tls.insecureSkipTlsVerify) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ include "kafka-exporter.fullname" . }} 6 | labels: 7 | app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} 8 | helm.sh/chart: {{ include "kafka-exporter.chart" . 
}} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | {{- if .Values.labels -}} 12 | {{- .Values.labels | toYaml | nindent 4 }} 13 | {{- end }} 14 | data: 15 | ca-file: {{ .Values.kafkaExporter.tls.caFile | b64enc }} 16 | cert-file: {{ .Values.kafkaExporter.tls.certFile | b64enc }} 17 | key-file: {{ .Values.kafkaExporter.tls.keyFile | b64enc }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /charts/kafka-exporter/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "kafka-exporter.fullname" . }} 5 | labels: 6 | app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} 7 | helm.sh/chart: {{ include "kafka-exporter.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | {{- if .Values.labels -}} 11 | {{ .Values.labels | toYaml | nindent 4 -}} 12 | {{- end }} 13 | spec: 14 | type: {{ .Values.service.type }} 15 | ports: 16 | - port: {{ .Values.service.port }} 17 | targetPort: metrics 18 | protocol: TCP 19 | name: metrics 20 | selector: 21 | app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} 22 | app.kubernetes.io/instance: {{ .Release.Name }} 23 | -------------------------------------------------------------------------------- /charts/kafka-exporter/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheus.serviceMonitor.enabled }} 2 | {{- if .Values.azuremanagedprometheus.use_azuremanagedprometheus }} 3 | apiVersion: azmonitoring.coreos.com/v1 4 | {{- else }} 5 | apiVersion: monitoring.coreos.com/v1 6 | {{- end }} 7 | kind: ServiceMonitor 8 | metadata: 9 | name: {{ include "kafka-exporter.fullname" . }} 10 | {{- if .Values.prometheus.serviceMonitor.namespace }} 11 | namespace: {{ .Values.prometheus.serviceMonitor.namespace }} 12 | {{- end }} 13 | labels: 14 | app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} 15 | helm.sh/chart: {{ include "kafka-exporter.chart" . }} 16 | app.kubernetes.io/instance: {{ .Release.Name }} 17 | app.kubernetes.io/managed-by: {{ .Release.Service }} 18 | {{- if .Values.labels -}} 19 | {{ .Values.labels | toYaml | nindent 4 -}} 20 | {{- end }} 21 | {{- if .Values.prometheus.serviceMonitor.additionalLabels }} 22 | {{ toYaml .Values.prometheus.serviceMonitor.additionalLabels | indent 4 -}} 23 | {{- end }} 24 | spec: 25 | jobLabel: jobLabel 26 | selector: 27 | matchLabels: 28 | app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} 29 | helm.sh/chart: {{ include "kafka-exporter.chart" . 
}} 30 | namespaceSelector: 31 | matchNames: 32 | - {{ .Release.Namespace }} 33 | endpoints: 34 | - port: metrics 35 | interval: {{ .Values.prometheus.serviceMonitor.interval }} 36 | {{- if .Values.prometheus.serviceMonitor.scrapeTimeout }} 37 | scrapeTimeout: {{ .Values.prometheus.serviceMonitor.scrapeTimeout }} 38 | {{- end }} 39 | {{- if .Values.prometheus.serviceMonitor.metricRelabelings }} 40 | metricRelabelings: 41 | {{- toYaml .Values.prometheus.serviceMonitor.metricRelabelings | nindent 4 }} 42 | {{- end }} 43 | {{- end }} 44 | -------------------------------------------------------------------------------- /charts/kafka-exporter/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for kafka-exporter. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: danielqsj/kafka-exporter 9 | tag: latest 10 | pullPolicy: IfNotPresent 11 | 12 | nameOverride: "" 13 | fullnameOverride: "" 14 | 15 | service: 16 | type: ClusterIP 17 | port: 9308 18 | 19 | kafkaExporter: 20 | kafka: 21 | servers: [] 22 | # - kafka:9092 23 | # version: "1.0.0" 24 | 25 | sasl: 26 | enabled: false 27 | handshake: true 28 | username: "" 29 | password: "" 30 | mechanism: "" 31 | 32 | tls: 33 | enabled: false 34 | insecureSkipTlsVerify: false 35 | caFile: "" 36 | certFile: "" 37 | keyFile: "" 38 | 39 | log: 40 | verbosity: 0 41 | enableSarama: false 42 | 43 | prometheus: 44 | serviceMonitor: 45 | enabled: true 46 | namespace: monitoring 47 | interval: "30s" 48 | additionalLabels: 49 | app: kafka-exporter 50 | metricRelabelings: {} 51 | 52 | labels: {} 53 | podLabels: {} 54 | 55 | # Adds in Datadog annotations needed to scrape the prometheus endpoint. 56 | # prefix is required. If added, will provide a metric such as test.kafka_brokers. 57 | # Example metrics below. map metric name to a potential new metric name in dd. 58 | datadog: 59 | use_datadog: false 60 | prefix: 61 | metrics: [ 62 | {"kafka_brokers": "kafka_brokers"}, 63 | {"kafka_consumergroup_lag": "kafka_consumergroup_lag"}, 64 | {"kafka_consumergroup_current_offset": "kafka_consumergroup_current_offset"} 65 | ] 66 | 67 | # Add support for azure managed prometheus by creating service monitor with the supported api group 68 | azuremanagedprometheus: 69 | use_azuremanagedprometheus: false 70 | 71 | resources: {} 72 | # We usually recommend not to specify default resources and to leave this as a conscious 73 | # choice for the user. This also increases chances charts run on environments with little 74 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 75 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
76 | # limits: 77 | # cpu: 100m 78 | # memory: 128Mi 79 | # requests: 80 | # cpu: 100m 81 | # memory: 128Mi 82 | 83 | nodeSelector: {} 84 | 85 | tolerations: [] 86 | 87 | affinity: {} 88 | -------------------------------------------------------------------------------- /deploy/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kafka-exporter 5 | spec: 6 | replicas: 1 7 | template: 8 | spec: 9 | containers: 10 | - name: kafka-exporter 11 | imagePullPolicy: IfNotPresent 12 | image: danielqsj/kafka-exporter 13 | ports: 14 | - name: http-metrics 15 | containerPort: 9308 16 | protocol: TCP -------------------------------------------------------------------------------- /deploy/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------- 2 | # apiVersion and kind of Kustomization 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | 6 | commonLabels: 7 | app: kafka-exporter 8 | 9 | resources: 10 | - deployment.yaml 11 | - service.yaml 12 | 13 | images: 14 | - name: danielqsj/kafka-exporter 15 | newTag: latest -------------------------------------------------------------------------------- /deploy/base/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kafka-exporter 5 | spec: 6 | ports: 7 | - name: http-metrics 8 | port: 80 9 | protocol: TCP 10 | targetPort: 9308 -------------------------------------------------------------------------------- /dev/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | zookeeper: 4 | image: zookeeper:3.5.8 5 | ports: 6 | - "2181:2181" 7 | kafka: 8 | image: wurstmeister/kafka:2.13-2.7.0 9 | ports: 10 | - "9092:9092" 11 | environment: 12 | DOCKER_API_VERSION: 1.22 13 | KAFKA_ADVERTISED_HOST_NAME: 127.0.0.1 14 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 15 | volumes: 16 | - /var/run/docker.sock:/var/run/docker.sock 17 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/danielqsj/kafka_exporter 2 | 3 | go 1.24 4 | 5 | require ( 6 | github.com/IBM/sarama v1.45.0 7 | github.com/alecthomas/kingpin/v2 v2.4.0 8 | github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 9 | github.com/krallistic/kazoo-go v0.0.0-20170526135507-a15279744f4e 10 | github.com/pkg/errors v0.9.1 11 | github.com/prometheus/client_golang v1.20.0 12 | github.com/prometheus/common v0.55.0 13 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 14 | github.com/xdg-go/scram v1.1.2 15 | k8s.io/klog/v2 v2.130.1 16 | ) 17 | 18 | require ( 19 | github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect 20 | github.com/aws/aws-sdk-go-v2 v1.19.0 // indirect 21 | github.com/aws/aws-sdk-go-v2/config v1.18.28 // indirect 22 | github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect 23 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 // indirect 24 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 // indirect 25 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect 26 | github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 // indirect 27 | 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 // indirect 28 | github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 // indirect 29 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 // indirect 30 | github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 // indirect 31 | github.com/aws/smithy-go v1.13.5 // indirect 32 | github.com/beorn7/perks v1.0.1 // indirect 33 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 34 | github.com/davecgh/go-spew v1.1.1 // indirect 35 | github.com/eapache/go-resiliency v1.7.0 // indirect 36 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect 37 | github.com/eapache/queue v1.1.0 // indirect 38 | github.com/go-kit/log v0.2.1 // indirect 39 | github.com/go-logfmt/logfmt v0.6.0 // indirect 40 | github.com/go-logr/logr v1.4.2 // indirect 41 | github.com/golang/snappy v0.0.4 // indirect 42 | github.com/hashicorp/errwrap v1.1.0 // indirect 43 | github.com/hashicorp/go-multierror v1.1.1 // indirect 44 | github.com/hashicorp/go-uuid v1.0.3 // indirect 45 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 46 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 47 | github.com/jcmturner/gofork v1.7.6 // indirect 48 | github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect 49 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 50 | github.com/klauspost/compress v1.17.11 // indirect 51 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 52 | github.com/pierrec/lz4/v4 v4.1.22 // indirect 53 | github.com/prometheus/client_model v0.6.1 // indirect 54 | github.com/prometheus/procfs v0.15.1 // indirect 55 | github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 // indirect 56 | github.com/xdg-go/pbkdf2 v1.0.0 // indirect 57 | github.com/xdg-go/stringprep v1.0.4 // indirect 58 | github.com/xhit/go-str2duration/v2 v2.1.0 // indirect 59 | golang.org/x/crypto v0.32.0 // indirect 60 | golang.org/x/net v0.34.0 // indirect 61 | golang.org/x/sys v0.29.0 // indirect 62 | golang.org/x/text v0.21.0 // indirect 63 | google.golang.org/protobuf v1.34.2 // indirect 64 | ) 65 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/IBM/sarama v1.45.0 h1:IzeBevTn809IJ/dhNKhP5mpxEXTmELuezO2tgHD9G5E= 2 | github.com/IBM/sarama v1.45.0/go.mod h1:EEay63m8EZkeumco9TDXf2JT3uDnZsZqFgV46n4yZdY= 3 | github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= 4 | github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= 5 | github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= 6 | github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= 7 | github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 h1:UyjtGmO0Uwl/K+zpzPwLoXzMhcN9xmnR2nrqJoBrg3c= 8 | github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0/go.mod h1:TJAXuFs2HcMib3sN5L0gUC+Q01Qvy3DemvA55WuC+iA= 9 | github.com/aws/aws-sdk-go-v2 v1.19.0 h1:klAT+y3pGFBU/qVf1uzwttpBbiuozJYWzNLHioyDJ+k= 10 | github.com/aws/aws-sdk-go-v2 v1.19.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= 11 | github.com/aws/aws-sdk-go-v2/config v1.18.28 h1:TINEaKyh1Td64tqFvn09iYpKiWjmHYrG1fa91q2gnqw= 12 | github.com/aws/aws-sdk-go-v2/config v1.18.28/go.mod h1:nIL+4/8JdAuNHEjn/gPEXqtnS02Q3NXB/9Z7o5xE4+A= 13 | github.com/aws/aws-sdk-go-v2/credentials v1.13.27 
h1:dz0yr/yR1jweAnsCx+BmjerUILVPQ6FS5AwF/OyG1kA= 14 | github.com/aws/aws-sdk-go-v2/credentials v1.13.27/go.mod h1:syOqAek45ZXZp29HlnRS/BNgMIW6uiRmeuQsz4Qh2UE= 15 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 h1:kP3Me6Fy3vdi+9uHd7YLr6ewPxRL+PU6y15urfTaamU= 16 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5/go.mod h1:Gj7tm95r+QsDoN2Fhuz/3npQvcZbkEf5mL70n3Xfluc= 17 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 h1:hMUCiE3Zi5AHrRNGf5j985u0WyqI6r2NULhUfo0N/No= 18 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35/go.mod h1:ipR5PvpSPqIqL5Mi82BxLnfMkHVbmco8kUwO2xrCi0M= 19 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 h1:yOpYx+FTBdpk/g+sBU6Cb1H0U/TLEcYYp66mYqsPpcc= 20 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29/go.mod h1:M/eUABlDbw2uVrdAn+UsI6M727qp2fxkp8K0ejcBDUY= 21 | github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 h1:8r5m1BoAWkn0TDC34lUculryf7nUF25EgIMdjvGCkgo= 22 | github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36/go.mod h1:Rmw2M1hMVTwiUhjwMoIBFWFJMhvJbct06sSidxInkhY= 23 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 h1:IiDolu/eLmuB18DRZibj77n1hHQT7z12jnGO7Ze3pLc= 24 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29/go.mod h1:fDbkK4o7fpPXWn8YAPmTieAMuB9mk/VgvW64uaUqxd4= 25 | github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 h1:sWDv7cMITPcZ21QdreULwxOOAmE05JjEsT6fCDtDA9k= 26 | github.com/aws/aws-sdk-go-v2/service/sso v1.12.13/go.mod h1:DfX0sWuT46KpcqbMhJ9QWtxAIP1VozkDWf8VAkByjYY= 27 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 h1:BFubHS/xN5bjl818QaroN6mQdjneYQ+AOx44KNXlyH4= 28 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13/go.mod h1:BzqsVVFduubEmzrVtUFQQIQdFqvUItF8XUq2EnS8Wog= 29 | github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 h1:e5mnydVdCVWxP+5rPAGi2PYxC7u2OZgH1ypC114H04U= 30 | github.com/aws/aws-sdk-go-v2/service/sts v1.19.3/go.mod h1:yVGZA1CPkmUhBdA039jXNJJG7/6t+G+EBWmFq23xqnY= 31 | github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= 32 | github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= 33 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 34 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 35 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 36 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 37 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 38 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 39 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 40 | github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= 41 | github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= 42 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= 43 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= 44 | github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= 45 | github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= 46 | github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= 47 | github.com/fortytw2/leaktest v1.3.0/go.mod 
h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 48 | github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= 49 | github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= 50 | github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= 51 | github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= 52 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 53 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 54 | github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= 55 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 56 | github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 57 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 58 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 59 | github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= 60 | github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= 61 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 62 | github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 63 | github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 64 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 65 | github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 66 | github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 67 | github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= 68 | github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 69 | github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= 70 | github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= 71 | github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= 72 | github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= 73 | github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= 74 | github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= 75 | github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= 76 | github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= 77 | github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= 78 | github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= 79 | github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= 80 | github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= 81 | github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 82 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 83 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= 84 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 85 | github.com/krallistic/kazoo-go 
v0.0.0-20170526135507-a15279744f4e h1:IWiVY66Xy9YrDZ28qJMt1UTlh6x9UGW0aDH/o58CSnA= 86 | github.com/krallistic/kazoo-go v0.0.0-20170526135507-a15279744f4e/go.mod h1:Rq6003vCNoJNrT6ol0hMebQ3GWLWXSHrD/QcMlXt0EE= 87 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 88 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 89 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 90 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 91 | github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= 92 | github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 93 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 94 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 95 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 96 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 97 | github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= 98 | github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= 99 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 100 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 101 | github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= 102 | github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= 103 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 104 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 105 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= 106 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 107 | github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 h1:AJNDS0kP60X8wwWFvbLPwDuojxubj9pbfK7pjHw0vKg= 108 | github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= 109 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 110 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 111 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 112 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 113 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 114 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 115 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 116 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 117 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 118 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 119 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 120 | github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 121 | github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= 122 | github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= 123 | github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= 124 | github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= 125 | github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= 126 | github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= 127 | github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= 128 | github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= 129 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 130 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 131 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 132 | golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= 133 | golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= 134 | golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= 135 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 136 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 137 | golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 138 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 139 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 140 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 141 | golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 142 | golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= 143 | golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= 144 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 145 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 146 | golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= 147 | golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 148 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 149 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 150 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 151 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 152 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 153 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 154 | golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= 155 | golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 156 | golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 157 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 158 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 159 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 160 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 161 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 162 | golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= 163 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 164 | golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= 165 | golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= 166 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 167 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 168 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 169 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 170 | google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= 171 | google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= 172 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 173 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 174 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 175 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 176 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 177 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 178 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 179 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 180 | k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= 181 | k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 182 | -------------------------------------------------------------------------------- /kafka_exporter.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "crypto/x509" 7 | "flag" 8 | "fmt" 9 | "log" 10 | "net/http" 11 | "os" 12 | "regexp" 13 | "strconv" 14 | "strings" 15 | "sync" 16 | "time" 17 | 18 | "github.com/IBM/sarama" 19 | kingpin "github.com/alecthomas/kingpin/v2" 20 | "github.com/aws/aws-msk-iam-sasl-signer-go/signer" 21 | "github.com/krallistic/kazoo-go" 22 | "github.com/pkg/errors" 23 | "github.com/prometheus/client_golang/prometheus" 24 | "github.com/prometheus/client_golang/prometheus/promhttp" 25 | plog "github.com/prometheus/common/promlog" 26 | plogflag "github.com/prometheus/common/promlog/flag" 27 | 28 | versionCollector "github.com/prometheus/client_golang/prometheus/collectors/version" 29 | "github.com/prometheus/common/version" 30 | "github.com/rcrowley/go-metrics" 31 | "k8s.io/klog/v2" 32 | ) 33 | 34 | const ( 35 | namespace = "kafka" 36 | clientID = "kafka_exporter" 37 | ) 38 | 39 | const ( 40 | INFO = 0 41 | DEBUG = 1 
42 | TRACE = 2 43 | ) 44 | 45 | var ( 46 | clusterBrokers *prometheus.Desc 47 | clusterBrokerInfo *prometheus.Desc 48 | topicPartitions *prometheus.Desc 49 | topicCurrentOffset *prometheus.Desc 50 | topicOldestOffset *prometheus.Desc 51 | topicPartitionLeader *prometheus.Desc 52 | topicPartitionReplicas *prometheus.Desc 53 | topicPartitionInSyncReplicas *prometheus.Desc 54 | topicPartitionUsesPreferredReplica *prometheus.Desc 55 | topicUnderReplicatedPartition *prometheus.Desc 56 | consumergroupCurrentOffset *prometheus.Desc 57 | consumergroupCurrentOffsetSum *prometheus.Desc 58 | consumergroupLag *prometheus.Desc 59 | consumergroupLagSum *prometheus.Desc 60 | consumergroupLagZookeeper *prometheus.Desc 61 | consumergroupMembers *prometheus.Desc 62 | ) 63 | 64 | // Exporter collects Kafka stats from the given server and exports them using 65 | // the prometheus metrics package. 66 | type Exporter struct { 67 | client sarama.Client 68 | topicFilter *regexp.Regexp 69 | topicExclude *regexp.Regexp 70 | groupFilter *regexp.Regexp 71 | groupExclude *regexp.Regexp 72 | mu sync.Mutex 73 | useZooKeeperLag bool 74 | zookeeperClient *kazoo.Kazoo 75 | nextMetadataRefresh time.Time 76 | metadataRefreshInterval time.Duration 77 | offsetShowAll bool 78 | topicWorkers int 79 | allowConcurrent bool 80 | sgMutex sync.Mutex 81 | sgWaitCh chan struct{} 82 | sgChans []chan<- prometheus.Metric 83 | consumerGroupFetchAll bool 84 | } 85 | 86 | type kafkaOpts struct { 87 | uri []string 88 | useSASL bool 89 | useSASLHandshake bool 90 | saslUsername string 91 | saslPassword string 92 | saslMechanism string 93 | saslDisablePAFXFast bool 94 | saslAwsRegion string 95 | useTLS bool 96 | tlsServerName string 97 | tlsCAFile string 98 | tlsCertFile string 99 | tlsKeyFile string 100 | serverUseTLS bool 101 | serverMutualAuthEnabled bool 102 | serverTlsCAFile string 103 | serverTlsCertFile string 104 | serverTlsKeyFile string 105 | tlsInsecureSkipTLSVerify bool 106 | kafkaVersion string 107 | useZooKeeperLag bool 108 | uriZookeeper []string 109 | labels string 110 | metadataRefreshInterval string 111 | serviceName string 112 | kerberosConfigPath string 113 | realm string 114 | keyTabPath string 115 | kerberosAuthType string 116 | offsetShowAll bool 117 | topicWorkers int 118 | allowConcurrent bool 119 | allowAutoTopicCreation bool 120 | verbosityLogLevel int 121 | } 122 | 123 | type MSKAccessTokenProvider struct { 124 | region string 125 | } 126 | 127 | func (m *MSKAccessTokenProvider) Token() (*sarama.AccessToken, error) { 128 | token, _, err := signer.GenerateAuthToken(context.TODO(), m.region) 129 | return &sarama.AccessToken{Token: token}, err 130 | } 131 | 132 | // CanReadCertAndKey returns true if the certificate and key files already exists, 133 | // otherwise returns false. If lost one of cert and key, returns error. 134 | func CanReadCertAndKey(certPath, keyPath string) (bool, error) { 135 | certReadable := canReadFile(certPath) 136 | keyReadable := canReadFile(keyPath) 137 | 138 | if !certReadable && !keyReadable { 139 | return false, nil 140 | } 141 | 142 | if !certReadable { 143 | return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath) 144 | } 145 | 146 | if !keyReadable { 147 | return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath) 148 | } 149 | 150 | return true, nil 151 | } 152 | 153 | // If the file represented by path exists and 154 | // readable, returns true otherwise returns false. 
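// Note: this only checks that the file can be opened for reading; it does
// not validate the file's contents.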
155 | func canReadFile(path string) bool { 156 | f, err := os.Open(path) 157 | if err != nil { 158 | return false 159 | } 160 | 161 | defer f.Close() 162 | 163 | return true 164 | } 165 | 166 | // NewExporter returns an initialized Exporter. 167 | func NewExporter(opts kafkaOpts, topicFilter string, topicExclude string, groupFilter string, groupExclude string) (*Exporter, error) { 168 | var zookeeperClient *kazoo.Kazoo 169 | config := sarama.NewConfig() 170 | config.ClientID = clientID 171 | kafkaVersion, err := sarama.ParseKafkaVersion(opts.kafkaVersion) 172 | if err != nil { 173 | return nil, err 174 | } 175 | config.Version = kafkaVersion 176 | 177 | if opts.useSASL { 178 | // Convert to lowercase so that SHA512 and SHA256 is still valid 179 | opts.saslMechanism = strings.ToLower(opts.saslMechanism) 180 | 181 | saslPassword := opts.saslPassword 182 | if saslPassword == "" { 183 | saslPassword = os.Getenv("SASL_USER_PASSWORD") 184 | } 185 | 186 | switch opts.saslMechanism { 187 | case "scram-sha512": 188 | config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} } 189 | config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512) 190 | case "scram-sha256": 191 | config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} } 192 | config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256) 193 | case "gssapi": 194 | config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeGSSAPI) 195 | config.Net.SASL.GSSAPI.ServiceName = opts.serviceName 196 | config.Net.SASL.GSSAPI.KerberosConfigPath = opts.kerberosConfigPath 197 | config.Net.SASL.GSSAPI.Realm = opts.realm 198 | config.Net.SASL.GSSAPI.Username = opts.saslUsername 199 | if opts.kerberosAuthType == "keytabAuth" { 200 | config.Net.SASL.GSSAPI.AuthType = sarama.KRB5_KEYTAB_AUTH 201 | config.Net.SASL.GSSAPI.KeyTabPath = opts.keyTabPath 202 | } else { 203 | config.Net.SASL.GSSAPI.AuthType = sarama.KRB5_USER_AUTH 204 | config.Net.SASL.GSSAPI.Password = saslPassword 205 | } 206 | if opts.saslDisablePAFXFast { 207 | config.Net.SASL.GSSAPI.DisablePAFXFAST = true 208 | } 209 | case "awsiam": 210 | config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeOAuth) 211 | config.Net.SASL.TokenProvider = &MSKAccessTokenProvider{region: opts.saslAwsRegion} 212 | case "plain": 213 | default: 214 | return nil, fmt.Errorf( 215 | `invalid sasl mechanism "%s": can only be "scram-sha256", "scram-sha512", "gssapi", "awsiam" or "plain"`, 216 | opts.saslMechanism, 217 | ) 218 | } 219 | 220 | config.Net.SASL.Enable = true 221 | config.Net.SASL.Handshake = opts.useSASLHandshake 222 | 223 | if opts.saslUsername != "" { 224 | config.Net.SASL.User = opts.saslUsername 225 | } 226 | 227 | if saslPassword != "" { 228 | config.Net.SASL.Password = saslPassword 229 | } 230 | } 231 | 232 | if opts.useTLS { 233 | config.Net.TLS.Enable = true 234 | 235 | config.Net.TLS.Config = &tls.Config{ 236 | ServerName: opts.tlsServerName, 237 | InsecureSkipVerify: opts.tlsInsecureSkipTLSVerify, 238 | } 239 | 240 | if opts.tlsCAFile != "" { 241 | if ca, err := os.ReadFile(opts.tlsCAFile); err == nil { 242 | config.Net.TLS.Config.RootCAs = x509.NewCertPool() 243 | config.Net.TLS.Config.RootCAs.AppendCertsFromPEM(ca) 244 | } else { 245 | return nil, err 246 | } 247 | } 248 | 249 | canReadCertAndKey, err := CanReadCertAndKey(opts.tlsCertFile, opts.tlsKeyFile) 250 | if err != nil { 251 | return nil, errors.Wrap(err, "error 
reading cert and key") 252 | } 253 | if canReadCertAndKey { 254 | cert, err := tls.LoadX509KeyPair(opts.tlsCertFile, opts.tlsKeyFile) 255 | if err == nil { 256 | config.Net.TLS.Config.Certificates = []tls.Certificate{cert} 257 | } else { 258 | return nil, err 259 | } 260 | } 261 | } 262 | 263 | if opts.useZooKeeperLag { 264 | klog.V(DEBUG).Infoln("Using zookeeper lag, so connecting to zookeeper") 265 | zookeeperClient, err = kazoo.NewKazoo(opts.uriZookeeper, nil) 266 | if err != nil { 267 | return nil, errors.Wrap(err, "error connecting to zookeeper") 268 | } 269 | } 270 | 271 | interval, err := time.ParseDuration(opts.metadataRefreshInterval) 272 | if err != nil { 273 | return nil, errors.Wrap(err, "Cannot parse metadata refresh interval") 274 | } 275 | 276 | config.Metadata.RefreshFrequency = interval 277 | 278 | config.Metadata.AllowAutoTopicCreation = opts.allowAutoTopicCreation 279 | 280 | client, err := sarama.NewClient(opts.uri, config) 281 | 282 | if err != nil { 283 | return nil, errors.Wrap(err, "Error Init Kafka Client") 284 | } 285 | 286 | klog.V(TRACE).Infoln("Done Init Clients") 287 | // Init our exporter. 288 | return &Exporter{ 289 | client: client, 290 | topicFilter: regexp.MustCompile(topicFilter), 291 | topicExclude: regexp.MustCompile(topicExclude), 292 | groupFilter: regexp.MustCompile(groupFilter), 293 | groupExclude: regexp.MustCompile(groupExclude), 294 | useZooKeeperLag: opts.useZooKeeperLag, 295 | zookeeperClient: zookeeperClient, 296 | nextMetadataRefresh: time.Now(), 297 | metadataRefreshInterval: interval, 298 | offsetShowAll: opts.offsetShowAll, 299 | topicWorkers: opts.topicWorkers, 300 | allowConcurrent: opts.allowConcurrent, 301 | sgMutex: sync.Mutex{}, 302 | sgWaitCh: nil, 303 | sgChans: []chan<- prometheus.Metric{}, 304 | consumerGroupFetchAll: config.Version.IsAtLeast(sarama.V2_0_0_0), 305 | }, nil 306 | } 307 | 308 | func (e *Exporter) fetchOffsetVersion() int16 { 309 | version := e.client.Config().Version 310 | if e.client.Config().Version.IsAtLeast(sarama.V2_0_0_0) { 311 | return 4 312 | } else if version.IsAtLeast(sarama.V0_10_2_0) { 313 | return 2 314 | } else if version.IsAtLeast(sarama.V0_8_2_2) { 315 | return 1 316 | } 317 | return 0 318 | } 319 | 320 | // Describe describes all the metrics ever exported by the Kafka exporter. It 321 | // implements prometheus.Collector. 322 | func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { 323 | ch <- clusterBrokers 324 | ch <- topicCurrentOffset 325 | ch <- topicOldestOffset 326 | ch <- topicPartitions 327 | ch <- topicPartitionLeader 328 | ch <- topicPartitionReplicas 329 | ch <- topicPartitionInSyncReplicas 330 | ch <- topicPartitionUsesPreferredReplica 331 | ch <- topicUnderReplicatedPartition 332 | ch <- consumergroupCurrentOffset 333 | ch <- consumergroupCurrentOffsetSum 334 | ch <- consumergroupLag 335 | ch <- consumergroupLagZookeeper 336 | ch <- consumergroupLagSum 337 | } 338 | 339 | // Collect fetches the stats from configured Kafka location and delivers them 340 | // as Prometheus metrics. It implements prometheus.Collector. 
341 | func (e *Exporter) Collect(ch chan<- prometheus.Metric) { 342 | if e.allowConcurrent { 343 | e.collect(ch) 344 | return 345 | } 346 | // Locking to avoid race add 347 | e.sgMutex.Lock() 348 | e.sgChans = append(e.sgChans, ch) 349 | // Safe to compare length since we own the Lock 350 | if len(e.sgChans) == 1 { 351 | e.sgWaitCh = make(chan struct{}) 352 | go e.collectChans(e.sgWaitCh) 353 | } else { 354 | klog.V(TRACE).Info("concurrent calls detected, waiting for first to finish") 355 | } 356 | // Put in another variable to ensure not overwriting it in another Collect once we wait 357 | waiter := e.sgWaitCh 358 | e.sgMutex.Unlock() 359 | // Released lock, we have insurance that our chan will be part of the collectChan slice 360 | <-waiter 361 | // collectChan finished 362 | } 363 | 364 | func (e *Exporter) collectChans(quit chan struct{}) { 365 | original := make(chan prometheus.Metric) 366 | container := make([]prometheus.Metric, 0, 100) 367 | go func() { 368 | for metric := range original { 369 | container = append(container, metric) 370 | } 371 | }() 372 | e.collect(original) 373 | close(original) 374 | // Lock to avoid modification on the channel slice 375 | e.sgMutex.Lock() 376 | for _, ch := range e.sgChans { 377 | for _, metric := range container { 378 | ch <- metric 379 | } 380 | } 381 | // Reset the slice 382 | e.sgChans = e.sgChans[:0] 383 | // Notify remaining waiting Collect they can return 384 | close(quit) 385 | // Release the lock so Collect can append to the slice again 386 | e.sgMutex.Unlock() 387 | } 388 | 389 | func (e *Exporter) collect(ch chan<- prometheus.Metric) { 390 | var wg = sync.WaitGroup{} 391 | ch <- prometheus.MustNewConstMetric( 392 | clusterBrokers, prometheus.GaugeValue, float64(len(e.client.Brokers())), 393 | ) 394 | for _, b := range e.client.Brokers() { 395 | ch <- prometheus.MustNewConstMetric( 396 | clusterBrokerInfo, prometheus.GaugeValue, 1, strconv.Itoa(int(b.ID())), b.Addr(), 397 | ) 398 | } 399 | 400 | offset := make(map[string]map[int32]int64) 401 | 402 | now := time.Now() 403 | 404 | if now.After(e.nextMetadataRefresh) { 405 | klog.V(DEBUG).Info("Refreshing client metadata") 406 | 407 | if err := e.client.RefreshMetadata(); err != nil { 408 | klog.Errorf("Cannot refresh topics, using cached data: %v", err) 409 | } 410 | 411 | e.nextMetadataRefresh = now.Add(e.metadataRefreshInterval) 412 | } 413 | 414 | topics, err := e.client.Topics() 415 | if err != nil { 416 | klog.Errorf("Cannot get topics: %v", err) 417 | return 418 | } 419 | 420 | topicChannel := make(chan string) 421 | 422 | getTopicMetrics := func(topic string) { 423 | defer wg.Done() 424 | 425 | if !e.topicFilter.MatchString(topic) || e.topicExclude.MatchString(topic) { 426 | return 427 | } 428 | 429 | partitions, err := e.client.Partitions(topic) 430 | if err != nil { 431 | klog.Errorf("Cannot get partitions of topic %s: %v", topic, err) 432 | return 433 | } 434 | ch <- prometheus.MustNewConstMetric( 435 | topicPartitions, prometheus.GaugeValue, float64(len(partitions)), topic, 436 | ) 437 | e.mu.Lock() 438 | offset[topic] = make(map[int32]int64, len(partitions)) 439 | e.mu.Unlock() 440 | for _, partition := range partitions { 441 | broker, err := e.client.Leader(topic, partition) 442 | if err != nil { 443 | klog.Errorf("Cannot get leader of topic %s partition %d: %v", topic, partition, err) 444 | } else { 445 | ch <- prometheus.MustNewConstMetric( 446 | topicPartitionLeader, prometheus.GaugeValue, float64(broker.ID()), topic, strconv.FormatInt(int64(partition), 10), 447 | ) 448 | 
} 449 | 450 | currentOffset, err := e.client.GetOffset(topic, partition, sarama.OffsetNewest) 451 | if err != nil { 452 | klog.Errorf("Cannot get current offset of topic %s partition %d: %v", topic, partition, err) 453 | } else { 454 | e.mu.Lock() 455 | offset[topic][partition] = currentOffset 456 | e.mu.Unlock() 457 | ch <- prometheus.MustNewConstMetric( 458 | topicCurrentOffset, prometheus.GaugeValue, float64(currentOffset), topic, strconv.FormatInt(int64(partition), 10), 459 | ) 460 | } 461 | 462 | oldestOffset, err := e.client.GetOffset(topic, partition, sarama.OffsetOldest) 463 | if err != nil { 464 | klog.Errorf("Cannot get oldest offset of topic %s partition %d: %v", topic, partition, err) 465 | } else { 466 | ch <- prometheus.MustNewConstMetric( 467 | topicOldestOffset, prometheus.GaugeValue, float64(oldestOffset), topic, strconv.FormatInt(int64(partition), 10), 468 | ) 469 | } 470 | 471 | replicas, err := e.client.Replicas(topic, partition) 472 | if err != nil { 473 | klog.Errorf("Cannot get replicas of topic %s partition %d: %v", topic, partition, err) 474 | } else { 475 | ch <- prometheus.MustNewConstMetric( 476 | topicPartitionReplicas, prometheus.GaugeValue, float64(len(replicas)), topic, strconv.FormatInt(int64(partition), 10), 477 | ) 478 | } 479 | 480 | inSyncReplicas, err := e.client.InSyncReplicas(topic, partition) 481 | if err != nil { 482 | klog.Errorf("Cannot get in-sync replicas of topic %s partition %d: %v", topic, partition, err) 483 | } else { 484 | ch <- prometheus.MustNewConstMetric( 485 | topicPartitionInSyncReplicas, prometheus.GaugeValue, float64(len(inSyncReplicas)), topic, strconv.FormatInt(int64(partition), 10), 486 | ) 487 | } 488 | 489 | if broker != nil && replicas != nil && len(replicas) > 0 && broker.ID() == replicas[0] { 490 | ch <- prometheus.MustNewConstMetric( 491 | topicPartitionUsesPreferredReplica, prometheus.GaugeValue, float64(1), topic, strconv.FormatInt(int64(partition), 10), 492 | ) 493 | } else { 494 | ch <- prometheus.MustNewConstMetric( 495 | topicPartitionUsesPreferredReplica, prometheus.GaugeValue, float64(0), topic, strconv.FormatInt(int64(partition), 10), 496 | ) 497 | } 498 | 499 | if replicas != nil && inSyncReplicas != nil && len(inSyncReplicas) < len(replicas) { 500 | ch <- prometheus.MustNewConstMetric( 501 | topicUnderReplicatedPartition, prometheus.GaugeValue, float64(1), topic, strconv.FormatInt(int64(partition), 10), 502 | ) 503 | } else { 504 | ch <- prometheus.MustNewConstMetric( 505 | topicUnderReplicatedPartition, prometheus.GaugeValue, float64(0), topic, strconv.FormatInt(int64(partition), 10), 506 | ) 507 | } 508 | 509 | if e.useZooKeeperLag { 510 | ConsumerGroups, err := e.zookeeperClient.Consumergroups() 511 | 512 | if err != nil { 513 | klog.Errorf("Cannot get consumer group %v", err) 514 | } 515 | 516 | for _, group := range ConsumerGroups { 517 | offset, _ := group.FetchOffset(topic, partition) 518 | if offset > 0 { 519 | 520 | consumerGroupLag := currentOffset - offset 521 | ch <- prometheus.MustNewConstMetric( 522 | consumergroupLagZookeeper, prometheus.GaugeValue, float64(consumerGroupLag), group.Name, topic, strconv.FormatInt(int64(partition), 10), 523 | ) 524 | } 525 | } 526 | } 527 | } 528 | } 529 | 530 | loopTopics := func() { 531 | ok := true 532 | for ok { 533 | topic, open := <-topicChannel 534 | ok = open 535 | if open { 536 | getTopicMetrics(topic) 537 | } 538 | } 539 | } 540 | 541 | minx := func(x int, y int) int { 542 | if x < y { 543 | return x 544 | } else { 545 | return y 546 | } 547 | } 548 | 549 
| N := len(topics) 550 | if N > 1 { 551 | N = minx(N/2, e.topicWorkers) 552 | } 553 | 554 | for w := 1; w <= N; w++ { 555 | go loopTopics() 556 | } 557 | 558 | for _, topic := range topics { 559 | if e.topicFilter.MatchString(topic) && !e.topicExclude.MatchString(topic) { 560 | wg.Add(1) 561 | topicChannel <- topic 562 | } 563 | } 564 | close(topicChannel) 565 | 566 | wg.Wait() 567 | 568 | getConsumerGroupMetrics := func(broker *sarama.Broker) { 569 | defer wg.Done() 570 | if err := broker.Open(e.client.Config()); err != nil && err != sarama.ErrAlreadyConnected { 571 | klog.Errorf("Cannot connect to broker %d: %v", broker.ID(), err) 572 | return 573 | } 574 | defer broker.Close() 575 | 576 | groups, err := broker.ListGroups(&sarama.ListGroupsRequest{}) 577 | if err != nil { 578 | klog.Errorf("Cannot get consumer group: %v", err) 579 | return 580 | } 581 | groupIds := make([]string, 0) 582 | for groupId := range groups.Groups { 583 | if e.groupFilter.MatchString(groupId) && !e.groupExclude.MatchString(groupId) { 584 | groupIds = append(groupIds, groupId) 585 | } 586 | } 587 | 588 | describeGroups, err := broker.DescribeGroups(&sarama.DescribeGroupsRequest{Groups: groupIds}) 589 | if err != nil { 590 | klog.Errorf("Cannot get describe groups: %v", err) 591 | return 592 | } 593 | for _, group := range describeGroups.Groups { 594 | if group.Err != 0 { 595 | klog.Errorf("Cannot describe for the group %s with error code %d", group.GroupId, group.Err) 596 | continue 597 | } 598 | offsetFetchRequest := sarama.OffsetFetchRequest{ConsumerGroup: group.GroupId, Version: e.fetchOffsetVersion()} 599 | if e.offsetShowAll { 600 | for topic, partitions := range offset { 601 | for partition := range partitions { 602 | offsetFetchRequest.AddPartition(topic, partition) 603 | } 604 | } 605 | } else { 606 | for _, member := range group.Members { 607 | if len(member.MemberAssignment) == 0 { 608 | klog.Warningf("MemberAssignment is empty for group member: %v in group: %v", member.MemberId, group.GroupId) 609 | continue 610 | } 611 | assignment, err := member.GetMemberAssignment() 612 | if err != nil { 613 | klog.Errorf("Cannot get GetMemberAssignment of group member %v : %v", member, err) 614 | continue 615 | } 616 | for topic, partions := range assignment.Topics { 617 | for _, partition := range partions { 618 | offsetFetchRequest.AddPartition(topic, partition) 619 | } 620 | } 621 | } 622 | } 623 | ch <- prometheus.MustNewConstMetric( 624 | consumergroupMembers, prometheus.GaugeValue, float64(len(group.Members)), group.GroupId, 625 | ) 626 | offsetFetchResponse, err := broker.FetchOffset(&offsetFetchRequest) 627 | if err != nil { 628 | klog.Errorf("Cannot get offset of group %s: %v", group.GroupId, err) 629 | continue 630 | } 631 | 632 | for topic, partitions := range offsetFetchResponse.Blocks { 633 | // If the topic is not consumed by that consumer group, skip it 634 | topicConsumed := false 635 | for _, offsetFetchResponseBlock := range partitions { 636 | // Kafka will return -1 if there is no offset associated with a topic-partition under that consumer group 637 | if offsetFetchResponseBlock.Offset != -1 { 638 | topicConsumed = true 639 | break 640 | } 641 | } 642 | if !topicConsumed { 643 | continue 644 | } 645 | 646 | var currentOffsetSum int64 647 | var lagSum int64 648 | for partition, offsetFetchResponseBlock := range partitions { 649 | err := offsetFetchResponseBlock.Err 650 | if err != sarama.ErrNoError { 651 | klog.Errorf("Error for partition %d :%v", partition, err.Error()) 652 | continue 653 | } 
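// Offset is the group's committed offset for this partition; Kafka reports -1
// when the group has no commit, and lag is exported as -1 below in that case.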
654 | currentOffset := offsetFetchResponseBlock.Offset 655 | currentOffsetSum += currentOffset 656 | ch <- prometheus.MustNewConstMetric( 657 | consumergroupCurrentOffset, prometheus.GaugeValue, float64(currentOffset), group.GroupId, topic, strconv.FormatInt(int64(partition), 10), 658 | ) 659 | e.mu.Lock() 660 | currentPartitionOffset, currentPartitionOffsetError := e.client.GetOffset(topic, partition, sarama.OffsetNewest) 661 | if currentPartitionOffsetError != nil { 662 | klog.Errorf("Cannot get current offset of topic %s partition %d: %v", topic, partition, currentPartitionOffsetError) 663 | } else { 664 | var lag int64 665 | if offsetFetchResponseBlock.Offset == -1 { 666 | lag = -1 667 | } else { 668 | if offset, ok := offset[topic][partition]; ok { 669 | if currentPartitionOffset == -1 { 670 | currentPartitionOffset = offset 671 | } 672 | } 673 | lag = currentPartitionOffset - offsetFetchResponseBlock.Offset 674 | lagSum += lag 675 | } 676 | 677 | ch <- prometheus.MustNewConstMetric( 678 | consumergroupLag, prometheus.GaugeValue, float64(lag), group.GroupId, topic, strconv.FormatInt(int64(partition), 10), 679 | ) 680 | } 681 | e.mu.Unlock() 682 | } 683 | ch <- prometheus.MustNewConstMetric( 684 | consumergroupCurrentOffsetSum, prometheus.GaugeValue, float64(currentOffsetSum), group.GroupId, topic, 685 | ) 686 | ch <- prometheus.MustNewConstMetric( 687 | consumergroupLagSum, prometheus.GaugeValue, float64(lagSum), group.GroupId, topic, 688 | ) 689 | } 690 | } 691 | } 692 | 693 | klog.V(DEBUG).Info("Fetching consumer group metrics") 694 | if len(e.client.Brokers()) > 0 { 695 | uniqueBrokerAddresses := make(map[string]bool) 696 | var servers []string 697 | for _, broker := range e.client.Brokers() { 698 | normalizedAddress := strings.ToLower(broker.Addr()) 699 | if !uniqueBrokerAddresses[normalizedAddress] { 700 | uniqueBrokerAddresses[normalizedAddress] = true 701 | servers = append(servers, broker.Addr()) 702 | } 703 | } 704 | klog.Info(servers) 705 | for _, broker := range e.client.Brokers() { 706 | for _, server := range servers { 707 | if server == broker.Addr() { 708 | wg.Add(1) 709 | go getConsumerGroupMetrics(broker) 710 | } 711 | } 712 | } 713 | wg.Wait() 714 | } else { 715 | klog.Errorln("No valid broker, cannot get consumer group metrics") 716 | } 717 | } 718 | 719 | func init() { 720 | metrics.UseNilMetrics = true 721 | prometheus.MustRegister(versionCollector.NewCollector("kafka_exporter")) 722 | } 723 | 724 | //func toFlag(name string, help string) *kingpin.FlagClause { 725 | // flag.CommandLine.String(name, "", help) // hack around flag.Parse and klog.init flags 726 | // return kingpin.Flag(name, help) 727 | //} 728 | 729 | // hack around flag.Parse and klog.init flags 730 | func toFlagString(name string, help string, value string) *string { 731 | flag.CommandLine.String(name, value, help) // hack around flag.Parse and klog.init flags 732 | return kingpin.Flag(name, help).Default(value).String() 733 | } 734 | 735 | func toFlagBool(name string, help string, value bool, valueString string) *bool { 736 | flag.CommandLine.Bool(name, value, help) // hack around flag.Parse and klog.init flags 737 | return kingpin.Flag(name, help).Default(valueString).Bool() 738 | } 739 | 740 | func toFlagStringsVar(name string, help string, value string, target *[]string) { 741 | flag.CommandLine.String(name, value, help) // hack around flag.Parse and klog.init flags 742 | kingpin.Flag(name, help).Default(value).StringsVar(target) 743 | } 744 | 745 | func toFlagStringVar(name string, help 
string, value string, target *string) { 746 | flag.CommandLine.String(name, value, help) // hack around flag.Parse and klog.init flags 747 | kingpin.Flag(name, help).Default(value).StringVar(target) 748 | } 749 | 750 | func toFlagBoolVar(name string, help string, value bool, valueString string, target *bool) { 751 | flag.CommandLine.Bool(name, value, help) // hack around flag.Parse and klog.init flags 752 | kingpin.Flag(name, help).Default(valueString).BoolVar(target) 753 | } 754 | 755 | func toFlagIntVar(name string, help string, value int, valueString string, target *int) { 756 | flag.CommandLine.Int(name, value, help) // hack around flag.Parse and klog.init flags 757 | kingpin.Flag(name, help).Default(valueString).IntVar(target) 758 | } 759 | 760 | func main() { 761 | var ( 762 | listenAddress = toFlagString("web.listen-address", "Address to listen on for web interface and telemetry.", ":9308") 763 | metricsPath = toFlagString("web.telemetry-path", "Path under which to expose metrics.", "/metrics") 764 | topicFilter = toFlagString("topic.filter", "Regex that determines which topics to collect.", ".*") 765 | topicExclude = toFlagString("topic.exclude", "Regex that determines which topics to exclude.", "^$") 766 | groupFilter = toFlagString("group.filter", "Regex that determines which consumer groups to collect.", ".*") 767 | groupExclude = toFlagString("group.exclude", "Regex that determines which consumer groups to exclude.", "^$") 768 | logSarama = toFlagBool("log.enable-sarama", "Turn on Sarama logging, default is false.", false, "false") 769 | 770 | opts = kafkaOpts{} 771 | ) 772 | 773 | toFlagStringsVar("kafka.server", "Address (host:port) of Kafka server.", "kafka:9092", &opts.uri) 774 | toFlagBoolVar("sasl.enabled", "Connect using SASL/PLAIN, default is false.", false, "false", &opts.useSASL) 775 | toFlagBoolVar("sasl.handshake", "Only set this to false if using a non-Kafka SASL proxy, default is true.", true, "true", &opts.useSASLHandshake) 776 | toFlagStringVar("sasl.username", "SASL user name.", "", &opts.saslUsername) 777 | toFlagStringVar("sasl.password", "SASL user password.", "", &opts.saslPassword) 778 | toFlagStringVar("sasl.aws-region", "The AWS region for IAM SASL authentication", os.Getenv("AWS_REGION"), &opts.saslAwsRegion) 779 | toFlagStringVar("sasl.mechanism", "SASL SCRAM SHA algorithm: sha256 or sha512 or SASL mechanism: gssapi or awsiam", "", &opts.saslMechanism) 780 | toFlagStringVar("sasl.service-name", "Service name when using kerberos Auth", "", &opts.serviceName) 781 | toFlagStringVar("sasl.kerberos-config-path", "Kerberos config path", "", &opts.kerberosConfigPath) 782 | toFlagStringVar("sasl.realm", "Kerberos realm", "", &opts.realm) 783 | toFlagStringVar("sasl.kerberos-auth-type", "Kerberos auth type. Either 'keytabAuth' or 'userAuth'", "", &opts.kerberosAuthType) 784 | toFlagStringVar("sasl.keytab-path", "Kerberos keytab file path", "", &opts.keyTabPath) 785 | toFlagBoolVar("sasl.disable-PA-FX-FAST", "Configure the Kerberos client to not use PA_FX_FAST, default is false.", false, "false", &opts.saslDisablePAFXFast) 786 | toFlagBoolVar("tls.enabled", "Connect to Kafka using TLS, default is false.", false, "false", &opts.useTLS) 787 | toFlagStringVar("tls.server-name", "Used to verify the hostname on the returned certificates unless tls.insecure-skip-tls-verify is given. 
The kafka server's name should be given.", "", &opts.tlsServerName) 788 | toFlagStringVar("tls.ca-file", "The optional certificate authority file for Kafka TLS client authentication.", "", &opts.tlsCAFile) 789 | toFlagStringVar("tls.cert-file", "The optional certificate file for Kafka client authentication.", "", &opts.tlsCertFile) 790 | toFlagStringVar("tls.key-file", "The optional key file for Kafka client authentication.", "", &opts.tlsKeyFile) 791 | toFlagBoolVar("server.tls.enabled", "Enable TLS for web server, default is false.", false, "false", &opts.serverUseTLS) 792 | toFlagBoolVar("server.tls.mutual-auth-enabled", "Enable TLS client mutual authentication, default is false.", false, "false", &opts.serverMutualAuthEnabled) 793 | toFlagStringVar("server.tls.ca-file", "The certificate authority file for the web server.", "", &opts.serverTlsCAFile) 794 | toFlagStringVar("server.tls.cert-file", "The certificate file for the web server.", "", &opts.serverTlsCertFile) 795 | toFlagStringVar("server.tls.key-file", "The key file for the web server.", "", &opts.serverTlsKeyFile) 796 | toFlagBoolVar("tls.insecure-skip-tls-verify", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Default is false", false, "false", &opts.tlsInsecureSkipTLSVerify) 797 | toFlagStringVar("kafka.version", "Kafka broker version", sarama.V2_0_0_0.String(), &opts.kafkaVersion) 798 | toFlagBoolVar("use.consumelag.zookeeper", "If true, consumer group lag is also read from ZooKeeper, default is false", false, "false", &opts.useZooKeeperLag) 799 | toFlagStringsVar("zookeeper.server", "Address (hosts) of zookeeper server.", "localhost:2181", &opts.uriZookeeper) 800 | toFlagStringVar("kafka.labels", "Kafka cluster name", "", &opts.labels) 801 | toFlagStringVar("refresh.metadata", "Metadata refresh interval", "30s", &opts.metadataRefreshInterval) 802 | toFlagBoolVar("offset.show-all", "Whether to show the offset/lag for all consumer groups; otherwise only connected consumer groups are shown, default is true", true, "true", &opts.offsetShowAll) 803 | toFlagBoolVar("concurrent.enable", "If true, all scrapes will trigger kafka operations, otherwise they will share results. WARN: This should be disabled on large clusters.
Default is false", false, "false", &opts.allowConcurrent) 804 | toFlagIntVar("topic.workers", "Number of topic workers", 100, "100", &opts.topicWorkers) 805 | toFlagBoolVar("kafka.allow-auto-topic-creation", "If true, the broker may auto-create topics that we requested which do not already exist, default is false.", false, "false", &opts.allowAutoTopicCreation) 806 | toFlagIntVar("verbosity", "Verbosity log level", 0, "0", &opts.verbosityLogLevel) 807 | 808 | plConfig := plog.Config{} 809 | plogflag.AddFlags(kingpin.CommandLine, &plConfig) 810 | kingpin.Version(version.Print("kafka_exporter")) 811 | kingpin.HelpFlag.Short('h') 812 | kingpin.Parse() 813 | 814 | labels := make(map[string]string) 815 | 816 | // Protect against empty labels 817 | if opts.labels != "" { 818 | for _, label := range strings.Split(opts.labels, ",") { 819 | splitted := strings.Split(label, "=") 820 | if len(splitted) >= 2 { 821 | labels[splitted[0]] = splitted[1] 822 | } 823 | } 824 | } 825 | 826 | setup(*listenAddress, *metricsPath, *topicFilter, *topicExclude, *groupFilter, *groupExclude, *logSarama, opts, labels) 827 | } 828 | 829 | func setup( 830 | listenAddress string, 831 | metricsPath string, 832 | topicFilter string, 833 | topicExclude string, 834 | groupFilter string, 835 | groupExclude string, 836 | logSarama bool, 837 | opts kafkaOpts, 838 | labels map[string]string, 839 | ) { 840 | klog.InitFlags(flag.CommandLine) 841 | if err := flag.Set("logtostderr", "true"); err != nil { 842 | klog.Errorf("Error on setting logtostderr to true: %v", err) 843 | } 844 | err := flag.Set("v", strconv.Itoa(opts.verbosityLogLevel)) 845 | if err != nil { 846 | klog.Errorf("Error on setting v to %v: %v", strconv.Itoa(opts.verbosityLogLevel), err) 847 | } 848 | defer klog.Flush() 849 | 850 | klog.V(INFO).Infoln("Starting kafka_exporter", version.Info()) 851 | klog.V(DEBUG).Infoln("Build context", version.BuildContext()) 852 | 853 | clusterBrokers = prometheus.NewDesc( 854 | prometheus.BuildFQName(namespace, "", "brokers"), 855 | "Number of Brokers in the Kafka Cluster.", 856 | nil, labels, 857 | ) 858 | clusterBrokerInfo = prometheus.NewDesc( 859 | prometheus.BuildFQName(namespace, "", "broker_info"), 860 | "Information about the Kafka Broker.", 861 | []string{"id", "address"}, labels, 862 | ) 863 | topicPartitions = prometheus.NewDesc( 864 | prometheus.BuildFQName(namespace, "topic", "partitions"), 865 | "Number of partitions for this Topic", 866 | []string{"topic"}, labels, 867 | ) 868 | topicCurrentOffset = prometheus.NewDesc( 869 | prometheus.BuildFQName(namespace, "topic", "partition_current_offset"), 870 | "Current Offset of a Broker at Topic/Partition", 871 | []string{"topic", "partition"}, labels, 872 | ) 873 | topicOldestOffset = prometheus.NewDesc( 874 | prometheus.BuildFQName(namespace, "topic", "partition_oldest_offset"), 875 | "Oldest Offset of a Broker at Topic/Partition", 876 | []string{"topic", "partition"}, labels, 877 | ) 878 | 879 | topicPartitionLeader = prometheus.NewDesc( 880 | prometheus.BuildFQName(namespace, "topic", "partition_leader"), 881 | "Leader Broker ID of this Topic/Partition", 882 | []string{"topic", "partition"}, labels, 883 | ) 884 | 885 | topicPartitionReplicas = prometheus.NewDesc( 886 | prometheus.BuildFQName(namespace, "topic", "partition_replicas"), 887 | "Number of Replicas for this Topic/Partition", 888 | []string{"topic", "partition"}, labels, 889 | ) 890 | 891 | topicPartitionInSyncReplicas = prometheus.NewDesc( 892 | prometheus.BuildFQName(namespace, "topic", 
"partition_in_sync_replica"), 893 | "Number of In-Sync Replicas for this Topic/Partition", 894 | []string{"topic", "partition"}, labels, 895 | ) 896 | 897 | topicPartitionUsesPreferredReplica = prometheus.NewDesc( 898 | prometheus.BuildFQName(namespace, "topic", "partition_leader_is_preferred"), 899 | "1 if Topic/Partition is using the Preferred Broker", 900 | []string{"topic", "partition"}, labels, 901 | ) 902 | 903 | topicUnderReplicatedPartition = prometheus.NewDesc( 904 | prometheus.BuildFQName(namespace, "topic", "partition_under_replicated_partition"), 905 | "1 if Topic/Partition is under Replicated", 906 | []string{"topic", "partition"}, labels, 907 | ) 908 | 909 | consumergroupCurrentOffset = prometheus.NewDesc( 910 | prometheus.BuildFQName(namespace, "consumergroup", "current_offset"), 911 | "Current Offset of a ConsumerGroup at Topic/Partition", 912 | []string{"consumergroup", "topic", "partition"}, labels, 913 | ) 914 | 915 | consumergroupCurrentOffsetSum = prometheus.NewDesc( 916 | prometheus.BuildFQName(namespace, "consumergroup", "current_offset_sum"), 917 | "Current Offset of a ConsumerGroup at Topic for all partitions", 918 | []string{"consumergroup", "topic"}, labels, 919 | ) 920 | 921 | consumergroupLag = prometheus.NewDesc( 922 | prometheus.BuildFQName(namespace, "consumergroup", "lag"), 923 | "Current Approximate Lag of a ConsumerGroup at Topic/Partition", 924 | []string{"consumergroup", "topic", "partition"}, labels, 925 | ) 926 | 927 | consumergroupLagZookeeper = prometheus.NewDesc( 928 | prometheus.BuildFQName(namespace, "consumergroupzookeeper", "lag_zookeeper"), 929 | "Current Approximate Lag(zookeeper) of a ConsumerGroup at Topic/Partition", 930 | []string{"consumergroup", "topic", "partition"}, nil, 931 | ) 932 | 933 | consumergroupLagSum = prometheus.NewDesc( 934 | prometheus.BuildFQName(namespace, "consumergroup", "lag_sum"), 935 | "Current Approximate Lag of a ConsumerGroup at Topic for all partitions", 936 | []string{"consumergroup", "topic"}, labels, 937 | ) 938 | 939 | consumergroupMembers = prometheus.NewDesc( 940 | prometheus.BuildFQName(namespace, "consumergroup", "members"), 941 | "Amount of members in a consumer group", 942 | []string{"consumergroup"}, labels, 943 | ) 944 | 945 | if logSarama { 946 | sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) 947 | } 948 | 949 | exporter, err := NewExporter(opts, topicFilter, topicExclude, groupFilter, groupExclude) 950 | if err != nil { 951 | klog.Fatalln(err) 952 | } 953 | defer exporter.client.Close() 954 | prometheus.MustRegister(exporter) 955 | 956 | http.Handle(metricsPath, promhttp.Handler()) 957 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 958 | _, err := w.Write([]byte(` 959 | Kafka Exporter 960 | 961 |

Kafka Exporter

962 |

Metrics

963 | </body> 964 | </html>`)) 965 | if err != nil { 966 | klog.Error("Error handling / request: ", err) 967 | } 968 | }) 969 | http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { 970 | // TODO: replace with a more specific Sarama health check 971 | _, err := w.Write([]byte("ok")) 972 | if err != nil { 973 | klog.Error("Error handling /healthz request: ", err) 974 | } 975 | }) 976 | 977 | if opts.serverUseTLS { 978 | klog.V(INFO).Infoln("Listening on HTTPS", listenAddress) 979 | 980 | _, err := CanReadCertAndKey(opts.serverTlsCertFile, opts.serverTlsKeyFile) 981 | if err != nil { 982 | klog.Error("error reading server cert and key: ", err) 983 | } 984 | 985 | clientAuthType := tls.NoClientCert 986 | if opts.serverMutualAuthEnabled { 987 | clientAuthType = tls.RequireAndVerifyClientCert 988 | } 989 | 990 | certPool := x509.NewCertPool() 991 | if opts.serverTlsCAFile != "" { 992 | if caCert, err := os.ReadFile(opts.serverTlsCAFile); err == nil { 993 | certPool.AppendCertsFromPEM(caCert) 994 | } else { 995 | klog.Error("error reading server ca: ", err) 996 | } 997 | } 998 | 999 | tlsConfig := &tls.Config{ 1000 | ClientCAs: certPool, 1001 | ClientAuth: clientAuthType, 1002 | MinVersion: tls.VersionTLS12, 1003 | CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, 1004 | PreferServerCipherSuites: true, 1005 | CipherSuites: []uint16{ 1006 | tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 1007 | tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 1008 | tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 1009 | tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, 1010 | tls.TLS_RSA_WITH_AES_256_GCM_SHA384, 1011 | tls.TLS_RSA_WITH_AES_256_CBC_SHA, 1012 | tls.TLS_RSA_WITH_AES_128_CBC_SHA256, 1013 | }, 1014 | } 1015 | server := &http.Server{ 1016 | Addr: listenAddress, 1017 | TLSConfig: tlsConfig, 1018 | } 1019 | klog.Fatal(server.ListenAndServeTLS(opts.serverTlsCertFile, opts.serverTlsKeyFile)) 1020 | } else { 1021 | klog.V(INFO).Infoln("Listening on HTTP", listenAddress) 1022 | klog.Fatal(http.ListenAndServe(listenAddress, nil)) 1023 | } 1024 | } 1025 | -------------------------------------------------------------------------------- /kafka_exporter_overview.json: -------------------------------------------------------------------------------- 1 | { 2 | "__inputs": [ 3 | { 4 | "name": "DS_PROMETHEUS_WH211", 5 | "label": "Prometheus_Wh211", 6 | "description": "", 7 | "type": "datasource", 8 | "pluginId": "prometheus", 9 | "pluginName": "Prometheus" 10 | } 11 | ], 12 | "__requires": [ 13 | { 14 | "type": "grafana", 15 | "id": "grafana", 16 | "name": "Grafana", 17 | "version": "5.1.1" 18 | }, 19 | { 20 | "type": "panel", 21 | "id": "graph", 22 | "name": "Graph", 23 | "version": "5.0.0" 24 | }, 25 | { 26 | "type": "datasource", 27 | "id": "prometheus", 28 | "name": "Prometheus", 29 | "version": "5.0.0" 30 | } 31 | ], 32 | "annotations": { 33 | "list": [ 34 | { 35 | "builtIn": 1, 36 | "datasource": "-- Grafana --", 37 | "enable": true, 38 | "hide": true, 39 | "iconColor": "rgba(0, 211, 255, 1)", 40 | "name": "Annotations & Alerts", 41 | "type": "dashboard" 42 | } 43 | ] 44 | }, 45 | "description": "Kafka resource usage and throughput", 46 | "editable": true, 47 | "gnetId": 721, 48 | "graphTooltip": 0, 49 | "id": null, 50 | "iteration": 1534756791145, 51 | "links": [], 52 | "panels": [ 53 | { 54 | "aliasColors": {}, 55 | "bars": false, 56 | "dashLength": 10, 57 | "dashes": false, 58 | "datasource": "${DS_PROMETHEUS_WH211}", 59 | "fill": 0, 60 | "gridPos": { 61 | "h": 10, 62 | "w": 10, 63 | "x": 0, 64 | "y": 0 65 | }, 66 | "id": 14, 67 | "legend": { 68 
| "alignAsTable": true, 69 | "avg": false, 70 | "current": true, 71 | "max": true, 72 | "min": false, 73 | "rightSide": false, 74 | "show": true, 75 | "sideWidth": 480, 76 | "sort": "max", 77 | "sortDesc": true, 78 | "total": false, 79 | "values": true 80 | }, 81 | "lines": true, 82 | "linewidth": 1, 83 | "links": [], 84 | "nullPointMode": "connected", 85 | "percentage": false, 86 | "pointradius": 5, 87 | "points": false, 88 | "renderer": "flot", 89 | "seriesOverrides": [], 90 | "spaceLength": 10, 91 | "stack": false, 92 | "steppedLine": false, 93 | "targets": [ 94 | { 95 | "expr": "sum(rate(kafka_topic_partition_current_offset{instance=\"$instance\", topic=~\"$topic\"}[1m])) by (topic)", 96 | "format": "time_series", 97 | "intervalFactor": 1, 98 | "legendFormat": "{{topic}}", 99 | "refId": "B" 100 | } 101 | ], 102 | "thresholds": [], 103 | "timeFrom": null, 104 | "timeShift": null, 105 | "title": "Message in per second", 106 | "tooltip": { 107 | "shared": true, 108 | "sort": 0, 109 | "value_type": "individual" 110 | }, 111 | "type": "graph", 112 | "xaxis": { 113 | "buckets": null, 114 | "mode": "time", 115 | "name": null, 116 | "show": true, 117 | "values": [] 118 | }, 119 | "yaxes": [ 120 | { 121 | "format": "short", 122 | "label": null, 123 | "logBase": 1, 124 | "max": null, 125 | "min": "0", 126 | "show": true 127 | }, 128 | { 129 | "format": "short", 130 | "label": null, 131 | "logBase": 1, 132 | "max": null, 133 | "min": null, 134 | "show": true 135 | } 136 | ], 137 | "yaxis": { 138 | "align": false, 139 | "alignLevel": null 140 | } 141 | }, 142 | { 143 | "aliasColors": {}, 144 | "bars": false, 145 | "dashLength": 10, 146 | "dashes": false, 147 | "datasource": "${DS_PROMETHEUS_WH211}", 148 | "fill": 0, 149 | "gridPos": { 150 | "h": 10, 151 | "w": 10, 152 | "x": 10, 153 | "y": 0 154 | }, 155 | "id": 12, 156 | "legend": { 157 | "alignAsTable": true, 158 | "avg": false, 159 | "current": true, 160 | "max": true, 161 | "min": false, 162 | "rightSide": false, 163 | "show": true, 164 | "sideWidth": 480, 165 | "sortDesc": true, 166 | "total": false, 167 | "values": true 168 | }, 169 | "lines": true, 170 | "linewidth": 1, 171 | "links": [], 172 | "nullPointMode": "connected", 173 | "percentage": false, 174 | "pointradius": 5, 175 | "points": false, 176 | "renderer": "flot", 177 | "seriesOverrides": [], 178 | "spaceLength": 10, 179 | "stack": false, 180 | "steppedLine": false, 181 | "targets": [ 182 | { 183 | "expr": "sum(kafka_consumergroup_lag{instance=\"$instance\",topic=~\"$topic\"}) by (consumergroup, topic) ", 184 | "format": "time_series", 185 | "instant": false, 186 | "interval": "", 187 | "intervalFactor": 1, 188 | "legendFormat": "{{consumergroup}} (topic: {{topic}})", 189 | "refId": "A" 190 | } 191 | ], 192 | "thresholds": [], 193 | "timeFrom": null, 194 | "timeShift": null, 195 | "title": "Lag by Consumer Group", 196 | "tooltip": { 197 | "shared": true, 198 | "sort": 2, 199 | "value_type": "individual" 200 | }, 201 | "type": "graph", 202 | "xaxis": { 203 | "buckets": null, 204 | "mode": "time", 205 | "name": null, 206 | "show": true, 207 | "values": [] 208 | }, 209 | "yaxes": [ 210 | { 211 | "format": "short", 212 | "label": "", 213 | "logBase": 1, 214 | "max": null, 215 | "min": "0", 216 | "show": true 217 | }, 218 | { 219 | "format": "short", 220 | "label": null, 221 | "logBase": 1, 222 | "max": null, 223 | "min": null, 224 | "show": true 225 | } 226 | ], 227 | "yaxis": { 228 | "align": false, 229 | "alignLevel": null 230 | } 231 | }, 232 | { 233 | "aliasColors": {}, 234 | 
"bars": false, 235 | "dashLength": 10, 236 | "dashes": false, 237 | "datasource": "${DS_PROMETHEUS_WH211}", 238 | "fill": 0, 239 | "gridPos": { 240 | "h": 10, 241 | "w": 10, 242 | "x": 0, 243 | "y": 10 244 | }, 245 | "id": 16, 246 | "legend": { 247 | "alignAsTable": true, 248 | "avg": false, 249 | "current": true, 250 | "max": true, 251 | "min": false, 252 | "rightSide": false, 253 | "show": true, 254 | "sideWidth": 480, 255 | "total": false, 256 | "values": true 257 | }, 258 | "lines": true, 259 | "linewidth": 1, 260 | "links": [], 261 | "nullPointMode": "connected", 262 | "percentage": false, 263 | "pointradius": 5, 264 | "points": false, 265 | "renderer": "flot", 266 | "seriesOverrides": [], 267 | "spaceLength": 10, 268 | "stack": false, 269 | "steppedLine": false, 270 | "targets": [ 271 | { 272 | "expr": "sum(delta(kafka_topic_partition_current_offset{instance=~'$instance', topic=~\"$topic\"}[5m])/5) by (topic)", 273 | "format": "time_series", 274 | "intervalFactor": 1, 275 | "legendFormat": "{{topic}}", 276 | "refId": "A" 277 | } 278 | ], 279 | "thresholds": [], 280 | "timeFrom": null, 281 | "timeShift": null, 282 | "title": "Message in per minute", 283 | "tooltip": { 284 | "shared": true, 285 | "sort": 0, 286 | "value_type": "individual" 287 | }, 288 | "transparent": false, 289 | "type": "graph", 290 | "xaxis": { 291 | "buckets": null, 292 | "mode": "time", 293 | "name": null, 294 | "show": true, 295 | "values": [] 296 | }, 297 | "yaxes": [ 298 | { 299 | "format": "short", 300 | "label": null, 301 | "logBase": 1, 302 | "max": null, 303 | "min": null, 304 | "show": true 305 | }, 306 | { 307 | "format": "short", 308 | "label": null, 309 | "logBase": 1, 310 | "max": null, 311 | "min": null, 312 | "show": true 313 | } 314 | ], 315 | "yaxis": { 316 | "align": false, 317 | "alignLevel": null 318 | } 319 | }, 320 | { 321 | "aliasColors": {}, 322 | "bars": false, 323 | "dashLength": 10, 324 | "dashes": false, 325 | "datasource": "${DS_PROMETHEUS_WH211}", 326 | "fill": 0, 327 | "gridPos": { 328 | "h": 10, 329 | "w": 10, 330 | "x": 10, 331 | "y": 10 332 | }, 333 | "id": 18, 334 | "legend": { 335 | "alignAsTable": true, 336 | "avg": false, 337 | "current": true, 338 | "max": true, 339 | "min": false, 340 | "rightSide": false, 341 | "show": true, 342 | "sideWidth": 480, 343 | "sort": "current", 344 | "sortDesc": true, 345 | "total": false, 346 | "values": true 347 | }, 348 | "lines": true, 349 | "linewidth": 1, 350 | "links": [], 351 | "nullPointMode": "connected", 352 | "percentage": false, 353 | "pointradius": 5, 354 | "points": false, 355 | "renderer": "flot", 356 | "seriesOverrides": [], 357 | "spaceLength": 10, 358 | "stack": false, 359 | "steppedLine": false, 360 | "targets": [ 361 | { 362 | "expr": "sum(delta(kafka_consumergroup_current_offset{instance=~'$instance',topic=~\"$topic\"}[5m])/5) by (consumergroup, topic)", 363 | "format": "time_series", 364 | "intervalFactor": 1, 365 | "legendFormat": "{{consumergroup}} (topic: {{topic}})", 366 | "refId": "A" 367 | } 368 | ], 369 | "thresholds": [], 370 | "timeFrom": null, 371 | "timeShift": null, 372 | "title": "Message consume per minute", 373 | "tooltip": { 374 | "shared": true, 375 | "sort": 0, 376 | "value_type": "individual" 377 | }, 378 | "type": "graph", 379 | "xaxis": { 380 | "buckets": null, 381 | "mode": "time", 382 | "name": null, 383 | "show": true, 384 | "values": [] 385 | }, 386 | "yaxes": [ 387 | { 388 | "format": "short", 389 | "label": null, 390 | "logBase": 1, 391 | "max": null, 392 | "min": null, 393 | "show": true 394 | 
}, 395 | { 396 | "format": "short", 397 | "label": null, 398 | "logBase": 1, 399 | "max": null, 400 | "min": null, 401 | "show": true 402 | } 403 | ], 404 | "yaxis": { 405 | "align": false, 406 | "alignLevel": null 407 | } 408 | }, 409 | { 410 | "aliasColors": {}, 411 | "bars": true, 412 | "dashLength": 10, 413 | "dashes": false, 414 | "datasource": "${DS_PROMETHEUS_WH211}", 415 | "fill": 1, 416 | "gridPos": { 417 | "h": 7, 418 | "w": 20, 419 | "x": 0, 420 | "y": 20 421 | }, 422 | "id": 8, 423 | "legend": { 424 | "alignAsTable": true, 425 | "avg": false, 426 | "current": true, 427 | "max": false, 428 | "min": false, 429 | "rightSide": true, 430 | "show": true, 431 | "sideWidth": 420, 432 | "total": false, 433 | "values": true 434 | }, 435 | "lines": false, 436 | "linewidth": 1, 437 | "links": [], 438 | "nullPointMode": "null", 439 | "percentage": false, 440 | "pointradius": 5, 441 | "points": false, 442 | "renderer": "flot", 443 | "seriesOverrides": [], 444 | "spaceLength": 10, 445 | "stack": false, 446 | "steppedLine": false, 447 | "targets": [ 448 | { 449 | "expr": "sum by(topic) (kafka_topic_partitions{instance=\"$instance\",topic=~\"$topic\"})", 450 | "format": "time_series", 451 | "intervalFactor": 1, 452 | "legendFormat": "{{topic}}", 453 | "refId": "A" 454 | } 455 | ], 456 | "thresholds": [], 457 | "timeFrom": null, 458 | "timeShift": null, 459 | "title": "Partitions per Topic", 460 | "tooltip": { 461 | "shared": false, 462 | "sort": 0, 463 | "value_type": "individual" 464 | }, 465 | "type": "graph", 466 | "xaxis": { 467 | "buckets": null, 468 | "mode": "series", 469 | "name": null, 470 | "show": false, 471 | "values": [ 472 | "current" 473 | ] 474 | }, 475 | "yaxes": [ 476 | { 477 | "format": "short", 478 | "label": null, 479 | "logBase": 1, 480 | "max": null, 481 | "min": null, 482 | "show": true 483 | }, 484 | { 485 | "format": "short", 486 | "label": null, 487 | "logBase": 1, 488 | "max": null, 489 | "min": null, 490 | "show": true 491 | } 492 | ], 493 | "yaxis": { 494 | "align": false, 495 | "alignLevel": null 496 | } 497 | } 498 | ], 499 | "refresh": false, 500 | "schemaVersion": 16, 501 | "style": "dark", 502 | "tags": [ 503 | "Kafka" 504 | ], 505 | "templating": { 506 | "list": [ 507 | { 508 | "allValue": null, 509 | "current": {}, 510 | "datasource": "${DS_PROMETHEUS_WH211}", 511 | "hide": 0, 512 | "includeAll": false, 513 | "label": "Job", 514 | "multi": false, 515 | "name": "job", 516 | "options": [], 517 | "query": "label_values(kafka_consumergroup_current_offset, job)", 518 | "refresh": 1, 519 | "regex": "", 520 | "sort": 0, 521 | "tagValuesQuery": "", 522 | "tags": [], 523 | "tagsQuery": "", 524 | "type": "query", 525 | "useTags": false 526 | }, 527 | { 528 | "allValue": null, 529 | "current": {}, 530 | "datasource": "${DS_PROMETHEUS_WH211}", 531 | "hide": 0, 532 | "includeAll": false, 533 | "label": "Instance", 534 | "multi": false, 535 | "name": "instance", 536 | "options": [], 537 | "query": "label_values(kafka_consumergroup_current_offset{job=~\"$job\"}, instance)", 538 | "refresh": 1, 539 | "regex": "", 540 | "sort": 0, 541 | "tagValuesQuery": "", 542 | "tags": [], 543 | "tagsQuery": "", 544 | "type": "query", 545 | "useTags": false 546 | }, 547 | { 548 | "allValue": null, 549 | "current": {}, 550 | "datasource": "${DS_PROMETHEUS_WH211}", 551 | "hide": 0, 552 | "includeAll": true, 553 | "label": "Topic", 554 | "multi": true, 555 | "name": "topic", 556 | "options": [], 557 | "query": 
"label_values(kafka_topic_partition_current_offset{instance='$instance',topic!='__consumer_offsets',topic!='--kafka'}, topic)", 558 | "refresh": 1, 559 | "regex": "", 560 | "sort": 1, 561 | "tagValuesQuery": "", 562 | "tags": [], 563 | "tagsQuery": "topic", 564 | "type": "query", 565 | "useTags": false 566 | } 567 | ] 568 | }, 569 | "time": { 570 | "from": "now-24h", 571 | "to": "now" 572 | }, 573 | "timepicker": { 574 | "refresh_intervals": [ 575 | "5s", 576 | "10s", 577 | "30s", 578 | "1m", 579 | "5m", 580 | "15m", 581 | "30m", 582 | "1h", 583 | "2h", 584 | "1d" 585 | ], 586 | "time_options": [ 587 | "5m", 588 | "15m", 589 | "1h", 590 | "6h", 591 | "12h", 592 | "24h", 593 | "2d", 594 | "7d", 595 | "30d" 596 | ] 597 | }, 598 | "timezone": "browser", 599 | "title": "Kafka Exporter Overview", 600 | "uid": "jwPKIsniz", 601 | "version": 50 602 | } -------------------------------------------------------------------------------- /kafka_exporter_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielqsj/kafka_exporter/8ec24078707d4d3b349e71522ebd7f179746072d/kafka_exporter_overview.png -------------------------------------------------------------------------------- /scram_client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/sha256" 5 | "crypto/sha512" 6 | "hash" 7 | 8 | "github.com/xdg-go/scram" 9 | ) 10 | 11 | var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } 12 | var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } 13 | 14 | type XDGSCRAMClient struct { 15 | *scram.Client 16 | *scram.ClientConversation 17 | scram.HashGeneratorFcn 18 | } 19 | 20 | func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { 21 | x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) 22 | if err != nil { 23 | return err 24 | } 25 | x.ClientConversation = x.Client.NewConversation() 26 | return nil 27 | } 28 | 29 | func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { 30 | response, err = x.ClientConversation.Step(challenge) 31 | return 32 | } 33 | 34 | func (x *XDGSCRAMClient) Done() bool { 35 | return x.ClientConversation.Done() 36 | } 37 | -------------------------------------------------------------------------------- /simple_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | "log" 7 | "net/http" 8 | "testing" 9 | "time" 10 | 11 | "github.com/IBM/sarama" 12 | ) 13 | 14 | var bootstrap_servers = []string{"localhost:9092"} 15 | 16 | func TestSmoke(t *testing.T) { 17 | log.Print("testing " + t.Name()) 18 | 19 | if !assumeKafka() { 20 | t.Skip("Kafka is not running ... 
skipping the test") 21 | return 22 | } 23 | 24 | go runServer() 25 | 26 | execute(func(resp *http.Response) { 27 | log.Println(resp.Status) 28 | 29 | defer resp.Body.Close() 30 | bytes, err := io.ReadAll(resp.Body) 31 | if err != nil { 32 | log.Fatalln(err) 33 | } else { 34 | log.Println(string(bytes)) 35 | } 36 | }) 37 | } 38 | 39 | func assumeKafka() bool { 40 | client, err := sarama.NewClient(bootstrap_servers, nil) 41 | if err != nil { 42 | return false 43 | } 44 | defer client.Close() 45 | _, err = client.Topics() 46 | return err == nil 47 | } 48 | 49 | func execute(handler func(response *http.Response)) { 50 | var e = errors.New("dummy") 51 | for e != nil { 52 | resp, err := http.Get("http://localhost:9304/metrics") 53 | if err != nil { 54 | time.Sleep(time.Millisecond * 100) 55 | } 56 | e = err 57 | if resp != nil { 58 | handler(resp) 59 | } 60 | } 61 | } 62 | 63 | func runServer() { 64 | opts := kafkaOpts{} 65 | opts.uri = bootstrap_servers 66 | opts.uriZookeeper = []string{"localhost:2181"} 67 | opts.kafkaVersion = sarama.V1_0_0_0.String() 68 | opts.metadataRefreshInterval = "30s" 69 | setup("localhost:9304", "/metrics", ".*", "^$", ".*", "^$", false, opts, nil) 70 | } 71 | --------------------------------------------------------------------------------
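A quick way to exercise the exporter by hand, outside of the test harness in simple_test.go: the sketch below scrapes the same endpoint that runServer binds and prints only the kafka_* series. It is illustrative only and not a file in the repository; the localhost:9304 address and an already-running exporter are assumptions carried over from simple_test.go.

// scrape_check.go (hypothetical, not part of the repository):
// fetch the exporter's metrics page and print only the kafka_* series.
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Assumes an exporter is already listening on localhost:9304,
	// the address used by runServer in simple_test.go.
	resp, err := http.Get("http://localhost:9304/metrics")
	if err != nil {
		log.Fatalln("exporter not reachable:", err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		// Skip Go runtime and process metrics; keep the exporter's own series.
		if line := scanner.Text(); strings.HasPrefix(line, "kafka_") {
			fmt.Println(line)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatalln(err)
	}
}

Against a TLS-enabled exporter (opts.serverUseTLS in kafka_exporter.go), the plain http.Get above would additionally need an http.Client whose TLS configuration trusts the server certificate configured via serverTlsCertFile.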