├── .github ├── CONTRIBUTING.md └── workflows │ ├── release.yml │ └── test.yml ├── .gitignore ├── .golangci.yml ├── .promu.yml ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── Makefile.common ├── README.md ├── VERSION ├── collector ├── cache.go ├── collector.go ├── collector_test.go ├── dataset.go ├── dataset_test.go ├── pool.go ├── pool_test.go ├── transform.go ├── zfs.go └── zfs_test.go ├── go.mod ├── go.sum ├── zfs ├── dataset.go ├── mock_zfs │ └── mock_zfs.go ├── pool.go └── zfs.go └── zfs_exporter.go /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please open an issue with a description of the problem you wish to solve, prior to sending a pull request. 4 | 5 | ## Contributing Code 6 | 7 | Please ensure that all code is formatted prior to committing. 8 | 9 | ### Commit messages 10 | 11 | Commits to this repository should have messages that conform to the [AngularJS Git Commit Guidelines](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines). -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | name: Release 3 | 4 | # Controls when the action will run. 
Triggers the workflow on push or pull request 5 | # events but only for the master branch 6 | on: 7 | push: 8 | branches: ["master"] 9 | 10 | env: 11 | PARALLELISM: 3 12 | 13 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 14 | jobs: 15 | # This workflow contains a single job called "release" 16 | release: 17 | # The type of runner that the job will run on 18 | runs-on: ubuntu-latest 19 | 20 | # Steps represent a sequence of tasks that will be executed as part of the job 21 | steps: 22 | - name: Go Report Card 23 | uses: creekorful/goreportcard-action@v1.0 24 | 25 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 26 | - name: Checkout 27 | id: checkout 28 | uses: actions/checkout@v2 29 | with: 30 | # Fetch all versions for tag/changelog generation 31 | fetch-depth: 0 32 | 33 | - name: Set up Go 34 | uses: actions/setup-go@v4 35 | with: 36 | go-version: 1.24.2 37 | 38 | - name: Install promu 39 | id: make_promu 40 | run: | 41 | make promu 42 | 43 | - name: Calculate Version 44 | id: calculate_version 45 | uses: mathieudutour/github-tag-action@v4.5 46 | with: 47 | github_token: ${{ secrets.GITHUB_TOKEN }} 48 | dry_run: true 49 | 50 | - name: Update Version 51 | id: update_version 52 | env: 53 | NEW_VERSION: ${{ steps.calculate_version.outputs.new_version }} 54 | run: | 55 | echo "${NEW_VERSION}" > VERSION 56 | 57 | - name: Update Changelog 58 | id: update_changelog 59 | env: 60 | CHANGELOG: ${{ steps.calculate_version.outputs.changelog }} 61 | run: | 62 | mv CHANGELOG.md _CHANGELOG.md || touch _CHANGELOG.md 63 | echo "${CHANGELOG}" > CHANGELOG.md 64 | cat _CHANGELOG.md >> CHANGELOG.md 65 | rm -f _CHANGELOG.md 66 | 67 | - name: Commit Changes 68 | id: commit_changes 69 | uses: EndBug/add-and-commit@v9.1.1 70 | env: 71 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 72 | with: 73 | add: VERSION CHANGELOG.md 74 | message: | 75 | chore(build): Releasing ${{ steps.calculate_version.outputs.new_tag }} 
76 | 77 | - name: Commit Tag 78 | id: commit_tag 79 | uses: mathieudutour/github-tag-action@v6.1 80 | with: 81 | github_token: ${{ secrets.GITHUB_TOKEN }} 82 | commit_sha: ${{ steps.commit_changes.outputs.commit_long_sha }} 83 | 84 | - name: Build 85 | id: build 86 | run: | 87 | promu crossbuild --parallelism $PARALLELISM 88 | promu crossbuild --parallelism $PARALLELISM tarballs 89 | promu checksum .tarballs 90 | 91 | - name: Create Release 92 | id: create_release 93 | uses: actions/create-release@v1 94 | env: 95 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 96 | with: 97 | tag_name: ${{ steps.calculate_version.outputs.new_tag }} 98 | release_name: Release ${{ steps.calculate_version.outputs.new_tag }} 99 | body: | 100 | Changes in this release: 101 | ${{ steps.calculate_version.outputs.changelog }} 102 | draft: false 103 | prerelease: false 104 | 105 | - name: Upload Release Assets 106 | id: upload_release_assets 107 | uses: AButler/upload-release-assets@v2.0 108 | with: 109 | files: ".tarballs/*" 110 | repo-token: ${{ secrets.GITHUB_TOKEN }} 111 | release-tag: ${{ steps.calculate_version.outputs.new_tag }} 112 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | # Controls when the action will run. 
Triggers the workflow on push or pull request 4 | # events but only for the master branch 5 | on: 6 | pull_request: 7 | branches: 8 | - master 9 | push: 10 | branches: 11 | - master 12 | 13 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 14 | jobs: 15 | # This workflow contains a single job called "test" 16 | test: 17 | # The type of runner that the job will run on 18 | runs-on: ubuntu-latest 19 | 20 | # Steps represent a sequence of tasks that will be executed as part of the job 21 | steps: 22 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 23 | - name: Checkout 24 | id: checkout 25 | uses: actions/checkout@v2 26 | with: 27 | # Fetch all versions for tag/changelog generation 28 | fetch-depth: 0 29 | 30 | - name: Set up Go 31 | uses: actions/setup-go@v4 32 | with: 33 | go-version: 1.24.2 34 | 35 | - name: Test 36 | id: test 37 | run: | 38 | make test 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | zfs_exporter 2 | .build/ 3 | .tarballs/ 4 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | enable: 4 | - errorlint 5 | - misspell 6 | - perfsprint 7 | - revive 8 | - testifylint 9 | settings: 10 | perfsprint: 11 | # Optimizes even if it requires an int or uint type cast. 12 | int-conversion: true 13 | # Optimizes into `err.Error()` even if it is only equivalent for non-nil errors. 14 | err-error: true 15 | # Optimizes `fmt.Errorf`. 16 | errorf: true 17 | # Optimizes `fmt.Sprintf` with only one argument. 18 | sprintf1: true 19 | # Optimizes into strings concatenation. 
20 |         strconcat: false 21 |     revive: 22 |       rules: 23 |         # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter 24 |         - name: unused-parameter 25 |           severity: warning 26 |           disabled: true 27 |     testifylint: 28 |       enable-all: true 29 |       disable: 30 |         - go-require 31 |       formatter: 32 |         require-f-funcs: true 33 |   exclusions: 34 |     generated: lax 35 |     presets: 36 |       - comments 37 |       - common-false-positives 38 |       - legacy 39 |       - std-error-handling 40 |     paths: 41 |       - third_party$ 42 |       - builtin$ 43 |       - examples$ 44 | issues: 45 |   max-issues-per-linter: 0 46 |   max-same-issues: 0 47 | formatters: 48 |   enable: 49 |     - gofumpt 50 |     - goimports 51 |   settings: 52 |     goimports: 53 |       local-prefixes: 54 |         - github.com/prometheus/common 55 |   exclusions: 56 |     generated: lax 57 |     paths: 58 |       - third_party$ 59 |       - builtin$ 60 |       - examples$ 61 | -------------------------------------------------------------------------------- /.promu.yml: -------------------------------------------------------------------------------- 1 | go: 2 |   # Whenever the Go version is updated here, 3 |   # .github/workflows/release.yml and test.yml should also be updated. 
4 | version: 1.23 5 | repository: 6 | path: github.com/pdf/zfs_exporter/v2 7 | build: 8 | flags: -a -tags netgo 9 | ldflags: | 10 | -X github.com/prometheus/common/version.Version={{.Version}} 11 | -X github.com/prometheus/common/version.Revision={{.Revision}} 12 | -X github.com/prometheus/common/version.Branch={{.Branch}} 13 | -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} 14 | -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} 15 | crossbuild: 16 | platforms: 17 | - linux 18 | - illumos 19 | - darwin 20 | - freebsd 21 | - netbsd 22 | - dragonfly 23 | tarball: 24 | files: 25 | - LICENSE 26 | - CHANGELOG.md 27 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## [2.3.8](https://github.com/pdf/zfs_exporter/compare/v2.3.7...v2.3.8) (2025-04-20) 2 | 3 | 4 | ### Bug Fixes 5 | 6 | * **build:** Bump Go version and golangci-lint ([4d46ab3](https://github.com/pdf/zfs_exporter/commit/4d46ab3)) 7 | 8 | 9 | 10 | 11 | ## [2.3.7](https://github.com/pdf/zfs_exporter/compare/v2.3.6...v2.3.7) (2025-04-20) 12 | 13 | 14 | ### Bug Fixes 15 | 16 | * **deps:** Bump dependencies ([6af54d2](https://github.com/pdf/zfs_exporter/commit/6af54d2)) 17 | 18 | 19 | 20 | 21 | ## [2.3.6](https://github.com/pdf/zfs_exporter/compare/v2.3.5...v2.3.6) (2025-01-18) 22 | 23 | 24 | ### Bug Fixes 25 | 26 | * **build:** Bump Go version in actions ([00498df](https://github.com/pdf/zfs_exporter/commit/00498df)) 27 | 28 | 29 | 30 | 31 | ## [2.3.5](https://github.com/pdf/zfs_exporter/compare/v2.3.4...v2.3.5) (2025-01-18) 32 | 33 | 34 | ### Bug Fixes 35 | 36 | * **core:** Bump dependencies, migrate to promslog ([ccc2b21](https://github.com/pdf/zfs_exporter/commit/ccc2b21)) 37 | 38 | 39 | 40 | 41 | ## [2.3.4](https://github.com/pdf/zfs_exporter/compare/v2.3.3...v2.3.4) (2024-04-13) 42 | 43 | 44 | ### Bug Fixes 45 | 46 | * **deps:** 
Bump deps for security ([1404536](https://github.com/pdf/zfs_exporter/commit/1404536)) 47 | 48 | 49 | 50 | 51 | ## [2.3.3](https://github.com/pdf/zfs_exporter/compare/v2.3.2...v2.3.3) (2024-04-13) 52 | 53 | 54 | ### Bug Fixes 55 | 56 | * **log:** Improve command execution error output ([2277832](https://github.com/pdf/zfs_exporter/commit/2277832)) 57 | 58 | 59 | 60 | 61 | ## [2.3.2](https://github.com/pdf/zfs_exporter/compare/v2.3.1...v2.3.2) (2023-10-13) 62 | 63 | 64 | 65 | 66 | ## [2.3.1](https://github.com/pdf/zfs_exporter/compare/v2.3.0...v2.3.1) (2023-08-12) 67 | 68 | 69 | ### Bug Fixes 70 | 71 | * **build:** Update deps ([ddf8e09](https://github.com/pdf/zfs_exporter/commit/ddf8e09)) 72 | 73 | 74 | 75 | 76 | # [2.3.0](https://github.com/pdf/zfs_exporter/compare/v2.2.8...v2.3.0) (2023-08-12) 77 | 78 | 79 | ### Features 80 | 81 | * **server:** Add exporter toolkit for TLS support ([8102e2e](https://github.com/pdf/zfs_exporter/commit/8102e2e)), closes [#34](https://github.com/pdf/zfs_exporter/issues/34) 82 | 83 | 84 | 85 | 86 | ## [2.2.8](https://github.com/pdf/zfs_exporter/compare/v2.2.7...v2.2.8) (2023-04-22) 87 | 88 | 89 | ### Bug Fixes 90 | 91 | * **build:** Tag correct commit SHA ([0712333](https://github.com/pdf/zfs_exporter/commit/0712333)) 92 | * **security:** Update dependencies for upstream vulnerabilities ([2220da2](https://github.com/pdf/zfs_exporter/commit/2220da2)) 93 | 94 | 95 | 96 | 97 | ## [2.2.7](https://github.com/pdf/zfs_exporter/compare/v2.2.6...v2.2.7) (2023-01-28) 98 | 99 | 100 | ### Bug Fixes 101 | 102 | * **transform:** Add support for ancient ZFS dedupratio metric ([85bdc3b](https://github.com/pdf/zfs_exporter/commit/85bdc3b)), closes [#26](https://github.com/pdf/zfs_exporter/issues/26) 103 | 104 | 105 | 106 | 107 | ## [2.2.6](https://github.com/pdf/zfs_exporter/compare/v2.2.5...v2.2.6) (2023-01-28) 108 | 109 | 110 | ### Bug Fixes 111 | 112 | * **transform:** Add support for ancient ZFS fragmentation metric 
([a0240d1](https://github.com/pdf/zfs_exporter/commit/a0240d1)), closes [#26](https://github.com/pdf/zfs_exporter/issues/26) 113 | 114 | 115 | 116 | 117 | ## [2.2.5](https://github.com/pdf/zfs_exporter/compare/v2.2.4...v2.2.5) (2022-01-30) 118 | 119 | 120 | ### Bug Fixes 121 | 122 | * **core:** Correctly handle and report errors listing pools ([efbcceb](https://github.com/pdf/zfs_exporter/commit/efbcceb)), closes [#18](https://github.com/pdf/zfs_exporter/issues/18) 123 | 124 | 125 | 126 | 127 | ## [2.2.4](https://github.com/pdf/zfs_exporter/compare/v2.2.3...v2.2.4) (2022-01-05) 128 | 129 | 130 | ### Bug Fixes 131 | 132 | * **build:** Update promu config to build v2 ([2a38914](https://github.com/pdf/zfs_exporter/commit/2a38914)) 133 | 134 | 135 | 136 | 137 | ## [2.2.3](https://github.com/pdf/zfs_exporter/compare/v2.2.2...v2.2.3) (2022-01-05) 138 | 139 | 140 | ### Bug Fixes 141 | 142 | * **build:** update go module version to match release tag major version ([f709083](https://github.com/pdf/zfs_exporter/commit/f709083)) 143 | 144 | 145 | 146 | 147 | ## [2.2.2](https://github.com/pdf/zfs_exporter/compare/v2.2.1...v2.2.2) (2021-11-16) 148 | 149 | 150 | ### Bug Fixes 151 | 152 | * **metrics:** Fix typo in metric name ([bbd3d91](https://github.com/pdf/zfs_exporter/commit/bbd3d91)) 153 | * **pool:** Add SUSPENDED status ([9b9e655](https://github.com/pdf/zfs_exporter/commit/9b9e655)) 154 | * **tests:** Remove unnecessary duration conversion ([b6a29ab](https://github.com/pdf/zfs_exporter/commit/b6a29ab)) 155 | 156 | 157 | 158 | 159 | ## [2.2.1](https://github.com/pdf/zfs_exporter/compare/v2.2.0...v2.2.1) (2021-09-13) 160 | 161 | 162 | ### Bug Fixes 163 | 164 | * **collector:** Avoid race on upstream channel close, tidy sync points ([e6fbdf5](https://github.com/pdf/zfs_exporter/commit/e6fbdf5)) 165 | * **docs:** Document web.disable-exporter-metrics flag in README ([20182da](https://github.com/pdf/zfs_exporter/commit/20182da)) 166 | 167 | 168 | 169 | 170 | # 
[2.2.0](https://github.com/pdf/zfs_exporter/compare/v2.1.1...v2.2.0) (2021-09-04) 171 | 172 | 173 | ### Bug Fixes 174 | 175 | * **docs:** Correct misspelling ([066c7d2](https://github.com/pdf/zfs_exporter/commit/066c7d2)) 176 | 177 | 178 | ### Features 179 | 180 | * **metrics:** Allow disabling exporter metrics ([1ca8717](https://github.com/pdf/zfs_exporter/commit/1ca8717)), closes [#2](https://github.com/pdf/zfs_exporter/issues/2) 181 | 182 | 183 | 184 | 185 | ## [2.1.1](https://github.com/pdf/zfs_exporter/compare/v2.1.0...v2.1.1) (2021-08-27) 186 | 187 | 188 | ### Bug Fixes 189 | 190 | * **build:** Update to Go 1.17 for crossbuild, and enable all platforms ([f47b69a](https://github.com/pdf/zfs_exporter/commit/f47b69a)) 191 | * **core:** Update dependencies ([b39382b](https://github.com/pdf/zfs_exporter/commit/b39382b)) 192 | 193 | 194 | 195 | 196 | # [2.1.0](https://github.com/pdf/zfs_exporter/compare/v2.0.0...v2.1.0) (2021-08-18) 197 | 198 | 199 | ### Bug Fixes 200 | 201 | * **logging:** Include collector in warning for unsupported properties ([1760a4a](https://github.com/pdf/zfs_exporter/commit/1760a4a)) 202 | * **metrics:** Invert ratio for multiplier fields, and clarify their docs ([1a7bc3a](https://github.com/pdf/zfs_exporter/commit/1a7bc3a)), closes [#11](https://github.com/pdf/zfs_exporter/issues/11) 203 | 204 | 205 | ### Features 206 | 207 | * **build:** Update to Go 1.17 ([b64115c](https://github.com/pdf/zfs_exporter/commit/b64115c)) 208 | 209 | 210 | 211 | 212 | # [2.0.0](https://github.com/pdf/zfs_exporter/compare/v1.0.1...v2.0.0) (2021-08-14) 213 | 214 | 215 | ### Code Refactoring 216 | 217 | * **collector:** Migrate to internal ZFS CLI implementation ([53b0e98](https://github.com/pdf/zfs_exporter/commit/53b0e98)), closes [#7](https://github.com/pdf/zfs_exporter/issues/7) [#9](https://github.com/pdf/zfs_exporter/issues/9) [#10](https://github.com/pdf/zfs_exporter/issues/10) 218 | 219 | 220 | ### Features 221 | 222 | * **performance:** Execute 
collection concurrently per pool ([ccc6f22](https://github.com/pdf/zfs_exporter/commit/ccc6f22)) 223 | * **zfs:** Add local ZFS CLI parsing ([f5050b1](https://github.com/pdf/zfs_exporter/commit/f5050b1)) 224 | 225 | 226 | ### BREAKING CHANGES 227 | 228 | * **collector:** Ratio values are now properly calculated in the range 229 | 0-1, rather than being passed verbatim. 230 | 231 | The following metrics are affected by this change: 232 | - zfs_pool_deduplication_ratio 233 | - zfs_pool_capacity_ratio 234 | - zfs_pool_fragmentation_ratio 235 | - zfs_dataset_compression_ratio 236 | - zfs_dataset_referenced_compression_ratio 237 | 238 | Additionally, the zfs_dataset_fragmentation_percent metric has been 239 | renamed to zfs_dataset_fragmentation_ratio. 240 | 241 | 242 | 243 | 244 | ## [1.0.1](https://github.com/pdf/zfs_exporter/compare/v1.0.0...v1.0.1) (2021-08-03) 245 | 246 | 247 | ### Bug Fixes 248 | 249 | * fix copy and paste errors when accessing dataset properties ([c0fc6b2](https://github.com/pdf/zfs_exporter/commit/c0fc6b2)) 250 | 251 | 252 | 253 | 254 | # [1.0.0](https://github.com/pdf/zfs_exporter/compare/v0.0.3...v1.0.0) (2021-06-22) 255 | 256 | 257 | ### Bug Fixes 258 | 259 | * **ci:** Fix syntax error in github actions workflow ([0b6e8bc](https://github.com/pdf/zfs_exporter/commit/0b6e8bc)) 260 | 261 | 262 | ### Code Refactoring 263 | 264 | * **core:** Update prometheus toolchain and refactor internals ([056b386](https://github.com/pdf/zfs_exporter/commit/056b386)) 265 | 266 | 267 | ### Features 268 | 269 | * **enhancement:** Allow excluding datasets by regular expression ([8dd48ba](https://github.com/pdf/zfs_exporter/commit/8dd48ba)), closes [#3](https://github.com/pdf/zfs_exporter/issues/3) 270 | 271 | 272 | ### BREAKING CHANGES 273 | 274 | * **core:** Go API has changed somewhat, but metrics remain 275 | unaffected. 
276 | 277 | 278 | 279 | 280 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Peter Fern 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2015 The Prometheus Authors 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | # Needs to be defined before including Makefile.common to auto-generate targets 15 | DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x 16 | DOCKER_IMAGE_NAME ?= zfs-exporter 17 | 18 | .PHONY: all 19 | all:: test build 20 | 21 | .PHONY: test 22 | test:: vet precheck style lint unused common-test 23 | 24 | include Makefile.common 25 | -------------------------------------------------------------------------------- /Makefile.common: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The Prometheus Authors 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | 15 | # A common Makefile that includes rules to be reused in different prometheus projects. 16 | # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! 17 | 18 | # Example usage : 19 | # Create the main Makefile in the root project directory. 
20 | # include Makefile.common 21 | # customTarget: 22 | # @echo ">> Running customTarget" 23 | # 24 | 25 | # Ensure GOBIN is not set during build so that promu is installed to the correct path 26 | unexport GOBIN 27 | 28 | GO ?= go 29 | GOFMT ?= $(GO)fmt 30 | FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) 31 | GOOPTS ?= 32 | GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) 33 | GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) 34 | 35 | GO_VERSION ?= $(shell $(GO) version) 36 | GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) 37 | PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') 38 | 39 | PROMU := $(FIRST_GOPATH)/bin/promu 40 | pkgs = ./... 41 | 42 | ifeq (arm, $(GOHOSTARCH)) 43 | GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) 44 | GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) 45 | else 46 | GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) 47 | endif 48 | 49 | GOTEST := $(GO) test 50 | GOTEST_DIR := 51 | ifneq ($(CIRCLE_JOB),) 52 | ifneq ($(shell command -v gotestsum 2> /dev/null),) 53 | GOTEST_DIR := test-results 54 | GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- 55 | endif 56 | endif 57 | 58 | PROMU_VERSION ?= 0.17.0 59 | PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz 60 | 61 | SKIP_GOLANGCI_LINT := 62 | GOLANGCI_LINT := 63 | GOLANGCI_LINT_OPTS ?= 64 | GOLANGCI_LINT_VERSION ?= v2.1.2 65 | # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. 66 | # windows isn't included here because of the path separator being different. 67 | ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) 68 | ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) 69 | # If we're in CI and there is an Actions file, that means the linter 70 | # is being run in Actions, so we don't need to run it here. 
71 | ifneq (,$(SKIP_GOLANGCI_LINT)) 72 | GOLANGCI_LINT := 73 | else ifeq (,$(CIRCLE_JOB)) 74 | GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint 75 | else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) 76 | GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint 77 | endif 78 | endif 79 | endif 80 | 81 | PREFIX ?= $(shell pwd) 82 | BIN_DIR ?= $(shell pwd) 83 | DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) 84 | DOCKERFILE_PATH ?= ./Dockerfile 85 | DOCKERBUILD_CONTEXT ?= ./ 86 | DOCKER_REPO ?= prom 87 | 88 | DOCKER_ARCHS ?= amd64 89 | 90 | BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) 91 | PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) 92 | TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) 93 | 94 | SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) 95 | 96 | ifeq ($(GOHOSTARCH),amd64) 97 | ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) 98 | # Only supported on amd64 99 | test-flags := -race 100 | endif 101 | endif 102 | 103 | # This rule is used to forward a target like "build" to "common-build". This 104 | # allows a new "build" target to be defined in a Makefile which includes this 105 | # one and override "common-build" without override warnings. 106 | %: common-% ; 107 | 108 | .PHONY: common-all 109 | common-all: precheck style check_license lint yamllint unused build test 110 | 111 | .PHONY: common-style 112 | common-style: 113 | @echo ">> checking code style" 114 | @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ 115 | if [ -n "$${fmtRes}" ]; then \ 116 | echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ 117 | echo "Please ensure you are using $$($(GO) version) for formatting code."; \ 118 | exit 1; \ 119 | fi 120 | 121 | .PHONY: common-check_license 122 | common-check_license: 123 | @echo ">> checking license header" 124 | @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ 125 | awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ 126 | done); \ 127 | if [ -n "$${licRes}" ]; then \ 128 | echo "license header checking failed:"; echo "$${licRes}"; \ 129 | exit 1; \ 130 | fi 131 | 132 | .PHONY: common-deps 133 | common-deps: 134 | @echo ">> getting dependencies" 135 | $(GO) mod download 136 | 137 | .PHONY: update-go-deps 138 | update-go-deps: 139 | @echo ">> updating Go dependencies" 140 | @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ 141 | $(GO) get -d $$m; \ 142 | done 143 | $(GO) mod tidy 144 | 145 | .PHONY: common-test-short 146 | common-test-short: $(GOTEST_DIR) 147 | @echo ">> running short tests" 148 | $(GOTEST) -short $(GOOPTS) $(pkgs) 149 | 150 | .PHONY: common-test 151 | common-test: $(GOTEST_DIR) 152 | @echo ">> running all tests" 153 | $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) 154 | 155 | $(GOTEST_DIR): 156 | @mkdir -p $@ 157 | 158 | .PHONY: common-format 159 | common-format: 160 | @echo ">> formatting code" 161 | $(GO) fmt $(pkgs) 162 | 163 | .PHONY: common-vet 164 | common-vet: 165 | @echo ">> vetting code" 166 | $(GO) vet $(GOOPTS) $(pkgs) 167 | 168 | .PHONY: common-lint 169 | common-lint: $(GOLANGCI_LINT) 170 | ifdef GOLANGCI_LINT 171 | @echo ">> running golangci-lint" 172 | $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) 173 | endif 174 | 175 | .PHONY: common-lint-fix 176 | common-lint-fix: $(GOLANGCI_LINT) 177 | ifdef GOLANGCI_LINT 178 | @echo ">> running golangci-lint fix" 179 | $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) 180 | endif 181 | 182 | .PHONY: common-yamllint 183 | common-yamllint: 184 | @echo ">> running yamllint on all YAML files in the repository" 185 | ifeq (, $(shell command -v yamllint 2> /dev/null)) 186 | @echo "yamllint not installed so skipping" 187 | else 188 | yamllint . 189 | endif 190 | 191 | # For backward-compatibility. 
192 | .PHONY: common-staticcheck 193 | common-staticcheck: lint 194 | 195 | .PHONY: common-unused 196 | common-unused: 197 | @echo ">> running check for unused/missing packages in go.mod" 198 | $(GO) mod tidy 199 | @git diff --exit-code -- go.sum go.mod 200 | 201 | .PHONY: common-build 202 | common-build: promu 203 | @echo ">> building binaries" 204 | $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) 205 | 206 | .PHONY: common-tarball 207 | common-tarball: promu 208 | @echo ">> building release tarball" 209 | $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) 210 | 211 | .PHONY: common-docker-repo-name 212 | common-docker-repo-name: 213 | @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" 214 | 215 | .PHONY: common-docker $(BUILD_DOCKER_ARCHS) 216 | common-docker: $(BUILD_DOCKER_ARCHS) 217 | $(BUILD_DOCKER_ARCHS): common-docker-%: 218 | docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ 219 | -f $(DOCKERFILE_PATH) \ 220 | --build-arg ARCH="$*" \ 221 | --build-arg OS="linux" \ 222 | $(DOCKERBUILD_CONTEXT) 223 | 224 | .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) 225 | common-docker-publish: $(PUBLISH_DOCKER_ARCHS) 226 | $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: 227 | docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" 228 | 229 | DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) 230 | .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) 231 | common-docker-tag-latest: $(TAG_DOCKER_ARCHS) 232 | $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: 233 | docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" 234 | docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" 235 | 236 | .PHONY: common-docker-manifest 237 | common-docker-manifest: 238 | 
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) 239 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" 240 | 241 | .PHONY: promu 242 | promu: $(PROMU) 243 | 244 | $(PROMU): 245 | $(eval PROMU_TMP := $(shell mktemp -d)) 246 | curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) 247 | mkdir -p $(FIRST_GOPATH)/bin 248 | cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu 249 | rm -r $(PROMU_TMP) 250 | 251 | .PHONY: proto 252 | proto: 253 | @echo ">> generating code from proto files" 254 | @./scripts/genproto.sh 255 | 256 | ifdef GOLANGCI_LINT 257 | $(GOLANGCI_LINT): 258 | mkdir -p $(FIRST_GOPATH)/bin 259 | curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ 260 | | sed -e '/install -d/d' \ 261 | | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) 262 | endif 263 | 264 | .PHONY: precheck 265 | precheck:: 266 | 267 | define PRECHECK_COMMAND_template = 268 | precheck:: $(1)_precheck 269 | 270 | PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) 271 | .PHONY: $(1)_precheck 272 | $(1)_precheck: 273 | @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ 274 | echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ 275 | exit 1; \ 276 | fi 277 | endef 278 | 279 | govulncheck: install-govulncheck 280 | govulncheck ./... 
281 | 282 | install-govulncheck: 283 | command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest 284 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ZFS Exporter 2 | 3 | [![Test](https://github.com/pdf/zfs_exporter/actions/workflows/test.yml/badge.svg)](https://github.com/pdf/zfs_exporter/actions/workflows/test.yml) 4 | [![Release](https://github.com/pdf/zfs_exporter/actions/workflows/release.yml/badge.svg)](https://github.com/pdf/zfs_exporter/actions/workflows/release.yml) 5 | [![Go Report Card](https://goreportcard.com/badge/github.com/pdf/zfs_exporter)](https://goreportcard.com/report/github.com/pdf/zfs_exporter) 6 | [![License](https://img.shields.io/badge/License-MIT-%23a31f34)](https://github.com/pdf/zfs_exporter/blob/master/LICENSE) 7 | 8 | Prometheus exporter for ZFS (pools, filesystems, snapshots and volumes). Other implementations exist, however performance can be quite variable, producing occasional timeouts (and associated alerts). 
This exporter was built with a few features aimed at allowing users to avoid collecting more than they need to, and to ensure timeouts cannot occur, but that we eventually return useful data: 9 | 10 | - **Pool selection** - allow the user to select which pools are collected 11 | - **Multiple collectors** - allow the user to select which data types are collected (pools, filesystems, snapshots and volumes) 12 | - **Property selection** - allow the user to select which properties are collected per data type (enabling only required properties will increase collector performance, by reducing metadata queries) 13 | - **Collection deadline and caching** - if the collection duration exceeds the configured deadline, cached data from the last run will be returned for any metrics that have not yet been collected, and the current collection run will continue in the background. Collections will not run concurrently, so that when a system is running slowly, we don't compound the problem - if an existing collection is still running, cached data will be returned. 14 | 15 | ## Installation 16 | 17 | Download the [latest release](https://github.com/pdf/zfs_exporter/releases/latest) for your platform, and unpack it somewhere on your filesystem. 18 | 19 | You may also build the latest version using Go v1.11 - 1.17 via `go get`: 20 | 21 | ```bash 22 | go get -u github.com/pdf/zfs_exporter 23 | ``` 24 | 25 | Installation can also be accomplished using `go install`: 26 | 27 | ```bash 28 | version=latest # or a specific version tag 29 | go install github.com/pdf/zfs_exporter@$version 30 | ``` 31 | 32 | ## Usage 33 | 34 | ``` 35 | usage: zfs_exporter [] 36 | 37 | 38 | Flags: 39 | -h, --[no-]help Show context-sensitive help (also try --help-long and --help-man). 
40 | --[no-]collector.dataset-filesystem 41 | Enable the dataset-filesystem collector (default: enabled) 42 | --properties.dataset-filesystem="available,logicalused,quota,referenced,used,usedbydataset,written" 43 | Properties to include for the dataset-filesystem collector, comma-separated. 44 | --[no-]collector.dataset-snapshot 45 | Enable the dataset-snapshot collector (default: disabled) 46 | --properties.dataset-snapshot="logicalused,referenced,used,written" 47 | Properties to include for the dataset-snapshot collector, comma-separated. 48 | --[no-]collector.dataset-volume 49 | Enable the dataset-volume collector (default: enabled) 50 | --properties.dataset-volume="available,logicalused,referenced,used,usedbydataset,volsize,written" 51 | Properties to include for the dataset-volume collector, comma-separated. 52 | --[no-]collector.pool Enable the pool collector (default: enabled) 53 | --properties.pool="allocated,dedupratio,fragmentation,free,freeing,health,leaked,readonly,size" 54 | Properties to include for the pool collector, comma-separated. 55 | --web.telemetry-path="/metrics" 56 | Path under which to expose metrics. 57 | --[no-]web.disable-exporter-metrics 58 | Exclude metrics about the exporter itself (promhttp_*, process_*, go_*). 59 | --deadline=8s Maximum duration that a collection should run before returning cached data. Should be set to a value shorter than your scrape timeout duration. The current collection run will continue and update the cache when 60 | complete (default: 8s) 61 | --pool=POOL ... Name of the pool(s) to collect, repeat for multiple pools (default: all pools). 62 | --exclude=EXCLUDE ... Exclude datasets/snapshots/volumes that match the provided regex (e.g. '^rpool/docker/'), may be specified multiple times. 63 | --[no-]web.systemd-socket Use systemd socket activation listeners instead of port listeners (Linux only). 64 | --web.listen-address=:9134 ... 65 | Addresses on which to expose metrics and web interface. 
Repeatable for multiple addresses. Examples: `:9100` or `[::1]:9100` for http, `vsock://:9100` for vsock 66 | --web.config.file="" Path to configuration file that can enable TLS or authentication. See: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md 67 | --log.level=info Only log messages with the given severity or above. One of: [debug, info, warn, error] 68 | --log.format=logfmt Output format of log messages. One of: [logfmt, json] 69 | --[no-]version Show application version. 70 | ``` 71 | 72 | Collectors that are enabled by default can be negated by prefixing the flag with `--no-*`, ie: 73 | 74 | ``` 75 | zfs_exporter --no-collector.dataset-filesystem 76 | ``` 77 | 78 | ## TLS endpoint 79 | 80 | **EXPERIMENTAL** 81 | 82 | The exporter supports TLS via a new web configuration file. 83 | 84 | ```console 85 | ./zfs_exporter --web.config.file=web-config.yml 86 | ``` 87 | 88 | See the [exporter-toolkit https package](https://github.com/prometheus/exporter-toolkit/blob/v0.1.0/https/README.md) for more details. 89 | 90 | ## Caveats 91 | 92 | The collector may need to be run as root on some platforms (ie - Linux prior to ZFS v0.7.0). 93 | 94 | Whilst inspiration was taken from some of the alternative ZFS collectors, metric names may not be compatible. 
95 | 96 | ## Alternatives 97 | 98 | In no particular order, here are some alternative implementations: 99 | 100 | - https://github.com/eliothedeman/zfs_exporter 101 | - https://github.com/ncabatoff/zfs-exporter 102 | - https://github.com/eripa/prometheus-zfs 103 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 2.3.8 2 | -------------------------------------------------------------------------------- /collector/cache.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | ) 8 | 9 | type metricCache struct { 10 | cache map[string]prometheus.Metric 11 | sync.RWMutex 12 | } 13 | 14 | func (c *metricCache) add(m metric) { 15 | c.Lock() 16 | defer c.Unlock() 17 | c.cache[m.name] = m.prometheus 18 | } 19 | 20 | func (c *metricCache) merge(other *metricCache) { 21 | if c == other { 22 | return 23 | } 24 | c.Lock() 25 | other.RLock() 26 | defer func() { 27 | other.RUnlock() 28 | c.Unlock() 29 | }() 30 | for name, value := range other.cache { 31 | c.cache[name] = value 32 | } 33 | } 34 | 35 | func (c *metricCache) replace(other *metricCache) { 36 | c.Lock() 37 | defer c.Unlock() 38 | c.cache = other.cache 39 | } 40 | 41 | func (c *metricCache) index() map[string]struct{} { 42 | c.RLock() 43 | defer c.RUnlock() 44 | index := make(map[string]struct{}, len(c.cache)) 45 | for name := range c.cache { 46 | index[name] = struct{}{} 47 | } 48 | 49 | return index 50 | } 51 | 52 | func newMetricCache() *metricCache { 53 | return &metricCache{cache: make(map[string]prometheus.Metric)} 54 | } 55 | -------------------------------------------------------------------------------- /collector/collector.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | 
"errors" 5 | "fmt" 6 | "log/slog" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/alecthomas/kingpin/v2" 11 | "github.com/pdf/zfs_exporter/v2/zfs" 12 | "github.com/prometheus/client_golang/prometheus" 13 | ) 14 | 15 | const ( 16 | defaultEnabled = true 17 | defaultDisabled = false 18 | namespace = `zfs` 19 | helpDefaultStateEnabled = `enabled` 20 | helpDefaultStateDisabled = `disabled` 21 | 22 | subsystemDataset = `dataset` 23 | subsystemPool = `pool` 24 | 25 | propertyUnsupportedDesc = `!!! This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!!` 26 | propertyUnsupportedMsg = `Unsupported dataset property, results are likely to be undesirable` 27 | helpIssue = `Please file an issue at https://github.com/pdf/zfs_exporter/issues` 28 | ) 29 | 30 | var ( 31 | collectorStates = make(map[string]State) 32 | scrapeDurationDescName = prometheus.BuildFQName(namespace, `scrape`, `collector_duration_seconds`) 33 | scrapeDurationDesc = prometheus.NewDesc( 34 | scrapeDurationDescName, 35 | `zfs_exporter: Duration of a collector scrape.`, 36 | []string{`collector`}, 37 | nil, 38 | ) 39 | scrapeSuccessDescName = prometheus.BuildFQName(namespace, `scrape`, `collector_success`) 40 | scrapeSuccessDesc = prometheus.NewDesc( 41 | scrapeSuccessDescName, 42 | `zfs_exporter: Whether a collector succeeded.`, 43 | []string{`collector`}, 44 | nil, 45 | ) 46 | 47 | errUnsupportedProperty = errors.New(`unsupported property`) 48 | ) 49 | 50 | type factoryFunc func(l *slog.Logger, c zfs.Client, properties []string) (Collector, error) 51 | 52 | type transformFunc func(string) (float64, error) 53 | 54 | // State holds metadata for managing collector status 55 | type State struct { 56 | Name string 57 | Enabled *bool 58 | Properties *string 59 | factory factoryFunc 60 | } 61 | 62 | // Collector defines the minimum functionality for registering a collector 63 | type Collector 
interface { 64 | update(ch chan<- metric, pools []string, excludes regexpCollection) error 65 | describe(ch chan<- *prometheus.Desc) 66 | } 67 | 68 | type metric struct { 69 | name string 70 | prometheus prometheus.Metric 71 | } 72 | 73 | type property struct { 74 | name string 75 | desc *prometheus.Desc 76 | transform transformFunc 77 | } 78 | 79 | func (p property) push(ch chan<- metric, value string, labelValues ...string) error { 80 | v, err := p.transform(value) 81 | if err != nil { 82 | return err 83 | } 84 | ch <- metric{ 85 | name: expandMetricName(p.name, labelValues...), 86 | prometheus: prometheus.MustNewConstMetric( 87 | p.desc, 88 | prometheus.GaugeValue, 89 | v, 90 | labelValues..., 91 | ), 92 | } 93 | 94 | return nil 95 | } 96 | 97 | type propertyStore struct { 98 | defaultSubsystem string 99 | defaultLabels []string 100 | store map[string]property 101 | } 102 | 103 | func (p *propertyStore) find(name string) (property, error) { 104 | prop, ok := p.store[name] 105 | if !ok { 106 | prop = newProperty( 107 | p.defaultSubsystem, 108 | name, 109 | propertyUnsupportedDesc, 110 | transformNumeric, 111 | p.defaultLabels..., 112 | ) 113 | return prop, errUnsupportedProperty 114 | } 115 | return prop, nil 116 | } 117 | 118 | func registerCollector(collector string, isDefaultEnabled bool, defaultProps string, factory factoryFunc) { 119 | helpDefaultState := helpDefaultStateDisabled 120 | if isDefaultEnabled { 121 | helpDefaultState = helpDefaultStateEnabled 122 | } 123 | 124 | enabledFlagName := fmt.Sprintf("collector.%s", collector) 125 | enabledFlagHelp := fmt.Sprintf("Enable the %s collector (default: %s)", collector, helpDefaultState) 126 | enabledDefaultValue := strconv.FormatBool(isDefaultEnabled) 127 | 128 | propsFlagName := fmt.Sprintf("properties.%s", collector) 129 | propsFlagHelp := fmt.Sprintf("Properties to include for the %s collector, comma-separated.", collector) 130 | 131 | enabledFlag := kingpin.Flag(enabledFlagName, 
enabledFlagHelp).Default(enabledDefaultValue).Bool() 132 | 	propsFlag := kingpin.Flag(propsFlagName, propsFlagHelp).Default(defaultProps).String() 133 | 134 | 	collectorStates[collector] = State{ 135 | 		Enabled:    enabledFlag, 136 | 		Properties: propsFlag, 137 | 		factory:    factory, 138 | 	} 139 | } 140 | 141 | func expandMetricName(prefix string, context ...string) string { 142 | 	return strings.Join(append(context, prefix), `-`) 143 | } 144 | 145 | func newProperty(subsystem, metricName, helpText string, transform transformFunc, labels ...string) property { 146 | 	name := prometheus.BuildFQName(namespace, subsystem, metricName) 147 | 	return property{ 148 | 		name:      name, 149 | 		desc:      prometheus.NewDesc(name, helpText, labels, nil), 150 | 		transform: transform, 151 | 	} 152 | } 153 | -------------------------------------------------------------------------------- /collector/collector_test.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | 	"bytes" 5 | 	"context" 6 | 	"io" 7 | 	"log/slog" 8 | 	"time" 9 | 10 | 	"github.com/pdf/zfs_exporter/v2/zfs" 11 | 	"github.com/prometheus/client_golang/prometheus" 12 | 	"github.com/prometheus/client_golang/prometheus/testutil" 13 | ) 14 | 15 | var logger = slog.New(slog.NewTextHandler(io.Discard, nil)) 16 | 17 | func callCollector(ctx context.Context, collector prometheus.Collector, metricResults []byte, metricNames []string) error { 18 | 	// Buffer of 1 so the goroutine can always complete its send and exit,
	// even when ctx is cancelled before the result is received; an
	// unbuffered channel would leak the goroutine on the ctx.Done() path.
	result := make(chan error, 1) 19 | 	go func() { 20 | 		result <- testutil.CollectAndCompare(collector, bytes.NewBuffer(metricResults), metricNames...)
21 | }() 22 | 23 | select { 24 | case err := <-result: 25 | return err 26 | case <-ctx.Done(): 27 | return ctx.Err() 28 | } 29 | } 30 | 31 | func defaultConfig(z zfs.Client) ZFSConfig { 32 | return ZFSConfig{ 33 | DisableMetrics: true, 34 | Deadline: 5 * time.Minute, 35 | Logger: logger, 36 | ZFSClient: z, 37 | } 38 | } 39 | 40 | func stringPointer(s string) *string { 41 | return &s 42 | } 43 | 44 | func boolPointer(b bool) *bool { 45 | return &b 46 | } 47 | -------------------------------------------------------------------------------- /collector/dataset.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "sync" 7 | 8 | "github.com/pdf/zfs_exporter/v2/zfs" 9 | "github.com/prometheus/client_golang/prometheus" 10 | ) 11 | 12 | const ( 13 | defaultFilesystemProps = `available,logicalused,quota,referenced,used,usedbydataset,written` 14 | defaultSnapshotProps = `logicalused,referenced,used,written` 15 | defaultVolumeProps = `available,logicalused,referenced,used,usedbydataset,volsize,written` 16 | ) 17 | 18 | var ( 19 | datasetLabels = []string{`name`, `pool`, `type`} 20 | datasetProperties = propertyStore{ 21 | defaultSubsystem: subsystemDataset, 22 | defaultLabels: datasetLabels, 23 | store: map[string]property{ 24 | `available`: newProperty( 25 | subsystemDataset, 26 | `available_bytes`, 27 | `The amount of space in bytes available to the dataset and all its children.`, 28 | transformNumeric, 29 | datasetLabels..., 30 | ), 31 | `compressratio`: newProperty( 32 | subsystemDataset, 33 | `compression_ratio`, 34 | `The ratio of compressed size vs uncompressed size for this dataset.`, 35 | transformMultiplier, 36 | datasetLabels..., 37 | ), 38 | `logicalused`: newProperty( 39 | subsystemDataset, 40 | `logical_used_bytes`, 41 | `The amount of space in bytes that is "logically" consumed by this dataset and all its descendents. 
See the "used_bytes" property.`, 42 | transformNumeric, 43 | datasetLabels..., 44 | ), 45 | `logicalreferenced`: newProperty( 46 | subsystemDataset, 47 | `logical_referenced_bytes`, 48 | `The amount of space that is "logically" accessible by this dataset. See the "referenced_bytes" property.`, 49 | transformNumeric, 50 | datasetLabels..., 51 | ), 52 | `quota`: newProperty( 53 | subsystemDataset, 54 | `quota_bytes`, 55 | `The maximum amount of space in bytes this dataset and its descendents can consume.`, 56 | transformNumeric, 57 | datasetLabels..., 58 | ), 59 | `refcompressratio`: newProperty( 60 | subsystemDataset, 61 | `referenced_compression_ratio`, 62 | `The ratio of compressed size vs uncompressed size for the referenced space of this dataset. See also the "compression_ratio" property.`, 63 | transformMultiplier, 64 | datasetLabels..., 65 | ), 66 | `referenced`: newProperty( 67 | subsystemDataset, 68 | `referenced_bytes`, 69 | `The amount of data in bytes that is accessible by this dataset, which may or may not be shared with other datasets in the pool.`, 70 | transformNumeric, 71 | datasetLabels..., 72 | ), 73 | `refquota`: newProperty( 74 | subsystemDataset, 75 | `referenced_quota_bytes`, 76 | `The maximum amount of space in bytes this dataset can consume.`, 77 | transformNumeric, 78 | datasetLabels..., 79 | ), 80 | `refreservation`: newProperty( 81 | subsystemDataset, 82 | `referenced_reservation_bytes`, 83 | `The minimum amount of space in bytes guaranteed to this dataset.`, 84 | transformNumeric, 85 | datasetLabels..., 86 | ), 87 | `reservation`: newProperty( 88 | subsystemDataset, 89 | `reservation_bytes`, 90 | `The minimum amount of space in bytes guaranteed to a dataset and its descendants.`, 91 | transformNumeric, 92 | datasetLabels..., 93 | ), 94 | `snapshot_count`: newProperty( 95 | subsystemDataset, 96 | `snapshot_count_total`, 97 | `The total number of snapshots that exist under this location in the dataset tree. 
This value is only available when a snapshot_limit has been set somewhere in the tree under which the dataset resides.`, 98 | transformNumeric, 99 | datasetLabels..., 100 | ), 101 | `snapshot_limit`: newProperty( 102 | subsystemDataset, 103 | `snapshot_limit_total`, 104 | `The total limit on the number of snapshots that can be created on a dataset and its descendents.`, 105 | transformNumeric, 106 | datasetLabels..., 107 | ), 108 | `used`: newProperty( 109 | subsystemDataset, 110 | `used_bytes`, 111 | `The amount of space in bytes consumed by this dataset and all its descendents.`, 112 | transformNumeric, 113 | datasetLabels..., 114 | ), 115 | `usedbychildren`: newProperty( 116 | subsystemDataset, 117 | `used_by_children_bytes`, 118 | `The amount of space in bytes used by children of this dataset, which would be freed if all the dataset's children were destroyed.`, 119 | transformNumeric, 120 | datasetLabels..., 121 | ), 122 | `usedbydataset`: newProperty( 123 | subsystemDataset, 124 | `used_by_dataset_bytes`, 125 | `The amount of space in bytes used by this dataset itself, which would be freed if the dataset were destroyed.`, 126 | transformNumeric, 127 | datasetLabels..., 128 | ), 129 | `usedbyrefreservation`: newProperty( 130 | subsystemDataset, 131 | `used_by_referenced_reservation_bytes`, 132 | `The amount of space in bytes used by a refreservation set on this dataset, which would be freed if the refreservation was removed.`, 133 | transformNumeric, 134 | datasetLabels..., 135 | ), 136 | `usedbysnapshots`: newProperty( 137 | subsystemDataset, 138 | `used_by_snapshot_bytes`, 139 | `The amount of space in bytes consumed by snapshots of this dataset.`, 140 | transformNumeric, 141 | datasetLabels..., 142 | ), 143 | `volsize`: newProperty( 144 | subsystemDataset, 145 | `volume_size_bytes`, 146 | `The logical size in bytes of this volume.`, 147 | transformNumeric, 148 | datasetLabels..., 149 | ), 150 | `written`: newProperty( 151 | subsystemDataset, 152 | 
`written_bytes`, 153 | `The amount of referenced space in bytes written to this dataset since the previous snapshot.`, 154 | transformNumeric, 155 | datasetLabels..., 156 | ), 157 | }, 158 | } 159 | ) 160 | 161 | func init() { 162 | registerCollector(`dataset-filesystem`, defaultEnabled, defaultFilesystemProps, newFilesystemCollector) 163 | registerCollector(`dataset-snapshot`, defaultDisabled, defaultSnapshotProps, newSnapshotCollector) 164 | registerCollector(`dataset-volume`, defaultEnabled, defaultVolumeProps, newVolumeCollector) 165 | } 166 | 167 | type datasetCollector struct { 168 | kind zfs.DatasetKind 169 | log *slog.Logger 170 | client zfs.Client 171 | props []string 172 | } 173 | 174 | func (c *datasetCollector) describe(ch chan<- *prometheus.Desc) { 175 | for _, k := range c.props { 176 | prop, err := datasetProperties.find(k) 177 | if err != nil { 178 | c.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, c.kind, `property`, k, `err`, err) 179 | continue 180 | } 181 | ch <- prop.desc 182 | } 183 | } 184 | 185 | func (c *datasetCollector) update(ch chan<- metric, pools []string, excludes regexpCollection) error { 186 | var wg sync.WaitGroup 187 | errChan := make(chan error, len(pools)) 188 | for _, pool := range pools { 189 | wg.Add(1) 190 | go func(pool string) { 191 | if err := c.updatePoolMetrics(ch, pool, excludes); err != nil { 192 | errChan <- err 193 | } 194 | wg.Done() 195 | }(pool) 196 | } 197 | wg.Wait() 198 | 199 | select { 200 | case err := <-errChan: 201 | return err 202 | default: 203 | return nil 204 | } 205 | } 206 | 207 | func (c *datasetCollector) updatePoolMetrics(ch chan<- metric, pool string, excludes regexpCollection) error { 208 | datasets := c.client.Datasets(pool, c.kind) 209 | props, err := datasets.Properties(c.props...) 
210 | if err != nil { 211 | return err 212 | } 213 | 214 | for _, dataset := range props { 215 | if excludes.MatchString(dataset.DatasetName()) { 216 | continue 217 | } 218 | if err = c.updateDatasetMetrics(ch, pool, dataset); err != nil { 219 | return err 220 | } 221 | } 222 | 223 | return nil 224 | } 225 | 226 | func (c *datasetCollector) updateDatasetMetrics(ch chan<- metric, pool string, dataset zfs.DatasetProperties) error { 227 | labelValues := []string{dataset.DatasetName(), pool, string(c.kind)} 228 | 229 | for k, v := range dataset.Properties() { 230 | prop, err := datasetProperties.find(k) 231 | if err != nil { 232 | c.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, c.kind, `property`, k, `err`, err) 233 | } 234 | if err = prop.push(ch, v, labelValues...); err != nil { 235 | return err 236 | } 237 | } 238 | 239 | return nil 240 | } 241 | 242 | func newDatasetCollector(kind zfs.DatasetKind, l *slog.Logger, c zfs.Client, props []string) (Collector, error) { 243 | switch kind { 244 | case zfs.DatasetFilesystem, zfs.DatasetSnapshot, zfs.DatasetVolume: 245 | default: 246 | return nil, fmt.Errorf("unknown dataset type: %s", kind) 247 | } 248 | 249 | return &datasetCollector{kind: kind, log: l, client: c, props: props}, nil 250 | } 251 | 252 | func newFilesystemCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) { 253 | return newDatasetCollector(zfs.DatasetFilesystem, l, c, props) 254 | } 255 | 256 | func newSnapshotCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) { 257 | return newDatasetCollector(zfs.DatasetSnapshot, l, c, props) 258 | } 259 | 260 | func newVolumeCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) { 261 | return newDatasetCollector(zfs.DatasetVolume, l, c, props) 262 | } 263 | -------------------------------------------------------------------------------- /collector/dataset_test.go: 
-------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/golang/mock/gomock" 9 | "github.com/pdf/zfs_exporter/v2/zfs" 10 | "github.com/pdf/zfs_exporter/v2/zfs/mock_zfs" 11 | ) 12 | 13 | type datasetResults struct { 14 | name string 15 | results map[string]string 16 | } 17 | 18 | func TestDatsetMetrics(t *testing.T) { 19 | testCases := []struct { 20 | name string 21 | kinds []zfs.DatasetKind 22 | pools []string 23 | explicitPools []string 24 | propsRequested []string 25 | metricNames []string 26 | propsResults map[string][]datasetResults 27 | metricResults string 28 | }{ 29 | { 30 | name: `all metrics`, 31 | kinds: []zfs.DatasetKind{zfs.DatasetFilesystem}, 32 | pools: []string{`testpool`}, 33 | propsRequested: []string{`available`, `compressratio`, `logicalused`, `logicalreferenced`, `quota`, `refcompressratio`, `referenced`, `refquota`, `refreservation`, `reservation`, `snapshot_count`, `snapshot_limit`, `used`, `usedbychildren`, `usedbydataset`, `usedbyrefreservation`, `usedbysnapshots`, `volsize`, `written`}, 34 | metricNames: []string{`zfs_dataset_available_bytes`, `zfs_dataset_compression_ratio`, `zfs_dataset_logical_used_bytes`, `zfs_dataset_logical_referenced_bytes`, `zfs_dataset_quota_bytes`, `zfs_dataset_referenced_compression_ratio`, `zfs_dataset_referenced_bytes`, `zfs_dataset_referenced_quota_bytes`, `zfs_dataset_reservation_bytes`, `zfs_dataset_snapshot_count_total`, `zfs_datset_snapshot_limit_total`, `zfs_dataset_used_bytes`, `zfs_dataset_used_by_children_bytes`, `zfs_dataset_used_by_datset_bytes`, `zfs_datset_used_by_referenced_reservation_bytes`, `zfs_dataset_used_by_snapshot_bytes`, `zfs_dataset_volume_size_bytes`, `zfs_dataset_written_bytes`}, 35 | propsResults: map[string][]datasetResults{ 36 | `testpool`: { 37 | { 38 | name: `testpool/test`, 39 | results: map[string]string{ 40 | `available`: `1024`, 41 | `compressratio`: 
`2.50`, 42 | `logicalused`: `1024`, 43 | `logicalreferenced`: `512`, 44 | `quota`: `512`, 45 | `refcompressratio`: `24.00`, 46 | `referenced`: `1024`, 47 | `refreservation`: `1024`, 48 | `reservation`: `1024`, 49 | `snapshot_count`: `12`, 50 | `snapshot_limit`: `24`, 51 | `used`: `1024`, 52 | `usedbychildren`: `1024`, 53 | `usedbydataset`: `1024`, 54 | `usedbyrefreservation`: `1024`, 55 | `usedbysnapshots`: `1024`, 56 | `volsize`: `1024`, 57 | `written`: `1024`, 58 | }, 59 | }, 60 | }, 61 | }, 62 | metricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children. 63 | # TYPE zfs_dataset_available_bytes gauge 64 | zfs_dataset_available_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 65 | # HELP zfs_dataset_compression_ratio The ratio of compressed size vs uncompressed size for this dataset. 66 | # TYPE zfs_dataset_compression_ratio gauge 67 | zfs_dataset_compression_ratio{name="testpool/test",pool="testpool",type="filesystem"} 0.4 68 | # HELP zfs_dataset_logical_used_bytes The amount of space in bytes that is "logically" consumed by this dataset and all its descendents. See the "used_bytes" property. 69 | # TYPE zfs_dataset_logical_used_bytes gauge 70 | zfs_dataset_logical_used_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 71 | # HELP zfs_dataset_logical_referenced_bytes The amount of space that is "logically" accessible by this dataset. See the "referenced_bytes" property. 72 | # TYPE zfs_dataset_logical_referenced_bytes gauge 73 | zfs_dataset_logical_referenced_bytes{name="testpool/test",pool="testpool",type="filesystem"} 512 74 | # HELP zfs_dataset_quota_bytes The maximum amount of space in bytes this dataset and its descendents can consume. 
75 | # TYPE zfs_dataset_quota_bytes gauge 76 | zfs_dataset_quota_bytes{name="testpool/test",pool="testpool",type="filesystem"} 512 77 | # HELP zfs_dataset_referenced_bytes The amount of data in bytes that is accessible by this dataset, which may or may not be shared with other datasets in the pool. 78 | # TYPE zfs_dataset_referenced_bytes gauge 79 | zfs_dataset_referenced_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 80 | # HELP zfs_dataset_referenced_compression_ratio The ratio of compressed size vs uncompressed size for the referenced space of this dataset. See also the "compression_ratio" property. 81 | # TYPE zfs_dataset_referenced_compression_ratio gauge 82 | zfs_dataset_referenced_compression_ratio{name="testpool/test",pool="testpool",type="filesystem"} 0.041666666666666664 83 | # HELP zfs_dataset_reservation_bytes The minimum amount of space in bytes guaranteed to a dataset and its descendants. 84 | # TYPE zfs_dataset_reservation_bytes gauge 85 | zfs_dataset_reservation_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 86 | # HELP zfs_dataset_snapshot_count_total The total number of snapshots that exist under this location in the dataset tree. This value is only available when a snapshot_limit has been set somewhere in the tree under which the dataset resides. 87 | # TYPE zfs_dataset_snapshot_count_total gauge 88 | zfs_dataset_snapshot_count_total{name="testpool/test",pool="testpool",type="filesystem"} 12 89 | # HELP zfs_dataset_used_by_children_bytes The amount of space in bytes used by children of this dataset, which would be freed if all the dataset's children were destroyed. 90 | # TYPE zfs_dataset_used_by_children_bytes gauge 91 | zfs_dataset_used_by_children_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 92 | # HELP zfs_dataset_used_by_snapshot_bytes The amount of space in bytes consumed by snapshots of this dataset. 
93 | # TYPE zfs_dataset_used_by_snapshot_bytes gauge 94 | zfs_dataset_used_by_snapshot_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 95 | # HELP zfs_dataset_used_bytes The amount of space in bytes consumed by this dataset and all its descendents. 96 | # TYPE zfs_dataset_used_bytes gauge 97 | zfs_dataset_used_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 98 | # HELP zfs_dataset_volume_size_bytes The logical size in bytes of this volume. 99 | # TYPE zfs_dataset_volume_size_bytes gauge 100 | zfs_dataset_volume_size_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 101 | # HELP zfs_dataset_written_bytes The amount of referenced space in bytes written to this dataset since the previous snapshot. 102 | # TYPE zfs_dataset_written_bytes gauge 103 | zfs_dataset_written_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 104 | `, 105 | }, 106 | { 107 | name: `multiple pools`, 108 | kinds: []zfs.DatasetKind{zfs.DatasetFilesystem}, 109 | pools: []string{`testpool1`, `testpool2`}, 110 | propsRequested: []string{`available`}, 111 | metricNames: []string{`zfs_dataset_available_bytes`}, 112 | propsResults: map[string][]datasetResults{ 113 | `testpool1`: { 114 | { 115 | name: `testpool1/test`, 116 | results: map[string]string{ 117 | `available`: `1024`, 118 | }, 119 | }, 120 | }, 121 | `testpool2`: { 122 | { 123 | name: `testpool2/test`, 124 | results: map[string]string{ 125 | `available`: `1024`, 126 | }, 127 | }, 128 | }, 129 | }, 130 | metricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children. 
131 | # TYPE zfs_dataset_available_bytes gauge 132 | zfs_dataset_available_bytes{name="testpool1/test",pool="testpool1",type="filesystem"} 1024 133 | zfs_dataset_available_bytes{name="testpool2/test",pool="testpool2",type="filesystem"} 1024 134 | `, 135 | }, 136 | { 137 | name: `explicit pools`, 138 | kinds: []zfs.DatasetKind{zfs.DatasetFilesystem}, 139 | pools: []string{`testpool1`, `testpool2`}, 140 | explicitPools: []string{`testpool1`}, 141 | propsRequested: []string{`available`}, 142 | metricNames: []string{`zfs_dataset_available_bytes`}, 143 | propsResults: map[string][]datasetResults{ 144 | `testpool1`: { 145 | { 146 | name: `testpool1/test`, 147 | results: map[string]string{ 148 | `available`: `1024`, 149 | }, 150 | }, 151 | }, 152 | `testpool2`: { 153 | { 154 | name: `testpool2/test`, 155 | results: map[string]string{ 156 | `available`: `1024`, 157 | }, 158 | }, 159 | }, 160 | }, 161 | metricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children. 162 | # TYPE zfs_dataset_available_bytes gauge 163 | zfs_dataset_available_bytes{name="testpool1/test",pool="testpool1",type="filesystem"} 1024 164 | `, 165 | }, 166 | { 167 | name: `multiple collectors`, 168 | kinds: []zfs.DatasetKind{zfs.DatasetFilesystem, zfs.DatasetSnapshot, zfs.DatasetVolume}, 169 | pools: []string{`testpool`}, 170 | propsRequested: []string{`available`}, 171 | metricNames: []string{`zfs_dataset_available_bytes`}, 172 | propsResults: map[string][]datasetResults{ 173 | `testpool`: { 174 | { 175 | name: `testpool/test`, 176 | results: map[string]string{ 177 | `available`: `1024`, 178 | }, 179 | }, 180 | }, 181 | }, 182 | metricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children. 
183 | # TYPE zfs_dataset_available_bytes gauge 184 | zfs_dataset_available_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024 185 | zfs_dataset_available_bytes{name="testpool/test",pool="testpool",type="snapshot"} 1024 186 | zfs_dataset_available_bytes{name="testpool/test",pool="testpool",type="volume"} 1024 187 | `, 188 | }, 189 | { 190 | name: `unsupported metric`, 191 | kinds: []zfs.DatasetKind{zfs.DatasetFilesystem}, 192 | pools: []string{`testpool`}, 193 | propsRequested: []string{`unsupported`}, 194 | metricNames: []string{`zfs_dataset_unsupported`}, 195 | propsResults: map[string][]datasetResults{ 196 | `testpool`: { 197 | { 198 | name: `testpool/test`, 199 | results: map[string]string{ 200 | `unsupported`: `1024`, 201 | }, 202 | }, 203 | }, 204 | }, 205 | metricResults: `# HELP zfs_dataset_unsupported !!! This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!! 
206 | # TYPE zfs_dataset_unsupported gauge 207 | zfs_dataset_unsupported{name="testpool/test",pool="testpool",type="filesystem"} 1024 208 | `, 209 | 		}, 210 | 	} 211 | 212 | 	for _, tc := range testCases { 213 | 		tc := tc 214 | 		t.Run(tc.name, func(t *testing.T) { 215 | 			t.Parallel() 216 | 			ctrl, ctx := gomock.WithContext(context.Background(), t) 217 | 			zfsClient := mock_zfs.NewMockClient(ctrl) 218 | 			config := defaultConfig(zfsClient) 219 | 			if tc.explicitPools != nil { 220 | 				config.Pools = tc.explicitPools 221 | 			} 222 | 223 | 			zfsClient.EXPECT().PoolNames().Return(tc.pools, nil).Times(1) 224 | 			collector, err := NewZFS(config) 225 | 			if err != nil { 226 | 				t.Fatal(err) 227 | 			} 228 | 			collector.Collectors = make(map[string]State) 229 | 230 | 			for _, kind := range tc.kinds { 231 | 				switch kind { 232 | 				case zfs.DatasetFilesystem: 233 | 					collector.Collectors[`dataset-filesystem`] = State{ 234 | 						Name:       "dataset-filesystem", 235 | 						Enabled:    boolPointer(true), 236 | 						Properties: stringPointer(strings.Join(tc.propsRequested, `,`)), 237 | 						factory:    newFilesystemCollector, 238 | 					} 239 | 				case zfs.DatasetSnapshot: 240 | 					collector.Collectors[`dataset-snapshot`] = State{ 241 | 						Name:       "dataset-snapshot", 242 | 						Enabled:    boolPointer(true), 243 | 						Properties: stringPointer(strings.Join(tc.propsRequested, `,`)), 244 | 						factory:    newSnapshotCollector, 245 | 					} 246 | 				case zfs.DatasetVolume: 247 | 					collector.Collectors[`dataset-volume`] = State{ 248 | 						Name:       "dataset-volume", 249 | 						Enabled:    boolPointer(true), 250 | 						Properties: stringPointer(strings.Join(tc.propsRequested, `,`)), 251 | 						factory:    newVolumeCollector, 252 | 					} 253 | 				} 254 | 				for _, pool := range tc.pools { 255 | 					if tc.explicitPools != nil { 256 | 						wanted := false 257 | 						for _, explicit := range tc.explicitPools { 258 | 							// BUG FIX: break only once a match is found; the old
							// unconditional break compared only the first
							// explicitPools entry, silently ignoring the rest.
							if pool == explicit { 259 | 								wanted = true 260 | 								break 261 | 							} 262 | 						} 263 | 						if !wanted { 264 | 							continue 265 | 						} 266 | 					} 267 | 					zfsDatasetResults := make([]zfs.DatasetProperties, len(tc.propsResults[pool])) 268 | 					for i, propResults := range
tc.propsResults[pool] { 269 | zfsDatasetProperties := mock_zfs.NewMockDatasetProperties(ctrl) 270 | zfsDatasetProperties.EXPECT().DatasetName().Return(propResults.name).Times(2) 271 | zfsDatasetProperties.EXPECT().Properties().Return(propResults.results).Times(1) 272 | zfsDatasetResults[i] = zfsDatasetProperties 273 | } 274 | zfsDatasets := mock_zfs.NewMockDatasets(ctrl) 275 | zfsDatasets.EXPECT().Properties(tc.propsRequested).Return(zfsDatasetResults, nil).Times(1) 276 | zfsClient.EXPECT().Datasets(pool, kind).Return(zfsDatasets).Times(1) 277 | } 278 | } 279 | 280 | if err = callCollector(ctx, collector, []byte(tc.metricResults), tc.metricNames); err != nil { 281 | t.Fatal(err) 282 | } 283 | }) 284 | } 285 | } 286 | -------------------------------------------------------------------------------- /collector/pool.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "sync" 7 | 8 | "github.com/pdf/zfs_exporter/v2/zfs" 9 | "github.com/prometheus/client_golang/prometheus" 10 | ) 11 | 12 | const ( 13 | defaultPoolProps = `allocated,dedupratio,fragmentation,free,freeing,health,leaked,readonly,size` 14 | ) 15 | 16 | var ( 17 | poolLabels = []string{`pool`} 18 | poolProperties = propertyStore{ 19 | defaultSubsystem: subsystemPool, 20 | defaultLabels: poolLabels, 21 | store: map[string]property{ 22 | `allocated`: newProperty( 23 | subsystemPool, 24 | `allocated_bytes`, 25 | `Amount of storage in bytes used within the pool.`, 26 | transformNumeric, 27 | poolLabels..., 28 | ), 29 | `dedupratio`: newProperty( 30 | subsystemPool, 31 | `deduplication_ratio`, 32 | `The ratio of deduplicated size vs undeduplicated size for data in this pool.`, 33 | transformMultiplier, 34 | poolLabels..., 35 | ), 36 | `capacity`: newProperty( 37 | subsystemPool, 38 | `capacity_ratio`, 39 | `Ratio of pool space used.`, 40 | transformPercentage, 41 | poolLabels..., 42 | ), 43 | `expandsize`: 
newProperty( 44 | subsystemPool, 45 | `expand_size_bytes`, 46 | `Amount of uninitialized space within the pool or device that can be used to increase the total capacity of the pool.`, 47 | transformNumeric, 48 | poolLabels..., 49 | ), 50 | `fragmentation`: newProperty( 51 | subsystemPool, 52 | `fragmentation_ratio`, 53 | `The fragmentation ratio of the pool.`, 54 | transformPercentage, 55 | poolLabels..., 56 | ), 57 | `free`: newProperty( 58 | subsystemPool, 59 | `free_bytes`, 60 | `The amount of free space in bytes available in the pool.`, 61 | transformNumeric, 62 | poolLabels..., 63 | ), 64 | `freeing`: newProperty( 65 | subsystemPool, 66 | `freeing_bytes`, 67 | `The amount of space in bytes remaining to be freed following the destruction of a file system or snapshot.`, 68 | transformNumeric, 69 | poolLabels..., 70 | ), 71 | `health`: newProperty( 72 | subsystemPool, 73 | `health`, 74 | fmt.Sprintf("Health status code for the pool [%d: %s, %d: %s, %d: %s, %d: %s, %d: %s, %d: %s, %d: %s].", 75 | poolOnline, zfs.PoolOnline, 76 | poolDegraded, zfs.PoolDegraded, 77 | poolFaulted, zfs.PoolFaulted, 78 | poolOffline, zfs.PoolOffline, 79 | poolUnavail, zfs.PoolUnavail, 80 | poolRemoved, zfs.PoolRemoved, 81 | poolSuspended, zfs.PoolSuspended, 82 | ), 83 | transformHealthCode, 84 | poolLabels..., 85 | ), 86 | `leaked`: newProperty( 87 | subsystemPool, 88 | `leaked_bytes`, 89 | `Number of leaked bytes in the pool.`, 90 | transformNumeric, 91 | poolLabels..., 92 | ), 93 | `readonly`: newProperty( 94 | subsystemPool, 95 | `readonly`, 96 | `Read-only status of the pool [0: read-write, 1: read-only].`, 97 | transformBool, 98 | poolLabels..., 99 | ), 100 | `size`: newProperty( 101 | subsystemPool, 102 | `size_bytes`, 103 | `Total size in bytes of the storage pool.`, 104 | transformNumeric, 105 | poolLabels..., 106 | ), 107 | }, 108 | } 109 | ) 110 | 111 | func init() { 112 | registerCollector(`pool`, defaultEnabled, defaultPoolProps, newPoolCollector) 113 | } 114 | 115 | type 
poolCollector struct { 116 | log *slog.Logger 117 | client zfs.Client 118 | props []string 119 | } 120 | 121 | func (c *poolCollector) describe(ch chan<- *prometheus.Desc) { 122 | for _, k := range c.props { 123 | prop, err := poolProperties.find(k) 124 | if err != nil { 125 | c.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, `pool`, `property`, k, `err`, err) 126 | continue 127 | } 128 | ch <- prop.desc 129 | } 130 | } 131 | 132 | func (c *poolCollector) update(ch chan<- metric, pools []string, excludes regexpCollection) error { 133 | var wg sync.WaitGroup 134 | errChan := make(chan error, len(pools)) 135 | for _, pool := range pools { 136 | wg.Add(1) 137 | go func(pool string) { 138 | if err := c.updatePoolMetrics(ch, pool); err != nil { 139 | errChan <- err 140 | } 141 | wg.Done() 142 | }(pool) 143 | } 144 | wg.Wait() 145 | 146 | select { 147 | case err := <-errChan: 148 | return err 149 | default: 150 | return nil 151 | } 152 | } 153 | 154 | func (c *poolCollector) updatePoolMetrics(ch chan<- metric, pool string) error { 155 | p := c.client.Pool(pool) 156 | props, err := p.Properties(c.props...) 
157 | if err != nil { 158 | return err 159 | } 160 | 161 | labelValues := []string{pool} 162 | for k, v := range props.Properties() { 163 | prop, err := poolProperties.find(k) 164 | if err != nil { 165 | c.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, `pool`, `property`, k, `err`, err) 166 | } 167 | if err = prop.push(ch, v, labelValues...); err != nil { 168 | return err 169 | } 170 | } 171 | 172 | return nil 173 | } 174 | 175 | func newPoolCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) { 176 | return &poolCollector{log: l, client: c, props: props}, nil 177 | } 178 | -------------------------------------------------------------------------------- /collector/pool_test.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/golang/mock/gomock" 9 | "github.com/pdf/zfs_exporter/v2/zfs/mock_zfs" 10 | ) 11 | 12 | func TestPoolMetrics(t *testing.T) { 13 | testCases := []struct { 14 | name string 15 | pools []string 16 | explicitPools []string 17 | propsRequested []string 18 | metricNames []string 19 | propsResults map[string]map[string]string 20 | metricResults string 21 | }{ 22 | { 23 | name: `all metrics`, 24 | pools: []string{`testpool`}, 25 | propsRequested: []string{`allocated`, `dedupratio`, `capacity`, `expandsize`, `fragmentation`, `free`, `freeing`, `health`, `leaked`, `readonly`, `size`}, 26 | metricNames: []string{`zfs_pool_allocated_bytes`, `zfs_pool_deduplication_ratio`, `zfs_pool_capacity_ratio`, `zfs_pool_expand_size_bytes`, `zfs_pool_fragmentation_ratio`, `zfs_pool_free_bytes`, `zfs_pool_freeing_bytes`, `zfs_pool_health`, `zfs_pool_leaked_bytes`, `zfs_pool_readonly`, `zfs_pool_size_bytes`}, 27 | propsResults: map[string]map[string]string{ 28 | `testpool`: { 29 | `allocated`: `1024`, 30 | `dedupratio`: `2.50`, 31 | `capacity`: `50`, 32 | `expandsize`: `2048`, 33 | `fragmentation`: 
`25`, 34 | `free`: `1024`, 35 | `freeing`: `0`, 36 | `health`: `ONLINE`, 37 | `leaked`: `1`, 38 | `readonly`: `off`, 39 | `size`: `2048`, 40 | }, 41 | }, 42 | metricResults: `# HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool. 43 | # TYPE zfs_pool_allocated_bytes gauge 44 | zfs_pool_allocated_bytes{pool="testpool"} 1024 45 | # HELP zfs_pool_capacity_ratio Ratio of pool space used. 46 | # TYPE zfs_pool_capacity_ratio gauge 47 | zfs_pool_capacity_ratio{pool="testpool"} 0.5 48 | # HELP zfs_pool_deduplication_ratio The ratio of deduplicated size vs undeduplicated size for data in this pool. 49 | # TYPE zfs_pool_deduplication_ratio gauge 50 | zfs_pool_deduplication_ratio{pool="testpool"} 0.4 51 | # HELP zfs_pool_expand_size_bytes Amount of uninitialized space within the pool or device that can be used to increase the total capacity of the pool. 52 | # TYPE zfs_pool_expand_size_bytes gauge 53 | zfs_pool_expand_size_bytes{pool="testpool"} 2048 54 | # HELP zfs_pool_fragmentation_ratio The fragmentation ratio of the pool. 55 | # TYPE zfs_pool_fragmentation_ratio gauge 56 | zfs_pool_fragmentation_ratio{pool="testpool"} 0.25 57 | # HELP zfs_pool_free_bytes The amount of free space in bytes available in the pool. 58 | # TYPE zfs_pool_free_bytes gauge 59 | zfs_pool_free_bytes{pool="testpool"} 1024 60 | # HELP zfs_pool_freeing_bytes The amount of space in bytes remaining to be freed following the destruction of a file system or snapshot. 61 | # TYPE zfs_pool_freeing_bytes gauge 62 | zfs_pool_freeing_bytes{pool="testpool"} 0 63 | # HELP zfs_pool_health Health status code for the pool [0: ONLINE, 1: DEGRADED, 2: FAULTED, 3: OFFLINE, 4: UNAVAIL, 5: REMOVED, 6: SUSPENDED]. 64 | # TYPE zfs_pool_health gauge 65 | zfs_pool_health{pool="testpool"} 0 66 | # HELP zfs_pool_leaked_bytes Number of leaked bytes in the pool. 
67 | # TYPE zfs_pool_leaked_bytes gauge 68 | zfs_pool_leaked_bytes{pool="testpool"} 1 69 | # HELP zfs_pool_readonly Read-only status of the pool [0: read-write, 1: read-only]. 70 | # TYPE zfs_pool_readonly gauge 71 | zfs_pool_readonly{pool="testpool"} 0 72 | # HELP zfs_pool_size_bytes Total size in bytes of the storage pool. 73 | # TYPE zfs_pool_size_bytes gauge 74 | zfs_pool_size_bytes{pool="testpool"} 2048 75 | `, 76 | }, 77 | { 78 | name: `multiple pools`, 79 | pools: []string{`testpool1`, `testpool2`}, 80 | propsRequested: []string{`allocated`}, 81 | metricNames: []string{`zfs_pool_allocated_bytes`}, 82 | propsResults: map[string]map[string]string{ 83 | `testpool1`: { 84 | `allocated`: `1024`, 85 | }, 86 | `testpool2`: { 87 | `allocated`: `2048`, 88 | }, 89 | }, 90 | metricResults: `# HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool. 91 | # TYPE zfs_pool_allocated_bytes gauge 92 | zfs_pool_allocated_bytes{pool="testpool1"} 1024 93 | zfs_pool_allocated_bytes{pool="testpool2"} 2048 94 | `, 95 | }, 96 | { 97 | name: `explicit pools`, 98 | pools: []string{`testpool1`, `testpool2`}, 99 | explicitPools: []string{`testpool1`}, 100 | propsRequested: []string{`allocated`}, 101 | metricNames: []string{`zfs_pool_allocated_bytes`}, 102 | propsResults: map[string]map[string]string{ 103 | `testpool1`: { 104 | `allocated`: `1024`, 105 | }, 106 | `testpool2`: { 107 | `allocated`: `2048`, 108 | }, 109 | }, 110 | metricResults: `# HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool. 
111 | # TYPE zfs_pool_allocated_bytes gauge 112 | zfs_pool_allocated_bytes{pool="testpool1"} 1024 113 | `, 114 | }, 115 | { 116 | name: `health status`, 117 | pools: []string{`onlinepool`, `degradedpool`, `faultedpool`, `offlinepool`, `unavailpool`, `removedpool`, `suspendedpool`}, 118 | propsRequested: []string{`health`}, 119 | metricNames: []string{`zfs_pool_health`}, 120 | propsResults: map[string]map[string]string{ 121 | `onlinepool`: { 122 | `health`: `ONLINE`, 123 | }, 124 | `degradedpool`: { 125 | `health`: `DEGRADED`, 126 | }, 127 | `faultedpool`: { 128 | `health`: `FAULTED`, 129 | }, 130 | `offlinepool`: { 131 | `health`: `OFFLINE`, 132 | }, 133 | `unavailpool`: { 134 | `health`: `UNAVAIL`, 135 | }, 136 | `removedpool`: { 137 | `health`: `REMOVED`, 138 | }, 139 | `suspendedpool`: { 140 | `health`: `SUSPENDED`, 141 | }, 142 | }, 143 | metricResults: `# HELP zfs_pool_health Health status code for the pool [0: ONLINE, 1: DEGRADED, 2: FAULTED, 3: OFFLINE, 4: UNAVAIL, 5: REMOVED, 6: SUSPENDED]. 144 | # TYPE zfs_pool_health gauge 145 | zfs_pool_health{pool="onlinepool"} 0 146 | zfs_pool_health{pool="degradedpool"} 1 147 | zfs_pool_health{pool="faultedpool"} 2 148 | zfs_pool_health{pool="offlinepool"} 3 149 | zfs_pool_health{pool="unavailpool"} 4 150 | zfs_pool_health{pool="removedpool"} 5 151 | zfs_pool_health{pool="suspendedpool"} 6 152 | `, 153 | }, 154 | { 155 | name: `unsupported metric`, 156 | pools: []string{`testpool`}, 157 | propsRequested: []string{`unsupported`}, 158 | metricNames: []string{`zfs_pool_unsupported`}, 159 | propsResults: map[string]map[string]string{ 160 | `testpool`: { 161 | `unsupported`: `1024`, 162 | }, 163 | }, 164 | metricResults: `# HELP zfs_pool_unsupported !!! This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!! 
165 | # TYPE zfs_pool_unsupported gauge 166 | zfs_pool_unsupported{pool="testpool"} 1024 167 | `, 168 | }, 169 | { 170 | name: `legacy fragmentation/dedupratio`, 171 | pools: []string{`testpool`}, 172 | propsRequested: []string{`fragmentation`, `dedupratio`}, 173 | metricNames: []string{`zfs_pool_fragmentation_ratio`, `zfs_pool_deduplication_ratio`}, 174 | propsResults: map[string]map[string]string{ 175 | `testpool`: { 176 | `fragmentation`: `5%`, 177 | `dedupratio`: `2.50x`, 178 | }, 179 | }, 180 | metricResults: `# HELP zfs_pool_fragmentation_ratio The fragmentation ratio of the pool. 181 | # TYPE zfs_pool_fragmentation_ratio gauge 182 | zfs_pool_fragmentation_ratio{pool="testpool"} 0.05 183 | # HELP zfs_pool_deduplication_ratio The ratio of deduplicated size vs undeduplicated size for data in this pool. 184 | # TYPE zfs_pool_deduplication_ratio gauge 185 | zfs_pool_deduplication_ratio{pool="testpool"} 0.4 186 | `, 187 | }, 188 | } 189 | 190 | for _, tc := range testCases { 191 | tc := tc 192 | t.Run(tc.name, func(t *testing.T) { 193 | t.Parallel() 194 | ctrl, ctx := gomock.WithContext(context.Background(), t) 195 | zfsClient := mock_zfs.NewMockClient(ctrl) 196 | config := defaultConfig(zfsClient) 197 | if tc.explicitPools != nil { 198 | config.Pools = tc.explicitPools 199 | } 200 | 201 | zfsClient.EXPECT().PoolNames().Return(tc.pools, nil).Times(1) 202 | for _, pool := range tc.pools { 203 | if tc.explicitPools != nil { 204 | wanted := false 205 | for _, explicit := range tc.explicitPools { 206 | if pool == explicit { 207 | wanted = true 208 | } 209 | break 210 | } 211 | if !wanted { 212 | continue 213 | } 214 | } 215 | zfsPoolProperties := mock_zfs.NewMockPoolProperties(ctrl) 216 | zfsPoolProperties.EXPECT().Properties().Return(tc.propsResults[pool]).Times(1) 217 | zfsPool := mock_zfs.NewMockPool(ctrl) 218 | zfsPool.EXPECT().Properties(tc.propsRequested).Return(zfsPoolProperties, nil).Times(1) 219 | zfsClient.EXPECT().Pool(pool).Return(zfsPool).Times(1) 220 | } 
221 | 222 | collector, err := NewZFS(config) 223 | if err != nil { 224 | t.Fatal(err) 225 | } 226 | collector.Collectors = map[string]State{ 227 | `pool`: { 228 | Name: "pool", 229 | Enabled: boolPointer(true), 230 | Properties: stringPointer(strings.Join(tc.propsRequested, `,`)), 231 | factory: newPoolCollector, 232 | }, 233 | } 234 | 235 | if err = callCollector(ctx, collector, []byte(tc.metricResults), tc.metricNames); err != nil { 236 | t.Fatal(err) 237 | } 238 | }) 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /collector/transform.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/pdf/zfs_exporter/v2/zfs" 8 | ) 9 | 10 | type poolHealthCode int 11 | 12 | const ( 13 | poolOnline poolHealthCode = iota 14 | poolDegraded 15 | poolFaulted 16 | poolOffline 17 | poolUnavail 18 | poolRemoved 19 | poolSuspended 20 | ) 21 | 22 | func transformNumeric(value string) (float64, error) { 23 | if value == `-` || value == `none` { 24 | return 0, nil 25 | } 26 | return strconv.ParseFloat(value, 64) 27 | } 28 | 29 | func transformHealthCode(status string) (float64, error) { 30 | var result poolHealthCode 31 | switch zfs.PoolStatus(status) { 32 | case zfs.PoolOnline: 33 | result = poolOnline 34 | case zfs.PoolDegraded: 35 | result = poolDegraded 36 | case zfs.PoolFaulted: 37 | result = poolFaulted 38 | case zfs.PoolOffline: 39 | result = poolOffline 40 | case zfs.PoolUnavail: 41 | result = poolUnavail 42 | case zfs.PoolRemoved: 43 | result = poolRemoved 44 | case zfs.PoolSuspended: 45 | result = poolSuspended 46 | default: 47 | return -1, fmt.Errorf(`unknown pool heath status: %s`, status) 48 | } 49 | 50 | return float64(result), nil 51 | } 52 | 53 | func transformBool(value string) (float64, error) { 54 | switch value { 55 | case `on`, `yes`, `enabled`, `active`: 56 | return 1, nil 57 | case `off`, `no`, 
`disabled`, `inactive`, `-`: 58 | return 0, nil 59 | } 60 | 61 | return -1, fmt.Errorf(`could not convert '%s' to bool`, value) 62 | } 63 | 64 | func transformPercentage(value string) (float64, error) { 65 | if len(value) > 0 && value[len(value)-1] == '%' { 66 | value = value[:len(value)-1] 67 | } 68 | v, err := transformNumeric(value) 69 | if err != nil { 70 | return -1, err 71 | } 72 | 73 | return v / 100, nil 74 | } 75 | 76 | func transformMultiplier(value string) (float64, error) { 77 | if len(value) > 0 && value[len(value)-1] == 'x' { 78 | value = value[:len(value)-1] 79 | } 80 | v, err := transformNumeric(value) 81 | if err != nil { 82 | return -1, err 83 | } 84 | return 1 / v, nil 85 | } 86 | -------------------------------------------------------------------------------- /collector/zfs.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "regexp" 7 | "sort" 8 | "strings" 9 | "sync" 10 | "time" 11 | 12 | "github.com/pdf/zfs_exporter/v2/zfs" 13 | "github.com/prometheus/client_golang/prometheus" 14 | ) 15 | 16 | type regexpCollection []*regexp.Regexp 17 | 18 | func (c regexpCollection) MatchString(input string) bool { 19 | for _, r := range c { 20 | if r.MatchString(input) { 21 | return true 22 | } 23 | } 24 | 25 | return false 26 | } 27 | 28 | // ZFSConfig configures a ZFS collector 29 | type ZFSConfig struct { 30 | DisableMetrics bool 31 | Deadline time.Duration 32 | Pools []string 33 | Excludes []string 34 | Logger *slog.Logger 35 | ZFSClient zfs.Client 36 | } 37 | 38 | // ZFS collector 39 | type ZFS struct { 40 | Pools []string 41 | Collectors map[string]State 42 | client zfs.Client 43 | disableMetrics bool 44 | deadline time.Duration 45 | cache *metricCache 46 | ready chan struct{} 47 | logger *slog.Logger 48 | excludes regexpCollection 49 | } 50 | 51 | // Describe implements the prometheus.Collector interface. 
52 | func (c *ZFS) Describe(ch chan<- *prometheus.Desc) { 53 | if !c.disableMetrics { 54 | ch <- scrapeDurationDesc 55 | ch <- scrapeSuccessDesc 56 | } 57 | 58 | for _, state := range c.Collectors { 59 | if !*state.Enabled { 60 | continue 61 | } 62 | 63 | collector, err := state.factory(c.logger, c.client, strings.Split(*state.Properties, `,`)) 64 | if err != nil { 65 | continue 66 | } 67 | collector.describe(ch) 68 | } 69 | } 70 | 71 | // Collect implements the prometheus.Collector interface. 72 | func (c *ZFS) Collect(ch chan<- prometheus.Metric) { 73 | select { 74 | case <-c.ready: 75 | default: 76 | c.sendCached(ch, make(map[string]struct{})) 77 | return 78 | } 79 | ctx, cancel := context.WithTimeout(context.Background(), c.deadline) 80 | defer cancel() 81 | 82 | cache := newMetricCache() 83 | proxy := make(chan metric) 84 | // Synchronize on collector completion. 85 | wg := sync.WaitGroup{} 86 | wg.Add(len(c.Collectors)) 87 | // Synchonize after timeout event, ensuring no writers are still active when we return control. 88 | timeout := make(chan struct{}) 89 | finalized := make(chan struct{}) 90 | finalize := func() { 91 | select { 92 | case <-finalized: 93 | default: 94 | close(finalized) 95 | } 96 | } 97 | 98 | // Close the proxy channel upon collector completion. 99 | go func() { 100 | wg.Wait() 101 | close(proxy) 102 | }() 103 | 104 | // Cache metrics as they come in via the proxy channel, and ship them out if we've not exceeded the deadline. 105 | go func() { 106 | for metric := range proxy { 107 | cache.add(metric) 108 | select { 109 | case <-timeout: 110 | finalize() 111 | default: 112 | ch <- metric.prometheus 113 | } 114 | } 115 | // Signal completion and update full cache. 
116 | c.cache.replace(cache) 117 | cancel() 118 | // Notify next collection that we're ready to collect again 119 | c.ready <- struct{}{} 120 | }() 121 | 122 | pools, poolErr := c.getPools(c.Pools) 123 | 124 | for name, state := range c.Collectors { 125 | if !*state.Enabled { 126 | wg.Done() 127 | continue 128 | } 129 | 130 | if poolErr != nil { 131 | c.publishCollectorMetrics(ctx, name, poolErr, 0, proxy) 132 | wg.Done() 133 | continue 134 | } 135 | 136 | collector, err := state.factory(c.logger, c.client, strings.Split(*state.Properties, `,`)) 137 | if err != nil { 138 | c.logger.Error("Error instantiating collector", "collector", name, "err", err) 139 | wg.Done() 140 | continue 141 | } 142 | go func(name string, collector Collector) { 143 | c.execute(ctx, name, collector, proxy, pools) 144 | wg.Done() 145 | }(name, collector) 146 | } 147 | 148 | // Wait for completion or timeout 149 | <-ctx.Done() 150 | err := ctx.Err() 151 | if err == context.Canceled { 152 | finalize() 153 | } else if err != nil { 154 | // Upon exceeding deadline, send cached data for any metrics that have not already been reported. 155 | close(timeout) // assert timeout for flow control in other goroutines 156 | c.cache.merge(cache) 157 | cacheIndex := cache.index() 158 | c.sendCached(ch, cacheIndex) 159 | } 160 | // Ensure there are no in-flight writes to the upstream channel 161 | <-finalized 162 | } 163 | 164 | // sendCached values that do not appear in the current cacheIndex. 165 | func (c *ZFS) sendCached(ch chan<- prometheus.Metric, cacheIndex map[string]struct{}) { 166 | c.cache.RLock() 167 | defer c.cache.RUnlock() 168 | for name, metric := range c.cache.cache { 169 | if _, ok := cacheIndex[name]; ok { 170 | continue 171 | } 172 | ch <- metric 173 | } 174 | } 175 | 176 | func (c *ZFS) getPools(pools []string) ([]string, error) { 177 | poolNames, err := c.client.PoolNames() 178 | if err != nil { 179 | return nil, err 180 | } 181 | // Return all pools if not explicitly configured. 
182 | if len(pools) == 0 { 183 | return poolNames, nil 184 | } 185 | 186 | // Configured pools may not exist, so append available pools as they're found, rather than allocating up front. 187 | result := make([]string, 0) 188 | for _, want := range pools { 189 | found := false 190 | for _, avail := range poolNames { 191 | if want == avail { 192 | result = append(result, want) 193 | found = true 194 | break 195 | } 196 | } 197 | if !found { 198 | c.logger.Warn("Pool unavailable", "pool", want) 199 | } 200 | } 201 | 202 | return result, nil 203 | } 204 | 205 | func (c *ZFS) execute(ctx context.Context, name string, collector Collector, ch chan<- metric, pools []string) { 206 | begin := time.Now() 207 | err := collector.update(ch, pools, c.excludes) 208 | duration := time.Since(begin) 209 | 210 | c.publishCollectorMetrics(ctx, name, err, duration, ch) 211 | } 212 | 213 | func (c *ZFS) publishCollectorMetrics(ctx context.Context, name string, err error, duration time.Duration, ch chan<- metric) { 214 | var success float64 215 | 216 | if err != nil { 217 | c.logger.Error("Executing collector", "status", "error", "collector", name, "durationSeconds", duration.Seconds(), "err", err) 218 | success = 0 219 | } else { 220 | select { 221 | case <-ctx.Done(): 222 | err = ctx.Err() 223 | default: 224 | err = nil 225 | } 226 | if err != nil && err != context.Canceled { 227 | c.logger.Warn("Executing collector", "status", "delayed", "collector", name, "durationSeconds", duration.Seconds(), "err", ctx.Err()) 228 | success = 0 229 | } else { 230 | c.logger.Debug("Executing collector", "status", "ok", "collector", name, "durationSeconds", duration.Seconds()) 231 | success = 1 232 | } 233 | } 234 | 235 | if c.disableMetrics { 236 | return 237 | } 238 | ch <- metric{ 239 | name: scrapeDurationDescName, 240 | prometheus: prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name), 241 | } 242 | ch <- metric{ 243 | name: scrapeSuccessDescName, 244 | 
prometheus: prometheus.MustNewConstMetric(scrapeSuccessDesc, prometheus.GaugeValue, success, name), 245 | } 246 | } 247 | 248 | // NewZFS instantiates a ZFS collector with the provided ZFSConfig 249 | func NewZFS(config ZFSConfig) (*ZFS, error) { 250 | sort.Strings(config.Pools) 251 | sort.Strings(config.Excludes) 252 | excludes := make(regexpCollection, len(config.Excludes)) 253 | for i, v := range config.Excludes { 254 | excludes[i] = regexp.MustCompile(v) 255 | } 256 | ready := make(chan struct{}, 1) 257 | ready <- struct{}{} 258 | return &ZFS{ 259 | disableMetrics: config.DisableMetrics, 260 | client: config.ZFSClient, 261 | deadline: config.Deadline, 262 | Pools: config.Pools, 263 | Collectors: collectorStates, 264 | excludes: excludes, 265 | cache: newMetricCache(), 266 | ready: ready, 267 | logger: config.Logger, 268 | }, nil 269 | } 270 | -------------------------------------------------------------------------------- /collector/zfs_test.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/golang/mock/gomock" 9 | "github.com/pdf/zfs_exporter/v2/zfs/mock_zfs" 10 | ) 11 | 12 | func TestZFSCollectInvalidPools(t *testing.T) { 13 | const result = `# HELP zfs_scrape_collector_duration_seconds zfs_exporter: Duration of a collector scrape. 14 | # TYPE zfs_scrape_collector_duration_seconds gauge 15 | zfs_scrape_collector_duration_seconds{collector="pool"} 0 16 | # HELP zfs_scrape_collector_success zfs_exporter: Whether a collector succeeded. 
17 | # TYPE zfs_scrape_collector_success gauge 18 | zfs_scrape_collector_success{collector="pool"} 0 19 | ` 20 | 21 | ctrl, ctx := gomock.WithContext(context.Background(), t) 22 | zfsClient := mock_zfs.NewMockClient(ctrl) 23 | zfsClient.EXPECT().PoolNames().Return(nil, errors.New(`Error returned from PoolNames()`)).Times(1) 24 | 25 | config := defaultConfig(zfsClient) 26 | config.DisableMetrics = false 27 | collector, err := NewZFS(config) 28 | collector.Collectors = map[string]State{ 29 | `pool`: { 30 | Name: "pool", 31 | Enabled: boolPointer(true), 32 | Properties: stringPointer(``), 33 | factory: newPoolCollector, 34 | }, 35 | } 36 | if err != nil { 37 | t.Fatal(err) 38 | } 39 | 40 | if err = callCollector(ctx, collector, []byte(result), []string{`zfs_scrape_collector_duration_seconds`, `zfs_scrape_collector_success`}); err != nil { 41 | t.Fatal(err) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/pdf/zfs_exporter/v2 2 | 3 | go 1.24 4 | 5 | toolchain go1.24.2 6 | 7 | require ( 8 | github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect 9 | github.com/golang/mock v1.6.0 10 | github.com/prometheus/client_golang v1.22.0 11 | github.com/prometheus/common v0.63.0 12 | golang.org/x/sys v0.32.0 // indirect 13 | ) 14 | 15 | require ( 16 | github.com/alecthomas/kingpin/v2 v2.4.0 17 | github.com/prometheus/exporter-toolkit v0.14.0 18 | ) 19 | 20 | require ( 21 | github.com/beorn7/perks v1.0.1 // indirect 22 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 23 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 24 | github.com/jpillora/backoff v1.0.0 // indirect 25 | github.com/kylelemons/godebug v1.1.0 // indirect 26 | github.com/mdlayher/socket v0.5.1 // indirect 27 | github.com/mdlayher/vsock v1.2.1 // indirect 28 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // 
indirect 29 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect 30 | github.com/prometheus/client_model v0.6.2 // indirect 31 | github.com/prometheus/procfs v0.16.1 // indirect 32 | github.com/rogpeppe/go-internal v1.11.0 // indirect 33 | github.com/xhit/go-str2duration/v2 v2.1.0 // indirect 34 | golang.org/x/crypto v0.37.0 // indirect 35 | golang.org/x/net v0.39.0 // indirect 36 | golang.org/x/oauth2 v0.29.0 // indirect 37 | golang.org/x/sync v0.13.0 // indirect 38 | golang.org/x/text v0.24.0 // indirect 39 | google.golang.org/protobuf v1.36.6 // indirect 40 | gopkg.in/yaml.v2 v2.4.0 // indirect 41 | ) 42 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= 2 | github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= 3 | github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= 4 | github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= 5 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 6 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 7 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 8 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 9 | github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= 10 | github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 11 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 12 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 13 | 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 14 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 15 | github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= 16 | github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= 17 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 18 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 19 | github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= 20 | github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 21 | github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 22 | github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 23 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 24 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 25 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 26 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 27 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 28 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 29 | github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= 30 | github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= 31 | github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= 32 | github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= 33 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 34 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 35 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= 36 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 37 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 38 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 39 | github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= 40 | github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= 41 | github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= 42 | github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= 43 | github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= 44 | github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= 45 | github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= 46 | github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= 47 | github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= 48 | github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= 49 | github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= 50 | github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= 51 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 52 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 53 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 54 | github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 55 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 56 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 57 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 58 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 59 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 60 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 61 | github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= 62 | github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= 63 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 64 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 65 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 66 | golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= 67 | golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= 68 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 69 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 70 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 71 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 72 | golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= 73 | golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= 74 | golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= 75 | golang.org/x/oauth2 
v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= 76 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 77 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 78 | golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= 79 | golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= 80 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 81 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 82 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 83 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 84 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 85 | golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= 86 | golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 87 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 88 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 89 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 90 | golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= 91 | golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= 92 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 93 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 94 | golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 95 | golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 96 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 97 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 98 | google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= 99 | google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= 100 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 101 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 102 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 103 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 104 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 105 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 106 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 107 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 108 | -------------------------------------------------------------------------------- /zfs/dataset.go: -------------------------------------------------------------------------------- 1 | package zfs 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | // DatasetKind enum of supported dataset types 8 | type DatasetKind string 9 | 10 | const ( 11 | // DatasetFilesystem enum entry 12 | DatasetFilesystem DatasetKind = `filesystem` 13 | // DatasetVolume enum entry 14 | DatasetVolume DatasetKind = `volume` 15 | // DatasetSnapshot enum entry 16 | DatasetSnapshot DatasetKind = `snapshot` 17 | ) 18 | 19 | type datasetsImpl struct { 20 | pool string 21 | kind DatasetKind 22 | } 23 | 24 | func (d datasetsImpl) Pool() string { 
25 | return d.pool 26 | } 27 | 28 | func (d datasetsImpl) Kind() DatasetKind { 29 | return d.kind 30 | } 31 | 32 | func (d datasetsImpl) Properties(props ...string) ([]DatasetProperties, error) { 33 | handler := newDatasetHandler() 34 | if err := execute(d.pool, handler, `zfs`, `get`, `-Hprt`, string(d.kind), `-o`, `name,property,value`, strings.Join(props, `,`)); err != nil { 35 | return nil, err 36 | } 37 | return handler.datasets(), nil 38 | } 39 | 40 | type datasetPropertiesImpl struct { 41 | datasetName string 42 | properties map[string]string 43 | } 44 | 45 | func (p *datasetPropertiesImpl) DatasetName() string { 46 | return p.datasetName 47 | } 48 | 49 | func (p *datasetPropertiesImpl) Properties() map[string]string { 50 | return p.properties 51 | } 52 | 53 | // datasetHandler handles parsing of the data returned from the CLI into Dataset structs 54 | type datasetHandler struct { 55 | store map[string]*datasetPropertiesImpl 56 | } 57 | 58 | // processLine implements the handler interface 59 | func (h *datasetHandler) processLine(pool string, line []string) error { 60 | if len(line) != 3 || !strings.HasPrefix(line[0], pool) { 61 | return ErrInvalidOutput 62 | } 63 | if _, ok := h.store[line[0]]; !ok { 64 | h.store[line[0]] = newDatasetPropertiesImpl(line[0]) 65 | } 66 | h.store[line[0]].properties[line[1]] = line[2] 67 | return nil 68 | } 69 | 70 | func (h *datasetHandler) datasets() []DatasetProperties { 71 | result := make([]DatasetProperties, len(h.store)) 72 | i := 0 73 | for _, dataset := range h.store { 74 | result[i] = dataset 75 | i++ 76 | } 77 | return result 78 | } 79 | 80 | func newDatasetPropertiesImpl(name string) *datasetPropertiesImpl { 81 | return &datasetPropertiesImpl{ 82 | datasetName: name, 83 | properties: make(map[string]string), 84 | } 85 | } 86 | 87 | func newDatasetsImpl(pool string, kind DatasetKind) datasetsImpl { 88 | return datasetsImpl{ 89 | pool: pool, 90 | kind: kind, 91 | } 92 | } 93 | 94 | func newDatasetHandler() 
*datasetHandler { 95 | return &datasetHandler{ 96 | store: make(map[string]*datasetPropertiesImpl), 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /zfs/mock_zfs/mock_zfs.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: zfs.go 3 | 4 | // Package mock_zfs is a generated GoMock package. 5 | package mock_zfs 6 | 7 | import ( 8 | reflect "reflect" 9 | 10 | gomock "github.com/golang/mock/gomock" 11 | zfs "github.com/pdf/zfs_exporter/v2/zfs" 12 | ) 13 | 14 | // MockClient is a mock of Client interface. 15 | type MockClient struct { 16 | ctrl *gomock.Controller 17 | recorder *MockClientMockRecorder 18 | } 19 | 20 | // MockClientMockRecorder is the mock recorder for MockClient. 21 | type MockClientMockRecorder struct { 22 | mock *MockClient 23 | } 24 | 25 | // NewMockClient creates a new mock instance. 26 | func NewMockClient(ctrl *gomock.Controller) *MockClient { 27 | mock := &MockClient{ctrl: ctrl} 28 | mock.recorder = &MockClientMockRecorder{mock} 29 | return mock 30 | } 31 | 32 | // EXPECT returns an object that allows the caller to indicate expected use. 33 | func (m *MockClient) EXPECT() *MockClientMockRecorder { 34 | return m.recorder 35 | } 36 | 37 | // Datasets mocks base method. 38 | func (m *MockClient) Datasets(pool string, kind zfs.DatasetKind) zfs.Datasets { 39 | m.ctrl.T.Helper() 40 | ret := m.ctrl.Call(m, "Datasets", pool, kind) 41 | ret0, _ := ret[0].(zfs.Datasets) 42 | return ret0 43 | } 44 | 45 | // Datasets indicates an expected call of Datasets. 46 | func (mr *MockClientMockRecorder) Datasets(pool, kind interface{}) *gomock.Call { 47 | mr.mock.ctrl.T.Helper() 48 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Datasets", reflect.TypeOf((*MockClient)(nil).Datasets), pool, kind) 49 | } 50 | 51 | // Pool mocks base method. 
52 | func (m *MockClient) Pool(name string) zfs.Pool { 53 | m.ctrl.T.Helper() 54 | ret := m.ctrl.Call(m, "Pool", name) 55 | ret0, _ := ret[0].(zfs.Pool) 56 | return ret0 57 | } 58 | 59 | // Pool indicates an expected call of Pool. 60 | func (mr *MockClientMockRecorder) Pool(name interface{}) *gomock.Call { 61 | mr.mock.ctrl.T.Helper() 62 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pool", reflect.TypeOf((*MockClient)(nil).Pool), name) 63 | } 64 | 65 | // PoolNames mocks base method. 66 | func (m *MockClient) PoolNames() ([]string, error) { 67 | m.ctrl.T.Helper() 68 | ret := m.ctrl.Call(m, "PoolNames") 69 | ret0, _ := ret[0].([]string) 70 | ret1, _ := ret[1].(error) 71 | return ret0, ret1 72 | } 73 | 74 | // PoolNames indicates an expected call of PoolNames. 75 | func (mr *MockClientMockRecorder) PoolNames() *gomock.Call { 76 | mr.mock.ctrl.T.Helper() 77 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PoolNames", reflect.TypeOf((*MockClient)(nil).PoolNames)) 78 | } 79 | 80 | // MockPool is a mock of Pool interface. 81 | type MockPool struct { 82 | ctrl *gomock.Controller 83 | recorder *MockPoolMockRecorder 84 | } 85 | 86 | // MockPoolMockRecorder is the mock recorder for MockPool. 87 | type MockPoolMockRecorder struct { 88 | mock *MockPool 89 | } 90 | 91 | // NewMockPool creates a new mock instance. 92 | func NewMockPool(ctrl *gomock.Controller) *MockPool { 93 | mock := &MockPool{ctrl: ctrl} 94 | mock.recorder = &MockPoolMockRecorder{mock} 95 | return mock 96 | } 97 | 98 | // EXPECT returns an object that allows the caller to indicate expected use. 99 | func (m *MockPool) EXPECT() *MockPoolMockRecorder { 100 | return m.recorder 101 | } 102 | 103 | // Name mocks base method. 104 | func (m *MockPool) Name() string { 105 | m.ctrl.T.Helper() 106 | ret := m.ctrl.Call(m, "Name") 107 | ret0, _ := ret[0].(string) 108 | return ret0 109 | } 110 | 111 | // Name indicates an expected call of Name. 
112 | func (mr *MockPoolMockRecorder) Name() *gomock.Call { 113 | mr.mock.ctrl.T.Helper() 114 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockPool)(nil).Name)) 115 | } 116 | 117 | // Properties mocks base method. 118 | func (m *MockPool) Properties(props ...string) (zfs.PoolProperties, error) { 119 | m.ctrl.T.Helper() 120 | varargs := []interface{}{} 121 | for _, a := range props { 122 | varargs = append(varargs, a) 123 | } 124 | ret := m.ctrl.Call(m, "Properties", varargs...) 125 | ret0, _ := ret[0].(zfs.PoolProperties) 126 | ret1, _ := ret[1].(error) 127 | return ret0, ret1 128 | } 129 | 130 | // Properties indicates an expected call of Properties. 131 | func (mr *MockPoolMockRecorder) Properties(props ...interface{}) *gomock.Call { 132 | mr.mock.ctrl.T.Helper() 133 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Properties", reflect.TypeOf((*MockPool)(nil).Properties), props...) 134 | } 135 | 136 | // MockPoolProperties is a mock of PoolProperties interface. 137 | type MockPoolProperties struct { 138 | ctrl *gomock.Controller 139 | recorder *MockPoolPropertiesMockRecorder 140 | } 141 | 142 | // MockPoolPropertiesMockRecorder is the mock recorder for MockPoolProperties. 143 | type MockPoolPropertiesMockRecorder struct { 144 | mock *MockPoolProperties 145 | } 146 | 147 | // NewMockPoolProperties creates a new mock instance. 148 | func NewMockPoolProperties(ctrl *gomock.Controller) *MockPoolProperties { 149 | mock := &MockPoolProperties{ctrl: ctrl} 150 | mock.recorder = &MockPoolPropertiesMockRecorder{mock} 151 | return mock 152 | } 153 | 154 | // EXPECT returns an object that allows the caller to indicate expected use. 155 | func (m *MockPoolProperties) EXPECT() *MockPoolPropertiesMockRecorder { 156 | return m.recorder 157 | } 158 | 159 | // Properties mocks base method. 
160 | func (m *MockPoolProperties) Properties() map[string]string { 161 | m.ctrl.T.Helper() 162 | ret := m.ctrl.Call(m, "Properties") 163 | ret0, _ := ret[0].(map[string]string) 164 | return ret0 165 | } 166 | 167 | // Properties indicates an expected call of Properties. 168 | func (mr *MockPoolPropertiesMockRecorder) Properties() *gomock.Call { 169 | mr.mock.ctrl.T.Helper() 170 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Properties", reflect.TypeOf((*MockPoolProperties)(nil).Properties)) 171 | } 172 | 173 | // MockDatasets is a mock of Datasets interface. 174 | type MockDatasets struct { 175 | ctrl *gomock.Controller 176 | recorder *MockDatasetsMockRecorder 177 | } 178 | 179 | // MockDatasetsMockRecorder is the mock recorder for MockDatasets. 180 | type MockDatasetsMockRecorder struct { 181 | mock *MockDatasets 182 | } 183 | 184 | // NewMockDatasets creates a new mock instance. 185 | func NewMockDatasets(ctrl *gomock.Controller) *MockDatasets { 186 | mock := &MockDatasets{ctrl: ctrl} 187 | mock.recorder = &MockDatasetsMockRecorder{mock} 188 | return mock 189 | } 190 | 191 | // EXPECT returns an object that allows the caller to indicate expected use. 192 | func (m *MockDatasets) EXPECT() *MockDatasetsMockRecorder { 193 | return m.recorder 194 | } 195 | 196 | // Kind mocks base method. 197 | func (m *MockDatasets) Kind() zfs.DatasetKind { 198 | m.ctrl.T.Helper() 199 | ret := m.ctrl.Call(m, "Kind") 200 | ret0, _ := ret[0].(zfs.DatasetKind) 201 | return ret0 202 | } 203 | 204 | // Kind indicates an expected call of Kind. 205 | func (mr *MockDatasetsMockRecorder) Kind() *gomock.Call { 206 | mr.mock.ctrl.T.Helper() 207 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Kind", reflect.TypeOf((*MockDatasets)(nil).Kind)) 208 | } 209 | 210 | // Pool mocks base method. 
211 | func (m *MockDatasets) Pool() string { 212 | m.ctrl.T.Helper() 213 | ret := m.ctrl.Call(m, "Pool") 214 | ret0, _ := ret[0].(string) 215 | return ret0 216 | } 217 | 218 | // Pool indicates an expected call of Pool. 219 | func (mr *MockDatasetsMockRecorder) Pool() *gomock.Call { 220 | mr.mock.ctrl.T.Helper() 221 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pool", reflect.TypeOf((*MockDatasets)(nil).Pool)) 222 | } 223 | 224 | // Properties mocks base method. 225 | func (m *MockDatasets) Properties(props ...string) ([]zfs.DatasetProperties, error) { 226 | m.ctrl.T.Helper() 227 | varargs := []interface{}{} 228 | for _, a := range props { 229 | varargs = append(varargs, a) 230 | } 231 | ret := m.ctrl.Call(m, "Properties", varargs...) 232 | ret0, _ := ret[0].([]zfs.DatasetProperties) 233 | ret1, _ := ret[1].(error) 234 | return ret0, ret1 235 | } 236 | 237 | // Properties indicates an expected call of Properties. 238 | func (mr *MockDatasetsMockRecorder) Properties(props ...interface{}) *gomock.Call { 239 | mr.mock.ctrl.T.Helper() 240 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Properties", reflect.TypeOf((*MockDatasets)(nil).Properties), props...) 241 | } 242 | 243 | // MockDatasetProperties is a mock of DatasetProperties interface. 244 | type MockDatasetProperties struct { 245 | ctrl *gomock.Controller 246 | recorder *MockDatasetPropertiesMockRecorder 247 | } 248 | 249 | // MockDatasetPropertiesMockRecorder is the mock recorder for MockDatasetProperties. 250 | type MockDatasetPropertiesMockRecorder struct { 251 | mock *MockDatasetProperties 252 | } 253 | 254 | // NewMockDatasetProperties creates a new mock instance. 255 | func NewMockDatasetProperties(ctrl *gomock.Controller) *MockDatasetProperties { 256 | mock := &MockDatasetProperties{ctrl: ctrl} 257 | mock.recorder = &MockDatasetPropertiesMockRecorder{mock} 258 | return mock 259 | } 260 | 261 | // EXPECT returns an object that allows the caller to indicate expected use. 
262 | func (m *MockDatasetProperties) EXPECT() *MockDatasetPropertiesMockRecorder { 263 | return m.recorder 264 | } 265 | 266 | // DatasetName mocks base method. 267 | func (m *MockDatasetProperties) DatasetName() string { 268 | m.ctrl.T.Helper() 269 | ret := m.ctrl.Call(m, "DatasetName") 270 | ret0, _ := ret[0].(string) 271 | return ret0 272 | } 273 | 274 | // DatasetName indicates an expected call of DatasetName. 275 | func (mr *MockDatasetPropertiesMockRecorder) DatasetName() *gomock.Call { 276 | mr.mock.ctrl.T.Helper() 277 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DatasetName", reflect.TypeOf((*MockDatasetProperties)(nil).DatasetName)) 278 | } 279 | 280 | // Properties mocks base method. 281 | func (m *MockDatasetProperties) Properties() map[string]string { 282 | m.ctrl.T.Helper() 283 | ret := m.ctrl.Call(m, "Properties") 284 | ret0, _ := ret[0].(map[string]string) 285 | return ret0 286 | } 287 | 288 | // Properties indicates an expected call of Properties. 289 | func (mr *MockDatasetPropertiesMockRecorder) Properties() *gomock.Call { 290 | mr.mock.ctrl.T.Helper() 291 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Properties", reflect.TypeOf((*MockDatasetProperties)(nil).Properties)) 292 | } 293 | 294 | // Mockhandler is a mock of handler interface. 295 | type Mockhandler struct { 296 | ctrl *gomock.Controller 297 | recorder *MockhandlerMockRecorder 298 | } 299 | 300 | // MockhandlerMockRecorder is the mock recorder for Mockhandler. 301 | type MockhandlerMockRecorder struct { 302 | mock *Mockhandler 303 | } 304 | 305 | // NewMockhandler creates a new mock instance. 306 | func NewMockhandler(ctrl *gomock.Controller) *Mockhandler { 307 | mock := &Mockhandler{ctrl: ctrl} 308 | mock.recorder = &MockhandlerMockRecorder{mock} 309 | return mock 310 | } 311 | 312 | // EXPECT returns an object that allows the caller to indicate expected use. 
313 | func (m *Mockhandler) EXPECT() *MockhandlerMockRecorder { 314 | return m.recorder 315 | } 316 | 317 | // processLine mocks base method. 318 | func (m *Mockhandler) processLine(pool string, line []string) error { 319 | m.ctrl.T.Helper() 320 | ret := m.ctrl.Call(m, "processLine", pool, line) 321 | ret0, _ := ret[0].(error) 322 | return ret0 323 | } 324 | 325 | // processLine indicates an expected call of processLine. 326 | func (mr *MockhandlerMockRecorder) processLine(pool, line interface{}) *gomock.Call { 327 | mr.mock.ctrl.T.Helper() 328 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "processLine", reflect.TypeOf((*Mockhandler)(nil).processLine), pool, line) 329 | } 330 | -------------------------------------------------------------------------------- /zfs/pool.go: -------------------------------------------------------------------------------- 1 | package zfs 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "os/exec" 8 | "strings" 9 | ) 10 | 11 | // PoolStatus enum contains status text 12 | type PoolStatus string 13 | 14 | const ( 15 | // PoolOnline enum entry 16 | PoolOnline PoolStatus = `ONLINE` 17 | // PoolDegraded enum entry 18 | PoolDegraded PoolStatus = `DEGRADED` 19 | // PoolFaulted enum entry 20 | PoolFaulted PoolStatus = `FAULTED` 21 | // PoolOffline enum entry 22 | PoolOffline PoolStatus = `OFFLINE` 23 | // PoolUnavail enum entry 24 | PoolUnavail PoolStatus = `UNAVAIL` 25 | // PoolRemoved enum entry 26 | PoolRemoved PoolStatus = `REMOVED` 27 | // PoolSuspended enum entry 28 | PoolSuspended PoolStatus = `SUSPENDED` 29 | ) 30 | 31 | type poolImpl struct { 32 | name string 33 | } 34 | 35 | func (p poolImpl) Name() string { 36 | return p.name 37 | } 38 | 39 | func (p poolImpl) Properties(props ...string) (PoolProperties, error) { 40 | handler := newPoolPropertiesImpl() 41 | if err := execute(p.name, handler, `zpool`, `get`, `-Hpo`, `name,property,value`, strings.Join(props, `,`)); err != nil { 42 | return handler, err 43 | } 44 | return 
handler, nil 45 | } 46 | 47 | type poolPropertiesImpl struct { 48 | properties map[string]string 49 | } 50 | 51 | func (p *poolPropertiesImpl) Properties() map[string]string { 52 | return p.properties 53 | } 54 | 55 | // processLine implements the handler interface 56 | func (p *poolPropertiesImpl) processLine(pool string, line []string) error { 57 | if len(line) != 3 || line[0] != pool { 58 | return ErrInvalidOutput 59 | } 60 | p.properties[line[1]] = line[2] 61 | 62 | return nil 63 | } 64 | 65 | // PoolNames returns a list of available pool names 66 | func poolNames() ([]string, error) { 67 | pools := make([]string, 0) 68 | cmd := exec.Command(`zpool`, `list`, `-Ho`, `name`) 69 | out, err := cmd.StdoutPipe() 70 | if err != nil { 71 | return nil, err 72 | } 73 | stderr, err := cmd.StderrPipe() 74 | if err != nil { 75 | return nil, err 76 | } 77 | scanner := bufio.NewScanner(out) 78 | 79 | if err = cmd.Start(); err != nil { 80 | return nil, fmt.Errorf("failed to start command '%s': %w", cmd.String(), err) 81 | } 82 | 83 | for scanner.Scan() { 84 | pools = append(pools, scanner.Text()) 85 | } 86 | 87 | stde, _ := io.ReadAll(stderr) 88 | if err = cmd.Wait(); err != nil { 89 | return nil, fmt.Errorf("failed to execute command '%s'; output: '%s' (%w)", cmd.String(), strings.TrimSpace(string(stde)), err) 90 | } 91 | 92 | return pools, nil 93 | } 94 | 95 | func newPoolImpl(name string) poolImpl { 96 | return poolImpl{ 97 | name: name, 98 | } 99 | } 100 | 101 | func newPoolPropertiesImpl() *poolPropertiesImpl { 102 | return &poolPropertiesImpl{ 103 | properties: make(map[string]string), 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /zfs/zfs.go: -------------------------------------------------------------------------------- 1 | package zfs 2 | 3 | import ( 4 | "encoding/csv" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "os/exec" 9 | "strings" 10 | ) 11 | 12 | // ErrInvalidOutput is returned on unparseable CLI output 13 | var 
ErrInvalidOutput = errors.New(`invalid output executing command`) 14 | 15 | // Client is the primary entrypoint 16 | type Client interface { 17 | PoolNames() ([]string, error) 18 | Pool(name string) Pool 19 | Datasets(pool string, kind DatasetKind) Datasets 20 | } 21 | 22 | // Pool allows querying pool properties 23 | type Pool interface { 24 | Name() string 25 | Properties(props ...string) (PoolProperties, error) 26 | } 27 | 28 | // PoolProperties provides access to the properties for a pool 29 | type PoolProperties interface { 30 | Properties() map[string]string 31 | } 32 | 33 | // Datasets allows querying properties for datasets in a pool 34 | type Datasets interface { 35 | Pool() string 36 | Kind() DatasetKind 37 | Properties(props ...string) ([]DatasetProperties, error) 38 | } 39 | 40 | // DatasetProperties provides access to the properties for a dataset 41 | type DatasetProperties interface { 42 | DatasetName() string 43 | Properties() map[string]string 44 | } 45 | 46 | type handler interface { 47 | processLine(pool string, line []string) error 48 | } 49 | 50 | type clientImpl struct{} 51 | 52 | func (z clientImpl) PoolNames() ([]string, error) { 53 | return poolNames() 54 | } 55 | 56 | func (z clientImpl) Pool(name string) Pool { 57 | return newPoolImpl(name) 58 | } 59 | 60 | func (z clientImpl) Datasets(pool string, kind DatasetKind) Datasets { 61 | return newDatasetsImpl(pool, kind) 62 | } 63 | 64 | func execute(pool string, h handler, cmd string, args ...string) error { 65 | c := exec.Command(cmd, append(args, pool)...) 
66 | out, err := c.StdoutPipe() 67 | if err != nil { 68 | return err 69 | } 70 | 71 | stderr, err := c.StderrPipe() 72 | if err != nil { 73 | return err 74 | } 75 | 76 | r := csv.NewReader(out) 77 | r.Comma = '\t' 78 | r.LazyQuotes = true 79 | r.ReuseRecord = true 80 | r.FieldsPerRecord = 3 81 | 82 | if err = c.Start(); err != nil { 83 | return fmt.Errorf("failed to start command '%s': %w", c.String(), err) 84 | } 85 | 86 | for { 87 | line, err := r.Read() 88 | if errors.Is(err, io.EOF) { 89 | break 90 | } 91 | if err != nil { 92 | return err 93 | } 94 | if err = h.processLine(pool, line); err != nil { 95 | return err 96 | } 97 | } 98 | 99 | stde, _ := io.ReadAll(stderr) 100 | if err = c.Wait(); err != nil { 101 | return fmt.Errorf("failed to execute command '%s'; output: '%s' (%w)", c.String(), strings.TrimSpace(string(stde)), err) 102 | } 103 | return nil 104 | } 105 | 106 | // New instantiates a ZFS Client 107 | func New() Client { 108 | return clientImpl{} 109 | } 110 | -------------------------------------------------------------------------------- /zfs_exporter.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net/http" 5 | "os" 6 | "strings" 7 | 8 | "github.com/pdf/zfs_exporter/v2/collector" 9 | "github.com/pdf/zfs_exporter/v2/zfs" 10 | 11 | "github.com/alecthomas/kingpin/v2" 12 | "github.com/prometheus/client_golang/prometheus" 13 | versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" 14 | "github.com/prometheus/client_golang/prometheus/promhttp" 15 | "github.com/prometheus/exporter-toolkit/web" 16 | "github.com/prometheus/exporter-toolkit/web/kingpinflag" 17 | 18 | "github.com/prometheus/common/promslog" 19 | "github.com/prometheus/common/promslog/flag" 20 | "github.com/prometheus/common/version" 21 | ) 22 | 23 | func main() { 24 | var ( 25 | metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose 
metrics.").Default("/metrics").String() 26 | metricsExporterDisabled = kingpin.Flag(`web.disable-exporter-metrics`, `Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).`).Default(`false`).Bool() 27 | deadline = kingpin.Flag("deadline", "Maximum duration that a collection should run before returning cached data. Should be set to a value shorter than your scrape timeout duration. The current collection run will continue and update the cache when complete (default: 8s)").Default("8s").Duration() 28 | pools = kingpin.Flag("pool", "Name of the pool(s) to collect, repeat for multiple pools (default: all pools).").Strings() 29 | excludes = kingpin.Flag("exclude", "Exclude datasets/snapshots/volumes that match the provided regex (e.g. '^rpool/docker/'), may be specified multiple times.").Strings() 30 | toolkitFlags = kingpinflag.AddFlags(kingpin.CommandLine, ":9134") 31 | ) 32 | 33 | promslogConfig := &promslog.Config{} 34 | flag.AddFlags(kingpin.CommandLine, promslogConfig) 35 | kingpin.Version(version.Print("zfs_exporter")) 36 | kingpin.HelpFlag.Short('h') 37 | kingpin.Parse() 38 | logger := promslog.New(promslogConfig) 39 | 40 | logger.Info("Starting zfs_exporter", "version", version.Info()) 41 | logger.Info("Build context", "context", version.BuildContext()) 42 | 43 | c, err := collector.NewZFS(collector.ZFSConfig{ 44 | DisableMetrics: *metricsExporterDisabled, 45 | Deadline: *deadline, 46 | Pools: *pools, 47 | Excludes: *excludes, 48 | Logger: logger, 49 | ZFSClient: zfs.New(), 50 | }) 51 | if err != nil { 52 | logger.Error("Error creating an exporter", "err", err) 53 | os.Exit(1) 54 | } 55 | 56 | if *metricsExporterDisabled { 57 | r := prometheus.NewRegistry() 58 | prometheus.DefaultRegisterer = r 59 | prometheus.DefaultGatherer = r 60 | } 61 | prometheus.MustRegister(c) 62 | prometheus.MustRegister(versioncollector.NewCollector("zfs_exporter")) 63 | 64 | if len(c.Pools) > 0 { 65 | logger.Info("Enabling pools", "pools", strings.Join(c.Pools, ", 
")) 66 | } else { 67 | logger.Info("Enabling pools", "pools", "(all)") 68 | } 69 | 70 | collectorNames := make([]string, 0, len(c.Collectors)) 71 | for n, c := range c.Collectors { 72 | if *c.Enabled { 73 | collectorNames = append(collectorNames, n) 74 | } 75 | } 76 | logger.Info("Enabling collectors", "collectors", strings.Join(collectorNames, ", ")) 77 | 78 | http.Handle(*metricsPath, promhttp.Handler()) 79 | if *metricsPath != "/" { 80 | landingConfig := web.LandingConfig{ 81 | Name: "ZFS Exporter", 82 | Description: "Prometheus ZFS Exporter", 83 | Version: version.Info(), 84 | Links: []web.LandingLinks{ 85 | { 86 | Address: *metricsPath, 87 | Text: "Metrics", 88 | }, 89 | }, 90 | } 91 | landingPage, err := web.NewLandingPage(landingConfig) 92 | if err != nil { 93 | logger.Error("Error creating landing page", "err", err) 94 | os.Exit(1) 95 | } 96 | http.Handle("/", landingPage) 97 | } 98 | 99 | server := &http.Server{} 100 | err = web.ListenAndServe(server, toolkitFlags, logger) 101 | if err != nil { 102 | logger.Error("Error starting HTTP server", "err", err) 103 | os.Exit(1) 104 | } 105 | } 106 | --------------------------------------------------------------------------------