├── .circleci └── config.yml ├── .dockerignore ├── .github ├── dependabot.yml └── workflows │ ├── container_description.yml │ └── golangci-lint.yml ├── .gitignore ├── .golangci.yml ├── .promu.yml ├── .yamllint ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── MAINTAINERS.md ├── Makefile ├── Makefile.common ├── NOTICE ├── README.md ├── SECURITY.md ├── VERSION ├── cmd └── memcached_exporter │ ├── main.go │ └── main_test.go ├── go.mod ├── go.sum ├── pkg ├── README.md └── exporter │ ├── exporter.go │ └── exporter_test.go └── scraper ├── scraper.go └── scraper_test.go /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2.1 3 | orbs: 4 | prometheus: prometheus/prometheus@0.17.1 5 | executors: 6 | # Whenever the Go version is updated here, .promu.yml should also be updated. 7 | golang: 8 | docker: 9 | - image: cimg/go:1.24 10 | golang_memcached: 11 | docker: 12 | - image: cimg/go:1.24 13 | - image: memcached 14 | jobs: 15 | test: 16 | executor: golang_memcached 17 | steps: 18 | - prometheus/setup_environment 19 | - setup_remote_docker 20 | - run: make 21 | - prometheus/store_artifact: 22 | file: memcached_exporter 23 | - run: git diff --exit-code 24 | workflows: 25 | version: 2 26 | memcached_exporter: 27 | jobs: 28 | - test: 29 | filters: 30 | tags: 31 | only: /.*/ 32 | - prometheus/build: 33 | name: build 34 | filters: 35 | tags: 36 | only: /.*/ 37 | - prometheus/publish_master: 38 | context: org-context 39 | requires: 40 | - test 41 | - build 42 | filters: 43 | branches: 44 | only: master 45 | - prometheus/publish_release: 46 | context: org-context 47 | requires: 48 | - test 49 | - build 50 | filters: 51 | tags: 52 | only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ 53 | branches: 54 | ignore: /.*/ 55 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .build/ 2 | .tarballs/ 3 | 4 | !.build/linux-amd64/ 5 | !.build/linux-armv7 6 | !.build/linux-arm64 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | schedule: 6 | interval: "monthly" 7 | -------------------------------------------------------------------------------- /.github/workflows/container_description.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Push README to Docker Hub 3 | on: 4 | push: 5 | paths: 6 | - "README.md" 7 | - "README-containers.md" 8 | - ".github/workflows/container_description.yml" 9 | branches: [ main, master ] 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | PushDockerHubReadme: 16 | runs-on: ubuntu-latest 17 | name: Push README to Docker Hub 18 | if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
19 | steps: 20 | - name: git checkout 21 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 22 | with: 23 | persist-credentials: false 24 | - name: Set docker hub repo name 25 | run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV 26 | - name: Push README to Dockerhub 27 | uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 28 | env: 29 | DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }} 30 | DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }} 31 | with: 32 | destination_container_repo: ${{ env.DOCKER_REPO_NAME }} 33 | provider: dockerhub 34 | short_description: ${{ env.DOCKER_REPO_NAME }} 35 | # Empty string results in README-containers.md being pushed if it 36 | # exists. Otherwise, README.md is pushed. 37 | readme_file: '' 38 | 39 | PushQuayIoReadme: 40 | runs-on: ubuntu-latest 41 | name: Push README to quay.io 42 | if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 43 | steps: 44 | - name: git checkout 45 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 46 | with: 47 | persist-credentials: false 48 | - name: Set quay.io org name 49 | run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV 50 | - name: Set quay.io repo name 51 | run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV 52 | - name: Push README to quay.io 53 | uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 54 | env: 55 | DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }} 56 | with: 57 | destination_container_repo: ${{ env.DOCKER_REPO_NAME }} 58 | provider: quay 59 | # Empty string results in README-containers.md being pushed if it 60 | # exists. Otherwise, README.md is pushed. 
61 | readme_file: '' 62 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This action is synced from https://github.com/prometheus/prometheus 3 | name: golangci-lint 4 | on: 5 | push: 6 | paths: 7 | - "go.sum" 8 | - "go.mod" 9 | - "**.go" 10 | - "scripts/errcheck_excludes.txt" 11 | - ".github/workflows/golangci-lint.yml" 12 | - ".golangci.yml" 13 | pull_request: 14 | 15 | permissions: # added using https://github.com/step-security/secure-repo 16 | contents: read 17 | 18 | jobs: 19 | golangci: 20 | permissions: 21 | contents: read # for actions/checkout to fetch code 22 | pull-requests: read # for golangci/golangci-lint-action to fetch pull requests 23 | name: lint 24 | runs-on: ubuntu-latest 25 | steps: 26 | - name: Checkout repository 27 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 28 | - name: Install Go 29 | uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 30 | with: 31 | go-version: 1.24.x 32 | - name: Install snmp_exporter/generator dependencies 33 | run: sudo apt-get update && sudo apt-get -y install libsnmp-dev 34 | if: github.repository == 'prometheus/snmp_exporter' 35 | - name: Lint 36 | uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 37 | with: 38 | args: --verbose 39 | version: v2.1.5 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.build 2 | /.release 3 | /.tarballs 4 | /memcached_exporter 5 | *.tar.gz 6 | /vendor 7 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | enable: 4 | - revive 5 | - sloglint 6 | exclusions: 7 | generated: lax 8 | presets: 9 | - comments 10 | - common-false-positives 11 | - legacy 12 | - std-error-handling 13 | paths: 14 | - third_party$ 15 | - builtin$ 16 | - examples$ 17 | formatters: 18 | exclusions: 19 | generated: lax 20 | paths: 21 | - third_party$ 22 | - builtin$ 23 | - examples$ 24 | -------------------------------------------------------------------------------- /.promu.yml: -------------------------------------------------------------------------------- 1 | go: 2 | # Whenever the Go version is updated here, .circle/config.yml should also 3 | # be updated. 
4 | version: 1.24 5 | repository: 6 | path: github.com/prometheus/memcached_exporter 7 | build: 8 | binaries: 9 | - name: memcached_exporter 10 | path: ./cmd/memcached_exporter 11 | ldflags: | 12 | -X github.com/prometheus/common/version.Version={{.Version}} 13 | -X github.com/prometheus/common/version.Revision={{.Revision}} 14 | -X github.com/prometheus/common/version.Branch={{.Branch}} 15 | -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} 16 | -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} 17 | tarball: 18 | files: 19 | - README.md 20 | - CHANGELOG.md 21 | - LICENSE 22 | - NOTICE 23 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | ignore: | 4 | **/node_modules 5 | 6 | rules: 7 | braces: 8 | max-spaces-inside: 1 9 | level: error 10 | brackets: 11 | max-spaces-inside: 1 12 | level: error 13 | commas: disable 14 | comments: disable 15 | comments-indentation: disable 16 | document-start: disable 17 | indentation: 18 | spaces: consistent 19 | indent-sequences: consistent 20 | key-duplicates: 21 | ignore: | 22 | config/testdata/section_key_dup.bad.yml 23 | line-length: disable 24 | truthy: 25 | check-keys: false 26 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 0.15.3 / 2025-05-28 2 | 3 | * [CHANGE] Update dependencies 4 | 5 | ## 0.15.2 / 2025-03-21 6 | 7 | * [CHANGE] Update dependencies 8 | 9 | This addresses CVE-2025-22870 10 | 11 | ## 0.15.1 / 2025-02-24 12 | 13 | * [CHANGE] Update dependencies 14 | 15 | This addresses CVE-2024-45337 and CVE-2024-45338 16 | 17 | ## 0.15.0 / 2024-11-08 18 | 19 | * [CHANGE] Update dependencies 20 | * [ENHANCEMENT] Add metric for `direct_reclaims` #227 21 | 22 | ## 0.14.4 / 2024-06-24 23 | 24 | * [CHANGE] Update dependencies 25 | 26 | This addresses CVE-2023-45288 27 | 28 | ## 0.14.3 / 2024-03-22 29 | 30 | * [CHANGE] Update dependencies 31 | 32 | This addresses CVE-2024-24786 which is not exploitable in the exporter, but set off security scanners. 33 | 34 | ## 0.14.2 / 2023-12-22 35 | 36 | * [CHANGE] Update dependencies 37 | 38 | This addresses CVE-2023-48795 which is not exploitable in the exporter, but set off security scanners. 39 | 40 | ## 0.14.1 / 2023-12-06 41 | 42 | * [CHANGE] Build with Go 1.21 #190 43 | * [BUGFIX] Add missing `_total` suffix for metrics for failure to store items #191 44 | 45 | ## 0.14.0 / 2023-12-06 46 | 47 | * [FEATURE] Add metrics for failure to store items #184 48 | 49 | ## 0.13.1 / 2023-12-06 50 | 51 | * [CHANGE] Update dependencies 52 | 53 | This addresses CVE-2023-3978 which is not exploitable in the exporter, but set off security scanners. 
54 | 55 | ## 0.13.0 / 2023-06-02 56 | 57 | * [FEATURE] Multi-target scrape support #143, #173 58 | 59 | ## 0.12.0 / 2023-06-02 60 | 61 | * [ENHANCEMENT] Add `memcached_extstore_io_queue_depth` #169 62 | * [BUGFIX] Fix exposing `memcached_extstore_pages_free` #169 63 | 64 | ## 0.11.3 / 2023-04-12 65 | 66 | * [ENHANCEMENT] Better error messaging when TLS server name is required #162 67 | * [CHANGE] Update dependencies & build with Go 1.20 to avoid upstream CVEs #166 68 | 69 | ## 0.11.2 / 2023-03-08 70 | 71 | * [BUGFIX] Fix connections via UNIX domain socket #157 72 | * [CHANGE] Update dependencies, including exporter toolkit #161 73 | 74 | ## 0.11.1 / 2023-02-13 75 | 76 | * [FEATURE] Add metric to indicate if memcached is accepting connections #137 77 | * [FEATURE] Support TLS for connection to memcached #153 78 | * [FEATURE] Support systemd socket activation #147 79 | * [ENHANCEMENT] Miscellaneous dependency updates #151 #147 #146 #140 80 | 81 | Release 0.11.0 failed due to CI issues. 82 | 83 | ## 0.10.0 / 2022-06-21 84 | 85 | * [FEATURE] Add rusage and rejected_connection metrics #109 86 | * [FEATURE] Add extstore metrics #117 87 | 88 | ## 0.9.0 / 2021-03-25 89 | 90 | * [FEATURE] Add TLS and basic authentication #101 91 | 92 | ## 0.8.0 / 2020-12-04 93 | 94 | * [FEATURE] Support MySQL's InnoDB memcached plugin (by handling their multi-word stats settings values) 95 | * [FEATURE] Make exporter logic available as standalone library package #97 96 | * [ENHANCEMENT] Add --version flag and version metric #99 97 | * [ENHANCEMENT] Update prometheus client library 98 | 99 | ## 0.7.0 / 2020-07-24 100 | 101 | * [CHANGE] Switch logging to go-kit #73 102 | * [CHANGE] Register `memcached_lru_crawler_starts_total` metric correctly (formerly `namespace_lru_crawler_starts`) #83 103 | * [ENHANCEMENT] Add `memcached_time_seconds` metric #74 104 | * [ENHANCEMENT] Add slab metrics related to hot/warm/cold/temp LRUs #76 105 | * [BUGFIX] Fix `memcached_slab_mem_requested_bytes` metric in newer memcached versions #70 106 | 107 | ## 0.6.0 / 2019-08-25 108 | 109 | * [CHANGE] Handle non-existent metrics without NaN values #53 110 | * [ENHANCEMENT] Do not run as root by default in Docker #54 111 | * [ENHANCEMENT] Update prometheus client library 112 | 113 | ## 0.5.0 / 2018-10-17 114 | 115 | * [FEATURE] Add memcached_connections_yielded_total metric #35 116 | * [FEATURE] Add memcached_connections_listener_disabled_total metric #36 117 | * [ENHANCEMENT] Update prometheus client library removing outdated metrics #31 118 | 119 | ## 0.4.1 / 2018-02-01 120 | 121 | * [BUGFIX] Handle connection errors gracefully in all cases 122 | 123 | ## 0.4.0 / 2018-01-23 124 | 125 | * [CHANGE] Use the standard prometheus log library 126 | * [CHANGE] Use the standard prometheus flag library 127 | 128 | ## 0.3.0 / 2016-10-15 129 | 130 | * [CHANGE] Tarball includes a directory 131 | * [CHANGE] Use unique default port 9150 132 | * [FEATURE] Use common build system 133 | * [FEATURE] Export extended slab metrics. Thanks @ipstatic 134 | * [FEATURE] Add -version flag and use common version format 135 | * [FEATURE] Add memcached_max_connections metric 136 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Prometheus Community Code of Conduct 2 | 3 | Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). 
4 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Prometheus uses GitHub to manage reviews of pull requests. 4 | 5 | * If you have a trivial fix or improvement, go ahead and create a pull request, 6 | addressing (with `@...`) the maintainer of this repository (see 7 | [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. 8 | 9 | * If you plan to do something more involved, first discuss your ideas 10 | on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). 11 | This will avoid unnecessary work and surely give you and us a good deal 12 | of inspiration. 13 | 14 | * Relevant coding style guidelines are the [Go Code Review 15 | Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) 16 | and the _Formatting and style_ section of Peter Bourgon's [Go: Best 17 | Practices for Production 18 | Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ARCH="amd64" 2 | ARG OS="linux" 3 | FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest 4 | LABEL maintainer="The Prometheus Authors " 5 | 6 | ARG ARCH="amd64" 7 | ARG OS="linux" 8 | COPY .build/${OS}-${ARCH}/memcached_exporter /bin/memcached_exporter 9 | 10 | USER nobody 11 | ENTRYPOINT ["/bin/memcached_exporter"] 12 | EXPOSE 9150 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | * Tobias Schmidt 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2015 The Prometheus Authors 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | # Needs to be defined before including Makefile.common to auto-generate targets 15 | DOCKER_ARCHS ?= amd64 armv7 arm64 16 | 17 | include Makefile.common 18 | 19 | DOCKER_IMAGE_NAME ?= memcached-exporter 20 | -------------------------------------------------------------------------------- /Makefile.common: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The Prometheus Authors 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | 15 | # A common Makefile that includes rules to be reused in different prometheus projects. 16 | # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! 17 | 18 | # Example usage : 19 | # Create the main Makefile in the root project directory. 20 | # include Makefile.common 21 | # customTarget: 22 | # @echo ">> Running customTarget" 23 | # 24 | 25 | # Ensure GOBIN is not set during build so that promu is installed to the correct path 26 | unexport GOBIN 27 | 28 | GO ?= go 29 | GOFMT ?= $(GO)fmt 30 | FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) 31 | GOOPTS ?= 32 | GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) 33 | GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) 34 | 35 | GO_VERSION ?= $(shell $(GO) version) 36 | GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) 37 | PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') 38 | 39 | PROMU := $(FIRST_GOPATH)/bin/promu 40 | pkgs = ./... 41 | 42 | ifeq (arm, $(GOHOSTARCH)) 43 | GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) 44 | GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) 45 | else 46 | GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) 47 | endif 48 | 49 | GOTEST := $(GO) test 50 | GOTEST_DIR := 51 | ifneq ($(CIRCLE_JOB),) 52 | ifneq ($(shell command -v gotestsum 2> /dev/null),) 53 | GOTEST_DIR := test-results 54 | GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- 55 | endif 56 | endif 57 | 58 | PROMU_VERSION ?= 0.17.0 59 | PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz 60 | 61 | SKIP_GOLANGCI_LINT := 62 | GOLANGCI_LINT := 63 | GOLANGCI_LINT_OPTS ?= 64 | GOLANGCI_LINT_VERSION ?= v2.1.5 65 | GOLANGCI_FMT_OPTS ?= 66 | # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. 67 | # windows isn't included here because of the path separator being different. 
68 | ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) 69 | ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) 70 | # If we're in CI and there is an Actions file, that means the linter 71 | # is being run in Actions, so we don't need to run it here. 72 | ifneq (,$(SKIP_GOLANGCI_LINT)) 73 | GOLANGCI_LINT := 74 | else ifeq (,$(CIRCLE_JOB)) 75 | GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint 76 | else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) 77 | GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint 78 | endif 79 | endif 80 | endif 81 | 82 | PREFIX ?= $(shell pwd) 83 | BIN_DIR ?= $(shell pwd) 84 | DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) 85 | DOCKERFILE_PATH ?= ./Dockerfile 86 | DOCKERBUILD_CONTEXT ?= ./ 87 | DOCKER_REPO ?= prom 88 | 89 | DOCKER_ARCHS ?= amd64 90 | 91 | BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) 92 | PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) 93 | TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) 94 | 95 | SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) 96 | 97 | ifeq ($(GOHOSTARCH),amd64) 98 | ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) 99 | # Only supported on amd64 100 | test-flags := -race 101 | endif 102 | endif 103 | 104 | # This rule is used to forward a target like "build" to "common-build". This 105 | # allows a new "build" target to be defined in a Makefile which includes this 106 | # one and override "common-build" without override warnings. 107 | %: common-% ; 108 | 109 | .PHONY: common-all 110 | common-all: precheck style check_license lint yamllint unused build test 111 | 112 | .PHONY: common-style 113 | common-style: 114 | @echo ">> checking code style" 115 | @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ 116 | if [ -n "$${fmtRes}" ]; then \ 117 | echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ 118 | echo "Please ensure you are using $$($(GO) version) for formatting code."; \ 119 | exit 1; \ 120 | fi 121 | 122 | .PHONY: common-check_license 123 | common-check_license: 124 | @echo ">> checking license header" 125 | @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ 126 | awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ 127 | done); \ 128 | if [ -n "$${licRes}" ]; then \ 129 | echo "license header checking failed:"; echo "$${licRes}"; \ 130 | exit 1; \ 131 | fi 132 | 133 | .PHONY: common-deps 134 | common-deps: 135 | @echo ">> getting dependencies" 136 | $(GO) mod download 137 | 138 | .PHONY: update-go-deps 139 | update-go-deps: 140 | @echo ">> updating Go dependencies" 141 | @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ 142 | $(GO) get -d $$m; \ 143 | done 144 | $(GO) mod tidy 145 | 146 | .PHONY: common-test-short 147 | common-test-short: $(GOTEST_DIR) 148 | @echo ">> running short tests" 149 | $(GOTEST) -short $(GOOPTS) $(pkgs) 150 | 151 | .PHONY: common-test 152 | common-test: $(GOTEST_DIR) 153 | @echo ">> running all tests" 154 | $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) 155 | 156 | $(GOTEST_DIR): 157 | @mkdir -p $@ 158 | 159 | .PHONY: common-format 160 | common-format: $(GOLANGCI_LINT) 161 | @echo ">> formatting code" 162 | $(GO) fmt $(pkgs) 163 | ifdef GOLANGCI_LINT 164 | @echo ">> formatting code with golangci-lint" 165 | $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS) 166 | endif 167 | 168 | .PHONY: common-vet 169 | common-vet: 170 | @echo ">> vetting code" 171 | $(GO) vet $(GOOPTS) $(pkgs) 172 | 173 | .PHONY: common-lint 174 | common-lint: $(GOLANGCI_LINT) 175 | ifdef GOLANGCI_LINT 176 | @echo ">> running golangci-lint" 177 | $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) 178 | endif 179 | 180 | .PHONY: common-lint-fix 181 | common-lint-fix: $(GOLANGCI_LINT) 182 | ifdef GOLANGCI_LINT 183 | @echo ">> running golangci-lint fix" 184 | $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) 185 | endif 186 | 187 | .PHONY: common-yamllint 188 | common-yamllint: 189 | @echo ">> running yamllint on all YAML files in the repository" 190 | ifeq (, $(shell command -v yamllint 2> /dev/null)) 191 | @echo "yamllint not installed so skipping" 192 | else 193 | yamllint . 194 | endif 195 | 196 | # For backward-compatibility. 
197 | .PHONY: common-staticcheck 198 | common-staticcheck: lint 199 | 200 | .PHONY: common-unused 201 | common-unused: 202 | @echo ">> running check for unused/missing packages in go.mod" 203 | $(GO) mod tidy 204 | @git diff --exit-code -- go.sum go.mod 205 | 206 | .PHONY: common-build 207 | common-build: promu 208 | @echo ">> building binaries" 209 | $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) 210 | 211 | .PHONY: common-tarball 212 | common-tarball: promu 213 | @echo ">> building release tarball" 214 | $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) 215 | 216 | .PHONY: common-docker-repo-name 217 | common-docker-repo-name: 218 | @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" 219 | 220 | .PHONY: common-docker $(BUILD_DOCKER_ARCHS) 221 | common-docker: $(BUILD_DOCKER_ARCHS) 222 | $(BUILD_DOCKER_ARCHS): common-docker-%: 223 | docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ 224 | -f $(DOCKERFILE_PATH) \ 225 | --build-arg ARCH="$*" \ 226 | --build-arg OS="linux" \ 227 | $(DOCKERBUILD_CONTEXT) 228 | 229 | .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) 230 | common-docker-publish: $(PUBLISH_DOCKER_ARCHS) 231 | $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: 232 | docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" 233 | 234 | DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) 235 | .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) 236 | common-docker-tag-latest: $(TAG_DOCKER_ARCHS) 237 | $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: 238 | docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" 239 | docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" 240 | 241 | .PHONY: common-docker-manifest 242 | common-docker-manifest: 243 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) 244 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" 245 | 246 | .PHONY: promu 247 | promu: $(PROMU) 248 | 249 | $(PROMU): 250 | $(eval PROMU_TMP := $(shell mktemp -d)) 251 | curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) 252 | mkdir -p $(FIRST_GOPATH)/bin 253 | cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu 254 | rm -r $(PROMU_TMP) 255 | 256 | .PHONY: common-proto 257 | common-proto: 258 | @echo ">> generating code from proto files" 259 | @./scripts/genproto.sh 260 | 261 | ifdef GOLANGCI_LINT 262 | $(GOLANGCI_LINT): 263 | mkdir -p $(FIRST_GOPATH)/bin 264 | curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ 265 | | sed -e '/install -d/d' \ 266 | | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) 267 | endif 268 | 269 | .PHONY: precheck 270 | precheck:: 271 | 272 | define PRECHECK_COMMAND_template = 273 | precheck:: $(1)_precheck 274 | 275 | PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) 276 | .PHONY: $(1)_precheck 277 | $(1)_precheck: 278 | @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ 279 | echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. 
Is $(1) installed?"; \ 280 | exit 1; \ 281 | fi 282 | endef 283 | 284 | govulncheck: install-govulncheck 285 | govulncheck ./... 286 | 287 | install-govulncheck: 288 | command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest 289 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Exporter for memcached stats. 2 | Copyright 2013-2016 The Prometheus Authors 3 | 4 | This product includes software developed at 5 | SoundCloud Ltd. (https://soundcloud.com/). 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Memcached Exporter for Prometheus [![Build Status][buildstatus]][circleci] 2 | 3 | [![Docker Repository on Quay](https://quay.io/repository/prometheus/memcached-exporter/status)][quay] 4 | [![Docker Pulls](https://img.shields.io/docker/pulls/prom/memcached-exporter.svg?maxAge=604800)][hub] 5 | 6 | A [memcached](https://memcached.org/) exporter for Prometheus. 7 | 8 | ## Building 9 | 10 | The memcached exporter exports metrics from a memcached server for 11 | consumption by Prometheus. The server is specified as `--memcached.address` flag 12 | to the program (default is `localhost:11211`). 13 | 14 | By default the memcache_exporter serves on port `0.0.0.0:9150` at `/metrics`: 15 | 16 | ```sh 17 | make 18 | ./memcached_exporter 19 | ``` 20 | 21 | Alternatively a Dockerfile is supplied: 22 | 23 | ```sh 24 | docker run -p 9150:9150 quay.io/prometheus/memcached-exporter:latest 25 | ``` 26 | 27 | ## Collectors 28 | 29 | The exporter collects a number of statistics from the server: 30 | 31 | ``` 32 | # HELP memcached_accepting_conns The Memcached server is currently accepting new connections. 33 | # TYPE memcached_accepting_conns gauge 34 | # HELP memcached_commands_total Total number of all requests broken down by command (get, set, etc.) and status. 35 | # TYPE memcached_commands_total counter 36 | # HELP memcached_connections_listener_disabled_total Number of times that memcached has hit its connections limit and disabled its listener. 37 | # TYPE memcached_connections_listener_disabled_total counter 38 | # HELP memcached_connections_total Total number of connections opened since the server started running. 39 | # TYPE memcached_connections_total counter 40 | # HELP memcached_connections_yielded_total Total number of connections yielded running due to hitting the memcached's -R limit. 41 | # TYPE memcached_connections_yielded_total counter 42 | # HELP memcached_current_bytes Current number of bytes used to store items. 43 | # TYPE memcached_current_bytes gauge 44 | # HELP memcached_current_connections Current number of open connections. 45 | # TYPE memcached_current_connections gauge 46 | # HELP memcached_current_items Current number of items stored by this instance. 47 | # TYPE memcached_current_items gauge 48 | # HELP memcached_direct_reclaims_total Times worker threads had to directly reclaim or evict items. 49 | # TYPE memcached_direct_reclaims_total counter 50 | # HELP memcached_items_evicted_total Total number of valid items removed from cache to free memory for new items. 51 | # TYPE memcached_items_evicted_total counter 52 | # HELP memcached_items_reclaimed_total Total number of times an entry was stored using memory from an expired entry. 
53 | # TYPE memcached_items_reclaimed_total counter 54 | # HELP memcached_items_total Total number of items stored during the life of this instance. 55 | # TYPE memcached_items_total counter 56 | # HELP memcached_limit_bytes Number of bytes this server is allowed to use for storage. 57 | # TYPE memcached_limit_bytes gauge 58 | # HELP memcached_lru_crawler_enabled Whether the LRU crawler is enabled. 59 | # TYPE memcached_lru_crawler_enabled gauge 60 | # HELP memcached_lru_crawler_hot_max_factor Set idle age of HOT LRU to COLD age * this 61 | # TYPE memcached_lru_crawler_hot_max_factor gauge 62 | # HELP memcached_lru_crawler_hot_percent Percent of slab memory reserved for HOT LRU. 63 | # TYPE memcached_lru_crawler_hot_percent gauge 64 | # HELP memcached_lru_crawler_items_checked_total Total items examined by LRU Crawler. 65 | # TYPE memcached_lru_crawler_items_checked_total counter 66 | # HELP memcached_lru_crawler_maintainer_thread Split LRU mode and background threads. 67 | # TYPE memcached_lru_crawler_maintainer_thread gauge 68 | # HELP memcached_lru_crawler_moves_to_cold_total Total number of items moved from HOT/WARM to COLD LRU's. 69 | # TYPE memcached_lru_crawler_moves_to_cold_total counter 70 | # HELP memcached_lru_crawler_moves_to_warm_total Total number of items moved from COLD to WARM LRU. 71 | # TYPE memcached_lru_crawler_moves_to_warm_total counter 72 | # HELP memcached_lru_crawler_moves_within_lru_total Total number of items reshuffled within HOT or WARM LRU's. 73 | # TYPE memcached_lru_crawler_moves_within_lru_total counter 74 | # HELP memcached_lru_crawler_reclaimed_total Total items freed by LRU Crawler. 75 | # TYPE memcached_lru_crawler_reclaimed_total counter 76 | # HELP memcached_lru_crawler_sleep Microseconds to sleep between LRU crawls. 77 | # TYPE memcached_lru_crawler_sleep gauge 78 | # HELP memcached_lru_crawler_starts_total Times an LRU crawler was started. 79 | # TYPE memcached_lru_crawler_starts_total counter 80 | # HELP memcached_lru_crawler_to_crawl Max items to crawl per slab per run. 81 | # TYPE memcached_lru_crawler_to_crawl gauge 82 | # HELP memcached_lru_crawler_warm_max_factor Set idle age of WARM LRU to COLD age * this 83 | # TYPE memcached_lru_crawler_warm_max_factor gauge 84 | # HELP memcached_lru_crawler_warm_percent Percent of slab memory reserved for WARM LRU. 85 | # TYPE memcached_lru_crawler_warm_percent gauge 86 | # HELP memcached_malloced_bytes Number of bytes of memory allocated to slab pages. 87 | # TYPE memcached_malloced_bytes gauge 88 | # HELP memcached_max_connections Maximum number of clients allowed. 89 | # TYPE memcached_max_connections gauge 90 | # HELP memcached_read_bytes_total Total number of bytes read by this server from network. 91 | # TYPE memcached_read_bytes_total counter 92 | # HELP memcached_slab_chunk_size_bytes Number of bytes allocated to each chunk within this slab class. 93 | # TYPE memcached_slab_chunk_size_bytes gauge 94 | # HELP memcached_slab_chunks_free Number of chunks not yet allocated items. 95 | # TYPE memcached_slab_chunks_free gauge 96 | # HELP memcached_slab_chunks_free_end Number of free chunks at the end of the last allocated page. 97 | # TYPE memcached_slab_chunks_free_end gauge 98 | # HELP memcached_slab_chunks_per_page Number of chunks within a single page for this slab class. 99 | # TYPE memcached_slab_chunks_per_page gauge 100 | # HELP memcached_slab_chunks_used Number of chunks allocated to an item. 
101 | # TYPE memcached_slab_chunks_used gauge 102 | # HELP memcached_slab_cold_items Number of items presently stored in the COLD LRU. 103 | # TYPE memcached_slab_cold_items gauge 104 | # HELP memcached_slab_commands_total Total number of all requests broken down by command (get, set, etc.) and status per slab. 105 | # TYPE memcached_slab_commands_total counter 106 | # HELP memcached_slab_current_chunks Number of chunks allocated to this slab class. 107 | # TYPE memcached_slab_current_chunks gauge 108 | # HELP memcached_slab_current_items Number of items currently stored in this slab class. 109 | # TYPE memcached_slab_current_items gauge 110 | # HELP memcached_slab_current_pages Number of pages allocated to this slab class. 111 | # TYPE memcached_slab_current_pages gauge 112 | # HELP memcached_slab_hot_age_seconds Age of the oldest item in HOT LRU. 113 | # TYPE memcached_slab_hot_age_seconds gauge 114 | # HELP memcached_slab_hot_items Number of items presently stored in the HOT LRU. 115 | # TYPE memcached_slab_hot_items gauge 116 | # HELP memcached_slab_items_age_seconds Number of seconds the oldest item has been in the slab class. 117 | # TYPE memcached_slab_items_age_seconds gauge 118 | # HELP memcached_slab_items_crawler_reclaimed_total Number of items freed by the LRU Crawler. 119 | # TYPE memcached_slab_items_crawler_reclaimed_total counter 120 | # HELP memcached_slab_items_evicted_nonzero_total Total number of times an item which had an explicit expire time set had to be evicted from the LRU before it expired. 121 | # TYPE memcached_slab_items_evicted_nonzero_total counter 122 | # HELP memcached_slab_items_evicted_time_seconds Seconds since the last access for the most recent item evicted from this class. 123 | # TYPE memcached_slab_items_evicted_time_seconds counter 124 | # HELP memcached_slab_items_evicted_total Total number of times an item had to be evicted from the LRU before it expired. 125 | # TYPE memcached_slab_items_evicted_total counter 126 | # HELP memcached_slab_items_evicted_unfetched_total Total nmber of items evicted and never fetched. 127 | # TYPE memcached_slab_items_evicted_unfetched_total counter 128 | # HELP memcached_slab_items_expired_unfetched_total Total number of valid items evicted from the LRU which were never touched after being set. 129 | # TYPE memcached_slab_items_expired_unfetched_total counter 130 | # HELP memcached_slab_items_moves_to_cold Number of items moved from HOT or WARM into COLD. 131 | # TYPE memcached_slab_items_moves_to_cold counter 132 | # HELP memcached_slab_items_moves_to_warm Number of items moves from COLD into WARM. 133 | # TYPE memcached_slab_items_moves_to_warm counter 134 | # HELP memcached_slab_items_moves_within_lru Number of times active items were bumped within HOT or WARM. 135 | # TYPE memcached_slab_items_moves_within_lru counter 136 | # HELP memcached_slab_items_outofmemory_total Total number of items for this slab class that have triggered an out of memory error. 137 | # TYPE memcached_slab_items_outofmemory_total counter 138 | # HELP memcached_slab_items_reclaimed_total Total number of items reclaimed. 139 | # TYPE memcached_slab_items_reclaimed_total counter 140 | # HELP memcached_slab_items_tailrepairs_total Total number of times the entries for a particular ID need repairing. 141 | # TYPE memcached_slab_items_tailrepairs_total counter 142 | # HELP memcached_slab_lru_hits_total Number of get_hits to the LRU. 
143 | # TYPE memcached_slab_lru_hits_total counter 144 | # HELP memcached_slab_mem_requested_bytes Number of bytes of memory actual items take up within a slab. 145 | # TYPE memcached_slab_mem_requested_bytes counter 146 | # HELP memcached_slab_warm_age_seconds Age of the oldest item in HOT LRU. 147 | # TYPE memcached_slab_warm_age_seconds gauge 148 | # HELP memcached_slab_warm_items Number of items presently stored in the WARM LRU. 149 | # TYPE memcached_slab_warm_items gauge 150 | # HELP memcached_time_seconds current UNIX time according to the server. 151 | # TYPE memcached_time_seconds gauge 152 | # HELP memcached_up Could the memcached server be reached. 153 | # TYPE memcached_up gauge 154 | # HELP memcached_uptime_seconds Number of seconds since the server started. 155 | # TYPE memcached_uptime_seconds counter 156 | # HELP memcached_version The version of this memcached server. 157 | # TYPE memcached_version gauge 158 | # HELP memcached_written_bytes_total Total number of bytes sent by this server to network. 159 | # TYPE memcached_written_bytes_total counter 160 | ``` 161 | 162 | There is also optional support to export metrics about the memcached process 163 | itself by setting the `--memcached.pid-file ` flag. If the 164 | memcached_exporter process has the rights to read /proc information of the 165 | memcached process, then the following metrics will be exported additionally. 166 | 167 | ``` 168 | # HELP memcached_process_cpu_seconds_total Total user and system CPU time spent in seconds. 169 | # TYPE memcached_process_cpu_seconds_total counter 170 | # HELP memcached_process_max_fds Maximum number of open file descriptors. 171 | # TYPE memcached_process_max_fds gauge 172 | # HELP memcached_process_open_fds Number of open file descriptors. 173 | # TYPE memcached_process_open_fds gauge 174 | # HELP memcached_process_resident_memory_bytes Resident memory size in bytes. 175 | # TYPE memcached_process_resident_memory_bytes gauge 176 | # HELP memcached_process_start_time_seconds Start time of the process since unix epoch in seconds. 177 | # TYPE memcached_process_start_time_seconds gauge 178 | # HELP memcached_process_virtual_memory_bytes Virtual memory size in bytes. 179 | # TYPE memcached_process_virtual_memory_bytes gauge 180 | ``` 181 | 182 | ## TLS and basic authentication 183 | 184 | The Memcached Exporter supports TLS and basic authentication. 185 | 186 | To use TLS and/or basic authentication, you need to pass a configuration file 187 | using the `--web.config.file` parameter. The format of the file is described 188 | [in the exporter-toolkit repository](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md). 189 | 190 | To use TLS for connections to memcached, use the `--memcached.tls.*` flags. 191 | See `memcached_exporter --help` for details. 192 | 193 | ## Multi-target 194 | 195 | The exporter also supports the [multi-target](https://prometheus.io/docs/guides/multi-target-exporter/) pattern on the `/scrape` endpoint. 
Example: 196 | ``` 197 | curl 'localhost:9150/scrape?target=memcached-host.company.com:11211' 198 | ``` 199 | 200 | An example configuration using [prometheus-elasticache-sd](https://github.com/maxbrunet/prometheus-elasticache-sd): 201 | 202 | ```yaml 203 | scrape_configs: 204 | - job_name: "memcached_exporter_targets" 205 | file_sd_configs: 206 | - files: 207 | - /path/to/elasticache.json # File created by service discovery 208 | metrics_path: /scrape 209 | relabel_configs: 210 | # Filter for memcached cache nodes 211 | - source_labels: [__meta_elasticache_engine] 212 | regex: memcached 213 | action: keep 214 | # Build Memcached URL to use as target parameter for the exporter 215 | - source_labels: 216 | - __meta_elasticache_endpoint_address 217 | - __meta_elasticache_endpoint_port 218 | replacement: $1 219 | separator: ':' 220 | target_label: __param_target 221 | # Use Memcached URL as instance label 222 | - source_labels: [__param_target] 223 | target_label: instance 224 | # Set exporter address 225 | - target_label: __address__ 226 | replacement: memcached-exporter-service.company.com:9151 227 | ``` 228 | 229 | If you are running solely for multi-target scraping, start the exporter with `--memcached.address=""` to avoid attempting to connect to a non-existent memcached host. For example: 230 | 231 | ``` 232 | ./memcached_exporter --memcached.address="" 233 | ``` 234 | 235 | [buildstatus]: https://circleci.com/gh/prometheus/memcached_exporter/tree/master.svg?style=shield 236 | [circleci]: https://circleci.com/gh/prometheus/memcached_exporter 237 | [hub]: https://hub.docker.com/r/prom/memcached-exporter/ 238 | [quay]: https://quay.io/repository/prometheus/memcached-exporter 239 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Reporting a security issue 2 | 3 | The Prometheus security policy, including how to report vulnerabilities, can be 4 | found here: 5 | 6 | <https://prometheus.io/docs/operating/security/> 7 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 0.15.3 2 | -------------------------------------------------------------------------------- /cmd/memcached_exporter/main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License.
13 | 14 | package main 15 | 16 | import ( 17 | "crypto/tls" 18 | "net" 19 | "net/http" 20 | "os" 21 | "strings" 22 | 23 | "github.com/alecthomas/kingpin/v2" 24 | "github.com/prometheus/client_golang/prometheus" 25 | "github.com/prometheus/client_golang/prometheus/collectors" 26 | versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" 27 | "github.com/prometheus/client_golang/prometheus/promhttp" 28 | promconfig "github.com/prometheus/common/config" 29 | "github.com/prometheus/common/promslog" 30 | "github.com/prometheus/common/promslog/flag" 31 | "github.com/prometheus/common/version" 32 | "github.com/prometheus/exporter-toolkit/web" 33 | webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag" 34 | 35 | "github.com/prometheus/memcached_exporter/pkg/exporter" 36 | "github.com/prometheus/memcached_exporter/scraper" 37 | ) 38 | 39 | func main() { 40 | var ( 41 | address = kingpin.Flag("memcached.address", "Memcached server address.").Default("localhost:11211").String() 42 | timeout = kingpin.Flag("memcached.timeout", "memcached connect timeout.").Default("1s").Duration() 43 | pidFile = kingpin.Flag("memcached.pid-file", "Optional path to a file containing the memcached PID for additional metrics.").Default("").String() 44 | enableTLS = kingpin.Flag("memcached.tls.enable", "Enable TLS connections to memcached").Bool() 45 | certFile = kingpin.Flag("memcached.tls.cert-file", "Client certificate file.").Default("").String() 46 | keyFile = kingpin.Flag("memcached.tls.key-file", "Client private key file.").Default("").String() 47 | caFile = kingpin.Flag("memcached.tls.ca-file", "Client root CA file.").Default("").String() 48 | insecureSkipVerify = kingpin.Flag("memcached.tls.insecure-skip-verify", "Skip server certificate verification").Bool() 49 | serverName = kingpin.Flag("memcached.tls.server-name", "Memcached TLS certificate servername").Default("").String() 50 | webConfig = webflag.AddFlags(kingpin.CommandLine, ":9150") 51 | metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String() 52 | scrapePath = kingpin.Flag("web.scrape-path", "Path under which to receive scrape requests.").Default("/scrape").String() 53 | ) 54 | 55 | promslogConfig := &promslog.Config{} 56 | flag.AddFlags(kingpin.CommandLine, promslogConfig) 57 | kingpin.HelpFlag.Short('h') 58 | kingpin.Version(version.Print("memcached_exporter")) 59 | kingpin.Parse() 60 | logger := promslog.New(promslogConfig) 61 | 62 | logger.Info("Starting memcached_exporter", "version", version.Info()) 63 | logger.Info("Build context", "context", version.BuildContext()) 64 | 65 | var ( 66 | tlsConfig *tls.Config 67 | err error 68 | ) 69 | if *enableTLS { 70 | if *serverName == "" { 71 | *serverName, _, err = net.SplitHostPort(*address) 72 | if err != nil { 73 | if strings.Contains(*address, "/") { 74 | logger.Error("If --memcached.tls.enable is set and --memcached.address is a unix socket, " + 75 | "you must also specify --memcached.tls.server-name") 76 | } else { 77 | logger.Error("Error parsing memcached address", "err", err) 78 | } 79 | os.Exit(1) 80 | } 81 | } 82 | tlsConfig, err = promconfig.NewTLSConfig(&promconfig.TLSConfig{ 83 | CertFile: *certFile, 84 | KeyFile: *keyFile, 85 | CAFile: *caFile, 86 | ServerName: *serverName, 87 | InsecureSkipVerify: *insecureSkipVerify, 88 | }) 89 | if err != nil { 90 | logger.Error("Failed to create TLS config", "err", err) 91 | os.Exit(1) 92 | } 93 | } 94 | 95 | 
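	// Register collectors with the default Prometheus registry: build/version
	// info, the memcached exporter itself (skipped when --memcached.address is
	// empty, e.g. in multi-target-only deployments), and an optional process
	// collector driven by --memcached.pid-file. The *metricsPath endpoint
	// serves this registry, while *scrapePath serves multi-target requests.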
prometheus.MustRegister(versioncollector.NewCollector("memcached_exporter")) 96 | 97 | if *address != "" { 98 | prometheus.MustRegister(exporter.New(*address, *timeout, logger, tlsConfig)) 99 | } 100 | 101 | if *pidFile != "" { 102 | procExporter := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{ 103 | PidFn: prometheus.NewPidFileFn(*pidFile), 104 | Namespace: exporter.Namespace, 105 | }) 106 | prometheus.MustRegister(procExporter) 107 | } 108 | 109 | http.Handle(*metricsPath, promhttp.Handler()) 110 | scraper := scraper.New(*timeout, logger, tlsConfig) 111 | http.Handle(*scrapePath, scraper.Handler()) 112 | 113 | if *metricsPath != "/" && *metricsPath != "" { 114 | landingConfig := web.LandingConfig{ 115 | Name: "memcached_exporter", 116 | Description: "Prometheus Exporter for Memcached servers", 117 | Version: version.Info(), 118 | Links: []web.LandingLinks{ 119 | { 120 | Address: *metricsPath, 121 | Text: "Metrics", 122 | }, 123 | }, 124 | } 125 | landingPage, err := web.NewLandingPage(landingConfig) 126 | if err != nil { 127 | logger.Error("Error creating landing page", "err", err) 128 | os.Exit(1) 129 | } 130 | http.Handle("/", landingPage) 131 | } 132 | 133 | srv := &http.Server{} 134 | if err := web.ListenAndServe(srv, webConfig, logger); err != nil { 135 | logger.Error("Error running HTTP server", "err", err) 136 | os.Exit(1) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /cmd/memcached_exporter/main_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
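// The acceptance tests below exercise the compiled ../../memcached_exporter
// binary end to end against a running memcached instance and assert on the
// text exposition returned by the /metrics and /scrape endpoints.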
13 | 14 | package main 15 | 16 | import ( 17 | "bytes" 18 | "context" 19 | "io" 20 | "net/http" 21 | "os" 22 | "os/exec" 23 | "strconv" 24 | "strings" 25 | "testing" 26 | "time" 27 | 28 | "github.com/grobie/gomemcache/memcache" 29 | ) 30 | 31 | func waitExporterReady(t *testing.T, errorChannel chan error, address string) { 32 | for { 33 | timer := time.NewTimer(100 * time.Millisecond) 34 | select { 35 | case <-timer.C: 36 | resp, err := http.Get(address) 37 | if err != nil { 38 | t.Logf("error requesting the exporter: %v", err) 39 | continue 40 | } 41 | if resp.StatusCode != http.StatusOK { 42 | resp.Body.Close() 43 | t.Logf("unexpected exporter status code: %d", resp.StatusCode) 44 | continue 45 | } else { 46 | return 47 | } 48 | 49 | case err := <-errorChannel: 50 | t.Fatal("error running the exporter:", err) 51 | } 52 | } 53 | } 54 | 55 | func warmUpMemcached(t *testing.T, client *memcache.Client) { 56 | item := &memcache.Item{Key: "foo", Value: []byte("bar")} 57 | if err := client.Set(item); err != nil { 58 | t.Fatal(err) 59 | } 60 | if err := client.Set(item); err != nil { 61 | t.Fatal(err) 62 | } 63 | if _, err := client.Get("foo"); err != nil { 64 | t.Fatal(err) 65 | } 66 | if _, err := client.Get("qux"); err != memcache.ErrCacheMiss { 67 | t.Fatal(err) 68 | } 69 | last, err := client.Get("foo") 70 | if err != nil { 71 | t.Fatal(err) 72 | } 73 | last.Value = []byte("banana") 74 | if err := client.CompareAndSwap(last); err != nil { 75 | t.Fatal(err) 76 | } 77 | large := &memcache.Item{Key: "large", Value: bytes.Repeat([]byte("."), 130)} 78 | if err := client.Set(large); err != nil { 79 | t.Fatal(err) 80 | } 81 | } 82 | 83 | func TestAcceptanceSingleInstance(t *testing.T) { 84 | errc := make(chan error) 85 | 86 | addr := "localhost:11211" 87 | // MEMCACHED_PORT might be set by a linked memcached docker container. 88 | if env := os.Getenv("MEMCACHED_PORT"); env != "" { 89 | addr = strings.TrimPrefix(env, "tcp://") 90 | } 91 | 92 | t.Logf("starting exporter") 93 | ctx, cancel := context.WithCancel(context.Background()) 94 | exporter := exec.CommandContext(ctx, "../../memcached_exporter", "--memcached.address", addr) 95 | go func() { 96 | defer close(errc) 97 | 98 | if err := exporter.Run(); err != nil && errc != nil { 99 | errc <- err 100 | } 101 | }() 102 | 103 | defer cancel() 104 | 105 | // Wait for the exporter to be up and running. 
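// waitExporterReady polls the exporter's HTTP endpoint every 100ms until it
// returns 200 OK, and fails the test if the exporter process exits with an error first.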
106 | t.Logf("waiting exporter initialization") 107 | waitExporterReady(t, errc, "http://localhost:9150") 108 | 109 | client, err := memcache.New(addr) 110 | if err != nil { 111 | t.Fatal(err) 112 | } 113 | if err := client.StatsReset(); err != nil { 114 | t.Fatal(err) 115 | } 116 | 117 | warmUpMemcached(t, client) 118 | 119 | statsSettings, err := client.StatsSettings() 120 | if err != nil { 121 | t.Fatal(err) 122 | } 123 | 124 | useTempLRU := false 125 | for _, t := range statsSettings { 126 | if t["temp_lru"] == "true" { 127 | useTempLRU = true 128 | } 129 | } 130 | 131 | stats, err := client.Stats() 132 | if err != nil { 133 | t.Fatal(err) 134 | } 135 | 136 | memcacheVersion := "" 137 | for _, t := range stats { 138 | memcacheVersion = t.Stats["version"] 139 | } 140 | 141 | resp, err := http.Get("http://localhost:9150/metrics") 142 | if err != nil { 143 | t.Fatal(err) 144 | } 145 | defer resp.Body.Close() 146 | 147 | body, err := io.ReadAll(resp.Body) 148 | if err != nil { 149 | t.Fatal(err) 150 | } 151 | 152 | tests := []string{ 153 | // memcached_current_connections varies depending on memcached versions 154 | // so it isn't practical to check for an exact value. 155 | `memcached_current_connections `, 156 | `memcached_up 1`, 157 | `memcached_commands_total{command="get",status="hit"} 2`, 158 | `memcached_commands_total{command="get",status="miss"} 1`, 159 | `memcached_commands_total{command="set",status="hit"} 3`, 160 | `memcached_commands_total{command="cas",status="hit"} 1`, 161 | `memcached_current_bytes 262`, 162 | `memcached_max_connections 1024`, 163 | `memcached_current_items 2`, 164 | `memcached_items_total 4`, 165 | `memcached_slab_current_items{slab="1"} 1`, 166 | `memcached_slab_current_items{slab="5"} 1`, 167 | `memcached_slab_commands_total{command="set",slab="1",status="hit"} 2`, 168 | `memcached_slab_commands_total{command="cas",slab="1",status="hit"} 1`, 169 | `memcached_slab_commands_total{command="set",slab="5",status="hit"} 1`, 170 | `memcached_slab_commands_total{command="cas",slab="5",status="hit"} 0`, 171 | `memcached_slab_current_chunks{slab="1"} 10922`, 172 | `memcached_slab_current_chunks{slab="5"} 4369`, 173 | `memcached_slab_mem_requested_bytes{slab="1"} 68`, 174 | `memcached_slab_mem_requested_bytes{slab="5"} 194`, 175 | } 176 | 177 | if useTempLRU == true { 178 | tests = append(tests, 179 | `memcached_slab_temporary_items{slab="1"}`, 180 | `memcached_slab_lru_hits_total{lru="temporary",slab="5"}`, 181 | `memcached_slab_temporary_items{slab="5"}`) 182 | } 183 | 184 | memcacheVersionMajorMinor, err := strconv.ParseFloat(memcacheVersion[0:3], 64) 185 | if err != nil { 186 | t.Fatal(err) 187 | } 188 | 189 | if memcacheVersionMajorMinor >= 1.5 { 190 | tests = append(tests, 191 | `memcached_slab_lru_hits_total{lru="hot",slab="1"}`, 192 | `memcached_slab_lru_hits_total{lru="cold",slab="1"}`, 193 | `memcached_slab_lru_hits_total{lru="warm",slab="1"}`, 194 | `memcached_slab_lru_hits_total{lru="temporary",slab="1"}`, 195 | `memcached_slab_hot_items{slab="1"}`, 196 | `memcached_slab_warm_items{slab="1"}`, 197 | `memcached_slab_cold_items{slab="1"}`, 198 | `memcached_slab_hot_age_seconds{slab="1"}`, 199 | `memcached_slab_warm_age_seconds{slab="1"}`, 200 | `memcached_slab_lru_hits_total{lru="hot",slab="5"}`, 201 | `memcached_slab_lru_hits_total{lru="cold",slab="5"}`, 202 | `memcached_slab_lru_hits_total{lru="warm",slab="5"}`, 203 | `memcached_slab_hot_items{slab="5"}`, 204 | `memcached_slab_warm_items{slab="5"}`, 205 | `memcached_slab_cold_items{slab="5"}`, 206 | 
`memcached_slab_hot_age_seconds{slab="5"}`, 207 | `memcached_slab_warm_age_seconds{slab="5"}`) 208 | } 209 | 210 | for _, test := range tests { 211 | if !bytes.Contains(body, []byte(test)) { 212 | t.Errorf("want metrics to include %q, have:\n%s", test, body) 213 | } 214 | } 215 | 216 | cancel() 217 | 218 | <-errc 219 | } 220 | 221 | func TestAcceptanceScrapper(t *testing.T) { 222 | errc := make(chan error) 223 | 224 | addr := "localhost:11211" 225 | // MEMCACHED_PORT might be set by a linked memcached docker container. 226 | if env := os.Getenv("MEMCACHED_PORT"); env != "" { 227 | addr = strings.TrimPrefix(env, "tcp://") 228 | } 229 | 230 | t.Logf("starting exporter") 231 | ctx, cancel := context.WithCancel(context.Background()) 232 | exporter := exec.CommandContext(ctx, "../../memcached_exporter", "--memcached.address", "") 233 | go func() { 234 | defer close(errc) 235 | 236 | if err := exporter.Run(); err != nil && errc != nil { 237 | errc <- err 238 | } 239 | }() 240 | 241 | defer cancel() 242 | 243 | // Wait for the exporter to be up and running. 244 | t.Logf("waiting exporter initialization") 245 | waitExporterReady(t, errc, "http://localhost:9150") 246 | 247 | client, err := memcache.New(addr) 248 | if err != nil { 249 | t.Fatal(err) 250 | } 251 | if err := client.StatsReset(); err != nil { 252 | t.Fatal(err) 253 | } 254 | 255 | warmUpMemcached(t, client) 256 | 257 | statsSettings, err := client.StatsSettings() 258 | if err != nil { 259 | t.Fatal(err) 260 | } 261 | 262 | useTempLRU := false 263 | for _, t := range statsSettings { 264 | if t["temp_lru"] == "true" { 265 | useTempLRU = true 266 | } 267 | } 268 | 269 | stats, err := client.Stats() 270 | if err != nil { 271 | t.Fatal(err) 272 | } 273 | 274 | memcacheVersion := "" 275 | for _, t := range stats { 276 | memcacheVersion = t.Stats["version"] 277 | } 278 | 279 | resp, err := http.Get("http://localhost:9150/scrape?target=" + addr) 280 | if err != nil { 281 | t.Fatal(err) 282 | } 283 | defer resp.Body.Close() 284 | 285 | body, err := io.ReadAll(resp.Body) 286 | if err != nil { 287 | t.Fatal(err) 288 | } 289 | 290 | tests := []string{ 291 | // memcached_current_connections varies depending on memcached versions 292 | // so it isn't practical to check for an exact value. 
293 | `memcached_current_connections `, 294 | `memcached_up 1`, 295 | `memcached_commands_total{command="get",status="hit"} 2`, 296 | `memcached_commands_total{command="get",status="miss"} 1`, 297 | `memcached_commands_total{command="set",status="hit"} 3`, 298 | `memcached_commands_total{command="cas",status="hit"} 1`, 299 | `memcached_current_bytes 262`, 300 | `memcached_max_connections 1024`, 301 | `memcached_current_items 2`, 302 | `memcached_items_total 4`, 303 | `memcached_slab_current_items{slab="1"} 1`, 304 | `memcached_slab_current_items{slab="5"} 1`, 305 | `memcached_slab_commands_total{command="set",slab="1",status="hit"} 2`, 306 | `memcached_slab_commands_total{command="cas",slab="1",status="hit"} 1`, 307 | `memcached_slab_commands_total{command="set",slab="5",status="hit"} 1`, 308 | `memcached_slab_commands_total{command="cas",slab="5",status="hit"} 0`, 309 | `memcached_slab_current_chunks{slab="1"} 10922`, 310 | `memcached_slab_current_chunks{slab="5"} 4369`, 311 | `memcached_slab_mem_requested_bytes{slab="1"} 68`, 312 | `memcached_slab_mem_requested_bytes{slab="5"} 194`, 313 | } 314 | 315 | if useTempLRU == true { 316 | tests = append(tests, 317 | `memcached_slab_temporary_items{slab="1"}`, 318 | `memcached_slab_lru_hits_total{lru="temporary",slab="5"}`, 319 | `memcached_slab_temporary_items{slab="5"}`) 320 | } 321 | 322 | memcacheVersionMajorMinor, err := strconv.ParseFloat(memcacheVersion[0:3], 64) 323 | if err != nil { 324 | t.Fatal(err) 325 | } 326 | 327 | if memcacheVersionMajorMinor >= 1.5 { 328 | tests = append(tests, 329 | `memcached_slab_lru_hits_total{lru="hot",slab="1"}`, 330 | `memcached_slab_lru_hits_total{lru="cold",slab="1"}`, 331 | `memcached_slab_lru_hits_total{lru="warm",slab="1"}`, 332 | `memcached_slab_lru_hits_total{lru="temporary",slab="1"}`, 333 | `memcached_slab_hot_items{slab="1"}`, 334 | `memcached_slab_warm_items{slab="1"}`, 335 | `memcached_slab_cold_items{slab="1"}`, 336 | `memcached_slab_hot_age_seconds{slab="1"}`, 337 | `memcached_slab_warm_age_seconds{slab="1"}`, 338 | `memcached_slab_lru_hits_total{lru="hot",slab="5"}`, 339 | `memcached_slab_lru_hits_total{lru="cold",slab="5"}`, 340 | `memcached_slab_lru_hits_total{lru="warm",slab="5"}`, 341 | `memcached_slab_hot_items{slab="5"}`, 342 | `memcached_slab_warm_items{slab="5"}`, 343 | `memcached_slab_cold_items{slab="5"}`, 344 | `memcached_slab_hot_age_seconds{slab="5"}`, 345 | `memcached_slab_warm_age_seconds{slab="5"}`) 346 | } 347 | 348 | for _, test := range tests { 349 | if !bytes.Contains(body, []byte(test)) { 350 | t.Errorf("want metrics to include %q, have:\n%s", test, body) 351 | } 352 | } 353 | 354 | cancel() 355 | 356 | <-errc 357 | } 358 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/prometheus/memcached_exporter 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/alecthomas/kingpin/v2 v2.4.0 7 | github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445 8 | github.com/prometheus/client_golang v1.22.0 9 | github.com/prometheus/common v0.64.0 10 | github.com/prometheus/exporter-toolkit v0.13.2 11 | ) 12 | 13 | require ( 14 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect 15 | github.com/beorn7/perks v1.0.1 // indirect 16 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 17 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 18 | github.com/jpillora/backoff v1.0.0 // indirect 19 | github.com/mdlayher/socket 
v0.4.1 // indirect 20 | github.com/mdlayher/vsock v1.2.1 // indirect 21 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 22 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect 23 | github.com/prometheus/client_model v0.6.2 // indirect 24 | github.com/prometheus/procfs v0.15.1 // indirect 25 | github.com/xhit/go-str2duration/v2 v2.1.0 // indirect 26 | golang.org/x/crypto v0.38.0 // indirect 27 | golang.org/x/net v0.40.0 // indirect 28 | golang.org/x/oauth2 v0.30.0 // indirect 29 | golang.org/x/sync v0.14.0 // indirect 30 | golang.org/x/sys v0.33.0 // indirect 31 | golang.org/x/text v0.25.0 // indirect 32 | google.golang.org/protobuf v1.36.6 // indirect 33 | gopkg.in/yaml.v2 v2.4.0 // indirect 34 | ) 35 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= 2 | github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= 3 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= 4 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= 5 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 6 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 7 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 8 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 9 | github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= 10 | github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 11 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 12 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 13 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 14 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 15 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 16 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 17 | github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445 h1:FlKQKUYPZ5yDCN248M3R7x8yu2E3yEZ0H7aLomE4EoE= 18 | github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445/go.mod h1:L69/dBlPQlWkcnU76WgcppK5e4rrxzQdi6LhLnK/ytA= 19 | github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= 20 | github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 21 | github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 22 | github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 23 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 24 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 25 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 26 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 27 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 28 | github.com/kylelemons/godebug v1.1.0/go.mod 
h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 29 | github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= 30 | github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= 31 | github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= 32 | github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= 33 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 34 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 35 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= 36 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 37 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 38 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 39 | github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= 40 | github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= 41 | github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= 42 | github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= 43 | github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= 44 | github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= 45 | github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ= 46 | github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g= 47 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 48 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 49 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 50 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 51 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 52 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 53 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 54 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 55 | github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= 56 | github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= 57 | golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= 58 | golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= 59 | golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= 60 | golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= 61 | golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= 62 | golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= 63 | golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= 64 | golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= 65 | golang.org/x/sys v0.33.0 
h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= 66 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 67 | golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= 68 | golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= 69 | google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= 70 | google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= 71 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 72 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 73 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 74 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 75 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 76 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 77 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 78 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 79 | -------------------------------------------------------------------------------- /pkg/README.md: -------------------------------------------------------------------------------- 1 | The `pkg` directory is deprecated. 2 | Please do not add new packages to this directory. 3 | Existing packages will be moved elsewhere eventually. 4 | -------------------------------------------------------------------------------- /pkg/exporter/exporter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package exporter 15 | 16 | import ( 17 | "crypto/tls" 18 | "errors" 19 | "log/slog" 20 | "net" 21 | "strconv" 22 | "strings" 23 | "time" 24 | 25 | "github.com/grobie/gomemcache/memcache" 26 | "github.com/prometheus/client_golang/prometheus" 27 | ) 28 | 29 | const ( 30 | Namespace = "memcached" 31 | subsystemLruCrawler = "lru_crawler" 32 | subsystemSlab = "slab" 33 | ) 34 | 35 | var errKeyNotFound = errors.New("key not found") 36 | 37 | // Exporter collects metrics from a memcached server. 
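// It implements the prometheus.Collector interface, holding one metric
// description per exported memcached statistic.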
38 | type Exporter struct { 39 | address string 40 | timeout time.Duration 41 | logger *slog.Logger 42 | tlsConfig *tls.Config 43 | 44 | up *prometheus.Desc 45 | uptime *prometheus.Desc 46 | time *prometheus.Desc 47 | version *prometheus.Desc 48 | rusageUser *prometheus.Desc 49 | rusageSystem *prometheus.Desc 50 | bytesRead *prometheus.Desc 51 | bytesWritten *prometheus.Desc 52 | currentConnections *prometheus.Desc 53 | maxConnections *prometheus.Desc 54 | connectionsTotal *prometheus.Desc 55 | rejectedConnections *prometheus.Desc 56 | connsYieldedTotal *prometheus.Desc 57 | listenerDisabledTotal *prometheus.Desc 58 | currentBytes *prometheus.Desc 59 | limitBytes *prometheus.Desc 60 | commands *prometheus.Desc 61 | items *prometheus.Desc 62 | itemsTotal *prometheus.Desc 63 | evictions *prometheus.Desc 64 | reclaimed *prometheus.Desc 65 | itemStoreTooLarge *prometheus.Desc 66 | itemStoreNoMemory *prometheus.Desc 67 | lruCrawlerEnabled *prometheus.Desc 68 | lruCrawlerSleep *prometheus.Desc 69 | lruCrawlerMaxItems *prometheus.Desc 70 | lruMaintainerThread *prometheus.Desc 71 | lruHotPercent *prometheus.Desc 72 | lruWarmPercent *prometheus.Desc 73 | lruHotMaxAgeFactor *prometheus.Desc 74 | lruWarmMaxAgeFactor *prometheus.Desc 75 | lruCrawlerStarts *prometheus.Desc 76 | lruCrawlerReclaimed *prometheus.Desc 77 | lruCrawlerItemsChecked *prometheus.Desc 78 | lruCrawlerMovesToCold *prometheus.Desc 79 | lruCrawlerMovesToWarm *prometheus.Desc 80 | lruCrawlerMovesWithinLru *prometheus.Desc 81 | directReclaims *prometheus.Desc 82 | malloced *prometheus.Desc 83 | itemsNumber *prometheus.Desc 84 | itemsAge *prometheus.Desc 85 | itemsCrawlerReclaimed *prometheus.Desc 86 | itemsEvicted *prometheus.Desc 87 | itemsEvictedNonzero *prometheus.Desc 88 | itemsEvictedTime *prometheus.Desc 89 | itemsEvictedUnfetched *prometheus.Desc 90 | itemsExpiredUnfetched *prometheus.Desc 91 | itemsOutofmemory *prometheus.Desc 92 | itemsReclaimed *prometheus.Desc 93 | itemsTailrepairs *prometheus.Desc 94 | itemsMovesToCold *prometheus.Desc 95 | itemsMovesToWarm *prometheus.Desc 96 | itemsMovesWithinLru *prometheus.Desc 97 | itemsHot *prometheus.Desc 98 | itemsWarm *prometheus.Desc 99 | itemsCold *prometheus.Desc 100 | itemsTemporary *prometheus.Desc 101 | itemsAgeOldestHot *prometheus.Desc 102 | itemsAgeOldestWarm *prometheus.Desc 103 | itemsLruHits *prometheus.Desc 104 | slabsChunkSize *prometheus.Desc 105 | slabsChunksPerPage *prometheus.Desc 106 | slabsCurrentPages *prometheus.Desc 107 | slabsCurrentChunks *prometheus.Desc 108 | slabsChunksUsed *prometheus.Desc 109 | slabsChunksFree *prometheus.Desc 110 | slabsChunksFreeEnd *prometheus.Desc 111 | slabsMemRequested *prometheus.Desc 112 | slabsCommands *prometheus.Desc 113 | extstoreCompactLost *prometheus.Desc 114 | extstoreCompactRescues *prometheus.Desc 115 | extstoreCompactSkipped *prometheus.Desc 116 | extstorePageAllocs *prometheus.Desc 117 | extstorePageEvictions *prometheus.Desc 118 | extstorePageReclaims *prometheus.Desc 119 | extstorePagesFree *prometheus.Desc 120 | extstorePagesUsed *prometheus.Desc 121 | extstoreObjectsEvicted *prometheus.Desc 122 | extstoreObjectsRead *prometheus.Desc 123 | extstoreObjectsWritten *prometheus.Desc 124 | extstoreObjectsUsed *prometheus.Desc 125 | extstoreBytesEvicted *prometheus.Desc 126 | extstoreBytesWritten *prometheus.Desc 127 | extstoreBytesRead *prometheus.Desc 128 | extstoreBytesUsed *prometheus.Desc 129 | extstoreBytesLimit *prometheus.Desc 130 | extstoreBytesFragmented *prometheus.Desc 131 | extstoreIOQueueDepth 
*prometheus.Desc 132 | acceptingConnections *prometheus.Desc 133 | } 134 | 135 | // New returns an initialized exporter. 136 | func New(server string, timeout time.Duration, logger *slog.Logger, tlsConfig *tls.Config) *Exporter { 137 | return &Exporter{ 138 | address: server, 139 | timeout: timeout, 140 | logger: logger, 141 | tlsConfig: tlsConfig, 142 | up: prometheus.NewDesc( 143 | prometheus.BuildFQName(Namespace, "", "up"), 144 | "Could the memcached server be reached.", 145 | nil, 146 | nil, 147 | ), 148 | uptime: prometheus.NewDesc( 149 | prometheus.BuildFQName(Namespace, "", "uptime_seconds"), 150 | "Number of seconds since the server started.", 151 | nil, 152 | nil, 153 | ), 154 | time: prometheus.NewDesc( 155 | prometheus.BuildFQName(Namespace, "", "time_seconds"), 156 | "current UNIX time according to the server.", 157 | nil, 158 | nil, 159 | ), 160 | version: prometheus.NewDesc( 161 | prometheus.BuildFQName(Namespace, "", "version"), 162 | "The version of this memcached server.", 163 | []string{"version"}, 164 | nil, 165 | ), 166 | rusageUser: prometheus.NewDesc( 167 | prometheus.BuildFQName(Namespace, "", "process_user_cpu_seconds_total"), 168 | "Accumulated user time for this process.", 169 | nil, 170 | nil, 171 | ), 172 | rusageSystem: prometheus.NewDesc( 173 | prometheus.BuildFQName(Namespace, "", "process_system_cpu_seconds_total"), 174 | "Accumulated system time for this process.", 175 | nil, 176 | nil, 177 | ), 178 | bytesRead: prometheus.NewDesc( 179 | prometheus.BuildFQName(Namespace, "", "read_bytes_total"), 180 | "Total number of bytes read by this server from network.", 181 | nil, 182 | nil, 183 | ), 184 | bytesWritten: prometheus.NewDesc( 185 | prometheus.BuildFQName(Namespace, "", "written_bytes_total"), 186 | "Total number of bytes sent by this server to network.", 187 | nil, 188 | nil, 189 | ), 190 | currentConnections: prometheus.NewDesc( 191 | prometheus.BuildFQName(Namespace, "", "current_connections"), 192 | "Current number of open connections.", 193 | nil, 194 | nil, 195 | ), 196 | maxConnections: prometheus.NewDesc( 197 | prometheus.BuildFQName(Namespace, "", "max_connections"), 198 | "Maximum number of clients allowed.", 199 | nil, 200 | nil, 201 | ), 202 | connectionsTotal: prometheus.NewDesc( 203 | prometheus.BuildFQName(Namespace, "", "connections_total"), 204 | "Total number of connections opened since the server started running.", 205 | nil, 206 | nil, 207 | ), 208 | rejectedConnections: prometheus.NewDesc( 209 | prometheus.BuildFQName(Namespace, "", "connections_rejected_total"), 210 | "Total number of connections rejected due to hitting the memcached's -c limit in maxconns_fast mode.", 211 | nil, 212 | nil, 213 | ), 214 | connsYieldedTotal: prometheus.NewDesc( 215 | prometheus.BuildFQName(Namespace, "", "connections_yielded_total"), 216 | "Total number of connections yielded running due to hitting the memcached's -R limit.", 217 | nil, 218 | nil, 219 | ), 220 | listenerDisabledTotal: prometheus.NewDesc( 221 | prometheus.BuildFQName(Namespace, "", "connections_listener_disabled_total"), 222 | "Number of times that memcached has hit its connections limit and disabled its listener.", 223 | nil, 224 | nil, 225 | ), 226 | currentBytes: prometheus.NewDesc( 227 | prometheus.BuildFQName(Namespace, "", "current_bytes"), 228 | "Current number of bytes used to store items.", 229 | nil, 230 | nil, 231 | ), 232 | limitBytes: prometheus.NewDesc( 233 | prometheus.BuildFQName(Namespace, "", "limit_bytes"), 234 | "Number of bytes this server is allowed to use for 
storage.", 235 | nil, 236 | nil, 237 | ), 238 | commands: prometheus.NewDesc( 239 | prometheus.BuildFQName(Namespace, "", "commands_total"), 240 | "Total number of all requests broken down by command (get, set, etc.) and status.", 241 | []string{"command", "status"}, 242 | nil, 243 | ), 244 | items: prometheus.NewDesc( 245 | prometheus.BuildFQName(Namespace, "", "current_items"), 246 | "Current number of items stored by this instance.", 247 | nil, 248 | nil, 249 | ), 250 | itemsTotal: prometheus.NewDesc( 251 | prometheus.BuildFQName(Namespace, "", "items_total"), 252 | "Total number of items stored during the life of this instance.", 253 | nil, 254 | nil, 255 | ), 256 | evictions: prometheus.NewDesc( 257 | prometheus.BuildFQName(Namespace, "", "items_evicted_total"), 258 | "Total number of valid items removed from cache to free memory for new items.", 259 | nil, 260 | nil, 261 | ), 262 | reclaimed: prometheus.NewDesc( 263 | prometheus.BuildFQName(Namespace, "", "items_reclaimed_total"), 264 | "Total number of times an entry was stored using memory from an expired entry.", 265 | nil, 266 | nil, 267 | ), 268 | itemStoreTooLarge: prometheus.NewDesc( 269 | prometheus.BuildFQName(Namespace, "", "item_too_large_total"), 270 | "The number of times an item exceeded the max-item-size when being stored.", 271 | nil, 272 | nil, 273 | ), 274 | itemStoreNoMemory: prometheus.NewDesc( 275 | prometheus.BuildFQName(Namespace, "", "item_no_memory_total"), 276 | "The number of times an item could not be stored due to no more memory.", 277 | nil, 278 | nil, 279 | ), 280 | directReclaims: prometheus.NewDesc( 281 | prometheus.BuildFQName(Namespace, "", "direct_reclaims_total"), 282 | "Times worker threads had to directly reclaim or evict items.", 283 | nil, 284 | nil, 285 | ), 286 | lruCrawlerEnabled: prometheus.NewDesc( 287 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "enabled"), 288 | "Whether the LRU crawler is enabled.", 289 | nil, 290 | nil, 291 | ), 292 | lruCrawlerSleep: prometheus.NewDesc( 293 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "sleep"), 294 | "Microseconds to sleep between LRU crawls.", 295 | nil, 296 | nil, 297 | ), 298 | lruCrawlerMaxItems: prometheus.NewDesc( 299 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "to_crawl"), 300 | "Max items to crawl per slab per run.", 301 | nil, 302 | nil, 303 | ), 304 | lruMaintainerThread: prometheus.NewDesc( 305 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "maintainer_thread"), 306 | "Split LRU mode and background threads.", 307 | nil, 308 | nil, 309 | ), 310 | lruHotPercent: prometheus.NewDesc( 311 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "hot_percent"), 312 | "Percent of slab memory reserved for HOT LRU.", 313 | nil, 314 | nil, 315 | ), 316 | lruWarmPercent: prometheus.NewDesc( 317 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "warm_percent"), 318 | "Percent of slab memory reserved for WARM LRU.", 319 | nil, 320 | nil, 321 | ), 322 | lruHotMaxAgeFactor: prometheus.NewDesc( 323 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "hot_max_factor"), 324 | "Set idle age of HOT LRU to COLD age * this", 325 | nil, 326 | nil, 327 | ), 328 | lruWarmMaxAgeFactor: prometheus.NewDesc( 329 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "warm_max_factor"), 330 | "Set idle age of WARM LRU to COLD age * this", 331 | nil, 332 | nil, 333 | ), 334 | lruCrawlerStarts: prometheus.NewDesc( 335 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "starts_total"), 336 | "Times an LRU 
crawler was started.", 337 | nil, 338 | nil, 339 | ), 340 | lruCrawlerReclaimed: prometheus.NewDesc( 341 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "reclaimed_total"), 342 | "Total items freed by LRU Crawler.", 343 | nil, 344 | nil, 345 | ), 346 | lruCrawlerItemsChecked: prometheus.NewDesc( 347 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "items_checked_total"), 348 | "Total items examined by LRU Crawler.", 349 | nil, 350 | nil, 351 | ), 352 | lruCrawlerMovesToCold: prometheus.NewDesc( 353 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "moves_to_cold_total"), 354 | "Total number of items moved from HOT/WARM to COLD LRU's.", 355 | nil, 356 | nil, 357 | ), 358 | lruCrawlerMovesToWarm: prometheus.NewDesc( 359 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "moves_to_warm_total"), 360 | "Total number of items moved from COLD to WARM LRU.", 361 | nil, 362 | nil, 363 | ), 364 | lruCrawlerMovesWithinLru: prometheus.NewDesc( 365 | prometheus.BuildFQName(Namespace, subsystemLruCrawler, "moves_within_lru_total"), 366 | "Total number of items reshuffled within HOT or WARM LRU's.", 367 | nil, 368 | nil, 369 | ), 370 | malloced: prometheus.NewDesc( 371 | prometheus.BuildFQName(Namespace, "", "malloced_bytes"), 372 | "Number of bytes of memory allocated to slab pages.", 373 | nil, 374 | nil, 375 | ), 376 | itemsNumber: prometheus.NewDesc( 377 | prometheus.BuildFQName(Namespace, subsystemSlab, "current_items"), 378 | "Number of items currently stored in this slab class.", 379 | []string{"slab"}, 380 | nil, 381 | ), 382 | itemsAge: prometheus.NewDesc( 383 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_age_seconds"), 384 | "Number of seconds the oldest item has been in the slab class.", 385 | []string{"slab"}, 386 | nil, 387 | ), 388 | itemsCrawlerReclaimed: prometheus.NewDesc( 389 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_crawler_reclaimed_total"), 390 | "Number of items freed by the LRU Crawler.", 391 | []string{"slab"}, 392 | nil, 393 | ), 394 | itemsEvicted: prometheus.NewDesc( 395 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_evicted_total"), 396 | "Total number of times an item had to be evicted from the LRU before it expired.", 397 | []string{"slab"}, 398 | nil, 399 | ), 400 | itemsEvictedNonzero: prometheus.NewDesc( 401 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_evicted_nonzero_total"), 402 | "Total number of times an item which had an explicit expire time set had to be evicted from the LRU before it expired.", 403 | []string{"slab"}, 404 | nil, 405 | ), 406 | itemsEvictedTime: prometheus.NewDesc( 407 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_evicted_time_seconds"), 408 | "Seconds since the last access for the most recent item evicted from this class.", 409 | []string{"slab"}, 410 | nil, 411 | ), 412 | itemsEvictedUnfetched: prometheus.NewDesc( 413 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_evicted_unfetched_total"), 414 | "Total number of items evicted and never fetched.", 415 | []string{"slab"}, 416 | nil, 417 | ), 418 | itemsExpiredUnfetched: prometheus.NewDesc( 419 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_expired_unfetched_total"), 420 | "Total number of valid items evicted from the LRU which were never touched after being set.", 421 | []string{"slab"}, 422 | nil, 423 | ), 424 | itemsOutofmemory: prometheus.NewDesc( 425 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_outofmemory_total"), 426 | "Total number of items for this slab class that
have triggered an out of memory error.", 427 | []string{"slab"}, 428 | nil, 429 | ), 430 | itemsReclaimed: prometheus.NewDesc( 431 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_reclaimed_total"), 432 | "Total number of items reclaimed.", 433 | []string{"slab"}, 434 | nil, 435 | ), 436 | itemsTailrepairs: prometheus.NewDesc( 437 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_tailrepairs_total"), 438 | "Total number of times the entries for a particular ID need repairing.", 439 | []string{"slab"}, 440 | nil, 441 | ), 442 | itemsMovesToCold: prometheus.NewDesc( 443 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_moves_to_cold"), 444 | "Number of items moved from HOT or WARM into COLD.", 445 | []string{"slab"}, 446 | nil, 447 | ), 448 | itemsMovesToWarm: prometheus.NewDesc( 449 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_moves_to_warm"), 450 | "Number of items moved from COLD into WARM.", 451 | []string{"slab"}, 452 | nil, 453 | ), 454 | itemsMovesWithinLru: prometheus.NewDesc( 455 | prometheus.BuildFQName(Namespace, subsystemSlab, "items_moves_within_lru"), 456 | "Number of times active items were bumped within HOT or WARM.", 457 | []string{"slab"}, 458 | nil, 459 | ), 460 | itemsHot: prometheus.NewDesc( 461 | prometheus.BuildFQName(Namespace, subsystemSlab, "hot_items"), 462 | "Number of items presently stored in the HOT LRU.", 463 | []string{"slab"}, 464 | nil, 465 | ), 466 | itemsWarm: prometheus.NewDesc( 467 | prometheus.BuildFQName(Namespace, subsystemSlab, "warm_items"), 468 | "Number of items presently stored in the WARM LRU.", 469 | []string{"slab"}, 470 | nil, 471 | ), 472 | itemsCold: prometheus.NewDesc( 473 | prometheus.BuildFQName(Namespace, subsystemSlab, "cold_items"), 474 | "Number of items presently stored in the COLD LRU.", 475 | []string{"slab"}, 476 | nil, 477 | ), 478 | itemsTemporary: prometheus.NewDesc( 479 | prometheus.BuildFQName(Namespace, subsystemSlab, "temporary_items"), 480 | "Number of items presently stored in the TEMPORARY LRU.", 481 | []string{"slab"}, 482 | nil, 483 | ), 484 | itemsAgeOldestHot: prometheus.NewDesc( 485 | prometheus.BuildFQName(Namespace, subsystemSlab, "hot_age_seconds"), 486 | "Age of the oldest item in HOT LRU.", 487 | []string{"slab"}, 488 | nil, 489 | ), 490 | itemsAgeOldestWarm: prometheus.NewDesc( 491 | prometheus.BuildFQName(Namespace, subsystemSlab, "warm_age_seconds"), 492 | "Age of the oldest item in WARM LRU.", 493 | []string{"slab"}, 494 | nil, 495 | ), 496 | itemsLruHits: prometheus.NewDesc( 497 | prometheus.BuildFQName(Namespace, subsystemSlab, "lru_hits_total"), 498 | "Number of get_hits to the LRU.", 499 | []string{"slab", "lru"}, 500 | nil, 501 | ), 502 | slabsChunkSize: prometheus.NewDesc( 503 | prometheus.BuildFQName(Namespace, subsystemSlab, "chunk_size_bytes"), 504 | "Number of bytes allocated to each chunk within this slab class.", 505 | []string{"slab"}, 506 | nil, 507 | ), 508 | slabsChunksPerPage: prometheus.NewDesc( 509 | prometheus.BuildFQName(Namespace, subsystemSlab, "chunks_per_page"), 510 | "Number of chunks within a single page for this slab class.", 511 | []string{"slab"}, 512 | nil, 513 | ), 514 | slabsCurrentPages: prometheus.NewDesc( 515 | prometheus.BuildFQName(Namespace, subsystemSlab, "current_pages"), 516 | "Number of pages allocated to this slab class.", 517 | []string{"slab"}, 518 | nil, 519 | ), 520 | slabsCurrentChunks: prometheus.NewDesc( 521 | prometheus.BuildFQName(Namespace, subsystemSlab, "current_chunks"), 522 | "Number of chunks allocated to this
slab class.", 523 | []string{"slab"}, 524 | nil, 525 | ), 526 | slabsChunksUsed: prometheus.NewDesc( 527 | prometheus.BuildFQName(Namespace, subsystemSlab, "chunks_used"), 528 | "Number of chunks allocated to an item.", 529 | []string{"slab"}, 530 | nil, 531 | ), 532 | slabsChunksFree: prometheus.NewDesc( 533 | prometheus.BuildFQName(Namespace, subsystemSlab, "chunks_free"), 534 | "Number of chunks not yet allocated items.", 535 | []string{"slab"}, 536 | nil, 537 | ), 538 | slabsChunksFreeEnd: prometheus.NewDesc( 539 | prometheus.BuildFQName(Namespace, subsystemSlab, "chunks_free_end"), 540 | "Number of free chunks at the end of the last allocated page.", 541 | []string{"slab"}, 542 | nil, 543 | ), 544 | slabsMemRequested: prometheus.NewDesc( 545 | prometheus.BuildFQName(Namespace, subsystemSlab, "mem_requested_bytes"), 546 | "Number of bytes of memory actual items take up within a slab.", 547 | []string{"slab"}, 548 | nil, 549 | ), 550 | slabsCommands: prometheus.NewDesc( 551 | prometheus.BuildFQName(Namespace, subsystemSlab, "commands_total"), 552 | "Total number of all requests broken down by command (get, set, etc.) and status per slab.", 553 | []string{"slab", "command", "status"}, 554 | nil, 555 | ), 556 | extstoreCompactLost: prometheus.NewDesc( 557 | prometheus.BuildFQName(Namespace, "", "extstore_compact_lost_total"), 558 | "Total number of items lost because they were locked during extstore compaction.", 559 | nil, 560 | nil, 561 | ), 562 | extstoreCompactRescues: prometheus.NewDesc( 563 | prometheus.BuildFQName(Namespace, "", "extstore_compact_rescued_total"), 564 | "Total number of items moved to a new page during extstore compaction,", 565 | nil, 566 | nil, 567 | ), 568 | extstoreCompactSkipped: prometheus.NewDesc( 569 | prometheus.BuildFQName(Namespace, "", "extstore_compact_skipped_total"), 570 | "Total number of items dropped due to inactivity during extstore compaction.", 571 | nil, 572 | nil, 573 | ), 574 | extstorePageAllocs: prometheus.NewDesc( 575 | prometheus.BuildFQName(Namespace, "", "extstore_pages_allocated_total"), 576 | "Total number of times a page was allocated in extstore.", 577 | nil, 578 | nil, 579 | ), 580 | extstorePageEvictions: prometheus.NewDesc( 581 | prometheus.BuildFQName(Namespace, "", "extstore_pages_evicted_total"), 582 | "Total number of times a page was evicted from extstore.", 583 | nil, 584 | nil, 585 | ), 586 | extstorePageReclaims: prometheus.NewDesc( 587 | prometheus.BuildFQName(Namespace, "", "extstore_pages_reclaimed_total"), 588 | "Total number of times an empty extstore page was freed.", 589 | nil, 590 | nil, 591 | ), 592 | extstorePagesFree: prometheus.NewDesc( 593 | prometheus.BuildFQName(Namespace, "", "extstore_pages_free"), 594 | "Number of extstore pages not yet containing any items.", 595 | nil, 596 | nil, 597 | ), 598 | extstorePagesUsed: prometheus.NewDesc( 599 | prometheus.BuildFQName(Namespace, "", "extstore_pages_used"), 600 | "Number of extstore pages containing at least one item.", 601 | nil, 602 | nil, 603 | ), 604 | extstoreObjectsEvicted: prometheus.NewDesc( 605 | prometheus.BuildFQName(Namespace, "", "extstore_objects_evicted_total"), 606 | "Total number of items evicted from extstore to free up space.", 607 | nil, 608 | nil, 609 | ), 610 | extstoreObjectsRead: prometheus.NewDesc( 611 | prometheus.BuildFQName(Namespace, "", "extstore_objects_read_total"), 612 | "Total number of items read from extstore.", 613 | nil, 614 | nil, 615 | ), 616 | extstoreObjectsWritten: prometheus.NewDesc( 617 | 
prometheus.BuildFQName(Namespace, "", "extstore_objects_written_total"), 618 | "Total number of items written to extstore.", 619 | nil, 620 | nil, 621 | ), 622 | extstoreObjectsUsed: prometheus.NewDesc( 623 | prometheus.BuildFQName(Namespace, "", "extstore_objects_used"), 624 | "Number of items stored in extstore.", 625 | nil, 626 | nil, 627 | ), 628 | extstoreBytesEvicted: prometheus.NewDesc( 629 | prometheus.BuildFQName(Namespace, "", "extstore_bytes_evicted_total"), 630 | "Total number of bytes evicted from extstore to free up space.", 631 | nil, 632 | nil, 633 | ), 634 | extstoreBytesWritten: prometheus.NewDesc( 635 | prometheus.BuildFQName(Namespace, "", "extstore_bytes_written_total"), 636 | "Total number of bytes written to extstore.", 637 | nil, 638 | nil, 639 | ), 640 | extstoreBytesRead: prometheus.NewDesc( 641 | prometheus.BuildFQName(Namespace, "", "extstore_bytes_read_total"), 642 | "Total number of bytes read from extstore.", 643 | nil, 644 | nil, 645 | ), 646 | extstoreBytesUsed: prometheus.NewDesc( 647 | prometheus.BuildFQName(Namespace, "", "extstore_bytes_used"), 648 | "Current number of bytes used to store items in extstore.", 649 | nil, 650 | nil, 651 | ), 652 | extstoreBytesFragmented: prometheus.NewDesc( 653 | prometheus.BuildFQName(Namespace, "", "extstore_bytes_fragmented"), 654 | "Current number of bytes in extstore pages allocated but not used to store an object.", 655 | nil, 656 | nil, 657 | ), 658 | extstoreBytesLimit: prometheus.NewDesc( 659 | prometheus.BuildFQName(Namespace, "", "extstore_bytes_limit"), 660 | "Number of bytes of external storage allocated for this server.", 661 | nil, 662 | nil, 663 | ), 664 | extstoreIOQueueDepth: prometheus.NewDesc( 665 | prometheus.BuildFQName(Namespace, "", "extstore_io_queue_depth"), 666 | "Number of items in the I/O queue waiting to be processed.", 667 | nil, 668 | nil, 669 | ), 670 | acceptingConnections: prometheus.NewDesc( 671 | prometheus.BuildFQName(Namespace, "", "accepting_connections"), 672 | "The Memcached server is currently accepting new connections.", 673 | nil, 674 | nil, 675 | ), 676 | } 677 | } 678 | 679 | // Describe describes all the metrics exported by the memcached exporter. It 680 | // implements prometheus.Collector. 
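// All descriptions are sent unconditionally, including those for statistics
// that a given memcached version or configuration may never report.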
681 | func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { 682 | ch <- e.up 683 | ch <- e.uptime 684 | ch <- e.time 685 | ch <- e.version 686 | ch <- e.rusageUser 687 | ch <- e.rusageSystem 688 | ch <- e.bytesRead 689 | ch <- e.bytesWritten 690 | ch <- e.currentConnections 691 | ch <- e.maxConnections 692 | ch <- e.connectionsTotal 693 | ch <- e.rejectedConnections 694 | ch <- e.connsYieldedTotal 695 | ch <- e.listenerDisabledTotal 696 | ch <- e.currentBytes 697 | ch <- e.limitBytes 698 | ch <- e.commands 699 | ch <- e.items 700 | ch <- e.itemsTotal 701 | ch <- e.evictions 702 | ch <- e.reclaimed 703 | ch <- e.itemStoreTooLarge 704 | ch <- e.itemStoreNoMemory 705 | ch <- e.lruCrawlerEnabled 706 | ch <- e.lruCrawlerSleep 707 | ch <- e.lruCrawlerMaxItems 708 | ch <- e.lruMaintainerThread 709 | ch <- e.lruHotPercent 710 | ch <- e.lruWarmPercent 711 | ch <- e.lruHotMaxAgeFactor 712 | ch <- e.lruWarmMaxAgeFactor 713 | ch <- e.lruCrawlerStarts 714 | ch <- e.directReclaims 715 | ch <- e.lruCrawlerReclaimed 716 | ch <- e.lruCrawlerItemsChecked 717 | ch <- e.lruCrawlerMovesToCold 718 | ch <- e.lruCrawlerMovesToWarm 719 | ch <- e.lruCrawlerMovesWithinLru 720 | ch <- e.itemsLruHits 721 | ch <- e.malloced 722 | ch <- e.itemsNumber 723 | ch <- e.itemsAge 724 | ch <- e.itemsCrawlerReclaimed 725 | ch <- e.itemsEvicted 726 | ch <- e.itemsEvictedNonzero 727 | ch <- e.itemsEvictedTime 728 | ch <- e.itemsEvictedUnfetched 729 | ch <- e.itemsExpiredUnfetched 730 | ch <- e.itemsOutofmemory 731 | ch <- e.itemsReclaimed 732 | ch <- e.itemsTailrepairs 733 | ch <- e.itemsExpiredUnfetched 734 | ch <- e.itemsMovesToCold 735 | ch <- e.itemsMovesToWarm 736 | ch <- e.itemsMovesWithinLru 737 | ch <- e.itemsHot 738 | ch <- e.itemsWarm 739 | ch <- e.itemsCold 740 | ch <- e.itemsTemporary 741 | ch <- e.itemsAgeOldestHot 742 | ch <- e.itemsAgeOldestWarm 743 | ch <- e.slabsChunkSize 744 | ch <- e.slabsChunksPerPage 745 | ch <- e.slabsCurrentPages 746 | ch <- e.slabsCurrentChunks 747 | ch <- e.slabsChunksUsed 748 | ch <- e.slabsChunksFree 749 | ch <- e.slabsChunksFreeEnd 750 | ch <- e.slabsMemRequested 751 | ch <- e.slabsCommands 752 | ch <- e.extstoreCompactLost 753 | ch <- e.extstoreCompactRescues 754 | ch <- e.extstoreCompactSkipped 755 | ch <- e.extstorePageAllocs 756 | ch <- e.extstorePageEvictions 757 | ch <- e.extstorePageReclaims 758 | ch <- e.extstorePagesFree 759 | ch <- e.extstorePagesUsed 760 | ch <- e.extstoreObjectsEvicted 761 | ch <- e.extstoreObjectsRead 762 | ch <- e.extstoreObjectsWritten 763 | ch <- e.extstoreObjectsUsed 764 | ch <- e.extstoreBytesEvicted 765 | ch <- e.extstoreBytesWritten 766 | ch <- e.extstoreBytesRead 767 | ch <- e.extstoreBytesUsed 768 | ch <- e.extstoreBytesFragmented 769 | ch <- e.extstoreBytesLimit 770 | ch <- e.extstoreIOQueueDepth 771 | ch <- e.acceptingConnections 772 | } 773 | 774 | // Collect fetches the statistics from the configured memcached server, and 775 | // delivers them as Prometheus metrics. It implements prometheus.Collector. 
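// Each call dials a fresh connection; memcached_up is set to 0 if connecting,
// querying stats, or parsing the results fails.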
776 | func (e *Exporter) Collect(ch chan<- prometheus.Metric) { 777 | c, err := memcache.New(e.address) 778 | if err != nil { 779 | ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, 0) 780 | e.logger.Error("Failed to connect to memcached", "err", err) 781 | return 782 | } 783 | c.Timeout = e.timeout 784 | c.TlsConfig = e.tlsConfig 785 | 786 | up := float64(1) 787 | stats, err := c.Stats() 788 | if err != nil { 789 | e.logger.Error("Failed to collect stats from memcached", "err", err) 790 | up = 0 791 | } 792 | statsSettings, err := c.StatsSettings() 793 | if err != nil { 794 | e.logger.Error("Could not query stats settings", "err", err) 795 | up = 0 796 | } 797 | 798 | if err := e.parseStats(ch, stats); err != nil { 799 | up = 0 800 | } 801 | if err := e.parseStatsSettings(ch, statsSettings); err != nil { 802 | up = 0 803 | } 804 | 805 | ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, up) 806 | } 807 | 808 | func (e *Exporter) parseStats(ch chan<- prometheus.Metric, stats map[net.Addr]memcache.Stats) error { 809 | // TODO(ts): Clean up and consolidate metric mappings. 810 | itemsCounterMetrics := map[string]*prometheus.Desc{ 811 | "crawler_reclaimed": e.itemsCrawlerReclaimed, 812 | "evicted": e.itemsEvicted, 813 | "evicted_nonzero": e.itemsEvictedNonzero, 814 | "evicted_time": e.itemsEvictedTime, 815 | "evicted_unfetched": e.itemsEvictedUnfetched, 816 | "expired_unfetched": e.itemsExpiredUnfetched, 817 | "outofmemory": e.itemsOutofmemory, 818 | "reclaimed": e.itemsReclaimed, 819 | "store_too_large": e.itemStoreTooLarge, 820 | "store_no_memory": e.itemStoreNoMemory, 821 | "tailrepairs": e.itemsTailrepairs, 822 | "mem_requested": e.slabsMemRequested, 823 | "moves_to_cold": e.itemsMovesToCold, 824 | "moves_to_warm": e.itemsMovesToWarm, 825 | "moves_within_lru": e.itemsMovesWithinLru, 826 | } 827 | 828 | itemsGaugeMetrics := map[string]*prometheus.Desc{ 829 | "number_hot": e.itemsHot, 830 | "number_warm": e.itemsWarm, 831 | "number_cold": e.itemsCold, 832 | "number_temp": e.itemsTemporary, 833 | "age_hot": e.itemsAgeOldestHot, 834 | "age_warm": e.itemsAgeOldestWarm, 835 | } 836 | 837 | var parseError error 838 | for _, t := range stats { 839 | s := t.Stats 840 | ch <- prometheus.MustNewConstMetric(e.version, prometheus.GaugeValue, 1, s["version"]) 841 | 842 | for _, op := range []string{"get", "delete", "incr", "decr", "cas", "touch"} { 843 | err := firstError( 844 | e.parseAndNewMetric(ch, e.commands, prometheus.CounterValue, s, op+"_hits", op, "hit"), 845 | e.parseAndNewMetric(ch, e.commands, prometheus.CounterValue, s, op+"_misses", op, "miss"), 846 | ) 847 | if err != nil { 848 | parseError = err 849 | } 850 | } 851 | err := firstError( 852 | e.parseAndNewMetric(ch, e.uptime, prometheus.CounterValue, s, "uptime"), 853 | e.parseAndNewMetric(ch, e.time, prometheus.GaugeValue, s, "time"), 854 | e.parseAndNewMetric(ch, e.commands, prometheus.CounterValue, s, "cas_badval", "cas", "badval"), 855 | e.parseAndNewMetric(ch, e.commands, prometheus.CounterValue, s, "cmd_flush", "flush", "hit"), 856 | ) 857 | if err != nil { 858 | parseError = err 859 | } 860 | 861 | // memcached includes cas operations again in cmd_set. 
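// Subtract the cas hits, misses, and badvals from cmd_set so the "set" counter
// reflects plain set commands only.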
862 | setCmd, err := parse(s, "cmd_set", e.logger) 863 | if err == nil { 864 | if cas, casErr := sum(s, "cas_misses", "cas_hits", "cas_badval"); casErr == nil { 865 | ch <- prometheus.MustNewConstMetric(e.commands, prometheus.CounterValue, setCmd-cas, "set", "hit") 866 | } else { 867 | e.logger.Error("Failed to parse cas", "err", casErr) 868 | parseError = casErr 869 | } 870 | } else { 871 | e.logger.Error("Failed to parse set", "err", err) 872 | parseError = err 873 | } 874 | 875 | // extstore stats are only included if extstore is actually active. Take the presence of the 876 | // maxbytes key as a signal that they all should be there and do the parsing 877 | if _, ok := s["extstore_limit_maxbytes"]; ok { 878 | err = firstError( 879 | e.parseAndNewMetric(ch, e.extstoreCompactLost, prometheus.CounterValue, s, "extstore_compact_lost"), 880 | e.parseAndNewMetric(ch, e.extstoreCompactRescues, prometheus.CounterValue, s, "extstore_compact_rescues"), 881 | e.parseAndNewMetric(ch, e.extstoreCompactSkipped, prometheus.CounterValue, s, "extstore_compact_skipped"), 882 | e.parseAndNewMetric(ch, e.extstorePageAllocs, prometheus.CounterValue, s, "extstore_page_allocs"), 883 | e.parseAndNewMetric(ch, e.extstorePageEvictions, prometheus.CounterValue, s, "extstore_page_evictions"), 884 | e.parseAndNewMetric(ch, e.extstorePageReclaims, prometheus.CounterValue, s, "extstore_page_reclaims"), 885 | e.parseAndNewMetric(ch, e.extstorePagesFree, prometheus.GaugeValue, s, "extstore_pages_free"), 886 | e.parseAndNewMetric(ch, e.extstorePagesUsed, prometheus.GaugeValue, s, "extstore_pages_used"), 887 | e.parseAndNewMetric(ch, e.extstoreObjectsEvicted, prometheus.CounterValue, s, "extstore_objects_evicted"), 888 | e.parseAndNewMetric(ch, e.extstoreObjectsRead, prometheus.CounterValue, s, "extstore_objects_read"), 889 | e.parseAndNewMetric(ch, e.extstoreObjectsWritten, prometheus.CounterValue, s, "extstore_objects_written"), 890 | e.parseAndNewMetric(ch, e.extstoreObjectsUsed, prometheus.GaugeValue, s, "extstore_objects_used"), 891 | e.parseAndNewMetric(ch, e.extstoreBytesEvicted, prometheus.CounterValue, s, "extstore_bytes_evicted"), 892 | e.parseAndNewMetric(ch, e.extstoreBytesWritten, prometheus.CounterValue, s, "extstore_bytes_written"), 893 | e.parseAndNewMetric(ch, e.extstoreBytesRead, prometheus.CounterValue, s, "extstore_bytes_read"), 894 | e.parseAndNewMetric(ch, e.extstoreBytesUsed, prometheus.CounterValue, s, "extstore_bytes_used"), 895 | e.parseAndNewMetric(ch, e.extstoreBytesFragmented, prometheus.GaugeValue, s, "extstore_bytes_fragmented"), 896 | e.parseAndNewMetric(ch, e.extstoreBytesLimit, prometheus.GaugeValue, s, "extstore_limit_maxbytes"), 897 | e.parseAndNewMetric(ch, e.extstoreIOQueueDepth, prometheus.GaugeValue, s, "extstore_io_queue"), 898 | ) 899 | if err != nil { 900 | parseError = err 901 | } 902 | } 903 | 904 | err = firstError( 905 | e.parseTimevalAndNewMetric(ch, e.rusageUser, prometheus.CounterValue, s, "rusage_user"), 906 | e.parseTimevalAndNewMetric(ch, e.rusageSystem, prometheus.CounterValue, s, "rusage_system"), 907 | e.parseAndNewMetric(ch, e.currentBytes, prometheus.GaugeValue, s, "bytes"), 908 | e.parseAndNewMetric(ch, e.limitBytes, prometheus.GaugeValue, s, "limit_maxbytes"), 909 | e.parseAndNewMetric(ch, e.items, prometheus.GaugeValue, s, "curr_items"), 910 | e.parseAndNewMetric(ch, e.itemsTotal, prometheus.CounterValue, s, "total_items"), 911 | e.parseAndNewMetric(ch, e.bytesRead, prometheus.CounterValue, s, "bytes_read"), 912 | e.parseAndNewMetric(ch, e.bytesWritten, 
prometheus.CounterValue, s, "bytes_written"), 913 | e.parseAndNewMetric(ch, e.currentConnections, prometheus.GaugeValue, s, "curr_connections"), 914 | e.parseAndNewMetric(ch, e.connectionsTotal, prometheus.CounterValue, s, "total_connections"), 915 | e.parseAndNewMetric(ch, e.rejectedConnections, prometheus.CounterValue, s, "rejected_connections"), 916 | e.parseAndNewMetric(ch, e.connsYieldedTotal, prometheus.CounterValue, s, "conn_yields"), 917 | e.parseAndNewMetric(ch, e.listenerDisabledTotal, prometheus.CounterValue, s, "listen_disabled_num"), 918 | e.parseAndNewMetric(ch, e.evictions, prometheus.CounterValue, s, "evictions"), 919 | e.parseAndNewMetric(ch, e.reclaimed, prometheus.CounterValue, s, "reclaimed"), 920 | e.parseAndNewMetric(ch, e.itemStoreTooLarge, prometheus.CounterValue, s, "store_too_large"), 921 | e.parseAndNewMetric(ch, e.itemStoreNoMemory, prometheus.CounterValue, s, "store_no_memory"), 922 | e.parseAndNewMetric(ch, e.lruCrawlerStarts, prometheus.CounterValue, s, "lru_crawler_starts"), 923 | e.parseAndNewMetric(ch, e.directReclaims, prometheus.CounterValue, s, "direct_reclaims"), 924 | e.parseAndNewMetric(ch, e.lruCrawlerItemsChecked, prometheus.CounterValue, s, "crawler_items_checked"), 925 | e.parseAndNewMetric(ch, e.lruCrawlerReclaimed, prometheus.CounterValue, s, "crawler_reclaimed"), 926 | e.parseAndNewMetric(ch, e.lruCrawlerMovesToCold, prometheus.CounterValue, s, "moves_to_cold"), 927 | e.parseAndNewMetric(ch, e.lruCrawlerMovesToWarm, prometheus.CounterValue, s, "moves_to_warm"), 928 | e.parseAndNewMetric(ch, e.lruCrawlerMovesWithinLru, prometheus.CounterValue, s, "moves_within_lru"), 929 | e.parseAndNewMetric(ch, e.malloced, prometheus.GaugeValue, s, "total_malloced"), 930 | e.parseAndNewMetric(ch, e.acceptingConnections, prometheus.GaugeValue, s, "accepting_conns"), 931 | ) 932 | if err != nil { 933 | parseError = err 934 | } 935 | 936 | for slab, u := range t.Items { 937 | slab := strconv.Itoa(slab) 938 | err := firstError( 939 | e.parseAndNewMetric(ch, e.itemsNumber, prometheus.GaugeValue, u, "number", slab), 940 | e.parseAndNewMetric(ch, e.itemsAge, prometheus.GaugeValue, u, "age", slab), 941 | e.parseAndNewMetric(ch, e.itemsLruHits, prometheus.CounterValue, u, "hits_to_hot", slab, "hot"), 942 | e.parseAndNewMetric(ch, e.itemsLruHits, prometheus.CounterValue, u, "hits_to_warm", slab, "warm"), 943 | e.parseAndNewMetric(ch, e.itemsLruHits, prometheus.CounterValue, u, "hits_to_cold", slab, "cold"), 944 | e.parseAndNewMetric(ch, e.itemsLruHits, prometheus.CounterValue, u, "hits_to_temp", slab, "temporary"), 945 | ) 946 | if err != nil { 947 | parseError = err 948 | } 949 | for m, d := range itemsCounterMetrics { 950 | if _, ok := u[m]; !ok { 951 | continue 952 | } 953 | if err := e.parseAndNewMetric(ch, d, prometheus.CounterValue, u, m, slab); err != nil { 954 | parseError = err 955 | } 956 | } 957 | for m, d := range itemsGaugeMetrics { 958 | if _, ok := u[m]; !ok { 959 | continue 960 | } 961 | if err := e.parseAndNewMetric(ch, d, prometheus.GaugeValue, u, m, slab); err != nil { 962 | parseError = err 963 | } 964 | } 965 | } 966 | 967 | for slab, v := range t.Slabs { 968 | slab := strconv.Itoa(slab) 969 | 970 | for _, op := range []string{"get", "delete", "incr", "decr", "cas", "touch"} { 971 | if err := e.parseAndNewMetric(ch, e.slabsCommands, prometheus.CounterValue, v, op+"_hits", slab, op, "hit"); err != nil { 972 | parseError = err 973 | } 974 | } 975 | if err := e.parseAndNewMetric(ch, e.slabsCommands, prometheus.CounterValue, v, "cas_badval", slab, 
"cas", "badval"); err != nil { 976 | parseError = err 977 | } 978 | 979 | slabSetCmd, err := parse(v, "cmd_set", e.logger) 980 | if err == nil { 981 | if slabCas, slabCasErr := sum(v, "cas_hits", "cas_badval"); slabCasErr == nil { 982 | ch <- prometheus.MustNewConstMetric(e.slabsCommands, prometheus.CounterValue, slabSetCmd-slabCas, slab, "set", "hit") 983 | } else { 984 | e.logger.Error("Failed to parse cas", "err", slabCasErr) 985 | parseError = slabCasErr 986 | } 987 | } else { 988 | e.logger.Error("Failed to parse set", "err", err) 989 | parseError = err 990 | } 991 | 992 | err = firstError( 993 | e.parseAndNewMetric(ch, e.slabsChunkSize, prometheus.GaugeValue, v, "chunk_size", slab), 994 | e.parseAndNewMetric(ch, e.slabsChunksPerPage, prometheus.GaugeValue, v, "chunks_per_page", slab), 995 | e.parseAndNewMetric(ch, e.slabsCurrentPages, prometheus.GaugeValue, v, "total_pages", slab), 996 | e.parseAndNewMetric(ch, e.slabsCurrentChunks, prometheus.GaugeValue, v, "total_chunks", slab), 997 | e.parseAndNewMetric(ch, e.slabsChunksUsed, prometheus.GaugeValue, v, "used_chunks", slab), 998 | e.parseAndNewMetric(ch, e.slabsChunksFree, prometheus.GaugeValue, v, "free_chunks", slab), 999 | e.parseAndNewMetric(ch, e.slabsChunksFreeEnd, prometheus.GaugeValue, v, "free_chunks_end", slab), 1000 | e.parseAndNewMetric(ch, e.slabsMemRequested, prometheus.GaugeValue, v, "mem_requested", slab), 1001 | ) 1002 | if err != nil { 1003 | parseError = err 1004 | } 1005 | } 1006 | } 1007 | 1008 | return parseError 1009 | } 1010 | 1011 | func (e *Exporter) parseStatsSettings(ch chan<- prometheus.Metric, statsSettings map[net.Addr]map[string]string) error { 1012 | var parseError error 1013 | for _, settings := range statsSettings { 1014 | if err := e.parseAndNewMetric(ch, e.maxConnections, prometheus.GaugeValue, settings, "maxconns"); err != nil { 1015 | parseError = err 1016 | } 1017 | 1018 | if v, ok := settings["lru_crawler"]; ok && v == "yes" { 1019 | err := firstError( 1020 | e.parseBoolAndNewMetric(ch, e.lruCrawlerEnabled, prometheus.GaugeValue, settings, "lru_crawler"), 1021 | e.parseAndNewMetric(ch, e.lruCrawlerSleep, prometheus.GaugeValue, settings, "lru_crawler_sleep"), 1022 | e.parseAndNewMetric(ch, e.lruCrawlerMaxItems, prometheus.GaugeValue, settings, "lru_crawler_tocrawl"), 1023 | e.parseBoolAndNewMetric(ch, e.lruMaintainerThread, prometheus.GaugeValue, settings, "lru_maintainer_thread"), 1024 | e.parseAndNewMetric(ch, e.lruHotPercent, prometheus.GaugeValue, settings, "hot_lru_pct"), 1025 | e.parseAndNewMetric(ch, e.lruWarmPercent, prometheus.GaugeValue, settings, "warm_lru_pct"), 1026 | e.parseAndNewMetric(ch, e.lruHotMaxAgeFactor, prometheus.GaugeValue, settings, "hot_max_factor"), 1027 | e.parseAndNewMetric(ch, e.lruWarmMaxAgeFactor, prometheus.GaugeValue, settings, "warm_max_factor"), 1028 | ) 1029 | if err != nil { 1030 | parseError = err 1031 | } 1032 | } 1033 | } 1034 | return parseError 1035 | } 1036 | 1037 | func (e *Exporter) parseAndNewMetric(ch chan<- prometheus.Metric, desc *prometheus.Desc, valueType prometheus.ValueType, stats map[string]string, key string, labelValues ...string) error { 1038 | return e.extractValueAndNewMetric(ch, desc, valueType, parse, stats, key, labelValues...) 
1039 | } 1040 | 1041 | func (e *Exporter) parseBoolAndNewMetric(ch chan<- prometheus.Metric, desc *prometheus.Desc, valueType prometheus.ValueType, stats map[string]string, key string, labelValues ...string) error { 1042 | return e.extractValueAndNewMetric(ch, desc, valueType, parseBool, stats, key, labelValues...) 1043 | } 1044 | 1045 | func (e *Exporter) parseTimevalAndNewMetric(ch chan<- prometheus.Metric, desc *prometheus.Desc, valueType prometheus.ValueType, stats map[string]string, key string, labelValues ...string) error { 1046 | return e.extractValueAndNewMetric(ch, desc, valueType, parseTimeval, stats, key, labelValues...) 1047 | } 1048 | 1049 | func (e *Exporter) extractValueAndNewMetric(ch chan<- prometheus.Metric, desc *prometheus.Desc, valueType prometheus.ValueType, f func(map[string]string, string, *slog.Logger) (float64, error), stats map[string]string, key string, labelValues ...string) error { 1050 | v, err := f(stats, key, e.logger) 1051 | if err == errKeyNotFound { 1052 | return nil 1053 | } 1054 | if err != nil { 1055 | return err 1056 | } 1057 | 1058 | ch <- prometheus.MustNewConstMetric(desc, valueType, v, labelValues...) 1059 | return nil 1060 | } 1061 | 1062 | func parse(stats map[string]string, key string, logger *slog.Logger) (float64, error) { 1063 | value, ok := stats[key] 1064 | if !ok { 1065 | logger.Debug("Key not found", "key", key) 1066 | return 0, errKeyNotFound 1067 | } 1068 | 1069 | v, err := strconv.ParseFloat(value, 64) 1070 | if err != nil { 1071 | logger.Error("Failed to parse", "key", key, "value", value, "err", err) 1072 | return 0, err 1073 | } 1074 | return v, nil 1075 | } 1076 | 1077 | func parseBool(stats map[string]string, key string, logger *slog.Logger) (float64, error) { 1078 | value, ok := stats[key] 1079 | if !ok { 1080 | logger.Debug("Key not found", "key", key) 1081 | return 0, errKeyNotFound 1082 | } 1083 | 1084 | switch value { 1085 | case "yes": 1086 | return 1, nil 1087 | case "no": 1088 | return 0, nil 1089 | default: 1090 | logger.Error("Failed to parse", "key", key, "value", value) 1091 | return 0, errors.New("failed to parse a bool value") 1092 | } 1093 | } 1094 | 1095 | func parseTimeval(stats map[string]string, key string, logger *slog.Logger) (float64, error) { 1096 | value, ok := stats[key] 1097 | if !ok { 1098 | logger.Debug("Key not found", "key", key) 1099 | return 0, errKeyNotFound 1100 | } 1101 | values := strings.Split(value, ".") 1102 | 1103 | if len(values) != 2 { 1104 | logger.Error("Failed to parse", "key", key, "value", value) 1105 | return 0, errors.New("failed to parse a timeval value") 1106 | } 1107 | 1108 | seconds, err := strconv.ParseFloat(values[0], 64) 1109 | if err != nil { 1110 | logger.Error("Failed to parse", "key", key, "value", value, "err", err) 1111 | return 0, errors.New("failed to parse a timeval value") 1112 | } 1113 | 1114 | microseconds, err := strconv.ParseFloat(values[1], 64) 1115 | if err != nil { 1116 | logger.Error("Failed to parse", "key", key, "value", value, "err", err) 1117 | return 0, errors.New("failed to parse a timeval value") 1118 | } 1119 | 1120 | return (seconds + microseconds/(1000.0*1000.0)), nil 1121 | } 1122 | 1123 | func sum(stats map[string]string, keys ...string) (float64, error) { 1124 | s := 0.
1125 | for _, key := range keys { 1126 | if _, ok := stats[key]; !ok { 1127 | return 0, errKeyNotFound 1128 | } 1129 | v, err := strconv.ParseFloat(stats[key], 64) 1130 | if err != nil { 1131 | return 0, err 1132 | } 1133 | s += v 1134 | } 1135 | return s, nil 1136 | } 1137 | 1138 | func firstError(errors ...error) error { 1139 | for _, v := range errors { 1140 | if v != nil { 1141 | return v 1142 | } 1143 | } 1144 | return nil 1145 | } 1146 | -------------------------------------------------------------------------------- /pkg/exporter/exporter_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package exporter 15 | 16 | import ( 17 | "net" 18 | "testing" 19 | "time" 20 | 21 | "github.com/prometheus/client_golang/prometheus" 22 | "github.com/prometheus/common/promslog" 23 | ) 24 | 25 | func TestParseStatsSettings(t *testing.T) { 26 | addr, err := net.ResolveIPAddr("ip4", "127.0.0.1") 27 | if err != nil { 28 | t.Fatal(err) 29 | } 30 | 31 | t.Run("Success", func(t *testing.T) { 32 | t.Parallel() 33 | 34 | var statsSettings = map[net.Addr]map[string]string{ 35 | addr: { 36 | "maxconns": "10", 37 | "lru_crawler": "yes", 38 | "lru_crawler_sleep": "100", 39 | "lru_crawler_tocrawl": "0", 40 | "lru_maintainer_thread": "no", 41 | "hot_lru_pct": "20", 42 | "warm_lru_pct": "40", 43 | "hot_max_factor": "0.20", 44 | "warm_max_factor": "2.00", 45 | "accepting_conns": "1", 46 | }, 47 | } 48 | ch := make(chan prometheus.Metric, 100) 49 | e := New("", 100*time.Millisecond, promslog.NewNopLogger(), nil) 50 | if err := e.parseStatsSettings(ch, statsSettings); err != nil { 51 | t.Errorf("unexpected error: %v", err) 52 | } 53 | }) 54 | 55 | t.Run("Failure", func(t *testing.T) { 56 | t.Parallel() 57 | var statsSettings = map[net.Addr]map[string]string{ 58 | addr: { 59 | "maxconns": "10", 60 | "lru_crawler": "yes", 61 | "lru_crawler_sleep": "100", 62 | "lru_crawler_tocrawl": "0", 63 | "lru_maintainer_thread": "fail", 64 | "hot_lru_pct": "20", 65 | "warm_lru_pct": "40", 66 | "hot_max_factor": "0.20", 67 | "warm_max_factor": "2.00", 68 | "accepting_conns": "fail", 69 | }, 70 | } 71 | ch := make(chan prometheus.Metric, 100) 72 | e := New("", 100*time.Millisecond, promslog.NewNopLogger(), nil) 73 | if err := e.parseStatsSettings(ch, statsSettings); err == nil { 74 | t.Error("expected an error, got none") 75 | } 76 | }) 77 | } 78 | 79 | func TestParseTimeval(t *testing.T) { 80 | t.Run("Success", func(t *testing.T) { 81 | t.Parallel() 82 | _, err := parseTimeval(map[string]string{"rusage_system": "3.5"}, "rusage_system", promslog.NewNopLogger()) 83 | if err != nil { 84 | t.Errorf("unexpected error: %v", err) 85 | } 86 | }) 87 | 88 | t.Run("Failure", func(t *testing.T) { 89 | t.Parallel() 90 | _, err := parseTimeval(map[string]string{"rusage_system": "35"}, "rusage_system", promslog.NewNopLogger()) 91 | if err == nil {
92 | t.Error("expected an error, got none") 93 | } 94 | }) 95 | } 96 | -------------------------------------------------------------------------------- /scraper/scraper.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package scraper 15 | 16 | import ( 17 | "crypto/tls" 18 | "log/slog" 19 | "net/http" 20 | "time" 21 | 22 | "github.com/prometheus/client_golang/prometheus" 23 | "github.com/prometheus/client_golang/prometheus/promhttp" 24 | "github.com/prometheus/memcached_exporter/pkg/exporter" 25 | ) 26 | 27 | type Scraper struct { 28 | logger *slog.Logger 29 | timeout time.Duration 30 | tlsConfig *tls.Config 31 | 32 | scrapeCount prometheus.Counter 33 | scrapeErrors prometheus.Counter 34 | } 35 | 36 | func New(timeout time.Duration, logger *slog.Logger, tlsConfig *tls.Config) *Scraper { 37 | logger.Debug("Started scraper") 38 | return &Scraper{ 39 | logger: logger, 40 | timeout: timeout, 41 | tlsConfig: tlsConfig, 42 | scrapeCount: prometheus.NewCounter(prometheus.CounterOpts{ 43 | Name: "memcached_exporter_scrapes_total", 44 | Help: "Count of memcached exporter scrapes.", 45 | }), 46 | scrapeErrors: prometheus.NewCounter(prometheus.CounterOpts{ 47 | Name: "memcached_exporter_scrape_errors_total", 48 | Help: "Count of memcached exporter scrape errors.", 49 | }), 50 | } 51 | } 52 | 53 | func (s *Scraper) Handler() http.HandlerFunc { 54 | return func(w http.ResponseWriter, r *http.Request) { 55 | target := r.URL.Query().Get("target") 56 | s.logger.Debug("scraping memcached", "target", target) 57 | s.scrapeCount.Inc() 58 | 59 | if target == "" { 60 | errorStr := "'target' parameter must be specified" 61 | s.logger.Warn(errorStr) 62 | http.Error(w, errorStr, http.StatusBadRequest) 63 | s.scrapeErrors.Inc() 64 | return 65 | } 66 | 67 | e := exporter.New(target, s.timeout, s.logger, s.tlsConfig) 68 | registry := prometheus.NewRegistry() 69 | registry.MustRegister(e) 70 | 71 | promhttp.HandlerFor( 72 | registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}, 73 | ).ServeHTTP(w, r) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /scraper/scraper_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package scraper 15 | 16 | import ( 17 | "net/http" 18 | "net/http/httptest" 19 | "strings" 20 | "testing" 21 | "time" 22 | 23 | "github.com/prometheus/common/promslog" 24 | ) 25 | 26 | func TestHandler(t *testing.T) { 27 | t.Run("Success", func(t *testing.T) { 28 | t.Parallel() 29 | 30 | s := New(1*time.Second, promslog.NewNopLogger(), nil) 31 | 32 | req, err := http.NewRequest("GET", "/?target=127.0.0.1:11211", nil) 33 | 34 | if err != nil { 35 | t.Fatal(err) 36 | } 37 | 38 | rr := httptest.NewRecorder() 39 | handler := http.HandlerFunc(s.Handler()) 40 | 41 | handler.ServeHTTP(rr, req) 42 | 43 | if status := rr.Code; status != http.StatusOK { 44 | t.Errorf("handler returned wrong status code: got %d, want: %d. body: %s", 45 | status, http.StatusOK, rr.Body.String()) 46 | } 47 | 48 | memcachedUpMetric := "memcached_up 1" 49 | 50 | if body := rr.Body.String(); !strings.Contains(body, memcachedUpMetric) { 51 | t.Errorf("handler output did not contain expected metric. body: %s", body) 52 | } 53 | }) 54 | 55 | t.Run("No target", func(t *testing.T) { 56 | t.Parallel() 57 | 58 | s := New(1*time.Second, promslog.NewNopLogger(), nil) 59 | 60 | req, err := http.NewRequest("GET", "/", nil) 61 | 62 | if err != nil { 63 | t.Fatal(err) 64 | } 65 | 66 | rr := httptest.NewRecorder() 67 | handler := http.HandlerFunc(s.Handler()) 68 | 69 | handler.ServeHTTP(rr, req) 70 | 71 | if status := rr.Code; status != http.StatusBadRequest { 72 | t.Errorf("handler returned wrong status code: got %d, want: %d", rr.Code, http.StatusBadRequest) 73 | } 74 | }) 75 | } 76 | --------------------------------------------------------------------------------
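The Scraper above implements the multi-target pattern: each scrape names the memcached instance to probe via the target query parameter, so a single exporter process can serve many memcached servers. Below is a minimal sketch of how such a handler might be mounted; the /scrape path and the :9150 listen address are assumptions used only for illustration (the actual wiring lives in cmd/memcached_exporter/main.go, outside this excerpt).

package main

import (
	"log/slog"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/memcached_exporter/scraper"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	s := scraper.New(time.Second, logger, nil)

	// Each request selects its own target, e.g. GET /scrape?target=127.0.0.1:11211
	// (path and port are assumed here for illustration).
	http.HandleFunc("/scrape", s.Handler())
	logger.Error("server exited", "err", http.ListenAndServe(":9150", nil))
}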