├── .circleci └── config.yml ├── .dockerignore ├── .github ├── dependabot.yml └── workflows │ ├── container_description.yml │ └── golangci-lint.yml ├── .gitignore ├── .golangci.yml ├── .promu.yml ├── .yamllint ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── MAINTAINERS.md ├── Makefile ├── Makefile.common ├── NOTICE ├── Procfile ├── README.md ├── SECURITY.md ├── VERSION ├── collectors ├── cache.go ├── cache_test.go ├── collectors_suite_test.go ├── monitoring_collector.go ├── monitoring_collector_test.go └── monitoring_metrics.go ├── delta ├── counter.go ├── counter_test.go ├── delta_suite_test.go ├── histogram.go └── histogram_test.go ├── go.mod ├── go.sum ├── hash └── fnv.go ├── stackdriver_exporter.go ├── stackdriver_exporter_test.go └── utils ├── utils.go ├── utils_suite_test.go └── utils_test.go /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2.1 3 | orbs: 4 | prometheus: prometheus/prometheus@0.17.1 5 | executors: 6 | # This must match .promu.yml. 7 | golang: 8 | docker: 9 | - image: cimg/go:1.24 10 | jobs: 11 | test: 12 | executor: golang 13 | steps: 14 | - prometheus/setup_environment 15 | - run: make 16 | - prometheus/store_artifact: 17 | file: stackdriver_exporter 18 | workflows: 19 | version: 2 20 | stackdriver_exporter: 21 | jobs: 22 | - test: 23 | filters: 24 | tags: 25 | only: /.*/ 26 | - prometheus/build: 27 | name: build 28 | filters: 29 | tags: 30 | only: /.*/ 31 | - prometheus/publish_master: 32 | context: org-context 33 | docker_hub_organization: prometheuscommunity 34 | quay_io_organization: prometheuscommunity 35 | requires: 36 | - test 37 | - build 38 | filters: 39 | branches: 40 | only: master 41 | - prometheus/publish_release: 42 | context: org-context 43 | docker_hub_organization: prometheuscommunity 44 | quay_io_organization: prometheuscommunity 45 | requires: 46 | - test 47 | - build 48 | filters: 49 | tags: 50 | only: /^v.*/ 51 | branches: 52 | ignore: /.*/ 53 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .build/ 2 | .tarballs/ 3 | 4 | !.build/linux-amd64/ 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | schedule: 6 | interval: "monthly" 7 | -------------------------------------------------------------------------------- /.github/workflows/container_description.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Push README to Docker Hub 3 | on: 4 | push: 5 | paths: 6 | - "README.md" 7 | - "README-containers.md" 8 | - ".github/workflows/container_description.yml" 9 | branches: [ main, master ] 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | PushDockerHubReadme: 16 | runs-on: ubuntu-latest 17 | name: Push README to Docker Hub 18 | if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
19 | steps: 20 | - name: git checkout 21 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 22 | - name: Set docker hub repo name 23 | run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV 24 | - name: Push README to Dockerhub 25 | uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 26 | env: 27 | DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }} 28 | DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }} 29 | with: 30 | destination_container_repo: ${{ env.DOCKER_REPO_NAME }} 31 | provider: dockerhub 32 | short_description: ${{ env.DOCKER_REPO_NAME }} 33 | # Empty string results in README-containers.md being pushed if it 34 | # exists. Otherwise, README.md is pushed. 35 | readme_file: '' 36 | 37 | PushQuayIoReadme: 38 | runs-on: ubuntu-latest 39 | name: Push README to quay.io 40 | if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 41 | steps: 42 | - name: git checkout 43 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 44 | - name: Set quay.io org name 45 | run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV 46 | - name: Set quay.io repo name 47 | run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV 48 | - name: Push README to quay.io 49 | uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 50 | env: 51 | DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }} 52 | with: 53 | destination_container_repo: ${{ env.DOCKER_REPO_NAME }} 54 | provider: quay 55 | # Empty string results in README-containers.md being pushed if it 56 | # exists. Otherwise, README.md is pushed. 
57 | readme_file: '' 58 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This action is synced from https://github.com/prometheus/prometheus 3 | name: golangci-lint 4 | on: 5 | push: 6 | paths: 7 | - "go.sum" 8 | - "go.mod" 9 | - "**.go" 10 | - "scripts/errcheck_excludes.txt" 11 | - ".github/workflows/golangci-lint.yml" 12 | - ".golangci.yml" 13 | pull_request: 14 | 15 | permissions: # added using https://github.com/step-security/secure-repo 16 | contents: read 17 | 18 | jobs: 19 | golangci: 20 | permissions: 21 | contents: read # for actions/checkout to fetch code 22 | pull-requests: read # for golangci/golangci-lint-action to fetch pull requests 23 | name: lint 24 | runs-on: ubuntu-latest 25 | steps: 26 | - name: Checkout repository 27 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 28 | - name: Install Go 29 | uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 30 | with: 31 | go-version: 1.24.x 32 | - name: Install snmp_exporter/generator dependencies 33 | run: sudo apt-get update && sudo apt-get -y install libsnmp-dev 34 | if: github.repository == 'prometheus/snmp_exporter' 35 | - name: Lint 36 | uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # v6.5.2 37 | with: 38 | args: --verbose 39 | version: v1.64.6 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.build 2 | /.release 3 | /.tarballs 4 | /stackdriver_exporter 5 | *.tar.gz 6 | *.test 7 | *-stamp 8 | /vendor 9 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | linters: 3 | enable: 4 | - sloglint 5 | 6 | run: 7 | timeout: 5m 8 | 9 | issues: 10 | exclude-rules: 11 | - path: _test.go 12 | linters: 13 | - errcheck 14 | -------------------------------------------------------------------------------- /.promu.yml: -------------------------------------------------------------------------------- 1 | go: 2 | # This must match .circle/config.yml. 
3 | version: 1.24 4 | repository: 5 | path: github.com/prometheus-community/stackdriver_exporter 6 | build: 7 | binaries: 8 | - name: stackdriver_exporter 9 | ldflags: | 10 | -X github.com/prometheus/common/version.Version={{.Version}} 11 | -X github.com/prometheus/common/version.Revision={{.Revision}} 12 | -X github.com/prometheus/common/version.Branch={{.Branch}} 13 | -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} 14 | -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} 15 | tarball: 16 | files: 17 | - LICENSE 18 | - NOTICE 19 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | ignore: | 4 | **/node_modules 5 | 6 | rules: 7 | braces: 8 | max-spaces-inside: 1 9 | level: error 10 | brackets: 11 | max-spaces-inside: 1 12 | level: error 13 | commas: disable 14 | comments: disable 15 | comments-indentation: disable 16 | document-start: disable 17 | indentation: 18 | spaces: consistent 19 | indent-sequences: consistent 20 | key-duplicates: 21 | ignore: | 22 | config/testdata/section_key_dup.bad.yml 23 | line-length: disable 24 | truthy: 25 | check-keys: false 26 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## master / unreleased 2 | 3 | ## 0.18.0 / 2025-01-16 4 | 5 | - [FEATURE] Support more specific prefixes in ?collect parameter #387 6 | - [FEATURE] Enabling monitoring metrics, aggregate deltas, and descriptor cache with ?collect #389 7 | 8 | ## 0.17.0 / 2024-11-04 9 | 10 | Deprecation notice: The comma delimited flags `google.project-id` and `monitoring.metrics-type-prefixes` are being replaced by repeatable flags `google.project-ids` and `monitoring.metrics-prefixes`. The comma delimited flags will be supported for at least one more release. 11 | 12 | - [CHANGE] Migrate logging to promslog #378 13 | - [ENHANCEMENT] Sanitize metric type prefixes to prevent duplicate metrics #319 14 | - [ENHANCEMENT] Add project ID to all logs from the collector #362 15 | - [FEATURE] Add support for specifying comma-delimited string flags as repeatable flags #355 16 | 17 | ## 0.16.0 / 2024-07-15 18 | 19 | * [FEATURE] Add ErrorLogger for promhttp #277 20 | * [ENHANCEMENT] Add more info about filters to docs and rename struct fields #198 21 | 22 | ## 0.15.1 / 2024-05-15 23 | 24 | * [BUGFIX] Fix histogram merge #324 25 | 26 | ## 0.15.0 / 2024-03-07 27 | 28 | * [FEATURE] Add projects query #243 29 | * [ENHANCEMENT] Refactor delta logic for library usage #190 30 | 31 | ## 0.14.1 / 2023-05-26 32 | 33 | * [BUGFIX] Fix default listening port #229 34 | 35 | ## 0.14.0 / 2023-05-26 36 | 37 | * [FEATURE] cache descriptors to reduce API calls #218 38 | 39 | ## 0.13.0 / 2023-01-25 40 | 41 | * [FEATURE] Add `monitoring.aggregate-deltas` and `monitoring.aggregate-deltas-ttl` flags which allow aggregating DELTA 42 | metrics as counters instead of a gauge #168 43 | * [FEATURE] Add `web.stackdriver-telemetry-path` flag. When configured the stackdriver metrics go to this endpoint and 44 | `web.telemetry-path` contain just the runtime metrics. 
#173 45 | * [ENHANCEMENT] Make Stackdriver main collector more library-friendly #157 46 | * [BUGFIX] Fixes suspected duplicate label panic for some GCP metrics #153 47 | * [BUGFIX] Metrics-ingest-delay bugfix #151 48 | * [BUGFIX] Fix data race on metricDescriptorsFunction start and end times #158 49 | 50 | ## 0.12.0 / 2022-02-08 51 | 52 | Breaking Changes: 53 | 54 | The exporter no longer supports configuration via ENV vars. This was a non-standard feature that is not part of the Prometheus ecosystem. All configuration is now handled by the existing command line arguments. 55 | 56 | * [CHANGE] Cleanup non-standard ENV var setup #142 57 | * [FEATURE] Add support to include ingest delay when pulling metrics #129 58 | * [FEATURE] Add monitoring.filters flag #133 59 | * [ENHANCEMENT] Setup exporter metrics only once when we can #124 60 | 61 | ## 0.11.0 / 2020-09-02 62 | 63 | * [CHANGE] Do not treat failure to collect metrics as fatal #102 64 | * [FEATURE] Add support for multiple Google project IDs #105 65 | 66 | ## 0.10.0 / 2020-06-28 67 | 68 | * [FEATURE] Autodiscover Google Project ID #62 69 | 70 | ## 0.9.1 / 2020-06-02 71 | 72 | * [BUGFIX] Fix report time missing for histogram metrics #94 73 | 74 | ## 0.9.0 / 2020-05-26 75 | 76 | * [CHANGE] Add stackdriver timestamp to metrics #84 77 | * [CHANGE] Fix collect param name #91 78 | 79 | ## 0.8.0 / 2020-05-13 80 | 81 | * [CHANGE] Treat failure to collect metric as fatal #83 82 | * [CHANGE] Switch logging to promlog #88 83 | * [FEATURE] Add metrics prefix collect URL param #87 84 | 85 | ## 0.7.0 / 2020-05-01 86 | 87 | * [CHANGE] Remove deprecated `monitoring.New()` use. #76 88 | * [ENHANCEMENT] Server-side selection of project's metrics #53 89 | * [BUGFIX] Ensure metrics are fetched once for each metric descriptor #50 90 | 91 | ## 0.6.0 / 2018-12-02 92 | 93 | Google Stackdriver Prometheus Exporter v0.6.0: 94 | 95 | * Added a `collector.fill-missing-labels` flag to fill missing metrics labels with empty strings in order to avoid inconsistent label dimensions failures (PR https://github.com/frodenas/stackdriver_exporter/pull/23) 96 | * Added `stackdriver.max-retries`, `stackdriver.http-timeout`, `stackdriver.max-backoff`, `stackdriver.backoff-jitter`, and `stackdriver.retry-statuses` flags to allow exponential backoff and retries on the Stackdriver API (PR https://github.com/frodenas/stackdriver_exporter/pull/35) 97 | * Added a `monitoring.drop-delegated-projects` flag which allows one to disable metrics collection from delegated projects (PR https://github.com/frodenas/stackdriver_exporter/pull/40) 98 | * Fix segmentation fault on missing credentials (PR https://github.com/frodenas/stackdriver_exporter/pull/42) 99 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Prometheus Community Code of Conduct 2 | 3 | Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). 4 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Prometheus uses GitHub to manage reviews of pull requests.
4 | 5 | * If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) 6 | 7 | * If you have a trivial fix or improvement, go ahead and create a pull request, 8 | addressing (with `@...`) a suitable maintainer of this repository (see 9 | [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. 10 | 11 | * If you plan to do something more involved, first discuss your ideas 12 | on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). 13 | This will avoid unnecessary work and surely give you and us a good deal 14 | of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. 15 | 16 | * Relevant coding style guidelines are the [Go Code Review 17 | Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) 18 | and the _Formatting and style_ section of Peter Bourgon's [Go: Best 19 | Practices for Production 20 | Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). 21 | 22 | * Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) 23 | 24 | 25 | ## Steps to Contribute 26 | 27 | Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. 28 | 29 | Please check the [`low-hanging-fruit`](https://github.com/prometheus/prometheus/issues?q=is%3Aissue+is%3Aopen+label%3A%22low+hanging+fruit%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). 30 | 31 | For complete instructions on how to compile see: [Building From Source](https://github.com/prometheus/prometheus#building-from-source) 32 | 33 | For quickly compiling and testing your changes do: 34 | ``` 35 | # For building. 36 | make build 37 | ./stackdriver_exporter 38 | 39 | # For testing. 40 | make test # Make sure all the tests pass before you commit and push :) 41 | ``` 42 | 43 | We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. 44 | 45 | All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions). 46 | 47 | ## Pull Request Checklist 48 | 49 | * Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. 50 | 51 | * Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). 
52 | 53 | * If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). 54 | 55 | * Add tests relevant to the fixed bug or new feature. 56 | 57 | ## Dependency management 58 | 59 | The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.13 or greater installed. 60 | 61 | All dependencies are vendored in the `vendor/` directory. 62 | 63 | To add or update a new dependency, use the `go get` command: 64 | 65 | ```bash 66 | # Pick the latest tagged release. 67 | go get example.com/some/module/pkg 68 | 69 | # Pick a specific version. 70 | go get example.com/some/module/pkg@vX.Y.Z 71 | ``` 72 | 73 | Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: 74 | 75 | 76 | ```bash 77 | # The GO111MODULE variable can be omitted when the code isn't located in GOPATH. 78 | GO111MODULE=on go mod tidy 79 | 80 | GO111MODULE=on go mod vendor 81 | ``` 82 | 83 | You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. 84 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ARCH="amd64" 2 | ARG OS="linux" 3 | FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest 4 | LABEL maintainer="The Prometheus Authors " 5 | 6 | ARG ARCH="amd64" 7 | ARG OS="linux" 8 | COPY .build/${OS}-${ARCH}/stackdriver_exporter /bin/stackdriver_exporter 9 | COPY LICENSE /LICENSE 10 | 11 | USER nobody 12 | ENTRYPOINT ["/bin/stackdriver_exporter"] 13 | EXPOSE 9255 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | * Ben Kochie @SuperQ 2 | * Kyle Eckhart @kgeckhart 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2020 The Prometheus Authors 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | # Needs to be defined before including Makefile.common to auto-generate targets 15 | DOCKER_ARCHS ?= amd64 16 | DOCKER_REPO ?= prometheuscommunity 17 | 18 | include Makefile.common 19 | 20 | DOCKER_IMAGE_NAME ?= stackdriver-exporter 21 | -------------------------------------------------------------------------------- /Makefile.common: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The Prometheus Authors 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | 15 | # A common Makefile that includes rules to be reused in different prometheus projects. 16 | # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! 17 | 18 | # Example usage : 19 | # Create the main Makefile in the root project directory. 20 | # include Makefile.common 21 | # customTarget: 22 | # @echo ">> Running customTarget" 23 | # 24 | 25 | # Ensure GOBIN is not set during build so that promu is installed to the correct path 26 | unexport GOBIN 27 | 28 | GO ?= go 29 | GOFMT ?= $(GO)fmt 30 | FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) 31 | GOOPTS ?= 32 | GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) 33 | GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) 34 | 35 | GO_VERSION ?= $(shell $(GO) version) 36 | GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) 37 | PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') 38 | 39 | PROMU := $(FIRST_GOPATH)/bin/promu 40 | pkgs = ./... 
41 | 42 | ifeq (arm, $(GOHOSTARCH)) 43 | GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) 44 | GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) 45 | else 46 | GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) 47 | endif 48 | 49 | GOTEST := $(GO) test 50 | GOTEST_DIR := 51 | ifneq ($(CIRCLE_JOB),) 52 | ifneq ($(shell command -v gotestsum 2> /dev/null),) 53 | GOTEST_DIR := test-results 54 | GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- 55 | endif 56 | endif 57 | 58 | PROMU_VERSION ?= 0.17.0 59 | PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz 60 | 61 | SKIP_GOLANGCI_LINT := 62 | GOLANGCI_LINT := 63 | GOLANGCI_LINT_OPTS ?= 64 | GOLANGCI_LINT_VERSION ?= v1.64.6 65 | # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. 66 | # windows isn't included here because of the path separator being different. 67 | ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) 68 | ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) 69 | # If we're in CI and there is an Actions file, that means the linter 70 | # is being run in Actions, so we don't need to run it here. 71 | ifneq (,$(SKIP_GOLANGCI_LINT)) 72 | GOLANGCI_LINT := 73 | else ifeq (,$(CIRCLE_JOB)) 74 | GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint 75 | else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) 76 | GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint 77 | endif 78 | endif 79 | endif 80 | 81 | PREFIX ?= $(shell pwd) 82 | BIN_DIR ?= $(shell pwd) 83 | DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) 84 | DOCKERFILE_PATH ?= ./Dockerfile 85 | DOCKERBUILD_CONTEXT ?= ./ 86 | DOCKER_REPO ?= prom 87 | 88 | DOCKER_ARCHS ?= amd64 89 | 90 | BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) 91 | PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) 92 | TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) 93 | 94 | SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) 95 | 96 | ifeq ($(GOHOSTARCH),amd64) 97 | ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) 98 | # Only supported on amd64 99 | test-flags := -race 100 | endif 101 | endif 102 | 103 | # This rule is used to forward a target like "build" to "common-build". This 104 | # allows a new "build" target to be defined in a Makefile which includes this 105 | # one and override "common-build" without override warnings. 106 | %: common-% ; 107 | 108 | .PHONY: common-all 109 | common-all: precheck style check_license lint yamllint unused build test 110 | 111 | .PHONY: common-style 112 | common-style: 113 | @echo ">> checking code style" 114 | @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ 115 | if [ -n "$${fmtRes}" ]; then \ 116 | echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ 117 | echo "Please ensure you are using $$($(GO) version) for formatting code."; \ 118 | exit 1; \ 119 | fi 120 | 121 | .PHONY: common-check_license 122 | common-check_license: 123 | @echo ">> checking license header" 124 | @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ 125 | awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ 126 | done); \ 127 | if [ -n "$${licRes}" ]; then \ 128 | echo "license header checking failed:"; echo "$${licRes}"; \ 129 | exit 1; \ 130 | fi 131 | 132 | .PHONY: common-deps 133 | common-deps: 134 | @echo ">> getting dependencies" 135 | $(GO) mod download 136 | 137 | .PHONY: update-go-deps 138 | update-go-deps: 139 | @echo ">> updating Go dependencies" 140 | @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ 141 | $(GO) get -d $$m; \ 142 | done 143 | $(GO) mod tidy 144 | 145 | .PHONY: common-test-short 146 | common-test-short: $(GOTEST_DIR) 147 | @echo ">> running short tests" 148 | $(GOTEST) -short $(GOOPTS) $(pkgs) 149 | 150 | .PHONY: common-test 151 | common-test: $(GOTEST_DIR) 152 | @echo ">> running all tests" 153 | $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) 154 | 155 | $(GOTEST_DIR): 156 | @mkdir -p $@ 157 | 158 | .PHONY: common-format 159 | common-format: 160 | @echo ">> formatting code" 161 | $(GO) fmt $(pkgs) 162 | 163 | .PHONY: common-vet 164 | common-vet: 165 | @echo ">> vetting code" 166 | $(GO) vet $(GOOPTS) $(pkgs) 167 | 168 | .PHONY: common-lint 169 | common-lint: $(GOLANGCI_LINT) 170 | ifdef GOLANGCI_LINT 171 | @echo ">> running golangci-lint" 172 | $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) 173 | endif 174 | 175 | .PHONY: common-lint-fix 176 | common-lint-fix: $(GOLANGCI_LINT) 177 | ifdef GOLANGCI_LINT 178 | @echo ">> running golangci-lint fix" 179 | $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) 180 | endif 181 | 182 | .PHONY: common-yamllint 183 | common-yamllint: 184 | @echo ">> running yamllint on all YAML files in the repository" 185 | ifeq (, $(shell command -v yamllint 2> /dev/null)) 186 | @echo "yamllint not installed so skipping" 187 | else 188 | yamllint . 189 | endif 190 | 191 | # For backward-compatibility. 
192 | .PHONY: common-staticcheck 193 | common-staticcheck: lint 194 | 195 | .PHONY: common-unused 196 | common-unused: 197 | @echo ">> running check for unused/missing packages in go.mod" 198 | $(GO) mod tidy 199 | @git diff --exit-code -- go.sum go.mod 200 | 201 | .PHONY: common-build 202 | common-build: promu 203 | @echo ">> building binaries" 204 | $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) 205 | 206 | .PHONY: common-tarball 207 | common-tarball: promu 208 | @echo ">> building release tarball" 209 | $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) 210 | 211 | .PHONY: common-docker-repo-name 212 | common-docker-repo-name: 213 | @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" 214 | 215 | .PHONY: common-docker $(BUILD_DOCKER_ARCHS) 216 | common-docker: $(BUILD_DOCKER_ARCHS) 217 | $(BUILD_DOCKER_ARCHS): common-docker-%: 218 | docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ 219 | -f $(DOCKERFILE_PATH) \ 220 | --build-arg ARCH="$*" \ 221 | --build-arg OS="linux" \ 222 | $(DOCKERBUILD_CONTEXT) 223 | 224 | .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) 225 | common-docker-publish: $(PUBLISH_DOCKER_ARCHS) 226 | $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: 227 | docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" 228 | 229 | DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) 230 | .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) 231 | common-docker-tag-latest: $(TAG_DOCKER_ARCHS) 232 | $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: 233 | docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" 234 | docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" 235 | 236 | .PHONY: common-docker-manifest 237 | common-docker-manifest: 238 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) 239 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" 240 | 241 | .PHONY: promu 242 | promu: $(PROMU) 243 | 244 | $(PROMU): 245 | $(eval PROMU_TMP := $(shell mktemp -d)) 246 | curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) 247 | mkdir -p $(FIRST_GOPATH)/bin 248 | cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu 249 | rm -r $(PROMU_TMP) 250 | 251 | .PHONY: proto 252 | proto: 253 | @echo ">> generating code from proto files" 254 | @./scripts/genproto.sh 255 | 256 | ifdef GOLANGCI_LINT 257 | $(GOLANGCI_LINT): 258 | mkdir -p $(FIRST_GOPATH)/bin 259 | curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ 260 | | sed -e '/install -d/d' \ 261 | | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) 262 | endif 263 | 264 | .PHONY: precheck 265 | precheck:: 266 | 267 | define PRECHECK_COMMAND_template = 268 | precheck:: $(1)_precheck 269 | 270 | PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) 271 | .PHONY: $(1)_precheck 272 | $(1)_precheck: 273 | @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ 274 | echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. 
Is $(1) installed?"; \ 275 | exit 1; \ 276 | fi 277 | endef 278 | 279 | govulncheck: install-govulncheck 280 | govulncheck ./... 281 | 282 | install-govulncheck: 283 | command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest 284 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Google Stackdriver Prometheus exporter 2 | Copyright 2020 The Prometheus Authors 3 | Copyright 2017-2020 Ferran Rodenas 4 | -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | web: stackdriver_exporter --web.listen-address=":$PORT" 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Google Stackdriver Prometheus Exporter 2 | [![Build Status](https://circleci.com/gh/prometheus-community/stackdriver_exporter.svg?style=svg)](https://circleci.com/gh/prometheus-community/stackdriver_exporter) 3 | [![golangci-lint](https://github.com/prometheus-community/stackdriver_exporter/actions/workflows/golangci-lint.yml/badge.svg)](https://github.com/prometheus-community/stackdriver_exporter/actions/workflows/golangci-lint.yml) 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus-community/stackdriver_exporter)](https://goreportcard.com/report/github.com/prometheus-community/stackdriver_exporter) 5 | [![GoDoc](https://pkg.go.dev/badge/github.com/prometheus-community/stackdriver_exporter?status.svg)](https://pkg.go.dev/github.com/prometheus-community/stackdriver_exporter?tab=doc) 6 | [![Release](https://img.shields.io/github/v/release/prometheus-community/stackdriver_exporter)](https://github.com/prometheus-community/stackdriver_exporter/releases) 7 | ![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/prometheus-community/stackdriver_exporter) 8 | [![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) 9 | 10 | A [Prometheus][prometheus] exporter for [Google Stackdriver Monitoring][stackdriver] metrics. It acts as a proxy that requests the Stackdriver API for the metric's time series every time Prometheus scrapes it.
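For a quick orientation, a minimal Prometheus scrape configuration for this exporter might look like the following sketch. The job name and target host are placeholders; the port and path match the exporter's documented defaults (`web.listen-address` `:9255` and `web.telemetry-path` `/metrics`, see the Flags section below).

```yaml
# Sketch of a scrape configuration for the exporter.
# "stackdriver" and "localhost" are placeholders; 9255 is the default port.
scrape_configs:
  - job_name: stackdriver
    static_configs:
      - targets: ["localhost:9255"]
```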
11 | 12 | ## Installation 13 | 14 | ### Binaries 15 | 16 | Download the already existing [binaries][binaries] for your platform: 17 | 18 | ```console 19 | $ ./stackdriver_exporter 20 | ``` 21 | 22 | ### From source 23 | 24 | Using the standard `go install` (you must have [Go][golang] already installed on your local machine): 25 | 26 | ```console 27 | $ go install github.com/prometheus-community/stackdriver_exporter 28 | $ stackdriver_exporter 29 | ``` 30 | 31 | ### Docker 32 | 33 | To run the Stackdriver exporter as a Docker container, run: 34 | 35 | ```console 36 | $ docker run -p 9255:9255 prometheuscommunity/stackdriver-exporter 37 | ``` 38 | 39 | #### Kubernetes 40 | 41 | You can find a Helm chart in the prometheus-community charts repository: 42 | 43 | ```bash 44 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 45 | helm install [RELEASE_NAME] prometheus-community/prometheus-stackdriver-exporter 46 | ``` 47 | 48 | ### Cloud Foundry 49 | 50 | The exporter can be deployed to an already existing [Cloud Foundry][cloudfoundry] environment: 51 | 52 | ```console 53 | $ git clone https://github.com/prometheus-community/stackdriver_exporter.git 54 | $ cd stackdriver_exporter 55 | ``` 56 | 57 | Modify the included [application manifest file][manifest] to include the desired properties. Then you can push the exporter to your Cloud Foundry environment: 58 | 59 | ```console 60 | $ cf push 61 | ``` 62 | 63 | ### BOSH 64 | 65 | This exporter can be deployed using the [Prometheus BOSH Release][prometheus-boshrelease]. 66 | 67 | ## Usage 68 | 69 | ### Credentials and Permissions 70 | 71 | The Google Stackdriver Exporter uses the Google Golang Client Library, which offers a variety of ways to provide credentials. Please refer to the [Google Application Default Credentials][application-default-credentials] documentation to see how the credentials can be provided. 72 | 73 | If you are using IAM roles, the `roles/monitoring.viewer` IAM role contains the required permissions. See the [Access Control Guide][access-control] for more information. 74 | 75 | If you are still using the legacy [Access scopes][access-scopes], the `https://www.googleapis.com/auth/monitoring.read` scope is required. 76 | 77 | ### Flags 78 | 79 | | Flag | Required | Default | Description | 80 | | ----------------------------------- | -------- | ------------------------- | ----------- | 81 | | `google.project-ids` | No | GCloud SDK auto-discovery | Repeatable flag of Google Project IDs | 82 | | `google.projects.filter` | No | | GCloud projects filter expression. See more [here](https://cloud.google.com/sdk/gcloud/reference/projects/list). | 83 | | `monitoring.metrics-ingest-delay` | No | | Offsets metric collection by a delay appropriate for each metric type, e.g. because BigQuery metrics are slow to appear | 84 | | `monitoring.drop-delegated-projects` | No | No | Drop metrics from attached projects and fetch `project_id` only. | 85 | | `monitoring.metrics-prefixes` | Yes | | Repeatable flag of Google Stackdriver Monitoring Metric Type prefixes (see [example][metrics-prefix-example] and [available metrics][metrics-list]) | 86 | | `monitoring.metrics-interval` | No | `5m` | Metric's timestamp interval to request from the Google Stackdriver Monitoring Metrics API.
Only the most recent data point is used. | 87 | | `monitoring.metrics-offset` | No | `0s` | Offset (into the past) for the metric's timestamp interval to request from the Google Stackdriver Monitoring Metrics API, to handle latency in published metrics | 88 | | `monitoring.filters` | No | | Additional filters to be sent on the Monitoring API call. Add multiple filters by providing this parameter multiple times. See [monitoring.filters](#using-filters) for more info. | 89 | | `monitoring.aggregate-deltas` | No | | If enabled, treats all DELTA metrics as in-memory counters instead of gauges. Be sure to read [what to know about aggregating DELTA metrics](#what-to-know-about-aggregating-delta-metrics). | 90 | | `monitoring.aggregate-deltas-ttl` | No | `30m` | How long a delta metric should continue to be exported and stored after GCP stops producing it. Read [slow moving metrics](#slow-moving-metrics) to understand the problem this attempts to solve. | 91 | | `monitoring.descriptor-cache-ttl` | No | `0s` | How long the metric descriptors for a prefix should be cached. | 92 | | `stackdriver.max-retries` | No | `0` | Max number of retries that should be attempted on 503 errors from Stackdriver. | 93 | | `stackdriver.http-timeout` | No | `10s` | How long stackdriver_exporter should wait for a result from the Stackdriver API. | 94 | | `stackdriver.max-backoff` | No | | Max time between each request in an exponential backoff scenario. | 95 | | `stackdriver.backoff-jitter` | No | `1s` | The amount of jitter to introduce in an exponential backoff scenario. | 96 | | `stackdriver.retry-statuses` | No | `503` | The HTTP statuses that should trigger a retry. | 97 | | `web.config.file` | No | | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | 98 | | `web.listen-address` | No | `:9255` | Address to listen on for web interface and telemetry. Repeatable for multiple addresses. | 99 | | `web.systemd-socket` | No | | Use systemd socket activation listeners instead of port listeners (Linux only). | 100 | | `web.stackdriver-telemetry-path` | No | `/metrics` | Path under which to expose Stackdriver metrics. | 101 | | `web.telemetry-path` | No | `/metrics` | Path under which to expose Prometheus metrics. | 102 | 103 | ### TLS and basic authentication 104 | 105 | The Stackdriver Exporter supports TLS and basic authentication. 106 | 107 | To use TLS and/or basic authentication, you need to pass a configuration file 108 | using the `--web.config.file` parameter. The format of the file is described 109 | [in the exporter-toolkit repository](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md).
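As an illustration, a minimal web configuration file for this flag might look like the following sketch. The certificate paths, user name, and hash are placeholders; passwords must be stored as bcrypt hashes, and the linked exporter-toolkit documentation is the authoritative reference for the format.

```yaml
# Sketch of a --web.config.file enabling TLS and basic auth.
# Certificate paths and the user entry below are placeholders.
tls_server_config:
  cert_file: server.crt
  key_file: server.key
basic_auth_users:
  # The value must be a bcrypt hash of the password.
  prometheus: $2y$10$REPLACE_WITH_BCRYPT_HASH
```

The exporter is then started with `--web.config.file` pointing at this file.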
110 | 111 | ### Metrics 112 | 113 | The exporter returns the following metrics: 114 | 115 | | Metric | Description | Labels | 116 | | ------ | ----------- | ------ | 117 | | `stackdriver_monitoring_api_calls_total` | Total number of Google Stackdriver Monitoring API calls made | `project_id` | 118 | | `stackdriver_monitoring_scrapes_total` | Total number of Google Stackdriver Monitoring metrics scrapes | `project_id` | 119 | | `stackdriver_monitoring_scrape_errors_total` | Total number of Google Stackdriver Monitoring metrics scrape errors | `project_id` | 120 | | `stackdriver_monitoring_last_scrape_error` | Whether the last metrics scrape from Google Stackdriver Monitoring resulted in an error (`1` for error, `0` for success) | `project_id` | 121 | | `stackdriver_monitoring_last_scrape_timestamp` | Unix timestamp (seconds since 1970) of the last metrics scrape from Google Stackdriver Monitoring | `project_id` | 122 | | `stackdriver_monitoring_last_scrape_duration_seconds` | Duration of the last metrics scrape from Google Stackdriver Monitoring | `project_id` | 123 | 124 | Metrics gathered from Google Stackdriver Monitoring are converted to Prometheus metrics: 125 | * Metric names are normalized according to the Prometheus [specification][metrics-name] using the following pattern: 126 | 1. `namespace` is a constant prefix (`stackdriver`) 127 | 2. `subsystem` is the normalized monitored resource type (i.e. `gce_instance`) 128 | 3. `name` is the normalized metric type (i.e. `compute_googleapis_com_instance_cpu_usage_time`) 129 | * Labels attached to each metric are an aggregation of: 130 | 1. the `unit` in which the metric value is reported 131 | 2. the metric type labels (see [Metrics List][metrics-list]) 132 | 3. the monitored resource labels (see [Monitored Resource Types][monitored-resources]) 133 | * For each timeseries, only the most recent data point is exported. 134 | * Stackdriver `GAUGE` metric kinds are reported as Prometheus `Gauge` metrics. 135 | * Stackdriver `CUMULATIVE` metric kinds are reported as Prometheus `Counter` metrics. 136 | * Stackdriver `DELTA` metric kinds are reported as Prometheus `Gauge` metrics, or as an accumulating `Counter` if `monitoring.aggregate-deltas` is set. 137 | * Only `BOOL`, `INT64`, `DOUBLE` and `DISTRIBUTION` metric types are supported, other types (`STRING` and `MONEY`) are discarded. 138 | * `DISTRIBUTION` metric type is reported as a Prometheus `Histogram`, except the `_sum` time series is not supported. 139 | 140 | ### Example 141 | 142 | If we want to get all `CPU` (`compute.googleapis.com/instance/cpu`) and `Disk` (`compute.googleapis.com/instance/disk`) metrics for all [Google Compute Engine][google-compute] instances, we can run the exporter with the following options: 143 | 144 | ``` 145 | stackdriver_exporter \ 146 | --google.project-ids=my-test-project \ 147 | --monitoring.metrics-prefixes "compute.googleapis.com/instance/cpu" \ 148 | --monitoring.metrics-prefixes "compute.googleapis.com/instance/disk" 149 | ``` 150 | 151 | ### Using filters 152 | 153 | The structure for a filter is `<targeted_metric_prefix>:<filter_query>` 154 | 155 | The `targeted_metric_prefix` is used to ensure the filter is only applied to the metric_prefix(es) where it makes sense.
156 | It does not explicitly have to match a value from `metric_prefixes`, but the `targeted_metric_prefix` must be at least a prefix to one or more `metric_prefixes`. 157 | 158 | Example: \ 159 | metrics_prefixes = pubsub.googleapis.com/snapshot, pubsub.googleapis.com/subscription/num_undelivered_messages \ 160 | targeted_metric_prefix options would be \ 161 | pubsub.googleapis.com (apply to all defined prefixes) \ 162 | pubsub.googleapis.com/snapshot (apply to only snapshot metrics) \ 163 | pubsub.googleapis.com/subscription (apply to only subscription metrics) \ 164 | pubsub.googleapis.com/subscription/num_undelivered_messages (apply to only the specific subscription metric) \ 165 | 166 | The `filter_query` will be applied to the final metrics API query when querying for metric data. You can read more about the metric API filter options in GCP's documentation: https://cloud.google.com/monitoring/api/v3/filters 167 | 168 | The final query sent to the metrics API already includes filters for project and metric type. Each applicable `filter_query` will be appended to the query with an AND. 169 | 170 | Full example: 171 | ``` 172 | stackdriver_exporter \ 173 | --google.project-ids=my-test-project \ 174 | --monitoring.metrics-prefixes='pubsub.googleapis.com/subscription' \ 175 | --monitoring.metrics-prefixes='compute.googleapis.com/instance/cpu' \ 176 | --monitoring.filters='pubsub.googleapis.com/subscription:resource.labels.subscription_id=monitoring.regex.full_match("us-west4.*my-team-subs.*")' \ 177 | --monitoring.filters='compute.googleapis.com/instance/cpu:resource.labels.instance=monitoring.regex.full_match("us-west4.*my-team-subs.*")' 178 | ``` 179 | 180 | Using projects filter: 181 | 182 | ``` 183 | stackdriver_exporter \ 184 | --google.projects.filter='labels.monitoring="true"' 185 | ``` 186 | 187 | ### Filtering enabled collectors 188 | 189 | The `stackdriver_exporter` collects all metric type prefixes by default. 190 | 191 | For advanced uses, the collection can be filtered by using a repeatable URL param called `collect`. In the Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config). 192 | 193 | 194 | ```yaml 195 | params: 196 | collect: 197 | - compute.googleapis.com/instance/cpu 198 | - compute.googleapis.com/instance/disk 199 | ``` 200 | 201 | ### What to know about Aggregating DELTA Metrics 202 | 203 | Treating DELTA metrics as a gauge produces data which is wildly inaccurate and not very useful (see https://github.com/prometheus-community/stackdriver_exporter/issues/116). However, aggregating the DELTA metrics over time is not a perfect solution and is intended to produce data which mirrors GCP's data as closely as possible. 204 | 205 | The biggest challenge to producing a correct result is that a Prometheus counter does not start at 0; it starts at the first value which is exported. This can cause inconsistencies when the exporter first starts and for slow moving metrics, which are described below. 206 | 207 | #### Start-up Delay 208 | 209 | When the exporter first starts it has no persisted counter information and the stores will be empty. When the first sample is received for a series it is intended to be a change from a previous value according to GCP, a delta. But the Prometheus counter is not initialized to 0, so it does not export this as a change from 0; it exports that the counter started at the sample value.
179 | 
180 | Using projects filter:
181 | 
182 | ```
183 | stackdriver_exporter \
184 |   --google.projects.filter='labels.monitoring="true"'
185 | ```
186 | 
187 | ### Filtering enabled collectors
188 | 
189 | The `stackdriver_exporter` collects all metric type prefixes by default.
190 | 
191 | For advanced uses, the collection can be filtered by using a repeatable URL parameter called `collect`. In the Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config):
192 | 
193 | 
194 | ```yaml
195 | params:
196 |   collect:
197 |     - compute.googleapis.com/instance/cpu
198 |     - compute.googleapis.com/instance/disk
199 | ```
200 | 
201 | ### What to know about Aggregating DELTA Metrics
202 | 
203 | Treating DELTA metrics as a gauge produces data which is wildly inaccurate and not very useful (see https://github.com/prometheus-community/stackdriver_exporter/issues/116). However, aggregating the DELTA metrics over time is not a perfect solution either; it is intended to produce data which mirrors GCP's data as closely as possible.
204 | 
205 | The biggest challenge to producing a correct result is that a Prometheus counter does not start at 0; it starts at the first value which is exported. This can cause inconsistencies when the exporter first starts and for slow moving metrics, as described below.
206 | 
207 | #### Start-up Delay
208 | 
209 | When the exporter first starts it has no persisted counter information and the stores will be empty. According to GCP, the first sample received for a series is a change from a previous value, i.e. a delta. But the Prometheus counter is not initialized to 0, so the exporter does not export this as a change from 0; it exports a counter that starts at the sample value. Since the exported series are dynamic, it's not possible to export an [initial 0 value](https://prometheus.io/docs/practices/instrumentation/#avoid-missing-metrics) to account for this issue. The end result is that it can take a few cycles for aggregated metrics to start showing rates that exactly match GCP.
210 | 
211 | As an example, consider the Prometheus query `sum by(backend_target_name) (rate(stackdriver_https_lb_rule_loadbalancing_googleapis_com_https_request_bytes_count[1m]))`, which aggregates 5 series. All 5 series will need two samples from GCP before the query produces the same result as GCP.
212 | 
213 | #### Slow Moving Metrics
214 | 
215 | A slow moving metric is a metric which is not constantly changing with every sample from GCP. GCP does not consistently report slow moving DELTA metrics. If this goes on for too long (default 5m), Prometheus will mark the series as [stale](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness). The end result is that the next reported sample will be treated as the start of a new series and not an increment from the previous value. Here's an example of this in action: ![](https://user-images.githubusercontent.com/4571540/184961445-ed40237b-108e-4177-9d06-aafe61f92430.png)
216 | 
217 | There are two features which attempt to combat this issue:
218 | 
219 | 1. `monitoring.aggregate-deltas-ttl`, which controls how long a metric is persisted in the data store after it's no longer being reported by GCP
220 | 2. Metrics which were not collected during a scrape are still exported at their current counter value
221 | 
222 | The default configuration when using `monitoring.aggregate-deltas` gives a 30 minute buffer to slower moving metrics, and `monitoring.aggregate-deltas-ttl` can be adjusted to tune memory requirements vs correctness. Storing the data for longer results in a higher memory cost.
223 | 
224 | The feature which continues to export metrics which are not collected can cause `the sample has been rejected because another sample with the same timestamp, but a different value, has already been ingested` errors if your [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) for the exporter has `honor_timestamps` enabled (this is the default value). This is caused by the fact that it's not possible to know the difference between GCP having late-arriving data and GCP not exporting a value. The underlying counter is still incremented when this happens, so the next reported sample will show a higher rate than expected.
225 | 
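If these rejected samples become a problem in your setup, one possible mitigation is to disable timestamp honoring for the exporter's scrape job, so Prometheus assigns scrape-time timestamps instead of the report times attached by the exporter. A minimal sketch, assuming the exporter is scraped directly on its default port (the job name and target below are placeholders):

```yaml
scrape_configs:
  - job_name: stackdriver
    honor_timestamps: false  # ingest samples at scrape time rather than the exporter-provided timestamps
    static_configs:
      - targets: ['stackdriver-exporter:9255']
```

This trades timestamp fidelity for fewer ingestion errors; leaving `honor_timestamps` at its default keeps the exporter's report times but accepts the occasional rejected sample described above.
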
226 | ## Contributing
227 | 
228 | Refer to the [contributing guidelines][contributing].
229 | 
230 | ## License
231 | 
232 | Apache License 2.0, see [LICENSE][license].
233 | 
234 | [access-control]: https://cloud.google.com/monitoring/access-control
235 | [access-scopes]: https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam
236 | [application-default-credentials]: https://developers.google.com/identity/protocols/application-default-credentials
237 | [binaries]: https://github.com/prometheus-community/stackdriver_exporter/releases
238 | [cloudfoundry]: https://www.cloudfoundry.org/
239 | [contributing]: https://github.com/prometheus-community/stackdriver_exporter/blob/master/CONTRIBUTING.md
240 | [google-compute]: https://cloud.google.com/compute/
241 | [golang]: https://golang.org/
242 | [license]: https://github.com/prometheus-community/stackdriver_exporter/blob/master/LICENSE
243 | [manifest]: https://github.com/prometheus-community/stackdriver_exporter/blob/master/manifest.yml
244 | [metrics-prefix-example]: https://github.com/prometheus-community/stackdriver_exporter#example
245 | [metrics-list]: https://cloud.google.com/monitoring/api/metrics
246 | [metrics-name]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
247 | [monitored-resources]: https://cloud.google.com/monitoring/api/resources
248 | [prometheus]: https://prometheus.io/
249 | [prometheus-boshrelease]: https://github.com/cloudfoundry-community/prometheus-boshrelease
250 | [stackdriver]: https://cloud.google.com/monitoring/
251 | 
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Reporting a security issue
2 | 
3 | The Prometheus security policy, including how to report vulnerabilities, can be
4 | found here:
5 | 
6 | <https://prometheus.io/docs/operating/security/>
7 | 
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 0.18.0
2 | 
--------------------------------------------------------------------------------
/collectors/cache.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 The Prometheus Authors
2 | // Licensed under the Apache License, Version 2.0 (the "License");
3 | // you may not use this file except in compliance with the License.
4 | // You may obtain a copy of the License at
5 | //
6 | // http://www.apache.org/licenses/LICENSE-2.0
7 | //
8 | // Unless required by applicable law or agreed to in writing, software
9 | // distributed under the License is distributed on an "AS IS" BASIS,
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | // See the License for the specific language governing permissions and
12 | // limitations under the License.
13 | 
14 | package collectors
15 | 
16 | import (
17 | 	"sync"
18 | 	"time"
19 | 
20 | 	"google.golang.org/api/monitoring/v3"
21 | )
22 | 
23 | type DescriptorCache interface {
24 | 	// Lookup searches the cache for an entry. If the cache has no entry or the entry has expired, nil is returned.
25 | Lookup(prefix string) []*monitoring.MetricDescriptor 26 | 27 | // Store stores an entry in the cache 28 | Store(prefix string, data []*monitoring.MetricDescriptor) 29 | } 30 | 31 | type noopDescriptorCache struct{} 32 | 33 | func (d *noopDescriptorCache) Lookup(prefix string) []*monitoring.MetricDescriptor { 34 | return nil 35 | } 36 | 37 | func (d *noopDescriptorCache) Store(prefix string, data []*monitoring.MetricDescriptor) {} 38 | 39 | // descriptorCache is a MetricTypePrefix -> MetricDescriptor cache 40 | type descriptorCache struct { 41 | cache map[string]*descriptorCacheEntry 42 | lock sync.Mutex 43 | ttl time.Duration 44 | } 45 | 46 | type descriptorCacheEntry struct { 47 | data []*monitoring.MetricDescriptor 48 | expiry time.Time 49 | } 50 | 51 | func newDescriptorCache(ttl time.Duration) *descriptorCache { 52 | return &descriptorCache{ttl: ttl, cache: make(map[string]*descriptorCacheEntry)} 53 | } 54 | 55 | // Lookup returns a list of MetricDescriptors if the prefix is found, nil if not found or expired 56 | func (d *descriptorCache) Lookup(prefix string) []*monitoring.MetricDescriptor { 57 | d.lock.Lock() 58 | defer d.lock.Unlock() 59 | 60 | v, ok := d.cache[prefix] 61 | if !ok || time.Now().After(v.expiry) { 62 | return nil 63 | } 64 | 65 | return v.data 66 | } 67 | 68 | // Store overrides a cache entry 69 | func (d *descriptorCache) Store(prefix string, data []*monitoring.MetricDescriptor) { 70 | entry := descriptorCacheEntry{data: data, expiry: time.Now().Add(d.ttl)} 71 | d.lock.Lock() 72 | defer d.lock.Unlock() 73 | d.cache[prefix] = &entry 74 | } 75 | 76 | // collectorCache is a cache for MonitoringCollectors 77 | type CollectorCache struct { 78 | cache map[string]*collectorCacheEntry 79 | lock sync.RWMutex 80 | ttl time.Duration 81 | } 82 | 83 | // collectorCacheEntry is a cache entry for a MonitoringCollector 84 | type collectorCacheEntry struct { 85 | collector *MonitoringCollector 86 | expiry time.Time 87 | } 88 | 89 | // NewCollectorCache returns a new CollectorCache with the given TTL 90 | func NewCollectorCache(ttl time.Duration) *CollectorCache { 91 | c := &CollectorCache{ 92 | cache: make(map[string]*collectorCacheEntry), 93 | ttl: ttl, 94 | } 95 | 96 | go c.cleanup() 97 | return c 98 | } 99 | 100 | // Get returns a MonitoringCollector if the key is found and not expired 101 | // If key is found it resets the TTL for the collector 102 | func (c *CollectorCache) Get(key string) (*MonitoringCollector, bool) { 103 | c.lock.RLock() 104 | defer c.lock.RUnlock() 105 | 106 | entry, ok := c.cache[key] 107 | 108 | if !ok { 109 | return nil, false 110 | } 111 | 112 | if time.Now().After(entry.expiry) { 113 | delete(c.cache, key) 114 | return nil, false 115 | } 116 | 117 | entry.expiry = time.Now().Add(c.ttl) 118 | return entry.collector, true 119 | } 120 | 121 | func (c *CollectorCache) Store(key string, collector *MonitoringCollector) { 122 | entry := &collectorCacheEntry{ 123 | collector: collector, 124 | expiry: time.Now().Add(c.ttl), 125 | } 126 | 127 | c.lock.Lock() 128 | defer c.lock.Unlock() 129 | c.cache[key] = entry 130 | } 131 | 132 | func (c *CollectorCache) cleanup() { 133 | ticker := time.NewTicker(5 * time.Minute) 134 | defer ticker.Stop() 135 | for range ticker.C { 136 | c.removeExpired() 137 | } 138 | } 139 | 140 | func (c *CollectorCache) removeExpired() { 141 | c.lock.Lock() 142 | defer c.lock.Unlock() 143 | 144 | now := time.Now() 145 | for key, entry := range c.cache { 146 | if now.After(entry.expiry) { 147 | delete(c.cache, key) 148 | } 149 | } 150 
| } 151 | -------------------------------------------------------------------------------- /collectors/cache_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collectors 15 | 16 | import ( 17 | "fmt" 18 | "testing" 19 | "time" 20 | 21 | "google.golang.org/api/monitoring/v3" 22 | ) 23 | 24 | func makeDummyMetrics(n int) []*monitoring.MetricDescriptor { 25 | ret := make([]*monitoring.MetricDescriptor, n) 26 | for i := 0; i < n; i++ { 27 | ret[i] = &monitoring.MetricDescriptor{ 28 | DisplayName: fmt.Sprintf("test-%d", i), 29 | } 30 | } 31 | return ret 32 | } 33 | 34 | func isEqual(a, b []*monitoring.MetricDescriptor) bool { 35 | if len(a) != len(b) { 36 | return false 37 | } 38 | 39 | for idx, e := range a { 40 | if e.DisplayName != b[idx].DisplayName { 41 | return false 42 | } 43 | } 44 | 45 | return true 46 | } 47 | 48 | func TestDescriptorCache(t *testing.T) { 49 | ttl := 1 * time.Second 50 | cache := newDescriptorCache(ttl) 51 | entries := makeDummyMetrics(10) 52 | key := "akey" 53 | 54 | if cache.Lookup(key) != nil { 55 | t.Errorf("Cache should've returned nil on lookup without store") 56 | } 57 | 58 | cache.Store("more", makeDummyMetrics(10)) 59 | cache.Store("evenmore", makeDummyMetrics(10)) 60 | 61 | cache.Store(key, entries) 62 | newEntries := cache.Lookup(key) 63 | 64 | if newEntries == nil { 65 | t.Errorf("Cache returned unexpected nil") 66 | } 67 | 68 | if !isEqual(entries, newEntries) { 69 | t.Errorf("Cache modified entries") 70 | } 71 | 72 | time.Sleep(ttl) 73 | if cache.Lookup(key) != nil { 74 | t.Error("cache entries should have expired") 75 | } 76 | } 77 | 78 | func TestCollectorCache(t *testing.T) { 79 | createCollector := func(id string) *MonitoringCollector { 80 | return &MonitoringCollector{ 81 | projectID: id, 82 | } 83 | } 84 | 85 | t.Run("basic cache Op", func(t *testing.T) { 86 | ttl := 1 * time.Second 87 | cache := NewCollectorCache(ttl) 88 | collector := createCollector("test-project") 89 | key := "test-key" 90 | 91 | cache.Store(key, collector) 92 | 93 | if _, found := cache.Get("test-key"); !found { 94 | t.Error("Collector should be available in cache before TTL") 95 | } 96 | 97 | time.Sleep(2 * ttl) 98 | if _, found := cache.Get("test-key"); found { 99 | t.Error("Collector should have expired") 100 | } 101 | }) 102 | 103 | t.Run("multiple collectors", func(t *testing.T) { 104 | ttl := 1 * time.Second 105 | cache := NewCollectorCache(ttl) 106 | 107 | collectors := map[string]*MonitoringCollector{ 108 | "test-key-1": createCollector("test-project-1"), 109 | "test-key-2": createCollector("test-project-2"), 110 | "test-key-3": createCollector("test-project-3"), 111 | } 112 | 113 | for k, v := range collectors { 114 | cache.Store(k, v) 115 | } 116 | 117 | for k, original := range collectors { 118 | cached, found := cache.Get(k) 119 | if !found { 120 | t.Errorf("Collector %s not 
found in cache", k) 121 | continue 122 | } 123 | 124 | if cached.projectID != original.projectID { 125 | t.Errorf("Wrong collector for key %s. Got projectId %s, want %s", k, cached.projectID, original.projectID) 126 | } 127 | } 128 | }) 129 | } 130 | -------------------------------------------------------------------------------- /collectors/collectors_suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collectors_test 15 | 16 | import ( 17 | . "github.com/onsi/ginkgo" 18 | . "github.com/onsi/gomega" 19 | 20 | "testing" 21 | ) 22 | 23 | func TestCollectors(t *testing.T) { 24 | RegisterFailHandler(Fail) 25 | RunSpecs(t, "Collectors Suite") 26 | } 27 | -------------------------------------------------------------------------------- /collectors/monitoring_collector.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package collectors 15 | 16 | import ( 17 | "errors" 18 | "fmt" 19 | "log/slog" 20 | "math" 21 | "strings" 22 | "sync" 23 | "time" 24 | 25 | "github.com/prometheus/client_golang/prometheus" 26 | "golang.org/x/net/context" 27 | "google.golang.org/api/monitoring/v3" 28 | 29 | "github.com/prometheus-community/stackdriver_exporter/utils" 30 | ) 31 | 32 | const namespace = "stackdriver" 33 | 34 | type MetricFilter struct { 35 | TargetedMetricPrefix string 36 | FilterQuery string 37 | } 38 | 39 | type MonitoringCollector struct { 40 | projectID string 41 | metricsTypePrefixes []string 42 | metricsFilters []MetricFilter 43 | metricsInterval time.Duration 44 | metricsOffset time.Duration 45 | metricsIngestDelay bool 46 | monitoringService *monitoring.Service 47 | apiCallsTotalMetric prometheus.Counter 48 | scrapesTotalMetric prometheus.Counter 49 | scrapeErrorsTotalMetric prometheus.Counter 50 | lastScrapeErrorMetric prometheus.Gauge 51 | lastScrapeTimestampMetric prometheus.Gauge 52 | lastScrapeDurationSecondsMetric prometheus.Gauge 53 | collectorFillMissingLabels bool 54 | monitoringDropDelegatedProjects bool 55 | logger *slog.Logger 56 | counterStore DeltaCounterStore 57 | histogramStore DeltaHistogramStore 58 | aggregateDeltas bool 59 | descriptorCache DescriptorCache 60 | } 61 | 62 | type MonitoringCollectorOptions struct { 63 | // MetricTypePrefixes are the Google Monitoring (ex-Stackdriver) metric type prefixes that the collector 64 | // will be querying. 65 | MetricTypePrefixes []string 66 | // ExtraFilters is a list of criteria to apply to each corresponding metric prefix query. If one or more are 67 | // applicable to a given metric type prefix, they will be 'AND' concatenated. 68 | ExtraFilters []MetricFilter 69 | // RequestInterval is the time interval used in each request to get metrics. If there are many data points returned 70 | // during this interval, only the latest will be reported. 71 | RequestInterval time.Duration 72 | // RequestOffset is used to offset the requested interval into the past. 73 | RequestOffset time.Duration 74 | // IngestDelay decides if the ingestion delay specified in the metrics metadata is used when calculating the 75 | // request time interval. 76 | IngestDelay bool 77 | // FillMissingLabels decides if metric labels should be added with empty string to prevent failures due to label inconsistency on metrics. 78 | FillMissingLabels bool 79 | // DropDelegatedProjects decides if only metrics matching the collector's projectID should be retrieved. 
80 | DropDelegatedProjects bool 81 | // AggregateDeltas decides if DELTA metrics should be treated as a counter using the provided counterStore/distributionStore or a gauge 82 | AggregateDeltas bool 83 | // DescriptorCacheTTL is the TTL on the items in the descriptorCache which caches the MetricDescriptors for a MetricTypePrefix 84 | DescriptorCacheTTL time.Duration 85 | // DescriptorCacheOnlyGoogle decides whether only google specific descriptors should be cached or all 86 | DescriptorCacheOnlyGoogle bool 87 | } 88 | 89 | func isGoogleMetric(name string) bool { 90 | parts := strings.Split(name, "/") 91 | return strings.Contains(parts[0], "googleapis.com") 92 | } 93 | 94 | type googleDescriptorCache struct { 95 | inner *descriptorCache 96 | } 97 | 98 | func (d *googleDescriptorCache) Lookup(prefix string) []*monitoring.MetricDescriptor { 99 | if !isGoogleMetric(prefix) { 100 | return nil 101 | } 102 | return d.inner.Lookup(prefix) 103 | } 104 | 105 | func (d *googleDescriptorCache) Store(prefix string, data []*monitoring.MetricDescriptor) { 106 | if !isGoogleMetric(prefix) { 107 | return 108 | } 109 | d.inner.Store(prefix, data) 110 | } 111 | 112 | type DeltaCounterStore interface { 113 | Increment(metricDescriptor *monitoring.MetricDescriptor, currentValue *ConstMetric) 114 | ListMetrics(metricDescriptorName string) []*ConstMetric 115 | } 116 | 117 | type DeltaHistogramStore interface { 118 | Increment(metricDescriptor *monitoring.MetricDescriptor, currentValue *HistogramMetric) 119 | ListMetrics(metricDescriptorName string) []*HistogramMetric 120 | } 121 | 122 | func NewMonitoringCollector(projectID string, monitoringService *monitoring.Service, opts MonitoringCollectorOptions, logger *slog.Logger, counterStore DeltaCounterStore, histogramStore DeltaHistogramStore) (*MonitoringCollector, error) { 123 | const subsystem = "monitoring" 124 | 125 | logger = logger.With("project_id", projectID) 126 | 127 | apiCallsTotalMetric := prometheus.NewCounter( 128 | prometheus.CounterOpts{ 129 | Namespace: namespace, 130 | Subsystem: subsystem, 131 | Name: "api_calls_total", 132 | Help: "Total number of Google Stackdriver Monitoring API calls made.", 133 | ConstLabels: prometheus.Labels{"project_id": projectID}, 134 | }, 135 | ) 136 | 137 | scrapesTotalMetric := prometheus.NewCounter( 138 | prometheus.CounterOpts{ 139 | Namespace: namespace, 140 | Subsystem: subsystem, 141 | Name: "scrapes_total", 142 | Help: "Total number of Google Stackdriver Monitoring metrics scrapes.", 143 | ConstLabels: prometheus.Labels{"project_id": projectID}, 144 | }, 145 | ) 146 | 147 | scrapeErrorsTotalMetric := prometheus.NewCounter( 148 | prometheus.CounterOpts{ 149 | Namespace: namespace, 150 | Subsystem: subsystem, 151 | Name: "scrape_errors_total", 152 | Help: "Total number of Google Stackdriver Monitoring metrics scrape errors.", 153 | ConstLabels: prometheus.Labels{"project_id": projectID}, 154 | }, 155 | ) 156 | 157 | lastScrapeErrorMetric := prometheus.NewGauge( 158 | prometheus.GaugeOpts{ 159 | Namespace: namespace, 160 | Subsystem: subsystem, 161 | Name: "last_scrape_error", 162 | Help: "Whether the last metrics scrape from Google Stackdriver Monitoring resulted in an error (1 for error, 0 for success).", 163 | ConstLabels: prometheus.Labels{"project_id": projectID}, 164 | }, 165 | ) 166 | 167 | lastScrapeTimestampMetric := prometheus.NewGauge( 168 | prometheus.GaugeOpts{ 169 | Namespace: namespace, 170 | Subsystem: subsystem, 171 | Name: "last_scrape_timestamp", 172 | Help: "Number of seconds since 1970 since 
last metrics scrape from Google Stackdriver Monitoring.", 173 | ConstLabels: prometheus.Labels{"project_id": projectID}, 174 | }, 175 | ) 176 | 177 | lastScrapeDurationSecondsMetric := prometheus.NewGauge( 178 | prometheus.GaugeOpts{ 179 | Namespace: namespace, 180 | Subsystem: subsystem, 181 | Name: "last_scrape_duration_seconds", 182 | Help: "Duration of the last metrics scrape from Google Stackdriver Monitoring.", 183 | ConstLabels: prometheus.Labels{"project_id": projectID}, 184 | }, 185 | ) 186 | 187 | var descriptorCache DescriptorCache 188 | if opts.DescriptorCacheTTL == 0 { 189 | descriptorCache = &noopDescriptorCache{} 190 | } else if opts.DescriptorCacheOnlyGoogle { 191 | descriptorCache = &googleDescriptorCache{inner: newDescriptorCache(opts.DescriptorCacheTTL)} 192 | } else { 193 | descriptorCache = newDescriptorCache(opts.DescriptorCacheTTL) 194 | 195 | } 196 | 197 | monitoringCollector := &MonitoringCollector{ 198 | projectID: projectID, 199 | metricsTypePrefixes: opts.MetricTypePrefixes, 200 | metricsFilters: opts.ExtraFilters, 201 | metricsInterval: opts.RequestInterval, 202 | metricsOffset: opts.RequestOffset, 203 | metricsIngestDelay: opts.IngestDelay, 204 | monitoringService: monitoringService, 205 | apiCallsTotalMetric: apiCallsTotalMetric, 206 | scrapesTotalMetric: scrapesTotalMetric, 207 | scrapeErrorsTotalMetric: scrapeErrorsTotalMetric, 208 | lastScrapeErrorMetric: lastScrapeErrorMetric, 209 | lastScrapeTimestampMetric: lastScrapeTimestampMetric, 210 | lastScrapeDurationSecondsMetric: lastScrapeDurationSecondsMetric, 211 | collectorFillMissingLabels: opts.FillMissingLabels, 212 | monitoringDropDelegatedProjects: opts.DropDelegatedProjects, 213 | logger: logger, 214 | counterStore: counterStore, 215 | histogramStore: histogramStore, 216 | aggregateDeltas: opts.AggregateDeltas, 217 | descriptorCache: descriptorCache, 218 | } 219 | 220 | return monitoringCollector, nil 221 | } 222 | 223 | func (c *MonitoringCollector) Describe(ch chan<- *prometheus.Desc) { 224 | c.apiCallsTotalMetric.Describe(ch) 225 | c.scrapesTotalMetric.Describe(ch) 226 | c.scrapeErrorsTotalMetric.Describe(ch) 227 | c.lastScrapeErrorMetric.Describe(ch) 228 | c.lastScrapeTimestampMetric.Describe(ch) 229 | c.lastScrapeDurationSecondsMetric.Describe(ch) 230 | } 231 | 232 | func (c *MonitoringCollector) Collect(ch chan<- prometheus.Metric) { 233 | var begun = time.Now() 234 | 235 | errorMetric := float64(0) 236 | if err := c.reportMonitoringMetrics(ch, begun); err != nil { 237 | errorMetric = float64(1) 238 | c.scrapeErrorsTotalMetric.Inc() 239 | c.logger.Error("Error while getting Google Stackdriver Monitoring metrics", "err", err) 240 | } 241 | c.scrapeErrorsTotalMetric.Collect(ch) 242 | 243 | c.apiCallsTotalMetric.Collect(ch) 244 | 245 | c.scrapesTotalMetric.Inc() 246 | c.scrapesTotalMetric.Collect(ch) 247 | 248 | c.lastScrapeErrorMetric.Set(errorMetric) 249 | c.lastScrapeErrorMetric.Collect(ch) 250 | 251 | c.lastScrapeTimestampMetric.Set(float64(time.Now().Unix())) 252 | c.lastScrapeTimestampMetric.Collect(ch) 253 | 254 | c.lastScrapeDurationSecondsMetric.Set(time.Since(begun).Seconds()) 255 | c.lastScrapeDurationSecondsMetric.Collect(ch) 256 | } 257 | 258 | func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metric, begun time.Time) error { 259 | metricDescriptorsFunction := func(descriptors []*monitoring.MetricDescriptor) error { 260 | var wg = &sync.WaitGroup{} 261 | 262 | // It has been noticed that the same metric descriptor can be obtained from different GCP 263 | // 
projects. When that happens, metrics are fetched twice and it provokes the error: 264 | // "collected metric xxx was collected before with the same name and label values" 265 | // 266 | // Metric descriptor project is irrelevant when it comes to fetch metrics, as they will be 267 | // fetched from all the delegated projects filtering by metric type. Considering that, we 268 | // can filter descriptors to keep just one per type. 269 | // 270 | // The following makes sure metric descriptors are unique to avoid fetching more than once 271 | uniqueDescriptors := make(map[string]*monitoring.MetricDescriptor) 272 | for _, descriptor := range descriptors { 273 | uniqueDescriptors[descriptor.Type] = descriptor 274 | } 275 | 276 | errChannel := make(chan error, len(uniqueDescriptors)) 277 | 278 | endTime := time.Now().UTC().Add(c.metricsOffset * -1) 279 | startTime := endTime.Add(c.metricsInterval * -1) 280 | 281 | for _, metricDescriptor := range uniqueDescriptors { 282 | wg.Add(1) 283 | go func(metricDescriptor *monitoring.MetricDescriptor, ch chan<- prometheus.Metric, startTime, endTime time.Time) { 284 | defer wg.Done() 285 | c.logger.Debug("retrieving Google Stackdriver Monitoring metrics for descriptor", "descriptor", metricDescriptor.Type) 286 | filter := fmt.Sprintf("metric.type=\"%s\"", metricDescriptor.Type) 287 | if c.monitoringDropDelegatedProjects { 288 | filter = fmt.Sprintf( 289 | "project=\"%s\" AND metric.type=\"%s\"", 290 | c.projectID, 291 | metricDescriptor.Type) 292 | } 293 | 294 | if c.metricsIngestDelay && 295 | metricDescriptor.Metadata != nil && 296 | metricDescriptor.Metadata.IngestDelay != "" { 297 | ingestDelay := metricDescriptor.Metadata.IngestDelay 298 | ingestDelayDuration, err := time.ParseDuration(ingestDelay) 299 | if err != nil { 300 | c.logger.Error("error parsing ingest delay from metric metadata", "descriptor", metricDescriptor.Type, "err", err, "delay", ingestDelay) 301 | errChannel <- err 302 | return 303 | } 304 | c.logger.Debug("adding ingest delay", "descriptor", metricDescriptor.Type, "delay", ingestDelay) 305 | endTime = endTime.Add(ingestDelayDuration * -1) 306 | startTime = startTime.Add(ingestDelayDuration * -1) 307 | } 308 | 309 | for _, ef := range c.metricsFilters { 310 | if strings.HasPrefix(metricDescriptor.Type, ef.TargetedMetricPrefix) { 311 | filter = fmt.Sprintf("%s AND (%s)", filter, ef.FilterQuery) 312 | } 313 | } 314 | 315 | c.logger.Debug("retrieving Google Stackdriver Monitoring metrics with filter", "filter", filter) 316 | 317 | timeSeriesListCall := c.monitoringService.Projects.TimeSeries.List(utils.ProjectResource(c.projectID)). 318 | Filter(filter). 319 | IntervalStartTime(startTime.Format(time.RFC3339Nano)). 
320 | IntervalEndTime(endTime.Format(time.RFC3339Nano)) 321 | 322 | for { 323 | c.apiCallsTotalMetric.Inc() 324 | page, err := timeSeriesListCall.Do() 325 | if err != nil { 326 | c.logger.Error("error retrieving Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err) 327 | errChannel <- err 328 | break 329 | } 330 | if page == nil { 331 | break 332 | } 333 | if err := c.reportTimeSeriesMetrics(page, metricDescriptor, ch, begun); err != nil { 334 | c.logger.Error("error reporting Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err) 335 | errChannel <- err 336 | break 337 | } 338 | if page.NextPageToken == "" { 339 | break 340 | } 341 | timeSeriesListCall.PageToken(page.NextPageToken) 342 | } 343 | }(metricDescriptor, ch, startTime, endTime) 344 | } 345 | 346 | wg.Wait() 347 | close(errChannel) 348 | 349 | return <-errChannel 350 | } 351 | 352 | var wg = &sync.WaitGroup{} 353 | 354 | errChannel := make(chan error, len(c.metricsTypePrefixes)) 355 | 356 | for _, metricsTypePrefix := range c.metricsTypePrefixes { 357 | wg.Add(1) 358 | go func(metricsTypePrefix string) { 359 | defer wg.Done() 360 | ctx := context.Background() 361 | filter := fmt.Sprintf("metric.type = starts_with(\"%s\")", metricsTypePrefix) 362 | if c.monitoringDropDelegatedProjects { 363 | filter = fmt.Sprintf( 364 | "project = \"%s\" AND metric.type = starts_with(\"%s\")", 365 | c.projectID, 366 | metricsTypePrefix) 367 | } 368 | 369 | if cached := c.descriptorCache.Lookup(metricsTypePrefix); cached != nil { 370 | c.logger.Debug("using cached Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix) 371 | if err := metricDescriptorsFunction(cached); err != nil { 372 | errChannel <- err 373 | } 374 | } else { 375 | var cache []*monitoring.MetricDescriptor 376 | 377 | callback := func(r *monitoring.ListMetricDescriptorsResponse) error { 378 | c.apiCallsTotalMetric.Inc() 379 | cache = append(cache, r.MetricDescriptors...) 380 | return metricDescriptorsFunction(r.MetricDescriptors) 381 | } 382 | 383 | c.logger.Debug("listing Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix) 384 | if err := c.monitoringService.Projects.MetricDescriptors.List(utils.ProjectResource(c.projectID)). 385 | Filter(filter). 
386 | Pages(ctx, callback); err != nil { 387 | errChannel <- err 388 | } 389 | 390 | c.descriptorCache.Store(metricsTypePrefix, cache) 391 | } 392 | }(metricsTypePrefix) 393 | } 394 | 395 | wg.Wait() 396 | close(errChannel) 397 | 398 | c.logger.Debug("Done reporting monitoring metrics") 399 | return <-errChannel 400 | } 401 | 402 | func (c *MonitoringCollector) reportTimeSeriesMetrics( 403 | page *monitoring.ListTimeSeriesResponse, 404 | metricDescriptor *monitoring.MetricDescriptor, 405 | ch chan<- prometheus.Metric, 406 | begun time.Time, 407 | ) error { 408 | var metricValue float64 409 | var metricValueType prometheus.ValueType 410 | var newestTSPoint *monitoring.Point 411 | 412 | timeSeriesMetrics, err := newTimeSeriesMetrics(metricDescriptor, 413 | ch, 414 | c.collectorFillMissingLabels, 415 | c.counterStore, 416 | c.histogramStore, 417 | c.aggregateDeltas, 418 | ) 419 | if err != nil { 420 | return fmt.Errorf("error creating the TimeSeriesMetrics %v", err) 421 | } 422 | for _, timeSeries := range page.TimeSeries { 423 | newestEndTime := time.Unix(0, 0) 424 | for _, point := range timeSeries.Points { 425 | endTime, err := time.Parse(time.RFC3339Nano, point.Interval.EndTime) 426 | if err != nil { 427 | return fmt.Errorf("Error parsing TimeSeries Point interval end time `%s`: %s", point.Interval.EndTime, err) 428 | } 429 | if endTime.After(newestEndTime) { 430 | newestEndTime = endTime 431 | newestTSPoint = point 432 | } 433 | } 434 | labelKeys := []string{"unit"} 435 | labelValues := []string{metricDescriptor.Unit} 436 | 437 | // Add the metric labels 438 | // @see https://cloud.google.com/monitoring/api/metrics 439 | for key, value := range timeSeries.Metric.Labels { 440 | if !c.keyExists(labelKeys, key) { 441 | labelKeys = append(labelKeys, key) 442 | labelValues = append(labelValues, value) 443 | } 444 | } 445 | 446 | // Add the monitored resource labels 447 | // @see https://cloud.google.com/monitoring/api/resources 448 | for key, value := range timeSeries.Resource.Labels { 449 | if !c.keyExists(labelKeys, key) { 450 | labelKeys = append(labelKeys, key) 451 | labelValues = append(labelValues, value) 452 | } 453 | } 454 | 455 | if c.monitoringDropDelegatedProjects { 456 | dropDelegatedProject := false 457 | 458 | for idx, val := range labelKeys { 459 | if val == "project_id" && labelValues[idx] != c.projectID { 460 | dropDelegatedProject = true 461 | break 462 | } 463 | } 464 | 465 | if dropDelegatedProject { 466 | continue 467 | } 468 | } 469 | 470 | switch timeSeries.MetricKind { 471 | case "GAUGE": 472 | metricValueType = prometheus.GaugeValue 473 | case "DELTA": 474 | if c.aggregateDeltas { 475 | metricValueType = prometheus.CounterValue 476 | } else { 477 | metricValueType = prometheus.GaugeValue 478 | } 479 | case "CUMULATIVE": 480 | metricValueType = prometheus.CounterValue 481 | default: 482 | continue 483 | } 484 | 485 | switch timeSeries.ValueType { 486 | case "BOOL": 487 | metricValue = 0 488 | if *newestTSPoint.Value.BoolValue { 489 | metricValue = 1 490 | } 491 | case "INT64": 492 | metricValue = float64(*newestTSPoint.Value.Int64Value) 493 | case "DOUBLE": 494 | metricValue = *newestTSPoint.Value.DoubleValue 495 | case "DISTRIBUTION": 496 | dist := newestTSPoint.Value.DistributionValue 497 | buckets, err := c.generateHistogramBuckets(dist) 498 | 499 | if err == nil { 500 | timeSeriesMetrics.CollectNewConstHistogram(timeSeries, newestEndTime, labelKeys, dist, buckets, labelValues, timeSeries.MetricKind) 501 | } else { 502 | c.logger.Debug("discarding", "resource", 
timeSeries.Resource.Type, "metric", 503 | timeSeries.Metric.Type, "err", err) 504 | } 505 | continue 506 | default: 507 | c.logger.Debug("discarding", "value_type", timeSeries.ValueType, "metric", timeSeries) 508 | continue 509 | } 510 | 511 | timeSeriesMetrics.CollectNewConstMetric(timeSeries, newestEndTime, labelKeys, metricValueType, metricValue, labelValues, timeSeries.MetricKind) 512 | } 513 | timeSeriesMetrics.Complete(begun) 514 | return nil 515 | } 516 | 517 | func (c *MonitoringCollector) generateHistogramBuckets( 518 | dist *monitoring.Distribution, 519 | ) (map[float64]uint64, error) { 520 | opts := dist.BucketOptions 521 | var bucketKeys []float64 522 | switch { 523 | case opts.ExplicitBuckets != nil: 524 | // @see https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TypedValue#explicit 525 | bucketKeys = make([]float64, len(opts.ExplicitBuckets.Bounds)+1) 526 | copy(bucketKeys, opts.ExplicitBuckets.Bounds) 527 | case opts.LinearBuckets != nil: 528 | // @see https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TypedValue#linear 529 | // NumFiniteBuckets is inclusive so bucket count is num+2 530 | num := int(opts.LinearBuckets.NumFiniteBuckets) 531 | bucketKeys = make([]float64, num+2) 532 | for i := 0; i <= num; i++ { 533 | bucketKeys[i] = opts.LinearBuckets.Offset + (float64(i) * opts.LinearBuckets.Width) 534 | } 535 | case opts.ExponentialBuckets != nil: 536 | // @see https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TypedValue#exponential 537 | // NumFiniteBuckets is inclusive so bucket count is num+2 538 | num := int(opts.ExponentialBuckets.NumFiniteBuckets) 539 | bucketKeys = make([]float64, num+2) 540 | for i := 0; i <= num; i++ { 541 | bucketKeys[i] = opts.ExponentialBuckets.Scale * math.Pow(opts.ExponentialBuckets.GrowthFactor, float64(i)) 542 | } 543 | default: 544 | return nil, errors.New("Unknown distribution buckets") 545 | } 546 | // The last bucket is always infinity 547 | // @see https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TypedValue#bucketoptions 548 | bucketKeys[len(bucketKeys)-1] = math.Inf(1) 549 | 550 | // Prometheus expects each bucket to have a lower bound of 0, but Google 551 | // sends a bucket with a lower bound of the previous bucket's upper bound, so 552 | // we need to store the last bucket and add it to the next bucket to make it 553 | // 0-bound. 554 | // Any remaining keys without data have a value of 0 555 | buckets := map[float64]uint64{} 556 | var last uint64 557 | for i, b := range bucketKeys { 558 | if len(dist.BucketCounts) > i { 559 | buckets[b] = uint64(dist.BucketCounts[i]) + last 560 | last = buckets[b] 561 | } else { 562 | buckets[b] = last 563 | } 564 | } 565 | return buckets, nil 566 | } 567 | 568 | func (c *MonitoringCollector) keyExists(labelKeys []string, key string) bool { 569 | for _, item := range labelKeys { 570 | if item == key { 571 | c.logger.Debug("Found duplicate label key", "key", key) 572 | return true 573 | } 574 | } 575 | return false 576 | } 577 | -------------------------------------------------------------------------------- /collectors/monitoring_collector_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collectors 15 | 16 | import "testing" 17 | 18 | func TestIsGoogleMetric(t *testing.T) { 19 | good := []string{ 20 | "pubsub.googleapis.com/some/metric", 21 | } 22 | 23 | bad := []string{ 24 | "my.metric/a/b", 25 | "my.metrics/pubsub.googleapis.com/a", 26 | } 27 | 28 | for _, e := range good { 29 | if !isGoogleMetric(e) { 30 | t.Errorf("should be a google metric: %s", e) 31 | } 32 | } 33 | 34 | for _, e := range bad { 35 | if isGoogleMetric(e) { 36 | t.Errorf("should not be a google metric: %s", e) 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /collectors/monitoring_metrics.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package collectors 15 | 16 | import ( 17 | "time" 18 | 19 | "github.com/prometheus/client_golang/prometheus" 20 | "google.golang.org/api/monitoring/v3" 21 | 22 | "sort" 23 | 24 | "github.com/prometheus-community/stackdriver_exporter/hash" 25 | "github.com/prometheus-community/stackdriver_exporter/utils" 26 | ) 27 | 28 | func buildFQName(timeSeries *monitoring.TimeSeries) string { 29 | // The metric name to report is composed by the 3 parts: 30 | // 1. namespace is a constant prefix (stackdriver) 31 | // 2. subsystem is the monitored resource type (ie gce_instance) 32 | // 3. 
name is the metric type (ie compute.googleapis.com/instance/cpu/usage_time) 33 | return prometheus.BuildFQName(namespace, utils.NormalizeMetricName(timeSeries.Resource.Type), utils.NormalizeMetricName(timeSeries.Metric.Type)) 34 | } 35 | 36 | type timeSeriesMetrics struct { 37 | metricDescriptor *monitoring.MetricDescriptor 38 | 39 | ch chan<- prometheus.Metric 40 | 41 | fillMissingLabels bool 42 | constMetrics map[string][]*ConstMetric 43 | histogramMetrics map[string][]*HistogramMetric 44 | 45 | counterStore DeltaCounterStore 46 | histogramStore DeltaHistogramStore 47 | aggregateDeltas bool 48 | } 49 | 50 | func newTimeSeriesMetrics(descriptor *monitoring.MetricDescriptor, 51 | ch chan<- prometheus.Metric, 52 | fillMissingLabels bool, 53 | counterStore DeltaCounterStore, 54 | histogramStore DeltaHistogramStore, 55 | aggregateDeltas bool) (*timeSeriesMetrics, error) { 56 | 57 | return &timeSeriesMetrics{ 58 | metricDescriptor: descriptor, 59 | ch: ch, 60 | fillMissingLabels: fillMissingLabels, 61 | constMetrics: make(map[string][]*ConstMetric), 62 | histogramMetrics: make(map[string][]*HistogramMetric), 63 | counterStore: counterStore, 64 | histogramStore: histogramStore, 65 | aggregateDeltas: aggregateDeltas, 66 | }, nil 67 | } 68 | 69 | func (t *timeSeriesMetrics) newMetricDesc(fqName string, labelKeys []string) *prometheus.Desc { 70 | return prometheus.NewDesc( 71 | fqName, 72 | t.metricDescriptor.Description, 73 | labelKeys, 74 | prometheus.Labels{}, 75 | ) 76 | } 77 | 78 | type ConstMetric struct { 79 | FqName string 80 | LabelKeys []string 81 | ValueType prometheus.ValueType 82 | Value float64 83 | LabelValues []string 84 | ReportTime time.Time 85 | CollectionTime time.Time 86 | 87 | KeysHash uint64 88 | } 89 | 90 | type HistogramMetric struct { 91 | FqName string 92 | LabelKeys []string 93 | Sum float64 94 | Count uint64 95 | Buckets map[float64]uint64 96 | LabelValues []string 97 | ReportTime time.Time 98 | CollectionTime time.Time 99 | 100 | KeysHash uint64 101 | } 102 | 103 | func (h *HistogramMetric) MergeHistogram(other *HistogramMetric) { 104 | // Increment totals based on incoming totals 105 | h.Sum += other.Sum 106 | h.Count += other.Count 107 | 108 | // Merge the buckets from existing in to current 109 | for key, value := range other.Buckets { 110 | h.Buckets[key] += value 111 | } 112 | } 113 | 114 | func (t *timeSeriesMetrics) CollectNewConstHistogram(timeSeries *monitoring.TimeSeries, reportTime time.Time, labelKeys []string, dist *monitoring.Distribution, buckets map[float64]uint64, labelValues []string, metricKind string) { 115 | fqName := buildFQName(timeSeries) 116 | histogramSum := dist.Mean * float64(dist.Count) 117 | var v HistogramMetric 118 | if t.fillMissingLabels || (metricKind == "DELTA" && t.aggregateDeltas) { 119 | v = HistogramMetric{ 120 | FqName: fqName, 121 | LabelKeys: labelKeys, 122 | Sum: histogramSum, 123 | Count: uint64(dist.Count), 124 | Buckets: buckets, 125 | LabelValues: labelValues, 126 | ReportTime: reportTime, 127 | CollectionTime: time.Now(), 128 | 129 | KeysHash: hashLabelKeys(labelKeys), 130 | } 131 | } 132 | 133 | if metricKind == "DELTA" && t.aggregateDeltas { 134 | t.histogramStore.Increment(t.metricDescriptor, &v) 135 | return 136 | } 137 | 138 | if t.fillMissingLabels { 139 | vs, ok := t.histogramMetrics[fqName] 140 | if !ok { 141 | vs = make([]*HistogramMetric, 0) 142 | } 143 | t.histogramMetrics[fqName] = append(vs, &v) 144 | return 145 | } 146 | 147 | t.ch <- t.newConstHistogram(fqName, reportTime, labelKeys, histogramSum, 
uint64(dist.Count), buckets, labelValues) 148 | } 149 | 150 | func (t *timeSeriesMetrics) newConstHistogram(fqName string, reportTime time.Time, labelKeys []string, sum float64, count uint64, buckets map[float64]uint64, labelValues []string) prometheus.Metric { 151 | return prometheus.NewMetricWithTimestamp( 152 | reportTime, 153 | prometheus.MustNewConstHistogram( 154 | t.newMetricDesc(fqName, labelKeys), 155 | count, 156 | sum, 157 | buckets, 158 | labelValues..., 159 | ), 160 | ) 161 | } 162 | 163 | func (t *timeSeriesMetrics) CollectNewConstMetric(timeSeries *monitoring.TimeSeries, reportTime time.Time, labelKeys []string, metricValueType prometheus.ValueType, metricValue float64, labelValues []string, metricKind string) { 164 | fqName := buildFQName(timeSeries) 165 | 166 | var v ConstMetric 167 | if t.fillMissingLabels || (metricKind == "DELTA" && t.aggregateDeltas) { 168 | v = ConstMetric{ 169 | FqName: fqName, 170 | LabelKeys: labelKeys, 171 | ValueType: metricValueType, 172 | Value: metricValue, 173 | LabelValues: labelValues, 174 | ReportTime: reportTime, 175 | CollectionTime: time.Now(), 176 | 177 | KeysHash: hashLabelKeys(labelKeys), 178 | } 179 | } 180 | 181 | if metricKind == "DELTA" && t.aggregateDeltas { 182 | t.counterStore.Increment(t.metricDescriptor, &v) 183 | return 184 | } 185 | 186 | if t.fillMissingLabels { 187 | vs, ok := t.constMetrics[fqName] 188 | if !ok { 189 | vs = make([]*ConstMetric, 0) 190 | } 191 | t.constMetrics[fqName] = append(vs, &v) 192 | return 193 | } 194 | 195 | t.ch <- t.newConstMetric(fqName, reportTime, labelKeys, metricValueType, metricValue, labelValues) 196 | } 197 | 198 | func (t *timeSeriesMetrics) newConstMetric(fqName string, reportTime time.Time, labelKeys []string, metricValueType prometheus.ValueType, metricValue float64, labelValues []string) prometheus.Metric { 199 | return prometheus.NewMetricWithTimestamp( 200 | reportTime, 201 | prometheus.MustNewConstMetric( 202 | t.newMetricDesc(fqName, labelKeys), 203 | metricValueType, 204 | metricValue, 205 | labelValues..., 206 | ), 207 | ) 208 | } 209 | 210 | func hashLabelKeys(labelKeys []string) uint64 { 211 | dh := hash.New() 212 | sortedKeys := make([]string, len(labelKeys)) 213 | copy(sortedKeys, labelKeys) 214 | sort.Strings(sortedKeys) 215 | for _, key := range sortedKeys { 216 | dh = hash.Add(dh, key) 217 | dh = hash.AddByte(dh, hash.SeparatorByte) 218 | } 219 | return dh 220 | } 221 | 222 | func (t *timeSeriesMetrics) Complete(reportingStartTime time.Time) { 223 | t.completeDeltaConstMetrics(reportingStartTime) 224 | t.completeDeltaHistogramMetrics(reportingStartTime) 225 | t.completeConstMetrics(t.constMetrics) 226 | t.completeHistogramMetrics(t.histogramMetrics) 227 | } 228 | 229 | func (t *timeSeriesMetrics) completeConstMetrics(constMetrics map[string][]*ConstMetric) { 230 | for _, vs := range constMetrics { 231 | if len(vs) > 1 { 232 | var needFill bool 233 | for i := 1; i < len(vs); i++ { 234 | if vs[0].KeysHash != vs[i].KeysHash { 235 | needFill = true 236 | } 237 | } 238 | if needFill { 239 | vs = fillConstMetricsLabels(vs) 240 | } 241 | } 242 | 243 | for _, v := range vs { 244 | t.ch <- t.newConstMetric(v.FqName, v.ReportTime, v.LabelKeys, v.ValueType, v.Value, v.LabelValues) 245 | } 246 | } 247 | } 248 | 249 | func (t *timeSeriesMetrics) completeHistogramMetrics(histograms map[string][]*HistogramMetric) { 250 | for _, vs := range histograms { 251 | if len(vs) > 1 { 252 | var needFill bool 253 | for i := 1; i < len(vs); i++ { 254 | if vs[0].KeysHash != vs[i].KeysHash { 255 
| needFill = true 256 | } 257 | } 258 | if needFill { 259 | vs = fillHistogramMetricsLabels(vs) 260 | } 261 | } 262 | for _, v := range vs { 263 | t.ch <- t.newConstHistogram(v.FqName, v.ReportTime, v.LabelKeys, v.Sum, v.Count, v.Buckets, v.LabelValues) 264 | } 265 | } 266 | } 267 | 268 | func (t *timeSeriesMetrics) completeDeltaConstMetrics(reportingStartTime time.Time) { 269 | descriptorMetrics := t.counterStore.ListMetrics(t.metricDescriptor.Name) 270 | now := time.Now().Truncate(time.Minute) 271 | 272 | constMetrics := map[string][]*ConstMetric{} 273 | for _, collected := range descriptorMetrics { 274 | // If the metric wasn't collected we should still export it at the next sample time to avoid staleness 275 | if reportingStartTime.After(collected.CollectionTime) { 276 | // Ideally we could use monitoring.MetricDescriptorMetadata.SamplePeriod to determine how many 277 | // samples were missed to adjust this but monitoring.MetricDescriptorMetadata is viewed as optional 278 | // for a monitoring.MetricDescriptor 279 | reportingLag := collected.CollectionTime.Sub(collected.ReportTime).Truncate(time.Minute) 280 | collected.ReportTime = now.Add(-reportingLag) 281 | } 282 | if t.fillMissingLabels { 283 | if _, exists := constMetrics[collected.FqName]; !exists { 284 | constMetrics[collected.FqName] = []*ConstMetric{} 285 | } 286 | constMetrics[collected.FqName] = append(constMetrics[collected.FqName], collected) 287 | } else { 288 | t.ch <- t.newConstMetric( 289 | collected.FqName, 290 | collected.ReportTime, 291 | collected.LabelKeys, 292 | collected.ValueType, 293 | collected.Value, 294 | collected.LabelValues, 295 | ) 296 | } 297 | } 298 | 299 | if t.fillMissingLabels { 300 | t.completeConstMetrics(constMetrics) 301 | } 302 | } 303 | 304 | func (t *timeSeriesMetrics) completeDeltaHistogramMetrics(reportingStartTime time.Time) { 305 | descriptorMetrics := t.histogramStore.ListMetrics(t.metricDescriptor.Name) 306 | now := time.Now().Truncate(time.Minute) 307 | 308 | histograms := map[string][]*HistogramMetric{} 309 | for _, collected := range descriptorMetrics { 310 | // If the histogram wasn't collected we should still export it at the next sample time to avoid staleness 311 | if reportingStartTime.After(collected.CollectionTime) { 312 | // Ideally we could use monitoring.MetricDescriptorMetadata.SamplePeriod to determine how many 313 | // samples were missed to adjust this but monitoring.MetricDescriptorMetadata is viewed as optional 314 | // for a monitoring.MetricDescriptor 315 | reportingLag := collected.CollectionTime.Sub(collected.ReportTime).Truncate(time.Minute) 316 | collected.ReportTime = now.Add(-reportingLag) 317 | } 318 | if t.fillMissingLabels { 319 | if _, exists := histograms[collected.FqName]; !exists { 320 | histograms[collected.FqName] = []*HistogramMetric{} 321 | } 322 | histograms[collected.FqName] = append(histograms[collected.FqName], collected) 323 | } else { 324 | t.ch <- t.newConstHistogram( 325 | collected.FqName, 326 | collected.ReportTime, 327 | collected.LabelKeys, 328 | collected.Sum, 329 | collected.Count, 330 | collected.Buckets, 331 | collected.LabelValues, 332 | ) 333 | } 334 | } 335 | 336 | if t.fillMissingLabels { 337 | t.completeHistogramMetrics(histograms) 338 | } 339 | } 340 | 341 | func fillConstMetricsLabels(metrics []*ConstMetric) []*ConstMetric { 342 | allKeys := make(map[string]struct{}) 343 | for _, metric := range metrics { 344 | for _, key := range metric.LabelKeys { 345 | allKeys[key] = struct{}{} 346 | } 347 | } 348 | 349 | for _, metric := 
range metrics { 350 | if len(metric.LabelKeys) != len(allKeys) { 351 | metricKeys := make(map[string]struct{}) 352 | for _, key := range metric.LabelKeys { 353 | metricKeys[key] = struct{}{} 354 | } 355 | for key := range allKeys { 356 | if _, ok := metricKeys[key]; !ok { 357 | metric.LabelKeys = append(metric.LabelKeys, key) 358 | metric.LabelValues = append(metric.LabelValues, "") 359 | } 360 | } 361 | } 362 | } 363 | 364 | return metrics 365 | } 366 | 367 | func fillHistogramMetricsLabels(metrics []*HistogramMetric) []*HistogramMetric { 368 | allKeys := make(map[string]struct{}) 369 | for _, metric := range metrics { 370 | for _, key := range metric.LabelKeys { 371 | allKeys[key] = struct{}{} 372 | } 373 | } 374 | 375 | for _, metric := range metrics { 376 | if len(metric.LabelKeys) != len(allKeys) { 377 | metricKeys := make(map[string]struct{}) 378 | for _, key := range metric.LabelKeys { 379 | metricKeys[key] = struct{}{} 380 | } 381 | for key := range allKeys { 382 | if _, ok := metricKeys[key]; !ok { 383 | metric.LabelKeys = append(metric.LabelKeys, key) 384 | metric.LabelValues = append(metric.LabelValues, "") 385 | } 386 | } 387 | } 388 | } 389 | 390 | return metrics 391 | } 392 | -------------------------------------------------------------------------------- /delta/counter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package delta 15 | 16 | import ( 17 | "fmt" 18 | "log/slog" 19 | "sort" 20 | "strings" 21 | "sync" 22 | "time" 23 | 24 | "google.golang.org/api/monitoring/v3" 25 | 26 | "github.com/prometheus-community/stackdriver_exporter/collectors" 27 | "github.com/prometheus-community/stackdriver_exporter/hash" 28 | ) 29 | 30 | type MetricEntry struct { 31 | Collected map[uint64]*collectors.ConstMetric 32 | mutex *sync.RWMutex 33 | } 34 | 35 | type InMemoryCounterStore struct { 36 | store *sync.Map 37 | ttl time.Duration 38 | logger *slog.Logger 39 | } 40 | 41 | // NewInMemoryCounterStore returns an implementation of CounterStore which is persisted in-memory 42 | func NewInMemoryCounterStore(logger *slog.Logger, ttl time.Duration) *InMemoryCounterStore { 43 | store := &InMemoryCounterStore{ 44 | store: &sync.Map{}, 45 | logger: logger, 46 | ttl: ttl, 47 | } 48 | 49 | return store 50 | } 51 | 52 | func (s *InMemoryCounterStore) Increment(metricDescriptor *monitoring.MetricDescriptor, currentValue *collectors.ConstMetric) { 53 | if currentValue == nil { 54 | return 55 | } 56 | 57 | tmp, _ := s.store.LoadOrStore(metricDescriptor.Name, &MetricEntry{ 58 | Collected: map[uint64]*collectors.ConstMetric{}, 59 | mutex: &sync.RWMutex{}, 60 | }) 61 | entry := tmp.(*MetricEntry) 62 | 63 | key := toCounterKey(currentValue) 64 | 65 | entry.mutex.Lock() 66 | defer entry.mutex.Unlock() 67 | existing := entry.Collected[key] 68 | 69 | if existing == nil { 70 | s.logger.Debug("Tracking new counter", "fqName", currentValue.FqName, "key", key, "current_value", currentValue.Value, "incoming_time", currentValue.ReportTime) 71 | entry.Collected[key] = currentValue 72 | return 73 | } 74 | 75 | if existing.ReportTime.Before(currentValue.ReportTime) { 76 | s.logger.Debug("Incrementing existing counter", "fqName", currentValue.FqName, "key", key, "current_value", existing.Value, "adding", currentValue.Value, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime) 77 | currentValue.Value = currentValue.Value + existing.Value 78 | entry.Collected[key] = currentValue 79 | return 80 | } 81 | 82 | s.logger.Debug("Ignoring old sample for counter", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime) 83 | } 84 | 85 | func toCounterKey(c *collectors.ConstMetric) uint64 { 86 | labels := make(map[string]string) 87 | keysCopy := append([]string{}, c.LabelKeys...) 
88 | for i := range c.LabelKeys { 89 | labels[c.LabelKeys[i]] = c.LabelValues[i] 90 | } 91 | sort.Strings(keysCopy) 92 | 93 | var keyParts []string 94 | for _, k := range keysCopy { 95 | keyParts = append(keyParts, fmt.Sprintf("%s:%s", k, labels[k])) 96 | } 97 | hashText := fmt.Sprintf("%s|%s", c.FqName, strings.Join(keyParts, "|")) 98 | h := hash.New() 99 | h = hash.Add(h, hashText) 100 | 101 | return h 102 | } 103 | 104 | func (s *InMemoryCounterStore) ListMetrics(metricDescriptorName string) []*collectors.ConstMetric { 105 | var output []*collectors.ConstMetric 106 | now := time.Now() 107 | ttlWindowStart := now.Add(-s.ttl) 108 | 109 | tmp, exists := s.store.Load(metricDescriptorName) 110 | if !exists { 111 | return output 112 | } 113 | entry := tmp.(*MetricEntry) 114 | 115 | entry.mutex.Lock() 116 | defer entry.mutex.Unlock() 117 | for key, collected := range entry.Collected { 118 | //Scan and remove metrics which are outside the TTL 119 | if ttlWindowStart.After(collected.CollectionTime) { 120 | s.logger.Debug("Deleting counter entry outside of TTL", "key", key, "fqName", collected.FqName) 121 | delete(entry.Collected, key) 122 | continue 123 | } 124 | 125 | //Dereference to create shallow copy 126 | metricCopy := *collected 127 | output = append(output, &metricCopy) 128 | } 129 | 130 | return output 131 | } 132 | -------------------------------------------------------------------------------- /delta/counter_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package delta_test 15 | 16 | import ( 17 | "time" 18 | 19 | . "github.com/onsi/ginkgo" 20 | . 
"github.com/onsi/gomega" 21 | "github.com/prometheus/common/promslog" 22 | "google.golang.org/api/monitoring/v3" 23 | 24 | "github.com/prometheus-community/stackdriver_exporter/collectors" 25 | "github.com/prometheus-community/stackdriver_exporter/delta" 26 | ) 27 | 28 | var _ = Describe("Counter", func() { 29 | var store *delta.InMemoryCounterStore 30 | var metric *collectors.ConstMetric 31 | descriptor := &monitoring.MetricDescriptor{Name: "This is a metric"} 32 | 33 | BeforeEach(func() { 34 | store = delta.NewInMemoryCounterStore(promslog.New(&promslog.Config{}), time.Minute) 35 | metric = &collectors.ConstMetric{ 36 | FqName: "counter_name", 37 | LabelKeys: []string{"labelKey"}, 38 | ValueType: 1, 39 | Value: 10, 40 | LabelValues: []string{"labelValue"}, 41 | ReportTime: time.Now().Truncate(time.Second), 42 | CollectionTime: time.Now().Truncate(time.Second), 43 | KeysHash: 4321, 44 | } 45 | }) 46 | 47 | It("can return tracked counters", func() { 48 | store.Increment(descriptor, metric) 49 | metrics := store.ListMetrics(descriptor.Name) 50 | 51 | Expect(len(metrics)).To(Equal(1)) 52 | Expect(metrics[0]).To(Equal(metric)) 53 | }) 54 | 55 | It("can increment counters multiple times", func() { 56 | store.Increment(descriptor, metric) 57 | 58 | metric2 := &collectors.ConstMetric{ 59 | FqName: "counter_name", 60 | LabelKeys: []string{"labelKey"}, 61 | ValueType: 1, 62 | Value: 20, 63 | LabelValues: []string{"labelValue"}, 64 | ReportTime: time.Now().Truncate(time.Second).Add(time.Second), 65 | CollectionTime: time.Now().Truncate(time.Second), 66 | KeysHash: 4321, 67 | } 68 | 69 | store.Increment(descriptor, metric2) 70 | 71 | metrics := store.ListMetrics(descriptor.Name) 72 | Expect(len(metrics)).To(Equal(1)) 73 | Expect(metrics[0].Value).To(Equal(float64(30))) 74 | }) 75 | 76 | It("will remove counters outside of TTL", func() { 77 | metric.CollectionTime = metric.CollectionTime.Add(-time.Hour) 78 | 79 | store.Increment(descriptor, metric) 80 | 81 | metrics := store.ListMetrics(descriptor.Name) 82 | Expect(len(metrics)).To(Equal(0)) 83 | }) 84 | }) 85 | -------------------------------------------------------------------------------- /delta/delta_suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package delta_test 15 | 16 | import ( 17 | "testing" 18 | 19 | . "github.com/onsi/ginkgo" 20 | . "github.com/onsi/gomega" 21 | ) 22 | 23 | func TestDelta(t *testing.T) { 24 | RegisterFailHandler(Fail) 25 | RunSpecs(t, "Delta Suite") 26 | } 27 | -------------------------------------------------------------------------------- /delta/histogram.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package delta 15 | 16 | import ( 17 | "fmt" 18 | "log/slog" 19 | "sort" 20 | "strings" 21 | "sync" 22 | "time" 23 | 24 | "google.golang.org/api/monitoring/v3" 25 | 26 | "github.com/prometheus-community/stackdriver_exporter/collectors" 27 | "github.com/prometheus-community/stackdriver_exporter/hash" 28 | ) 29 | 30 | type HistogramEntry struct { 31 | Collected map[uint64]*collectors.HistogramMetric 32 | mutex *sync.RWMutex 33 | } 34 | 35 | type InMemoryHistogramStore struct { 36 | store *sync.Map 37 | ttl time.Duration 38 | logger *slog.Logger 39 | } 40 | 41 | // NewInMemoryHistogramStore returns an implementation of HistogramStore which is persisted in-memory 42 | func NewInMemoryHistogramStore(logger *slog.Logger, ttl time.Duration) *InMemoryHistogramStore { 43 | store := &InMemoryHistogramStore{ 44 | store: &sync.Map{}, 45 | logger: logger, 46 | ttl: ttl, 47 | } 48 | 49 | return store 50 | } 51 | 52 | func (s *InMemoryHistogramStore) Increment(metricDescriptor *monitoring.MetricDescriptor, currentValue *collectors.HistogramMetric) { 53 | if currentValue == nil { 54 | return 55 | } 56 | 57 | tmp, _ := s.store.LoadOrStore(metricDescriptor.Name, &HistogramEntry{ 58 | Collected: map[uint64]*collectors.HistogramMetric{}, 59 | mutex: &sync.RWMutex{}, 60 | }) 61 | entry := tmp.(*HistogramEntry) 62 | 63 | key := toHistogramKey(currentValue) 64 | 65 | entry.mutex.Lock() 66 | defer entry.mutex.Unlock() 67 | existing := entry.Collected[key] 68 | 69 | if existing == nil { 70 | s.logger.Debug("Tracking new histogram", "fqName", currentValue.FqName, "key", key, "incoming_time", currentValue.ReportTime) 71 | entry.Collected[key] = currentValue 72 | return 73 | } 74 | 75 | if existing.ReportTime.Before(currentValue.ReportTime) { 76 | s.logger.Debug("Incrementing existing histogram", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime) 77 | currentValue.MergeHistogram(existing) 78 | // Replace the existing histogram by the new one after merging it. 79 | entry.Collected[key] = currentValue 80 | return 81 | } 82 | 83 | s.logger.Debug("Ignoring old sample for histogram", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime) 84 | } 85 | 86 | func toHistogramKey(hist *collectors.HistogramMetric) uint64 { 87 | labels := make(map[string]string) 88 | keysCopy := append([]string{}, hist.LabelKeys...) 
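// The copied keys are sorted below so the resulting key is independent of the order in
// which label keys arrive; the key is an FNV-1a hash of "<FqName>|<k1:v1>|<k2:v2>|...".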
89 | for i := range hist.LabelKeys { 90 | labels[hist.LabelKeys[i]] = hist.LabelValues[i] 91 | } 92 | sort.Strings(keysCopy) 93 | 94 | var keyParts []string 95 | for _, k := range keysCopy { 96 | keyParts = append(keyParts, fmt.Sprintf("%s:%s", k, labels[k])) 97 | } 98 | hashText := fmt.Sprintf("%s|%s", hist.FqName, strings.Join(keyParts, "|")) 99 | h := hash.New() 100 | h = hash.Add(h, hashText) 101 | 102 | return h 103 | } 104 | 105 | func (s *InMemoryHistogramStore) ListMetrics(metricDescriptorName string) []*collectors.HistogramMetric { 106 | var output []*collectors.HistogramMetric 107 | now := time.Now() 108 | ttlWindowStart := now.Add(-s.ttl) 109 | 110 | tmp, exists := s.store.Load(metricDescriptorName) 111 | if !exists { 112 | return output 113 | } 114 | entry := tmp.(*HistogramEntry) 115 | 116 | entry.mutex.Lock() 117 | defer entry.mutex.Unlock() 118 | for key, collected := range entry.Collected { 119 | // Scan and remove metrics which are outside the TTL 120 | if ttlWindowStart.After(collected.CollectionTime) { 121 | s.logger.Debug("Deleting histogram entry outside of TTL", "key", key, "fqName", collected.FqName) 122 | delete(entry.Collected, key) 123 | continue 124 | } 125 | 126 | copy := *collected 127 | output = append(output, ©) 128 | } 129 | 130 | return output 131 | } 132 | -------------------------------------------------------------------------------- /delta/histogram_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package delta_test 15 | 16 | import ( 17 | "time" 18 | 19 | . "github.com/onsi/ginkgo" 20 | . 
"github.com/onsi/gomega" 21 | "github.com/prometheus/common/promslog" 22 | "google.golang.org/api/monitoring/v3" 23 | 24 | "github.com/prometheus-community/stackdriver_exporter/collectors" 25 | "github.com/prometheus-community/stackdriver_exporter/delta" 26 | ) 27 | 28 | var _ = Describe("HistogramStore", func() { 29 | var store *delta.InMemoryHistogramStore 30 | var histogram *collectors.HistogramMetric 31 | descriptor := &monitoring.MetricDescriptor{Name: "This is a metric"} 32 | bucketKey := 1.00000000000000000001 33 | bucketValue := uint64(1000) 34 | 35 | BeforeEach(func() { 36 | store = delta.NewInMemoryHistogramStore(promslog.New(&promslog.Config{}), time.Minute) 37 | histogram = &collectors.HistogramMetric{ 38 | FqName: "histogram_name", 39 | LabelKeys: []string{"labelKey"}, 40 | Sum: 10, 41 | Count: 100, 42 | Buckets: map[float64]uint64{bucketKey: bucketValue}, 43 | LabelValues: []string{"labelValue"}, 44 | ReportTime: time.Now().Truncate(time.Second), 45 | CollectionTime: time.Now().Truncate(time.Second), 46 | KeysHash: 8765, 47 | } 48 | }) 49 | 50 | It("can return tracked histograms", func() { 51 | store.Increment(descriptor, histogram) 52 | metrics := store.ListMetrics(descriptor.Name) 53 | 54 | Expect(len(metrics)).To(Equal(1)) 55 | Expect(metrics[0]).To(Equal(histogram)) 56 | }) 57 | 58 | It("can merge histograms", func() { 59 | store.Increment(descriptor, histogram) 60 | 61 | // Shallow copy and change report time so they will merge 62 | nextValue := &collectors.HistogramMetric{ 63 | FqName: "histogram_name", 64 | LabelKeys: []string{"labelKey"}, 65 | Sum: 10, 66 | Count: 100, 67 | Buckets: map[float64]uint64{bucketKey: bucketValue}, 68 | LabelValues: []string{"labelValue"}, 69 | ReportTime: time.Now().Truncate(time.Second).Add(time.Second), 70 | CollectionTime: time.Now().Truncate(time.Second), 71 | KeysHash: 8765, 72 | } 73 | 74 | store.Increment(descriptor, nextValue) 75 | 76 | metrics := store.ListMetrics(descriptor.Name) 77 | 78 | Expect(len(metrics)).To(Equal(1)) 79 | histogram := metrics[0] 80 | Expect(histogram.Count).To(Equal(uint64(200))) 81 | Expect(histogram.Sum).To(Equal(20.0)) 82 | Expect(len(histogram.Buckets)).To(Equal(1)) 83 | Expect(histogram.Buckets[bucketKey]).To(Equal(bucketValue * 2)) 84 | }) 85 | 86 | It("will remove histograms outside of TTL", func() { 87 | histogram.CollectionTime = histogram.CollectionTime.Add(-time.Hour) 88 | 89 | store.Increment(descriptor, histogram) 90 | 91 | metrics := store.ListMetrics(descriptor.Name) 92 | Expect(len(metrics)).To(Equal(0)) 93 | }) 94 | }) 95 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/prometheus-community/stackdriver_exporter 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/PuerkitoBio/rehttp v1.4.0 7 | github.com/alecthomas/kingpin/v2 v2.4.0 8 | github.com/fatih/camelcase v1.0.0 9 | github.com/onsi/ginkgo v1.16.5 10 | github.com/onsi/gomega v1.36.2 11 | github.com/prometheus/client_golang v1.21.1 12 | github.com/prometheus/common v0.62.0 13 | github.com/prometheus/exporter-toolkit v0.13.2 14 | golang.org/x/net v0.37.0 15 | golang.org/x/oauth2 v0.28.0 16 | google.golang.org/api v0.224.0 17 | ) 18 | 19 | require ( 20 | cloud.google.com/go/auth v0.15.0 // indirect 21 | cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect 22 | cloud.google.com/go/compute/metadata v0.6.0 // indirect 23 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect 
24 | github.com/beorn7/perks v1.0.1 // indirect 25 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 26 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 27 | github.com/felixge/httpsnoop v1.0.4 // indirect 28 | github.com/fsnotify/fsnotify v1.4.9 // indirect 29 | github.com/go-logr/logr v1.4.2 // indirect 30 | github.com/go-logr/stdr v1.2.2 // indirect 31 | github.com/google/go-cmp v0.7.0 // indirect 32 | github.com/google/s2a-go v0.1.9 // indirect 33 | github.com/google/uuid v1.6.0 // indirect 34 | github.com/googleapis/enterprise-certificate-proxy v0.3.5 // indirect 35 | github.com/googleapis/gax-go/v2 v2.14.1 // indirect 36 | github.com/jpillora/backoff v1.0.0 // indirect 37 | github.com/klauspost/compress v1.17.11 // indirect 38 | github.com/mdlayher/socket v0.4.1 // indirect 39 | github.com/mdlayher/vsock v1.2.1 // indirect 40 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 41 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect 42 | github.com/nxadm/tail v1.4.8 // indirect 43 | github.com/prometheus/client_model v0.6.1 // indirect 44 | github.com/prometheus/procfs v0.15.1 // indirect 45 | github.com/xhit/go-str2duration/v2 v2.1.0 // indirect 46 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 47 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect 48 | go.opentelemetry.io/otel v1.34.0 // indirect 49 | go.opentelemetry.io/otel/metric v1.34.0 // indirect 50 | go.opentelemetry.io/otel/trace v1.34.0 // indirect 51 | golang.org/x/crypto v0.36.0 // indirect 52 | golang.org/x/sync v0.12.0 // indirect 53 | golang.org/x/sys v0.31.0 // indirect 54 | golang.org/x/text v0.23.0 // indirect 55 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect 56 | google.golang.org/grpc v1.70.0 // indirect 57 | google.golang.org/protobuf v1.36.5 // indirect 58 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect 59 | gopkg.in/yaml.v2 v2.4.0 // indirect 60 | gopkg.in/yaml.v3 v3.0.1 // indirect 61 | ) 62 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps= 2 | cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8= 3 | cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= 4 | cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= 5 | cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= 6 | cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= 7 | github.com/PuerkitoBio/rehttp v1.4.0 h1:rIN7A2s+O9fmHUM1vUcInvlHj9Ysql4hE+Y0wcl/xk8= 8 | github.com/PuerkitoBio/rehttp v1.4.0/go.mod h1:LUwKPoDbDIA2RL5wYZCNsQ90cx4OJ4AWBmq6KzWZL1s= 9 | github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= 10 | github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= 11 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= 12 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= 13 | github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0 h1:0NmehRCgyk5rljDQLKUO+cRJCnduDyn11+zGZIc9Z48= 14 | 
github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0/go.mod h1:6L7zgvqo0idzI7IO8de6ZC051AfXb5ipkIJ7bIA2tGA= 15 | github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= 16 | github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 17 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 18 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 19 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 20 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 21 | github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= 22 | github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 23 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 24 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 25 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 26 | github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 27 | github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= 28 | github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= 29 | github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= 30 | github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 31 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 32 | github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= 33 | github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 34 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 35 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 36 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 37 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 38 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 39 | github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= 40 | github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= 41 | github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= 42 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 43 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 44 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 45 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 46 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 47 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 48 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 49 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 50 | github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 51 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 52 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 53 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 54 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 55 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 56 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 57 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 58 | github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= 59 | github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= 60 | github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= 61 | github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= 62 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 63 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 64 | github.com/googleapis/enterprise-certificate-proxy v0.3.5 h1:VgzTY2jogw3xt39CusEnFJWm7rlsq5yL5q9XdLOuP5g= 65 | github.com/googleapis/enterprise-certificate-proxy v0.3.5/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= 66 | github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= 67 | github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= 68 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 69 | github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= 70 | github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 71 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= 72 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 73 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 74 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 75 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 76 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 77 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 78 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 79 | github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= 80 | github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= 81 | github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= 82 | github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= 83 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 84 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 85 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= 86 | github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 87 | github.com/nxadm/tail 
v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= 88 | github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= 89 | github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= 90 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 91 | github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 92 | github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 93 | github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= 94 | github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= 95 | github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= 96 | github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= 97 | github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 98 | github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= 99 | github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= 100 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 101 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 102 | github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= 103 | github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= 104 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 105 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 106 | github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= 107 | github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= 108 | github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ= 109 | github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g= 110 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 111 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 112 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 113 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 114 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 115 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 116 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 117 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 118 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 119 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 120 | github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= 121 | github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= 122 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 123 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 124 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 125 | 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= 126 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= 127 | go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= 128 | go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= 129 | go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= 130 | go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= 131 | go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= 132 | go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= 133 | go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= 134 | go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= 135 | go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= 136 | go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= 137 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 138 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 139 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 140 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 141 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= 142 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 143 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 144 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 145 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 146 | golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 147 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 148 | golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 149 | golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= 150 | golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 151 | golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= 152 | golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= 153 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 154 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 155 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 156 | golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= 157 | golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= 158 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 159 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 160 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 161 | golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 162 | golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 163 | golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 164 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 165 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 166 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 167 | golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 168 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 169 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 170 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 171 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 172 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 173 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 174 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 175 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 176 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 177 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 178 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 179 | golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 180 | golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= 181 | golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= 182 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 183 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 184 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 185 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 186 | google.golang.org/api v0.224.0 h1:Ir4UPtDsNiwIOHdExr3fAj4xZ42QjK7uQte3lORLJwU= 187 | google.golang.org/api v0.224.0/go.mod h1:3V39my2xAGkodXy0vEqcEtkqgw2GtrFL5WuBZlCTCOQ= 188 | google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= 189 | google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= 190 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= 191 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= 192 | google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= 193 
| google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= 194 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 195 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 196 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 197 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 198 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 199 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 200 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 201 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 202 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 203 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 204 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 205 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 206 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 207 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 208 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 209 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 210 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 211 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 212 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 213 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 214 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 215 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 216 | -------------------------------------------------------------------------------- /hash/fnv.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package hash 15 | 16 | const SeparatorByte = 255 17 | 18 | // https://github.com/prometheus/client_golang/blob/master/prometheus/fnv.go 19 | // Inline and byte-free variant of hash/fnv's fnv64a. 20 | 21 | const ( 22 | offset64 = 14695981039346656037 23 | prime64 = 1099511628211 24 | ) 25 | 26 | // New initializies a new fnv64a hash value. 
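// Callers in the delta package build a metric key by seeding with New and folding strings
// in with Add, e.g. h := New(); h = Add(h, "fqName|label:value").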
27 | func New() uint64 { 28 | return offset64 29 | } 30 | 31 | // Add adds a string to a fnv64a hash value, returning the updated hash. 32 | func Add(h uint64, s string) uint64 { 33 | for i := 0; i < len(s); i++ { 34 | h ^= uint64(s[i]) 35 | h *= prime64 36 | } 37 | return h 38 | } 39 | 40 | // AddByte adds a byte to a fnv64a hash value, returning the updated hash. 41 | func AddByte(h uint64, b byte) uint64 { 42 | h ^= uint64(b) 43 | h *= prime64 44 | return h 45 | } 46 | -------------------------------------------------------------------------------- /stackdriver_exporter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package main 15 | 16 | import ( 17 | "fmt" 18 | "log/slog" 19 | "net/http" 20 | "os" 21 | "slices" 22 | "strings" 23 | "time" 24 | 25 | "github.com/PuerkitoBio/rehttp" 26 | "github.com/alecthomas/kingpin/v2" 27 | "github.com/prometheus/client_golang/prometheus" 28 | versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" 29 | "github.com/prometheus/client_golang/prometheus/promhttp" 30 | "github.com/prometheus/common/promslog" 31 | "github.com/prometheus/common/promslog/flag" 32 | "github.com/prometheus/common/version" 33 | "github.com/prometheus/exporter-toolkit/web" 34 | webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag" 35 | "golang.org/x/net/context" 36 | "golang.org/x/oauth2/google" 37 | "google.golang.org/api/compute/v1" 38 | "google.golang.org/api/monitoring/v3" 39 | "google.golang.org/api/option" 40 | 41 | "github.com/prometheus-community/stackdriver_exporter/collectors" 42 | "github.com/prometheus-community/stackdriver_exporter/delta" 43 | "github.com/prometheus-community/stackdriver_exporter/utils" 44 | ) 45 | 46 | var ( 47 | // General exporter flags 48 | 49 | toolkitFlags = webflag.AddFlags(kingpin.CommandLine, ":9255") 50 | 51 | metricsPath = kingpin.Flag( 52 | "web.telemetry-path", "Path under which to expose Prometheus metrics.", 53 | ).Default("/metrics").String() 54 | 55 | stackdriverMetricsPath = kingpin.Flag( 56 | "web.stackdriver-telemetry-path", "Path under which to expose Stackdriver metrics.", 57 | ).Default("/metrics").String() 58 | 59 | projectID = kingpin.Flag( 60 | "google.project-id", "DEPRECATED - Comma separated list of Google Project IDs.
Use 'google.project-ids' instead.", 61 | ).String() 62 | 63 | projectIDs = kingpin.Flag( 64 | "google.project-ids", "Repeatable flag of Google Project IDs", 65 | ).Strings() 66 | 67 | projectsFilter = kingpin.Flag( 68 | "google.projects.filter", "Google projects search filter.", 69 | ).String() 70 | 71 | stackdriverMaxRetries = kingpin.Flag( 72 | "stackdriver.max-retries", "Max number of retries that should be attempted on 503 errors from stackdriver.", 73 | ).Default("0").Int() 74 | 75 | stackdriverHttpTimeout = kingpin.Flag( 76 | "stackdriver.http-timeout", "How long should stackdriver_exporter wait for a result from the Stackdriver API.", 77 | ).Default("10s").Duration() 78 | 79 | stackdriverMaxBackoffDuration = kingpin.Flag( 80 | "stackdriver.max-backoff", "Max time between each request in an exp backoff scenario.", 81 | ).Default("5s").Duration() 82 | 83 | stackdriverBackoffJitterBase = kingpin.Flag( 84 | "stackdriver.backoff-jitter", "The amount of jitter to introduce in a exp backoff scenario.", 85 | ).Default("1s").Duration() 86 | 87 | stackdriverRetryStatuses = kingpin.Flag( 88 | "stackdriver.retry-statuses", "The HTTP statuses that should trigger a retry.", 89 | ).Default("503").Ints() 90 | 91 | // Monitoring collector flags 92 | monitoringMetricsTypePrefixes = kingpin.Flag( 93 | "monitoring.metrics-type-prefixes", "DEPRECATED - Comma separated Google Stackdriver Monitoring Metric Type prefixes. Use 'monitoring.metrics-prefixes' instead.", 94 | ).String() 95 | 96 | monitoringMetricsPrefixes = kingpin.Flag( 97 | "monitoring.metrics-prefixes", "Google Stackdriver Monitoring Metric Type prefixes. Repeat this flag to scrape multiple prefixes.", 98 | ).Strings() 99 | 100 | monitoringMetricsInterval = kingpin.Flag( 101 | "monitoring.metrics-interval", "Interval to request the Google Stackdriver Monitoring Metrics for. Only the most recent data point is used.", 102 | ).Default("5m").Duration() 103 | 104 | monitoringMetricsOffset = kingpin.Flag( 105 | "monitoring.metrics-offset", "Offset for the Google Stackdriver Monitoring Metrics interval into the past.", 106 | ).Default("0s").Duration() 107 | 108 | monitoringMetricsIngestDelay = kingpin.Flag( 109 | "monitoring.metrics-ingest-delay", "Offset for the Google Stackdriver Monitoring Metrics interval into the past by the ingest delay from the metric's metadata.", 110 | ).Default("false").Bool() 111 | 112 | collectorFillMissingLabels = kingpin.Flag( 113 | "collector.fill-missing-labels", "Fill missing metrics labels with empty string to avoid label dimensions inconsistent failure.", 114 | ).Default("true").Bool() 115 | 116 | monitoringDropDelegatedProjects = kingpin.Flag( 117 | "monitoring.drop-delegated-projects", "Drop metrics from attached projects and fetch `project_id` only.", 118 | ).Default("false").Bool() 119 | 120 | monitoringMetricsExtraFilter = kingpin.Flag( 121 | "monitoring.filters", 122 | "Filters. 
i.e: pubsub.googleapis.com/subscription:resource.labels.subscription_id=monitoring.regex.full_match(\"my-subs-prefix.*\")", 123 | ).Strings() 124 | 125 | monitoringMetricsAggregateDeltas = kingpin.Flag( 126 | "monitoring.aggregate-deltas", "If enabled will treat all DELTA metrics as an in-memory counter instead of a gauge", 127 | ).Default("false").Bool() 128 | 129 | monitoringMetricsDeltasTTL = kingpin.Flag( 130 | "monitoring.aggregate-deltas-ttl", "How long should a delta metric continue to be exported after GCP stops producing a metric", 131 | ).Default("30m").Duration() 132 | 133 | monitoringDescriptorCacheTTL = kingpin.Flag( 134 | "monitoring.descriptor-cache-ttl", "How long should the metric descriptors for a prefixed be cached for", 135 | ).Default("0s").Duration() 136 | 137 | monitoringDescriptorCacheOnlyGoogle = kingpin.Flag( 138 | "monitoring.descriptor-cache-only-google", "Only cache descriptors for *.googleapis.com metrics", 139 | ).Default("true").Bool() 140 | ) 141 | 142 | func init() { 143 | prometheus.MustRegister(versioncollector.NewCollector("stackdriver_exporter")) 144 | } 145 | 146 | func getDefaultGCPProject(ctx context.Context) (*string, error) { 147 | credentials, err := google.FindDefaultCredentials(ctx, compute.ComputeScope) 148 | if err != nil { 149 | return nil, err 150 | } 151 | if credentials.ProjectID == "" { 152 | return nil, fmt.Errorf("unable to identify the gcloud project. Got empty string") 153 | } 154 | return &credentials.ProjectID, nil 155 | } 156 | 157 | func createMonitoringService(ctx context.Context) (*monitoring.Service, error) { 158 | googleClient, err := google.DefaultClient(ctx, monitoring.MonitoringReadScope) 159 | if err != nil { 160 | return nil, fmt.Errorf("Error creating Google client: %v", err) 161 | } 162 | 163 | googleClient.Timeout = *stackdriverHttpTimeout 164 | googleClient.Transport = rehttp.NewTransport( 165 | googleClient.Transport, // need to wrap DefaultClient transport 166 | rehttp.RetryAll( 167 | rehttp.RetryMaxRetries(*stackdriverMaxRetries), 168 | rehttp.RetryStatuses(*stackdriverRetryStatuses...)), // Cloud support suggests retrying on 503 errors 169 | rehttp.ExpJitterDelay(*stackdriverBackoffJitterBase, *stackdriverMaxBackoffDuration), // Set timeout to <10s as that is prom default timeout 170 | ) 171 | 172 | monitoringService, err := monitoring.NewService(ctx, option.WithHTTPClient(googleClient)) 173 | if err != nil { 174 | return nil, fmt.Errorf("Error creating Google Stackdriver Monitoring service: %v", err) 175 | } 176 | 177 | return monitoringService, nil 178 | } 179 | 180 | type handler struct { 181 | handler http.Handler 182 | logger *slog.Logger 183 | 184 | projectIDs []string 185 | metricsPrefixes []string 186 | metricsExtraFilters []collectors.MetricFilter 187 | additionalGatherer prometheus.Gatherer 188 | m *monitoring.Service 189 | collectors *collectors.CollectorCache 190 | } 191 | 192 | func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 193 | collectParams := r.URL.Query()["collect"] 194 | filters := make(map[string]bool) 195 | for _, param := range collectParams { 196 | filters[param] = true 197 | } 198 | 199 | if len(filters) > 0 { 200 | h.innerHandler(filters).ServeHTTP(w, r) 201 | return 202 | } 203 | 204 | h.handler.ServeHTTP(w, r) 205 | } 206 | 207 | func newHandler(projectIDs []string, metricPrefixes []string, metricExtraFilters []collectors.MetricFilter, m *monitoring.Service, logger *slog.Logger, additionalGatherer prometheus.Gatherer) *handler { 208 | var ttl time.Duration 209 | 
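// Example: --monitoring.aggregate-deltas-ttl=30m combined with --monitoring.descriptor-cache-ttl=1h
// yields a 1h collector cache; with delta aggregation and descriptor caching both disabled,
// the cache falls back to a 2h default (see below).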
// Add collector caching TTL as max of deltas aggregation or descriptor caching 210 | if *monitoringMetricsAggregateDeltas || *monitoringDescriptorCacheTTL > 0 { 211 | ttl = *monitoringMetricsDeltasTTL 212 | if *monitoringDescriptorCacheTTL > ttl { 213 | ttl = *monitoringDescriptorCacheTTL 214 | } 215 | } else { 216 | ttl = 2 * time.Hour 217 | } 218 | 219 | logger.Info("Creating collector cache", "ttl", ttl) 220 | 221 | h := &handler{ 222 | logger: logger, 223 | projectIDs: projectIDs, 224 | metricsPrefixes: metricPrefixes, 225 | metricsExtraFilters: metricExtraFilters, 226 | additionalGatherer: additionalGatherer, 227 | m: m, 228 | collectors: collectors.NewCollectorCache(ttl), 229 | } 230 | 231 | h.handler = h.innerHandler(nil) 232 | return h 233 | } 234 | 235 | func (h *handler) getCollector(project string, filters map[string]bool) (*collectors.MonitoringCollector, error) { 236 | filterdPrefixes := h.filterMetricTypePrefixes(filters) 237 | collectorKey := fmt.Sprintf("%s-%v", project, filterdPrefixes) 238 | 239 | if collector, found := h.collectors.Get(collectorKey); found { 240 | return collector, nil 241 | } 242 | 243 | collector, err := collectors.NewMonitoringCollector(project, h.m, collectors.MonitoringCollectorOptions{ 244 | MetricTypePrefixes: filterdPrefixes, 245 | ExtraFilters: h.metricsExtraFilters, 246 | RequestInterval: *monitoringMetricsInterval, 247 | RequestOffset: *monitoringMetricsOffset, 248 | IngestDelay: *monitoringMetricsIngestDelay, 249 | FillMissingLabels: *collectorFillMissingLabels, 250 | DropDelegatedProjects: *monitoringDropDelegatedProjects, 251 | AggregateDeltas: *monitoringMetricsAggregateDeltas, 252 | DescriptorCacheTTL: *monitoringDescriptorCacheTTL, 253 | DescriptorCacheOnlyGoogle: *monitoringDescriptorCacheOnlyGoogle, 254 | }, h.logger, delta.NewInMemoryCounterStore(h.logger, *monitoringMetricsDeltasTTL), delta.NewInMemoryHistogramStore(h.logger, *monitoringMetricsDeltasTTL)) 255 | if err != nil { 256 | return nil, err 257 | } 258 | h.collectors.Store(collectorKey, collector) 259 | return collector, nil 260 | } 261 | 262 | func (h *handler) innerHandler(filters map[string]bool) http.Handler { 263 | registry := prometheus.NewRegistry() 264 | 265 | for _, project := range h.projectIDs { 266 | monitoringCollector, err := h.getCollector(project, filters) 267 | if err != nil { 268 | h.logger.Error("error creating monitoring collector", "err", err) 269 | os.Exit(1) 270 | } 271 | registry.MustRegister(monitoringCollector) 272 | } 273 | var gatherers prometheus.Gatherer = registry 274 | if h.additionalGatherer != nil { 275 | gatherers = prometheus.Gatherers{ 276 | h.additionalGatherer, 277 | registry, 278 | } 279 | } 280 | opts := promhttp.HandlerOpts{ErrorLog: slog.NewLogLogger(h.logger.Handler(), slog.LevelError)} 281 | // Delegate http serving to Prometheus client library, which will call collector.Collect. 282 | return promhttp.HandlerFor(gatherers, opts) 283 | } 284 | 285 | // filterMetricTypePrefixes filters the initial list of metric type prefixes, with the ones coming from an individual 286 | // prometheus collect request. 
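// For example, with the configured prefix "redis.googleapis.com/stats/", a request with
// ?collect=redis.googleapis.com/stats/memory/usage narrows scraping to that metric type,
// while collect values that do not extend any configured prefix are ignored
// (see TestFilterMetricTypePrefixes).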
287 | func (h *handler) filterMetricTypePrefixes(filters map[string]bool) []string { 288 | filteredPrefixes := h.metricsPrefixes 289 | if len(filters) > 0 { 290 | filteredPrefixes = nil 291 | for _, prefix := range h.metricsPrefixes { 292 | for filter := range filters { 293 | if strings.HasPrefix(filter, prefix) { 294 | filteredPrefixes = append(filteredPrefixes, filter) 295 | } 296 | } 297 | } 298 | } 299 | return parseMetricTypePrefixes(filteredPrefixes) 300 | } 301 | 302 | func main() { 303 | promslogConfig := &promslog.Config{} 304 | flag.AddFlags(kingpin.CommandLine, promslogConfig) 305 | 306 | kingpin.Version(version.Print("stackdriver_exporter")) 307 | kingpin.HelpFlag.Short('h') 308 | kingpin.Parse() 309 | 310 | logger := promslog.New(promslogConfig) 311 | if *projectID != "" { 312 | logger.Warn("The google.project-id flag is deprecated and will be replaced by google.project-ids.") 313 | } 314 | if *monitoringMetricsTypePrefixes != "" { 315 | logger.Warn("The monitoring.metrics-type-prefixes flag is deprecated and will be replaced by monitoring.metrics-prefixes.") 316 | } 317 | if *monitoringMetricsTypePrefixes == "" && len(*monitoringMetricsPrefixes) == 0 { 318 | logger.Error("At least one GCP monitoring prefix is required.") 319 | os.Exit(1) 320 | } 321 | 322 | ctx := context.Background() 323 | var discoveredProjectIDs []string 324 | 325 | if len(*projectIDs) == 0 && *projectID == "" && *projectsFilter == "" { 326 | logger.Info("Neither projectIDs nor projectsFilter was provided. Trying to discover it") 327 | var err error 328 | defaultProject, err := getDefaultGCPProject(ctx) 329 | if err != nil { 330 | logger.Error("no explicit projectIDs and error trying to discover default GCloud project", "err", err) 331 | os.Exit(1) 332 | } 333 | discoveredProjectIDs = append(discoveredProjectIDs, *defaultProject) 334 | } 335 | 336 | monitoringService, err := createMonitoringService(ctx) 337 | if err != nil { 338 | logger.Error("failed to create monitoring service", "err", err) 339 | os.Exit(1) 340 | } 341 | 342 | if *projectsFilter != "" { 343 | projectIDsFromFilter, err := utils.GetProjectIDsFromFilter(ctx, *projectsFilter) 344 | if err != nil { 345 | logger.Error("failed to get project IDs from filter", "err", err) 346 | os.Exit(1) 347 | } 348 | discoveredProjectIDs = append(discoveredProjectIDs, projectIDsFromFilter...) 349 | } 350 | 351 | if len(*projectIDs) > 0 { 352 | discoveredProjectIDs = append(discoveredProjectIDs, *projectIDs...) 353 | } 354 | if *projectID != "" { 355 | discoveredProjectIDs = append(discoveredProjectIDs, strings.Split(*projectID, ",")...) 356 | } 357 | 358 | var metricsPrefixes []string 359 | if len(*monitoringMetricsPrefixes) > 0 { 360 | metricsPrefixes = append(metricsPrefixes, *monitoringMetricsPrefixes...) 361 | } 362 | if *monitoringMetricsTypePrefixes != "" { 363 | metricsPrefixes = append(metricsPrefixes, strings.Split(*monitoringMetricsTypePrefixes, ",")...)
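// Prefixes from the deprecated comma-separated flag are merged with the repeatable flag's
// values; parseMetricTypePrefixes below de-duplicates and collapses overlapping entries.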
364 | } 365 | 366 | logger.Info( 367 | "Starting stackdriver_exporter", 368 | "version", version.Info(), 369 | "build_context", version.BuildContext(), 370 | "metric_prefixes", fmt.Sprintf("%v", metricsPrefixes), 371 | "extra_filters", strings.Join(*monitoringMetricsExtraFilter, ","), 372 | "projectIDs", fmt.Sprintf("%v", discoveredProjectIDs), 373 | "projectsFilter", *projectsFilter, 374 | ) 375 | 376 | parsedMetricsPrefixes := parseMetricTypePrefixes(metricsPrefixes) 377 | metricExtraFilters := parseMetricExtraFilters() 378 | // drop duplicate projects 379 | slices.Sort(discoveredProjectIDs) 380 | uniqueProjectIds := slices.Compact(discoveredProjectIDs) 381 | 382 | if *metricsPath == *stackdriverMetricsPath { 383 | handler := newHandler( 384 | uniqueProjectIds, parsedMetricsPrefixes, metricExtraFilters, monitoringService, logger, prometheus.DefaultGatherer) 385 | http.Handle(*metricsPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, handler)) 386 | } else { 387 | logger.Info("Serving Stackdriver metrics at separate path", "path", *stackdriverMetricsPath) 388 | handler := newHandler( 389 | uniqueProjectIds, parsedMetricsPrefixes, metricExtraFilters, monitoringService, logger, nil) 390 | http.Handle(*stackdriverMetricsPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, handler)) 391 | http.Handle(*metricsPath, promhttp.Handler()) 392 | } 393 | 394 | if *metricsPath != "/" && *metricsPath != "" { 395 | landingConfig := web.LandingConfig{ 396 | Name: "Stackdriver Exporter", 397 | Description: "Prometheus Exporter for Google Stackdriver", 398 | Version: version.Info(), 399 | Links: []web.LandingLinks{ 400 | { 401 | Address: *metricsPath, 402 | Text: "Metrics", 403 | }, 404 | }, 405 | } 406 | if *metricsPath != *stackdriverMetricsPath { 407 | landingConfig.Links = append(landingConfig.Links, 408 | web.LandingLinks{ 409 | Address: *stackdriverMetricsPath, 410 | Text: "Stackdriver Metrics", 411 | }, 412 | ) 413 | } 414 | landingPage, err := web.NewLandingPage(landingConfig) 415 | if err != nil { 416 | logger.Error("error creating landing page", "err", err) 417 | os.Exit(1) 418 | } 419 | http.Handle("/", landingPage) 420 | } 421 | 422 | srv := &http.Server{} 423 | if err := web.ListenAndServe(srv, toolkitFlags, logger); err != nil { 424 | logger.Error("Error starting server", "err", err) 425 | os.Exit(1) 426 | } 427 | } 428 | 429 | func parseMetricTypePrefixes(inputPrefixes []string) []string { 430 | metricTypePrefixes := []string{} 431 | 432 | // Drop duplicate prefixes. 433 | slices.Sort(inputPrefixes) 434 | uniquePrefixes := slices.Compact(inputPrefixes) 435 | 436 | // Drop prefixes that start with another existing prefix to avoid error: 437 | // "collected metric xxx was collected before with the same name and label values". 438 | for i, prefix := range uniquePrefixes { 439 | if i != 0 { 440 | previousIndex := len(metricTypePrefixes) - 1 441 | 442 | // Drop current prefix if it starts with the previous one. 
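// e.g. "redis.googleapis.com/stats/memory/usage_ratio" is dropped once
// "redis.googleapis.com/stats/memory/usage" has been kept (see TestParseMetricTypePrefixes).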
443 | if strings.HasPrefix(prefix, metricTypePrefixes[previousIndex]) { 444 | continue 445 | } 446 | } 447 | metricTypePrefixes = append(metricTypePrefixes, prefix) 448 | } 449 | 450 | return metricTypePrefixes 451 | } 452 | 453 | func parseMetricExtraFilters() []collectors.MetricFilter { 454 | var extraFilters []collectors.MetricFilter 455 | for _, ef := range *monitoringMetricsExtraFilter { 456 | targetedMetricPrefix, filterQuery := utils.SplitExtraFilter(ef, ":") 457 | if targetedMetricPrefix != "" { 458 | extraFilter := collectors.MetricFilter{ 459 | TargetedMetricPrefix: strings.ToLower(targetedMetricPrefix), 460 | FilterQuery: filterQuery, 461 | } 462 | extraFilters = append(extraFilters, extraFilter) 463 | } 464 | } 465 | return extraFilters 466 | } 467 | -------------------------------------------------------------------------------- /stackdriver_exporter_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2024 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package main 15 | 16 | import "testing" 17 | import "reflect" 18 | 19 | func TestParseMetricTypePrefixes(t *testing.T) { 20 | inputPrefixes := []string{ 21 | "redis.googleapis.com/stats/memory/usage", 22 | "loadbalancing.googleapis.com/https/request_count", 23 | "loadbalancing.googleapis.com", 24 | "redis.googleapis.com/stats/memory/usage_ratio", 25 | "redis.googleapis.com/stats/memory/usage_ratio", 26 | } 27 | expectedOutputPrefixes := []string{ 28 | "loadbalancing.googleapis.com", 29 | "redis.googleapis.com/stats/memory/usage", 30 | } 31 | 32 | outputPrefixes := parseMetricTypePrefixes(inputPrefixes) 33 | 34 | if !reflect.DeepEqual(outputPrefixes, expectedOutputPrefixes) { 35 | t.Errorf("Metric type prefix sanitization did not produce expected output. Expected:\n%s\nGot:\n%s", expectedOutputPrefixes, outputPrefixes) 36 | } 37 | } 38 | 39 | func TestFilterMetricTypePrefixes(t *testing.T) { 40 | metricPrefixes := []string{ 41 | "redis.googleapis.com/stats/", 42 | } 43 | 44 | h := &handler{ 45 | metricsPrefixes: metricPrefixes, 46 | } 47 | 48 | inputFilters := map[string]bool{ 49 | "redis.googleapis.com/stats/memory/usage": true, 50 | "redis.googleapis.com/stats/memory/usage_ratio": true, 51 | "redis.googleapis.com": true, 52 | } 53 | 54 | expectedOutputPrefixes := []string{ 55 | "redis.googleapis.com/stats/memory/usage", 56 | } 57 | 58 | outputPrefixes := h.filterMetricTypePrefixes(inputFilters) 59 | 60 | if !reflect.DeepEqual(outputPrefixes, expectedOutputPrefixes) { 61 | t.Errorf("filterMetricTypePrefixes did not produce expected output. 
Expected:\n%s\nGot:\n%s", expectedOutputPrefixes, outputPrefixes) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /utils/utils.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package utils 15 | 16 | import ( 17 | "context" 18 | "regexp" 19 | "strings" 20 | 21 | "github.com/fatih/camelcase" 22 | "google.golang.org/api/cloudresourcemanager/v1" 23 | ) 24 | 25 | var ( 26 | safeNameRE = regexp.MustCompile(`[^a-zA-Z0-9_]*$`) 27 | ) 28 | 29 | func NormalizeMetricName(metricName string) string { 30 | var normalizedMetricName []string 31 | 32 | words := camelcase.Split(metricName) 33 | for _, word := range words { 34 | safeWord := strings.Trim(safeNameRE.ReplaceAllLiteralString(word, "_"), "_") 35 | lowerWord := strings.TrimSpace(strings.ToLower(safeWord)) 36 | if lowerWord != "" { 37 | normalizedMetricName = append(normalizedMetricName, lowerWord) 38 | } 39 | } 40 | 41 | return strings.Join(normalizedMetricName, "_") 42 | } 43 | 44 | func SplitExtraFilter(extraFilter string, separator string) (string, string) { 45 | mPrefix := strings.Split(extraFilter, separator) 46 | if mPrefix[0] == extraFilter { 47 | return "", "" 48 | } 49 | return mPrefix[0], strings.Join(mPrefix[1:], "") 50 | } 51 | 52 | func ProjectResource(projectID string) string { 53 | return "projects/" + projectID 54 | } 55 | 56 | // GetProjectIDsFromFilter returns a list of project IDs from a Google Cloud organization using a filter. 57 | func GetProjectIDsFromFilter(ctx context.Context, filter string) ([]string, error) { 58 | var projectIDs []string 59 | 60 | service, err := cloudresourcemanager.NewService(ctx) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | projects := service.Projects.List().Filter(filter) 66 | if err := projects.Pages(context.Background(), func(page *cloudresourcemanager.ListProjectsResponse) error { 67 | for _, project := range page.Projects { 68 | projectIDs = append(projectIDs, project.ProjectId) 69 | } 70 | return nil 71 | }); err != nil { 72 | return nil, err 73 | } 74 | 75 | return projectIDs, nil 76 | } 77 | -------------------------------------------------------------------------------- /utils/utils_suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package utils_test 15 | 16 | import ( 17 | . "github.com/onsi/ginkgo" 18 | . "github.com/onsi/gomega" 19 | 20 | "testing" 21 | ) 22 | 23 | func TestUtils(t *testing.T) { 24 | RegisterFailHandler(Fail) 25 | RunSpecs(t, "Utils Suite") 26 | } 27 | -------------------------------------------------------------------------------- /utils/utils_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package utils_test 15 | 16 | import ( 17 | . "github.com/onsi/ginkgo" 18 | . "github.com/onsi/gomega" 19 | 20 | . "github.com/prometheus-community/stackdriver_exporter/utils" 21 | ) 22 | 23 | var _ = Describe("NormalizeMetricName", func() { 24 | It("returns a normalized metric name", func() { 25 | Expect(NormalizeMetricName("This_is__a-MetricName.Example/with:0totals")).To(Equal("this_is_a_metric_name_example_with_0_totals")) 26 | }) 27 | }) 28 | 29 | var _ = Describe("ProjectResource", func() { 30 | It("returns a project resource", func() { 31 | Expect(ProjectResource("fake-project-1")).To(Equal("projects/fake-project-1")) 32 | }) 33 | }) 34 | --------------------------------------------------------------------------------