├── .github └── workflows │ ├── ci.yml │ └── linting.yml ├── .gitignore ├── .rubocop ├── .rubocop.yml ├── .streerc ├── Appraisals ├── CHANGELOG ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── Gemfile ├── Guardfile ├── LICENSE.txt ├── README.md ├── Rakefile ├── bench └── bench.rb ├── bin └── prometheus_exporter ├── examples └── custom_collector.rb ├── gemfiles ├── .bundle │ └── config ├── ar_60.gemfile ├── ar_61.gemfile ├── ar_70.gemfile └── ar_71.gemfile ├── lib ├── prometheus_exporter.rb └── prometheus_exporter │ ├── client.rb │ ├── instrumentation.rb │ ├── instrumentation │ ├── active_record.rb │ ├── delayed_job.rb │ ├── good_job.rb │ ├── hutch.rb │ ├── method_profiler.rb │ ├── periodic_stats.rb │ ├── process.rb │ ├── puma.rb │ ├── resque.rb │ ├── shoryuken.rb │ ├── sidekiq.rb │ ├── sidekiq_process.rb │ ├── sidekiq_queue.rb │ ├── sidekiq_stats.rb │ └── unicorn.rb │ ├── metric.rb │ ├── metric │ ├── base.rb │ ├── counter.rb │ ├── gauge.rb │ ├── histogram.rb │ └── summary.rb │ ├── middleware.rb │ ├── server.rb │ ├── server │ ├── active_record_collector.rb │ ├── collector.rb │ ├── collector_base.rb │ ├── delayed_job_collector.rb │ ├── good_job_collector.rb │ ├── hutch_collector.rb │ ├── metrics_container.rb │ ├── process_collector.rb │ ├── puma_collector.rb │ ├── resque_collector.rb │ ├── runner.rb │ ├── shoryuken_collector.rb │ ├── sidekiq_collector.rb │ ├── sidekiq_process_collector.rb │ ├── sidekiq_queue_collector.rb │ ├── sidekiq_stats_collector.rb │ ├── type_collector.rb │ ├── unicorn_collector.rb │ ├── web_collector.rb │ └── web_server.rb │ └── version.rb ├── prometheus_exporter.gemspec └── test ├── client_test.rb ├── custom_type_collector.rb ├── instrumentation ├── active_record_test.rb └── method_profiler_test.rb ├── metric ├── base_test.rb ├── counter_test.rb ├── gauge_test.rb ├── histogram_test.rb └── summary_test.rb ├── middleware_test.rb ├── prometheus_exporter_test.rb ├── server ├── active_record_collector_test.rb ├── collector_test.rb ├── 
good_job_collector_test.rb ├── metrics_container_test.rb ├── process_collector_test.rb ├── puma_collector_test.rb ├── resque_collector_test.rb ├── runner_test.rb ├── sidekiq_process_collector_test.rb ├── sidekiq_queue_collector_test.rb ├── sidekiq_stats_collector_test.rb ├── unicorn_collector_test.rb ├── web_collector_test.rb └── web_server_test.rb ├── sidekiq_middleware_test.rb └── test_helper.rb /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | schedule: 9 | - cron: "0 0 * * 0" # weekly 10 | 11 | permissions: 12 | contents: write 13 | packages: write 14 | 15 | env: 16 | DOCKER_REPO: ghcr.io/discourse/prometheus_exporter 17 | 18 | jobs: 19 | build: 20 | runs-on: ubuntu-latest 21 | name: Ruby ${{ matrix.ruby }} AR ${{ matrix.activerecord }} 22 | timeout-minutes: 10 23 | 24 | env: 25 | BUNDLE_GEMFILE: ${{ github.workspace }}/gemfiles/ar_${{ matrix.activerecord }}.gemfile 26 | 27 | strategy: 28 | fail-fast: false 29 | matrix: 30 | ruby: ["3.1", "3.2", "3.3"] 31 | activerecord: [61, 70, 71] 32 | 33 | steps: 34 | - uses: actions/checkout@v4 35 | 36 | - uses: ruby/setup-ruby@v1 37 | with: 38 | ruby-version: ${{ matrix.ruby }} 39 | bundler: latest 40 | bundler-cache: true 41 | 42 | - name: Run tests 43 | run: bundle exec rake 44 | 45 | publish: 46 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 47 | needs: build 48 | runs-on: ubuntu-latest 49 | 50 | outputs: 51 | new_version_published: ${{ steps.release.outputs.new_version }} 52 | 53 | steps: 54 | - uses: actions/checkout@v4 55 | 56 | - name: Release gem 57 | id: release 58 | uses: discourse/publish-rubygems-action@v2 59 | env: 60 | RUBYGEMS_API_KEY: ${{ secrets.RUBYGEMS_API_KEY }} 61 | GIT_EMAIL: team@discourse.org 62 | GIT_NAME: discoursebot 63 | 64 | publish_docker: 65 | needs: publish 66 | if: needs.publish.outputs.new_version_published == 'true' 67 | 
runs-on: ubuntu-latest 68 | timeout-minutes: 20 69 | 70 | steps: 71 | - uses: actions/checkout@v4 72 | - uses: docker/setup-qemu-action@v2 73 | - uses: docker/setup-buildx-action@v2 74 | 75 | - name: Set vars 76 | id: vars 77 | run: | 78 | ruby -r ./lib/prometheus_exporter/version.rb -e 'print "version=#{PrometheusExporter::VERSION}"' >> $GITHUB_OUTPUT 79 | 80 | - name: Login to Github Container Registry 81 | uses: docker/login-action@v2 82 | with: 83 | registry: ghcr.io 84 | username: ${{ github.actor }} 85 | password: ${{ secrets.GITHUB_TOKEN }} 86 | 87 | - name: Build and push images 88 | uses: docker/build-push-action@v3 89 | with: 90 | context: . 91 | push: true 92 | platforms: linux/amd64,linux/arm64 93 | build-args: | 94 | GEM_VERSION=${{ steps.vars.outputs.version }} 95 | tags: | 96 | ${{ env.DOCKER_REPO }}:${{ steps.vars.outputs.version }} 97 | ${{ env.DOCKER_REPO }}:latest 98 | -------------------------------------------------------------------------------- /.github/workflows/linting.yml: -------------------------------------------------------------------------------- 1 | name: Linting 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | jobs: 10 | lint: 11 | runs-on: ubuntu-latest 12 | timeout-minutes: 10 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | 17 | - name: Set up Ruby 18 | uses: ruby/setup-ruby@v1 19 | with: 20 | ruby-version: 3.4 21 | bundler: latest 22 | bundler-cache: true 23 | 24 | - name: Rubocop 25 | run: bundle exec rubocop 26 | 27 | - name: Syntax tree 28 | run: bundle exec stree check Gemfile $(git ls-files '*.rb') $(git ls-files '*.rake') $(git ls-files '*.thor') 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.bundle/ 2 | /.yardoc 3 | /_yardoc/ 4 | /coverage/ 5 | /doc/ 6 | /pkg/ 7 | /spec/reports/ 8 | /tmp/ 9 | Gemfile.lock 10 | /gemfiles/*.gemfile.lock 11 | 12 | 13 | 
.rubocop-https---raw-githubusercontent-com-discourse-discourse-master--rubocop-yml 14 | -------------------------------------------------------------------------------- /.rubocop: -------------------------------------------------------------------------------- 1 | --ignore-unrecognized-cops 2 | -------------------------------------------------------------------------------- /.rubocop.yml: -------------------------------------------------------------------------------- 1 | inherit_gem: 2 | rubocop-discourse: default.yml 3 | 4 | AllCops: 5 | Exclude: 6 | - 'gemfiles/**/*' 7 | - 'vendor/**/*' 8 | 9 | Discourse/Plugins/NoMonkeyPatching: 10 | Enabled: false 11 | 12 | Discourse/Plugins/NamespaceMethods: 13 | Exclude: 14 | - bin/prometheus_exporter 15 | 16 | Style/InvertibleUnlessCondition: 17 | Exclude: 18 | - '*.gemspec' 19 | -------------------------------------------------------------------------------- /.streerc: -------------------------------------------------------------------------------- 1 | --print-width=100 2 | --plugins=plugin/trailing_comma,disable_ternary 3 | -------------------------------------------------------------------------------- /Appraisals: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | appraise "ar-60" do 4 | gem "activerecord", "~> 6.0.0" 5 | end 6 | 7 | appraise "ar-61" do 8 | gem "activerecord", "~> 6.1.1" 9 | end 10 | 11 | appraise "ar-70" do 12 | # latest version 13 | gem "activerecord", "~> 7.1.2" 14 | end 15 | -------------------------------------------------------------------------------- /CHANGELOG: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
7 | 8 | ## [Unreleased] 9 | 10 | - FEATURE: Added puma_busy_threads metric that provides a holistic view of server workload by calculating (active threads - idle threads) + queued requests 11 | 12 | ## [2.2.0] - 2024-12-05 13 | 14 | ### Added 15 | 16 | - Feature: Add Dalli::Client memcache metrics for web_collector 17 | 18 | ### Fixed 19 | 20 | - FIX: Ensure socket is closed when error is raised while opening socket 21 | 22 | ## [2.1.1] - 2024-06-19 23 | 24 | ### Added 25 | 26 | - FEATURE: improve good_job instrumentation 27 | - FEATURE: imstrumentation for malloc / oldmalloc increace in GC stats 28 | 29 | ### Fixed 30 | 31 | - FIX: improve Ruby 3.X support 32 | 33 | ## [2.1.0] - 2024-01-08 34 | 35 | ### Added 36 | 37 | - FEATURE: good_job instrumentation 38 | 39 | ### Changed 40 | 41 | - PERF: improve performance of histogram 42 | - DEV: use new metric collector pattern so we reuse code between collectors 43 | 44 | ## [2.0.8] - 2023-01-20 45 | 46 | ### Added 47 | 48 | - FEATURE: attempting to make our first docker release 49 | 50 | ## [2.0.7] - 2023-01-13 51 | 52 | ### Added 53 | - FEATURE: allow binding server to both ipv4 and v6 54 | 55 | ### Fixed 56 | 57 | - FIX: expire stale sidekiq metrics 58 | 59 | 60 | ## [2.0.6] - 2022-11-22 61 | 62 | ### Fixed 63 | 64 | - FIX: use user specified labels over default in merge conflict 65 | - FIX: sidekiq stats collector memory leak 66 | 67 | ## [2.0.5] - 2022-11-15 68 | 69 | ### Fixed 70 | 71 | - FIX: regression :prepend style instrumentation not working correctly 72 | 73 | ## [2.0.4] - 2022-11-10 74 | 75 | ### Fixed 76 | 77 | - FIX support for Redis 5 gem instrumentation 78 | 79 | ## [2.0.3] - 2022-05-23 80 | 81 | ### Added 82 | 83 | - FEATURE: new ping endpoint for keepalive checks 84 | 85 | ### Fixed 86 | 87 | - FIX: order histogram correctly for GCP support 88 | - FIX: improve sidekiq instrumentation 89 | 90 | ## [2.0.2] - 2022-02-25 91 | 92 | ### Fixed 93 | 94 | - FIX: runner was not requiring unicorn integration 
correctly leading to a crash 95 | 96 | ## [2.0.1] - 2022-02-24 97 | 98 | ### Fixed 99 | 100 | - FIX: ensure threads do not leak when calling #start repeatedly on instrumentation classes, this is an urgent patch for Puma integration 101 | 102 | ## [2.0.0] - 2022-02-18 103 | 104 | ### Added 105 | 106 | - FEATURE: Add per worker custom labels 107 | - FEATURE: support custom histogram buckets 108 | 109 | ### Fixed 110 | 111 | - FIX: all metrics are exposing status label, and not only `http_requests_total` 112 | 113 | ### Changed 114 | 115 | - BREAKING: rename all `http_duration` metrics to `http_request_duration` to match prometheus official naming conventions (See https://prometheus.io/docs/practices/naming/#metric-names). 116 | 117 | ## [1.0.1] - 2021-12-22 118 | 119 | ### Added 120 | 121 | - FEATURE: add labels to preflight requests 122 | - FEATURE: SidekiqStats metrics 123 | 124 | ### Fixed 125 | 126 | - FIX: mintor refactors to Sidekiq metrics 127 | 128 | ## [1.0.0] - 2021-11-23 129 | 130 | ### Added 131 | 132 | - FEATURE: Sidekiq process metrics 133 | - FEATURE: Allow collecting web metrics as histograms 134 | 135 | ### Fixed 136 | 137 | - FIX: logger improved for web server 138 | - FIX: Remove job labels from DelayedJob queues 139 | 140 | ### Changed 141 | 142 | - BREAKING: rename metrics to match prometheus official naming conventions (See https://prometheus.io/docs/practices/naming/#metric-names) 143 | 144 | 145 | ## [0.8.1] - 2021-08-04 146 | 147 | ### Added 148 | 149 | - FEATURE: swap from hardcoded STDERR to logger pattern (see README for details) 150 | 151 | ## [0.8.0] - 2021-07-05 152 | 153 | ### Added 154 | 155 | - FEATURE: add job_name and queue_name labels to delayed job metrics 156 | - FEATURE: always scope puma metrics on hostname in collector 157 | - FEATURE: add customizable labels option to puma collector 158 | - FEATURE: support for Resque 159 | - FEATURE: Improve Active Record instrumentation 160 | - FEATURE: Support HTTP_X_AMZN_TRACE_ID when 
supplied 161 | 162 | ### Fixed 163 | 164 | - FIX: handle ThreadError more gracefully in cases where process shuts down 165 | - FIX: Add source location to MethodProfiler patches 166 | 167 | ### Removed 168 | 169 | - DEV: Remove support for EOL ruby 2.5 170 | 171 | ## [0.7.0] - 2020-12-29 172 | 173 | ### Added 174 | 175 | - FEATURE: clean pattern for overriding middleware labels was introduced (in README) 176 | 177 | ### Fixed 178 | 179 | - Fix: Better support for forking 180 | 181 | ### Changed 182 | 183 | - Dev: Removed support from EOL rubies, only 2.5, 2.6, 2.7 and 3.0 are supported now. 184 | - Dev: Better support for Ruby 3.0, explicitly depending on webrick 185 | - Dev: Rails 6.1 instrumentation support 186 | 187 | ## [0.6.0] - 2020-11-17 188 | 189 | ### Added 190 | 191 | - FEATURE: add support for basic-auth in the prometheus_exporter web server 192 | 193 | ## [0.5.3] - 2020-07-29 194 | 195 | ### Added 196 | 197 | - FEATURE: added #remove to all metric types so users can remove specific labels if needed 198 | 199 | ## [0.5.2] - 2020-07-01 200 | 201 | ### Added 202 | 203 | - FEATURE: expanded instrumentation for sidekiq 204 | - FEATURE: configurable default labels 205 | 206 | ## [0.5.1] - 2020-02-25 207 | 208 | ### Added 209 | 210 | - FEATURE: Allow configuring the default client's host and port via environment variables 211 | 212 | ## [0.5.0] - 2020-02-14 213 | 214 | ### Fixed 215 | 216 | - FIX: Avoid calling `hostname` aggressively, instead cache it on the exporter instance 217 | 218 | ### Changed 219 | 220 | - Breaking change: listen only to localhost by default to prevent unintended insecure configuration 221 | 222 | ## [0.4.17] - 2020-01-13 223 | 224 | ### Added 225 | 226 | - FEATURE: add support for `to_h` on all metrics which can be used to query existing key/values 227 | 228 | ## [0.4.16] - 2019-11-04 229 | 230 | ### Added 231 | 232 | - FEATURE: Support #reset! 
on all metric types to reset a metric to default 233 | 234 | ## [0.4.15] - 2019-11-04 235 | 236 | ### Added 237 | 238 | - FEATURE: Improve delayed job collector, add pending counts 239 | - FEATURE: New ActiveRecord collector (documented in readme) 240 | - FEATURE: Allow passing in histogram and summary options 241 | - FEATURE: Allow custom labels for unicorn collector 242 | 243 | ## [0.4.14] - 2019-09-10 244 | 245 | ### Added 246 | 247 | - FEATURE: allow finding metrics by name RemoteMetric #find_registered_metric 248 | 249 | ### Fixed 250 | 251 | - FIX: guard socket closing 252 | 253 | ## [0.4.13] - 2019-07-09 254 | 255 | ### Fixed 256 | 257 | - Fix: Memory leak in unicorn and puma collectors 258 | 259 | ## [0.4.12] - 2019-05-30 260 | 261 | ### Fixed 262 | 263 | - Fix: unicorn collector reporting incorrect number of unicorn workers 264 | 265 | ## [0.4.11] - 2019-05-15 266 | 267 | ### Fixed 268 | 269 | - Fix: Handle stopping nil worker_threads in Client 270 | 271 | ### Changed 272 | 273 | - Dev: add frozen string literals 274 | 275 | ## [0.4.10] - 2019-04-29 276 | 277 | ### Fixed 278 | 279 | - Fix: Custom label support for puma collector 280 | - Fix: Raindrops socket collector not working correctly 281 | 282 | ## [0.4.9] - 2019-04-11 283 | 284 | ### Fixed 285 | 286 | - Fix: Gem was not working correctly in Ruby 2.4 and below due to a syntax error 287 | 288 | ## [0.4.8] - 2019-04-10 289 | 290 | ### Added 291 | 292 | - Feature: added helpers for instrumenting unicorn using raindrops 293 | 294 | ## [0.4.7] - 2019-04-08 295 | 296 | ### Fixed 297 | 298 | - Fix: collector was not escaping " \ and \n correctly. This could lead 299 | to a corrupt payload in some cases. 
300 | 301 | ## [0.4.6] - 2019-04-02 302 | 303 | ### Added 304 | 305 | - Feature: Allow resetting a counter 306 | - Feature: Add sidekiq metrics: restarted, dead jobs counters 307 | 308 | ### Fixed 309 | 310 | - Fix: Client shutting down before sending metrics to collector 311 | 312 | ## [0.4.5] - 2019-02-14 313 | 314 | ### Added 315 | 316 | - Feature: Allow process collector to ship custom labels for all process metrics 317 | 318 | ### Fixed 319 | 320 | - Fix: Always scope process metrics on hostname in collector 321 | 322 | ## [0.4.4] - 2019-02-13 323 | 324 | ### Added 325 | 326 | - Feature: add support for local metric collection without using HTTP 327 | 328 | ## [0.4.3] - 2019-02-11 329 | 330 | ### Added 331 | 332 | - Feature: Add alias for Gauge #observe called #set, this makes it a bit easier to migrate from prom 333 | - Feature: Add increment and decrement to Counter 334 | 335 | ## [0.4.2] - 2018-11-30 336 | 337 | ### Fixed 338 | 339 | - Fix: setting a Gauge to nil will remove Gauge (setting to non numeric will raise) 340 | 341 | ## [0.4.0] - 2018-10-23 342 | 343 | ### Added 344 | 345 | - Feature: histogram support 346 | - Feature: custom quantile support for summary 347 | - Feature: Puma metrics 348 | 349 | ### Fixed 350 | 351 | - Fix: delayed job metrics 352 | 353 | ## [0.3.4] - 2018-10-02 354 | 355 | ### Fixed 356 | 357 | - Fix: custom collector via CLI was not working correctly 358 | 359 | ## [0.3.3] 360 | 361 | ### Added 362 | 363 | - Feature: Add more metrics to delayed job collector 364 | 365 | ## [0.3.2] 366 | 367 | ### Added 368 | 369 | - Feature: Add posibility to set custom_labels on multi process mode 370 | 371 | ## [0.3.1] 372 | 373 | ### Changed 374 | 375 | - Allow runner to accept a --timeout var 376 | - Allow runner to accept a blank prefix 377 | 378 | ## [0.3.0] 379 | 380 | ### Changed 381 | 382 | - Breaking change: Follow Prometheus metric [naming conventions](https://prometheus.io/docs/practices/naming/#metric-names) 383 | 384 | ## [0.1.15] 
- 2018-02-19 385 | 386 | ### Added 387 | 388 | - Feature: Prefer to use oj if it is loadable 389 | 390 | ## [0.1.14] - 2018-02-17 391 | 392 | ### Added 393 | 394 | - Feature: runner was extracted so it can be reused @304 395 | 396 | ### Fixed 397 | 398 | - Fix: error when shipping summary metric with no labels 399 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying 
the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at sam.saffron@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [http://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: http://contributor-covenant.org 74 | [version]: http://contributor-covenant.org/version/1/4/ 75 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG RUBY_VERSION=3.1 2 | ARG GEM_VERSION= 3 | 4 | FROM ruby:${RUBY_VERSION}-slim 5 | 6 | RUN apt update && apt install -y curl 7 | 8 | RUN gem install --no-doc --version=${GEM_VERSION} prometheus_exporter 9 | 10 | EXPOSE 9394 11 | ENTRYPOINT ["prometheus_exporter", "-b", "ANY"] 12 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | source "https://rubygems.org" 4 | 5 | git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } 6 | 7 | # Specify your gem's dependencies in prometheus_exporter.gemspec 8 | gemspec 9 | -------------------------------------------------------------------------------- /Guardfile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | guard :minitest do 4 | # with Minitest::Unit 5 | watch(%r{^test/(.*)\/?(.*)_test\.rb$}) 6 | watch(%r{^lib/prometheus_exporter/(.*/)?([^/]+)\.rb$}) { |m| "test/#{m[1]}#{m[2]}_test.rb" } 7 | watch(%r{^test/test_helper\.rb$}) { 'test' } 8 | end 9 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018 Discourse 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software 
# Minimal throwaway collector used by the benchmark: it parses every
# incoming JSON payload (the work being measured) and fires the supplied
# callback once per batch of 10,000 processed messages.
class Collector
  # Number of messages between callback invocations.
  BATCH_SIZE = 10_000

  # done - a callable invoked after every BATCH_SIZE processed messages.
  def initialize(done)
    @processed = 0
    @done = done
  end

  # Parse one message and bump the counter; the parsed result is
  # intentionally discarded — only the parse cost matters here.
  def process(message)
    JSON.parse(message)
    @processed += 1
    @done.call if (@processed % BATCH_SIZE).zero?
  end

  # The benchmark never scrapes metrics, so there is nothing to render.
  def prometheus_metrics_text
    nil
  end
end
do |o| 36 | options[:timeout] = o.to_i 37 | end 38 | opt.on('--prefix METRIC_PREFIX', "Prefix to apply to all metrics (default: #{PrometheusExporter::DEFAULT_PREFIX})") do |o| 39 | options[:prefix] = o.to_s 40 | end 41 | opt.on('--label METRIC_LABEL', "Label to apply to all metrics (default: #{PrometheusExporter::DEFAULT_LABEL})") do |o| 42 | options[:label] = JSON.parse(o.to_s) 43 | end 44 | opt.on('-c', '--collector FILE', String, "(optional) Custom collector to run") do |o| 45 | custom_collector_filename = o.to_s 46 | end 47 | opt.on('-a', '--type-collector FILE', String, "(optional) Custom type collectors to run in main collector") do |o| 48 | custom_type_collectors_filenames << o 49 | end 50 | opt.on('-v', '--verbose') do |o| 51 | options[:verbose] = true 52 | end 53 | opt.on('-g', '--histogram', "Use histogram instead of summary for aggregations") do |o| 54 | options[:histogram] = true 55 | end 56 | opt.on('--auth FILE', String, "(optional) enable basic authentication using a htpasswd FILE") do |o| 57 | options[:auth] = o 58 | end 59 | opt.on('--realm REALM', String, "(optional) Use REALM for basic authentication (default: \"#{PrometheusExporter::DEFAULT_REALM}\")") do |o| 60 | options[:realm] = o 61 | end 62 | 63 | opt.on('--unicorn-listen-address ADDRESS', String, '(optional) Address where unicorn listens on (unix or TCP address)') do |o| 64 | options[:unicorn_listen_address] = o 65 | end 66 | 67 | opt.on('--unicorn-master PID_FILE', String, '(optional) PID file of unicorn master process to monitor unicorn') do |o| 68 | options[:unicorn_pid_file] = o 69 | end 70 | 71 | opt.on('--logger-path PATH', String, '(optional) Path to file for logger output. Defaults to STDERR') do |o| 72 | options[:logger_path] = o 73 | end 74 | end.parse! 
# frozen_string_literal: true

# Example of a fully custom collector for use with `prometheus_exporter -c`.
#
# NOTE: the runner discovers custom collectors by scanning for subclasses of
# PrometheusExporter::Server::CollectorBase (see bin/prometheus_exporter), so
# the superclass name here must match exactly. The previous name
# `BaseCollector` does not exist and raised a NameError on load.
class MyCustomCollector < PrometheusExporter::Server::CollectorBase
  def initialize
    @gauge1 = PrometheusExporter::Metric::Gauge.new("thing1", "I am thing 1")
    @gauge2 = PrometheusExporter::Metric::Gauge.new("thing2", "I am thing 2")
    # Guards both gauges: process and prometheus_metrics_text run on
    # different threads (collector vs. web server).
    @mutex = Mutex.new
  end

  # obj - a decoded JSON payload sent by a client; only the keys "thing1"
  # and "thing2" are observed, anything else is ignored.
  def process(obj)
    @mutex.synchronize do
      if (thing1 = obj["thing1"])
        @gauge1.observe(thing1)
      end

      if (thing2 = obj["thing2"])
        @gauge2.observe(thing2)
      end
    end
  end

  # Renders both gauges in the Prometheus text exposition format.
  def prometheus_metrics_text
    @mutex.synchronize { "#{@gauge1.to_prometheus_text}\n#{@gauge2.to_prometheus_text}" }
  end
end
# frozen_string_literal: true

require_relative "prometheus_exporter/version"
require "json"

module PrometheusExporter
  # per: https://github.com/prometheus/prometheus/wiki/Default-port-allocations
  DEFAULT_PORT = 9394
  DEFAULT_BIND_ADDRESS = "localhost"
  DEFAULT_PREFIX = "ruby_"
  DEFAULT_LABEL = {}
  DEFAULT_TIMEOUT = 2
  DEFAULT_REALM = "Prometheus Exporter"

  # Thin adapter giving Oj the same parse/dump interface as the JSON
  # module, using compat mode so its output matches the JSON gem.
  class OjCompat
    def self.parse(obj)
      Oj.compat_load(obj)
    end

    def self.dump(obj)
      Oj.dump(obj, mode: :compat)
    end
  end

  # Memoized machine hostname; falls back to "unknown-host" when the
  # lookup fails so metric payloads always carry some hostname.
  def self.hostname
    @hostname ||=
      begin
        require "socket"
        Socket.gethostname
      rescue => e
        STDERR.puts "Unable to lookup hostname #{e}"
        "unknown-host"
      end
  end

  # Picks the JSON serializer class: Oj (compat mode) when preferred or
  # available, otherwise the stdlib JSON module.
  def self.detect_json_serializer(preferred)
    preferred = :oj if preferred.nil? && has_oj?
    preferred == :oj ? OjCompat : JSON
  end

  # True when the oj gem can be loaded. Memoized with an explicit
  # nil-check (not ||=) so a *false* result is also cached and the
  # require is not retried on every call. Replaces the previous
  # @@has_oj :T/:F class-variable workaround.
  def self.has_oj?
    if @has_oj.nil?
      @has_oj =
        begin
          require "oj"
          true
        rescue LoadError
          false
        end
    end
    @has_oj
  end
end
logger 66 | @logger.level = log_level 67 | @metrics = [] 68 | 69 | @queue = Queue.new 70 | 71 | @socket = nil 72 | @socket_started = nil 73 | @socket_pid = nil 74 | 75 | max_queue_size ||= MAX_QUEUE_SIZE 76 | max_queue_size = max_queue_size.to_i 77 | 78 | raise ArgumentError, "max_queue_size must be larger than 0" if max_queue_size <= 0 79 | 80 | @max_queue_size = max_queue_size 81 | @host = host 82 | @port = port 83 | @worker_thread = nil 84 | @mutex = Mutex.new 85 | @thread_sleep = thread_sleep 86 | 87 | @json_serializer = json_serializer == :oj ? PrometheusExporter::OjCompat : JSON 88 | 89 | @custom_labels = custom_labels 90 | @process_queue_once_and_stop = process_queue_once_and_stop 91 | end 92 | 93 | def custom_labels=(custom_labels) 94 | @custom_labels = custom_labels 95 | end 96 | 97 | def register(type, name, help, opts = nil) 98 | metric = RemoteMetric.new(type: type, name: name, help: help, client: self, opts: opts) 99 | @metrics << metric 100 | metric 101 | end 102 | 103 | def find_registered_metric(name, type: nil, help: nil) 104 | @metrics.find do |metric| 105 | type_match = type ? metric.type == type : true 106 | help_match = help ? metric.help == help : true 107 | name_match = metric.name == name 108 | 109 | type_match && help_match && name_match 110 | end 111 | end 112 | 113 | def send_json(obj) 114 | payload = 115 | if @custom_labels 116 | if obj[:custom_labels] 117 | obj.merge(custom_labels: @custom_labels.merge(obj[:custom_labels])) 118 | else 119 | obj.merge(custom_labels: @custom_labels) 120 | end 121 | else 122 | obj 123 | end 124 | send(@json_serializer.dump(payload)) 125 | end 126 | 127 | def send(str) 128 | @queue << str 129 | if @queue.length > @max_queue_size 130 | logger.warn "Prometheus Exporter client is dropping message cause queue is full" 131 | @queue.pop 132 | end 133 | 134 | ensure_worker_thread! 135 | end 136 | 137 | def process_queue 138 | while @queue.length > 0 139 | ensure_socket! 
140 | 141 | begin 142 | message = @queue.pop 143 | @socket.write(message.bytesize.to_s(16).upcase) 144 | @socket.write("\r\n") 145 | @socket.write(message) 146 | @socket.write("\r\n") 147 | rescue => e 148 | logger.warn "Prometheus Exporter is dropping a message: #{e}" 149 | close_socket! 150 | raise 151 | end 152 | end 153 | end 154 | 155 | def stop(wait_timeout_seconds: 0) 156 | @mutex.synchronize do 157 | wait_for_empty_queue_with_timeout(wait_timeout_seconds) 158 | @worker_thread&.kill 159 | sleep 0.001 while @worker_thread&.alive? 160 | @worker_thread = nil 161 | close_socket! 162 | end 163 | end 164 | 165 | private 166 | 167 | def worker_loop 168 | close_socket_if_old! 169 | process_queue 170 | rescue => e 171 | logger.error "Prometheus Exporter, failed to send message #{e}" 172 | end 173 | 174 | def ensure_worker_thread! 175 | if @process_queue_once_and_stop 176 | worker_loop 177 | return 178 | end 179 | 180 | unless @worker_thread&.alive? 181 | @mutex.synchronize do 182 | return if @worker_thread&.alive? 183 | 184 | @worker_thread = 185 | Thread.new do 186 | while true 187 | worker_loop 188 | sleep @thread_sleep 189 | end 190 | end 191 | end 192 | end 193 | rescue ThreadError => e 194 | raise unless e.message =~ /can't alloc thread/ 195 | logger.error "Prometheus Exporter, failed to send message ThreadError #{e}" 196 | end 197 | 198 | def close_socket! 199 | begin 200 | if @socket && !@socket.closed? 201 | @socket.write("0\r\n") 202 | @socket.write("\r\n") 203 | @socket.flush 204 | @socket.close 205 | end 206 | rescue Errno::EPIPE 207 | end 208 | 209 | @socket = nil 210 | @socket_started = nil 211 | end 212 | 213 | def close_socket_if_old! 214 | if @socket_pid == Process.pid && @socket && @socket_started && 215 | ((@socket_started + MAX_SOCKET_AGE) < Time.now.to_f) 216 | close_socket! 217 | end 218 | end 219 | 220 | def ensure_socket! 
221 | # if process was forked socket may be owned by parent 222 | # leave it alone and reset 223 | if @socket_pid != Process.pid 224 | @socket = nil 225 | @socket_started = nil 226 | @socket_pid = nil 227 | end 228 | 229 | close_socket_if_old! 230 | if !@socket 231 | @socket = TCPSocket.new @host, @port 232 | @socket.write("POST /send-metrics HTTP/1.1\r\n") 233 | @socket.write("Transfer-Encoding: chunked\r\n") 234 | @socket.write("Host: #{@host}\r\n") 235 | @socket.write("Connection: Close\r\n") 236 | @socket.write("Content-Type: application/octet-stream\r\n") 237 | @socket.write("\r\n") 238 | @socket_started = Time.now.to_f 239 | @socket_pid = Process.pid 240 | end 241 | 242 | nil 243 | rescue StandardError 244 | close_socket! 245 | @socket_pid = nil 246 | raise 247 | end 248 | 249 | def wait_for_empty_queue_with_timeout(timeout_seconds) 250 | start_time = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) 251 | while @queue.length > 0 252 | break if start_time + timeout_seconds < ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) 253 | sleep(0.05) 254 | end 255 | end 256 | end 257 | 258 | class LocalClient < Client 259 | attr_reader :collector 260 | 261 | def initialize(collector:, json_serializer: nil, custom_labels: nil) 262 | @collector = collector 263 | super(json_serializer: json_serializer, custom_labels: custom_labels) 264 | end 265 | 266 | def send(json) 267 | @collector.process(json) 268 | end 269 | end 270 | end 271 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "client" 4 | require_relative "instrumentation/periodic_stats" 5 | require_relative "instrumentation/process" 6 | require_relative "instrumentation/method_profiler" 7 | require_relative "instrumentation/sidekiq" 8 | require_relative "instrumentation/sidekiq_queue" 9 | 
require_relative "instrumentation/sidekiq_process" 10 | require_relative "instrumentation/sidekiq_stats" 11 | require_relative "instrumentation/delayed_job" 12 | require_relative "instrumentation/puma" 13 | require_relative "instrumentation/hutch" 14 | require_relative "instrumentation/unicorn" 15 | require_relative "instrumentation/active_record" 16 | require_relative "instrumentation/shoryuken" 17 | require_relative "instrumentation/resque" 18 | require_relative "instrumentation/good_job" 19 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/active_record.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | # collects stats from currently running process 4 | module PrometheusExporter::Instrumentation 5 | class ActiveRecord < PeriodicStats 6 | ALLOWED_CONFIG_LABELS = %i[database username host port] 7 | 8 | def self.start(client: nil, frequency: 30, custom_labels: {}, config_labels: []) 9 | client ||= PrometheusExporter::Client.default 10 | 11 | # Not all rails versions support connection pool stats 12 | unless ::ActiveRecord::Base.connection_pool.respond_to?(:stat) 13 | client.logger.error( 14 | "ActiveRecord connection pool stats not supported in your rails version", 15 | ) 16 | return 17 | end 18 | 19 | config_labels.map!(&:to_sym) 20 | validate_config_labels(config_labels) 21 | 22 | active_record_collector = new(custom_labels, config_labels) 23 | 24 | worker_loop do 25 | metrics = active_record_collector.collect 26 | metrics.each { |metric| client.send_json metric } 27 | end 28 | 29 | super 30 | end 31 | 32 | def self.validate_config_labels(config_labels) 33 | return if config_labels.size == 0 34 | if (config_labels - ALLOWED_CONFIG_LABELS).size > 0 35 | raise "Invalid Config Labels, available options #{ALLOWED_CONFIG_LABELS}" 36 | end 37 | end 38 | 39 | def initialize(metric_labels, config_labels) 40 | 
@metric_labels = metric_labels 41 | @config_labels = config_labels 42 | end 43 | 44 | def collect 45 | metrics = [] 46 | collect_active_record_pool_stats(metrics) 47 | metrics 48 | end 49 | 50 | def pid 51 | @pid = ::Process.pid 52 | end 53 | 54 | def collect_active_record_pool_stats(metrics) 55 | ObjectSpace.each_object(::ActiveRecord::ConnectionAdapters::ConnectionPool) do |pool| 56 | next if pool.connections.nil? 57 | 58 | metric = { 59 | pid: pid, 60 | type: "active_record", 61 | hostname: ::PrometheusExporter.hostname, 62 | metric_labels: labels(pool), 63 | } 64 | metric.merge!(pool.stat) 65 | metrics << metric 66 | end 67 | end 68 | 69 | private 70 | 71 | def labels(pool) 72 | if ::ActiveRecord.version < Gem::Version.new("6.1.0.rc1") 73 | @metric_labels.merge(pool_name: pool.spec.name).merge( 74 | pool 75 | .spec 76 | .config 77 | .select { |k, v| @config_labels.include? k } 78 | .map { |k, v| [k.to_s.dup.prepend("dbconfig_"), v] } 79 | .to_h, 80 | ) 81 | else 82 | @metric_labels.merge(pool_name: pool.db_config.name).merge( 83 | @config_labels.each_with_object({}) do |l, acc| 84 | acc["dbconfig_#{l}"] = pool.db_config.public_send(l) 85 | end, 86 | ) 87 | end 88 | end 89 | end 90 | end 91 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/delayed_job.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Instrumentation 4 | class DelayedJob 5 | JOB_CLASS_REGEXP = /job_class: ((\w+:{0,2})+)/.freeze 6 | 7 | class << self 8 | def register_plugin(client: nil, include_module_name: false) 9 | instrumenter = self.new(client: client) 10 | return unless defined?(Delayed::Plugin) 11 | 12 | plugin = 13 | Class.new(Delayed::Plugin) do 14 | callbacks do |lifecycle| 15 | lifecycle.around(:invoke_job) do |job, *args, &block| 16 | max_attempts = Delayed::Worker.max_attempts 17 | enqueued_count = 
Delayed::Job.where(queue: job.queue).count 18 | pending_count = 19 | Delayed::Job.where(attempts: 0, locked_at: nil, queue: job.queue).count 20 | instrumenter.call( 21 | job, 22 | max_attempts, 23 | enqueued_count, 24 | pending_count, 25 | include_module_name, 26 | *args, 27 | &block 28 | ) 29 | end 30 | end 31 | end 32 | 33 | Delayed::Worker.plugins << plugin 34 | end 35 | end 36 | 37 | def initialize(client: nil) 38 | @client = client || PrometheusExporter::Client.default 39 | end 40 | 41 | def call(job, max_attempts, enqueued_count, pending_count, include_module_name, *args, &block) 42 | success = false 43 | start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) 44 | latency = Time.current - job.run_at 45 | attempts = job.attempts + 1 # Increment because we're adding the current attempt 46 | result = block.call(job, *args) 47 | success = true 48 | result 49 | ensure 50 | duration = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start 51 | 52 | @client.send_json( 53 | type: "delayed_job", 54 | name: job.handler.to_s.match(JOB_CLASS_REGEXP).to_a[include_module_name ? 
# frozen_string_literal: true

# Periodically collects job-state counts from GoodJob
module PrometheusExporter::Instrumentation
  class GoodJob < PeriodicStats
    def self.start(client: nil, frequency: 30)
      collector = new
      client ||= PrometheusExporter::Client.default

      worker_loop { client.send_json(collector.collect) }

      super
    end

    # Snapshot the number of GoodJob jobs in each lifecycle state.
    # Key order matches the emitted payload: type first, then the states.
    def collect
      states = %i[scheduled retried queued running finished succeeded discarded]
      metric = { type: "good_job" }
      states.each { |state| metric[state] = ::GoodJob::Job.public_send(state).size }
      metric
    end
  end
end
| name: @klass.class.to_s, 21 | success: success, 22 | duration: duration, 23 | ) 24 | end 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/method_profiler.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | # see https://samsaffron.com/archive/2017/10/18/fastest-way-to-profile-a-method-in-ruby 4 | module PrometheusExporter::Instrumentation 5 | end 6 | 7 | class PrometheusExporter::Instrumentation::MethodProfiler 8 | def self.patch(klass, methods, name, instrument:) 9 | if instrument == :alias_method 10 | patch_using_alias_method(klass, methods, name) 11 | elsif instrument == :prepend 12 | patch_using_prepend(klass, methods, name) 13 | else 14 | raise ArgumentError, "instrument must be :alias_method or :prepend" 15 | end 16 | end 17 | 18 | def self.transfer 19 | result = Thread.current[:_method_profiler] 20 | Thread.current[:_method_profiler] = nil 21 | result 22 | end 23 | 24 | def self.start(transfer = nil) 25 | Thread.current[:_method_profiler] = transfer || 26 | { __start: Process.clock_gettime(Process::CLOCK_MONOTONIC) } 27 | end 28 | 29 | def self.clear 30 | Thread.current[:_method_profiler] = nil 31 | end 32 | 33 | def self.stop 34 | finish = Process.clock_gettime(Process::CLOCK_MONOTONIC) 35 | if data = Thread.current[:_method_profiler] 36 | Thread.current[:_method_profiler] = nil 37 | start = data.delete(:__start) 38 | data[:total_duration] = finish - start 39 | end 40 | data 41 | end 42 | 43 | def self.define_methods_on_module(klass, methods, name) 44 | patch_source_line = __LINE__ + 3 45 | 46 | patches = methods.map { |method_name| <<~RUBY }.join("\n") 47 | def #{method_name}(...) 
48 | unless prof = Thread.current[:_method_profiler] 49 | return super 50 | end 51 | begin 52 | start = Process.clock_gettime(Process::CLOCK_MONOTONIC) 53 | super 54 | ensure 55 | data = (prof[:#{name}] ||= {duration: 0.0, calls: 0}) 56 | data[:duration] += Process.clock_gettime(Process::CLOCK_MONOTONIC) - start 57 | data[:calls] += 1 58 | end 59 | end 60 | RUBY 61 | 62 | klass.module_eval(patches, __FILE__, patch_source_line) 63 | end 64 | 65 | def self.patch_using_prepend(klass, methods, name) 66 | prepend_instrument = Module.new 67 | define_methods_on_module(prepend_instrument, methods, name) 68 | klass.prepend(prepend_instrument) 69 | end 70 | 71 | def self.patch_using_alias_method(klass, methods, name) 72 | patch_source_line = __LINE__ + 3 73 | 74 | patches = methods.map { |method_name| <<~RUBY }.join("\n") 75 | unless defined?(#{method_name}__mp_unpatched) 76 | alias_method :#{method_name}__mp_unpatched, :#{method_name} 77 | 78 | def #{method_name}(...) 79 | unless prof = Thread.current[:_method_profiler] 80 | return #{method_name}__mp_unpatched(...) 81 | end 82 | 83 | begin 84 | start = Process.clock_gettime(Process::CLOCK_MONOTONIC) 85 | #{method_name}__mp_unpatched(...) 
86 | ensure 87 | data = (prof[:#{name}] ||= {duration: 0.0, calls: 0}) 88 | data[:duration] += Process.clock_gettime(Process::CLOCK_MONOTONIC) - start 89 | data[:calls] += 1 90 | end 91 | end 92 | end 93 | RUBY 94 | 95 | klass.class_eval(patches, __FILE__, patch_source_line) 96 | end 97 | end 98 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/periodic_stats.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Instrumentation 4 | class PeriodicStats 5 | def self.start(*args, frequency:, client: nil, **kwargs) 6 | client ||= PrometheusExporter::Client.default 7 | 8 | raise ArgumentError.new("Expected frequency to be a number") if !(Numeric === frequency) 9 | 10 | raise ArgumentError.new("Expected frequency to be a positive number") if frequency < 0 11 | 12 | raise ArgumentError.new("Worker loop was not set") if !@worker_loop 13 | 14 | klass = self 15 | 16 | stop 17 | 18 | @stop_thread = false 19 | 20 | @thread = 21 | Thread.new do 22 | while !@stop_thread 23 | begin 24 | @worker_loop.call 25 | rescue => e 26 | client.logger.error("#{klass} Prometheus Exporter Failed To Collect Stats #{e}") 27 | ensure 28 | sleep frequency 29 | end 30 | end 31 | end 32 | end 33 | 34 | def self.started? 35 | !!@thread&.alive? 36 | end 37 | 38 | def self.worker_loop(&blk) 39 | @worker_loop = blk 40 | end 41 | 42 | def self.stop 43 | # to avoid a warning 44 | @thread = nil if !defined?(@thread) 45 | 46 | if @thread&.alive? 
47 | @stop_thread = true 48 | @thread.wakeup 49 | @thread.join 50 | end 51 | @thread = nil 52 | end 53 | end 54 | end 55 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/process.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | # collects stats from currently running process 4 | module PrometheusExporter::Instrumentation 5 | class Process < PeriodicStats 6 | def self.start(client: nil, type: "ruby", frequency: 30, labels: nil) 7 | metric_labels = 8 | if labels && type 9 | labels.merge(type: type) 10 | elsif labels 11 | labels 12 | else 13 | { type: type } 14 | end 15 | 16 | process_collector = new(metric_labels) 17 | client ||= PrometheusExporter::Client.default 18 | 19 | worker_loop do 20 | metric = process_collector.collect 21 | client.send_json metric 22 | end 23 | 24 | super 25 | end 26 | 27 | def initialize(metric_labels) 28 | @metric_labels = metric_labels 29 | end 30 | 31 | def collect 32 | metric = {} 33 | metric[:type] = "process" 34 | metric[:metric_labels] = @metric_labels 35 | metric[:hostname] = ::PrometheusExporter.hostname 36 | collect_gc_stats(metric) 37 | collect_v8_stats(metric) 38 | collect_process_stats(metric) 39 | metric 40 | end 41 | 42 | def pid 43 | @pid = ::Process.pid 44 | end 45 | 46 | def rss 47 | @pagesize ||= 48 | begin 49 | `getconf PAGESIZE`.to_i 50 | rescue StandardError 51 | 4096 52 | end 53 | begin 54 | File.read("/proc/#{pid}/statm").split(" ")[1].to_i * @pagesize 55 | rescue StandardError 56 | 0 57 | end 58 | end 59 | 60 | def collect_process_stats(metric) 61 | metric[:pid] = pid 62 | metric[:rss] = rss 63 | end 64 | 65 | def collect_gc_stats(metric) 66 | stat = GC.stat 67 | metric[:heap_live_slots] = stat[:heap_live_slots] 68 | metric[:heap_free_slots] = stat[:heap_free_slots] 69 | metric[:major_gc_ops_total] = stat[:major_gc_count] 70 | metric[:minor_gc_ops_total] = 
stat[:minor_gc_count] 71 | metric[:allocated_objects_total] = stat[:total_allocated_objects] 72 | metric[:malloc_increase_bytes_limit] = stat[:malloc_increase_bytes_limit] 73 | metric[:oldmalloc_increase_bytes_limit] = stat[:oldmalloc_increase_bytes_limit] 74 | end 75 | 76 | def collect_v8_stats(metric) 77 | return if !defined?(MiniRacer) 78 | 79 | metric[:v8_heap_count] = metric[:v8_heap_size] = 0 80 | metric[:v8_heap_size] = metric[:v8_physical_size] = 0 81 | metric[:v8_used_heap_size] = 0 82 | 83 | ObjectSpace.each_object(MiniRacer::Context) do |context| 84 | stats = context.heap_stats 85 | if stats 86 | metric[:v8_heap_count] += 1 87 | metric[:v8_heap_size] += stats[:total_heap_size].to_i 88 | metric[:v8_used_heap_size] += stats[:used_heap_size].to_i 89 | metric[:v8_physical_size] += stats[:total_physical_size].to_i 90 | end 91 | end 92 | end 93 | end 94 | end 95 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/puma.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require "json" 4 | 5 | # collects stats from puma 6 | module PrometheusExporter::Instrumentation 7 | class Puma < PeriodicStats 8 | def self.start(client: nil, frequency: 30, labels: {}) 9 | puma_collector = new(labels) 10 | client ||= PrometheusExporter::Client.default 11 | 12 | worker_loop do 13 | metric = puma_collector.collect 14 | client.send_json metric 15 | end 16 | 17 | super 18 | end 19 | 20 | def initialize(metric_labels = {}) 21 | @metric_labels = metric_labels 22 | end 23 | 24 | def collect 25 | metric = { 26 | pid: pid, 27 | type: "puma", 28 | hostname: ::PrometheusExporter.hostname, 29 | metric_labels: @metric_labels, 30 | } 31 | collect_puma_stats(metric) 32 | metric 33 | end 34 | 35 | def pid 36 | @pid = ::Process.pid 37 | end 38 | 39 | def collect_puma_stats(metric) 40 | stats = JSON.parse(::Puma.stats) 41 | 42 | if 
stats.key?("workers") 43 | metric[:phase] = stats["phase"] 44 | metric[:workers] = stats["workers"] 45 | metric[:booted_workers] = stats["booted_workers"] 46 | metric[:old_workers] = stats["old_workers"] 47 | 48 | stats["worker_status"].each do |worker| 49 | next if worker["last_status"].empty? 50 | collect_worker_status(metric, worker["last_status"]) 51 | end 52 | else 53 | collect_worker_status(metric, stats) 54 | end 55 | end 56 | 57 | private 58 | 59 | def collect_worker_status(metric, status) 60 | metric[:request_backlog] ||= 0 61 | metric[:running_threads] ||= 0 62 | metric[:thread_pool_capacity] ||= 0 63 | metric[:max_threads] ||= 0 64 | metric[:busy_threads] ||= 0 65 | 66 | metric[:request_backlog] += status["backlog"] 67 | metric[:running_threads] += status["running"] 68 | metric[:thread_pool_capacity] += status["pool_capacity"] 69 | metric[:max_threads] += status["max_threads"] 70 | metric[:busy_threads] += status["busy_threads"] 71 | end 72 | end 73 | end 74 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/resque.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | # collects stats from resque 4 | module PrometheusExporter::Instrumentation 5 | class Resque < PeriodicStats 6 | def self.start(client: nil, frequency: 30) 7 | resque_collector = new 8 | client ||= PrometheusExporter::Client.default 9 | 10 | worker_loop { client.send_json(resque_collector.collect) } 11 | 12 | super 13 | end 14 | 15 | def collect 16 | metric = {} 17 | metric[:type] = "resque" 18 | collect_resque_stats(metric) 19 | metric 20 | end 21 | 22 | def collect_resque_stats(metric) 23 | info = ::Resque.info 24 | 25 | metric[:processed_jobs] = info[:processed] 26 | metric[:failed_jobs] = info[:failed] 27 | metric[:pending_jobs] = info[:pending] 28 | metric[:queues] = info[:queues] 29 | metric[:worker] = info[:workers] 30 | metric[:working] = 
# frozen_string_literal: true

module PrometheusExporter::Instrumentation
  # Shoryuken server middleware that reports per-job timing and outcome.
  class Shoryuken
    def initialize(client: nil)
      @client = client || PrometheusExporter::Client.default
    end

    # Wrap job execution, always emitting a "shoryuken" metric with the
    # job duration and whether it succeeded or was interrupted by shutdown.
    def call(worker, queue, msg, body)
      success = false
      # Initialize explicitly so the payload reports false (not nil) when
      # no shutdown occurred — consistent with the Sidekiq instrumentation.
      shutdown = false
      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      result = yield
      success = true
      result
    rescue ::Shoryuken::Shutdown => e
      shutdown = true
      raise e
    ensure
      duration = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start
      @client.send_json(
        type: "shoryuken",
        queue: queue,
        name: worker.class.name,
        success: success,
        shutdown: shutdown,
        duration: duration,
      )
    end
  end
end
true, 26 | custom_labels: worker_custom_labels, 27 | ) 28 | end 29 | end 30 | end 31 | 32 | def self.get_worker_custom_labels(worker_class, msg) 33 | return {} unless worker_class.respond_to?(:custom_labels) 34 | 35 | # TODO remove when version 3.0.0 is released 36 | method_arity = worker_class.method(:custom_labels).arity 37 | 38 | if method_arity > 0 39 | worker_class.custom_labels(msg) 40 | else 41 | worker_class.custom_labels 42 | end 43 | end 44 | 45 | def initialize(options = { client: nil }) 46 | @client = options.fetch(:client, nil) || PrometheusExporter::Client.default 47 | end 48 | 49 | def call(worker, msg, queue) 50 | success = false 51 | shutdown = false 52 | start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) 53 | result = yield 54 | success = true 55 | result 56 | rescue ::Sidekiq::Shutdown => e 57 | shutdown = true 58 | raise e 59 | ensure 60 | duration = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start 61 | @client.send_json( 62 | type: "sidekiq", 63 | name: self.class.get_name(worker.class.to_s, msg), 64 | queue: queue, 65 | success: success, 66 | shutdown: shutdown, 67 | duration: duration, 68 | custom_labels: self.class.get_worker_custom_labels(worker.class, msg), 69 | ) 70 | end 71 | 72 | private 73 | 74 | def self.get_name(class_name, msg) 75 | if class_name == JOB_WRAPPER_CLASS_NAME 76 | get_job_wrapper_name(msg) 77 | elsif DELAYED_CLASS_NAMES.include?(class_name) 78 | get_delayed_name(msg, class_name) 79 | else 80 | class_name 81 | end 82 | end 83 | 84 | def self.get_job_wrapper_name(msg) 85 | msg["wrapped"] 86 | end 87 | 88 | def self.get_delayed_name(msg, class_name) 89 | begin 90 | # fallback to class_name since we're relying on the internal implementation 91 | # of the delayed extensions 92 | # https://github.com/mperham/sidekiq/blob/master/lib/sidekiq/extensions/class_methods.rb 93 | target, method_name, _args = YAML.load(msg["args"].first) 94 | if target.class == Class 95 | "#{target.name}##{method_name}" 96 | else 
# frozen_string_literal: true

module PrometheusExporter::Instrumentation
  # Periodically reports busy/concurrency stats for this Sidekiq process.
  class SidekiqProcess < PeriodicStats
    def self.start(client: nil, frequency: 30)
      client ||= PrometheusExporter::Client.default
      collector = new

      worker_loop { client.send_json(collector.collect) }

      super
    end

    def initialize
      @pid = ::Process.pid
      @hostname = Socket.gethostname
    end

    def collect
      { type: "sidekiq_process", process: collect_stats }
    end

    # Shape the matching ProcessSet entry into a metric payload; {} when
    # this process is not (yet) visible in Sidekiq's process set.
    def collect_stats
      info = current_process
      return {} if info.nil?

      {
        busy: info["busy"],
        concurrency: info["concurrency"],
        labels: {
          labels: info["labels"].sort.join(","),
          queues: info["queues"].sort.join(","),
          quiet: info["quiet"],
          tag: info["tag"],
          hostname: info["hostname"],
          identity: info["identity"],
        },
      }
    end

    # Locate this process's entry in the Sidekiq process set by
    # hostname + pid.
    def current_process
      ::Sidekiq::ProcessSet.new.find do |sp|
        sp["hostname"] == @hostname && sp["pid"] == @pid
      end
    end
  end
end
/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Instrumentation 4 | class SidekiqQueue < PeriodicStats 5 | def self.start(client: nil, frequency: 30, all_queues: false) 6 | client ||= PrometheusExporter::Client.default 7 | sidekiq_queue_collector = new(all_queues: all_queues) 8 | 9 | worker_loop { client.send_json(sidekiq_queue_collector.collect) } 10 | 11 | super 12 | end 13 | 14 | def initialize(all_queues: false) 15 | @all_queues = all_queues 16 | @pid = ::Process.pid 17 | @hostname = Socket.gethostname 18 | end 19 | 20 | def collect 21 | { type: "sidekiq_queue", queues: collect_queue_stats } 22 | end 23 | 24 | def collect_queue_stats 25 | sidekiq_queues = ::Sidekiq::Queue.all 26 | 27 | unless @all_queues 28 | queues = collect_current_process_queues 29 | sidekiq_queues.select! { |sidekiq_queue| queues.include?(sidekiq_queue.name) } 30 | end 31 | 32 | sidekiq_queues 33 | .map do |queue| 34 | { 35 | backlog: queue.size, 36 | latency_seconds: queue.latency.to_i, 37 | labels: { 38 | queue: queue.name, 39 | }, 40 | } 41 | end 42 | .compact 43 | end 44 | 45 | private 46 | 47 | def collect_current_process_queues 48 | ps = ::Sidekiq::ProcessSet.new 49 | 50 | process = ps.find { |sp| sp["hostname"] == @hostname && sp["pid"] == @pid } 51 | 52 | process.nil? ? 
[] : process["queues"] 53 | end 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/sidekiq_stats.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Instrumentation 4 | class SidekiqStats < PeriodicStats 5 | def self.start(client: nil, frequency: 30) 6 | client ||= PrometheusExporter::Client.default 7 | sidekiq_stats_collector = new 8 | 9 | worker_loop { client.send_json(sidekiq_stats_collector.collect) } 10 | 11 | super 12 | end 13 | 14 | def collect 15 | { type: "sidekiq_stats", stats: collect_stats } 16 | end 17 | 18 | def collect_stats 19 | stats = ::Sidekiq::Stats.new 20 | { 21 | "dead_size" => stats.dead_size, 22 | "enqueued" => stats.enqueued, 23 | "failed" => stats.failed, 24 | "processed" => stats.processed, 25 | "processes_size" => stats.processes_size, 26 | "retry_size" => stats.retry_size, 27 | "scheduled_size" => stats.scheduled_size, 28 | "workers_size" => stats.workers_size, 29 | } 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/instrumentation/unicorn.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | begin 4 | require "raindrops" 5 | rescue LoadError 6 | # No raindrops available, dont do anything 7 | end 8 | 9 | module PrometheusExporter::Instrumentation 10 | # collects stats from unicorn 11 | class Unicorn < PeriodicStats 12 | def self.start(pid_file:, listener_address:, client: nil, frequency: 30) 13 | unicorn_collector = new(pid_file: pid_file, listener_address: listener_address) 14 | client ||= PrometheusExporter::Client.default 15 | 16 | worker_loop do 17 | metric = unicorn_collector.collect 18 | client.send_json metric 19 | end 20 | 21 | super 22 | end 23 | 24 | def initialize(pid_file:, 
listener_address:) 25 | @pid_file = pid_file 26 | @listener_address = listener_address 27 | @tcp = listener_address =~ /\A.+:\d+\z/ 28 | end 29 | 30 | def collect 31 | metric = {} 32 | metric[:type] = "unicorn" 33 | collect_unicorn_stats(metric) 34 | metric 35 | end 36 | 37 | def collect_unicorn_stats(metric) 38 | stats = listener_address_stats 39 | 40 | metric[:active_workers] = stats.active 41 | metric[:request_backlog] = stats.queued 42 | metric[:workers] = worker_process_count 43 | end 44 | 45 | private 46 | 47 | def worker_process_count 48 | return nil unless File.exist?(@pid_file) 49 | pid = File.read(@pid_file).to_i 50 | 51 | return nil if pid < 1 52 | 53 | # find all processes whose parent is the unicorn master 54 | # but we're actually only interested in the number of processes (= lines of output) 55 | result = `pgrep -P #{pid} -f unicorn -a` 56 | result.lines.count 57 | end 58 | 59 | def listener_address_stats 60 | if @tcp 61 | Raindrops::Linux.tcp_listener_stats([@listener_address])[@listener_address] 62 | else 63 | Raindrops::Linux.unix_listener_stats([@listener_address])[@listener_address] 64 | end 65 | end 66 | end 67 | end 68 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/metric.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "metric/base" 4 | require_relative "metric/counter" 5 | require_relative "metric/gauge" 6 | require_relative "metric/histogram" 7 | require_relative "metric/summary" 8 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/metric/base.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Metric 4 | class Base 5 | @default_prefix = nil if !defined?(@default_prefix) 6 | @default_labels = nil if !defined?(@default_labels) 7 | 
@default_aggregation = nil if !defined?(@default_aggregation) 8 | 9 | # prefix applied to all metrics 10 | def self.default_prefix=(name) 11 | @default_prefix = name 12 | end 13 | 14 | def self.default_prefix 15 | @default_prefix.to_s 16 | end 17 | 18 | def self.default_labels=(labels) 19 | @default_labels = labels 20 | end 21 | 22 | def self.default_labels 23 | @default_labels || {} 24 | end 25 | 26 | def self.default_aggregation=(aggregation) 27 | @default_aggregation = aggregation 28 | end 29 | 30 | def self.default_aggregation 31 | @default_aggregation ||= Summary 32 | end 33 | 34 | attr_accessor :help, :name, :data 35 | 36 | def initialize(name, help) 37 | @name = name 38 | @help = help 39 | end 40 | 41 | def type 42 | raise "Not implemented" 43 | end 44 | 45 | def metric_text 46 | raise "Not implemented" 47 | end 48 | 49 | def reset! 50 | raise "Not implemented" 51 | end 52 | 53 | def to_h 54 | raise "Not implemented" 55 | end 56 | 57 | def from_json(json) 58 | json = JSON.parse(json) if String === json 59 | @name = json["name"] 60 | @help = json["help"] 61 | @data = json["data"] 62 | if Hash === json["data"] 63 | @data = {} 64 | json["data"].each do |k, v| 65 | k = JSON.parse(k) 66 | k = Hash[k.map { |k1, v1| [k1.to_sym, v1] }] 67 | @data[k] = v 68 | end 69 | end 70 | end 71 | 72 | def prefix(name) 73 | Base.default_prefix + name 74 | end 75 | 76 | def labels_text(labels) 77 | labels = Base.default_labels.merge(labels || {}) 78 | if labels && labels.length > 0 79 | s = 80 | labels 81 | .map do |key, value| 82 | value = value.to_s 83 | value = escape_value(value) if needs_escape?(value) 84 | "#{key}=\"#{value}\"" 85 | end 86 | .join(",") 87 | "{#{s}}" 88 | end 89 | end 90 | 91 | def to_prometheus_text 92 | <<~TEXT 93 | # HELP #{prefix(name)} #{help} 94 | # TYPE #{prefix(name)} #{type} 95 | #{metric_text} 96 | TEXT 97 | end 98 | 99 | private 100 | 101 | def escape_value(str) 102 | str.gsub(/[\n"\\]/m) do |m| 103 | if m == "\n" 104 | "\\n" 105 | else 106 | 
"\\#{m}" 107 | end 108 | end 109 | end 110 | 111 | def needs_escape?(str) 112 | str.match?(/[\n"\\]/m) 113 | end 114 | end 115 | end 116 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/metric/counter.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Metric 4 | class Counter < Base 5 | attr_reader :data 6 | 7 | def initialize(name, help) 8 | super 9 | reset! 10 | end 11 | 12 | def type 13 | "counter" 14 | end 15 | 16 | def reset! 17 | @data = {} 18 | end 19 | 20 | def metric_text 21 | @data.map { |labels, value| "#{prefix(@name)}#{labels_text(labels)} #{value}" }.join("\n") 22 | end 23 | 24 | def to_h 25 | @data.dup 26 | end 27 | 28 | def remove(labels) 29 | @data.delete(labels) 30 | end 31 | 32 | def observe(increment = 1, labels = {}) 33 | @data[labels] ||= 0 34 | @data[labels] += increment 35 | end 36 | 37 | def increment(labels = {}, value = 1) 38 | @data[labels] ||= 0 39 | @data[labels] += value 40 | end 41 | 42 | def decrement(labels = {}, value = 1) 43 | @data[labels] ||= 0 44 | @data[labels] -= value 45 | end 46 | 47 | def reset(labels = {}, value = 0) 48 | @data[labels] = value 49 | end 50 | end 51 | end 52 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/metric/gauge.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Metric 4 | class Gauge < Base 5 | attr_reader :data 6 | 7 | def initialize(name, help) 8 | if name.end_with?("_total") 9 | raise ArgumentError, "The metric name of gauge must not have _total suffix. Given: #{name}" 10 | end 11 | 12 | super 13 | reset! 
14 | end 15 | 16 | def type 17 | "gauge" 18 | end 19 | 20 | def metric_text 21 | @data.map { |labels, value| "#{prefix(@name)}#{labels_text(labels)} #{value}" }.join("\n") 22 | end 23 | 24 | def reset! 25 | @data = {} 26 | end 27 | 28 | def to_h 29 | @data.dup 30 | end 31 | 32 | def remove(labels) 33 | @data.delete(labels) 34 | end 35 | 36 | def observe(value, labels = {}) 37 | if value.nil? 38 | data.delete(labels) 39 | else 40 | raise ArgumentError, "value must be a number" if !(Numeric === value) 41 | @data[labels] = value 42 | end 43 | end 44 | 45 | alias_method :set, :observe 46 | 47 | def increment(labels = {}, value = 1) 48 | @data[labels] ||= 0 49 | @data[labels] += value 50 | end 51 | 52 | def decrement(labels = {}, value = 1) 53 | @data[labels] ||= 0 54 | @data[labels] -= value 55 | end 56 | end 57 | end 58 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/metric/histogram.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Metric 4 | class Histogram < Base 5 | DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5.0, 10.0].freeze 6 | 7 | @default_buckets = nil if !defined?(@default_buckets) 8 | 9 | def self.default_buckets 10 | @default_buckets || DEFAULT_BUCKETS 11 | end 12 | 13 | def self.default_buckets=(buckets) 14 | @default_buckets = buckets 15 | end 16 | 17 | attr_reader :buckets 18 | 19 | def initialize(name, help, opts = {}) 20 | super(name, help) 21 | @buckets = (opts[:buckets] || self.class.default_buckets).sort 22 | reset! 23 | end 24 | 25 | def reset! 
26 | @sums = {} 27 | @counts = {} 28 | @observations = {} 29 | end 30 | 31 | def to_h 32 | data = {} 33 | @observations.each do |labels, buckets| 34 | count = @counts[labels] 35 | sum = @sums[labels] 36 | data[labels] = { "count" => count, "sum" => sum } 37 | end 38 | data 39 | end 40 | 41 | def remove(labels) 42 | @observations.delete(labels) 43 | @counts.delete(labels) 44 | @sums.delete(labels) 45 | end 46 | 47 | def type 48 | "histogram" 49 | end 50 | 51 | def metric_text 52 | text = +"" 53 | first = true 54 | @observations.each do |labels, buckets| 55 | text << "\n" unless first 56 | first = false 57 | count = @counts[labels] 58 | sum = @sums[labels] 59 | @buckets.each do |bucket| 60 | value = @observations[labels][bucket] 61 | text << "#{prefix(@name)}_bucket#{labels_text(with_bucket(labels, bucket.to_s))} #{value}\n" 62 | end 63 | text << "#{prefix(@name)}_bucket#{labels_text(with_bucket(labels, "+Inf"))} #{count}\n" 64 | text << "#{prefix(@name)}_count#{labels_text(labels)} #{count}\n" 65 | text << "#{prefix(@name)}_sum#{labels_text(labels)} #{sum}" 66 | end 67 | text 68 | end 69 | 70 | def observe(value, labels = nil) 71 | labels ||= {} 72 | buckets = ensure_histogram(labels) 73 | 74 | value = value.to_f 75 | @sums[labels] += value 76 | @counts[labels] += 1 77 | 78 | fill_buckets(value, buckets) 79 | end 80 | 81 | def ensure_histogram(labels) 82 | @sums[labels] ||= 0.0 83 | @counts[labels] ||= 0 84 | buckets = @observations[labels] 85 | if buckets.nil? 
86 | buckets = @buckets.map { |b| [b, 0] }.to_h 87 | @observations[labels] = buckets 88 | end 89 | buckets 90 | end 91 | 92 | def fill_buckets(value, buckets) 93 | @buckets.reverse_each do |b| 94 | break if value > b 95 | buckets[b] += 1 96 | end 97 | end 98 | 99 | def with_bucket(labels, bucket) 100 | labels.merge("le" => bucket) 101 | end 102 | end 103 | end 104 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/metric/summary.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Metric 4 | class Summary < Base 5 | DEFAULT_QUANTILES = [0.99, 0.9, 0.5, 0.1, 0.01] 6 | ROTATE_AGE = 120 7 | 8 | attr_reader :estimators, :count, :total 9 | 10 | def initialize(name, help, opts = {}) 11 | super(name, help) 12 | reset! 13 | @quantiles = opts[:quantiles] || DEFAULT_QUANTILES 14 | end 15 | 16 | def reset! 17 | @buffers = [{}, {}] 18 | @last_rotated = Process.clock_gettime(Process::CLOCK_MONOTONIC) 19 | @current_buffer = 0 20 | @counts = {} 21 | @sums = {} 22 | end 23 | 24 | def to_h 25 | data = {} 26 | calculate_all_quantiles.each do |labels, quantiles| 27 | count = @counts[labels] 28 | sum = @sums[labels] 29 | data[labels] = { "count" => count, "sum" => sum } 30 | end 31 | data 32 | end 33 | 34 | def remove(labels) 35 | @counts.delete(labels) 36 | @sums.delete(labels) 37 | @buffers[0].delete(labels) 38 | @buffers[1].delete(labels) 39 | end 40 | 41 | def type 42 | "summary" 43 | end 44 | 45 | def calculate_quantiles(raw_data) 46 | sorted = raw_data.sort 47 | length = sorted.length 48 | result = {} 49 | 50 | if length > 0 51 | @quantiles.each { |quantile| result[quantile] = sorted[(length * quantile).ceil - 1] } 52 | end 53 | 54 | result 55 | end 56 | 57 | def calculate_all_quantiles 58 | buffer = @buffers[@current_buffer] 59 | 60 | result = {} 61 | buffer.each { |labels, raw_data| result[labels] = 
calculate_quantiles(raw_data) } 62 | 63 | result 64 | end 65 | 66 | def metric_text 67 | text = +"" 68 | first = true 69 | calculate_all_quantiles.each do |labels, quantiles| 70 | text << "\n" unless first 71 | first = false 72 | quantiles.each do |quantile, value| 73 | with_quantile = labels.merge(quantile: quantile) 74 | text << "#{prefix(@name)}#{labels_text(with_quantile)} #{value.to_f}\n" 75 | end 76 | text << "#{prefix(@name)}_sum#{labels_text(labels)} #{@sums[labels]}\n" 77 | text << "#{prefix(@name)}_count#{labels_text(labels)} #{@counts[labels]}" 78 | end 79 | text 80 | end 81 | 82 | # makes sure we have storage 83 | def ensure_summary(labels) 84 | @buffers[0][labels] ||= [] 85 | @buffers[1][labels] ||= [] 86 | @sums[labels] ||= 0.0 87 | @counts[labels] ||= 0 88 | nil 89 | end 90 | 91 | def rotate_if_needed 92 | if (now = Process.clock_gettime(Process::CLOCK_MONOTONIC)) > (@last_rotated + ROTATE_AGE) 93 | @last_rotated = now 94 | @buffers[@current_buffer].each { |labels, raw| raw.clear } 95 | @current_buffer = @current_buffer == 0 ? 
1 : 0 96 | end 97 | nil 98 | end 99 | 100 | def observe(value, labels = nil) 101 | labels ||= {} 102 | ensure_summary(labels) 103 | rotate_if_needed 104 | 105 | value = value.to_f 106 | @buffers[0][labels] << value 107 | @buffers[1][labels] << value 108 | @sums[labels] += value 109 | @counts[labels] += 1 110 | end 111 | end 112 | end 113 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/middleware.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require "prometheus_exporter/instrumentation/method_profiler" 4 | require "prometheus_exporter/client" 5 | 6 | class PrometheusExporter::Middleware 7 | MethodProfiler = PrometheusExporter::Instrumentation::MethodProfiler 8 | 9 | def initialize(app, config = { instrument: :alias_method, client: nil }) 10 | @app = app 11 | @client = config[:client] || PrometheusExporter::Client.default 12 | 13 | if config[:instrument] 14 | apply_redis_client_middleware! 
if defined?(RedisClient) 15 | 16 | if defined?(Redis::VERSION) && (Gem::Version.new(Redis::VERSION) >= Gem::Version.new("5.0.0")) 17 | # redis 5 support handled via RedisClient 18 | elsif defined?(Redis::Client) 19 | MethodProfiler.patch( 20 | Redis::Client, 21 | %i[call call_pipeline], 22 | :redis, 23 | instrument: config[:instrument], 24 | ) 25 | end 26 | 27 | if defined?(PG::Connection) 28 | MethodProfiler.patch( 29 | PG::Connection, 30 | %i[exec async_exec exec_prepared exec_params send_query_prepared query], 31 | :sql, 32 | instrument: config[:instrument], 33 | ) 34 | end 35 | 36 | if defined?(Mysql2::Client) 37 | MethodProfiler.patch(Mysql2::Client, [:query], :sql, instrument: config[:instrument]) 38 | MethodProfiler.patch(Mysql2::Statement, [:execute], :sql, instrument: config[:instrument]) 39 | MethodProfiler.patch(Mysql2::Result, [:each], :sql, instrument: config[:instrument]) 40 | end 41 | 42 | if defined?(Dalli::Client) 43 | MethodProfiler.patch( 44 | Dalli::Client, 45 | %i[delete fetch get add set], 46 | :memcache, 47 | instrument: config[:instrument], 48 | ) 49 | end 50 | end 51 | end 52 | 53 | def call(env) 54 | queue_time = measure_queue_time(env) 55 | 56 | MethodProfiler.start 57 | result = @app.call(env) 58 | info = MethodProfiler.stop 59 | 60 | result 61 | ensure 62 | status = (result && result[0]) || -1 63 | obj = { 64 | type: "web", 65 | timings: info, 66 | queue_time: queue_time, 67 | status: status, 68 | default_labels: default_labels(env, result), 69 | } 70 | labels = custom_labels(env) 71 | obj = obj.merge(custom_labels: labels) if labels 72 | 73 | @client.send_json(obj) 74 | end 75 | 76 | def default_labels(env, result) 77 | params = env["action_dispatch.request.parameters"] 78 | action = controller = nil 79 | if params 80 | action = params["action"] 81 | controller = params["controller"] 82 | elsif (cors = env["rack.cors"]) && cors.respond_to?(:preflight?) && cors.preflight? 
83 | # if the Rack CORS Middleware identifies the request as a preflight request, 84 | # the stack doesn't get to the point where controllers/actions are defined 85 | action = "preflight" 86 | controller = "preflight" 87 | end 88 | 89 | { action: action || "other", controller: controller || "other" } 90 | end 91 | 92 | # allows subclasses to add custom labels based on env 93 | def custom_labels(env) 94 | nil 95 | end 96 | 97 | private 98 | 99 | # measures the queue time (= time between receiving the request in downstream 100 | # load balancer and starting request in ruby process) 101 | def measure_queue_time(env) 102 | start_time = queue_start(env) 103 | 104 | return unless start_time 105 | 106 | queue_time = request_start.to_f - start_time.to_f 107 | queue_time unless queue_time.negative? 108 | end 109 | 110 | # need to use CLOCK_REALTIME, as nginx/apache write this also out as the unix timestamp 111 | def request_start 112 | Process.clock_gettime(Process::CLOCK_REALTIME) 113 | end 114 | 115 | # determine queue start from well-known trace headers 116 | def queue_start(env) 117 | # get the content of the x-queue-start or x-request-start header 118 | value = env["HTTP_X_REQUEST_START"] || env["HTTP_X_QUEUE_START"] 119 | unless value.nil? 
|| value == "" 120 | # nginx returns time as milliseconds with 3 decimal places 121 | # apache returns time as microseconds without decimal places 122 | # this method takes care to convert both into a proper second + fractions timestamp 123 | value = value.to_s.gsub(/t=|\./, "") 124 | return "#{value[0, 10]}.#{value[10, 13]}".to_f 125 | end 126 | 127 | # get the content of the x-amzn-trace-id header 128 | # see also: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-request-tracing.html 129 | value = env["HTTP_X_AMZN_TRACE_ID"] 130 | value&.split("Root=")&.last&.split("-")&.fetch(1)&.to_i(16) 131 | end 132 | 133 | private 134 | 135 | module RedisInstrumenter 136 | MethodProfiler.define_methods_on_module(self, %w[call call_pipelined], "redis") 137 | end 138 | 139 | def apply_redis_client_middleware! 140 | RedisClient.register(RedisInstrumenter) 141 | end 142 | end 143 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/server.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "metric" 4 | require_relative "server/type_collector" 5 | require_relative "server/web_collector" 6 | require_relative "server/process_collector" 7 | require_relative "server/sidekiq_collector" 8 | require_relative "server/sidekiq_queue_collector" 9 | require_relative "server/sidekiq_process_collector" 10 | require_relative "server/sidekiq_stats_collector" 11 | require_relative "server/delayed_job_collector" 12 | require_relative "server/collector_base" 13 | require_relative "server/collector" 14 | require_relative "server/web_server" 15 | require_relative "server/runner" 16 | require_relative "server/puma_collector" 17 | require_relative "server/hutch_collector" 18 | require_relative "server/unicorn_collector" 19 | require_relative "server/active_record_collector" 20 | require_relative "server/shoryuken_collector" 
21 | require_relative "server/resque_collector" 22 | require_relative "server/good_job_collector" 23 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/server/active_record_collector.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Server 4 | class ActiveRecordCollector < TypeCollector 5 | MAX_METRIC_AGE = 60 6 | 7 | ACTIVE_RECORD_GAUGES = { 8 | connections: "Total connections in pool", 9 | busy: "Connections in use in pool", 10 | dead: "Dead connections in pool", 11 | idle: "Idle connections in pool", 12 | waiting: "Connection requests waiting", 13 | size: "Maximum allowed connection pool size", 14 | } 15 | 16 | def initialize 17 | @active_record_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE) 18 | @active_record_metrics.filter = ->(new_metric, old_metric) do 19 | new_metric["pid"] == old_metric["pid"] && 20 | new_metric["hostname"] == old_metric["hostname"] && 21 | new_metric["metric_labels"]["pool_name"] == old_metric["metric_labels"]["pool_name"] 22 | end 23 | end 24 | 25 | def type 26 | "active_record" 27 | end 28 | 29 | def metrics 30 | return [] if @active_record_metrics.length == 0 31 | 32 | metrics = {} 33 | 34 | @active_record_metrics.map do |m| 35 | metric_key = 36 | (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"]) 37 | metric_key.merge!(m["custom_labels"]) if m["custom_labels"] 38 | 39 | ACTIVE_RECORD_GAUGES.map do |k, help| 40 | k = k.to_s 41 | if v = m[k] 42 | g = 43 | metrics[k] ||= PrometheusExporter::Metric::Gauge.new( 44 | "active_record_connection_pool_#{k}", 45 | help, 46 | ) 47 | g.observe(v, metric_key) 48 | end 49 | end 50 | end 51 | 52 | metrics.values 53 | end 54 | 55 | def collect(obj) 56 | @active_record_metrics << obj 57 | end 58 | end 59 | end 60 | -------------------------------------------------------------------------------- 
/lib/prometheus_exporter/server/collector.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Server 4 | class Collector < CollectorBase 5 | def initialize(json_serializer: nil) 6 | @process_metrics = [] 7 | @metrics = {} 8 | @mutex = Mutex.new 9 | @collectors = {} 10 | @json_serializer = PrometheusExporter.detect_json_serializer(json_serializer) 11 | register_collector(WebCollector.new) 12 | register_collector(ProcessCollector.new) 13 | register_collector(SidekiqCollector.new) 14 | register_collector(SidekiqQueueCollector.new) 15 | register_collector(SidekiqProcessCollector.new) 16 | register_collector(SidekiqStatsCollector.new) 17 | register_collector(DelayedJobCollector.new) 18 | register_collector(PumaCollector.new) 19 | register_collector(HutchCollector.new) 20 | register_collector(UnicornCollector.new) 21 | register_collector(ActiveRecordCollector.new) 22 | register_collector(ShoryukenCollector.new) 23 | register_collector(ResqueCollector.new) 24 | register_collector(GoodJobCollector.new) 25 | end 26 | 27 | def register_collector(collector) 28 | @collectors[collector.type] = collector 29 | end 30 | 31 | def process(str) 32 | process_hash(@json_serializer.parse(str)) 33 | end 34 | 35 | def process_hash(obj) 36 | @mutex.synchronize do 37 | if collector = @collectors[obj["type"]] 38 | collector.collect(obj) 39 | else 40 | metric = @metrics[obj["name"]] 41 | metric = register_metric_unsafe(obj) if !metric 42 | 43 | keys = obj["keys"] || {} 44 | keys = obj["custom_labels"].merge(keys) if obj["custom_labels"] 45 | 46 | case obj["prometheus_exporter_action"] 47 | when "increment" 48 | metric.increment(keys, obj["value"]) 49 | when "decrement" 50 | metric.decrement(keys, obj["value"]) 51 | else 52 | metric.observe(obj["value"], keys) 53 | end 54 | end 55 | end 56 | end 57 | 58 | def prometheus_metrics_text 59 | @mutex.synchronize do 60 | (@metrics.values + 
# frozen_string_literal: true

module PrometheusExporter::Server
  # Server-side collector for payloads emitted by the delayed_job
  # instrumentation. Tracks per-job counters, queue-level gauges, and
  # duration / attempt summaries.
  class DelayedJobCollector < TypeCollector
    def initialize
      # All metrics are created lazily in ensure_delayed_job_metrics, keyed
      # off @delayed_jobs_total, so an exporter that never receives
      # delayed_job payloads exposes no delayed_job series.
      # (Fix: the original initializer assigned @delayed_jobs_total twice.)
      @delayed_jobs_total = nil
      @delayed_job_duration_seconds = nil
      @delayed_job_latency_seconds_total = nil
      @delayed_failed_jobs_total = nil
      @delayed_jobs_max_attempts_reached_total = nil
      @delayed_job_duration_seconds_summary = nil
      @delayed_job_attempts_summary = nil
      @delayed_jobs_enqueued = nil
      @delayed_jobs_pending = nil
    end

    def type
      "delayed_job"
    end

    # Records one processed job. Counter metrics carry queue_name + job_name
    # (+ custom labels); queue-level gauges omit job_name.
    def collect(obj)
      custom_labels = obj["custom_labels"] || {}
      gauge_labels = { queue_name: obj["queue_name"] }.merge(custom_labels)
      counter_labels = gauge_labels.merge(job_name: obj["name"])

      ensure_delayed_job_metrics
      @delayed_job_duration_seconds.observe(obj["duration"], counter_labels)
      @delayed_job_latency_seconds_total.observe(obj["latency"], counter_labels)
      @delayed_jobs_total.observe(1, counter_labels)
      @delayed_failed_jobs_total.observe(1, counter_labels) if !obj["success"]
      if obj["attempts"] >= obj["max_attempts"]
        @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels)
      end
      # Overall duration summary plus a status-labelled series per outcome.
      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels)
      if obj["success"]
        @delayed_job_duration_seconds_summary.observe(
          obj["duration"],
          counter_labels.merge(status: "success"),
        )
      end
      if !obj["success"]
        @delayed_job_duration_seconds_summary.observe(
          obj["duration"],
          counter_labels.merge(status: "failed"),
        )
      end
      @delayed_job_attempts_summary.observe(obj["attempts"], counter_labels) if obj["success"]
      @delayed_jobs_enqueued.observe(obj["enqueued"], gauge_labels)
      @delayed_jobs_pending.observe(obj["pending"], gauge_labels)
    end

    # Returns all series once at least one payload has been collected.
    def metrics
      if @delayed_jobs_total
        [
          @delayed_job_duration_seconds,
          @delayed_job_latency_seconds_total,
          @delayed_jobs_total,
          @delayed_failed_jobs_total,
          @delayed_jobs_max_attempts_reached_total,
          @delayed_job_duration_seconds_summary,
          @delayed_job_attempts_summary,
          @delayed_jobs_enqueued,
          @delayed_jobs_pending,
        ]
      else
        []
      end
    end

    protected

    # Creates all delayed_job metrics on first use.
    def ensure_delayed_job_metrics
      if !@delayed_jobs_total
        @delayed_job_duration_seconds =
          PrometheusExporter::Metric::Counter.new(
            "delayed_job_duration_seconds",
            "Total time spent in delayed jobs.",
          )

        @delayed_job_latency_seconds_total =
          PrometheusExporter::Metric::Counter.new(
            "delayed_job_latency_seconds_total",
            "Total delayed jobs latency.",
          )

        @delayed_jobs_total =
          PrometheusExporter::Metric::Counter.new(
            "delayed_jobs_total",
            "Total number of delayed jobs executed.",
          )

        @delayed_jobs_enqueued =
          PrometheusExporter::Metric::Gauge.new(
            "delayed_jobs_enqueued",
            "Number of enqueued delayed jobs.",
          )

        @delayed_jobs_pending =
          PrometheusExporter::Metric::Gauge.new(
            "delayed_jobs_pending",
            "Number of pending delayed jobs.",
          )

        @delayed_failed_jobs_total =
          PrometheusExporter::Metric::Counter.new(
            "delayed_failed_jobs_total",
            "Total number failed delayed jobs executed.",
          )

        @delayed_jobs_max_attempts_reached_total =
          PrometheusExporter::Metric::Counter.new(
            "delayed_jobs_max_attempts_reached_total",
            "Total number of delayed jobs that reached max attempts.",
          )

        # default_aggregation is Summary unless the server was booted with
        # --histogram, in which case it is Histogram.
        @delayed_job_duration_seconds_summary =
          PrometheusExporter::Metric::Base.default_aggregation.new(
            "delayed_job_duration_seconds_summary",
            "Summary of the time it takes jobs to execute.",
          )

        @delayed_job_attempts_summary =
          PrometheusExporter::Metric::Base.default_aggregation.new(
            "delayed_job_attempts_summary",
            "Summary of the amount of attempts it takes delayed jobs to succeed.",
          )
      end
    end
  end
end
# frozen_string_literal: true

module PrometheusExporter::Server
  # Collector for GoodJob queue statistics. Each payload may carry any of
  # the GOOD_JOB_GAUGES counts; stored payloads expire after MAX_METRIC_AGE
  # seconds so gauges reflect recent reports only.
  class GoodJobCollector < TypeCollector
    MAX_METRIC_AGE = 30
    GOOD_JOB_GAUGES = {
      scheduled: "Total number of scheduled GoodJob jobs.",
      retried: "Total number of retried GoodJob jobs.",
      queued: "Total number of queued GoodJob jobs.",
      running: "Total number of running GoodJob jobs.",
      finished: "Total number of finished GoodJob jobs.",
      succeeded: "Total number of succeeded GoodJob jobs.",
      discarded: "Total number of discarded GoodJob jobs.",
    }

    def initialize
      @good_job_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
      @gauges = {}
    end

    def type
      "good_job"
    end

    # Re-observes every unexpired payload into the (lazily created) gauges
    # and returns the gauge objects.
    def metrics
      return [] if @good_job_metrics.length == 0

      @good_job_metrics.each do |entry|
        entry_labels = entry.fetch("custom_labels", {})

        GOOD_JOB_GAUGES.each do |key, description|
          reading = entry[key.to_s]
          next unless reading

          gauge = @gauges[key] ||= PrometheusExporter::Metric::Gauge.new("good_job_#{key}", description)
          gauge.observe(reading, entry_labels)
        end
      end

      @gauges.values
    end

    # Stores one good_job payload (hash) for later rendering.
    def collect(object)
      @good_job_metrics << object
    end

    private

    attr_reader :good_job_metrics, :gauges
  end
end
# frozen_string_literal: true

module PrometheusExporter::Server
  # Self-expiring store for metric payload hashes. Every stored hash gets a
  # monotonic-clock deadline written under @expire_attr; stale entries (and
  # entries matched by the optional de-duplication filter) are pruned on
  # each insert and on each read.
  class MetricsContainer
    METRIC_MAX_AGE = 60
    METRIC_EXPIRE_ATTR = "_expire_at"

    attr_reader :data, :ttl
    # Optional callable (new_metric, old_metric) -> truthy to evict the old
    # entry when a newer one supersedes it (e.g. same pid/hostname).
    attr_accessor :filter

    def initialize(ttl: METRIC_MAX_AGE, expire_attr: METRIC_EXPIRE_ATTR, filter: nil)
      @data = []
      @ttl = ttl
      @expire_attr = expire_attr
      @filter = filter
    end

    # Stamps the payload with its deadline, prunes, then appends it.
    def <<(metric)
      now = monotonic_now
      metric[@expire_attr] = now + @ttl

      expire(time: now, new_metric: metric)

      @data << metric
      @data
    end

    def [](key)
      expire
      @data[key]
    end

    def size(&blk)
      expiring(:size, &blk)
    end
    alias_method :length, :size

    def map(&blk)
      expiring(:map, &blk)
    end

    def each(&blk)
      expiring(:each, &blk)
    end

    # Drops entries past their deadline; when a new metric is supplied and a
    # filter is configured, also drops entries the filter flags as superseded.
    def expire(time: nil, new_metric: nil)
      cutoff = time || monotonic_now

      @data.delete_if do |candidate|
        next true if candidate[@expire_attr] < cutoff
        next false unless @filter && new_metric
        @filter.call(new_metric, candidate)
      end
    end

    private

    # Monotonic clock — immune to wall-clock adjustments.
    def monotonic_now
      ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
    end

    # Prunes, then delegates the named Array method to @data.
    def expiring(method_name, &blk)
      expire
      @data.public_send(method_name, &blk)
    end
  end
end
process.", 24 | allocated_objects_total: "Total number of allocated objects by process.", 25 | } 26 | 27 | def initialize 28 | @process_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE) 29 | @process_metrics.filter = ->(new_metric, old_metric) do 30 | new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"] 31 | end 32 | end 33 | 34 | def type 35 | "process" 36 | end 37 | 38 | def metrics 39 | return [] if @process_metrics.length == 0 40 | 41 | metrics = {} 42 | 43 | @process_metrics.map do |m| 44 | metric_key = 45 | (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"]) 46 | metric_key.merge!(m["custom_labels"]) if m["custom_labels"] 47 | 48 | PROCESS_GAUGES.map do |k, help| 49 | k = k.to_s 50 | if v = m[k] 51 | g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new(k, help) 52 | g.observe(v, metric_key) 53 | end 54 | end 55 | 56 | PROCESS_COUNTERS.map do |k, help| 57 | k = k.to_s 58 | if v = m[k] 59 | c = metrics[k] ||= PrometheusExporter::Metric::Counter.new(k, help) 60 | c.observe(v, metric_key) 61 | end 62 | end 63 | end 64 | 65 | metrics.values 66 | end 67 | 68 | def collect(obj) 69 | @process_metrics << obj 70 | end 71 | end 72 | end 73 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/server/puma_collector.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Server 4 | class PumaCollector < TypeCollector 5 | MAX_PUMA_METRIC_AGE = 30 6 | PUMA_GAUGES = { 7 | workers: "Number of puma workers.", 8 | booted_workers: "Number of puma workers booted.", 9 | old_workers: "Number of old puma workers.", 10 | running_threads: "Number of puma threads currently running.", 11 | request_backlog: "Number of requests waiting to be processed by a puma thread.", 12 | thread_pool_capacity: "Number of puma threads available at current scale.", 13 | max_threads: 
"Number of puma threads at available at max scale.", 14 | } 15 | 16 | if defined?(::Puma::Const) && Gem::Version.new(::Puma::Const::VERSION) >= Gem::Version.new('6.6.0') 17 | PUMA_GAUGES[:busy_threads] = "Wholistic stat reflecting the overall current state of work to be done and the capacity to do it" 18 | end 19 | 20 | def initialize 21 | @puma_metrics = MetricsContainer.new(ttl: MAX_PUMA_METRIC_AGE) 22 | @puma_metrics.filter = ->(new_metric, old_metric) do 23 | new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"] 24 | end 25 | end 26 | 27 | def type 28 | "puma" 29 | end 30 | 31 | def metrics 32 | return [] if @puma_metrics.length == 0 33 | 34 | metrics = {} 35 | 36 | @puma_metrics.map do |m| 37 | labels = {} 38 | labels.merge!(phase: m["phase"]) if m["phase"] 39 | labels.merge!(m["custom_labels"]) if m["custom_labels"] 40 | labels.merge!(m["metric_labels"]) if m["metric_labels"] 41 | 42 | PUMA_GAUGES.map do |k, help| 43 | k = k.to_s 44 | if v = m[k] 45 | g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new("puma_#{k}", help) 46 | g.observe(v, labels) 47 | end 48 | end 49 | end 50 | 51 | metrics.values 52 | end 53 | 54 | def collect(obj) 55 | @puma_metrics << obj 56 | end 57 | end 58 | end 59 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/server/resque_collector.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Server 4 | class ResqueCollector < TypeCollector 5 | MAX_METRIC_AGE = 30 6 | RESQUE_GAUGES = { 7 | processed_jobs: "Total number of processed Resque jobs.", 8 | failed_jobs: "Total number of failed Resque jobs.", 9 | pending_jobs: "Total number of pending Resque jobs.", 10 | queues: "Total number of Resque queues.", 11 | workers: "Total number of Resque workers running.", 12 | working: "Total number of Resque workers working.", 13 | } 14 | 15 | def 
# frozen_string_literal: true

require_relative "../client"

module PrometheusExporter::Server
  # Raised for configuration problems detected while booting the runner.
  class RunnerException < StandardError
  end

  # Raised when the configured collector does not inherit from CollectorBase.
  class WrongInheritance < RunnerException
  end

  # Wires a collector, optional type collectors and the web server together
  # from an options hash (as produced by bin/prometheus_exporter).
  class Runner
    def initialize(options = {})
      @timeout = nil
      @port = nil
      @bind = nil
      @collector_class = nil
      @type_collectors = nil
      @prefix = nil
      @auth = nil
      @realm = nil
      @histogram = nil

      # Apply only options that map onto a known writer; unknown keys are ignored.
      options.each do |key, value|
        writer = "#{key}="
        send(writer, value) if self.class.method_defined?(writer)
      end
    end

    # Boots the exporter: configures metric defaults, registers custom type
    # collectors, optionally starts unicorn instrumentation, then starts the
    # web server (blocking in its own thread).
    def start
      PrometheusExporter::Metric::Base.default_prefix = prefix
      PrometheusExporter::Metric::Base.default_labels = label

      if histogram
        PrometheusExporter::Metric::Base.default_aggregation = PrometheusExporter::Metric::Histogram
      end

      register_type_collectors

      unless collector.is_a?(PrometheusExporter::Server::CollectorBase)
        raise WrongInheritance,
              "Collector class must be inherited from PrometheusExporter::Server::CollectorBase"
      end

      if unicorn_listen_address && unicorn_pid_file
        require_relative "../instrumentation"

        local_client = PrometheusExporter::LocalClient.new(collector: collector)
        PrometheusExporter::Instrumentation::Unicorn.start(
          pid_file: unicorn_pid_file,
          listener_address: unicorn_listen_address,
          client: local_client,
        )
      end

      web_server =
        server_class.new(
          port: port,
          bind: bind,
          collector: collector,
          timeout: timeout,
          verbose: verbose,
          auth: auth,
          realm: realm,
        )
      web_server.start
    end

    attr_accessor :unicorn_listen_address, :unicorn_pid_file
    attr_writer :prefix,
                :port,
                :bind,
                :collector_class,
                :type_collectors,
                :timeout,
                :verbose,
                :server_class,
                :label,
                :auth,
                :realm,
                :histogram

    # Readers below fall back to library defaults when the writer was never
    # called.

    def auth
      @auth
    end

    def realm
      @realm || PrometheusExporter::DEFAULT_REALM
    end

    def prefix
      @prefix || PrometheusExporter::DEFAULT_PREFIX
    end

    def port
      @port || PrometheusExporter::DEFAULT_PORT
    end

    def bind
      @bind || PrometheusExporter::DEFAULT_BIND_ADDRESS
    end

    def collector_class
      @collector_class || PrometheusExporter::Server::Collector
    end

    def type_collectors
      @type_collectors || []
    end

    def timeout
      @timeout || PrometheusExporter::DEFAULT_TIMEOUT
    end

    def verbose
      # defined? distinguishes "never set" (=> false) from an explicit nil.
      defined?(@verbose) ? @verbose : false
    end

    def server_class
      @server_class || PrometheusExporter::Server::WebServer
    end

    def collector
      @_collector ||= collector_class.new
    end

    def label
      @label ||= PrometheusExporter::DEFAULT_LABEL
    end

    def histogram
      @histogram || false
    end

    private

    # Instantiates and registers each configured TypeCollector subclass.
    def register_type_collectors
      type_collectors.each do |klass|
        collector.register_collector klass.new
        STDERR.puts "Registered TypeCollector: #{klass}" if verbose
      end
    end
  end
end
"Total number of shoryuken jobs executed.", 56 | ) 57 | 58 | @shoryuken_restarted_jobs_total = 59 | PrometheusExporter::Metric::Counter.new( 60 | "shoryuken_restarted_jobs_total", 61 | "Total number of shoryuken jobs that we restarted because of a shoryuken shutdown.", 62 | ) 63 | 64 | @shoryuken_failed_jobs_total = 65 | PrometheusExporter::Metric::Counter.new( 66 | "shoryuken_failed_jobs_total", 67 | "Total number of failed shoryuken jobs.", 68 | ) 69 | end 70 | end 71 | end 72 | end 73 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/server/sidekiq_collector.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Server 4 | class SidekiqCollector < TypeCollector 5 | def initialize 6 | @sidekiq_jobs_total = nil 7 | @sidekiq_job_duration_seconds = nil 8 | @sidekiq_jobs_total = nil 9 | @sidekiq_restarted_jobs_total = nil 10 | @sidekiq_failed_jobs_total = nil 11 | @sidekiq_dead_jobs_total = nil 12 | end 13 | 14 | def type 15 | "sidekiq" 16 | end 17 | 18 | def collect(obj) 19 | default_labels = { job_name: obj["name"], queue: obj["queue"] } 20 | custom_labels = obj["custom_labels"] 21 | labels = custom_labels.nil? ? 
default_labels : default_labels.merge(custom_labels) 22 | 23 | ensure_sidekiq_metrics 24 | if obj["dead"] 25 | @sidekiq_dead_jobs_total.observe(1, labels) 26 | else 27 | @sidekiq_job_duration_seconds.observe(obj["duration"], labels) 28 | @sidekiq_jobs_total.observe(1, labels) 29 | @sidekiq_restarted_jobs_total.observe(1, labels) if obj["shutdown"] 30 | @sidekiq_failed_jobs_total.observe(1, labels) if !obj["success"] && !obj["shutdown"] 31 | end 32 | end 33 | 34 | def metrics 35 | if @sidekiq_jobs_total 36 | [ 37 | @sidekiq_job_duration_seconds, 38 | @sidekiq_jobs_total, 39 | @sidekiq_restarted_jobs_total, 40 | @sidekiq_failed_jobs_total, 41 | @sidekiq_dead_jobs_total, 42 | ] 43 | else 44 | [] 45 | end 46 | end 47 | 48 | protected 49 | 50 | def ensure_sidekiq_metrics 51 | if !@sidekiq_jobs_total 52 | @sidekiq_job_duration_seconds = 53 | PrometheusExporter::Metric::Base.default_aggregation.new( 54 | "sidekiq_job_duration_seconds", 55 | "Total time spent in sidekiq jobs.", 56 | ) 57 | 58 | @sidekiq_jobs_total = 59 | PrometheusExporter::Metric::Counter.new( 60 | "sidekiq_jobs_total", 61 | "Total number of sidekiq jobs executed.", 62 | ) 63 | 64 | @sidekiq_restarted_jobs_total = 65 | PrometheusExporter::Metric::Counter.new( 66 | "sidekiq_restarted_jobs_total", 67 | "Total number of sidekiq jobs that we restarted because of a sidekiq shutdown.", 68 | ) 69 | 70 | @sidekiq_failed_jobs_total = 71 | PrometheusExporter::Metric::Counter.new( 72 | "sidekiq_failed_jobs_total", 73 | "Total number of failed sidekiq jobs.", 74 | ) 75 | 76 | @sidekiq_dead_jobs_total = 77 | PrometheusExporter::Metric::Counter.new( 78 | "sidekiq_dead_jobs_total", 79 | "Total number of dead sidekiq jobs.", 80 | ) 81 | end 82 | end 83 | end 84 | end 85 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/server/sidekiq_process_collector.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: 
# frozen_string_literal: true

module PrometheusExporter::Server
  # Collector for sidekiq process payloads: exposes per-process busy /
  # concurrency gauges. Entries expire after MAX_METRIC_AGE seconds.
  class SidekiqProcessCollector < PrometheusExporter::Server::TypeCollector
    MAX_METRIC_AGE = 60

    SIDEKIQ_PROCESS_GAUGES = {
      "busy" => "Number of running jobs",
      "concurrency" => "Maximum concurrency",
    }.freeze

    attr_reader :sidekiq_metrics, :gauges

    def initialize
      @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
      @gauges = {}
    end

    def type
      "sidekiq_process"
    end

    # Re-observes every unexpired process payload into lazily created gauges.
    def metrics
      # Reset first so series from expired processes do not linger.
      SIDEKIQ_PROCESS_GAUGES.each_key { |gauge_name| @gauges[gauge_name]&.reset! }

      @sidekiq_metrics.each do |process_metric|
        process_labels = process_metric.fetch("labels", {})

        SIDEKIQ_PROCESS_GAUGES.each do |gauge_name, description|
          reading = process_metric[gauge_name]
          next unless reading

          gauge =
            @gauges[gauge_name] ||=
              PrometheusExporter::Metric::Gauge.new("sidekiq_process_#{gauge_name}", description)
          gauge.observe(reading, process_labels)
        end
      end

      @gauges.values
    end

    # Only the "process" sub-hash of the payload is stored.
    def collect(object)
      @sidekiq_metrics << object["process"]
    end
  end
end
} 24 | 25 | sidekiq_metrics.map do |metric| 26 | labels = metric.fetch("labels", {}) 27 | SIDEKIQ_QUEUE_GAUGES.map do |name, help| 28 | if (value = metric[name]) 29 | gauge = 30 | gauges[name] ||= PrometheusExporter::Metric::Gauge.new("sidekiq_queue_#{name}", help) 31 | gauge.observe(value, labels) 32 | end 33 | end 34 | end 35 | 36 | gauges.values 37 | end 38 | 39 | def collect(object) 40 | object["queues"].each do |queue| 41 | queue["labels"].merge!(object["custom_labels"]) if object["custom_labels"] 42 | @sidekiq_metrics << queue 43 | end 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/server/sidekiq_stats_collector.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter::Server 4 | class SidekiqStatsCollector < TypeCollector 5 | MAX_METRIC_AGE = 60 6 | 7 | SIDEKIQ_STATS_GAUGES = { 8 | "dead_size" => "Size of dead the queue", 9 | "enqueued" => "Number of enqueued jobs", 10 | "failed" => "Number of failed jobs", 11 | "processed" => "Total number of processed jobs", 12 | "processes_size" => "Number of processes", 13 | "retry_size" => "Size of the retries queue", 14 | "scheduled_size" => "Size of the scheduled queue", 15 | "workers_size" => "Number of jobs actively being processed", 16 | }.freeze 17 | 18 | attr_reader :sidekiq_metrics, :gauges 19 | 20 | def initialize 21 | @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE) 22 | @gauges = {} 23 | end 24 | 25 | def type 26 | "sidekiq_stats" 27 | end 28 | 29 | def metrics 30 | SIDEKIQ_STATS_GAUGES.each_key { |name| gauges[name]&.reset! 
# frozen_string_literal: true

require "prometheus_exporter/server/metrics_container"

module PrometheusExporter::Server
  # Abstract base class for collectors that each handle one payload "type".
  # Subclasses must implement all three methods:
  #   type    - the string matched against the "type" field of payloads
  #   collect - ingest one decoded payload hash
  #   metrics - return the metric objects to render as Prometheus text
  class TypeCollector
    def type
      raise "must implement type"
    end

    def collect(obj)
      raise "must implement collect"
    end

    def metrics
      raise "must implement metrics"
    end
  end
end
# frozen_string_literal: true

module PrometheusExporter::Server
  # Collector for web request payloads sent by the Rack middleware: a total
  # request counter plus per-subsystem duration aggregations.
  class WebCollector < TypeCollector
    def initialize
      @metrics = {}
      @http_requests_total = nil
      @http_request_duration_seconds = nil
      @http_request_redis_duration_seconds = nil
      @http_request_sql_duration_seconds = nil
      @http_request_queue_duration_seconds = nil
      @http_request_memcache_duration_seconds = nil
    end

    def type
      "web"
    end

    def collect(obj)
      ensure_metrics
      observe(obj)
    end

    def metrics
      @metrics.values
    end

    protected

    # Lazily builds the request counter and the duration aggregations
    # (Summary by default, Histogram when the server runs with --histogram).
    def ensure_metrics
      return if @http_requests_total

      @metrics["http_requests_total"] = @http_requests_total =
        PrometheusExporter::Metric::Counter.new(
          "http_requests_total",
          "Total HTTP requests from web app.",
        )

      # Each duration metric is registered under its own name and mirrored
      # into the identically named instance variable used by observe.
      {
        "http_request_duration_seconds" => "Time spent in HTTP reqs in seconds.",
        "http_request_redis_duration_seconds" => "Time spent in HTTP reqs in Redis, in seconds.",
        "http_request_sql_duration_seconds" => "Time spent in HTTP reqs in SQL in seconds.",
        "http_request_memcache_duration_seconds" => "Time spent in HTTP reqs in Memcache in seconds.",
        "http_request_queue_duration_seconds" => "Time spent queueing the request in load balancer in seconds.",
      }.each do |metric_name, help|
        aggregation = PrometheusExporter::Metric::Base.default_aggregation.new(metric_name, help)
        @metrics[metric_name] = aggregation
        instance_variable_set("@#{metric_name}", aggregation)
      end
    end

    # Records one request: the counter carries a "status" label; durations
    # are recorded per subsystem when present in the payload.
    def observe(obj)
      labels = obj["default_labels"]
      custom = obj["custom_labels"]
      labels = labels.merge(custom) unless custom.nil?

      @http_requests_total.observe(1, labels.merge("status" => obj["status"]))

      timings = obj["timings"]
      if timings
        @http_request_duration_seconds.observe(timings["total_duration"], labels)
        if redis_timing = timings["redis"]
          @http_request_redis_duration_seconds.observe(redis_timing["duration"], labels)
        end
        if sql_timing = timings["sql"]
          @http_request_sql_duration_seconds.observe(sql_timing["duration"], labels)
        end
        if memcache_timing = timings["memcache"]
          @http_request_memcache_duration_seconds.observe(memcache_timing["duration"], labels)
        end
      end

      queue_time = obj["queue_time"]
      @http_request_queue_duration_seconds.observe(queue_time, labels) if queue_time
    end
  end
end
PrometheusExporter::Metric::Counter.new( 35 | "collector_bad_metrics_total", 36 | "Total mis-handled metrics by collector.", 37 | ) 38 | 39 | @metrics_total.observe(0) 40 | @sessions_total.observe(0) 41 | @bad_metrics_total.observe(0) 42 | 43 | @access_log, @logger = nil 44 | log_target = opts[:log_target] 45 | 46 | if @verbose 47 | @access_log = [ 48 | [$stderr, WEBrick::AccessLog::COMMON_LOG_FORMAT], 49 | [$stderr, WEBrick::AccessLog::REFERER_LOG_FORMAT], 50 | ] 51 | @logger = WEBrick::Log.new(log_target || $stderr) 52 | else 53 | @access_log = [] 54 | @logger = WEBrick::Log.new(log_target || "/dev/null") 55 | end 56 | 57 | @logger.info "Using Basic Authentication via #{@auth}" if @verbose && @auth 58 | 59 | if %w[ALL ANY].include?(@bind) 60 | @logger.info "Listening on both 0.0.0.0/:: network interfaces" 61 | @bind = nil 62 | end 63 | 64 | @server = 65 | WEBrick::HTTPServer.new( 66 | Port: @port, 67 | BindAddress: @bind, 68 | Logger: @logger, 69 | AccessLog: @access_log, 70 | ) 71 | 72 | @server.mount_proc "/" do |req, res| 73 | res["Content-Type"] = "text/plain; charset=utf-8" 74 | if req.path == "/metrics" 75 | authenticate(req, res) if @auth 76 | 77 | res.status = 200 78 | if req.header["accept-encoding"].to_s.include?("gzip") 79 | sio = StringIO.new 80 | collected_metrics = metrics 81 | begin 82 | writer = Zlib::GzipWriter.new(sio) 83 | writer.write(collected_metrics) 84 | ensure 85 | writer.close 86 | end 87 | res.body = sio.string 88 | res.header["content-encoding"] = "gzip" 89 | else 90 | res.body = metrics 91 | end 92 | elsif req.path == "/send-metrics" 93 | handle_metrics(req, res) 94 | elsif req.path == "/ping" 95 | res.body = "PONG" 96 | else 97 | res.status = 404 98 | res.body = 99 | "Not Found! 
The Prometheus Ruby Exporter only listens on /ping, /metrics and /send-metrics" 100 | end 101 | end 102 | end 103 | 104 | def handle_metrics(req, res) 105 | @sessions_total.observe 106 | req.body do |block| 107 | begin 108 | @metrics_total.observe 109 | @collector.process(block) 110 | rescue => e 111 | @logger.error "\n\n#{e.inspect}\n#{e.backtrace}\n\n" if @verbose 112 | @bad_metrics_total.observe 113 | res.body = "Bad Metrics #{e}" 114 | res.status = e.respond_to?(:status_code) ? e.status_code : 500 115 | break 116 | end 117 | end 118 | 119 | res.body = "OK" 120 | res.status = 200 121 | end 122 | 123 | def start 124 | @runner ||= 125 | Thread.start do 126 | begin 127 | @server.start 128 | rescue => e 129 | @logger.error "Failed to start prometheus collector web on port #{@port}: #{e}" 130 | end 131 | end 132 | end 133 | 134 | def stop 135 | @server.shutdown 136 | end 137 | 138 | def metrics 139 | metric_text = nil 140 | begin 141 | Timeout.timeout(@timeout) { metric_text = @collector.prometheus_metrics_text } 142 | rescue Timeout::Error 143 | # we timed out ... bummer 144 | @logger.error "Generating Prometheus metrics text timed out" 145 | end 146 | 147 | metrics = [] 148 | 149 | metrics << add_gauge( 150 | "collector_working", 151 | "Is the master process collector able to collect metrics", 152 | metric_text && metric_text.length > 0 ? 
1 : 0, 153 | ) 154 | 155 | metrics << add_gauge("collector_rss", "total memory used by collector process", get_rss) 156 | 157 | metrics << @metrics_total 158 | metrics << @sessions_total 159 | metrics << @bad_metrics_total 160 | 161 | <<~TEXT 162 | #{metrics.map(&:to_prometheus_text).join("\n\n")} 163 | #{metric_text} 164 | TEXT 165 | end 166 | 167 | def get_rss 168 | @pagesize ||= 169 | begin 170 | `getconf PAGESIZE`.to_i 171 | rescue StandardError 172 | 4096 173 | end 174 | @pid ||= Process.pid 175 | begin 176 | File.read("/proc/#{@pid}/statm").split(" ")[1].to_i * @pagesize 177 | rescue StandardError 178 | 0 179 | end 180 | end 181 | 182 | def add_gauge(name, help, value) 183 | gauge = PrometheusExporter::Metric::Gauge.new(name, help) 184 | gauge.observe(value) 185 | gauge 186 | end 187 | 188 | def authenticate(req, res) 189 | htpasswd = WEBrick::HTTPAuth::Htpasswd.new(@auth) 190 | basic_auth = 191 | WEBrick::HTTPAuth::BasicAuth.new({ Realm: @realm, UserDB: htpasswd, Logger: @logger }) 192 | 193 | basic_auth.authenticate(req, res) 194 | end 195 | end 196 | end 197 | -------------------------------------------------------------------------------- /lib/prometheus_exporter/version.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module PrometheusExporter 4 | VERSION = "2.2.0" 5 | end 6 | -------------------------------------------------------------------------------- /prometheus_exporter.gemspec: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | lib = File.expand_path("../lib", __FILE__) 4 | $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) 5 | require "prometheus_exporter/version" 6 | 7 | Gem::Specification.new do |spec| 8 | spec.name = "prometheus_exporter" 9 | spec.version = PrometheusExporter::VERSION 10 | spec.authors = ["Sam Saffron"] 11 | spec.email = ["sam.saffron@gmail.com"] 12 | 13 | spec.summary = 
# frozen_string_literal: true

lib = File.expand_path("../lib", __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require "prometheus_exporter/version"

Gem::Specification.new do |spec|
  spec.name = "prometheus_exporter"
  spec.version = PrometheusExporter::VERSION
  spec.authors = ["Sam Saffron"]
  spec.email = ["sam.saffron@gmail.com"]

  spec.summary = "Prometheus Exporter"
  spec.description = "Prometheus metric collector and exporter for Ruby"
  spec.homepage = "https://github.com/discourse/prometheus_exporter"
  spec.license = "MIT"

  spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features|bin)/}) }
  spec.bindir = "bin"
  spec.executables = ["prometheus_exporter"]
  spec.require_paths = ["lib"]

  spec.add_dependency "webrick"

  spec.add_development_dependency "rubocop", ">= 0.69"
  spec.add_development_dependency "bundler", ">= 2.1.4"
  spec.add_development_dependency "rake", "~> 13.0"
  spec.add_development_dependency "minitest", "~> 5.23.0"
  spec.add_development_dependency "guard", "~> 2.0"
  spec.add_development_dependency "mini_racer", "~> 0.12.0"
  spec.add_development_dependency "guard-minitest", "~> 2.0"
  spec.add_development_dependency "oj", "~> 3.0"
  spec.add_development_dependency "rack-test", "~> 2.1.0"
  spec.add_development_dependency "minitest-stub-const", "~> 0.6"
  spec.add_development_dependency "rubocop-discourse", ">= 3"
  spec.add_development_dependency "appraisal", "~> 2.3"
  spec.add_development_dependency "activerecord", "~> 7.1"
  spec.add_development_dependency "redis", "> 5"
  spec.add_development_dependency "m"
  spec.add_development_dependency "syntax_tree"
  spec.add_development_dependency "syntax_tree-disable_ternary"
  # BUG FIX: the original guard was `if !RUBY_ENGINE == "jruby"`, which
  # evaluates as `(!RUBY_ENGINE) == "jruby"` -> `false == "jruby"` -> false,
  # so raindrops was never added on any engine. Compare for inequality.
  spec.add_development_dependency "raindrops", "~> 0.19" if RUBY_ENGINE != "jruby"
  spec.required_ruby_version = ">= 3.0.0"
end
# frozen_string_literal: true

require_relative "test_helper"
require "prometheus_exporter/client"

class PrometheusExporterTest < Minitest::Test
  # find_registered_metric matches on name, and optionally on :type and :help.
  def test_find_the_correct_registered_metric
    client = PrometheusExporter::Client.new

    # register a metric for the lookups below
    registered = client.register(:counter, "counter_metric", "helping")

    # unknown name -> nil
    assert_nil(client.find_registered_metric("not_registered"))

    # name matches -> the metric
    assert_equal(registered, client.find_registered_metric("counter_metric"))

    # name matches but type does not -> nil
    assert_nil(client.find_registered_metric("counter_metric", type: :gauge))

    # name and type match -> the metric
    assert_equal(registered, client.find_registered_metric("counter_metric", type: :counter))

    # name matches but help does not -> nil
    assert_nil(client.find_registered_metric("counter_metric", help: "not helping"))

    # name and help match -> the metric
    assert_equal(registered, client.find_registered_metric("counter_metric", help: "helping"))

    # name matches but neither type nor help does -> nil
    assert_nil(client.find_registered_metric("counter_metric", type: :gauge, help: "not helping"))

    # name, type and help all match -> the metric
    assert_equal(
      registered,
      client.find_registered_metric("counter_metric", type: :counter, help: "helping"),
    )
  end

  # standard_values only carries :opts when the metric was registered with opts.
  def test_standard_values
    client = PrometheusExporter::Client.new
    plain_counter = client.register(:counter, "counter_metric", "helping")
    refute(plain_counter.standard_values("value", "key").has_key?(:opts))

    expected_quantiles = { quantiles: [0.99, 9] }
    summary_with_opts = client.register(:summary, "summary_metric", "helping", expected_quantiles)
    assert_equal(expected_quantiles, summary_with_opts.standard_values("value", "key")[:opts])
  end

  # A refused connection is reported through the configured logger.
  def test_close_socket_on_error
    log_output = StringIO.new
    logger = Logger.new(log_output)
    logger.level = :error

    client =
      PrometheusExporter::Client.new(logger: logger, port: 321, process_queue_once_and_stop: true)
    client.send("put a message in the queue")

    assert_includes(
      log_output.string,
      "Prometheus Exporter, failed to send message Connection refused - connect(2) for \"localhost\" port 321",
    )
  end

  # Overflowing max_queue_size warns via the injected logger.
  def test_overriding_logger
    log_output = StringIO.new
    logger = Logger.new(log_output)
    logger.level = :warn

    client =
      PrometheusExporter::Client.new(
        logger: logger,
        max_queue_size: 1,
        process_queue_once_and_stop: true,
      )
    client.send("put a message in the queue")
    client.send("put a second message in the queue to trigger the logger")

    assert_includes(log_output.string, "dropping message cause queue is full")
  end
end
# frozen_string_literal: true

require_relative "../test_helper"
require "prometheus_exporter/instrumentation"
require "active_record"

class PrometheusInstrumentationActiveRecordTest < Minitest::Test
  # Keys we expect every collected pool metric hash to contain.
  COLLECTED_KEYS = %i[size connections busy dead idle waiting checkout_timeout type metric_labels]

  def setup
    super

    # Hold a reference so the pool remains reachable via ::ObjectSpace.
    @pool =
      if active_record_version >= Gem::Version.create("6.1.0.rc1")
        active_record61_pool
      elsif active_record_version >= Gem::Version.create("6.0.0")
        active_record60_pool
      else
        raise "unsupported active_record version"
      end
  end

  def metric_labels
    { foo: :bar }
  end

  def config_labels
    %i[database username]
  end

  def collector
    @collector ||=
      PrometheusExporter::Instrumentation::ActiveRecord.new(metric_labels, config_labels)
  end

  # One generated test per expected key in the collected metrics.
  COLLECTED_KEYS.each do |key|
    define_method("test_collecting_metrics_contain_#{key}_key") do
      assert_includes collector.collect.first, key
    end
  end

  def test_metrics_labels
    assert_includes collector.collect.first[:metric_labels], :foo
  end

  def test_type
    assert_equal collector.collect.first[:type], "active_record"
  end

  private

  def active_record_version
    Gem.loaded_specs["activerecord"].version
  end

  # Pool construction differs between AR 6.0 and 6.1+.
  def active_record60_pool
    ::ActiveRecord::ConnectionAdapters::ConnectionPool.new(OpenStruct.new(config: {}))
  end

  def active_record61_pool
    ::ActiveRecord::ConnectionAdapters::ConnectionPool.new(
      OpenStruct.new(db_config: OpenStruct.new(checkout_timeout: 0, idle_timeout: 0, pool: 5)),
    )
  end
end
# frozen_string_literal: true

require_relative "../test_helper"
require "prometheus_exporter/instrumentation"

class PrometheusInstrumentationMethodProfilerTest < Minitest::Test
  class SomeClassPatchedUsingAliasMethod
    def some_method
      "Hello, world"
    end
  end

  class SomeClassPatchedUsingPrepend
    def some_method
      "Hello, world"
    end
  end

  # Patch each fixture class once at load time, one per instrumentation mode.
  {
    SomeClassPatchedUsingAliasMethod => :alias_method,
    SomeClassPatchedUsingPrepend => :prepend,
  }.each do |klass, mode|
    PrometheusExporter::Instrumentation::MethodProfiler.patch(
      klass,
      [:some_method],
      :test,
      instrument: mode,
    )
  end

  def test_alias_method_source_location
    assert_equal 'def #{method_name}(...)', patched_source_line(SomeClassPatchedUsingAliasMethod)
  end

  def test_alias_method_preserves_behavior
    assert_equal "Hello, world", SomeClassPatchedUsingAliasMethod.new.some_method
  end

  def test_prepend_source_location
    assert_equal 'def #{method_name}(...)', patched_source_line(SomeClassPatchedUsingPrepend)
  end

  def test_prepend_preserves_behavior
    assert_equal "Hello, world", SomeClassPatchedUsingPrepend.new.some_method
  end

  private

  # Reads the stripped source line where the patched method is now defined,
  # proving the profiler generated the wrapper via string eval.
  def patched_source_line(klass)
    file, line = klass.instance_method(:some_method).source_location
    File.read(file).lines[line - 1].strip
  end
end
# frozen_string_literal: true

require_relative "../test_helper"
require "prometheus_exporter/metric"

module PrometheusExporter::Metric
  describe Base do
    let(:counter) { Counter.new("a_counter", "my amazing counter") }

    # Every example starts and ends with pristine class-level defaults.
    def reset_defaults
      Base.default_prefix = ""
      Base.default_labels = {}
      Base.default_aggregation = nil
    end

    before { reset_defaults }
    after { reset_defaults }

    # reset! clears every observed series but keeps the metric header.
    def assert_reset_clears(metric, type_name)
      metric.observe(999)
      metric.observe(100, a: "a")
      metric.reset!

      expected = <<~TEXT
        # HELP test test
        # TYPE test #{type_name}
      TEXT

      assert_equal(metric.to_prometheus_text.strip, expected.strip)
    end

    it "supports a dynamic prefix" do
      Base.default_prefix = "web_"
      counter.observe

      expected = <<~TEXT
        # HELP web_a_counter my amazing counter
        # TYPE web_a_counter counter
        web_a_counter 1
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "supports default labels" do
      Base.default_labels = { foo: "bar" }

      counter.observe(2, baz: "bar")
      counter.observe

      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter{foo="bar",baz="bar"} 2
        a_counter{foo="bar"} 1
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "uses specified labels over default labels when there is conflict" do
      Base.default_labels = { foo: "bar" }

      counter.observe(2, foo: "baz")
      counter.observe

      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter{foo="baz"} 2
        a_counter{foo="bar"} 1
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "supports reset! for Gauge" do
      assert_reset_clears(Gauge.new("test", "test"), "gauge")
    end

    it "supports reset! for Counter" do
      assert_reset_clears(Counter.new("test", "test"), "counter")
    end

    it "supports reset! for Histogram" do
      assert_reset_clears(Histogram.new("test", "test"), "histogram")
    end

    it "supports reset! for Summary" do
      assert_reset_clears(Summary.new("test", "test"), "summary")
    end

    it "creates a summary by default" do
      aggregation = Base.default_aggregation.new("test", "test")

      expected = <<~TEXT
        # HELP test test
        # TYPE test summary
      TEXT

      assert_equal(aggregation.to_prometheus_text.strip, expected.strip)
    end

    it "creates a histogram when configured" do
      Base.default_aggregation = Histogram
      aggregation = Base.default_aggregation.new("test", "test")

      expected = <<~TEXT
        # HELP test test
        # TYPE test histogram
      TEXT

      assert_equal(aggregation.to_prometheus_text.strip, expected.strip)
    end
  end
end
# frozen_string_literal: true

require_relative "../test_helper"
require "prometheus_exporter/metric"

module PrometheusExporter::Metric
  describe Counter do
    let(:counter) { Counter.new("a_counter", "my amazing counter") }

    before { Base.default_prefix = "" }

    it "supports a dynamic prefix" do
      Base.default_prefix = "web_"
      counter.observe

      expected = <<~TEXT
        # HELP web_a_counter my amazing counter
        # TYPE web_a_counter counter
        web_a_counter 1
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
      Base.default_prefix = ""
    end

    it "can correctly increment counters with labels" do
      counter.observe(2, sam: "ham")
      counter.observe(1, sam: "ham", fam: "bam")
      counter.observe

      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter{sam="ham"} 2
        a_counter{sam="ham",fam="bam"} 1
        a_counter 1
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "can correctly increment" do
      counter.observe(1, sam: "ham")
      counter.increment({ sam: "ham" }, 2)

      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter{sam="ham"} 3
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "can correctly decrement" do
      counter.observe(5, sam: "ham")
      counter.decrement({ sam: "ham" }, 2)

      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter{sam="ham"} 3
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "can correctly log multiple increments" do
      3.times { counter.observe }

      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter 3
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "can correctly escape label names" do
      counter.observe(1, sam: "encoding \\ \\")
      counter.observe(1, sam: "encoding \" \"")
      counter.observe(1, sam: "encoding \n \n")

      # per spec: label_value can be any sequence of UTF-8 characters, but the
      # backslash, double-quote and line feed characters have to be escaped as
      # \\, \" and \n respectively
      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter{sam="encoding \\\\ \\\\"} 1
        a_counter{sam="encoding \\" \\""} 1
        a_counter{sam="encoding \\n \\n"} 1
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "can correctly reset to a default value" do
      counter.observe(5, sam: "ham")
      counter.reset(sam: "ham")

      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter{sam="ham"} 0
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "can correctly reset to an explicit value" do
      counter.observe(5, sam: "ham")
      counter.reset({ sam: "ham" }, 2)

      expected = <<~TEXT
        # HELP a_counter my amazing counter
        # TYPE a_counter counter
        a_counter{sam="ham"} 2
      TEXT

      assert_equal(counter.to_prometheus_text, expected)
    end

    it "can correctly remove metrics" do
      counter.observe(5, sam: "ham")
      counter.observe(10, foo: "bar")
      counter.remove(sam: "ham")
      counter.remove(missing: "ham")

      assert_equal(counter.to_h, { foo: "bar" } => 10)
    end

    it "can correctly return data set" do
      counter.observe(5, sam: "ham")
      counter.observe(10, foo: "bar")

      assert_equal(counter.to_h, { sam: "ham" } => 5, { foo: "bar" } => 10)
    end
  end
end
# frozen_string_literal: true

require_relative "../test_helper"
require "prometheus_exporter/metric"

module PrometheusExporter::Metric
  describe Gauge do
    let(:gauge) { Gauge.new("a_gauge", "my amazing gauge") }

    let(:gauge_with_total_suffix) { Gauge.new("a_gauge_total", "my amazing gauge") }

    before { Base.default_prefix = "" }

    it "should not allow observe to corrupt data" do
      assert_raises { gauge.observe("hello") }

      # going to special case nil here instead of adding a new API
      # observing nil should set to nothing
      # this is a slight difference to official API which would raise
      # on non numeric, however it provides a bit more flexibility
      # and allows us to remove metrics if we wish
      gauge.observe(100)
      gauge.observe(nil)
      gauge.observe(nil, a: "thing")

      expected = <<~TEXT
        # HELP a_gauge my amazing gauge
        # TYPE a_gauge gauge

      TEXT

      assert_equal(gauge.to_prometheus_text, expected)
    end

    it "supports a dynamic prefix" do
      Base.default_prefix = "web_"
      gauge.observe(400.11)

      expected = <<~TEXT
        # HELP web_a_gauge my amazing gauge
        # TYPE web_a_gauge gauge
        web_a_gauge 400.11
      TEXT

      assert_equal(gauge.to_prometheus_text, expected)

      Base.default_prefix = ""
    end

    it "can correctly set gauges with labels" do
      gauge.observe(100.5, sam: "ham")
      gauge.observe(5, sam: "ham", fam: "bam")
      gauge.observe(400.11)

      expected = <<~TEXT
        # HELP a_gauge my amazing gauge
        # TYPE a_gauge gauge
        a_gauge{sam="ham"} 100.5
        a_gauge{sam="ham",fam="bam"} 5
        a_gauge 400.11
      TEXT

      assert_equal(gauge.to_prometheus_text, expected)
    end

    it "can correctly reset on change" do
      gauge.observe(10)
      gauge.observe(11)

      expected = <<~TEXT
        # HELP a_gauge my amazing gauge
        # TYPE a_gauge gauge
        a_gauge 11
      TEXT

      assert_equal(gauge.to_prometheus_text, expected)
    end

    it "can use the set on alias" do
      gauge.set(10)
      gauge.set(11)

      expected = <<~TEXT
        # HELP a_gauge my amazing gauge
        # TYPE a_gauge gauge
        a_gauge 11
      TEXT

      assert_equal(gauge.to_prometheus_text, expected)
    end

    it "can correctly reset on change with labels" do
      gauge.observe(1, sam: "ham")
      gauge.observe(2, sam: "ham")

      expected = <<~TEXT
        # HELP a_gauge my amazing gauge
        # TYPE a_gauge gauge
        a_gauge{sam="ham"} 2
      TEXT

      assert_equal(gauge.to_prometheus_text, expected)
    end

    it "can correctly increment" do
      gauge.observe(1, sam: "ham")
      gauge.increment({ sam: "ham" }, 2)

      expected = <<~TEXT
        # HELP a_gauge my amazing gauge
        # TYPE a_gauge gauge
        a_gauge{sam="ham"} 3
      TEXT

      assert_equal(gauge.to_prometheus_text, expected)
    end

    it "can correctly decrement" do
      gauge.observe(5, sam: "ham")
      gauge.decrement({ sam: "ham" }, 2)

      expected = <<~TEXT
        # HELP a_gauge my amazing gauge
        # TYPE a_gauge gauge
        a_gauge{sam="ham"} 3
      TEXT

      assert_equal(gauge.to_prometheus_text, expected)
    end

    it "can correctly remove metrics" do
      gauge.observe(5, sam: "ham")
      gauge.observe(10, foo: "bar")
      gauge.remove(sam: "ham")
      gauge.remove(bam: "ham")

      assert_equal(gauge.to_h, { foo: "bar" } => 10)
    end

    it "can correctly return data set" do
      gauge.observe(5, sam: "ham")
      gauge.observe(10, foo: "bar")

      assert_equal(gauge.to_h, { sam: "ham" } => 5, { foo: "bar" } => 10)
    end

    it "should not allow to create new instance with _total suffix" do
      assert_raises ArgumentError do
        gauge_with_total_suffix
      end
    end
  end
end
# frozen_string_literal: true

require_relative "../test_helper"
require "prometheus_exporter/metric"

module PrometheusExporter::Metric
  describe Histogram do
    let(:histogram) { Histogram.new("a_histogram", "my amazing histogram") }

    before { Base.default_prefix = "" }

    it "can correctly gather a histogram" do
      histogram.observe(0.1)
      histogram.observe(0.2)
      3.times { histogram.observe(0.610001) }
      histogram.observe(0.910001)
      histogram.observe(0.1)

      expected = <<~TEXT
        # HELP a_histogram my amazing histogram
        # TYPE a_histogram histogram
        a_histogram_bucket{le="0.005"} 0
        a_histogram_bucket{le="0.01"} 0
        a_histogram_bucket{le="0.025"} 0
        a_histogram_bucket{le="0.05"} 0
        a_histogram_bucket{le="0.1"} 2
        a_histogram_bucket{le="0.25"} 3
        a_histogram_bucket{le="0.5"} 3
        a_histogram_bucket{le="1"} 7
        a_histogram_bucket{le="2.5"} 7
        a_histogram_bucket{le="5.0"} 7
        a_histogram_bucket{le="10.0"} 7
        a_histogram_bucket{le="+Inf"} 7
        a_histogram_count 7
        a_histogram_sum 3.1400040000000002
      TEXT

      assert_equal(histogram.to_prometheus_text, expected)
    end

    it "can correctly gather a histogram over multiple labels" do
      histogram.observe(0.1, nil)
      histogram.observe(0.2)
      histogram.observe(0.610001)
      histogram.observe(0.610001)

      histogram.observe(0.1, name: "bob", family: "skywalker")
      histogram.observe(0.7, name: "bob", family: "skywalker")
      histogram.observe(0.99, name: "bob", family: "skywalker")

      expected = <<~TEXT
        # HELP a_histogram my amazing histogram
        # TYPE a_histogram histogram
        a_histogram_bucket{le="0.005"} 0
        a_histogram_bucket{le="0.01"} 0
        a_histogram_bucket{le="0.025"} 0
        a_histogram_bucket{le="0.05"} 0
        a_histogram_bucket{le="0.1"} 1
        a_histogram_bucket{le="0.25"} 2
        a_histogram_bucket{le="0.5"} 2
        a_histogram_bucket{le="1"} 4
        a_histogram_bucket{le="2.5"} 4
        a_histogram_bucket{le="5.0"} 4
        a_histogram_bucket{le="10.0"} 4
        a_histogram_bucket{le="+Inf"} 4
        a_histogram_count 4
        a_histogram_sum 1.520002
        a_histogram_bucket{name="bob",family="skywalker",le="0.005"} 0
        a_histogram_bucket{name="bob",family="skywalker",le="0.01"} 0
        a_histogram_bucket{name="bob",family="skywalker",le="0.025"} 0
        a_histogram_bucket{name="bob",family="skywalker",le="0.05"} 0
        a_histogram_bucket{name="bob",family="skywalker",le="0.1"} 1
        a_histogram_bucket{name="bob",family="skywalker",le="0.25"} 1
        a_histogram_bucket{name="bob",family="skywalker",le="0.5"} 1
        a_histogram_bucket{name="bob",family="skywalker",le="1"} 3
        a_histogram_bucket{name="bob",family="skywalker",le="2.5"} 3
        a_histogram_bucket{name="bob",family="skywalker",le="5.0"} 3
        a_histogram_bucket{name="bob",family="skywalker",le="10.0"} 3
        a_histogram_bucket{name="bob",family="skywalker",le="+Inf"} 3
        a_histogram_count{name="bob",family="skywalker"} 3
        a_histogram_sum{name="bob",family="skywalker"} 1.79
      TEXT

      assert_equal(histogram.to_prometheus_text, expected)
    end

    it "can correctly gather a histogram using custom buckets" do
      # buckets are sorted internally, so pass them deliberately out of order
      histogram = Histogram.new("a_histogram", "my amazing histogram", buckets: [2, 1, 3])

      histogram.observe(0.5)
      histogram.observe(1.5)
      histogram.observe(4)
      histogram.observe(2, name: "gargamel")

      expected = <<~TEXT
        # HELP a_histogram my amazing histogram
        # TYPE a_histogram histogram
        a_histogram_bucket{le="1"} 1
        a_histogram_bucket{le="2"} 2
        a_histogram_bucket{le="3"} 2
        a_histogram_bucket{le="+Inf"} 3
        a_histogram_count 3
        a_histogram_sum 6.0
        a_histogram_bucket{name="gargamel",le="1"} 0
        a_histogram_bucket{name="gargamel",le="2"} 1
        a_histogram_bucket{name="gargamel",le="3"} 1
        a_histogram_bucket{name="gargamel",le="+Inf"} 1
        a_histogram_count{name="gargamel"} 1
        a_histogram_sum{name="gargamel"} 2.0
      TEXT

      assert_equal(histogram.to_prometheus_text, expected)
    end

    it "can correctly return data set" do
      histogram.observe(0.1, name: "bob", family: "skywalker")
      histogram.observe(0.7, name: "bob", family: "skywalker")
      histogram.observe(0.99, name: "bob", family: "skywalker")

      labels = { name: "bob", family: "skywalker" }
      stats = { "count" => 3, "sum" => 1.79 }

      assert_equal(histogram.to_h, labels => stats)
    end

    it "can correctly remove histograms" do
      histogram.observe(0.1, name: "bob", family: "skywalker")
      histogram.observe(0.7, name: "bob", family: "skywalker")
      histogram.observe(0.99, name: "bob", family: "skywalker")

      histogram.observe(0.6, name: "gandalf", family: "skywalker")

      # removing an unknown label set is a no-op
      histogram.remove(name: "gandalf", family: "skywalker")
      histogram.remove(name: "jane", family: "skywalker")

      labels = { name: "bob", family: "skywalker" }
      stats = { "count" => 3, "sum" => 1.79 }

      assert_equal(histogram.to_h, labels => stats)
    end

    it "supports default buckets" do
      assert_equal(
        Histogram::DEFAULT_BUCKETS,
        [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5.0, 10.0],
      )
      assert_equal(Histogram::DEFAULT_BUCKETS, Histogram.default_buckets)
    end

    it "allows to change default buckets" do
      custom_buckets = [0.005, 0.1, 1, 2, 5, 10]
      Histogram.default_buckets = custom_buckets

      assert_equal(Histogram.default_buckets, custom_buckets)

      Histogram.default_buckets = Histogram::DEFAULT_BUCKETS
    end

    it "uses the default buckets for instance" do
      assert_equal(histogram.buckets, Histogram::DEFAULT_BUCKETS)
    end

    it "uses the the custom default buckets for instance" do
      custom_buckets = [0.005, 0.1, 1, 2, 5, 10]
      Histogram.default_buckets = custom_buckets

      assert_equal(histogram.buckets, custom_buckets)

      Histogram.default_buckets = Histogram::DEFAULT_BUCKETS
    end

    it "uses the specified buckets" do
      buckets = [0.1, 0.2, 0.3]
      histogram = Histogram.new("test_bucktets", "I have specified buckets", buckets: buckets)

      assert_equal(histogram.buckets, buckets)
    end
  end
end
a_summary_count 4 51 | a_summary{name="bob",family="skywalker",quantile="0.99"} 0.99 52 | a_summary{name="bob",family="skywalker",quantile="0.9"} 0.99 53 | a_summary{name="bob",family="skywalker",quantile="0.5"} 0.7 54 | a_summary{name="bob",family="skywalker",quantile="0.1"} 0.1 55 | a_summary{name="bob",family="skywalker",quantile="0.01"} 0.1 56 | a_summary_sum{name="bob",family="skywalker"} 1.79 57 | a_summary_count{name="bob",family="skywalker"} 3 58 | TEXT 59 | 60 | assert_equal(summary.to_prometheus_text, expected) 61 | end 62 | 63 | it "can correctly gather a summary" do 64 | summary.observe(0.1) 65 | summary.observe(0.2) 66 | summary.observe(0.610001) 67 | summary.observe(0.610001) 68 | summary.observe(0.610001) 69 | summary.observe(0.910001) 70 | summary.observe(0.1) 71 | 72 | expected = <<~TEXT 73 | # HELP a_summary my amazing summary 74 | # TYPE a_summary summary 75 | a_summary{quantile="0.99"} 0.910001 76 | a_summary{quantile="0.9"} 0.910001 77 | a_summary{quantile="0.5"} 0.610001 78 | a_summary{quantile="0.1"} 0.1 79 | a_summary{quantile="0.01"} 0.1 80 | a_summary_sum 3.1400040000000002 81 | a_summary_count 7 82 | TEXT 83 | 84 | assert_equal(summary.to_prometheus_text, expected) 85 | end 86 | 87 | it "can correctly rotate quantiles" do 88 | Process.stub(:clock_gettime, 1.0) do 89 | summary.observe(0.1) 90 | summary.observe(0.2) 91 | summary.observe(0.6) 92 | end 93 | 94 | Process.stub(:clock_gettime, 1.0 + Summary::ROTATE_AGE + 1.0) { summary.observe(300) } 95 | 96 | Process.stub(:clock_gettime, 1.0 + (Summary::ROTATE_AGE * 2) + 1.1) do 97 | summary.observe(100) 98 | summary.observe(200) 99 | summary.observe(300) 100 | 101 | expected = <<~TEXT 102 | # HELP a_summary my amazing summary 103 | # TYPE a_summary summary 104 | a_summary{quantile="0.99"} 300.0 105 | a_summary{quantile="0.9"} 300.0 106 | a_summary{quantile="0.5"} 200.0 107 | a_summary{quantile="0.1"} 100.0 108 | a_summary{quantile="0.01"} 100.0 109 | a_summary_sum 900.9 110 | a_summary_count 7 
111 | TEXT 112 | 113 | assert_equal(summary.to_prometheus_text, expected) 114 | end 115 | end 116 | 117 | it "can correctly return data set" do 118 | summary.observe(0.1, name: "bob", family: "skywalker") 119 | summary.observe(0.7, name: "bob", family: "skywalker") 120 | summary.observe(0.99, name: "bob", family: "skywalker") 121 | 122 | key = { name: "bob", family: "skywalker" } 123 | val = { "count" => 3, "sum" => 1.79 } 124 | 125 | assert_equal(summary.to_h, key => val) 126 | end 127 | 128 | it "can correctly remove data" do 129 | summary.observe(0.1, name: "bob", family: "skywalker") 130 | summary.observe(0.7, name: "bob", family: "skywalker") 131 | summary.observe(0.99, name: "bob", family: "skywalker") 132 | 133 | summary.observe(0.1, name: "jane", family: "skywalker") 134 | summary.observe(0.2, name: "jane", family: "skywalker") 135 | 136 | summary.remove(name: "jane", family: "skywalker") 137 | 138 | key = { name: "bob", family: "skywalker" } 139 | val = { "count" => 3, "sum" => 1.79 } 140 | 141 | assert_equal(summary.to_h, key => val) 142 | end 143 | end 144 | end 145 | -------------------------------------------------------------------------------- /test/middleware_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require "minitest/stub_const" 4 | require_relative "test_helper" 5 | require "rack/test" 6 | require "prometheus_exporter/middleware" 7 | 8 | class PrometheusExporterMiddlewareTest < Minitest::Test 9 | include Rack::Test::Methods 10 | 11 | attr_reader :app 12 | 13 | class FakeClient 14 | attr_reader :last_send 15 | 16 | def send_json(args) 17 | @last_send = args 18 | end 19 | end 20 | 21 | def client 22 | @client ||= FakeClient.new 23 | end 24 | 25 | def inner_app 26 | Proc.new { |env| [200, {}, "OK"] } 27 | end 28 | 29 | def now 30 | @now = Process.clock_gettime(Process::CLOCK_REALTIME) 31 | end 32 | 33 | def configure_middleware(overrides = {}) 34 | config = { 
client: client, instrument: :alias_method }.merge(overrides) 35 | @app = PrometheusExporter::Middleware.new(inner_app, config) 36 | def @app.request_start 37 | 1234567891.123 38 | end 39 | end 40 | 41 | def assert_valid_headers_response(delta = 0.5) 42 | configure_middleware 43 | get "/" 44 | assert last_response.ok? 45 | refute_nil client.last_send 46 | refute_nil client.last_send[:queue_time] 47 | assert_in_delta 1, client.last_send[:queue_time], delta 48 | end 49 | 50 | def assert_invalid_headers_response 51 | configure_middleware 52 | get "/" 53 | assert last_response.ok? 54 | refute_nil client.last_send 55 | assert_nil client.last_send[:queue_time] 56 | end 57 | 58 | def test_converting_apache_request_start 59 | configure_middleware 60 | now_microsec = "1234567890123456" 61 | header "X-Request-Start", "t=#{now_microsec}" 62 | assert_valid_headers_response 63 | end 64 | 65 | def test_converting_nginx_request_start 66 | configure_middleware 67 | now = "1234567890.123" 68 | header "X-Request-Start", "t=#{now}" 69 | assert_valid_headers_response 70 | end 71 | 72 | def test_request_start_in_wrong_format 73 | configure_middleware 74 | header "X-Request-Start", "" 75 | assert_invalid_headers_response 76 | end 77 | 78 | def test_converting_amzn_trace_id_start 79 | configure_middleware 80 | now = "1234567890" 81 | header "X-Amzn-Trace-Id", "Root=1-#{now.to_i.to_s(16)}-abc123" 82 | assert_valid_headers_response 83 | end 84 | 85 | def test_amzn_trace_id_in_wrong_format 86 | configure_middleware 87 | header "X-Amzn-Trace-Id", "" 88 | assert_invalid_headers_response 89 | end 90 | 91 | def test_redis_5_call_patching 92 | RedisValidationMiddleware.reset! 
93 | configure_middleware 94 | 95 | # protocol 2 is the old redis protocol, it uses no preamble so you don't leak HELLO 96 | # calls 97 | redis_config = RedisClient.config(host: "127.0.0.1", port: 10, protocol: 2) 98 | redis = redis_config.new_pool(timeout: 0.5, size: 1) 99 | PrometheusExporter::Instrumentation::MethodProfiler.start 100 | redis.call("PING") # => "PONG" 101 | redis.call("PING") # => "PONG" 102 | results = PrometheusExporter::Instrumentation::MethodProfiler.stop 103 | assert(2, results[:redis][:calls]) 104 | 105 | assert_equal(2, RedisValidationMiddleware.call_calls) 106 | assert_equal(0, RedisValidationMiddleware.call_pipelined_calls) 107 | end 108 | 109 | def test_redis_5_call_pipelined_patching 110 | RedisValidationMiddleware.reset! 111 | configure_middleware 112 | 113 | # protocol 2 is the old redis protocol, it uses no preamble so you don't leak HELLO 114 | # calls 115 | redis_config = RedisClient.config(host: "127.0.0.1", port: 10, protocol: 2) 116 | redis = redis_config.new_pool(timeout: 0.5, size: 1) 117 | PrometheusExporter::Instrumentation::MethodProfiler.start 118 | redis.pipelined do |pipeline| 119 | pipeline.call("PING") # => "PONG" 120 | pipeline.call("PING") # => "PONG" 121 | end 122 | 123 | assert_equal(0, RedisValidationMiddleware.call_calls) 124 | assert_equal(1, RedisValidationMiddleware.call_pipelined_calls) 125 | 126 | results = PrometheusExporter::Instrumentation::MethodProfiler.stop 127 | assert_equal(1, results[:redis][:calls]) 128 | end 129 | 130 | def test_patch_called_with_prepend_instrument 131 | Object.stub_const(:Redis, Module) do 132 | ::Redis.stub_const(:Client) do 133 | mock = Minitest::Mock.new 134 | mock.expect :call, nil, [Redis::Client, Array, :redis], instrument: :prepend 135 | ::PrometheusExporter::Instrumentation::MethodProfiler.stub(:patch, mock) do 136 | configure_middleware(instrument: :prepend) 137 | end 138 | mock.verify 139 | end 140 | end 141 | 142 | Object.stub_const(:PG, Module) do 143 | 
::PG.stub_const(:Connection) do 144 | mock = Minitest::Mock.new 145 | mock.expect :call, nil, [PG::Connection, Array, :sql], instrument: :prepend 146 | ::PrometheusExporter::Instrumentation::MethodProfiler.stub(:patch, mock) do 147 | configure_middleware(instrument: :prepend) 148 | end 149 | mock.verify 150 | end 151 | end 152 | 153 | Object.stub_const(:Mysql2, Module) do 154 | ::Mysql2.stub_consts({ Client: nil, Statement: nil, Result: nil }) do 155 | mock = Minitest::Mock.new 156 | mock.expect :call, nil, [Mysql2::Client, Array, :sql], instrument: :prepend 157 | mock.expect :call, nil, [Mysql2::Statement, Array, :sql], instrument: :prepend 158 | mock.expect :call, nil, [Mysql2::Result, Array, :sql], instrument: :prepend 159 | ::PrometheusExporter::Instrumentation::MethodProfiler.stub(:patch, mock) do 160 | configure_middleware(instrument: :prepend) 161 | end 162 | mock.verify 163 | end 164 | end 165 | 166 | Object.stub_const(:Dalli, Module) do 167 | ::Dalli.stub_const(:Client) do 168 | mock = Minitest::Mock.new 169 | mock.expect :call, nil, [Dalli::Client, Array, :memcache], instrument: :prepend 170 | ::PrometheusExporter::Instrumentation::MethodProfiler.stub(:patch, mock) do 171 | configure_middleware(instrument: :prepend) 172 | end 173 | mock.verify 174 | end 175 | end 176 | end 177 | 178 | def test_patch_called_with_alias_method_instrument 179 | Object.stub_const(:Redis, Module) do 180 | # must be less than version 5 for this instrumentation 181 | ::Redis.stub_const(:VERSION, "4.0.4") do 182 | ::Redis.stub_const(:Client) do 183 | mock = Minitest::Mock.new 184 | mock.expect :call, nil, [Redis::Client, Array, :redis], instrument: :alias_method 185 | ::PrometheusExporter::Instrumentation::MethodProfiler.stub(:patch, mock) do 186 | configure_middleware 187 | end 188 | mock.verify 189 | end 190 | end 191 | end 192 | 193 | Object.stub_const(:PG, Module) do 194 | ::PG.stub_const(:Connection) do 195 | mock = Minitest::Mock.new 196 | mock.expect :call, nil, 
[PG::Connection, Array, :sql], instrument: :alias_method 197 | ::PrometheusExporter::Instrumentation::MethodProfiler.stub(:patch, mock) do 198 | configure_middleware 199 | end 200 | mock.verify 201 | end 202 | end 203 | 204 | Object.stub_const(:Mysql2, Module) do 205 | ::Mysql2.stub_consts({ Client: nil, Statement: nil, Result: nil }) do 206 | mock = Minitest::Mock.new 207 | mock.expect :call, nil, [Mysql2::Client, Array, :sql], instrument: :alias_method 208 | mock.expect :call, nil, [Mysql2::Statement, Array, :sql], instrument: :alias_method 209 | mock.expect :call, nil, [Mysql2::Result, Array, :sql], instrument: :alias_method 210 | ::PrometheusExporter::Instrumentation::MethodProfiler.stub(:patch, mock) do 211 | configure_middleware 212 | end 213 | mock.verify 214 | end 215 | end 216 | 217 | Object.stub_const(:Dalli, Module) do 218 | ::Dalli.stub_const(:Client) do 219 | mock = Minitest::Mock.new 220 | mock.expect :call, nil, [Dalli::Client, Array, :memcache], instrument: :alias_method 221 | ::PrometheusExporter::Instrumentation::MethodProfiler.stub(:patch, mock) do 222 | configure_middleware(instrument: :alias_method) 223 | end 224 | mock.verify 225 | end 226 | end 227 | end 228 | end 229 | -------------------------------------------------------------------------------- /test/prometheus_exporter_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "test_helper" 4 | 5 | class PrometheusExporterTest < Minitest::Test 6 | def test_that_it_has_a_version_number 7 | refute_nil ::PrometheusExporter::VERSION 8 | end 9 | 10 | def test_it_can_get_hostname 11 | assert_equal `hostname`.strip, ::PrometheusExporter.hostname 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /test/server/active_record_collector_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 
2 | 3 | require_relative "../test_helper" 4 | require "mini_racer" 5 | require "prometheus_exporter/server" 6 | require "prometheus_exporter/instrumentation" 7 | 8 | class PrometheusActiveRecordCollectorTest < Minitest::Test 9 | include CollectorHelper 10 | 11 | def collector 12 | @collector ||= PrometheusExporter::Server::ActiveRecordCollector.new 13 | end 14 | 15 | def test_collecting_metrics 16 | collector.collect( 17 | "type" => "active_record", 18 | "pid" => "1000", 19 | "hostname" => "localhost", 20 | "connections" => 50, 21 | "busy" => 20, 22 | "dead" => 10, 23 | "idle" => 20, 24 | "waiting" => 0, 25 | "size" => 120, 26 | ) 27 | metrics = collector.metrics 28 | assert_equal 6, metrics.size 29 | end 30 | 31 | def test_collecting_metrics_with_custom_labels 32 | collector.collect( 33 | "type" => "active_record", 34 | "pid" => "1000", 35 | "hostname" => "localhost", 36 | "connections" => 50, 37 | "busy" => 20, 38 | "dead" => 10, 39 | "idle" => 20, 40 | "waiting" => 0, 41 | "size" => 120, 42 | "metric_labels" => { 43 | "service" => "service1", 44 | }, 45 | ) 46 | 47 | metrics = collector.metrics 48 | assert_equal 6, metrics.size 49 | assert( 50 | metrics.first.metric_text.include?( 51 | 'active_record_connection_pool_connections{service="service1",pid="1000",hostname="localhost"} 50', 52 | ), 53 | ) 54 | end 55 | 56 | def test_collecting_metrics_with_client_default_labels 57 | collector.collect( 58 | "type" => "active_record", 59 | "pid" => "1000", 60 | "hostname" => "localhost", 61 | "connections" => 50, 62 | "busy" => 20, 63 | "dead" => 10, 64 | "idle" => 20, 65 | "waiting" => 0, 66 | "size" => 120, 67 | "metric_labels" => { 68 | "service" => "service1", 69 | }, 70 | "custom_labels" => { 71 | "environment" => "test", 72 | }, 73 | ) 74 | 75 | metrics = collector.metrics 76 | assert_equal 6, metrics.size 77 | assert( 78 | metrics.first.metric_text.include?( 79 | 
'active_record_connection_pool_connections{service="service1",pid="1000",hostname="localhost",environment="test"} 50', 80 | ), 81 | ) 82 | end 83 | 84 | def test_collecting_metrics_for_multiple_pools 85 | collector.collect( 86 | "type" => "active_record", 87 | "hostname" => "localhost", 88 | "pid" => "1000", 89 | "connections" => 50, 90 | "busy" => 20, 91 | "dead" => 10, 92 | "idle" => 20, 93 | "waiting" => 0, 94 | "size" => 120, 95 | "metric_labels" => { 96 | "pool_name" => "primary", 97 | }, 98 | ) 99 | collector.collect( 100 | "type" => "active_record", 101 | "hostname" => "localhost", 102 | "pid" => "1000", 103 | "connections" => 5, 104 | "busy" => 2, 105 | "dead" => 1, 106 | "idle" => 2, 107 | "waiting" => 0, 108 | "size" => 12, 109 | "metric_labels" => { 110 | "pool_name" => "other", 111 | }, 112 | ) 113 | 114 | metrics = collector.metrics 115 | assert_equal 6, metrics.size 116 | assert( 117 | metrics.first.metric_text.include?( 118 | 'active_record_connection_pool_connections{pool_name="primary",pid="1000",hostname="localhost"} 50', 119 | ), 120 | ) 121 | assert( 122 | metrics.first.metric_text.include?( 123 | 'active_record_connection_pool_connections{pool_name="other",pid="1000",hostname="localhost"} 5', 124 | ), 125 | ) 126 | end 127 | 128 | def test_metrics_deduplication 129 | data = { 130 | "pid" => "1000", 131 | "hostname" => "localhost", 132 | "metric_labels" => { 133 | "pool_name" => "primary", 134 | }, 135 | "connections" => 100, 136 | } 137 | 138 | collector.collect(data) 139 | collector.collect(data.merge("connections" => 200)) 140 | collector.collect(data.merge("pid" => "2000", "connections" => 300)) 141 | collector.collect(data.merge("pid" => "3000", "connections" => 400)) 142 | collector.collect(data.merge("hostname" => "localhost2", "pid" => "2000", "connections" => 500)) 143 | 144 | metrics = collector.metrics 145 | metrics_lines = metrics.map(&:metric_text).join.split("\n") 146 | 147 | assert_equal 1, metrics.size 148 | assert_equal [ 149 | 
'active_record_connection_pool_connections{pool_name="primary",pid="1000",hostname="localhost"} 200', 150 | 'active_record_connection_pool_connections{pool_name="primary",pid="2000",hostname="localhost"} 300', 151 | 'active_record_connection_pool_connections{pool_name="primary",pid="3000",hostname="localhost"} 400', 152 | 'active_record_connection_pool_connections{pool_name="primary",pid="2000",hostname="localhost2"} 500', 153 | ], 154 | metrics_lines 155 | end 156 | 157 | def test_metrics_expiration 158 | data = { 159 | "pid" => "1000", 160 | "hostname" => "localhost", 161 | "connections" => 50, 162 | "busy" => 20, 163 | "dead" => 10, 164 | "idle" => 20, 165 | "waiting" => 0, 166 | "size" => 120, 167 | "metric_labels" => { 168 | "pool_name" => "primary", 169 | }, 170 | } 171 | 172 | stub_monotonic_clock(0) do 173 | collector.collect(data) 174 | collector.collect(data.merge("pid" => "1001", "hostname" => "localhost2")) 175 | assert_equal 6, collector.metrics.size 176 | end 177 | 178 | stub_monotonic_clock(max_metric_age + 1) { assert_equal 0, collector.metrics.size } 179 | end 180 | end 181 | -------------------------------------------------------------------------------- /test/server/good_job_collector_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../test_helper" 4 | require "prometheus_exporter/server" 5 | require "prometheus_exporter/instrumentation" 6 | 7 | class PrometheusGoodJobCollectorTest < Minitest::Test 8 | include CollectorHelper 9 | 10 | def collector 11 | @collector ||= PrometheusExporter::Server::GoodJobCollector.new 12 | end 13 | 14 | def test_collecting_metrics 15 | collector.collect( 16 | { 17 | "scheduled" => 3, 18 | "retried" => 4, 19 | "queued" => 0, 20 | "running" => 5, 21 | "finished" => 100, 22 | "succeeded" => 2000, 23 | "discarded" => 9, 24 | }, 25 | ) 26 | 27 | metrics = collector.metrics 28 | 29 | expected = [ 30 | "good_job_scheduled 
3", 31 | "good_job_retried 4", 32 | "good_job_queued 0", 33 | "good_job_running 5", 34 | "good_job_finished 100", 35 | "good_job_succeeded 2000", 36 | "good_job_discarded 9", 37 | ] 38 | assert_equal expected, metrics.map(&:metric_text) 39 | end 40 | 41 | def test_collecting_metrics_with_custom_labels 42 | collector.collect( 43 | "type" => "good_job", 44 | "scheduled" => 3, 45 | "retried" => 4, 46 | "queued" => 0, 47 | "running" => 5, 48 | "finished" => 100, 49 | "succeeded" => 2000, 50 | "discarded" => 9, 51 | "custom_labels" => { 52 | "hostname" => "good_job_host", 53 | }, 54 | ) 55 | 56 | metrics = collector.metrics 57 | 58 | assert(metrics.first.metric_text.include?('good_job_scheduled{hostname="good_job_host"}')) 59 | end 60 | 61 | def test_metrics_expiration 62 | data = { "type" => "good_job", "scheduled" => 3, "retried" => 4, "queued" => 0 } 63 | 64 | stub_monotonic_clock(0) do 65 | collector.collect(data) 66 | assert_equal 3, collector.metrics.size 67 | end 68 | 69 | stub_monotonic_clock(max_metric_age + 1) { assert_equal 0, collector.metrics.size } 70 | end 71 | end 72 | -------------------------------------------------------------------------------- /test/server/metrics_container_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | require_relative "../test_helper" 3 | require "prometheus_exporter/server" 4 | require "prometheus_exporter/instrumentation" 5 | require "prometheus_exporter/server/metrics_container" 6 | 7 | class PrometheusMetricsContainerTest < Minitest::Test 8 | def metrics 9 | @metrics ||= PrometheusExporter::Server::MetricsContainer.new 10 | end 11 | 12 | def test_container_with_expiration 13 | stub_monotonic_clock(1.0) do 14 | metrics << { key: "value" } 15 | assert_equal 1, metrics.size 16 | assert_equal 1, metrics.length 17 | assert_equal 61.0, metrics[0]["_expire_at"] 18 | end 19 | 20 | stub_monotonic_clock(61.0) do 21 | metrics << { key: "value2" } 22 | 
assert_equal 2, metrics.size 23 | assert_equal %w[value value2], metrics.map { |v| v[:key] } 24 | assert_equal 61.0, metrics[0]["_expire_at"] 25 | assert_equal 121.0, metrics[1]["_expire_at"] 26 | end 27 | 28 | stub_monotonic_clock(62.0) do 29 | metrics << { key: "value3" } 30 | assert_equal 2, metrics.size 31 | assert_equal %w[value2 value3], metrics.map { |v| v[:key] } 32 | assert_equal 121.0, metrics[0]["_expire_at"] 33 | assert_equal 122.0, metrics[1]["_expire_at"] 34 | end 35 | 36 | stub_monotonic_clock(1000.0) do 37 | # check raw data before expiry event 38 | assert_equal 2, metrics.data.size 39 | 40 | num = 0 41 | metrics.each { |m| num += 1 } 42 | assert_equal 0, num 43 | assert_equal 0, metrics.size 44 | end 45 | end 46 | 47 | def test_container_with_filter 48 | metrics.filter = ->(new_metric, old_metric) { new_metric[:hostname] == old_metric[:hostname] } 49 | 50 | stub_monotonic_clock(1.0) do 51 | metrics << { hostname: "host1", value: 100 } 52 | metrics << { hostname: "host2", value: 200 } 53 | metrics << { hostname: "host1", value: 200 } 54 | assert_equal 2, metrics.size 55 | assert_equal "host2", metrics[0][:hostname] 56 | assert_equal "host1", metrics[1][:hostname] 57 | end 58 | 59 | stub_monotonic_clock(62.0) do 60 | metrics << { hostname: "host3", value: 300 } 61 | assert_equal 1, metrics.size 62 | assert_equal "host3", metrics[0][:hostname] 63 | end 64 | end 65 | end 66 | -------------------------------------------------------------------------------- /test/server/process_collector_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../test_helper" 4 | require "mini_racer" 5 | require "prometheus_exporter/server" 6 | require "prometheus_exporter/instrumentation" 7 | 8 | class ProcessCollectorTest < Minitest::Test 9 | include CollectorHelper 10 | 11 | def collector 12 | @collector ||= PrometheusExporter::Server::ProcessCollector.new 13 | end 14 | 15 | 
def base_data 16 | { 17 | "type" => "process", 18 | "pid" => "1000", 19 | "hostname" => "localhost", 20 | "heap_free_slots" => 1000, 21 | "heap_live_slots" => 1001, 22 | "v8_heap_size" => 2000, 23 | "v8_used_heap_size" => 2001, 24 | "v8_physical_size" => 2003, 25 | "v8_heap_count" => 2004, 26 | "rss" => 3000, 27 | "major_gc_ops_total" => 4000, 28 | "minor_gc_ops_total" => 4001, 29 | "allocated_objects_total" => 4002, 30 | } 31 | end 32 | 33 | def test_metrics_collection 34 | collector.collect(base_data) 35 | 36 | assert_equal 10, collector.metrics.size 37 | assert_equal [ 38 | 'heap_free_slots{pid="1000",hostname="localhost"} 1000', 39 | 'heap_live_slots{pid="1000",hostname="localhost"} 1001', 40 | 'v8_heap_size{pid="1000",hostname="localhost"} 2000', 41 | 'v8_used_heap_size{pid="1000",hostname="localhost"} 2001', 42 | 'v8_physical_size{pid="1000",hostname="localhost"} 2003', 43 | 'v8_heap_count{pid="1000",hostname="localhost"} 2004', 44 | 'rss{pid="1000",hostname="localhost"} 3000', 45 | 'major_gc_ops_total{pid="1000",hostname="localhost"} 4000', 46 | 'minor_gc_ops_total{pid="1000",hostname="localhost"} 4001', 47 | 'allocated_objects_total{pid="1000",hostname="localhost"} 4002', 48 | ], 49 | collector_metric_lines 50 | end 51 | 52 | def test_metrics_deduplication 53 | collector.collect(base_data) 54 | assert_equal 10, collector.metrics.size 55 | assert_equal 10, collector_metric_lines.size 56 | 57 | collector.collect(base_data) 58 | assert_equal 10, collector.metrics.size 59 | assert_equal 10, collector_metric_lines.size 60 | 61 | collector.collect(base_data.merge({ "hostname" => "localhost2" })) 62 | assert_equal 10, collector.metrics.size 63 | assert_equal 20, collector_metric_lines.size 64 | end 65 | 66 | def test_metrics_expiration 67 | stub_monotonic_clock(0) do 68 | collector.collect(base_data) 69 | assert_equal 10, collector.metrics.size 70 | end 71 | 72 | stub_monotonic_clock(max_metric_age + 1) { assert_equal 0, collector.metrics.size } 73 | end 74 | end 
75 | -------------------------------------------------------------------------------- /test/server/puma_collector_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../test_helper" 4 | require "mini_racer" 5 | require "prometheus_exporter/server" 6 | require "prometheus_exporter/instrumentation" 7 | 8 | class PrometheusPumaCollectorTest < Minitest::Test 9 | include CollectorHelper 10 | 11 | def collector 12 | @collector ||= PrometheusExporter::Server::PumaCollector.new 13 | end 14 | 15 | def test_collecting_metrics_for_different_hosts_without_custom_labels 16 | collector.collect( 17 | "type" => "puma", 18 | "pid" => "1000", 19 | "hostname" => "test1.example.com", 20 | "phase" => 0, 21 | "workers" => 2, 22 | "booted_workers" => 2, 23 | "old_workers" => 0, 24 | "request_backlog" => 0, 25 | "running_threads" => 4, 26 | "thread_pool_capacity" => 10, 27 | "max_threads" => 10, 28 | "busy_threads" => 2, 29 | ) 30 | 31 | collector.collect( 32 | "type" => "puma", 33 | "pid" => "1000", 34 | "hostname" => "test2.example.com", 35 | "phase" => 0, 36 | "workers" => 4, 37 | "booted_workers" => 4, 38 | "old_workers" => 0, 39 | "request_backlog" => 1, 40 | "running_threads" => 9, 41 | "thread_pool_capacity" => 10, 42 | "max_threads" => 10, 43 | "busy_threads" => 3, 44 | ) 45 | 46 | # overwriting previous metrics from first host 47 | collector.collect( 48 | "type" => "puma", 49 | "pid" => "1000", 50 | "hostname" => "test1.example.com", 51 | "phase" => 0, 52 | "workers" => 3, 53 | "booted_workers" => 3, 54 | "old_workers" => 0, 55 | "request_backlog" => 2, 56 | "running_threads" => 8, 57 | "thread_pool_capacity" => 10, 58 | "max_threads" => 10, 59 | "busy_threads" => 4, 60 | ) 61 | 62 | metrics = collector.metrics 63 | assert_equal 8, metrics.size 64 | assert_equal "puma_workers{phase=\"0\"} 3", metrics.first.metric_text 65 | end 66 | 67 | def 
test_collecting_metrics_for_different_hosts_with_custom_labels 68 | collector.collect( 69 | "type" => "puma", 70 | "pid" => "1000", 71 | "hostname" => "test1.example.com", 72 | "phase" => 0, 73 | "workers" => 2, 74 | "booted_workers" => 2, 75 | "old_workers" => 0, 76 | "request_backlog" => 0, 77 | "running_threads" => 4, 78 | "thread_pool_capacity" => 10, 79 | "max_threads" => 10, 80 | "busy_threads" => 2, 81 | "custom_labels" => { 82 | "hostname" => "test1.example.com", 83 | }, 84 | ) 85 | 86 | collector.collect( 87 | "type" => "puma", 88 | "pid" => "1000", 89 | "hostname" => "test2.example.com", 90 | "phase" => 0, 91 | "workers" => 4, 92 | "booted_workers" => 4, 93 | "old_workers" => 0, 94 | "request_backlog" => 1, 95 | "running_threads" => 9, 96 | "thread_pool_capacity" => 10, 97 | "max_threads" => 10, 98 | "busy_threads" => 3, 99 | "custom_labels" => { 100 | "hostname" => "test2.example.com", 101 | }, 102 | ) 103 | 104 | # overwriting previous metrics from first host 105 | collector.collect( 106 | "type" => "puma", 107 | "pid" => "1000", 108 | "hostname" => "test1.example.com", 109 | "phase" => 0, 110 | "workers" => 3, 111 | "booted_workers" => 3, 112 | "old_workers" => 0, 113 | "request_backlog" => 2, 114 | "running_threads" => 8, 115 | "thread_pool_capacity" => 10, 116 | "max_threads" => 10, 117 | "busy_threads" => 4, 118 | "custom_labels" => { 119 | "hostname" => "test1.example.com", 120 | }, 121 | ) 122 | 123 | metrics = collector.metrics 124 | assert_equal 8, metrics.size 125 | assert_equal "puma_workers{phase=\"0\",hostname=\"test2.example.com\"} 4\n" \ 126 | "puma_workers{phase=\"0\",hostname=\"test1.example.com\"} 3", 127 | metrics.first.metric_text 128 | end 129 | end 130 | -------------------------------------------------------------------------------- /test/server/resque_collector_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../test_helper" 4 | 
require "prometheus_exporter/server" 5 | require "prometheus_exporter/instrumentation" 6 | 7 | class PrometheusResqueCollectorTest < Minitest::Test 8 | include CollectorHelper 9 | 10 | def collector 11 | @collector ||= PrometheusExporter::Server::ResqueCollector.new 12 | end 13 | 14 | def test_collecting_metrics 15 | collector.collect("pending_jobs" => 4, "processed_jobs" => 7, "failed_jobs" => 1) 16 | 17 | metrics = collector.metrics 18 | 19 | expected = ["resque_processed_jobs 7", "resque_failed_jobs 1", "resque_pending_jobs 4"] 20 | assert_equal expected, metrics.map(&:metric_text) 21 | end 22 | 23 | def test_collecting_metrics_with_custom_labels 24 | collector.collect( 25 | "type" => "resque", 26 | "pending_jobs" => 1, 27 | "processed_jobs" => 2, 28 | "failed_jobs" => 3, 29 | "custom_labels" => { 30 | "hostname" => "a323d2f681e2", 31 | }, 32 | ) 33 | 34 | metrics = collector.metrics 35 | assert(metrics.first.metric_text.include?('resque_processed_jobs{hostname="a323d2f681e2"}')) 36 | end 37 | 38 | def test_metrics_expiration 39 | data = { "type" => "resque", "pending_jobs" => 1, "processed_jobs" => 2, "failed_jobs" => 3 } 40 | 41 | stub_monotonic_clock(0) do 42 | collector.collect(data) 43 | assert_equal 3, collector.metrics.size 44 | end 45 | 46 | stub_monotonic_clock(max_metric_age + 1) { assert_equal 0, collector.metrics.size } 47 | end 48 | end 49 | -------------------------------------------------------------------------------- /test/server/runner_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../test_helper" 4 | require "prometheus_exporter/server" 5 | 6 | class PrometheusRunnerTest < Minitest::Test 7 | class MockerWebServer < OpenStruct 8 | def start 9 | true 10 | end 11 | end 12 | 13 | class CollectorMock < PrometheusExporter::Server::CollectorBase 14 | def initialize 15 | @collectors = [] 16 | end 17 | 18 | def register_collector(collector) 19 | 
@collectors << collector 20 | end 21 | 22 | def collectors 23 | @collectors 24 | end 25 | end 26 | 27 | class WrongCollectorMock 28 | end 29 | 30 | class TypeCollectorMock < PrometheusExporter::Server::TypeCollector 31 | def type 32 | "test" 33 | end 34 | 35 | def collect(_) 36 | nil 37 | end 38 | 39 | def metrics 40 | [] 41 | end 42 | end 43 | 44 | def teardown 45 | PrometheusExporter::Metric::Base.default_aggregation = nil 46 | end 47 | 48 | def test_runner_defaults 49 | runner = PrometheusExporter::Server::Runner.new 50 | 51 | assert_equal(runner.prefix, "ruby_") 52 | assert_equal(runner.port, 9394) 53 | assert_equal(runner.timeout, 2) 54 | assert_equal(runner.collector_class, PrometheusExporter::Server::Collector) 55 | assert_equal(runner.type_collectors, []) 56 | assert_equal(runner.verbose, false) 57 | assert_empty(runner.label) 58 | assert_nil(runner.auth) 59 | assert_equal(runner.realm, "Prometheus Exporter") 60 | end 61 | 62 | def test_runner_custom_options 63 | runner = 64 | PrometheusExporter::Server::Runner.new( 65 | prefix: "new_", 66 | port: 1234, 67 | timeout: 1, 68 | collector_class: CollectorMock, 69 | type_collectors: [TypeCollectorMock], 70 | verbose: true, 71 | label: { 72 | environment: "integration", 73 | }, 74 | auth: "my_htpasswd_file", 75 | realm: "test realm", 76 | histogram: true, 77 | ) 78 | 79 | assert_equal(runner.prefix, "new_") 80 | assert_equal(runner.port, 1234) 81 | assert_equal(runner.timeout, 1) 82 | assert_equal(runner.collector_class, CollectorMock) 83 | assert_equal(runner.type_collectors, [TypeCollectorMock]) 84 | assert_equal(runner.verbose, true) 85 | assert_equal(runner.label, { environment: "integration" }) 86 | assert_equal(runner.auth, "my_htpasswd_file") 87 | assert_equal(runner.realm, "test realm") 88 | assert_equal(runner.histogram, true) 89 | 90 | reset_base_metric_label 91 | end 92 | 93 | def test_runner_start 94 | runner = 95 | PrometheusExporter::Server::Runner.new( 96 | server_class: MockerWebServer, 97 | 
label: { 98 | environment: "integration", 99 | }, 100 | ) 101 | result = runner.start 102 | 103 | assert_equal(result, true) 104 | assert_equal(PrometheusExporter::Metric::Base.default_prefix, "ruby_") 105 | assert_equal(runner.port, 9394) 106 | assert_equal(runner.timeout, 2) 107 | assert_equal(runner.verbose, false) 108 | assert_nil(runner.auth) 109 | assert_equal(runner.realm, "Prometheus Exporter") 110 | assert_equal(PrometheusExporter::Metric::Base.default_labels, { environment: "integration" }) 111 | assert_instance_of(PrometheusExporter::Server::Collector, runner.collector) 112 | 113 | reset_base_metric_label 114 | end 115 | 116 | def test_runner_custom_collector 117 | runner = 118 | PrometheusExporter::Server::Runner.new( 119 | server_class: MockerWebServer, 120 | collector_class: CollectorMock, 121 | ) 122 | runner.start 123 | 124 | assert_equal(runner.collector_class, CollectorMock) 125 | end 126 | 127 | def test_runner_wrong_collector 128 | runner = 129 | PrometheusExporter::Server::Runner.new( 130 | server_class: MockerWebServer, 131 | collector_class: WrongCollectorMock, 132 | ) 133 | 134 | assert_raises PrometheusExporter::Server::WrongInheritance do 135 | runner.start 136 | end 137 | end 138 | 139 | def test_runner_custom_collector_types 140 | runner = 141 | PrometheusExporter::Server::Runner.new( 142 | server_class: MockerWebServer, 143 | collector_class: CollectorMock, 144 | type_collectors: [TypeCollectorMock], 145 | ) 146 | runner.start 147 | 148 | custom_collectors = runner.collector.collectors 149 | 150 | assert_equal(custom_collectors.size, 1) 151 | assert_instance_of(TypeCollectorMock, custom_collectors.first) 152 | end 153 | 154 | def test_runner_histogram_mode 155 | runner = PrometheusExporter::Server::Runner.new(server_class: MockerWebServer, histogram: true) 156 | runner.start 157 | 158 | assert_equal( 159 | PrometheusExporter::Metric::Base.default_aggregation, 160 | PrometheusExporter::Metric::Histogram, 161 | ) 162 | end 163 | 164 | def 
reset_base_metric_label 165 | PrometheusExporter::Metric::Base.default_labels = {} 166 | end 167 | end 168 | -------------------------------------------------------------------------------- /test/server/sidekiq_process_collector_test.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../test_helper" 4 | require "prometheus_exporter/server" 5 | require "prometheus_exporter/instrumentation" 6 | 7 | class PrometheusSidekiqProcessCollectorTest < Minitest::Test 8 | include CollectorHelper 9 | 10 | def collector 11 | @collector ||= PrometheusExporter::Server::SidekiqProcessCollector.new 12 | end 13 | 14 | def test_collecting_metrics 15 | collector.collect( 16 | "process" => { 17 | "busy" => 1, 18 | "concurrency" => 2, 19 | "labels" => { 20 | "labels" => "lab_1,lab_2", 21 | "queues" => "default,reliable", 22 | "quiet" => "false", 23 | "tag" => "default", 24 | "hostname" => "sidekiq-1234", 25 | "identity" => "sidekiq-1234:1", 26 | }, 27 | }, 28 | ) 29 | 30 | metrics = collector.metrics 31 | expected = [ 32 | 'sidekiq_process_busy{labels="lab_1,lab_2",queues="default,reliable",quiet="false",tag="default",hostname="sidekiq-1234",identity="sidekiq-1234:1"} 1', 33 | 'sidekiq_process_concurrency{labels="lab_1,lab_2",queues="default,reliable",quiet="false",tag="default",hostname="sidekiq-1234",identity="sidekiq-1234:1"} 2', 34 | ] 35 | assert_equal expected, metrics.map(&:metric_text) 36 | end 37 | 38 | def test_only_fresh_metrics_are_collected 39 | stub_monotonic_clock(1.0) do 40 | collector.collect( 41 | "process" => { 42 | "busy" => 1, 43 | "concurrency" => 2, 44 | "labels" => { 45 | "labels" => "lab_1,lab_2", 46 | "queues" => "default,reliable", 47 | "quiet" => "false", 48 | "tag" => "default", 49 | "hostname" => "sidekiq-1234", 50 | "identity" => "sidekiq-1234:1", 51 | }, 52 | }, 53 | ) 54 | end 55 | 56 | stub_monotonic_clock(2.0, advance: max_metric_age) do 57 | collector.collect( 
"process" => {
          "busy" => 2,
          "concurrency" => 2,
          "labels" => {
            "labels" => "other_label",
            "queues" => "default,reliable",
            "quiet" => "true",
            "tag" => "default",
            "hostname" => "sidekiq-1234",
            "identity" => "sidekiq-1234:1",
          },
        },
      )

      metrics = collector.metrics
      # Only the fresh (post-expiry) snapshot should be exposed.
      expected = [
        'sidekiq_process_busy{labels="other_label",queues="default,reliable",quiet="true",tag="default",hostname="sidekiq-1234",identity="sidekiq-1234:1"} 2',
        'sidekiq_process_concurrency{labels="other_label",queues="default,reliable",quiet="true",tag="default",hostname="sidekiq-1234",identity="sidekiq-1234:1"} 2',
      ]
      assert_equal expected, metrics.map(&:metric_text)
    end
  end
end

# ----------------------------------------------------------------------------
# test/server/sidekiq_queue_collector_test.rb
# ----------------------------------------------------------------------------

# frozen_string_literal: true

require_relative "../test_helper"
require "prometheus_exporter/server"
require "prometheus_exporter/instrumentation"

class PrometheusSidekiqQueueCollectorTest < Minitest::Test
  include CollectorHelper

  def collector
    @collector ||= PrometheusExporter::Server::SidekiqQueueCollector.new
  end

  def test_collecting_metrics
    # "queues" is an array of per-queue hashes (implicit-hash array literal).
    collector.collect(
      "queues" => ["backlog" => 16, "latency_seconds" => 7, "labels" => { "queue" => "default" }],
    )

    metrics = collector.metrics

    expected = [
      'sidekiq_queue_backlog{queue="default"} 16',
      'sidekiq_queue_latency_seconds{queue="default"} 7',
    ]
    assert_equal expected, metrics.map(&:metric_text)
  end

  def test_collecting_metrics_with_client_default_labels
    collector.collect(
      "queues" => ["backlog" => 16, "latency_seconds" => 7, "labels" => { "queue" => "default" }],
      "custom_labels" => {
        "environment" => "test",
      },
)

    metrics = collector.metrics

    # custom_labels are appended after the per-queue labels.
    expected = [
      'sidekiq_queue_backlog{queue="default",environment="test"} 16',
      'sidekiq_queue_latency_seconds{queue="default",environment="test"} 7',
    ]
    assert_equal expected, metrics.map(&:metric_text)
  end

  def test_only_fresh_metrics_are_collected
    stub_monotonic_clock(1.0) do
      collector.collect("queues" => ["backlog" => 1, "labels" => { "queue" => "default" }])
    end

    # Past MAX_METRIC_AGE the backlog series expires; only the fresh
    # latency series remains.
    stub_monotonic_clock(2.0, advance: max_metric_age) do
      collector.collect("queues" => ["latency_seconds" => 1, "labels" => { "queue" => "default" }])

      metrics = collector.metrics

      expected = ['sidekiq_queue_latency_seconds{queue="default"} 1']
      assert_equal expected, metrics.map(&:metric_text)
    end
  end
end

# ----------------------------------------------------------------------------
# test/server/sidekiq_stats_collector_test.rb
# ----------------------------------------------------------------------------

# frozen_string_literal: true

require_relative "../test_helper"
require "prometheus_exporter/server"
require "prometheus_exporter/instrumentation"

class PrometheusSidekiqStatsCollectorTest < Minitest::Test
  include CollectorHelper

  def collector
    @collector ||= PrometheusExporter::Server::SidekiqStatsCollector.new
  end

  def test_collecting_metrics
    collector.collect(
      "stats" => {
        "dead_size" => 1,
        "enqueued" => 2,
        "failed" => 3,
        "processed" => 4,
        "processes_size" => 5,
        "retry_size" => 6,
        "scheduled_size" => 7,
        "workers_size" => 8,
      },
    )

    metrics = collector.metrics
    expected = [
      "sidekiq_stats_dead_size 1",
      "sidekiq_stats_enqueued 2",
      "sidekiq_stats_failed 3",
      "sidekiq_stats_processed 4",
      "sidekiq_stats_processes_size 5",
      "sidekiq_stats_retry_size 6",
      "sidekiq_stats_scheduled_size 7",
"sidekiq_stats_workers_size 8",
    ]
    assert_equal expected, metrics.map(&:metric_text)
  end

  def test_only_fresh_metrics_are_collected
    stub_monotonic_clock(1.0) do
      collector.collect(
        "stats" => {
          "dead_size" => 1,
          "enqueued" => 2,
          "failed" => 3,
          "processed" => 4,
          "processes_size" => 5,
          "retry_size" => 6,
          "scheduled_size" => 7,
          "workers_size" => 8,
        },
      )
    end

    # Past MAX_METRIC_AGE the first snapshot expires; the second replaces it.
    stub_monotonic_clock(2.0, advance: max_metric_age) do
      collector.collect(
        "stats" => {
          "dead_size" => 2,
          "enqueued" => 3,
          "failed" => 4,
          "processed" => 5,
          "processes_size" => 6,
          "retry_size" => 7,
          "scheduled_size" => 8,
          "workers_size" => 9,
        },
      )

      metrics = collector.metrics
      expected = [
        "sidekiq_stats_dead_size 2",
        "sidekiq_stats_enqueued 3",
        "sidekiq_stats_failed 4",
        "sidekiq_stats_processed 5",
        "sidekiq_stats_processes_size 6",
        "sidekiq_stats_retry_size 7",
        "sidekiq_stats_scheduled_size 8",
        "sidekiq_stats_workers_size 9",
      ]

      assert_equal expected, metrics.map(&:metric_text)
      # Exactly one snapshot retained — replaced, not accumulated.
      assert_equal 1, collector.sidekiq_metrics.size
    end
  end
end

# ----------------------------------------------------------------------------
# test/server/unicorn_collector_test.rb
# ----------------------------------------------------------------------------

# frozen_string_literal: true

require_relative "../test_helper"
require "mini_racer"
require "prometheus_exporter/server"
require "prometheus_exporter/instrumentation"

class PrometheusUnicornCollectorTest < Minitest::Test
  include CollectorHelper

  def collector
    @collector ||= PrometheusExporter::Server::UnicornCollector.new
  end

  def test_collecting_metrics
    collector.collect("workers" => 4, "active_workers" => 3, "request_backlog" => 0)
assert_collector_metric_lines [
      "unicorn_workers 4",
      "unicorn_active_workers 3",
      "unicorn_request_backlog 0",
    ]
  end

  def test_collecting_metrics_with_custom_labels
    collector.collect(
      "type" => "unicorn",
      "workers" => 2,
      "active_workers" => 0,
      "request_backlog" => 0,
      "custom_labels" => {
        "hostname" => "a323d2f681e2",
      },
    )

    metrics = collector.metrics

    assert(metrics.first.metric_text.include?('unicorn_workers{hostname="a323d2f681e2"}'))
  end

  def test_metrics_deduplication
    # Identical payloads must overwrite, not duplicate; the differing
    # "hostname" payload is still only 3 series because hostname here is a
    # top-level key, not a custom label.
    collector.collect("workers" => 4, "active_workers" => 3, "request_backlog" => 0)
    collector.collect("workers" => 4, "active_workers" => 3, "request_backlog" => 0)
    collector.collect(
      "workers" => 4,
      "active_workers" => 3,
      "request_backlog" => 0,
      "hostname" => "localhost2",
    )
    assert_equal 3, collector_metric_lines.size
  end

  def test_metrics_expiration
    stub_monotonic_clock(0) do
      collector.collect("workers" => 4, "active_workers" => 3, "request_backlog" => 0)
      assert_equal 3, collector.metrics.size
    end

    stub_monotonic_clock(max_metric_age + 1) { assert_equal 0, collector.metrics.size }
  end
end

# ----------------------------------------------------------------------------
# test/server/web_collector_test.rb
# ----------------------------------------------------------------------------

# frozen_string_literal: true

require_relative "../test_helper"
require "mini_racer"
require "prometheus_exporter/server"
require "prometheus_exporter/instrumentation"

class PrometheusWebCollectorTest < Minitest::Test
  def setup
    PrometheusExporter::Metric::Base.default_prefix = ""
    PrometheusExporter::Metric::Base.default_aggregation = nil
  end

  def teardown
    # Histogram mode is global state set by one test below; always reset.
    PrometheusExporter::Metric::Base.default_aggregation = nil
  end

  def collector
@collector ||= PrometheusExporter::Server::WebCollector.new
  end

  # Even with "timings" => nil the collector registers all six web series.
  def test_collecting_metrics_without_specific_timings
    collector.collect(
      "type" => "web",
      "timings" => nil,
      "default_labels" => {
        "action" => "index",
        "controller" => "home",
        # NOTE(review): symbol key here while sibling tests use the "status"
        # string key — presumably deliberate coverage of both key types;
        # confirm against WebCollector's label handling.
        :"status" => 200,
      },
    )

    metrics = collector.metrics

    assert_equal 6, metrics.size
  end

  def test_collecting_metrics
    collector.collect(
      "type" => "web",
      "timings" => {
        "sql" => {
          duration: 0.5,
          count: 40,
        },
        "redis" => {
          duration: 0.03,
          count: 4,
        },
        "memcache" => {
          duration: 0.02,
          count: 1,
        },
        "queue" => 0.03,
        "total_duration" => 1.0,
      },
      "default_labels" => {
        "action" => "index",
        "controller" => "home",
        "status" => 200,
      },
    )

    metrics = collector.metrics
    assert_equal 6, metrics.size
  end

  def test_collecting_metrics_with_custom_labels
    collector.collect(
      "type" => "web",
      "timings" => nil,
      "status" => 200,
      "default_labels" => {
        "controller" => "home",
        "action" => "index",
      },
      "custom_labels" => {
        "service" => "service1",
      },
    )

    metrics = collector.metrics

    assert_equal 6, metrics.size
    # The payload-level status is merged in after default and custom labels.
    assert(
      metrics.first.metric_text.include?(
        'http_requests_total{controller="home",action="index",service="service1",status="200"} 1',
      ),
    )
  end

  # A "status" entry inside custom_labels must not produce a duplicate label.
  def test_collecting_metrics_merging_custom_labels_and_status
    collector.collect(
      "type" => "web",
      "timings" => nil,
      "status" => 200,
      "default_labels" => {
        "controller" => "home",
        "action" => "index",
      },
      "custom_labels" => {
        "service" => "service1",
        "status" => 200,
      },
    )

    metrics = collector.metrics

    assert_equal 6, metrics.size
    assert(
metrics.first.metric_text.include?(
        'http_requests_total{controller="home",action="index",service="service1",status="200"} 1',
      ),
    )
  end

  def test_collecting_metrics_in_histogram_mode
    PrometheusExporter::Metric::Base.default_aggregation = PrometheusExporter::Metric::Histogram

    collector.collect(
      "type" => "web",
      "status" => 200,
      "timings" => {
        "sql" => {
          duration: 0.5,
          count: 40,
        },
        "redis" => {
          duration: 0.03,
          count: 4,
        },
        "memcache" => {
          duration: 0.02,
          count: 1,
        },
        "queue" => 0.03,
        "total_duration" => 1.0,
      },
      "default_labels" => {
        "controller" => "home",
        "action" => "index",
      },
      "custom_labels" => {
        "service" => "service1",
      },
    )

    metrics = collector.metrics
    # Histogram exposition text is multi-line; flatten to individual lines.
    metrics_lines = metrics.map(&:metric_text).flat_map(&:lines)

    assert_equal 6, metrics.size
    assert_includes(
      metrics_lines,
      "http_requests_total{controller=\"home\",action=\"index\",service=\"service1\",status=\"200\"} 1",
    )
    assert_includes(
      metrics_lines,
      "http_request_duration_seconds_bucket{controller=\"home\",action=\"index\",service=\"service1\",le=\"+Inf\"} 1\n",
    )
  end
end

# ----------------------------------------------------------------------------
# test/sidekiq_middleware_test.rb
# ----------------------------------------------------------------------------

# frozen_string_literal: true

require "minitest/stub_const"
require_relative "test_helper"
require "prometheus_exporter/instrumentation/sidekiq"

class PrometheusExporterSidekiqMiddlewareTest < Minitest::Test
  class FakeClient
  end

  def client
    @client ||= FakeClient.new
  end

  # Mimics Sidekiq's internal middleware chain entry: stores the middleware
  # class plus constructor args and instantiates it on demand.
  class FakeSidekiqMiddlewareChainEntry
    attr_reader :klass

    def initialize(klass, *args)
      @klass = klass
      @args = args
    end

    def make_new
      @klass.new(*@args)
    end
  end

  # NOTE(review): "middlware" typo in the method name kept byte-identical.
  def test_initiating_middlware
    middleware_entry =
      FakeSidekiqMiddlewareChainEntry.new(
        PrometheusExporter::Instrumentation::Sidekiq,
        { client: client },
      )
    assert_instance_of PrometheusExporter::Instrumentation::Sidekiq, middleware_entry.make_new
  end
end

# ----------------------------------------------------------------------------
# test/test_helper.rb
# ----------------------------------------------------------------------------

# frozen_string_literal: true

$LOAD_PATH.unshift File.expand_path("../../lib", __FILE__)
require "prometheus_exporter"

require "minitest/autorun"
require "ostruct"
require "redis"

# Prepended into RedisClient::Middlewares so the suite never opens a real
# Redis connection: every command is served by an inert FakeConnection.
module TestingMod
  class FakeConnection
    def call_pipelined(...)
    end

    def call(...)
    end

    def connected?
      true
    end

    def revalidate
    end

    def read_timeout=(v)
    end

    def write_timeout=(v)
    end
  end

  def connect(_config)
    FakeConnection.new
  end
end

# Counts redis-client command invocations so tests can assert on traffic.
module RedisValidationMiddleware
  def self.reset!
39 | @@call_calls = 0 40 | @@call_pipelined_calls = 0 41 | end 42 | 43 | def self.call_calls 44 | @@call_calls || 0 45 | end 46 | 47 | def self.call_pipelined_calls 48 | @@call_pipelined_calls || 0 49 | end 50 | 51 | def call(command, _config) 52 | @@call_calls ||= 0 53 | @@call_calls += 1 54 | super 55 | end 56 | 57 | def call_pipelined(command, _config) 58 | @@call_pipelined_calls ||= 0 59 | @@call_pipelined_calls += 1 60 | super 61 | end 62 | end 63 | 64 | RedisClient::Middlewares.prepend(TestingMod) 65 | RedisClient.register(RedisValidationMiddleware) 66 | 67 | unless defined?(::Puma) 68 | module Puma 69 | module Const 70 | VERSION = "6.6.0" 71 | end 72 | 73 | def self.stats 74 | '{}' 75 | end 76 | end 77 | end 78 | 79 | class TestHelper 80 | def self.wait_for(time, &blk) 81 | (time / 0.001).to_i.times do 82 | return true if blk.call 83 | sleep 0.001 84 | end 85 | false 86 | end 87 | end 88 | 89 | module ClockHelper 90 | def stub_monotonic_clock(at = 0.0, advance: nil, &blk) 91 | Process.stub(:clock_gettime, at + advance.to_f, Process::CLOCK_MONOTONIC, &blk) 92 | end 93 | end 94 | 95 | module CollectorHelper 96 | def setup 97 | PrometheusExporter::Metric::Base.default_prefix = "" 98 | end 99 | 100 | def max_metric_age 101 | @_max_age ||= get_max_metric_age 102 | end 103 | 104 | def collector_metric_lines 105 | collector.metrics.map(&:metric_text).join("\n").split("\n") 106 | end 107 | 108 | def assert_collector_metric_lines(expected) 109 | assert_equal(expected, collector_metric_lines) 110 | end 111 | 112 | private 113 | 114 | def get_max_metric_age 115 | klass = @collector.class 116 | unless klass.const_defined?(:MAX_METRIC_AGE) 117 | raise "Collector class #{@collector.class.name} must set MAX_METRIC_AGE constant!" 
end
    klass.const_get(:MAX_METRIC_AGE)
  end
end

# Allow stubbing the process monotonic clock (ClockHelper#stub_monotonic_clock)
# from any test class in the suite.
Minitest::Test.send(:include, ClockHelper)