├── .gitignore
├── .travis.yml
├── CHANGELOG.md
├── LICENSE.txt
├── README.md
├── build.gradle
├── gradle.properties
├── gradle
│   └── wrapper
│       ├── gradle-wrapper.jar
│       └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
└── src
    ├── main
    │   └── java
    │       └── com
    │           └── airbnb
    │               ├── kafka
    │               │   ├── kafka08
    │               │   │   ├── StatsdMetricsReporter.java
    │               │   │   └── StatsdMetricsReporterMBean.java
    │               │   └── kafka09
    │               │       └── StatsdMetricsReporter.java
    │               └── metrics
    │                   ├── Dimension.java
    │                   ├── ExcludeMetricPredicate.java
    │                   ├── KafkaStatsDReporter.java
    │                   ├── MetricInfo.java
    │                   ├── MetricNameFormatter.java
    │                   ├── Parser.java
    │                   ├── ParserForNoTag.java
    │                   ├── ParserForTagInMBeanName.java
    │                   ├── StatsDMetricsRegistry.java
    │                   └── StatsDReporter.java
    └── test
        ├── java
        │   └── com
        │       └── airbnb
        │           ├── kafka
        │           │   ├── kafka08
        │           │   │   └── StatsdMetricsReporterTest.java
        │           │   └── kafka09
        │           │       └── StatsdMetricsReporterTest.java
        │           └── metrics
        │               ├── DimensionTest.java
        │               ├── ExcludeMetricPredicateTest.java
        │               ├── KafkaStatsDReporterTest.java
        │               ├── MetricNameFormatterTest.java
        │               ├── ParserTest.java
        │               └── StatsDReporterTest.java
        └── resources
            └── log4j.properties
/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | *.ipr
3 | *.iws
4 | .DS_Store
5 | /out
6 | build
7 | /.gradle
8 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: java
3 |
4 | env:
5 | - TERM=dumb
6 |
7 | jdk:
8 | - openjdk8
9 |
10 | script:
11 | - ./gradlew testWithAllSupportedKafkaVersions
12 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # 0.5.2
2 |
3 | - Convert INFINITY values to 0.
4 |
5 | # 0.4.0
6 |
7 | - `0.4.0` adds support for tags on metrics. See [dogstatsd extensions](http://docs.datadoghq.com/guides/dogstatsd/#tags). If your statsd server does not support tags, you can disable them in the Kafka configuration. See property `external.kafka.statsd.tag.enabled` below.
8 |
9 | - The statsd client is [`com.indeed:java-dogstatsd-client:2.0.11`](https://github.com/indeedeng/java-dogstatsd-client/tree/java-dogstatsd-client-2.0.11).
10 | - Support new `MetricNames` introduced by Kafka `0.8.2.x`
11 | - Remove JVM metrics; only the metrics from the Kafka `MetricRegistry` are sent.
12 |
13 | # 0.3.0
14 | - send JVM metrics
15 |
16 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Build Status](https://travis-ci.org/airbnb/kafka-statsd-metrics2.svg?branch=master)](https://travis-ci.org/airbnb/kafka-statsd-metrics2)
2 |
3 | # kafka-statsd-metrics2
4 |
5 | Send Kafka Metrics to StatsD.
6 |
7 | ## Contact
8 | **Let us know!** If you fork this, or if you use it, or if it helps in any way, we'd love to hear from you! opensource@airbnb.com
9 |
10 | ## What is it about?
11 | Kafka uses [Yammer Metrics](http://metrics.codahale.com/getting-started/) (now part of the [Dropwizard project](http://metrics.codahale.com/about/)) for [metrics reporting](https://kafka.apache.org/documentation.html#monitoring)
12 | in both the server and the client.
13 | Kafka can be configured to report stats through pluggable stats reporters that hook into your monitoring system.
14 |
15 | This project provides a simple integration between Kafka and a StatsD reporter for Metrics.
16 |
17 | Metrics can be filtered based on the metric name and the metric dimensions (min, max, percentiles, etc.).
18 |
19 | ## Supported Kafka versions
20 |
21 | - For Kafka `0.9.0.0` or later use `kafka-statsd-metrics2-0.5.0`
22 | - For Kafka `0.8.2.0` or later use `kafka-statsd-metrics2-0.4.0`
23 | - For Kafka `0.8.1.1` or prior use `kafka-statsd-metrics2-0.3.0`
24 |
25 |
26 | ## Releases
27 |
28 | ### 0.5.4
29 | - Fix metrics with different tags not being reported properly
30 |
31 | ### 0.5.2 / 0.5.3
32 | - Convert INFINITY values to 0.
33 |
34 | ### 0.5.1
35 | - Fix metrics change log level
36 |
37 | ### 0.5.0
38 |
39 | - `0.5.0` adds support for reporting the new producer/consumer metrics in Kafka 0.9
40 | - Compatible with Kafka 0.8
41 | - A complete list of all the metrics supported in the metrics reporter can be found [here](http://docs.confluent.io/2.0.1/kafka/monitoring.html)
42 |
43 |
44 | ### 0.4.0
45 |
46 | - `0.4.0` adds support for tags on metrics. See [dogstatsd extensions](http://docs.datadoghq.com/guides/dogstatsd/#tags). If your statsd server does not support tags, you can disable them in the Kafka configuration. See property `external.kafka.statsd.tag.enabled` below.
47 |
48 | - The statsd client is [`com.indeed:java-dogstatsd-client:2.0.11`](https://github.com/indeedeng/java-dogstatsd-client/tree/java-dogstatsd-client-2.0.11).
49 | - Support new `MetricNames` introduced by Kafka `0.8.2.x`
50 |
51 | ### 0.3.0
52 |
53 | - initial release
54 |
55 | ## How to install?
56 |
57 | - [Download](https://bintray.com/airbnb/jars/kafka-statsd-metrics2/view) or build the shadow jar for `kafka-statsd-metrics2`.
58 | - Install the jar in the Kafka classpath, typically `./kafka_2.11-0.9.0.1/libs/`.
59 | - In the Kafka config file, `server.properties`, add the properties listed in the Configurations section below. Default values are shown in parentheses.
60 |
61 | ## How to use metrics in Kafka 0.9 / 0.8?
62 | ### New metrics in kafka 0.9
63 |
64 | 1. Add `metric.reporters` to `producer.properties` or `consumer.properties`
65 | ```bash
66 | # declare the reporter if new producer/consumer is used
67 | metric.reporters=com.airbnb.kafka.kafka09.StatsdMetricsReporter
68 | ```
69 | 2. Run the new producer or consumer (commands below; a programmatic configuration sketch follows them)
70 |
71 | Producer and consumer:
72 | ```bash
73 | bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test --producer.config config/producer.properties
74 | bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --new-consumer --topic test --from-beginning --consumer.config config/consumer.properties
75 | ```
76 |
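If you configure the new producer or consumer in code instead of through a properties file, the reporter can be registered the same way via the client configuration. The following is a minimal, hypothetical sketch for a Kafka 0.9 producer; it assumes the shadow jar is on the application classpath and that the `external.kafka.statsd.*` settings are picked up from the client configs, just like the properties shown above.

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerWithStatsdReporter {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        // register the reporter, same as metric.reporters in producer.properties
        props.put("metric.reporters", "com.airbnb.kafka.kafka09.StatsdMetricsReporter");
        props.put("external.kafka.statsd.reporter.enabled", "true");
        props.put("external.kafka.statsd.host", "localhost");
        props.put("external.kafka.statsd.port", "8125");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        try {
            producer.send(new ProducerRecord<>("test", "key", "hello from the statsd-enabled producer"));
        } finally {
            producer.close(); // flushes pending records and shuts down the metrics reporters
        }
    }
}
```

The client config parser may log the `external.kafka.statsd.*` keys as unknown configs; that warning is harmless.
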
77 | ### Old metrics in kafka 0.8
78 |
79 | 1. Add `kafka.metrics.reporters` to `producer.properties` or `consumer.properties`
80 | ```bash
81 | # declare the reporter if old producer/consumer is used
82 | kafka.metrics.reporters=com.airbnb.kafka.kafka08.StatsdMetricsReporter
83 | ```
84 | 2. Run old-producer or old-consumer
85 |
86 | ```bash
87 | bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test --producer.config config/producer.properties --old-producer
88 | bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning --consumer.config config/consumer.properties
89 | ```
90 |
93 |
94 | ### Configurations
95 | ```bash
96 | # declare the reporter if new producer/consumer is used
97 | metric.reporters=com.airbnb.kafka.kafka09.StatsdMetricsReporter
98 |
99 | # declare the reporter if old producer/consumer is used
100 | kafka.metrics.reporters=com.airbnb.kafka.kafka08.StatsdMetricsReporter
101 |
102 | # enable the reporter, (false)
103 | external.kafka.statsd.reporter.enabled=true
104 |
105 | # the host of the StatsD server (localhost)
106 | external.kafka.statsd.host=localhost
107 |
108 | # the port of the StatsD server (8125)
109 | external.kafka.statsd.port=8125
110 |
111 | # enable support for the statsd tag extension, e.g. datadog statsd (true)
112 | external.kafka.statsd.tag.enabled=true
113 |
114 | # a prefix for all metrics names (empty)
115 | external.kafka.statsd.metrics.prefix=
116 |
117 | # note that the StatsD reporter follows the global polling interval (10)
118 | # kafka.metrics.polling.interval.secs=10
119 |
120 |
121 |
122 | # A regex to exclude some metrics
123 | # Default is: (kafka\.consumer\.FetchRequestAndResponseMetrics.*)|(.*ReplicaFetcherThread.*)|(kafka\.server\.FetcherLagMetrics\..*)|(kafka\.log\.Log\..*)|(kafka\.cluster\.Partition\..*)
124 | #
125 | # The metric name is formatted with this template: group.type.scope.name
126 | #
127 | # external.kafka.statsd.metrics.exclude_regex=
128 |
129 | #
130 | # Each metric provides multiple dimensions: min, max, meanRate, etc
131 | # This might be too much data.
132 | # It is possible to disable some metric dimensions with the following properties:
133 | # By default all dimensions are enabled.
134 | #
135 | # external.kafka.statsd.dimension.enabled.count=true
136 | # external.kafka.statsd.dimension.enabled.meanRate=true
137 | # external.kafka.statsd.dimension.enabled.rate1m=true
138 | # external.kafka.statsd.dimension.enabled.rate5m=true
139 | # external.kafka.statsd.dimension.enabled.rate15m=true
140 | # external.kafka.statsd.dimension.enabled.min=true
141 | # external.kafka.statsd.dimension.enabled.max=true
142 | # external.kafka.statsd.dimension.enabled.mean=true
143 | # external.kafka.statsd.dimension.enabled.stddev=true
144 | # external.kafka.statsd.dimension.enabled.median=true
145 | # external.kafka.statsd.dimension.enabled.p75=true
146 | # external.kafka.statsd.dimension.enabled.p95=true
147 | # external.kafka.statsd.dimension.enabled.p98=true
148 | # external.kafka.statsd.dimension.enabled.p99=true
149 | # external.kafka.statsd.dimension.enabled.p999=true
150 | ```
151 |
152 | - Finally, restart the Kafka server.
153 |
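Before restarting the broker with a custom `external.kafka.statsd.metrics.exclude_regex`, it can help to sanity-check the pattern against a few metric names formatted with the `group.type.scope.name` template. The snippet below is only an illustrative sketch using `java.util.regex`; it assumes the reporter applies the pattern as a full match against the formatted name.

```java
import java.util.regex.Pattern;

public class ExcludeRegexCheck {
    public static void main(String[] args) {
        // the default value of external.kafka.statsd.metrics.exclude_regex
        Pattern exclude = Pattern.compile(
            "(kafka\\.consumer\\.FetchRequestAndResponseMetrics.*)"
                + "|(.*ReplicaFetcherThread.*)"
                + "|(kafka\\.server\\.FetcherLagMetrics\\..*)"
                + "|(kafka\\.log\\.Log\\..*)"
                + "|(kafka\\.cluster\\.Partition\\..*)");

        // matched by the kafka\.log\.Log\..* clause, so it would be excluded
        System.out.println(exclude.matcher("kafka.log.Log.LogEndOffset").matches());                        // true
        // not matched by any clause, so it would still be reported
        System.out.println(exclude.matcher("kafka.server.BrokerTopicMetrics.MessagesInPerSec").matches());  // false
    }
}
```
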
154 | ## How to test your configuration?
155 |
156 | You can check your configuration in different ways:
157 |
158 | - During Kafka startup, the reporter class will be instantiated and initialized. The logs should contain a message similar to:
159 | `"Kafka Statsd metrics reporter is enabled"`
160 | - A JMX MBean named `kafka:type=com.airbnb.kafka.kafka08.StatsdMetricsReporter` should also exist (a small JMX check is sketched after the example below).
161 | - Check the logs of your StatsD server
162 | - Finally, on the configured StatsD host, you could listen on the configured port and check for incoming data:
163 |
164 | ```bash
165 | # assuming the Statsd server has been stopped...
166 | $ nc -ul 8125
167 |
168 | kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.samples:1|gkafka.controller.ControllerStats
169 | .LeaderElectionRateAndTimeMs.meanRate:0.05|gkafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.
170 | 1MinuteRate:0.17|gkafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.5MinuteRate:0.19|g....
171 | ```
172 |
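To check for the MBean mentioned above from outside the broker process, a remote JMX lookup can be used. This is a rough sketch; it assumes the broker was started with remote JMX enabled (for example with `JMX_PORT=9999`, an example value) — otherwise use a local tool such as `jconsole`.

```java
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class ReporterMBeanCheck {
    public static void main(String[] args) throws Exception {
        // assumes the broker was started with JMX_PORT=9999 (example value)
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
            MBeanServerConnection connection = connector.getMBeanServerConnection();
            ObjectName reporter =
                new ObjectName("kafka:type=com.airbnb.kafka.kafka08.StatsdMetricsReporter");
            System.out.println("reporter MBean registered: " + connection.isRegistered(reporter));
        } finally {
            connector.close();
        }
    }
}
```
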
173 | ## Sample file of metrics output from statsd
174 | [new-producer-metrics.txt](https://www.dropbox.com/s/p8e4vl5moa80ikp/new-producer-metrics.txt?dl=0)
175 |
176 | [new-consumer-metrics.txt](https://www.dropbox.com/s/ab3t8qis5p58l7f/new-consumer-metrics.txt?dl=0)
177 |
178 |
179 | ## List of metrics for Kafka 0.8.2
180 |
181 | Below are the metrics available in Kafka 0.8.2:
182 |
183 | ```bash
184 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
185 | | Metrics kind | Metric Name | Metric Tags |
186 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
187 | | Gauge | kafka.server.ReplicaManager.LeaderCount | |
188 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
189 | | Gauge | kafka.server.ReplicaManager.PartitionCount | |
190 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
191 | | Gauge | kafka.server.ReplicaManager.UnderReplicatedPartitions | |
192 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
193 | | Gauge | kafka.controller.KafkaController.ActiveControllerCount | |
194 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
195 | | Gauge | kafka.controller.KafkaController.OfflinePartitionsCount | |
196 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
197 | | Gauge | kafka.controller.KafkaController.PreferredReplicaImbalanceCount | |
198 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
199 | | Gauge | kafka.network.RequestChannel.RequestQueueSize | |
200 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
201 | | Gauge | kafka.server.ReplicaFetcherManager.Replica_MaxLag | {"clientId" -> clientId} |
202 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
203 | | Gauge | kafka.server.ReplicaFetcherManager.Replica_MinFetchRate | {"clientId" -> clientId} |
204 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
205 | | Gauge | kafka.server.FetchRequestPurgatory.PurgatorySize | |
206 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
207 | | Gauge | kafka.server.FetchRequestPurgatory.NumDelayedRequests | |
208 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
209 | | Gauge | kafka.server.ProducerRequestPurgatory.PurgatorySize | |
210 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
211 | | Gauge | kafka.server.ProducerRequestPurgatory.NumDelayedRequests | |
212 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
213 | | Gauge | kafka.consumer.ConsumerFetcherManager.MaxLag | {"clientId" -> clientId} |
214 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
215 | | Gauge | kafka.consumer.ConsumerFetcherManager.MinFetchRate | {"clientId" -> clientId} |
216 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
217 | | Gauge | kafka.consumer.ZookeeperConsumerConnector.FetchQueueSize | {"clientId" -> config.clientId, "topic" -> topic, "threadId" -> thread} |
218 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
219 | | Gauge | kafka.network.RequestChannel.ResponseQueueSize | {"Processor" -> i} |
220 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
221 | | Timer | kafka.log.LogFlushStats.LogFlushRateAndTimeMs | |
222 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
223 | | Meter | kafka.server.ReplicaManager.IsrExpandsPerSec | |
224 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
225 | | Meter | kafka.server.ReplicaManager.IsrShrinksPerSec | |
226 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
227 | | Meter | kafka.server.DelayedFetchRequestMetrics.FollowerExpiresPerSecond | |
228 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
229 | | Meter | kafka.server.DelayedFetchRequestMetrics.ConsumerExpiresPerSecond | |
230 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
231 | | Meter | kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec | |
232 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
233 | | Timer | kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs | |
234 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
235 | | Meter | kafka.producer.ProducerStats.SerializationErrorsPerSec | {"clientId" -> clientId} |
236 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
237 | | Meter | kafka.producer.ProducerStats.ResendsPerSec | {"clientId" -> clientId} |
238 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
239 | | Meter | kafka.producer.ProducerStats.FailedSendsPerSec | {"clientId" -> clientId} |
240 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
241 | | Meter | kafka.producer.ProducerTopicMetrics.MessagesPerSec_all | {"clientId" -> clientId} |
242 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
243 | | Meter | kafka.producer.ProducerTopicMetrics.BytesPerSec_all | {"clientId" -> clientId} |
244 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
245 | | Meter | kafka.producer.ProducerTopicMetrics.DroppedMessagesPerSec_all | {"clientId" -> clientId} |
246 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
247 | | Meter | kafka.producer.ProducerTopicMetrics.MessagesPerSec | {"clientId" -> clientId, "topic" -> topic} |
248 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
249 | | Meter | kafka.producer.ProducerTopicMetrics.BytesPerSec | {"clientId" -> clientId, "topic" -> topic} |
250 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
251 | | Meter | kafka.producer.ProducerTopicMetrics.DroppedMessagesPerSec | {"clientId" -> clientId, "topic" -> topic} |
252 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
253 | | Meter | kafka.server.FetcherStats.RequestsPerSec | {"clientId" -> metricId.clientId, "brokerHost" -> metricId.brokerHost, "brokerPort" -> metricId.brokerPort} |
254 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
255 | | Meter | kafka.server.FetcherStats.BytesPerSec | {"clientId" -> metricId.clientId, "brokerHost" -> metricId.brokerHost, "brokerPort" -> metricId.brokerPort} |
256 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
257 | | Meter | kafka.server.BrokerTopicMetrics.MessagesInPerSec_all | |
258 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
259 | | Meter | kafka.server.BrokerTopicMetrics.BytesInPerSec_all | |
260 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
261 | | Meter | kafka.server.BrokerTopicMetrics.BytesOutPerSec_all | |
262 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
263 | | Meter | kafka.server.BrokerTopicMetrics.LogBytesAppendedPerSec_all | |
264 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
265 | | Meter | kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec_all | |
266 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
267 | | Meter | kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec_all | |
268 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
269 | | Meter | kafka.server.BrokerTopicMetrics.MessagesInPerSec | {"topic" -> topic} |
270 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
271 | | Meter | kafka.server.BrokerTopicMetrics.BytesInPerSec | {"topic" -> topic} |
272 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
273 | | Meter | kafka.server.BrokerTopicMetrics.BytesOutPerSec | {"topic" -> topic} |
274 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
275 | | Meter | kafka.server.BrokerTopicMetrics.LogBytesAppendedPerSec | {"topic" -> topic} |
276 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
277 | | Meter | kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec | {"topic" -> topic} |
278 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
279 | | Meter | kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec | {"topic" -> topic} |
280 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
281 | | Meter | kafka.server.DelayedProducerRequestMetrics.ExpiresPerSecond_all | |
282 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
283 | | Meter | kafka.server.DelayedProducerRequestMetrics.ExpiresPerSecond | {"topic" -> topicAndPartition.topic, "partition" -> topicAndPartition.partition} |
284 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
285 | | Timer | kafka.producer.ProducerRequestMetrics.ProducerRequestRateAndTimeMs_all | {"clientId" -> clientId} |
286 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
287 | | Histogram | kafka.producer.ProducerRequestMetrics.ProducerRequestSize_all | {"clientId" -> clientId} |
288 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
289 | | Timer | kafka.consumer.FetchRequestAndResponseMetrics.FetchRequestRateAndTimeMs_all | {"clientId" -> clientId} |
290 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
291 | | Histogram | kafka.consumer.FetchRequestAndResponseMetrics.FetchResponseSize_all | {"clientId" -> clientId} |
292 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
293 | | Timer | kafka.producer.ProducerRequestMetrics.ProducerRequestRateAndTimeMs | {"clientId" -> clientId, "brokerHost" -> brokerHost, "brokerPort" -> brokerPort} |
294 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
295 | | Histogram | kafka.producer.ProducerRequestMetrics.ProducerRequestSize | {"clientId" -> clientId, "brokerHost" -> brokerHost, "brokerPort" -> brokerPort} |
296 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
297 | | Timer | kafka.consumer.FetchRequestAndResponseMetrics.FetchRequestRateAndTimeMs | {"clientId" -> clientId, "brokerHost" -> brokerHost, "brokerPort" -> brokerPort} |
298 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
299 | | Histogram | kafka.consumer.FetchRequestAndResponseMetrics.FetchResponseSize | {"clientId" -> clientId, "brokerHost" -> brokerHost, "brokerPort" -> brokerPort} |
300 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
301 | | Meter | kafka.consumer.ConsumerTopicMetrics.MessagesPerSec_all | {"clientId" -> clientId} |
302 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
303 | | Meter | kafka.consumer.ConsumerTopicMetrics.BytesPerSec_all | {"clientId" -> clientId} |
304 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
305 | | Meter | kafka.consumer.ConsumerTopicMetrics.MessagesPerSec | {"clientId" -> clientId, "topic" -> topic} |
306 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
307 | | Meter | kafka.consumer.ConsumerTopicMetrics.BytesPerSec | {"clientId" -> clientId, "topic" -> topic} |
308 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
309 | | Meter | kafka.network.RequestMetrics.RequestsPerSec | {"request" -> name} |
310 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
311 | | Histogram | kafka.network.RequestMetrics.RequestQueueTimeMs | {"request" -> name} |
312 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
313 | | Histogram | kafka.network.RequestMetrics.LocalTimeMs | {"request" -> name} |
314 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
315 | | Histogram | kafka.network.RequestMetrics.RemoteTimeMs | {"request" -> name} |
316 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
317 | | Histogram | kafka.network.RequestMetrics.ResponseQueueTimeMs | {"request" -> name} |
318 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
319 | | Histogram | kafka.network.RequestMetrics.ResponseSendTimeMs | {"request" -> name} |
320 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
321 | | Histogram | kafka.network.RequestMetrics.TotalTimeMs | {"request" -> name} |
322 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
323 | | Gauge | kafka.server.FetcherLagMetrics.ConsumerLag | {"clientId" -> clientId, "topic" -> topic, "partition" -> partitionId} |
324 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
325 | | Gauge | kafka.producer.async.ProducerSendThread.ProducerQueueSize | {"clientId" -> clientId} |
326 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
327 | | Gauge | kafka.log.Log.NumLogSegments | {"topic" -> topic, "partition" -> partition} |
328 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
329 | | Gauge | kafka.log.Log.LogEndOffset | {"topic" -> topic, "partition" -> partition} |
330 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
331 | | Gauge | kafka.cluster.Partition.UnderReplicated | {"topic" -> topic, "partition" -> partitionId} |
332 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
333 | | Gauge | kafka.network.SocketServer.ResponsesBeingSent | |
334 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
335 | | Meter | kafka.network.SocketServer.NetworkProcessorAvgIdlePercent | |
336 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
337 | | Meter | kafka.network.SocketServer.IdlePercent | {"networkProcessor" -> i.toString} |
338 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
339 | | Gauge | kafka.server.OffsetManager.NumOffsets | |
340 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
341 | | Gauge | kafka.server.OffsetManager.NumGroups | |
342 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
343 | | Gauge | kafka.consumer.ZookeeperConsumerConnector.OwnedPartitionsCount | {"clientId" -> config.clientId, "groupId" -> config.groupId} |
344 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
345 | | Gauge | kafka.consumer.ZookeeperConsumerConnector.OwnedPartitionsCount | {"clientId" -> config.clientId, "groupId" -> config.groupId, "topic" -> topic} |
346 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
347 | | Meter | kafka.consumer.ZookeeperConsumerConnector.KafkaCommitsPerSec | {"clientId" -> clientId} |
348 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
349 | | Meter | kafka.consumer.ZookeeperConsumerConnector.ZooKeeperCommitsPerSec | {"clientId" -> clientId} |
350 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
351 | | Meter | kafka.consumer.ZookeeperConsumerConnector.RebalanceRateAndTime | {"clientId" -> clientId} |
352 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
353 | | Meter | kafka.tools.DataChannel.MirrorMaker-DataChannel-WaitOnPut | |
354 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
355 | | Meter | kafka.tools.DataChannel.MirrorMaker-DataChannel-WaitOnTake | |
356 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
357 | | Histogram | kafka.tools.DataChannel.MirrorMaker-DataChannel-Size | |
358 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
359 | | Gauge | kafka.common.AppInfo.Version | |
360 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
361 | | Meter | kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent | |
362 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
363 | | Meter | kafka.util.Throttler."""a input string not with small cardinality""" | |
364 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
365 | | Gauge | kafka.log.LogCleaner.max-buffer-utilization-percent | |
366 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
367 | | Gauge | kafka.log.LogCleaner.cleaner-recopy-percent | |
368 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
369 | | Gauge | kafka.log.LogCleaner.max-clean-time-secs | |
370 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
371 | | Timer | other.kafka.FetchThread.fetch-thread | |
372 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
373 | | Timer | other.kafka.CommitThread.commit-thread | |
374 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
375 | | Gauge | kafka.log.Log.LogStartOffset | {"topic" -> topic, "partition" -> partition} |
376 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
377 | | Gauge        | kafka.log.Log.Size                                                          | {"topic" -> topic, "partition" -> partition}                                                                |
378 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
379 | | Gauge | kafka.server.KafkaServer.BrokerState | |
380 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
381 | | Gauge | kafka.log.LogCleanerManager.max-dirty-percent | |
382 | +--------------+-----------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+
383 | ```
384 |
385 | ## Metrics-2.x vs Metrics-3.x
386 | The metrics project has two main versions: v2 and v3. Version 3 is not backward compatible.
387 |
388 | In versions [0.8.1.1](https://github.com/apache/kafka/blob/0.8.1.1/build.gradle#L217) and [0.8.2.1](https://github.com/apache/kafka/blob/0.8.2.1/build.gradle#L209), Kafka depends on [metrics-2.2.0](http://mvnrepository.com/artifact/com.yammer.metrics/metrics-core/2.2.0).
389 |
390 | *Note:*
391 | In a future release, Kafka [might upgrade](https://issues.apache.org/jira/browse/KAFKA-960) to Metrics-3.x.
392 | Due to the incompatibilities between Metrics versions, a new Statsd reporter for metrics-3 will be required.
393 | All contributions welcome!
394 |
395 |
396 | ## How to build
397 |
398 | After cloning the repo, type
399 |
400 | ```bash
401 | ./gradlew shadowJar
402 | ```
403 |
404 | This produces a jar file in `build/libs/`.
405 |
406 | The shadow jar is a standalone jar.
407 |
408 |
409 | # License & Attributions
410 |
411 | This project is released under the Apache License Version 2.0 (APLv2).
412 |
--------------------------------------------------------------------------------
/build.gradle:
--------------------------------------------------------------------------------
1 | plugins {
2 | id 'java'
3 | id 'idea'
4 | id 'com.github.johnrengelman.shadow' version '5.2.0'
5 | id 'com.adarshr.test-logger' version '2.1.0'
6 | }
7 |
8 | defaultTasks 'build'
9 |
10 | group = 'com.airbnb'
11 | description = 'StatsD Support for Kafka Metrics (kafka version: 0.8 and 0.9)'
12 |
13 | repositories {
14 | mavenCentral()
15 | maven { url "http://dl.bintray.com/airbnb/jars" }
16 | }
17 |
18 | ext {
19 | defaultKafkaVersion = '0.9.0.1'
20 | kafkaVersion = project.getProperties().getOrDefault("kafkaVersion", defaultKafkaVersion)
21 | kafkaVersionsToTest = ['0.8.2.2', '0.9.0.1', '0.10.2.2', '1.1.1', '2.3.1']
22 |
23 | testWithKafkaClient = project.getProperties().getOrDefault("testWithKafkaClient", false)
24 | kafkaClientVersionsToTest = ['0.9.0.1', '0.10.2.2', '1.1.1', '2.3.1']
25 | }
26 |
27 | dependencies {
28 | compile 'com.indeed:java-dogstatsd-client:2.0.11'
29 |
30 | if (testWithKafkaClient) {
31 | compileOnly "org.apache.kafka:kafka-clients:${kafkaVersion}"
32 | } else {
33 | compileOnly "org.apache.kafka:kafka_2.11:${defaultKafkaVersion}"
34 | }
35 | compileOnly 'org.slf4j:slf4j-log4j12:+'
36 |
37 | if (testWithKafkaClient) {
38 | testCompile "org.apache.kafka:kafka-clients:${kafkaVersion}"
39 | } else {
40 | testCompile "org.apache.kafka:kafka_2.11:${kafkaVersion}"
41 | }
42 | testCompile 'org.slf4j:slf4j-log4j12:+'
43 | testCompile 'junit:junit:4.11'
44 | testCompile 'org.easymock:easymock:3.2'
45 | testCompile 'org.mockito:mockito-inline:3.5.10'
46 | testCompile 'com.google.guava:guava:29.0-jre'
47 | }
48 |
49 | compileJava {
50 | options.setDeprecation true
51 | options.encoding = 'UTF-8'
52 | }
53 |
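// Excludes the listed sources (directory prefix plus class name, with ".java" appended) from the given
// compile task; used below to skip classes that are not needed when building against kafka-clients only.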
54 | def excludeFromCompile(Task task, Map<String, List<String>> excludeMap) {
55 | excludeMap.each { path, filesToExclude ->
56 | filesToExclude.each { fileToExclude ->
57 | task.exclude(path + fileToExclude + ".java")
58 | }
59 | }
60 | }
61 |
62 | if (testWithKafkaClient) {
63 | def srcExcludes = [
64 | "com/airbnb/kafka/kafka08/": ["**/*"],
65 | "com/airbnb/metrics/": ["ExcludeMetricPredicate", "MetricNameFormatter", "Parser", "ParserForNoTag", "ParserForTagInMBeanName", "StatsDReporter"]
66 | ]
67 | def testExcludes = [
68 | "com/airbnb/kafka/kafka08/": ["**/*"],
69 | "com/airbnb/metrics/": ["DimensionTest", "ExcludeMetricPredicateTest", "MetricNameFormatterTest", "ParserTest", "StatsDReporterTest"]
70 | ]
71 |
72 | excludeFromCompile(compileJava, srcExcludes)
73 | excludeFromCompile(compileTestJava, testExcludes)
74 | }
75 |
76 | configurations {
77 | // manually excludes some unnecessary dependencies
78 | compile.exclude module: 'zookeeper'
79 | compile.exclude module: 'zkclient'
80 | compile.exclude module: 'javax'
81 | compile.exclude module: 'jline'
82 | compile.exclude module: 'jms'
83 | compile.exclude module: 'jmxri'
84 | compile.exclude module: 'jmxtools'
85 | compile.exclude module: 'mail'
86 | }
87 |
88 | shadowJar {
89 | exclude 'META-INF/*.DSA'
90 | exclude 'META-INF/*.RSA'
91 | exclude 'META-INF/maven/*'
92 | }
93 |
94 | def testKafkaVersionTasks = []
95 |
96 | for (kafkaVersion in kafkaVersionsToTest) {
97 | String kafkaVersionUnderscored = kafkaVersion.replace('.', '_')
98 | def task = tasks.create(name: "testWithKafkaCore_${kafkaVersionUnderscored}", type: GradleBuild) {
99 | // Changing `buildName` is a workaround for a Gradle issue when trying to run
100 | // the aggregate task `testWithAllSupportedKafkaVersions`.
101 | // See the following issues:
102 | // 1. https://github.com/gradle/gradle/issues/11301
103 | // 2. https://github.com/gradle/gradle/issues/12872
104 | buildName = project.getName() + "-kafka_" + kafkaVersionUnderscored
105 | buildFile = './build.gradle'
106 | tasks = ['test']
107 | startParameter.projectProperties = [kafkaVersion: "${kafkaVersion}"]
108 | group = 'Verification'
109 | description = "Runs the unit tests with Kafka core version ${kafkaVersion}"
110 | }
111 |
112 | if (!testKafkaVersionTasks.isEmpty()) {
113 | task.mustRunAfter(testKafkaVersionTasks.last())
114 | }
115 |
116 | testKafkaVersionTasks.add(task)
117 | }
118 |
119 | test.doFirst {
120 | println "Running tests with Kafka version ${kafkaVersion}"
121 | println "Testing with Kafka clients: ${testWithKafkaClient}"
122 | }
123 |
124 | task testWithAllSupportedKafkaCoreVersions {
125 | group = 'Verification'
126 | description = "Runs the unit tests with all supported Kafka core versions: ${kafkaVersionsToTest}"
127 | dependsOn(testKafkaVersionTasks)
128 | }
129 |
130 | def testKafkaClientVersionTasks = []
131 |
132 | for (kafkaVersion in kafkaClientVersionsToTest) {
133 | String kafkaVersionUnderscored = kafkaVersion.replace('.', '_')
134 | def task = tasks.create(name: "testWithKafkaClient_${kafkaVersionUnderscored}", type: GradleBuild) {
135 | // Changing `buildName` is a workaround for a Gradle issue when trying to run
136 | // the aggregate task `testWithAllSupportedKafkaVersions`.
137 | // See the following issues:
138 | // 1. https://github.com/gradle/gradle/issues/11301
139 | // 2. https://github.com/gradle/gradle/issues/12872
140 | buildName = project.getName() + "-kafka-clients_" + kafkaVersionUnderscored
141 | buildFile = './build.gradle'
142 | tasks = ['test']
143 | startParameter.projectProperties = [kafkaVersion: "${kafkaVersion}", testWithKafkaClient: true]
144 | group = 'Verification'
145 | description = "Runs the unit tests with Kafka client version ${kafkaVersion}"
146 | }
147 |
148 | if (!testKafkaClientVersionTasks.isEmpty()) {
149 | task.mustRunAfter(testKafkaClientVersionTasks.last())
150 | }
151 |
152 | testKafkaClientVersionTasks.add(task)
153 | }
154 |
155 | task testWithAllSupportedKafkaClientVersions {
156 | group = 'Verification'
157 | description = "Runs the unit tests with all supported Kafka client versions: ${kafkaClientVersionsToTest}"
158 | dependsOn(testKafkaClientVersionTasks)
159 | }
160 |
161 | task testWithAllSupportedKafkaVersions {
162 | group = 'Verification'
163 |   description = "Runs the unit tests with all supported Kafka core versions (${kafkaVersionsToTest}) and client versions (${kafkaClientVersionsToTest})"
164 | dependsOn(testWithAllSupportedKafkaCoreVersions, testWithAllSupportedKafkaClientVersions)
165 | }
166 |
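167 | // Example invocations (illustrative):
168 | //   ./gradlew testWithKafkaCore_0_9_0_1            -- run the tests against a single Kafka core version
169 | //   ./gradlew -PkafkaVersion=1.1.1 test            -- run the default test task against another Kafka core version
170 | //   ./gradlew testWithAllSupportedKafkaVersions    -- run the tests against every supported core and client version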
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 | version=0.5.4
2 |
3 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/airbnb/kafka-statsd-metrics2/4f320c8c7118b7b8823e5b4db78baab5a02d4571/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Wed Jul 30 14:46:29 PDT 2014
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-6.2.1-all.zip
7 |
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
10 | DEFAULT_JVM_OPTS=""
11 |
12 | APP_NAME="Gradle"
13 | APP_BASE_NAME=`basename "$0"`
14 |
15 | # Use the maximum available, or set MAX_FD != -1 to use that value.
16 | MAX_FD="maximum"
17 |
18 | warn ( ) {
19 | echo "$*"
20 | }
21 |
22 | die ( ) {
23 | echo
24 | echo "$*"
25 | echo
26 | exit 1
27 | }
28 |
29 | # OS specific support (must be 'true' or 'false').
30 | cygwin=false
31 | msys=false
32 | darwin=false
33 | case "`uname`" in
34 | CYGWIN* )
35 | cygwin=true
36 | ;;
37 | Darwin* )
38 | darwin=true
39 | ;;
40 | MINGW* )
41 | msys=true
42 | ;;
43 | esac
44 |
45 | # For Cygwin, ensure paths are in UNIX format before anything is touched.
46 | if $cygwin ; then
47 | [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
48 | fi
49 |
50 | # Attempt to set APP_HOME
51 | # Resolve links: $0 may be a link
52 | PRG="$0"
53 | # Need this for relative symlinks.
54 | while [ -h "$PRG" ] ; do
55 | ls=`ls -ld "$PRG"`
56 | link=`expr "$ls" : '.*-> \(.*\)$'`
57 | if expr "$link" : '/.*' > /dev/null; then
58 | PRG="$link"
59 | else
60 | PRG=`dirname "$PRG"`"/$link"
61 | fi
62 | done
63 | SAVED="`pwd`"
64 | cd "`dirname \"$PRG\"`/" >&-
65 | APP_HOME="`pwd -P`"
66 | cd "$SAVED" >&-
67 |
68 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
69 |
70 | # Determine the Java command to use to start the JVM.
71 | if [ -n "$JAVA_HOME" ] ; then
72 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
73 | # IBM's JDK on AIX uses strange locations for the executables
74 | JAVACMD="$JAVA_HOME/jre/sh/java"
75 | else
76 | JAVACMD="$JAVA_HOME/bin/java"
77 | fi
78 | if [ ! -x "$JAVACMD" ] ; then
79 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
80 |
81 | Please set the JAVA_HOME variable in your environment to match the
82 | location of your Java installation."
83 | fi
84 | else
85 | JAVACMD="java"
86 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
87 |
88 | Please set the JAVA_HOME variable in your environment to match the
89 | location of your Java installation."
90 | fi
91 |
92 | # Increase the maximum file descriptors if we can.
93 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
94 | MAX_FD_LIMIT=`ulimit -H -n`
95 | if [ $? -eq 0 ] ; then
96 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
97 | MAX_FD="$MAX_FD_LIMIT"
98 | fi
99 | ulimit -n $MAX_FD
100 | if [ $? -ne 0 ] ; then
101 | warn "Could not set maximum file descriptor limit: $MAX_FD"
102 | fi
103 | else
104 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
105 | fi
106 | fi
107 |
108 | # For Darwin, add options to specify how the application appears in the dock
109 | if $darwin; then
110 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
111 | fi
112 |
113 | # For Cygwin, switch paths to Windows format before running java
114 | if $cygwin ; then
115 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
116 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
117 |
118 | # We build the pattern for arguments to be converted via cygpath
119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120 | SEP=""
121 | for dir in $ROOTDIRSRAW ; do
122 | ROOTDIRS="$ROOTDIRS$SEP$dir"
123 | SEP="|"
124 | done
125 | OURCYGPATTERN="(^($ROOTDIRS))"
126 | # Add a user-defined pattern to the cygpath arguments
127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129 | fi
130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
131 | i=0
132 | for arg in "$@" ; do
133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135 |
136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138 | else
139 | eval `echo args$i`="\"$arg\""
140 | fi
141 | i=$((i+1))
142 | done
143 | case $i in
144 | (0) set -- ;;
145 | (1) set -- "$args0" ;;
146 | (2) set -- "$args0" "$args1" ;;
147 | (3) set -- "$args0" "$args1" "$args2" ;;
148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154 | esac
155 | fi
156 |
157 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
158 | function splitJvmOpts() {
159 | JVM_OPTS=("$@")
160 | }
161 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
162 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
163 |
164 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
165 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
12 | set DEFAULT_JVM_OPTS=
13 |
14 | set DIRNAME=%~dp0
15 | if "%DIRNAME%" == "" set DIRNAME=.
16 | set APP_BASE_NAME=%~n0
17 | set APP_HOME=%DIRNAME%
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 | if "%@eval[2+2]" == "4" goto 4NT_args
53 |
54 | :win9xME_args
55 | @rem Slurp the command line arguments.
56 | set CMD_LINE_ARGS=
57 | set _SKIP=2
58 |
59 | :win9xME_args_slurp
60 | if "x%~1" == "x" goto execute
61 |
62 | set CMD_LINE_ARGS=%*
63 | goto execute
64 |
65 | :4NT_args
66 | @rem Get arguments from the 4NT Shell from JP Software
67 | set CMD_LINE_ARGS=%$
68 |
69 | :execute
70 | @rem Setup the command line
71 |
72 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
73 |
74 | @rem Execute Gradle
75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
76 |
77 | :end
78 | @rem End local scope for the variables with windows NT shell
79 | if "%ERRORLEVEL%"=="0" goto mainEnd
80 |
81 | :fail
82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
83 | rem the _cmd.exe /c_ return code!
84 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
85 | exit /b 1
86 |
87 | :mainEnd
88 | if "%OS%"=="Windows_NT" endlocal
89 |
90 | :omega
91 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/kafka/kafka08/StatsdMetricsReporter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.kafka.kafka08;
18 |
19 | import com.airbnb.metrics.Dimension;
20 | import com.airbnb.metrics.ExcludeMetricPredicate;
21 | import com.airbnb.metrics.StatsDReporter;
22 | import com.timgroup.statsd.NonBlockingStatsDClient;
23 | import com.timgroup.statsd.StatsDClient;
24 | import com.timgroup.statsd.StatsDClientException;
25 | import com.yammer.metrics.Metrics;
26 | import com.yammer.metrics.core.MetricPredicate;
27 | import com.yammer.metrics.reporting.AbstractPollingReporter;
28 | import kafka.metrics.KafkaMetricsReporter;
29 | import kafka.utils.VerifiableProperties;
30 | import org.slf4j.LoggerFactory;
31 |
32 | import java.util.EnumSet;
33 | import java.util.concurrent.TimeUnit;
34 | import java.util.concurrent.atomic.AtomicBoolean;
35 |
36 | /**
37 | *
38 | */
39 | public class StatsdMetricsReporter implements StatsdMetricsReporterMBean, KafkaMetricsReporter {
40 |
41 |   private static final org.slf4j.Logger log = LoggerFactory.getLogger(StatsdMetricsReporter.class);
42 |
43 | public static final String DEFAULT_EXCLUDE_REGEX = "(kafka\\.server\\.FetcherStats.*ConsumerFetcherThread.*)|(kafka\\.consumer\\.FetchRequestAndResponseMetrics.*)|(.*ReplicaFetcherThread.*)|(kafka\\.server\\.FetcherLagMetrics\\..*)|(kafka\\.log\\.Log\\..*)|(kafka\\.cluster\\.Partition\\..*)";
44 |
45 | private boolean enabled;
46 | private final AtomicBoolean running = new AtomicBoolean(false);
47 | private String host;
48 | private int port;
49 | private String prefix;
50 | private long pollingPeriodInSeconds;
51 |   private EnumSet<Dimension> metricDimensions;
52 | private MetricPredicate metricPredicate;
53 | private StatsDClient statsd;
54 | private boolean isTagEnabled;
55 | private AbstractPollingReporter underlying = null;
56 |
57 | @Override
58 | public String getMBeanName() {
59 | return "kafka:type=" + getClass().getName();
60 | }
61 |
62 | public boolean isRunning() {
63 | return running.get();
64 | }
65 |
66 | //try to make it compatible with kafka-statsd-metrics2
67 | @Override
68 | public synchronized void init(VerifiableProperties props) {
69 | loadConfig(props);
70 | if (enabled) {
71 | log.info("Reporter is enabled and starting...");
72 | startReporter(pollingPeriodInSeconds);
73 | } else {
74 | log.warn("Reporter is disabled");
75 | }
76 | }
77 |
78 | private void loadConfig(VerifiableProperties props) {
79 | enabled = props.getBoolean("external.kafka.statsd.reporter.enabled", false);
80 | host = props.getString("external.kafka.statsd.host", "localhost");
81 | port = props.getInt("external.kafka.statsd.port", 8125);
82 | prefix = props.getString("external.kafka.statsd.metrics.prefix", "");
83 | pollingPeriodInSeconds = props.getInt("kafka.metrics.polling.interval.secs", 10);
84 | metricDimensions = Dimension.fromProperties(props.props(), "external.kafka.statsd.dimension.enabled.");
85 |
86 | String excludeRegex = props.getString("external.kafka.statsd.metrics.exclude_regex", DEFAULT_EXCLUDE_REGEX);
87 | if (excludeRegex != null && excludeRegex.length() != 0) {
88 | metricPredicate = new ExcludeMetricPredicate(excludeRegex);
89 | } else {
90 | metricPredicate = MetricPredicate.ALL;
91 | }
92 |
93 | this.isTagEnabled = props.getBoolean("external.kafka.statsd.tag.enabled", true);
94 | }
95 |
96 | @Override
97 | public void startReporter(long pollingPeriodInSeconds) {
98 | if (pollingPeriodInSeconds <= 0) {
99 | throw new IllegalArgumentException("Polling period must be greater than zero");
100 | }
101 |
102 | synchronized (running) {
103 | if (running.get()) {
104 | log.warn("Reporter is already running");
105 | } else {
106 | statsd = createStatsd();
107 | underlying = new StatsDReporter(
108 | Metrics.defaultRegistry(),
109 | statsd,
110 | metricPredicate,
111 | metricDimensions,
112 | isTagEnabled);
113 | underlying.start(pollingPeriodInSeconds, TimeUnit.SECONDS);
114 | log.info("Started Reporter with host={}, port={}, polling_period_secs={}, prefix={}",
115 | host, port, pollingPeriodInSeconds, prefix);
116 | running.set(true);
117 | }
118 | }
119 | }
120 |
121 | private StatsDClient createStatsd() {
122 | try {
123 | return new NonBlockingStatsDClient(
124 | prefix, /* prefix to any stats; may be null or empty string */
125 | host, /* common case: localhost */
126 | port /* port */
127 | );
128 | } catch (StatsDClientException ex) {
129 | log.error("Reporter cannot be started");
130 | throw ex;
131 | }
132 | }
133 |
134 | @Override
135 | public void stopReporter() {
136 | if (!enabled) {
137 | log.warn("Reporter is disabled");
138 | } else {
139 | synchronized (running) {
140 | if (running.get()) {
141 | underlying.shutdown();
142 | statsd.stop();
143 | running.set(false);
144 | log.info("Stopped Reporter with host={}, port={}", host, port);
145 | } else {
146 | log.warn("Reporter is not running");
147 | }
148 | }
149 | }
150 | }
151 |
152 | }
153 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/kafka/kafka08/StatsdMetricsReporterMBean.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.kafka.kafka08;
18 |
19 | import kafka.metrics.KafkaMetricsReporterMBean;
20 |
21 | /**
22 | * @see kafka.metrics.KafkaMetricsReporterMBean: the custom reporter needs to
23 | * additionally implement an MBean trait that extends kafka.metrics.KafkaMetricsReporterMBean
24 | * so that the registered MBean is compliant with the standard MBean convention.
25 | */
26 | public interface StatsdMetricsReporterMBean extends KafkaMetricsReporterMBean {
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/kafka/kafka09/StatsdMetricsReporter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.kafka.kafka09;
18 |
19 | import com.airbnb.metrics.Dimension;
20 | import com.airbnb.metrics.KafkaStatsDReporter;
21 | import com.airbnb.metrics.MetricInfo;
22 | import com.airbnb.metrics.StatsDMetricsRegistry;
23 |
24 | import java.util.EnumSet;
25 | import java.util.HashMap;
26 | import java.util.List;
27 | import java.util.Map;
28 | import java.util.concurrent.TimeUnit;
29 | import java.util.concurrent.atomic.AtomicBoolean;
30 |
31 | import com.timgroup.statsd.NonBlockingStatsDClient;
32 | import com.timgroup.statsd.StatsDClient;
33 | import com.timgroup.statsd.StatsDClientException;
34 | import org.apache.kafka.common.MetricName;
35 | import org.apache.kafka.common.metrics.KafkaMetric;
36 | import org.apache.kafka.common.metrics.MetricsReporter;
37 | import org.slf4j.LoggerFactory;
38 |
39 | public class StatsdMetricsReporter implements MetricsReporter {
40 | private static final org.slf4j.Logger log = LoggerFactory.getLogger(StatsdMetricsReporter.class);
41 |
42 | public static final String REPORTER_NAME = "kafka-statsd-metrics-0.5";
43 |
44 | public static final String STATSD_REPORTER_ENABLED = "external.kafka.statsd.reporter.enabled";
45 | public static final String STATSD_HOST = "external.kafka.statsd.host";
46 | public static final String STATSD_PORT = "external.kafka.statsd.port";
47 | public static final String STATSD_METRICS_PREFIX = "external.kafka.statsd.metrics.prefix";
48 | public static final String POLLING_INTERVAL_SECS = "kafka.metrics.polling.interval.secs";
49 | public static final String STATSD_DIMENSION_ENABLED = "external.kafka.statsd.dimension.enabled";
50 |
51 | private static final String METRIC_PREFIX = "kafka.";
52 | private static final int POLLING_PERIOD_IN_SECONDS = 10;
53 |
54 | private boolean enabled;
55 | private final AtomicBoolean running = new AtomicBoolean(false);
56 | private String host;
57 | private int port;
58 | private String prefix;
59 | private long pollingPeriodInSeconds;
60 |   private EnumSet<Dimension> metricDimensions;
61 | private StatsDClient statsd;
62 |   private Map<String, KafkaMetric> kafkaMetrics;
63 |
64 | StatsDMetricsRegistry registry;
65 | KafkaStatsDReporter underlying = null;
66 |
67 | public boolean isRunning() {
68 | return running.get();
69 | }
70 |
71 | @Override
72 |   public void init(List<KafkaMetric> metrics) {
73 | registry = new StatsDMetricsRegistry();
74 |     kafkaMetrics = new HashMap<>();
75 |
76 | if (enabled) {
77 | startReporter(POLLING_PERIOD_IN_SECONDS);
78 | } else {
79 | log.warn("KafkaStatsDReporter is disabled");
80 | }
81 |
82 | for (KafkaMetric metric : metrics) {
83 | metricChange(metric);
84 | }
85 | }
86 |
87 | private String getMetricName(final KafkaMetric metric) {
88 | MetricName metricName = metric.metricName();
89 |
90 | return METRIC_PREFIX + metricName.group() + "." + metricName.name();
91 | }
92 |
93 | @Override
94 | public void metricChange(final KafkaMetric metric) {
95 | String name = getMetricName(metric);
96 |
97 | StringBuilder strBuilder = new StringBuilder();
98 |
99 | for (String key : metric.metricName().tags().keySet()) {
100 | strBuilder.append(key).append(":").append(metric.metricName().tags().get(key)).append(",");
101 | }
102 |
103 | if (strBuilder.length() > 0) {
104 | strBuilder.deleteCharAt(strBuilder.length() - 1);
105 | }
106 |
107 | registry.register(metric.metricName(), new MetricInfo(metric, name, strBuilder.toString()));
108 | log.debug("metrics name: {}", name);
109 | }
110 |
111 | @Override
112 | public void metricRemoval(KafkaMetric metric) {
113 | registry.unregister(metric.metricName());
114 | }
115 |
116 | @Override
117 | public void close() {
118 | stopReporter();
119 | }
120 |
121 | @Override
122 |   public void configure(Map<String, ?> configs) {
123 | enabled = configs.containsKey(STATSD_REPORTER_ENABLED) ?
124 | Boolean.valueOf((String) configs.get(STATSD_REPORTER_ENABLED)) : false;
125 | host = configs.containsKey(STATSD_HOST) ?
126 | (String) configs.get(STATSD_HOST) : "localhost";
127 | port = configs.containsKey(STATSD_PORT) ?
128 | Integer.valueOf((String) configs.get(STATSD_PORT)) : 8125;
129 | prefix = configs.containsKey(STATSD_METRICS_PREFIX) ?
130 | (String) configs.get(STATSD_METRICS_PREFIX) : "";
131 | pollingPeriodInSeconds = configs.containsKey(POLLING_INTERVAL_SECS) ?
132 | Integer.valueOf((String) configs.get(POLLING_INTERVAL_SECS)) : 10;
133 | metricDimensions = Dimension.fromConfigs(configs, STATSD_DIMENSION_ENABLED);
134 | }
135 |
136 | public void startReporter(long pollingPeriodInSeconds) {
137 | if (pollingPeriodInSeconds <= 0) {
138 | throw new IllegalArgumentException("Polling period must be greater than zero");
139 | }
140 |
141 | synchronized (running) {
142 | if (running.get()) {
143 | log.warn("KafkaStatsDReporter: {} is already running", REPORTER_NAME);
144 | } else {
145 | statsd = createStatsd();
146 | underlying = new KafkaStatsDReporter(statsd, registry);
147 | underlying.start(pollingPeriodInSeconds, TimeUnit.SECONDS);
148 | log.info(
149 | "Started KafkaStatsDReporter: {} with host={}, port={}, polling_period_secs={}, prefix={}",
150 | REPORTER_NAME, host, port, pollingPeriodInSeconds, prefix
151 | );
152 | running.set(true);
153 | }
154 | }
155 | }
156 |
157 | StatsDClient createStatsd() {
158 | try {
159 | return new NonBlockingStatsDClient(prefix, host, port);
160 | } catch (StatsDClientException ex) {
161 | log.error("KafkaStatsDReporter cannot be started");
162 | throw ex;
163 | }
164 | }
165 |
166 | private void stopReporter() {
167 | if (!enabled) {
168 | log.warn("KafkaStatsDReporter is disabled");
169 | } else {
170 | synchronized (running) {
171 | if (running.get()) {
172 | try {
173 | underlying.shutdown();
174 | } catch (InterruptedException e) {
175 | log.warn("Stop reporter exception: {}", e);
176 | }
177 | statsd.stop();
178 | running.set(false);
179 | log.info("Stopped KafkaStatsDReporter with host={}, port={}", host, port);
180 | } else {
181 | log.warn("KafkaStatsDReporter is not running");
182 | }
183 | }
184 | }
185 | }
186 | }
187 |
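188 | // Illustrative wiring (a sketch, not part of this class): a producer, consumer, or broker on
189 | // Kafka 0.9+ can load this reporter through the standard "metric.reporters" setting, with the
190 | // external.* keys matching the constants defined above, e.g.
191 | //
192 | //   metric.reporters=com.airbnb.kafka.kafka09.StatsdMetricsReporter
193 | //   external.kafka.statsd.reporter.enabled=true
194 | //   external.kafka.statsd.host=localhost
195 | //   external.kafka.statsd.port=8125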
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/Dimension.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import java.util.EnumSet;
20 | import java.util.Map;
21 | import java.util.Properties;
22 |
23 | /**
24 | *
25 | */
26 | public enum Dimension { //use name itself as suffix
27 | count("count"),
28 | meanRate("meanRate"),
29 | rate1m("1MinuteRate"),
30 | rate5m("5MinuteRate"),
31 | rate15m("15MinuteRate"),
32 | min("min"),
33 | max("max"),
34 | mean("mean"),
35 | stddev("stddev"),
36 | median("median"),
37 | p75("p75"),
38 | p95("p95"),
39 | p98("p98"),
40 | p99("p99"),
41 | p999("p999");
42 |
43 | final String displayName;
44 |
45 | public String getDisplayName() {
46 | return displayName;
47 | }
48 |
49 | Dimension(String defaultValue) {
50 | this.displayName = defaultValue;
51 | }
52 |
53 |   public static EnumSet<Dimension> fromProperties(Properties p, String prefix) {
54 |     EnumSet<Dimension> df = EnumSet.allOf(Dimension.class);
55 | for (Dimension k : Dimension.values()) {
56 | String key = prefix + k.toString();
57 | if (p.containsKey(key)) {
58 | Boolean value = Boolean.parseBoolean(p.getProperty(key));
59 | if (!value) {
60 | df.remove(k);
61 | }
62 | }
63 | }
64 | return df;
65 | }
66 |
67 |   public static EnumSet<Dimension> fromConfigs(Map<String, ?> configs, String prefix) {
68 |     EnumSet<Dimension> df = EnumSet.allOf(Dimension.class);
69 | for (Dimension k : Dimension.values()) {
70 | String key = prefix + k.toString();
71 | if (configs.containsKey(key)) {
72 | Boolean value = (Boolean) configs.get(key);
73 | if (!value) {
74 | df.remove(k);
75 | }
76 | }
77 | }
78 | return df;
79 | }
80 | }
81 |
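82 | // Illustrative usage: each dimension can be switched off individually. With the property prefix
83 | // used by the kafka08 reporter ("external.kafka.statsd.dimension.enabled."), setting
84 | //
85 | //   external.kafka.statsd.dimension.enabled.p999=false
86 | //
87 | // keeps every dimension enabled except the p999 percentile.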
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/ExcludeMetricPredicate.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import com.yammer.metrics.core.Metric;
20 | import com.yammer.metrics.core.MetricName;
21 | import com.yammer.metrics.core.MetricPredicate;
22 | import org.apache.log4j.Logger;
23 |
24 | import java.util.regex.Pattern;
25 |
26 | /**
27 | *
28 | */
29 | public class ExcludeMetricPredicate implements MetricPredicate {
30 | private final Logger logger = Logger.getLogger(getClass());
31 |
32 | final String excludeRegex;
33 | final Pattern pattern;
34 |
35 | public ExcludeMetricPredicate(String excludeRegex) {
36 | this.excludeRegex = excludeRegex;
37 | this.pattern = Pattern.compile(excludeRegex);
38 | }
39 |
40 | @Override
41 | public boolean matches(MetricName name, Metric metric) {
42 | String n = MetricNameFormatter.format(name);
43 | boolean excluded = pattern.matcher(n).matches();
44 | if (excluded) {
45 | if (logger.isTraceEnabled()) {
46 | logger.trace("Metric " + n + " is excluded");
47 | }
48 | }
49 | return !excluded;
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/KafkaStatsDReporter.java:
--------------------------------------------------------------------------------
1 | package com.airbnb.metrics;
2 |
3 | import java.util.concurrent.ScheduledExecutorService;
4 | import java.util.concurrent.ScheduledThreadPoolExecutor;
5 | import java.util.concurrent.TimeUnit;
6 |
7 | import com.timgroup.statsd.StatsDClient;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | public class KafkaStatsDReporter implements Runnable {
12 | private static final Logger log = LoggerFactory.getLogger(KafkaStatsDReporter.class);
13 | private final ScheduledExecutorService executor;
14 |
15 | private final StatsDClient statsDClient;
16 | private final StatsDMetricsRegistry registry;
17 |
18 | public KafkaStatsDReporter(
19 | StatsDClient statsDClient,
20 | StatsDMetricsRegistry registry
21 | ) {
22 | this.statsDClient = statsDClient;
23 | this.registry = registry;
24 | this.executor = new ScheduledThreadPoolExecutor(1);
25 | }
26 |
27 | public void start(
28 | long period,
29 | TimeUnit unit
30 | ) {
31 | executor.scheduleWithFixedDelay(this, period, period, unit);
32 | }
33 |
34 | public void shutdown() throws InterruptedException {
35 | executor.shutdown();
36 | }
37 |
38 | private void sendAllKafkaMetrics() {
39 | registry.getAllMetricInfo().forEach(this::sendAMetric);
40 | }
41 |
42 | private void sendAMetric(MetricInfo metricInfo) {
43 | String metricName = metricInfo.getName();
44 | String tags = metricInfo.getTags();
45 |
46 |
47 | final Object value = metricInfo.getMetric().value();
48 | Double val = new Double(value.toString());
49 |
50 | if (val == Double.NEGATIVE_INFINITY || val == Double.POSITIVE_INFINITY) {
51 | val = 0D;
52 | }
53 |
54 | if (tags != null) {
55 | statsDClient.gauge(metricName, val, tags);
56 | } else {
57 | statsDClient.gauge(metricName, val);
58 | }
59 | }
60 |
61 | @Override
62 | public void run() {
63 | sendAllKafkaMetrics();
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/MetricInfo.java:
--------------------------------------------------------------------------------
1 | package com.airbnb.metrics;
2 |
3 | import org.apache.kafka.common.Metric;
4 |
5 | public class MetricInfo {
6 | private final Metric metric;
7 | private final String name;
8 | private final String tags;
9 |
10 | public MetricInfo(Metric metric, String name, String tags) {
11 | this.metric = metric;
12 | this.name = name;
13 | this.tags = tags;
14 | }
15 |
16 | public Metric getMetric() {
17 | return metric;
18 | }
19 |
20 | public String getName() {
21 | return name;
22 | }
23 |
24 | public String getTags() {
25 | return tags;
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/MetricNameFormatter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 |
20 | import com.yammer.metrics.core.MetricName;
21 |
22 | import java.util.regex.Matcher;
23 | import java.util.regex.Pattern;
24 |
25 |
26 | public class MetricNameFormatter {
27 | static final Pattern whitespaceRegex = Pattern.compile("\\s+");
28 |
29 |
30 | public static String formatWithScope(MetricName metricName) {
31 | StringBuilder sb = new StringBuilder(128)
32 | .append(metricName.getGroup())
33 | .append('.')
34 | .append(metricName.getType())
35 | .append('.');
36 | if (metricName.hasScope() && !metricName.getScope().isEmpty()) {
37 | sb.append(metricName.getScope())
38 | .append(".");
39 | }
40 | sb.append(sanitizeName(metricName.getName()));
41 | return sb.toString();
42 | }
43 |
44 | public static String format(MetricName metricName) {
45 | return format(metricName, "");
46 | }
47 |
48 | public static String format(MetricName metricName, String suffix) {
49 | return new StringBuilder(128)
50 | .append(metricName.getGroup())
51 | .append('.')
52 | .append(metricName.getType())
53 | .append('.')
54 | .append(sanitizeName(metricName.getName()))
55 | .append(suffix)
56 | .toString();
57 | }
58 |
59 | public static String sanitizeName(String name) {
60 | Matcher m = whitespaceRegex.matcher(name);
61 | if (m.find())
62 | return m.replaceAll("_");
63 | else
64 | return name;
65 | }
66 | }
67 |
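68 | // Worked example (illustrative): for the yammer MetricName("kafka.server", "BrokerTopicMetrics", "MessagesInPerSec"),
69 | //   format(name)          -> "kafka.server.BrokerTopicMetrics.MessagesInPerSec"
70 | //   format(name, "_all")  -> "kafka.server.BrokerTopicMetrics.MessagesInPerSec_all"
71 | // and sanitizeName("Fetch Request") -> "Fetch_Request" (whitespace runs become underscores).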
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/Parser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import com.yammer.metrics.core.MetricName;
20 | import org.slf4j.Logger;
21 | import org.slf4j.LoggerFactory;
22 |
23 | /**
24 | *
25 | */
26 | public abstract class Parser {
27 |
28 | static final Logger log = LoggerFactory.getLogger(Parser.class);
29 |
30 | protected String name;
31 | protected String[] tags;
32 |
33 | public String getName() {
34 | return name;
35 | }
36 |
37 | public String[] getTags() {
38 | return tags;
39 | }
40 |
41 | public abstract void parse(MetricName metricName);
42 |
43 | }
44 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/ParserForNoTag.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import com.yammer.metrics.core.MetricName;
20 |
21 | import static com.airbnb.metrics.MetricNameFormatter.formatWithScope;
22 |
23 | /**
24 | * Parser for statsd not supporting tags
25 | */
26 | public class ParserForNoTag extends Parser {
27 |
28 | public static final String[] EMPTY_TAG = new String[]{};
29 |
30 | @Override
31 | public void parse(MetricName metricName) {
32 | name = formatWithScope(metricName);
33 | tags = EMPTY_TAG;
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/ParserForTagInMBeanName.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import com.yammer.metrics.core.MetricName;
20 |
21 | import java.util.Map;
22 | import java.util.concurrent.ConcurrentHashMap;
23 | import java.util.regex.Pattern;
24 |
25 | import static com.airbnb.metrics.MetricNameFormatter.format;
26 |
27 | /**
28 | * Parser for kafka 0.8.2 or later version
29 | * where the MBeanName contains tags and
30 | * Scope will store tags as well.
31 | */
32 | public class ParserForTagInMBeanName extends Parser {
33 |
34 | public static final String SUFFIX_FOR_ALL = "_all";
35 | public static final String[] UNKNOWN_TAG = new String[]{"clientId:unknown"};
36 | public static final String[] EMPTY_TAG = new String[]{};
37 |
38 | @Override
39 | public void parse(MetricName metricName) {
40 | Pattern p = tagRegexMap.get(metricName.getType());
41 | if (p != null && !p.matcher(metricName.getMBeanName()).matches()) {
42 | name = format(metricName, SUFFIX_FOR_ALL);
43 | } else {
44 | name = format(metricName);
45 | }
46 | tags = parseTags(metricName);
47 | }
48 | //todo update documents
49 |
50 | private String[] parseTags(MetricName metricName) {
51 | String[] tags = EMPTY_TAG;
52 | if (metricName.hasScope()) {
53 | final String name = metricName.getName();
54 | final String mBeanName = metricName.getMBeanName();
55 | final int idx = mBeanName.indexOf(name);
56 | if (idx < 0) {
57 | log.error("Cannot find name[{}] in MBeanName[{}]", name, mBeanName);
58 | } else {
59 | String tagStr = mBeanName.substring(idx + name.length() + 1);
60 | if ("kafka.producer".equals(metricName.getGroup()) &&
61 | !tagStr.contains("clientId")) {
62 | tagStr = "clientId=unknown,".concat(tagStr);
63 | }
64 | if (tagStr.length() > 0) {
65 | tags = tagStr.replace('=', ':').split(",");
66 | }
67 | }
68 | } else if ("kafka.producer".equals(metricName.getGroup())) {
69 | tags = UNKNOWN_TAG;
70 | }
71 | return tags;
72 | }
73 |
74 |   public static final Map<String, Pattern> tagRegexMap = new ConcurrentHashMap<String, Pattern>();
75 |
76 | static {
77 | tagRegexMap.put("BrokerTopicMetrics", Pattern.compile(".*topic=.*"));
78 | tagRegexMap.put("DelayedProducerRequestMetrics", Pattern.compile(".*topic=.*"));
79 |
80 | tagRegexMap.put("ProducerTopicMetrics", Pattern.compile(".*topic=.*"));
81 | tagRegexMap.put("ProducerRequestMetrics", Pattern.compile(".*brokerHost=.*"));
82 |
83 | tagRegexMap.put("ConsumerTopicMetrics", Pattern.compile(".*topic=.*"));
84 | tagRegexMap.put("FetchRequestAndResponseMetrics", Pattern.compile(".*brokerHost=.*"));
85 | tagRegexMap.put("ZookeeperConsumerConnector", Pattern.compile(".*name=OwnedPartitionsCount,.*topic=.*|^((?!name=OwnedPartitionsCount).)*$"));
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/StatsDMetricsRegistry.java:
--------------------------------------------------------------------------------
1 | package com.airbnb.metrics;
2 |
3 | import java.util.Collection;
4 | import java.util.HashMap;
5 | import java.util.Map;
6 |
7 | import org.apache.kafka.common.MetricName;
8 |
9 | public class StatsDMetricsRegistry {
10 |   private final Map<MetricName, MetricInfo> metrics;
11 |
12 | public StatsDMetricsRegistry() {
13 | metrics = new HashMap<>();
14 | }
15 |
16 | public void register(MetricName metricName, MetricInfo metricInfo) {
17 | metrics.put(metricName, metricInfo);
18 | }
19 |
20 | public void unregister(MetricName metricName) {
21 | metrics.remove(metricName);
22 | }
23 |
24 |   public Collection<MetricInfo> getAllMetricInfo() {
25 | return metrics.values();
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/java/com/airbnb/metrics/StatsDReporter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import com.timgroup.statsd.StatsDClient;
20 | import com.yammer.metrics.core.*;
21 | import com.yammer.metrics.reporting.AbstractPollingReporter;
22 | import com.yammer.metrics.stats.Snapshot;
23 | import org.slf4j.Logger;
24 | import org.slf4j.LoggerFactory;
25 |
26 | import java.math.BigDecimal;
27 | import java.math.BigInteger;
28 | import java.util.EnumSet;
29 | import java.util.Map;
30 | import java.util.TreeMap;
31 |
32 | import static com.airbnb.metrics.Dimension.*;
33 |
34 | /**
35 | *
36 | */
37 | public class StatsDReporter extends AbstractPollingReporter implements MetricProcessor<Long> {
38 | static final Logger log = LoggerFactory.getLogger(StatsDReporter.class);
39 | public static final String REPORTER_NAME = "kafka-statsd-metrics";
40 |
41 | private final StatsDClient statsd;
42 | private final Clock clock;
43 |   private final EnumSet<Dimension> dimensions;
44 | private MetricPredicate metricPredicate;
45 | private boolean isTagEnabled;
46 |
47 | private Parser parser;
48 |
49 | public StatsDReporter(MetricsRegistry metricsRegistry,
50 | StatsDClient statsd,
51 |                         EnumSet<Dimension> metricDimensions) {
52 | this(metricsRegistry, statsd, REPORTER_NAME, MetricPredicate.ALL, metricDimensions, true);
53 | }
54 |
55 | public StatsDReporter(MetricsRegistry metricsRegistry,
56 | StatsDClient statsd,
57 | MetricPredicate metricPredicate,
58 |                         EnumSet<Dimension> metricDimensions,
59 | boolean isTagEnabled) {
60 | this(metricsRegistry, statsd, REPORTER_NAME, metricPredicate, metricDimensions, isTagEnabled);
61 | }
62 |
63 | public StatsDReporter(MetricsRegistry metricsRegistry,
64 | StatsDClient statsd,
65 | String reporterName,
66 | MetricPredicate metricPredicate,
67 |                         EnumSet<Dimension> metricDimensions,
68 | boolean isTagEnabled) {
69 | super(metricsRegistry, reporterName);
70 | this.statsd = statsd; //exception in statsd is handled by default NO_OP_HANDLER (do nothing)
71 | this.clock = Clock.defaultClock();
72 |     this.parser = null; //postpone creating the parser until kafka has started reporting metrics.
73 | this.dimensions = metricDimensions;
74 | this.metricPredicate = metricPredicate;
75 | this.isTagEnabled = isTagEnabled;
76 | }
77 |
78 | @Override
79 | public void run() {
80 | try {
81 | final long epoch = clock.time() / 1000;
82 | if (parser == null) {
83 | createParser(getMetricsRegistry());
84 | }
85 | sendAllKafkaMetrics(epoch);
86 | } catch (RuntimeException ex) {
87 | log.error("Failed to print metrics to statsd", ex);
88 | }
89 | }
90 |
91 | private void createParser(MetricsRegistry metricsRegistry) {
92 | if (isTagEnabled) {
93 | final boolean isMetricsTagged = isTagged(metricsRegistry.allMetrics());
94 | if (isMetricsTagged) {
95 | log.info("Kafka metrics are tagged");
96 | parser = new ParserForTagInMBeanName();
97 | } else {
98 | parser = new ParserForNoTag();
99 | }
100 | } else {
101 | parser = new ParserForNoTag();
102 | }
103 | }
104 |
105 |   //kafka.common.AppInfo is not reliable; sometimes it is not correctly loaded.
106 |   public boolean isTagged(Map<MetricName, Metric> metrics) {
107 | for (MetricName metricName : metrics.keySet()) {
108 | if ("kafka.common:type=AppInfo,name=Version".equals(metricName.getMBeanName())
109 | || metricName.hasScope()) {
110 | return true;
111 | }
112 | }
113 | return false;
114 | }
115 |
116 | private void sendAllKafkaMetrics(long epoch) {
117 |     final Map<MetricName, Metric> allMetrics = new TreeMap<MetricName, Metric>(getMetricsRegistry().allMetrics());
118 |     for (Map.Entry<MetricName, Metric> entry : allMetrics.entrySet()) {
119 | sendAMetric(entry.getKey(), entry.getValue(), epoch);
120 | }
121 | }
122 |
123 | private void sendAMetric(MetricName metricName, Metric metric, long epoch) {
124 | log.debug("MBeanName[{}], Group[{}], Name[{}], Scope[{}], Type[{}]",
125 | metricName.getMBeanName(), metricName.getGroup(), metricName.getName(),
126 | metricName.getScope(), metricName.getType());
127 |
128 | if (metricPredicate.matches(metricName, metric) && metric != null) {
129 | try {
130 | parser.parse(metricName);
131 | metric.processWith(this, metricName, epoch);
132 | } catch (Exception ignored) {
133 | log.error("Error printing regular metrics:", ignored);
134 | }
135 | }
136 | }
137 |
138 | @Override
139 | public void processCounter(MetricName metricName, Counter counter, Long context) throws Exception {
140 | statsd.gauge(parser.getName(), counter.count(), parser.getTags());
141 | }
142 |
143 | @Override
144 | public void processMeter(MetricName metricName, Metered meter, Long epoch) {
145 | send(meter);
146 | }
147 |
148 | @Override
149 | public void processHistogram(MetricName metricName, Histogram histogram, Long context) throws Exception {
150 | send((Summarizable) histogram);
151 | send((Sampling) histogram);
152 | }
153 |
154 | @Override
155 | public void processTimer(MetricName metricName, Timer timer, Long context) throws Exception {
156 | send((Metered) timer);
157 | send((Summarizable) timer);
158 | send((Sampling) timer);
159 | }
160 |
161 | @Override
162 |   public void processGauge(MetricName metricName, Gauge<?> gauge, Long context) throws Exception {
163 | final Object value = gauge.value();
164 | final Boolean flag = isDoubleParsable(value);
165 | if (flag == null) {
166 | log.debug("Gauge can only record long or double metric, it is " + value.getClass());
167 | } else if (flag.equals(true)) {
168 | statsd.gauge(parser.getName(), new Double(value.toString()), parser.getTags());
169 | } else {
170 | statsd.gauge(parser.getName(), new Long(value.toString()), parser.getTags());
171 | }
172 | }
173 |
174 | protected static final Dimension[] meterDims = {count, meanRate, rate1m, rate5m, rate15m};
175 | protected static final Dimension[] summarizableDims = {min, max, mean, stddev};
176 | protected static final Dimension[] SamplingDims = {median, p75, p95, p98, p99, p999};
177 |
178 | private void send(Metered metric) {
179 | double[] values = {metric.count(), metric.meanRate(), metric.oneMinuteRate(),
180 | metric.fiveMinuteRate(), metric.fifteenMinuteRate()};
181 | for (int i = 0; i < values.length; ++i) {
182 | sendDouble(meterDims[i], values[i]);
183 | }
184 | }
185 |
186 | protected void send(Summarizable metric) {
187 | double[] values = {metric.min(), metric.max(), metric.mean(), metric.stdDev()};
188 | for (int i = 0; i < values.length; ++i) {
189 | sendDouble(summarizableDims[i], values[i]);
190 | }
191 | }
192 |
193 | protected void send(Sampling metric) {
194 | final Snapshot snapshot = metric.getSnapshot();
195 | double[] values = {snapshot.getMedian(), snapshot.get75thPercentile(), snapshot.get95thPercentile(),
196 | snapshot.get98thPercentile(), snapshot.get99thPercentile(), snapshot.get999thPercentile()};
197 | for (int i = 0; i < values.length; ++i) {
198 | sendDouble(SamplingDims[i], values[i]);
199 | }
200 | }
201 |
202 | private void sendDouble(Dimension dim, double value) {
203 | if (dimensions.contains(dim)) {
204 | statsd.gauge(parser.getName() + "." + dim.getDisplayName(), value, parser.getTags());
205 | }
206 | }
207 |
208 | private Boolean isDoubleParsable(final Object o) {
209 | if (o instanceof Float) {
210 | return true;
211 | } else if (o instanceof Double) {
212 | return true;
213 | } else if (o instanceof Byte) {
214 | return false;
215 | } else if (o instanceof Short) {
216 | return false;
217 | } else if (o instanceof Integer) {
218 | return false;
219 | } else if (o instanceof Long) {
220 | return false;
221 | } else if (o instanceof BigInteger) {
222 | return false;
223 | } else if (o instanceof BigDecimal) {
224 | return true;
225 | }
226 | return null;
227 | }
228 | }
229 |
--------------------------------------------------------------------------------
/src/test/java/com/airbnb/kafka/kafka08/StatsdMetricsReporterTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.kafka.kafka08;
18 |
19 | import kafka.utils.VerifiableProperties;
20 | import org.junit.Before;
21 | import org.junit.Test;
22 |
23 | import java.util.Properties;
24 |
25 | import static org.easymock.EasyMock.*;
26 | import static org.junit.Assert.*;
27 |
28 | public class StatsdMetricsReporterTest {
29 |
30 | private VerifiableProperties properties;
31 |
32 | @Before
33 | public void init() {
34 | properties = createMock(VerifiableProperties.class);
35 | expect(properties.props()).andReturn(new Properties());
36 | expect(properties.getInt("kafka.metrics.polling.interval.secs", 10)).andReturn(11);
37 | expect(properties.getString("external.kafka.statsd.host", "localhost")).andReturn("127.0.0.1");
38 | expect(properties.getInt("external.kafka.statsd.port", 8125)).andReturn(1234);
39 | expect(properties.getString("external.kafka.statsd.metrics.prefix", "")).andReturn("foo");
40 | expect(properties.getString("external.kafka.statsd.metrics.exclude_regex",
41 | StatsdMetricsReporter.DEFAULT_EXCLUDE_REGEX)).andReturn("foo");
42 | expect(properties.getBoolean("external.kafka.statsd.tag.enabled", true)).andReturn(false);
43 | }
44 |
45 | @Test
46 | public void mbean_name_should_match() {
47 | String name = new StatsdMetricsReporter().getMBeanName();
48 | assertEquals("kafka:type=com.airbnb.kafka.kafka08.StatsdMetricsReporter", name);
49 | }
50 |
51 | @Test
52 | public void init_should_start_reporter_when_enabled() {
53 | expect(properties.getBoolean("external.kafka.statsd.reporter.enabled", false)).andReturn(true);
54 |
55 | replay(properties);
56 | StatsdMetricsReporter reporter = new StatsdMetricsReporter();
57 | assertFalse("reporter should not be running", reporter.isRunning());
58 | reporter.init(properties);
59 | assertTrue("reporter should be running once #init has been invoked", reporter.isRunning());
60 |
61 | verify(properties);
62 | }
63 |
64 | @Test
65 | public void init_should_not_start_reporter_when_disabled() {
66 | expect(properties.getBoolean("external.kafka.statsd.reporter.enabled", false)).andReturn(false);
67 |
68 | replay(properties);
69 | StatsdMetricsReporter reporter = new StatsdMetricsReporter();
70 | assertFalse("reporter should not be running", reporter.isRunning());
71 | reporter.init(properties);
72 | assertFalse("reporter should NOT be running once #init has been invoked", reporter.isRunning());
73 |
74 | verify(properties);
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/src/test/java/com/airbnb/kafka/kafka09/StatsdMetricsReporterTest.java:
--------------------------------------------------------------------------------
1 | package com.airbnb.kafka.kafka09;
2 |
3 | import com.airbnb.metrics.MetricInfo;
4 | import com.google.common.collect.ImmutableList;
5 | import com.google.common.collect.ImmutableMap;
6 | import com.google.common.collect.ImmutableSet;
7 | import com.timgroup.statsd.NonBlockingStatsDClient;
8 | import com.timgroup.statsd.StatsDClient;
9 | import java.util.ArrayList;
10 | import java.util.Collection;
11 | import java.util.HashMap;
12 | import java.util.Map;
13 |
14 | import java.util.stream.Collectors;
15 | import org.apache.kafka.common.Metric;
16 | import org.apache.kafka.common.MetricName;
17 | import org.apache.kafka.common.metrics.KafkaMetric;
18 | import org.junit.Assert;
19 | import org.junit.Before;
20 | import org.junit.Test;
21 |
22 | import static org.junit.Assert.assertFalse;
23 | import static org.junit.Assert.assertTrue;
24 | import static org.mockito.Mockito.atLeastOnce;
25 | import static org.mockito.Mockito.mock;
26 | import static org.mockito.Mockito.spy;
27 | import static org.mockito.Mockito.verify;
28 | import static org.mockito.Mockito.when;
29 |
30 | public class StatsdMetricsReporterTest {
31 | private final String TEST_METRIC_NAME = "test-metric";
32 | private final String TEST_METRIC_GROUP = "test-group";
33 | private final String TEST_METRIC_DESCRIPTION = "This is a test metric.";
34 |
35 |   private Map<String, String> configs;
36 |
37 | @Before
38 | public void init() {
39 |     configs = new HashMap<>();
40 | configs.put(StatsdMetricsReporter.STATSD_HOST, "127.0.0.1");
41 | configs.put(StatsdMetricsReporter.STATSD_PORT, "1234");
42 | configs.put(StatsdMetricsReporter.STATSD_METRICS_PREFIX, "foo");
43 | configs.put(StatsdMetricsReporter.STATSD_REPORTER_ENABLED, "false");
44 | }
45 |
46 | @Test
47 | public void init_should_start_reporter_when_enabled() {
48 | configs.put(StatsdMetricsReporter.STATSD_REPORTER_ENABLED, "true");
49 | StatsdMetricsReporter reporter = new StatsdMetricsReporter();
50 | assertFalse("reporter should not be running", reporter.isRunning());
51 | reporter.configure(configs);
52 | reporter.init(new ArrayList<KafkaMetric>());
53 | assertTrue("reporter should be running once #init has been invoked", reporter.isRunning());
54 | }
55 |
56 | @Test
57 | public void init_should_not_start_reporter_when_disabled() {
58 | configs.put(StatsdMetricsReporter.STATSD_REPORTER_ENABLED, "false");
59 | StatsdMetricsReporter reporter = new StatsdMetricsReporter();
60 | assertFalse("reporter should not be running", reporter.isRunning());
61 | reporter.configure(configs);
62 | reporter.init(new ArrayList<KafkaMetric>());
63 | assertFalse("reporter should NOT be running once #init has been invoked", reporter.isRunning());
64 | }
65 |
66 | @Test
67 | public void testMetricsReporter_sameMetricNamesWithDifferentTags() {
68 | StatsdMetricsReporter reporter = spy(new StatsdMetricsReporter());
69 | reporter.configure(ImmutableMap.of(StatsdMetricsReporter.STATSD_REPORTER_ENABLED, "true"));
70 | StatsDClient mockStatsDClient = mock(NonBlockingStatsDClient.class);
71 | when(reporter.createStatsd()).thenReturn(mockStatsDClient);
72 |
73 | KafkaMetric testMetricWithTag = generateMockKafkaMetric(TEST_METRIC_NAME, TEST_METRIC_GROUP, TEST_METRIC_DESCRIPTION, ImmutableMap.of("test-key", "test-value"));
74 | reporter.init(ImmutableList.of(testMetricWithTag));
75 | Assert.assertEquals(ImmutableSet.of(testMetricWithTag), getAllKafkaMetricsHelper(reporter));
76 |
77 | KafkaMetric otherTestMetricWithTag = generateMockKafkaMetric(TEST_METRIC_NAME, TEST_METRIC_GROUP, TEST_METRIC_DESCRIPTION, ImmutableMap.of("another-test-key", "another-test-value"));
78 | reporter.metricChange(otherTestMetricWithTag);
79 | Assert.assertEquals(ImmutableSet.of(testMetricWithTag, otherTestMetricWithTag), getAllKafkaMetricsHelper(reporter));
80 |
81 | reporter.underlying.run();
82 | reporter.registry.getAllMetricInfo().forEach(info -> verify(mockStatsDClient, atLeastOnce()).gauge(info.getName(), info.getMetric().value(), info.getTags()));
83 | }
84 |
85 | private KafkaMetric generateMockKafkaMetric(String name, String group, String description, Map<String, String> tags) {
86 | KafkaMetric mockMetric = mock(KafkaMetric.class);
87 | when(mockMetric.metricName()).thenReturn(new MetricName(name, group, description, tags));
88 | return mockMetric;
89 | }
90 |
91 | private static Collection<Metric> getAllKafkaMetricsHelper(StatsdMetricsReporter reporter) {
92 | return reporter.registry.getAllMetricInfo().stream().map(MetricInfo::getMetric).collect(Collectors.toSet());
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/src/test/java/com/airbnb/metrics/DimensionTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import org.junit.Test;
20 |
21 | import java.util.EnumSet;
22 | import java.util.Properties;
23 |
24 | import static org.junit.Assert.*;
25 |
26 | /**
27 | *
28 | */
29 | public class DimensionTest {
30 |
31 | @Test
32 | public void create_from_properties() {
33 | String prefix = "foo.";
34 | Properties p = new Properties();
35 | p.setProperty(prefix + "count", "true");
36 | p.setProperty(prefix + "meanRate", "false");
37 | EnumSet<Dimension> dimensions = Dimension.fromProperties(p, prefix);
38 |
39 | assertTrue(dimensions.contains(Dimension.count));
40 | assertFalse(dimensions.contains(Dimension.meanRate));
41 | assertEquals(Dimension.rate1m.displayName, "1MinuteRate");
42 | }
43 |
44 | }
45 |
--------------------------------------------------------------------------------
/src/test/java/com/airbnb/metrics/ExcludeMetricPredicateTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import com.yammer.metrics.core.MetricName;
20 | import org.junit.Test;
21 |
22 | import static org.junit.Assert.assertFalse;
23 | import static org.junit.Assert.assertTrue;
24 |
25 | /**
26 | *
27 | */
28 | public class ExcludeMetricPredicateTest {
29 |
30 | @Test
31 | public void exclude() {
32 | ExcludeMetricPredicate predicate = new ExcludeMetricPredicate("my\\.package\\.MyClass.*");
33 | // String group, String type, String name, String scope
34 | assertFalse(predicate.matches(new MetricName("my.package", "MyClass", "some_name", "some_scope"), null));
35 | assertTrue(predicate.matches(new MetricName("another.package", "MyClass", "some_name", "some_scope"), null));
36 | }
37 |
38 | @Test
39 | public void exclude2() {
40 | ExcludeMetricPredicate predicate = new ExcludeMetricPredicate("(kafka\\.consumer\\.FetchRequestAndResponseMetrics.*)|(.*ReplicaFetcherThread.*)|(kafka\\.server\\.FetcherLagMetrics\\..*)|(kafka\\.log\\.Log\\..*)|(kafka\\.cluster\\.Partition\\..*)");
41 | assertFalse(predicate.matches(new MetricName("kafka.consumer", "FetchRequestAndResponseMetrics", "some_name", "some_scope"), null));
42 | assertFalse(predicate.matches(new MetricName("kafka.server", "FetcherStats", "ReplicaFetcherThread", "some_scope"), null));
43 | assertTrue(predicate.matches(new MetricName("kafka.server", "ReplicaManager", "IsrExpandsPerSec", "some_scope"), null));
44 |
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/src/test/java/com/airbnb/metrics/KafkaStatsDReporterTest.java:
--------------------------------------------------------------------------------
1 | package com.airbnb.metrics;
2 |
3 | import com.timgroup.statsd.StatsDClient;
4 | import java.util.HashMap;
5 | import org.apache.kafka.common.Metric;
6 | import org.apache.kafka.common.MetricName;
7 | import org.junit.Before;
8 | import org.junit.Test;
9 | import org.mockito.Matchers;
10 | import org.mockito.Mock;
11 | import org.mockito.MockitoAnnotations;
12 |
13 | import static org.mockito.Mockito.verify;
14 |
15 | public class KafkaStatsDReporterTest {
16 | @Mock
17 | private StatsDClient statsD;
18 | private KafkaStatsDReporter reporter;
19 | private StatsDMetricsRegistry registry;
20 |
21 | @Before
22 | public void init() throws Exception {
23 | MockitoAnnotations.initMocks(this);
24 | registry = new StatsDMetricsRegistry();
25 | reporter = new KafkaStatsDReporter(
26 | statsD,
27 | registry
28 | );
29 | }
30 |
31 | protected void addMetricAndRunReporter(
32 | Metric metric,
33 | String metricName,
34 | String tag
35 | ) throws Exception {
36 | try {
37 | registry.register(metric.metricName(), new MetricInfo(metric, metricName, tag));
38 | reporter.run();
39 | } finally {
40 | reporter.shutdown();
41 | }
42 | }
43 |
44 | @Test
45 | public final void sendDoubleGauge() throws Exception {
46 | final double value = 10.11;
47 | Metric metric = new Metric() {
48 | @Override
49 | public MetricName metricName() {
50 | return new MetricName("test-metric", "group", "", new HashMap<>());
51 | }
52 |
53 | @Override
54 | public double value() {
55 | return value;
56 | }
57 |
58 | // This is a new method added to the `Metric` interface from Kafka v1.0.0,
59 | // which we need for tests on later Kafka versions to pass.
60 | public Object metricValue() {
61 | return value;
62 | }
63 | };
64 |
65 | addMetricAndRunReporter(metric, "foo", "bar");
66 | verify(statsD).gauge(Matchers.eq("foo"), Matchers.eq(value), Matchers.eq("bar"));
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/src/test/java/com/airbnb/metrics/MetricNameFormatterTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import com.yammer.metrics.core.MetricName;
20 | import org.junit.Test;
21 |
22 | import static com.airbnb.metrics.MetricNameFormatter.format;
23 | import static com.airbnb.metrics.MetricNameFormatter.formatWithScope;
24 | import static org.junit.Assert.assertEquals;
25 |
26 | /**
27 | *
28 | */
29 | public class MetricNameFormatterTest {
30 |
31 | @Test
32 | public void testFormat() throws Exception {
33 | assertEquals(
34 | format(new MetricName("kafka.common", "AppInfo", "Version", null, "kafka.common:type=AppInfo,name=Version")),
35 | "kafka.common.AppInfo.Version");
36 | assertEquals(
37 | format(new MetricName("kafka.common", "AppInfo", "Version", "my_scope", "kafka.common:type=AppInfo,name=Version")),
38 | "kafka.common.AppInfo.Version");
39 | }
40 |
41 | @Test
42 | public void testFormatWithScope() throws Exception {
43 | assertEquals(
44 | formatWithScope(new MetricName("kafka.common", "AppInfo", "Version", null, "kafka.common:type=AppInfo,name=Version")),
45 | "kafka.common.AppInfo.Version");
46 | assertEquals(
47 | formatWithScope(new MetricName("kafka.common", "AppInfo", "Version", "", "kafka.common:type=AppInfo,name=Version")),
48 | "kafka.common.AppInfo.Version");
49 | assertEquals(
50 | formatWithScope(new MetricName("kafka.common", "AppInfo", "Version", "my_scope", "kafka.common:type=AppInfo,name=Version")),
51 | "kafka.common.AppInfo.my_scope.Version");
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/src/test/java/com/airbnb/metrics/ParserTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 | import com.yammer.metrics.core.MetricName;
20 | import org.junit.Test;
21 |
22 | import static org.junit.Assert.assertArrayEquals;
23 | import static org.junit.Assert.assertEquals;
24 |
25 | /**
26 | *
27 | */
28 | public class ParserTest {
29 |
30 | @Test
31 | public void testParseTagInMBeanNameWithSuffix() throws Exception {
32 | MetricName name = new MetricName("kafka.producer",
33 | "ProducerRequestMetrics", "ProducerRequestSize",
34 | "clientId.group7", "kafka.producer:type=ProducerRequestMetrics,name=ProducerRequestSize,clientId=group7");
35 | Parser p = new ParserForTagInMBeanName();
36 | p.parse(name);
37 | assertEquals(p.getName(), "kafka.producer.ProducerRequestMetrics.ProducerRequestSize_all");
38 | assertArrayEquals(p.getTags(), new String[]{"clientId:group7"});
39 | }
40 |
41 | @Test
42 | public void testParseTagInMBeanNameWithSuffixWithoutClientId() throws Exception {
43 | MetricName name = new MetricName("kafka.producer",
44 | "ProducerRequestMetrics", "ProducerRequestSize",
45 | null, "kafka.producer:type=ProducerRequestMetrics,name=ProducerRequestSize");
46 | Parser p = new ParserForTagInMBeanName();
47 | p.parse(name);
48 | assertEquals(p.getName(), "kafka.producer.ProducerRequestMetrics.ProducerRequestSize_all");
49 | assertArrayEquals(p.getTags(), new String[]{"clientId:unknown"});
50 | }
51 |
52 | @Test
53 | public void testParseTagInMBeanNameWithoutSuffix() throws Exception {
54 | MetricName name = new MetricName("kafka.producer",
55 | "ProducerRequestMetrics", "ProducerRequestSize",
56 | "clientId.group7.brokerPort.9092.brokerHost.10_1_152_206",
57 | "kafka.producer:type=ProducerRequestMetrics,name=ProducerRequestSize,clientId=group7,brokerPort=9092,brokerHost=10.1.152.206");
58 | Parser p = new ParserForTagInMBeanName();
59 | p.parse(name);
60 | assertEquals(p.getName(), "kafka.producer.ProducerRequestMetrics.ProducerRequestSize");
61 | assertArrayEquals(p.getTags(), new String[]{"clientId:group7", "brokerPort:9092", "brokerHost:10.1.152.206"});
62 | }
63 |
64 | @Test
65 | public void testParseTagInMBeanNameWithoutClientId() throws Exception {
66 | MetricName name = new MetricName("kafka.producer",
67 | "ProducerRequestMetrics", "ProducerRequestSize",
68 | "brokerPort.9092.brokerHost.10_1_152_206", "kafka.producer:type=ProducerRequestMetrics,name=ProducerRequestSize,brokerPort=9092,brokerHost=10.1.152.206");
69 | Parser p = new ParserForTagInMBeanName();
70 | p.parse(name);
71 | assertEquals(p.getName(), "kafka.producer.ProducerRequestMetrics.ProducerRequestSize");
72 | assertArrayEquals(p.getTags(), new String[]{"clientId:unknown", "brokerPort:9092", "brokerHost:10.1.152.206"});
73 | }
74 |
75 | @Test
76 | public void testParseTagInMBeanNameWithoutSuffixForConsumer() throws Exception {
77 | MetricName name = new MetricName("kafka.consumer",
78 | "ZookeeperConsumerConnector", "ZooKeeperCommitsPerSec",
79 | "clientId.group7",
80 | "kafka.consumer:type=ZookeeperConsumerConnector,name=ZooKeeperCommitsPerSec,clientId=group7");
81 | Parser p = new ParserForTagInMBeanName();
82 | p.parse(name);
83 | assertEquals(p.getName(), "kafka.consumer.ZookeeperConsumerConnector.ZooKeeperCommitsPerSec");
84 | assertArrayEquals(p.getTags(), new String[]{"clientId:group7"});
85 | }
86 |
87 | @Test
88 | public void testParseTagInMBeanNameNoTag() throws Exception {
89 | MetricName name = new MetricName("kafka.server",
90 | "ReplicaManager", "LeaderCount",
91 | null, "kafka.server:type=ReplicaManager,name=LeaderCount");
92 | Parser p = new ParserForTagInMBeanName();
93 | p.parse(name);
94 | assertEquals(p.getName(), "kafka.server.ReplicaManager.LeaderCount");
95 | assertArrayEquals(p.getTags(), new String[]{});
96 | }
97 |
98 | @Test
99 | public void testParseNoTag() throws Exception {
100 | MetricName name = new MetricName("kafka.producer",
101 | "ProducerRequestMetrics", "group7-AllBrokersProducerRequestSize");
102 | Parser p = new ParserForNoTag();
103 | p.parse(name);
104 | assertEquals(p.getName(), "kafka.producer.ProducerRequestMetrics.group7-AllBrokersProducerRequestSize");
105 | assertArrayEquals(p.getTags(), new String[]{});
106 | }
107 |
108 | }
109 |
--------------------------------------------------------------------------------
/src/test/java/com/airbnb/metrics/StatsDReporterTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2015. Airbnb.com
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package com.airbnb.metrics;
18 |
19 |
20 | import java.util.EnumSet;
21 | import java.util.Random;
22 | import java.util.concurrent.Callable;
23 | import java.util.concurrent.TimeUnit;
24 |
25 | import com.timgroup.statsd.StatsDClient;
26 | import com.yammer.metrics.core.Clock;
27 | import com.yammer.metrics.core.Counter;
28 | import com.yammer.metrics.core.Gauge;
29 | import com.yammer.metrics.core.Histogram;
30 | import com.yammer.metrics.core.Meter;
31 | import com.yammer.metrics.core.Metered;
32 | import com.yammer.metrics.core.Metric;
33 | import com.yammer.metrics.core.MetricName;
34 | import com.yammer.metrics.core.MetricProcessor;
35 | import com.yammer.metrics.core.MetricsRegistry;
36 | import com.yammer.metrics.core.Sampling;
37 | import com.yammer.metrics.core.Summarizable;
38 | import com.yammer.metrics.core.Timer;
39 | import com.yammer.metrics.reporting.AbstractPollingReporter;
40 | import com.yammer.metrics.stats.Snapshot;
41 | import org.junit.Before;
42 | import org.junit.Test;
43 | import org.mockito.Matchers;
44 | import org.mockito.Mock;
45 | import org.mockito.MockitoAnnotations;
46 | import org.mockito.invocation.InvocationOnMock;
47 | import org.mockito.stubbing.Answer;
48 | import org.mockito.stubbing.Stubber;
49 |
50 | import static org.junit.Assert.assertTrue;
51 | import static org.mockito.Matchers.any;
52 | import static org.mockito.Mockito.doAnswer;
53 | import static org.mockito.Mockito.mock;
54 | import static org.mockito.Mockito.never;
55 | import static org.mockito.Mockito.verify;
56 | import static org.mockito.Mockito.when;
57 |
58 | public class StatsDReporterTest {
59 |
60 | private static final String METRIC_BASE_NAME = "java.lang.Object.metric";
61 | @Mock
62 | private Clock clock;
63 | @Mock
64 | private StatsDClient statsD;
65 | private AbstractPollingReporter reporter;
66 | private TestMetricsRegistry registry;
67 |
68 | protected static class TestMetricsRegistry extends MetricsRegistry {
69 | public <T extends Metric> T add(MetricName name, T metric) {
70 | return getOrAdd(name, metric);
71 | }
72 | }
73 |
74 | @Before
75 | public void init() throws Exception {
76 | MockitoAnnotations.initMocks(this);
77 | when(clock.tick()).thenReturn(1234L);
78 | when(clock.time()).thenReturn(5678L);
79 | registry = new TestMetricsRegistry();
80 | reporter = new StatsDReporter(registry,
81 | statsD,
82 | EnumSet.allOf(Dimension.class)
83 | );
84 | }
85 |
86 | @Test
87 | public void isTaggedTest() {
88 | registry.add(new MetricName("kafka.common", "AppInfo", "Version", null, "kafka.common:type=AppInfo,name=Version"),
89 | new Gauge<String>() {
90 | public String value() {
91 | return "0.8.2";
92 | }
93 | });
94 | assertTrue(((StatsDReporter) reporter).isTagged(registry.allMetrics()));
95 | }
96 |
97 | protected <T extends Metric> void addMetricAndRunReporter(Callable<T> action) throws Exception {
98 | // Invoke the callable to trigger (ie, mark()/inc()/etc) and return the metric
99 | final T metric = action.call();
100 | try {
101 | // Add the metric to the registry, run the reporter and flush the result
102 | registry.add(new MetricName(Object.class, "metric"), metric);
103 | reporter.run();
104 | } finally {
105 | reporter.shutdown();
106 | }
107 | }
108 |
109 | private void verifySend(String metricNameSuffix, double metricValue) {
110 | verify(statsD).gauge(METRIC_BASE_NAME + "." + metricNameSuffix,
111 | metricValue);
112 | }
113 |
114 | private void verifySend(double metricValue) {
115 | verify(statsD).gauge(METRIC_BASE_NAME, metricValue);
116 | }
117 |
118 | private void verifySend(long metricValue) {
119 | verify(statsD).gauge(METRIC_BASE_NAME, metricValue);
120 | }
121 |
122 | private void verifySend(String metricNameSuffix, String metricValue) {
123 | verify(statsD).gauge(METRIC_BASE_NAME + "." + metricNameSuffix,
124 | Double.valueOf(metricValue));
125 | }
126 |
127 | public void verifyTimer() {
128 | verifySend("count", "1");
129 | verifySend("meanRate", "2.00");
130 | verifySend("1MinuteRate", "1.00");
131 | verifySend("5MinuteRate", "5.00");
132 | verifySend("15MinuteRate", "15.00");
133 | verifySend("min", "1.00");
134 | verifySend("max", "3.00");
135 | verifySend("mean", "2.00");
136 | verifySend("stddev", "1.50");
137 | verifySend("median", "0.50");
138 | verifySend("p75", "0.7505");
139 | verifySend("p95", "0.9509");
140 | verifySend("p98", "0.98096");
141 | verifySend("p99", "0.99098");
142 | verifySend("p999", "0.999998");
143 | }
144 |
145 | public void verifyMeter() {
146 | verifySend("count", 1);
147 | verifySend("meanRate", 2.00);
148 | verifySend("1MinuteRate", 1.00);
149 | verifySend("5MinuteRate", 5.00);
150 | verifySend("15MinuteRate", 15.00);
151 | }
152 |
153 | public void verifyHistogram() {
154 | verifySend("min", 1.00);
155 | verifySend("max", 3.00);
156 | verifySend("mean", 2.00);
157 | verifySend("stddev", 1.50);
158 | verifySend("median", 0.50);
159 | verifySend("p75", "0.7505");
160 | verifySend("p95", "0.9509");
161 | verifySend("p98", "0.98096");
162 | verifySend("p99", "0.99098");
163 | verifySend("p999", "0.999998");
164 | }
165 |
166 | public void verifyCounter(long count) {
167 | verifySend(count);
168 | }
169 |
170 | @Test
171 | public final void counter() throws Exception {
172 | final long count = new Random().nextInt(Integer.MAX_VALUE);
173 | addMetricAndRunReporter(
174 | new Callable<Counter>() {
175 | @Override
176 | public Counter call() throws Exception {
177 | return createCounter(count);
178 | }
179 | });
180 | verifyCounter(count);
181 | }
182 |
183 | @Test
184 | public final void histogram() throws Exception {
185 | addMetricAndRunReporter(
186 | new Callable<Histogram>() {
187 | @Override
188 | public Histogram call() throws Exception {
189 | return createHistogram();
190 | }
191 | });
192 | verifyHistogram();
193 | }
194 |
195 | @Test
196 | public final void meter() throws Exception {
197 | addMetricAndRunReporter(
198 | new Callable<Meter>() {
199 | @Override
200 | public Meter call() throws Exception {
201 | return createMeter();
202 | }
203 | });
204 | verifyMeter();
205 | }
206 |
207 | @Test
208 | public final void timer() throws Exception {
209 | addMetricAndRunReporter(
210 | new Callable<Timer>() {
211 | @Override
212 | public Timer call() throws Exception {
213 | return createTimer();
214 | }
215 | });
216 | verifyTimer();
217 | }
218 |
219 | @Test
220 | public final void longGauge() throws Exception {
221 | final long value = 0xdeadbeef;
222 | addMetricAndRunReporter(
223 | new Callable<Gauge<Long>>() {
224 | @Override
225 | public Gauge