├── .gitignore
├── HEADER
├── LICENSE
├── NOTICE
├── README.md
├── build.gradle
├── core
│   ├── build.gradle
│   └── src
│       ├── main
│       │   ├── avro
│       │   │   └── metric_v1.avsc
│       │   └── java
│       │       └── io
│       │           └── amient
│       │               └── kafka
│       │                   └── metrics
│       │                       ├── AutoJsonDeserializer.java
│       │                       ├── InternalAvroSerde.java
│       │                       ├── JMXScanner.java
│       │                       ├── JMXScannerTask.java
│       │                       ├── MeasurementDeserializer.java
│       │                       ├── MeasurementFormatter.java
│       │                       ├── MeasurementPublisher.java
│       │                       ├── MeasurementSerializer.java
│       │                       └── ProducerPublisher.java
│       └── test
│           ├── java
│           │   └── JmxScannerMain.java
│           └── resources
│               └── test.properties
├── discovery
│   ├── build.gradle
│   └── src
│       └── main
│           ├── java
│           │   └── io
│           │       └── amient
│           │           └── kafka
│           │               └── metrics
│           │                   ├── Broker.java
│           │                   ├── Dashboard.java
│           │                   └── DiscoveryTool.java
│           └── resources
│               └── log4j.properties
├── doc
│   ├── discovery-example-3-brokers.png
│   ├── kafka-metrics-overview.svg
│   ├── kafka-metrics-scenario0.png
│   ├── kafka-metrics-scenario0.svg
│   ├── kafka-metrics-scenario1.png
│   ├── kafka-metrics-scenario1.svg
│   ├── kafka-metrics-scenario2.png
│   ├── kafka-metrics-scenario2.svg
│   ├── kafka-metrics-scenario3.png
│   ├── kafka-metrics-scenario3.svg
│   └── metrics.png
├── docker-compose.yml
├── docker-instance.sh
├── gradle
│   └── wrapper
│       ├── gradle-wrapper.jar
│       └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
├── influxdb-loader
│   ├── build.gradle
│   ├── conf
│   │   ├── local-jmx.properties
│   │   └── local-topic.properties
│   └── src
│       └── main
│           ├── java
│           │   └── io
│           │       └── amient
│           │           └── kafka
│           │               └── metrics
│           │                   ├── ConsumerMetrics.java
│           │                   ├── InfluxDbLoaderMain.java
│           │                   └── InfluxDbPublisher.java
│           └── resources
│               └── log4j.properties
├── metrics-agent
│   ├── build.gradle
│   └── src
│       └── main
│           ├── java
│           │   └── io
│           │       └── amient
│           │           └── kafka
│           │               └── metrics
│           │                   └── KafkaMetricsAgent.java
│           └── resources
│               └── log4j.properties
├── metrics-connect
│   ├── build.gradle
│   └── src
│       ├── main
│       │   └── java
│       │       └── io
│       │           └── amient
│       │               └── kafka
│       │                   └── metrics
│       │                       ├── InfluxDbSinkConnector.java
│       │                       ├── InfluxDbSinkTask.java
│       │                       └── MeasurementConverter.java
│       └── test
│           └── java
│               └── io
│                   └── amient
│                       └── kafka
│                           └── metrics
│                               └── MeasurementConverterTest.java
├── metrics-reporter
│   ├── build.gradle
│   └── src
│       ├── main
│       │   └── java
│       │       └── io
│       │           └── amient
│       │               └── kafka
│       │                   └── metrics
│       │                       ├── ConsumerGroupReporter.java
│       │                       ├── ConsumerGroupReporterMBean.java
│       │                       ├── GroupMetrics.java
│       │                       ├── KafkaMetricsProcessor.java
│       │                       ├── KafkaMetricsProcessorBuilder.java
│       │                       ├── TopicReporter.java
│       │                       └── TopicReporterMBean.java
│       └── test
│           └── java
│               └── io
│                   └── amient
│                       └── kafka
│                           └── metrics
│                               └── InfluxDbPublisherTest.java
├── settings.gradle
└── zzz
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea
3 | .gradle
4 | *.iml
5 | build/
6 | classes/
7 | .data/
8 | out/
9 |
10 |
--------------------------------------------------------------------------------
/HEADER:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | This product is based on ideas from stealth.ly metrics-kafka, riemann and influxDb.
2 |
3 | Packaged version of this software contains:
4 |
5 | - Apache Kafka (Apache 2.0 License)
6 | - Apache Avro (Apache 2.0 License)
7 | - kafka-avro-serializer Copyright 2014 Confluent Inc. (Apache 2.0 License)
8 | - Yammer Metrics (Apache 2.0 License)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kafka Metrics
2 |
3 | This is a system for real-time aggregation of metrics from large distributed systems. Rather than replacing existing
4 | monitoring solutions it fulfills the role of `real-time distributed aggregation` element to combine metrics from
5 | multiple systems, with some out-of-the-box features for data streams pipelines based on Apache Kafka.
6 |
7 | ## Contents
8 |
9 | 1. [Overview](#overview)
10 | - [Architecture](#overview)
11 | - [Basic Scenario](#scenario0)
12 | - [Multi-Server Scenario](#scenario1)
13 | - [Multi-Data-Centre Scenario](#scenario3)
14 | - [Multi-Environment Scenario](#scenario2)
15 | 2. [Quick Start](#quick-start)
16 | 3. [Modules Reference](#modules-reference)
17 | - [Cluster Discovery Tool](#usage-discovery)
18 | - [InfluxDB Loader](#usage-loader)
19 | - [Metrics Connect](#usage-connect)
20 | - [Metrics Agent](#metrics-agent)
21 | - [TopicReporter](#usage-reporter)
22 | - [Usage in Kafka Broker, Kafka Prism, Kafka Producer (pre 0.8.2), Kafka Consumer (pre 0.9)](#usage-reporter-kafka-old)
23 | - [Usage in Kafka NEW Producer (0.8.2+) and Consumer (0.9+)](#usage-reporter-kafka-new)
24 | - [Usage in any application using dropwizard metrics (formerly yammer metrics)](#usage-reporter-dropwizard)
25 | - [Usage in Samza](#usage-samza)
26 | 4. [Configuration](#configuration)
27 | - [InfluxDB Configuration](#configuration-influxdb)
28 | - [JMX Scanner Configuration](#configuration-scanner)
29 | - [Metrics Producer Configuration](#configuration-producer)
30 | - [Metrics Consumer Configuration](#configuration-consumer)
31 | 5. [Operations & Troubleshooting](#operations)
32 | 6. [Development](#development)
33 |
34 |
35 |
36 | ## Overview
37 |
38 |
39 | Kafka Metrics is a set of libraries and runtime modules that can be deployed in various configurations and can be used
40 | as an **A)** out-of-the-box monitoring for data streams infrastructures built with Apache Kafka including automatic discovery
41 | and configuration for existing Kafka clusters **B)** a framework for monitoring distributed systems in general using Apache Kafka infrastructure as a transport layer.
42 |
43 | The aim of the design is to have small composable modules that can be deployed by configuration to cover use cases ranging
44 | from quick, non-intrusive inspection of existing Kafka clusters and stream pipelines, to massive-scale purpose-built
45 | monitoring, detection and alerting infrastructure for distributed systems in general.
46 |
47 | 
48 |
49 | There are several ways in which the aggregation of metrics can be achieved using one or more of the modules.
50 |
51 |
52 |
53 | ### Basic Scenario
54 |
55 |
56 | For smaller systems consisting of components on the same network or simply a localhost, direct JMX scanner tasks can be
57 | configured for each JMX application. This method doesn't require including any extra code in the monitored applications
58 | as long as they already expose JMX MBeans, and in a local environment the Kafka topic can also be omitted.
59 |
60 | 
61 |
62 |
63 |
64 | ### Multi-Server Scenario
65 |
66 |
67 | For bigger systems, where metrics from several hosts need to be aggregated or in cases where more fault-tolerant
68 | collection of metrics is required, a combination of the pluggable TopicReporter or JMX Metrics Agent and a Kafka topic can
69 | be deployed by configuration. The JMX Scanner used in the basic scenario is replaced with the InfluxDB Loader, which is a
70 | Kafka consumer that reads measurements from the metrics topic and writes them into InfluxDB.
71 |
72 |
73 | 
74 |
75 |
76 |
77 | ### Multi-Data-Centre Scenario
78 |
79 |
80 | For multi-DC, potentially global deployments, where metrics from several disparate clusters need to be collected, each
81 | cluster has its agent which publishes into a local metrics topic and one of the existing mirroring components
82 | (Kafka Prism, Kafka Mirror Maker, ...) is deployed to aggregate the local metrics topics into a single aggregated stream
83 | providing real-time monitoring of the entire system.
84 |
85 | 
86 |
87 |
88 |
89 | ### Multi-Environment Scenario
90 |
91 |
92 | Finally, in heterogeneous environments, where different kinds of application and infrastructure stacks exist,
93 | any JMX-enabled or Yammer-enabled application can be plugged in by configuration.
94 |
95 | ***For non-JVM applications, or for JVM applications that do not expose JMX MBeans, there is work in progress on a
96 | REST Metrics Agent which can receive HTTP PUT requests and which can be deployed in all scenarios either with or without
97 | the metrics topic.***
98 |
99 | 
100 |
101 |
102 |
103 |
104 | ## Quick-start example with existing Kafka cluster using discovery module and auto-generated dashboard
105 |
106 |
107 | First we need to build the project from source which requires at least `java 1.7` installed on your system:
108 |
109 | ./gradlew build
110 |
111 | There is a docker-compose.yml file that contains grafana, influxdb and kapacitor images and a small script
112 | that starts and integrates them together:
113 |
114 | ./docker-instance.sh
115 |
116 | The Grafana UI should now be exposed at `http://localhost:3000` - under the Data Sources tab there should also be one item
117 | named 'Kafka Metrics InfluxDB'. The next command will discover all the brokers of the local Kafka cluster
118 | by looking into ZooKeeper, but you can replace the zookeeper connect string with your own:
119 |
120 | ./discovery/build/scripts/discovery --zookeeper "127.0.0.1:2181" --dashboard "my-kafka-cluster" \
121 | --dashboard-path $PWD/.data/grafana/dashboards --interval 25 \
122 | --influxdb "http://root:root@localhost:8086" | ./influxdb-loader/build/scripts/influxdb-loader
123 |
124 | The dashboard should now be accessible at this URL:
125 |
126 | http://localhost:3000/dashboard/file/my-kafka-cluster.json
127 |
128 | For a cluster of 3 brokers it might look like this:
129 |
130 | 
131 |
132 |
133 |
134 | ## Modules Reference
135 |
136 |
137 |
138 |
139 | ### Cluster Discovery Tool
140 |
141 |
142 | The Metrics Discovery tool can be used for generating configs and dashboards for existing Kafka clusters. It uses
143 | a ZooKeeper client and generates Grafana dashboards as JSON files, as well as configurations for other Kafka Metrics modules,
144 | on STDOUT. The output configuration can be piped into one of the runtime modules, e.g. the InfluxDB Loader
145 | or the Metrics Agent. It is a Java application and first has to be built with the following command:
146 |
147 | ./gradlew :discovery:build
148 |
149 | #### Example usage for local Kafka cluster and InfluxDB
150 |
151 | ./discovery/build/scripts/discovery \
152 | --zookeeper "localhost:2181" \
153 | --dashboard "local-kafka-cluster" \
154 | --dashboard-path "./.data/grafana/dashboards" \
155 | --influxdb "http://root:root@localhost:8086" | ./influxdb-loader/build/scripts/influxdb-loader
156 |
157 | The above command discovers all the brokers that are part of the cluster and configures an influxdb-loader
158 | using a local instance of InfluxDB. It also generates a dashboard for the discovered cluster which
159 | will be stored in the default Kafka Metrics instance.
160 |
161 | #### Example usage for remote Kafka cluster with Metrics Agent
162 |
163 | On the Kafka Cluster:
164 |
165 | ./discovery/build/scripts/discovery \
166 | --zookeeper ":" \
167 | --dashboard "remote-kafka-cluster" \
168 | --topic "metrics" | ./metrics-agent/build/scripts/metrics-agent
169 |
170 | On the Kafka Metrics instance:
171 |
172 | ./discovery/build/scripts/discovery \
173 | --zookeeper ":" \
174 | --topic "metrics" \
175 | --dashboard "remote-kafka-cluster" \
176 | --dashboard-path "./.data/grafana/dashboards" \
177 | --influxdb "http://root:root@localhost:8086" | ./influxdb-loader/build/scripts/influxdb-loader
178 |
179 |
180 |
181 |
182 | ### InfluxDB Loader Usage
183 |
184 |
185 | The InfluxDB Loader is a Java application which writes measurements into an InfluxDB backend and which can be configured
186 | to scan the measurements from any number of JMX ports and Kafka metrics topics.
187 | In versions 0.9.+, the topic input functionality is replaced by the Metrics Connect module, which utilizes the Kafka Connect
188 | framework. To build an executable jar, run the following command:
189 |
190 | ./gradlew :influxdb-loader:build
191 |
192 | Once built, the loader can be launched with `./influxdb-loader/build/scripts/influxdb-loader` by passing it
193 | a path to a properties file containing the following configuration:
194 | - [InfluxDB Configuration](#configuration-influxdb) (required)
195 | - [JMX Scanner Configuration](#configuration-scanner) (at least one scanner or consumer is required)
196 | - [Metrics Consumer Configuration](#configuration-consumer) (at least one scanner or consumer is required)
197 |
198 | There are a few example config files under `influxdb-loader/conf` which explain how JMX scanners can be added.
199 | If you have a Kafka broker running locally which has a JMX server listening on port 19092 and docker instances of
200 | InfluxDB and Grafana running locally, you can use the following script and config file to collect the broker metrics:
201 |
202 | ./influxdb-loader/build/scripts/influxdb-loader influxdb-loader/conf/local-jmx.properties
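
For reference, a configuration for this kind of setup would look roughly like the following sketch (the keys are described under [InfluxDB Configuration](#configuration-influxdb) and [JMX Scanner Configuration](#configuration-scanner); the bundled `conf/local-jmx.properties` may differ in detail):

    influxdb.url=http://localhost:8086
    influxdb.database=metrics
    influxdb.username=root
    influxdb.password=root
    jmx.1.address=localhost:19092
    jmx.1.query.scope=kafka.*:*
    jmx.1.query.interval.s=10
    jmx.1.tag.host=localhost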
203 |
204 |
205 |
206 | ### Metrics Connect Usage
207 |
208 |
209 | This module builds on the Kafka Connect framework. The connector is a jar that needs to be built first:
210 |
211 | ./gradlew :metrics-connect:build
212 |
213 | The command above generates a jar that needs to be on the classpath of Kafka Connect, which can be achieved
214 | by copying the jar into the `libs` directory of the Kafka installation:
215 |
216 | cp ./metrics-connect/build/lib/metrics-connect-*.jar $KAFKA_HOME/libs
217 |
218 | Now you can launch, for example, the Kafka Connect standalone worker with the following example configurations:
219 |
220 | "$KAFKA_HOME/bin/connect-standalone.sh" "metrics-connect.properties" "influxdb-sink.properties" "hdfs-sink.properties"
221 |
222 | First, `metrics-connect.properties` is the connect worker configuration which doesn't specify any connectors
223 | but says that all connectors will use MeasurementConverter to deserialize measurement objects.
224 |
225 | bootstrap.servers=localhost:9092
226 | key.converter=org.apache.kafka.connect.storage.StringConverter
227 | value.converter=io.amient.kafka.metrics.MeasurementConverter
228 | ...
229 |
230 | The second configuration file is a sink connector that loads the measurements to InfluxDB, for example:
231 |
232 | name=metrics-influxdb-sink
233 | connector.class=io.amient.kafka.metrics.InfluxDbSinkConnector
234 |     topics=metrics
235 | ...
236 |
237 | The third configuration file is a sink connector that loads the measurements into HDFS, for example as Parquet files:
238 |
239 | name=metrics-hdfs-sink
240 | topics=metrics
241 | connector.class=io.confluent.connect.hdfs.HdfsSinkConnector
242 | format.class=io.confluent.connect.hdfs.parquet.ParquetFormat
243 | partitioner.class=io.confluent.connect.hdfs.partitioner.TimeBasedPartitioner
244 | path.format='d'=YYYY'-'MM'-'dd/
245 | partition.duration.ms=86400000
246 | locale=en
247 | timezone=Etc/GMT+1
248 | ...
249 |
250 |
251 |
252 | ### Metrics Agent Usage
253 |
254 |
255 | The purpose of the agent is to move expensive metrics collection like JMX polling closer to the application and publish
256 | these into the Kafka metrics topic. The JMX scanners can be configured in the same way as with the InfluxDB Loader, except
257 | the InfluxDB backend connection is replaced with a Kafka metrics producer which publishes the measurements into a Kafka
258 | topic. It is also a Java application and the executable jar can be built with the following command:
259 |
260 | ./gradlew :metrics-agent:build
261 |
262 | To run the agent instance, a configuration file is required, which should contain the following sections:
263 | - [JMX Scanner Configuration](#configuration-scanner)
264 | - [Metrics Producer Configuration](#configuration-producer)
265 |
266 | ./metrics-agent/build/scripts/kafka-metrics-agent
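
A configuration sketch for an agent instance, assuming a local broker JMX endpoint and a remote Kafka metrics topic (all values are illustrative; see [JMX Scanner Configuration](#configuration-scanner) and [Metrics Producer Configuration](#configuration-producer)):

    jmx.1.address=localhost:19092
    jmx.1.query.scope=kafka.*:*
    jmx.1.query.interval.s=10
    jmx.1.tag.host=my-host-01
    kafka.metrics.topic=metrics
    kafka.metrics.polling.interval=10s
    kafka.metrics.bootstrap.servers=kafka1:9092,kafka2:9092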
267 |
268 |
269 |
270 | ### Topic Reporter Usage
271 |
272 |
273 | The Topic Reporter provides a different way of collecting metrics from Kafka Brokers, Producers, Consumers and Samza
274 | processors - each of these exposes configuration options for plugging a reporter directly into their runtime, and the
275 | class `io.amient.kafka.metrics.TopicReporter` can be used in any of them. It translates the metrics into Kafka Metrics
276 | measurements and publishes them into a topic.
277 |
278 | This reporter publishes all the metrics to a configured, most often local, Kafka topic `metrics`. Due to the different stages of
279 | maturity of the various Kafka components, watch out for subtle differences when adding the TopicReporter class. To be able to
280 | use the reporter as a plug-in for Kafka brokers and tools you need to put the packaged jar on their classpath, which for a
281 | Kafka broker means putting it in the Kafka `/libs` directory:
282 |
283 | ./gradlew :metrics-reporter:build
284 |     cp ./metrics-reporter/build/lib/metrics-reporter-*.jar $KAFKA_HOME/libs/
285 |
286 | The reporter only requires one set of configuration properties:
287 | - [Metrics Producer Configuration](#configuration-producer)
288 |
289 |
290 |
291 |
292 | #### Usage in Kafka Broker, Kafka Prism, Kafka Producer (pre 0.8.2), Kafka Consumer (pre 0.9)
293 |
294 |
295 |
296 | Add the following properties to the configuration of the component:
297 |
298 | kafka.metrics.reporters=io.amient.kafka.metrics.TopicReporter
299 | kafka.metrics.polling.interval.secs=10
300 |     kafka.metrics.topic=_metrics
301 | #kafka.metrics.=
302 | #kafka.metrics....
303 |
304 |
305 |
306 | #### Usage in Kafka NEW Producer (0.8.2+) and Consumer (0.9+)
307 |
308 |
309 | metric.reporters=io.amient.kafka.metrics.TopicReporter
310 | kafka.metrics....
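
For example, a new producer or consumer configuration using the reporter might look roughly like this (values are illustrative; the `kafka.metrics.*` keys are described under [Metrics Producer Configuration](#configuration-producer)):

    metric.reporters=io.amient.kafka.metrics.TopicReporter
    kafka.metrics.topic=metrics
    kafka.metrics.polling.interval=10s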
311 |
312 |
313 |
314 | #### Usage in any application using dropwizard metrics (formerly yammer metrics)
315 |
316 |
317 | Like any other Yammer metrics reporter, given an instance (and configuration), once started, the reporter
318 | will produce Kafka Metrics messages to a configured topic at the given time interval. Scala-Maven example:
319 |
320 | ``` pom.xml
321 |     ...
322 |     <dependency>
323 |         <groupId>io.amient.kafka.metrics</groupId>
324 |         <artifactId>metrics-reporter</artifactId>
325 |         <version>${kafka.version}</version>
326 |     </dependency>
327 |     ...
328 | ```
329 |
330 | ... Using builder for programmatic initialization
331 |
332 | val registry = MetricsRegistry.defaultRegistry()
333 | val reporter = TopicReporter.forRegistry(registry)
334 | .setTopic("metrics") //this is also default
335 | .setBootstrapServers("kafka1:9092,kafka2:9092")
336 | .setTag("host", "my-host-xyz")
337 | .setTag("app", "my-app-name")
338 | .build()
339 | reporter.start(10, TimeUnit.SECONDS);
340 |
341 | ... OR Using config properties:
342 |
343 | val registry = MetricsRegistry.defaultRegistry()
344 | val config = new java.util.Properties()
345 | val reporter = TopicReporter.forRegistry(registry).configure(config).build()
346 | reporter.start(10, TimeUnit.SECONDS);
347 |
348 |
349 |
350 | #### Usage in Samza (0.9+)
351 |
352 |
353 | The InfluxDB Loader and Metrics Connect use the same code, which understands the JSON messages that Samza generates
354 | using MetricsSnapshotSerdeFactory. So only a normal Samza metrics configuration is needed, without additional code, for example:
355 |
356 | metrics.reporters=topic
357 | metrics.reporter.topic.class=org.apache.samza.metrics.reporter.MetricsSnapshotReporterFactory
358 | metrics.reporter.topic.stream=kafkametrics.metrics
359 | serializers.registry.metrics.class=org.apache.samza.serializers.MetricsSnapshotSerdeFactory
360 | systems.kafkametrics.streams.metrics.samza.msg.serde=metrics
361 | systems.kafkametrics.samza.factory=org.apache.samza.system.kafka.KafkaSystemFactory
362 | systems.kafkametrics.consumer.zookeeper.connect=<...>
363 | systems.kafkametrics.producer.bootstrap.servers=<...>
364 |
365 |
366 |
367 | ## Configuration
368 |
369 |
370 |
371 |
372 | ### InfluxDB Configuration
373 |
374 |
375 | The following configuration is required for modules that need to write to the InfluxDB backend:
376 |
377 | parameter | default | description
378 | -------------------------------------------|------------------------|------------------------------------------------------------------------------
379 | **influxdb.database** | `metrics` | InfluxDB Database Name where to publish the measurements
380 | **influxdb.url** | `http://localhost:8086`| URL of the InfluxDB API Instance
381 | **influxdb.username** | `root` | Authentication username for API calls
382 | **influxdb.password**                      | `root`                 | Authentication password for API calls
383 |
384 |
385 |
386 | ### JMX Scanner Configuration
387 |
388 |
389 | The following configuration options can be used with the **InfluxDB Loader** and **MetricsAgent**:
390 |
391 | parameter | default | description
392 | -------------------------------------------|------------------------|------------------------------------------------------------------------------
393 | jmx.{ID}.address | - | Address of the JMX Service Endpoint
394 | jmx.{ID}.query.scope                       | `*:*`                  | this will be used to filter object names in the JMX Server registry, i.e. `*:*` or `kafka.*:*` or `kafka.server:type=BrokerTopicMetrics,*`
395 | jmx.{ID}.query.interval.s | 10 | how frequently to query the JMX Service
396 | jmx.{ID}.tag.{TAG-1}                       | -                      | optional tags which will be attached to each measurement
397 | jmx.{ID}.tag.{TAG-2} | - | ...
398 | jmx.{ID}.tag.{TAG-n} | - | ...
399 |
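
For example, two scanners polling a local broker and another local JVM application could be configured along these lines (addresses and tags are illustrative):

    jmx.broker1.address=localhost:19092
    jmx.broker1.query.scope=kafka.*:*
    jmx.broker1.query.interval.s=10
    jmx.broker1.tag.host=localhost
    jmx.app1.address=localhost:19999
    jmx.app1.query.scope=*:*
    jmx.app1.tag.host=localhost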
400 |
401 |
402 |
403 | ### Metrics Producer Configuration
404 |
405 |
406 | The following configuration options can be used with the TopicReporter and MetricsAgent:
407 |
408 | parameter | default | description
409 | -------------------------------------------|-------------------|------------------------------------------------------------------------------
410 | **kafka.metrics.topic** | `metrics` | Topic name where metrics are published
411 | **kafka.metrics.polling.interval**         | `10s`             | Poll and publish frequency of metrics; allowed interval values: 1s, 10s, 1m
412 | **kafka.metrics.bootstrap.servers**        | *inferred*        | Comma-separated list of Kafka server addresses (host:port). When used in Brokers, `localhost` is the default.
413 | *kafka.metrics.tag.{NAME}.{VALUE}*         | -                 | Fixed name-value pairs that will be used as tags in the published measurement for this instance, e.g. `kafka.metrics.tag.host.my-host-01` or `kafka.metrics.tag.dc.uk-az1`
414 |
415 |
416 |
417 | ### Metrics Consumer Configuration
418 |
419 |
420 | The following configuration options can be used with the modules that use Kafka consumer to get measurements:
421 |
422 | parameter | default | description
423 | -------------------------------------------|------------------------|------------------------------------------------------------------------------
424 | consumer.topic                             | `metrics`              | Topic to consume (where measurements are published by the Reporter)
425 | consumer.numThreads | `1` | Number of consumer threads
426 | consumer.zookeeper.connect | `localhost:2181` | As per [Kafka Consumer Configuration](http://kafka.apache.org/documentation.html#consumerconfigs)
427 | consumer.group.id                          | -                      | As per any [Kafka Consumer Configuration](http://kafka.apache.org/documentation.html#consumerconfigs)
428 | consumer.... | - | Any other [Kafka Consumer Configuration](http://kafka.apache.org/documentation.html#consumerconfigs)
429 |
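
For example, a consumer configuration for reading measurements from a local metrics topic might look like this sketch (the group id is illustrative):

    consumer.topic=metrics
    consumer.numThreads=1
    consumer.zookeeper.connect=localhost:2181
    consumer.group.id=kafka-metrics-loader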
430 |
431 |
432 |
433 | ## Operations & Troubleshooting
434 |
435 |
436 |
437 | ### Inspecting the metrics topic
438 |
439 | Using kafka console consumer with a formatter for kafka-metrics:
440 |
441 | ./bin/kafka-console-consumer.sh --zookeeper localhost --topic metrics --formatter io.amient.kafka.metrics.MeasurementFormatter
442 |
443 |
444 |
445 | ## Development
446 |
447 |
448 | ### Issue tracking
449 |
450 | https://github.com/amient/kafka-metrics/issues
451 |
452 | ### Versioning
453 |
454 | **Kafka Metrics is closely related to Apache Kafka** and from this perspective it can be viewed as having 2 dimensions:
455 |
456 | - *general functionality* - concepts that are available regardless of Kafka version
457 | - *version-specific functionality* - implementation details that are specific/missing/added in concrete Kafka version
458 |
459 | We need this to be able to support a variety of real-world setups which may use different Apache Kafka versions in their
460 | infrastructure. For this reason, **we maintain active branches for each version of the Apache Kafka project** starting
461 | from version 0.8.2.1.
462 |
463 | When considering a new general feature, like for example having a first-class
464 | [collectd integration](https://github.com/amient/kafka-metrics/issues/4), it should be considered how this will work in
465 | different versions, and the API should then be designed so that it can be easily merged and ported to each active
466 | branch.
467 |
468 | Once designed, the general features should be implemented against the `master` branch which is linked to
469 | the latest official release of Apache Kafka, and once this is fully working a pull request against master can be made.
470 | As part of merging the pull request, the feature must be back-ported to all supported versions.
471 |
472 | In the case of using new features of Apache Kafka which are not available in the previous versions actively supported
473 | by this project, an attempt should be made to design the desired *general functionality* in such a way that the older
474 | versions can merge and emulate the missing feature internally. A good example of this is using Kafka Connect features
475 | in place of the InfluxDB Loader that consumes measurement messages from a Kafka topic and writes them to InfluxDB.
476 | The *general feature* here is to be able to publish measurements into InfluxDB from a Kafka topic. In 0.8.x versions
477 | we can use a custom Kafka Consumer (implemented in the core module as MetricsConsumer class) but in 0.9.x+ releases
478 | we can use a Connector implementation that can be used in a Kafka Connect context. There is a re-design ticket which
479 | addresses the point of having the internal API flexible enough to allow for these 2 different ways of implementing it:
480 | [issues/12](https://github.com/amient/kafka-metrics/issues/12)
481 |
482 | **An additional layer of complexity is the different versions of InfluxDB.** To keep things simple we are not attempting to
483 | support multiple versions of the InfluxDB protocol and use the latest available. It is possible to support different
484 | time-series backends, but in the world of monitoring there are already plenty of ways to integrate with InfluxDB, so
485 | for now we keep this option closed unless this becomes an actual pain that cannot be solved otherwise.
486 |
487 | ### Contributing
488 |
489 | If you'd like to contribute, please open an issue to start a discussion about the idea, or join the discussion on an
490 | existing one, and we'll take it from there.
491 |
492 |
493 |
494 |
--------------------------------------------------------------------------------
/build.gradle:
--------------------------------------------------------------------------------
1 | ext {
2 | encoding = "UTF-8"
3 |
4 | javaVersion = '1.8'
5 | junitVersion = '4.12'
6 | kafkaVersion = '2.0.0'
7 | jacksonVersion = '2.9.5'
8 | avroVersion = '1.8.2'
9 | slf4jVersion = '1.7.21'
10 | //
11 | scalaVersion = '2.11.8'
12 | baseScalaVersion = scalaVersion.split("\\.")[0] + '.' + scalaVersion.split("\\.")[1]
13 | }
14 |
15 | project.version = ext.kafkaVersion
16 |
17 | task wrapper(type: Wrapper) {
18 | gradleVersion = '4.9'
19 | }
20 |
21 | allprojects {
22 |
23 | plugins.withType(JavaPlugin).whenPluginAdded {
24 |
25 | version = rootProject.version
26 | sourceCompatibility = rootProject.javaVersion
27 | libsDirName = 'lib'
28 |
29 | configurations {
30 | provided
31 | testCompile.extendsFrom provided
32 | }
33 |
34 | repositories {
35 |
36 | mavenCentral()
37 | }
38 |
39 | dependencies {
40 | compile group: 'org.slf4j', name:'slf4j-api', version: slf4jVersion
41 | compile group: 'org.slf4j', name:'slf4j-log4j12', version: slf4jVersion
42 | testCompile group: 'junit', name: 'junit', version: junitVersion
43 | }
44 |
45 | sourceSets {
46 | main {
47 | compileClasspath += configurations.provided
48 | }
49 | }
50 |
51 | // idea {
52 | // module {
53 | // scopes.PROVIDED.plus += [configurations.provided]
54 | // }
55 | // }
56 |
57 | }
58 | }
59 |
60 |
--------------------------------------------------------------------------------
/core/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'java'
2 |
3 | apply plugin: "maven"
4 | group = "io.amient.kafka.metrics"
5 | version = rootProject.version
6 |
7 | buildscript {
8 | repositories {
9 | maven {
10 | url "https://plugins.gradle.org/m2/"
11 | }
12 | }
13 | dependencies {
14 | classpath "com.commercehub.gradle.plugin:gradle-avro-plugin:0.9.0"
15 | }
16 | }
17 |
18 | apply plugin: "com.commercehub.gradle.plugin.avro"
19 |
20 | dependencies {
21 | provided group: 'org.apache.kafka', name: 'kafka_' + baseScalaVersion, version: rootProject.kafkaVersion
22 | provided group: 'org.apache.kafka', name: 'kafka-clients', version: rootProject.kafkaVersion
23 | compile group: 'org.apache.avro', name: 'avro', version: rootProject.avroVersion
24 | compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: rootProject.jacksonVersion
25 | }
26 |
--------------------------------------------------------------------------------
/core/src/main/avro/metric_v1.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "io.amient.kafka.metrics",
3 | "type": "record",
4 | "name": "MeasurementV1",
5 | "fields": [
6 | {
7 | "name": "timestamp",
8 | "type": "long"
9 | },
10 | {
11 | "name": "name",
12 | "type": "string"
13 | },
14 | {
15 | "name": "tags",
16 | "type": {"type" : "map", "values" : "string"}
17 | },
18 | {
19 | "name": "fields",
20 | "type": {"type" : "map", "values" : "double"}
21 | }
22 | ]
23 | }
--------------------------------------------------------------------------------
/core/src/main/java/io/amient/kafka/metrics/AutoJsonDeserializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.codehaus.jackson.JsonNode;
23 | import org.codehaus.jackson.map.ObjectMapper;
24 | import org.codehaus.jackson.map.RuntimeJsonMappingException;
25 | import org.slf4j.Logger;
26 | import org.slf4j.LoggerFactory;
27 |
28 | import java.io.IOException;
29 | import java.util.*;
30 | import java.util.regex.Matcher;
31 | import java.util.regex.Pattern;
32 |
33 | public class AutoJsonDeserializer {
34 |
35 | final private ObjectMapper mapper = new ObjectMapper();
36 | final private SamzaJsonMetricsDecoder samzaDecoder = new SamzaJsonMetricsDecoder();
37 |
38 | public List fromBytes(byte[] bytes) {
39 | try {
40 | JsonNode node = mapper.readTree(bytes);
41 | if (node.has("header") && node.has("metrics") && node.get("header").has("samza-version")) {
42 | return samzaDecoder.fromJsonTree(node);
43 | } else {
44 | throw new RuntimeJsonMappingException("Unrecoginzed JSON metric format: " + node.asText());
45 | }
46 | } catch (IOException e) {
47 | throw new RuntimeException("Error deserializing json object", e);
48 | }
49 | }
50 |
51 | private static class SamzaJsonMetricsDecoder {
52 |
53 | final private MeasurementFormatter formatter = new MeasurementFormatter();
54 | private Logger log = LoggerFactory.getLogger(SamzaJsonMetricsDecoder.class);
55 |
56 | public List fromJsonTree(JsonNode node) {
57 | List result = new LinkedList();
58 | JsonNode header = node.get("header");
59 | String version = header.get("version").getTextValue();
60 | if (version.equals("0.0.1")) {
61 | Long timestamp = header.get("time").getLongValue();
62 | Map commonTags = new HashMap();
63 | Iterator> tagFields = header.getFields();
64 | while (tagFields.hasNext()) {
65 | Map.Entry tagField = tagFields.next();
66 | String tagKey = tagField.getKey();
67 | if (tagKey.equals("time")) continue;
68 | if (tagKey.equals("reset-time")) continue;
69 | if (tagKey.equals("version")) continue;
70 | commonTags.put(tagField.getKey(), tagField.getValue().getTextValue());
71 | }
72 |
73 | Iterator> metricFields = node.get("metrics").getFields();
74 | while (metricFields.hasNext()) {
75 | Map.Entry metricField = metricFields.next();
76 | String name = metricField.getKey();
77 |
78 | //group by identical tags
79 | Map, Map> points = new HashMap, Map>();
80 | Iterator> fieldValues = metricField.getValue().getFields();
81 | while (fieldValues.hasNext()) {
82 | Map.Entry field = fieldValues.next();
83 | Double value = formatter.anyValueToDouble(field.getValue().getNumberValue());
84 | Map tags = new HashMap(commonTags);
85 | if (value != null) {
86 | String fieldName = tagSamzaMetricField(name, field.getKey(), tags);
87 | if (fieldName != null) {
88 | Map fields = points.get(tags);
89 | if (fields == null) {
90 | fields = new HashMap();
91 | points.put(tags, fields);
92 | }
93 | fields.put(fieldName, value);
94 | }
95 | }
96 | }
97 |
98 | for(Map.Entry, Map> e: points.entrySet()) {
99 | MeasurementV1 measurement = new MeasurementV1();
100 | measurement.setName(name);
101 | measurement.setTimestamp(timestamp);
102 | measurement.setTags(e.getKey());
103 | measurement.setFields(e.getValue());
104 | result.add(measurement);
105 | }
106 | }
107 | } else {
108 | log.warn("Unsupported Samza Metrics JSON Format Version " + version);
109 | }
110 | return result;
111 | }
112 |
113 |
114 | private Pattern fieldSystemStreamPartition = Pattern.compile("^(.+)-SystemStreamPartition \\[([^,]+), ([^,]+), ([0-9]+)\\]$");
115 | private Pattern systemTopicPartitionField = Pattern.compile("^([^-]+)-([^-]+)-([0-9]+)-(.+)$");
116 | private Pattern systemHostPortField = Pattern.compile("^([^-]+)-(.+-[0-9]+)-(.+)$");
117 | private Pattern taskPartitionField = Pattern.compile("^(.+)-partition\\s([0-9]+)-(.+)$");
118 | private Pattern partitionField = Pattern.compile("^partition\\s([0-9]+)-(.+)$");
119 | private Pattern systemField = Pattern.compile("^([^-]+)-(.+)$");
120 |
121 | private String tagSamzaMetricField(String name, String field, Map tags) {
122 | if (name.startsWith("org.apache.samza.")) {
123 | Matcher m1 = fieldSystemStreamPartition.matcher(field);
124 | if (m1.find()) {
125 | //e.g. 'buffered-message-count-SystemStreamPartition [kafkaevents, datasync, 5]'
126 | tags.put("system", m1.group(2));
127 | tags.put("topic", m1.group(3));
128 | tags.put("partition", m1.group(4));
129 | return m1.group(1);
130 | }
131 | Matcher m2 = systemTopicPartitionField.matcher(field);
132 | if (m2.find()) {
133 | //e.g. 'kafkaevents-datasync-10-messages-behind-high-watermark=0.0'
134 | tags.put("system", m2.group(1));
135 | tags.put("topic", m2.group(2));
136 | tags.put("partition", m2.group(3));
137 | return m2.group(4);
138 | }
139 | Matcher m3 = systemHostPortField.matcher(field);
140 | if (m3.find()) {
141 | //e.g. 'kafkayarn-bl-message-s01.visualdna.com-9092-bytes-read'
142 | tags.put("system", m3.group(1));
143 | tags.put("broker", m3.group(2));
144 | return m3.group(3);
145 | }
146 | Matcher m4 = taskPartitionField.matcher(field);
147 | if (m4.find()) {
148 | //e.g. 'taskname-partition 4-sends'
149 | tags.put("task", m4.group(1));
150 | tags.put("partition", m4.group(2));
151 | return m4.group(3);
152 | }
153 | Matcher m5 = partitionField.matcher(field);
154 | if (m5.find()) {
155 | //e.g. 'partition 4-restore-time'
156 | tags.put("partition", m5.group(1));
157 | return m5.group(2);
158 | }
159 | Matcher m6 = systemField.matcher(field);
160 | if (m6.find()) {
161 | //e.g. 'buffered-message-count-SystemStreamPartition [kafkaevents, datasync, 5]'
162 | tags.put("system", m6.group(1));
163 | return m6.group(2);
164 | }
165 | }
166 | return field;
167 | }
168 | }
169 | }
170 |
--------------------------------------------------------------------------------
/core/src/main/java/io/amient/kafka/metrics/InternalAvroSerde.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.apache.avro.io.*;
23 | import org.apache.avro.specific.SpecificDatumReader;
24 | import org.apache.avro.specific.SpecificDatumWriter;
25 |
26 | import java.io.ByteArrayOutputStream;
27 | import java.io.IOException;
28 | import java.nio.ByteBuffer;
29 |
30 | public class InternalAvroSerde {
31 |
32 | private final static byte MAGIC_BYTE = 0x1;
33 | private final static byte CURRENT_VERSION = 1;
34 |
35 | private enum SchemaVersions {
36 | V1(new SpecificDatumReader(MeasurementV1.getClassSchema()));
37 |
38 | public final SpecificDatumReader> reader;
39 |
40 | SchemaVersions(SpecificDatumReader> reader) {
41 | this.reader = reader;
42 | }
43 | }
44 |
45 | private final EncoderFactory encoderFactory = EncoderFactory.get();
46 | private final DecoderFactory decoderFactory = DecoderFactory.get();
47 |
48 | public byte[] toBytes(MeasurementV1 measurement) {
49 | try {
50 | ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
51 | byteStream.write(MAGIC_BYTE);
52 | byteStream.write(CURRENT_VERSION);
53 | BinaryEncoder encoder = encoderFactory.directBinaryEncoder(byteStream, null);
54 | DatumWriter writer = new SpecificDatumWriter(measurement.getSchema());
55 | writer.write(measurement, encoder);
56 | encoder.flush();
57 | byte[] result = byteStream.toByteArray();
58 | byteStream.close();
59 | return result;
60 | } catch (IOException e) {
61 | throw new RuntimeException("Error serializing Measurement object", e);
62 | }
63 | }
64 |
65 | public MeasurementV1 fromBytes(byte[] bytes) {
66 | if (bytes == null) {
67 | return null;
68 | }
69 | ByteBuffer buffer = ByteBuffer.wrap(bytes);
70 | byte magic = buffer.get();
71 | byte version = buffer.get();
72 | try {
73 | int length = buffer.limit() - 2;
74 | int start = buffer.position() + buffer.arrayOffset();
75 | DatumReader> reader = SchemaVersions.values()[version - 1].reader;
76 | Object object = reader.read(null, decoderFactory.binaryDecoder(buffer.array(), start, length, null));
77 | if (object instanceof MeasurementV1) {
78 | return (MeasurementV1) object;
79 | } else {
80 | throw new IllegalArgumentException("Unsupported object type " + object.getClass());
81 | }
82 | } catch (IOException e) {
83 | throw new IllegalArgumentException("Error deserializing Measurement message version " + version, e);
84 | }
85 |
86 | }
87 |
88 |
89 | }
90 |
--------------------------------------------------------------------------------
/core/src/main/java/io/amient/kafka/metrics/JMXScanner.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.slf4j.Logger;
23 | import org.slf4j.LoggerFactory;
24 |
25 | import javax.management.MalformedObjectNameException;
26 | import java.io.IOException;
27 | import java.util.Enumeration;
28 | import java.util.HashMap;
29 | import java.util.Map;
30 | import java.util.Properties;
31 | import java.util.concurrent.Executors;
32 | import java.util.concurrent.ScheduledExecutorService;
33 | import java.util.concurrent.TimeUnit;
34 |
35 | import io.amient.kafka.metrics.JMXScannerTask.JMXScannerConfig;
36 |
37 | public class JMXScanner {
38 |
39 | static private final Logger log = LoggerFactory.getLogger(JMXScanner.class);
40 |
41 | final private ScheduledExecutorService jmxScanExecutor;
42 |
43 | public JMXScanner(Properties props, MeasurementPublisher publisher)
44 | throws IOException, MalformedObjectNameException, InterruptedException {
45 | if (publisher == null) log.warn("ProducerPublisher not configured, will only scan..");
46 | Map jmxConfigs = new HashMap();
47 | for (Enumeration e = props.keys(); e.hasMoreElements(); ) {
48 | String propKey = (String) e.nextElement();
49 | String propVal = props.get(propKey).toString();
50 | if (propKey.startsWith("jmx.")) {
51 | propKey = propKey.substring(4);
52 | int idLen = propKey.indexOf('.') + 1;
53 | String id = propKey.substring(0, idLen - 1);
54 | if (!jmxConfigs.containsKey(id)) jmxConfigs.put(id, new JMXScannerConfig());
55 | JMXScannerConfig jmxConfig = jmxConfigs.get(id);
56 | propKey = propKey.substring(idLen);
57 | log.info(propKey + "=" + propVal);
58 | if (propKey.startsWith("tag.")) {
59 | propKey = propKey.substring(4);
60 | jmxConfig.setTag(propKey, propVal);
61 | } else if (propKey.equals("address")) {
62 | jmxConfig.setAddress(propVal);
63 | } else if (propKey.equals("query.scope")) {
64 | jmxConfig.setQueryScope(propVal);
65 | } else if (propKey.equals("query.interval.s")) {
66 | jmxConfig.setQueryInterval(Long.parseLong(propVal));
67 | }
68 | }
69 | }
70 |
71 | jmxScanExecutor = Executors.newScheduledThreadPool(jmxConfigs.size());
72 | for (JMXScannerConfig jmxConfig : jmxConfigs.values()) {
73 | log.info("Starting JMXScannerTask for " + jmxConfig.getAddress()
74 | + " every " + jmxConfig.getQueryIntervalSeconds() + " seconds");
75 | JMXScannerTask jmxScanner = new JMXScannerTask(jmxConfig, publisher);
76 | jmxScanExecutor.scheduleAtFixedRate(jmxScanner, 0, jmxConfig.getQueryIntervalSeconds(), TimeUnit.SECONDS);
77 | }
78 | }
79 |
80 | public Boolean isTerminated() {
81 | return jmxScanExecutor.isTerminated();
82 | }
83 |
84 |
85 | }
86 |
--------------------------------------------------------------------------------
/core/src/main/java/io/amient/kafka/metrics/JMXScannerTask.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.slf4j.Logger;
23 | import org.slf4j.LoggerFactory;
24 |
25 | import javax.management.*;
26 | import javax.management.remote.JMXConnector;
27 | import javax.management.remote.JMXConnectorFactory;
28 | import javax.management.remote.JMXServiceURL;
29 | import java.io.IOException;
30 | import java.util.HashMap;
31 | import java.util.LinkedHashMap;
32 | import java.util.Map;
33 | import java.util.Set;
34 |
35 | public class JMXScannerTask implements Runnable {
36 |
37 | static private final Logger log = LoggerFactory.getLogger(JMXScannerTask.class);
38 |
39 | private final Map<String, String> tags;
40 | private final MeasurementPublisher publisher;
41 | private final MeasurementFormatter formatter;
42 | private final ObjectName pattern;
43 | private final String address;
44 | private JMXConnector jmxConnector = null;
45 | private MBeanServerConnection conn = null;
46 |
47 | public static class JMXScannerConfig {
48 |
49 | private final Map<String, String> tags = new LinkedHashMap<String, String>();
50 | private String address;
51 | private String queryScope = "*:*";
52 | private long queryIntervalSeconds = 10;
53 |
54 | public void setTag(String propKey, String propVal) {
55 | this.tags.put(propKey, propVal);
56 | }
57 |
58 | public void setAddress(String address) {
59 | this.address = address;
60 | }
61 |
62 | public void setQueryScope(String query) {
63 | this.queryScope = query;
64 | }
65 |
66 | public String getAddress() {
67 | return address;
68 | }
69 |
70 | public Map<String, String> getTags() {
71 | return tags;
72 | }
73 |
74 | public String getQueryScope() {
75 | return queryScope;
76 | }
77 |
78 | public void setQueryInterval(long intervalSeconds) {
79 | this.queryIntervalSeconds = intervalSeconds;
80 | }
81 |
82 | public long getQueryIntervalSeconds() {
83 | return queryIntervalSeconds;
84 | }
85 |
86 | }
87 |
88 | public JMXScannerTask(JMXScannerConfig config, MeasurementPublisher publisher) throws IOException, MalformedObjectNameException {
89 | this.pattern = new ObjectName(config.getQueryScope());
90 | this.address = "service:jmx:rmi:///jndi/rmi://" + config.getAddress() + "/jmxrmi";
91 | this.tags = config.getTags();
92 | this.publisher = publisher;
93 | this.formatter = new MeasurementFormatter();
94 | log.info("connection " + address + ", scope " + config.queryScope);
95 | }
96 |
97 | @Override
98 | public void run() {
99 | try {
100 | if (conn == null) {
101 | try {
102 | JMXServiceURL url = new JMXServiceURL(address);
103 | this.jmxConnector = JMXConnectorFactory.connect(url);
104 | this.conn = jmxConnector.getMBeanServerConnection();
105 | } catch (IOException e) {
106 | log.warn("Could not connect to " + address);
107 | }
108 | }
109 | if (conn != null) {
110 | final long timestamp = System.currentTimeMillis();
111 | Set<ObjectInstance> beans = conn.queryMBeans(pattern, null);
112 | for (ObjectInstance i : beans) {
113 | if (log.isDebugEnabled()) {
114 | log.debug(i.getObjectName().toString());
115 | }
116 | MeasurementV1[] measurements = extractMeasurements(i, timestamp);
117 | for (MeasurementV1 measurement : measurements) {
118 | if (publisher != null && measurement.getFields().size() > 0) {
119 | publisher.publish(measurement);
120 | }
121 | }
122 |
123 | }
124 | }
125 | } catch (IntrospectionException e) {
126 | log.warn("could not retrieve some mbeans", e);
127 | } catch (IOException e) {
128 | this.conn = null;
129 | } catch (Exception e) {
130 | log.warn("could not retrieve some mbeans", e);
131 | }
132 | }
133 |
134 | private MeasurementV1[] extractMeasurements(ObjectInstance i, Long timestamp)
135 | throws IntrospectionException, ReflectionException, InstanceNotFoundException, IOException, AttributeNotFoundException, MBeanException {
136 | ObjectName name = i.getObjectName();
137 |
138 | if (name.getKeyProperty("name") == null) {
139 | return extractAttributesAsMeasurements(i, timestamp);
140 | }
141 |
142 | MeasurementV1 measurement = new MeasurementV1();
143 | measurement.setTimestamp(timestamp);
144 | measurement.setName(name.getKeyProperty("name"));
145 | measurement.setTags(new LinkedHashMap(tags));
146 | measurement.getTags().put("group", name.getDomain());
147 | for (Map.Entry<String, String> k : name.getKeyPropertyList().entrySet()) {
148 | if (!k.getKey().equals("name")) {
149 | measurement.getTags().put(k.getKey(), k.getValue());
150 | }
151 | }
152 |
153 | HashMap<String, Double> fields = new HashMap<String, Double>();
154 | MBeanInfo info = conn.getMBeanInfo(name);
155 | for (MBeanAttributeInfo attr : info.getAttributes()) {
156 | try {
157 | Object anyVal = conn.getAttribute(name, attr.getName());
158 | try {
159 | Double value = formatter.anyValueToDouble(anyVal);
160 | if (value != null)
161 | fields.put(attr.getName(), value);
162 | } catch (RuntimeMBeanException e) {
163 | log.warn("could not cast value " + anyVal + " of attribute " + attr + " of " + name +" into double value ", e);
164 | }
165 | } catch (RuntimeMBeanException e) {
166 | String msg = "failed to get attribute name=" + attr.getName() + " type=" + attr.getType() + " of " + name;
167 | if (log.isDebugEnabled()) {
168 | log.debug(msg, e.getCause());
169 | } else {
170 | log.warn(msg + " due to " + e.getCause());
171 | }
172 | }
173 | }
174 |
175 | measurement.setFields(new HashMap(fields));
176 | return new MeasurementV1[]{measurement};
177 |
178 | }
179 |
180 | private MeasurementV1[] extractAttributesAsMeasurements(ObjectInstance i, Long timestamp)
181 | throws IntrospectionException, ReflectionException, InstanceNotFoundException, IOException, AttributeNotFoundException, MBeanException {
182 | ObjectName name = i.getObjectName();
183 | MBeanInfo info = conn.getMBeanInfo(name);
184 | MBeanAttributeInfo[] attributes = info.getAttributes();
185 | MeasurementV1[] result = new MeasurementV1[attributes.length];
186 | int k = 0;
187 | for (MBeanAttributeInfo attr : info.getAttributes()) {
188 | MeasurementV1 measurement = new MeasurementV1();
189 | measurement.setTimestamp(timestamp);
190 | measurement.setName(attr.getName());
191 | measurement.setTags(new LinkedHashMap(tags));
192 | measurement.getTags().put("group", name.getDomain());
193 | for (Map.Entry<String, String> tag : name.getKeyPropertyList().entrySet()) {
194 | measurement.getTags().put(tag.getKey(), tag.getValue());
195 | }
196 |
197 | Double value = formatter.anyValueToDouble(conn.getAttribute(name, attr.getName()));
198 | HashMap<String, Double> fields = new HashMap<String, Double>();
199 | if (value != null)
200 | fields.put("Value", value);
201 | measurement.setFields(new HashMap(fields));
202 | result[k++] = measurement;
203 | }
204 |
205 | return result;
206 | }
207 | }
--------------------------------------------------------------------------------
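
In the task above, an MBean whose ObjectName carries a "name" key becomes one measurement: the "name" key becomes the measurement name, the remaining key properties plus the configured tags become tags, and every attribute convertible to a double becomes a field. Beans without a "name" key are expanded into one single-field measurement per attribute. Below is a standalone sketch of driving one task without the property parsing in JMXScanner (illustration only; the stdout publisher is a stand-in for any MeasurementPublisher, imports and the checked exceptions declared by the constructor are left to the caller).

    JMXScannerTask.JMXScannerConfig config = new JMXScannerTask.JMXScannerConfig();
    config.setAddress("localhost:19092");
    config.setQueryScope("kafka.server:*");
    config.setQueryInterval(10);
    config.setTag("host", "broker-1");
    MeasurementPublisher stdout = new MeasurementPublisher() {
        public void publish(MeasurementV1 m) { System.out.println(m); }
        public void close() {}
    };
    Executors.newSingleThreadScheduledExecutor()
            .scheduleAtFixedRate(new JMXScannerTask(config, stdout), 0, 10, TimeUnit.SECONDS);
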
/core/src/main/java/io/amient/kafka/metrics/MeasurementDeserializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.apache.kafka.common.errors.SerializationException;
23 | import org.apache.kafka.common.serialization.Deserializer;
24 |
25 | import java.util.Arrays;
26 | import java.util.List;
27 | import java.util.Map;
28 |
29 | public class MeasurementDeserializer implements Deserializer<List<MeasurementV1>> {
30 |
31 | private InternalAvroSerde internalAvro = new InternalAvroSerde();
32 | private AutoJsonDeserializer autoJsonDeserializer = new AutoJsonDeserializer();
33 |
34 |
35 | @Override
36 | public void configure(Map<String, ?> configs, boolean isKey) {
37 |
38 | }
39 |
40 | @Override
41 | public List<MeasurementV1> deserialize(String topic, byte[] bytes) {
42 | switch(bytes[0]) {
43 | case 0x0: throw new SerializationException("Schema Registry doesn't support maps and arrays yet.");
44 | case 0x1: return Arrays.asList(internalAvro.fromBytes(bytes));
45 | case '{': return autoJsonDeserializer.fromBytes(bytes);
46 | default: throw new SerializationException("Serialization MAGIC_BYTE not recognized: " + bytes[0]);
47 | }
48 | }
49 |
50 | @Override
51 | public void close() {
52 |
53 | }
54 | }
55 |
56 |
--------------------------------------------------------------------------------
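
The deserializer dispatches on the first byte of the message value: 0x1 selects the internal Avro encoding written by MeasurementSerializer, a leading '{' selects the JSON path handled by AutoJsonDeserializer, and 0x0 (Confluent Schema Registry framing) is rejected. A consumer-side sketch, assuming a metrics topic named _metrics (imports and poll-loop management omitted):

    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("group.id", "metrics-example");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put("value.deserializer", "io.amient.kafka.metrics.MeasurementDeserializer");
    KafkaConsumer<byte[], List<MeasurementV1>> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("_metrics"));
    MeasurementFormatter formatter = new MeasurementFormatter();
    for (ConsumerRecord<byte[], List<MeasurementV1>> record : consumer.poll(1000)) {
        for (MeasurementV1 m : record.value()) {
            System.out.print(formatter.toString(m));   // one line of name+tags, one line of fields
        }
    }
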
/core/src/main/java/io/amient/kafka/metrics/MeasurementFormatter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import kafka.common.MessageFormatter;
23 | import org.apache.kafka.clients.consumer.ConsumerRecord;
24 | import org.apache.kafka.common.errors.SerializationException;
25 |
26 | import java.io.ByteArrayOutputStream;
27 | import java.io.PrintStream;
28 | import java.text.DateFormat;
29 | import java.text.SimpleDateFormat;
30 | import java.util.Date;
31 | import java.util.Properties;
32 |
33 | public class MeasurementFormatter implements MessageFormatter {
34 |
35 | private static final DateFormat date = new SimpleDateFormat("dd/MM/yyyy G 'at' HH:mm:ss z");
36 |
37 | private MeasurementDeserializer decoder = null;
38 |
39 | @Override
40 | public void init(Properties props) {
41 | decoder = new MeasurementDeserializer();
42 | }
43 |
44 | @Override
45 | public void writeTo(ConsumerRecord<byte[], byte[]> consumerRecord, PrintStream output) {
46 | try {
47 | for(MeasurementV1 measurement: decoder.deserialize(null, consumerRecord.value())) {
48 | writeTo(measurement, output);
49 | }
50 | } catch (SerializationException e) {
51 | output.append(e.getMessage());
52 | output.append("\n\n");
53 | }
54 | }
55 |
56 | public void writeTo(MeasurementV1 measurement, PrintStream output) {
57 | output.append(measurement.getName());
58 | for (java.util.Map.Entry<CharSequence, CharSequence> tag : measurement.getTags().entrySet()) {
59 | output.append(",");
60 | output.append(tag.getKey());
61 | output.append("=");
62 | output.append(tag.getValue());
63 | }
64 | output.append(" [" + date.format(new Date(measurement.getTimestamp())) + "] ");
65 | output.append("\n");
66 | for (java.util.Map.Entry<CharSequence, Double> field : measurement.getFields().entrySet()) {
67 | output.append(field.getKey());
68 | output.append("=");
69 | output.append(field.getValue().toString());
70 | output.append("\t");
71 | }
72 | output.append("\n\n");
73 | }
74 |
75 | @Override
76 | public void close() {
77 |
78 | }
79 |
80 | public Double anyValueToDouble(Object anyValue) {
81 | if (anyValue instanceof Double) {
82 | Double value = ((Double) anyValue);
83 | if (!value.isNaN() && !value.isInfinite()) {
84 | return value;
85 | }
86 | } else if ((anyValue instanceof Float)) {
87 | Float value = ((Float) anyValue);
88 | if (!value.isNaN() && !value.isInfinite()) {
89 | return ((Float) anyValue).doubleValue();
90 | }
91 | } else if ((anyValue instanceof Long)
92 | || (anyValue instanceof Integer)
93 | || (anyValue instanceof Short)
94 | || (anyValue instanceof Byte)) {
95 | return Double.valueOf(anyValue.toString());
96 | }
97 | return null;
98 |
99 | }
100 |
101 | public String toString(MeasurementV1 measurement) {
102 | ByteArrayOutputStream os = new ByteArrayOutputStream();
103 | PrintStream ps = new PrintStream(os);
104 | writeTo(measurement, ps);
105 | return os.toString();
106 | }
107 | }
--------------------------------------------------------------------------------
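
Besides acting as a console formatter, anyValueToDouble above is what decides which JMX attribute values make it into measurement fields. A few illustrative cases (expected results follow directly from the method body):

    MeasurementFormatter f = new MeasurementFormatter();
    f.anyValueToDouble(42);             // 42.0  -- integral types (Long, Integer, Short, Byte) are widened
    f.anyValueToDouble(0.25f);          // 0.25  -- finite floats and doubles pass through
    f.anyValueToDouble(Double.NaN);     // null  -- NaN and infinities are dropped
    f.anyValueToDouble("1000");         // null  -- strings and other types are never parsed
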
/core/src/main/java/io/amient/kafka/metrics/MeasurementPublisher.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | public interface MeasurementPublisher {
23 |
24 | void publish(MeasurementV1 m);
25 |
26 | void close();
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/core/src/main/java/io/amient/kafka/metrics/MeasurementSerializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.apache.kafka.common.serialization.Serializer;
23 |
24 | import java.util.Map;
25 |
26 | public class MeasurementSerializer implements Serializer<MeasurementV1> {
27 |
28 | private InternalAvroSerde internalAvro = new InternalAvroSerde();
29 |
30 | public void configure(Map<String, ?> map, boolean b) {
31 |
32 | }
33 |
34 | public byte[] serialize(String s, MeasurementV1 measurement) {
35 | return internalAvro.toBytes(measurement);
36 | }
37 |
38 | public void close() {
39 |
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/core/src/main/java/io/amient/kafka/metrics/ProducerPublisher.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.apache.kafka.clients.producer.KafkaProducer;
23 | import org.apache.kafka.clients.producer.ProducerRecord;
24 | import org.apache.kafka.common.errors.SerializationException;
25 | import org.apache.kafka.common.serialization.Serializer;
26 | import org.slf4j.Logger;
27 | import org.slf4j.LoggerFactory;
28 |
29 | import java.io.UnsupportedEncodingException;
30 | import java.nio.ByteBuffer;
31 | import java.util.Map;
32 | import java.util.Properties;
33 |
34 | public class ProducerPublisher implements MeasurementPublisher {
35 |
36 | static private final Logger log = LoggerFactory.getLogger(ProducerPublisher.class);
37 | public static final String CONFIG_BOOTSTRAP_SERVERS = "kafka.metrics.bootstrap.servers";
38 | public static final String CONFIG_METRICS_TOPIC = "kafka.metrics.topic";
39 |
40 | private static final int DEFAULT_BACK_OFF_MS = 10000;
41 | static final String DEFAULT_CLIENT_ID = "amient-kafka-metrics";
42 |
43 | private final KafkaProducer<Integer, MeasurementV1> producer;
44 | private final String topic;
45 |
46 | volatile private long failureTimestamp = 0;
47 |
48 | public ProducerPublisher(Properties props) {
49 | this(
50 | props.getProperty(ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS, "localhost:9092"),
51 | props.getProperty(ProducerPublisher.CONFIG_METRICS_TOPIC, "_metrics")
52 | );
53 | }
54 |
55 | public ProducerPublisher(final String kafkaBootstrapServers, final String topic) {
56 | this.topic = topic;
57 | if (kafkaBootstrapServers == null) throw new IllegalArgumentException("Missing configuration: " + CONFIG_BOOTSTRAP_SERVERS);
58 | if (topic == null) throw new IllegalArgumentException("Missing configuration: " + CONFIG_METRICS_TOPIC);
59 | this.producer = new KafkaProducer<Integer, MeasurementV1>(new Properties() {{
60 | put("bootstrap.servers", kafkaBootstrapServers);
61 | put("compression.type", "gzip");
62 | put("batch.size", "250");
63 | put("linger.ms", "1000");
64 | put("key.serializer", IntegerSerializer.class);
65 | put("value.serializer", io.amient.kafka.metrics.MeasurementSerializer.class);
66 | put("client.id", DEFAULT_CLIENT_ID);
67 | }});
68 | }
69 |
70 | public void publish(MeasurementV1 m) {
71 | Long time = m.getTimestamp();
72 | if (failureTimestamp > 0) {
73 | if (failureTimestamp + DEFAULT_BACK_OFF_MS < time) return; else failureTimestamp = 0;
74 | }
75 | try {
76 | tryPublish(m);
77 | } catch (Throwable e) {
78 | log.warn("Failed to publish measurement to kafka topic, will retry...", e);
79 | failureTimestamp = time;
80 | }
81 | }
82 |
83 | public void tryPublish(MeasurementV1 m) {
84 | producer.send(new ProducerRecord<Integer, MeasurementV1>(topic, m.getName().hashCode() + m.getTags().hashCode(), m));
85 | }
86 |
87 | @Override
88 | public void close() {
89 | if (producer != null) {
90 | producer.close();
91 | }
92 | }
93 |
94 | void addProducerShutdownHook(){
95 | Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
96 |
97 | @Override
98 | public void run() {
99 | close();
100 | }
101 | }));
102 | }
103 |
104 | public static class IntegerSerializer implements Serializer<Integer> {
105 | @Override
106 | public void configure(Map<String, ?> configs, boolean isKey) {}
107 |
108 | @Override
109 | public byte[] serialize(String topic, Integer data) {
110 | if (data == null)
111 | return null;
112 | else {
113 | ByteBuffer result = ByteBuffer.allocate(4);
114 | result.putInt(data);
115 | return result.array();
116 | }
117 | }
118 |
119 | @Override
120 | public void close() {}
121 | }
122 | }
123 |
--------------------------------------------------------------------------------
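
A configuration sketch for the publisher above (illustration only; the broker address is a placeholder, both keys fall back to the defaults shown in the constructor, and checked exceptions from JMXScanner are left to the caller). Records are keyed by the hash of the measurement name and tags, so samples of the same series always land in the same partition of the metrics topic, and publish() backs off for 10 seconds after a send failure.

    Properties props = new Properties();
    props.put("kafka.metrics.bootstrap.servers", "broker-1:9092");  // ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS
    props.put("kafka.metrics.topic", "_metrics");                   // ProducerPublisher.CONFIG_METRICS_TOPIC
    props.put("jmx.1.address", "localhost:19092");                  // plus any jmx.<id>.* scanner sections
    MeasurementPublisher publisher = new ProducerPublisher(props);
    new JMXScanner(props, publisher);
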
/core/src/test/java/JmxScannerMain.java:
--------------------------------------------------------------------------------
1 | import io.amient.kafka.metrics.JMXScanner;
2 | import io.amient.kafka.metrics.MeasurementPublisher;
3 | import io.amient.kafka.metrics.MeasurementV1;
4 | import org.apache.kafka.clients.consumer.internals.ConsumerMetrics;
5 |
6 | import javax.management.MalformedObjectNameException;
7 | import java.io.FileInputStream;
8 | import java.io.IOException;
9 |
10 | public class JmxScannerMain {
11 | public static void main(String[] args) {
12 | try {
13 | java.util.Properties props = new java.util.Properties();
14 | if (args.length == 0) {
15 | props.load(System.in);
16 | } else {
17 | props.load(new FileInputStream(args[0]));
18 | }
19 | props.list(System.out);
20 | MeasurementPublisher publisher = new MeasurementPublisher() {
21 |
22 | @Override
23 | public void publish(MeasurementV1 m) {
24 | System.out.println(m);
25 | }
26 |
27 | @Override
28 | public void close() {
29 |
30 | }
31 | };
32 | JMXScanner jmxScannerInstance = new JMXScanner(props, publisher);
33 | ConsumerMetrics consumer = null;
34 | while (!jmxScannerInstance.isTerminated()) {
35 | Thread.sleep(5000);
36 | }
37 | } catch (IOException e) {
38 | e.printStackTrace();
39 | System.exit(1);
40 | } catch (InterruptedException e) {
41 | e.printStackTrace();
42 | } catch (MalformedObjectNameException e) {
43 | e.printStackTrace();
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/core/src/test/resources/test.properties:
--------------------------------------------------------------------------------
1 | jmx.1.address=localhost:8881
--------------------------------------------------------------------------------
/discovery/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'java'
2 | apply plugin: 'application'
3 | mainClassName = 'io.amient.kafka.metrics.DiscoveryTool'
4 |
5 | dependencies {
6 | compile group: 'com.101tec', name: 'zkclient', version:'0.9'
7 | compile group: 'com.fasterxml.jackson.core', name:'jackson-databind', version: '2.7.3'
8 | compile group: 'net.sf.jopt-simple', name: 'jopt-simple', version: '5.0.1'
9 | }
10 |
11 | jar {
12 | manifest {
13 | attributes 'Main-Class': mainClassName
14 | }
15 |
16 | from (configurations.compile.collect { it.isDirectory() ? it : zipTree(it) } ) {
17 | exclude "META-INF/*.SF"
18 | exclude "META-INF/LICENSE*"
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/discovery/src/main/java/io/amient/kafka/metrics/Broker.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | public class Broker {
23 | public final String host;
24 | public final int jmxPort;
25 | public final String id;
26 | public final int port;
27 |
28 | public Broker(String id, String host, int port, int jmxPort) {
29 | this.id = id;
30 | this.host = host;
31 | this.port = port;
32 | this.jmxPort = jmxPort;
33 | }
34 |
35 | public String hostPort() {
36 | return host + ":" + port;
37 | }
38 | }
39 |
40 |
--------------------------------------------------------------------------------
/discovery/src/main/java/io/amient/kafka/metrics/Dashboard.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import com.fasterxml.jackson.databind.ObjectMapper;
23 | import com.fasterxml.jackson.databind.SerializationFeature;
24 | import com.fasterxml.jackson.databind.node.ArrayNode;
25 | import com.fasterxml.jackson.databind.node.ObjectNode;
26 | import joptsimple.internal.Strings;
27 |
28 | import java.io.FileOutputStream;
29 | import java.io.IOException;
30 |
31 | public class Dashboard {
32 | private final ObjectMapper mapper = new ObjectMapper();
33 | private final ObjectNode root;
34 | private final ArrayNode rows;
35 | private final String filename;
36 | private final String dataSource;
37 | private final ArrayNode templating;
38 | private int numPanels = 0;
39 |
40 | public Dashboard(String title, String dataSource, String filename) {
41 | this.dataSource = dataSource;
42 | this.filename = filename;
43 | root = mapper.createObjectNode();
44 | root.put("schemaVersion", 7);
45 | root.put("id", (String) null);
46 | root.put("version", 0);
47 | root.put("title", title);
48 | root.put("originalTitle", title);
49 | root.put("style", "dark");
50 | root.put("timezone", "browser");
51 | root.put("refresh", "10s");
52 | root.set("time", mapper.createObjectNode().put("from", "now-30m").put("to", "now"));
53 | root.put("editable", true);
54 | root.put("hideControls", false);
55 | root.put("sharedCrosshair", false);
56 | root.set("links", mapper.createArrayNode());
57 | root.set("tags", mapper.createArrayNode());
58 | templating = mapper.createArrayNode();
59 | root.set("templating", mapper.createObjectNode().set("list", templating));
60 | root.set("annotations", mapper.createObjectNode().set("list", mapper.createArrayNode()));
61 | rows = mapper.createArrayNode();
62 | root.set("rows", rows);
63 | }
64 |
65 | public void save() {
66 | mapper.enable(SerializationFeature.INDENT_OUTPUT);
67 | try {
68 | FileOutputStream out = new FileOutputStream(filename);
69 | try {
70 | mapper.writeValue(out, root);
71 | } finally {
72 | out.close();
73 | }
74 | } catch (IOException e) {
75 | e.printStackTrace();
76 | }
77 | }
78 |
79 | public ArrayNode newRow(String rowTitle, int heightPx, boolean expand) {
80 | ObjectNode row = rows.addObject();
81 | row.put("title", rowTitle);
82 | row.put("showTitle", rowTitle != null);
83 | row.put("height", heightPx + "px");
84 | row.put("editable", true);
85 | row.put("collapse", !expand);
86 | ArrayNode panels = mapper.createArrayNode();
87 | row.set("panels", panels);
88 | return panels;
89 | }
90 |
91 | public ObjectNode newGraph(ArrayNode rowPanels, String title, int span, boolean showLegend) {
92 | ObjectNode graph = newPanel(rowPanels, title, span, "graph");
93 | //
94 | graph.put("nullPointMode", "connected");
95 | graph.put("x-axis", true);
96 | graph.put("y-axis", true);
97 | graph.set("y_formats", mapper.createArrayNode().add("short").add("short"));
98 | graph.put("lines", true);
99 | graph.put("linewidth", 2);
100 | graph.put("steppedLine", false);
101 | graph.put("fill", 1);
102 | graph.put("points", false);
103 | graph.put("pointradius", 2);
104 | graph.put("bars", false);
105 | graph.put("percentage", false);
106 | graph.put("stack", false);
107 | //
108 | graph.set("tooltip", mapper.createObjectNode()
109 | .put("value_type", "cumulative")
110 | .put("shared", true));
111 | //
112 | graph.set("seriesOverrides", mapper.createArrayNode());
113 | graph.set("aliasColors", mapper.createObjectNode());
114 | graph.set("legend", mapper.createObjectNode()
115 | .put("show", showLegend)
116 | .put("values", false)
117 | .put("min", false)
118 | .put("max", false)
119 | .put("current", false)
120 | .put("total", false)
121 | .put("avg", false));
122 | //
123 | graph.set("grid", mapper.createObjectNode()
124 | .put("leftLogBase", 1)
125 | .put("leftMax", (Integer) null)
126 | .put("rightMax", (Integer) null)
127 | .put("leftMin", (Integer) null)
128 | .put("rightMin", (Integer) null)
129 | .put("rightLogBase", (Integer) 1)
130 | .put("threshold1", (Integer) null)
131 | .put("threshold1Color", "rgba(216, 200, 27, 0.27)")
132 | .put("threshold2", (Integer) null)
133 | .put("threshold2Color", "rgba(234, 112, 112, 0.22)"));
134 |
135 | return graph;
136 | }
137 |
138 | public ObjectNode newTable(ArrayNode rowPanels, String title, int span, String valueName, String alias, String query) {
139 | ObjectNode table = newPanel(rowPanels, title, span, "table");
140 | table.put("transform", "timeseries_aggregations");
141 | newTarget(table, alias, query);
142 | //
143 | ArrayNode columns = mapper.createArrayNode();
144 | columns.addObject().put("value", valueName).put("text", valueName);
145 | table.set("columns", columns);
146 | ArrayNode styles = mapper.createArrayNode();
147 | styles.addObject()
148 | .put("value", valueName)
149 | .put("type", "number")
150 | .put("pattern", "/.*/")
151 | .put("decimals", 0)
152 | //.put("colorMode", null)//
153 | .put("unit", "short");
154 | table.set("styles", styles);
155 | //
156 | table.put("showHeader", true);
157 | table.put("scroll", true);
158 | table.put("fontSize", "100%");
159 | table.put("pageSize", (Integer) null);
160 | table.set("sort", mapper.createObjectNode().put("col", (String) null).put("desc", false));
161 | return table;
162 | }
163 |
164 | public ObjectNode newStat(ArrayNode rowPanels, String title, int span, String query) {
165 | ObjectNode stat = newPanel(rowPanels, title, span, "singlestat");
166 | stat.put("valueName", "current");
167 | stat.put("decimals", 0);
168 | stat.put("maxDataPoints", 100);
169 | stat.put("prefix", "");
170 | stat.put("postfix", "");
171 | stat.put("nullText", (String) null);
172 | stat.put("prefixFontSize", "50%");
173 | stat.put("valueFontSize", "80%");
174 | stat.put("postfixFontSize", "50%");
175 | stat.put("format", "none");
176 | stat.put("nullPointMode", "connected");
177 | stat.set("sparkline", mapper.createObjectNode()
178 | .put("show", false)
179 | .put("full", false)
180 | );
181 | newTarget(stat, "", query);
182 | return stat;
183 | }
184 |
185 | public ObjectNode newVariable(String name, boolean includeAll, String... options) {
186 | ObjectNode variable = templating.addObject()
187 | .put("type", "custom")
188 | .put("name", name)
189 | .put("label", name)
190 | .put("includeAll", includeAll)
191 | .put("multi", false)
192 | .put("query", Strings.join(options, ","))
193 | .put("datasource", (String) null)
194 | .put("refresh", 0)
195 | .put("hide", 0);
196 | variable.set("current", mapper.createObjectNode()
197 | .put("text", "All")
198 | .put("value", "$__all")
199 | .set("tags", mapper.createArrayNode()));
200 | ArrayNode optionsArray = mapper.createArrayNode();
201 | variable.set("options", optionsArray);
202 | if (includeAll) {
203 | variable.put("allValue", ".+");
204 | optionsArray.addObject().put("text", "All").put("value", "$__all").put("selected", true);
205 | }
206 | for(String option: options) {
207 | optionsArray.addObject().put("text", option).put("value", option).put("selected", false);
208 | }
209 | return variable;
210 | }
211 |
212 | public ObjectNode newTarget(ObjectNode panel, String aliasPattern, String rawQuery) {
213 | ArrayNode targets = ((ArrayNode) panel.get("targets"));
214 | ObjectNode target = targets.addObject();
215 | target.put("refId", Character.toString((char) (64 + targets.size())));
216 | target.put("query", rawQuery);
217 | target.put("alias", aliasPattern);
218 | target.put("rawQuery", true);
219 | return target;
220 | }
221 |
222 | private ObjectNode newPanel(ArrayNode rowPanels, String title, int span, String type) {
223 | ObjectNode panel = rowPanels.addObject();
224 | panel.put("title", title);
225 | panel.put("span", span);
226 | panel.put("id", ++numPanels);
227 | panel.put("datasource", dataSource);
228 | panel.put("type", type);
229 | panel.put("renderer", "flot");
230 | //
231 | panel.put("timeFrom", (String) null);
232 | panel.put("timeShift", (String) null);
233 |
234 | //
235 | panel.put("editable", true);
236 | panel.put("error", false);
237 | panel.put("isNew", true);
238 | //
239 | panel.set("targets", mapper.createArrayNode());
240 | return panel;
241 | }
242 |
243 |
244 | public ObjectNode newObject() {
245 | return mapper.createObjectNode();
246 | }
247 |
248 | public ArrayNode newArray(String... values) {
249 | ArrayNode node = mapper.createArrayNode();
250 | for (String v : values) node.add(v);
251 | return node;
252 | }
253 |
254 | public ObjectNode get(ObjectNode node, String fieldName) {
255 | return (ObjectNode) node.get(fieldName);
256 | }
257 | }
258 |
--------------------------------------------------------------------------------
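
The builder above emits Grafana dashboard JSON (schemaVersion 7, as set in the constructor). A minimal sketch of producing a single-panel dashboard file (title, output path and InfluxDB query are placeholders; imports from com.fasterxml.jackson.databind.node omitted):

    Dashboard dash = new Dashboard("example", "Kafka Metrics InfluxDB", "./dashboards/example.json");
    ArrayNode row = dash.newRow("EXAMPLE ROW", 250, true);
    ObjectNode graph = dash.newGraph(row, "Bytes In / Sec", 6, true);
    dash.newTarget(graph, "$tag_topic",
            "SELECT sum(\"OneMinuteRate\") FROM \"BytesInPerSec\" " +
            "WHERE $timeFilter GROUP BY time(10s), \"topic\" fill(null)");
    dash.save();                                        // writes the JSON document to the given file
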
/discovery/src/main/java/io/amient/kafka/metrics/DiscoveryTool.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import com.fasterxml.jackson.databind.JsonNode;
23 | import com.fasterxml.jackson.databind.ObjectMapper;
24 | import com.fasterxml.jackson.databind.node.ArrayNode;
25 | import com.fasterxml.jackson.databind.node.ObjectNode;
26 | import joptsimple.OptionParser;
27 | import joptsimple.OptionSet;
28 | import joptsimple.OptionSpec;
29 | import org.I0Itec.zkclient.ZkClient;
30 | import org.I0Itec.zkclient.exception.ZkMarshallingError;
31 | import org.I0Itec.zkclient.serialize.ZkSerializer;
32 |
33 | import java.io.Closeable;
34 | import java.io.IOException;
35 | import java.net.URL;
36 | import java.util.*;
37 |
38 | public class DiscoveryTool extends ZkClient implements Closeable {
39 |
40 | private static final String DEFAULT_DATASOURCE = "Kafka Metrics InfluxDB";
41 | private static final String DEFAULT_DATABASE = "metrics";
42 |
43 | public static void main(String[] args) throws IOException {
44 |
45 | OptionParser parser = new OptionParser();
46 |
47 | parser.accepts("help", "Print usage help");
48 | OptionSpec<String> zookeeper = parser.accepts("zookeeper", "Address of the seed zookeeper server")
49 | .withRequiredArg().required();
50 | OptionSpec<String> dashboard = parser
51 | .accepts("dashboard", "Grafana dashboard name to be used in all generated configs")
52 | .withRequiredArg().required();
53 | OptionSpec<String> dashboardPath = parser
54 | .accepts("dashboard-path", "Grafana location, i.e. `./instance/.data/grafana/dashboards`")
55 | .withRequiredArg();
56 | OptionSpec<String> topic = parser.accepts("topic", "Name of the metrics topic to consume measurements from")
57 | .withRequiredArg();
58 | OptionSpec<String> influxdb = parser.accepts("influxdb", "InfluxDB connect URL (including user and password)")
59 | .withRequiredArg();
60 | OptionSpec<String> interval = parser.accepts("interval", "JMX scanning interval in seconds")
61 | .withRequiredArg().defaultsTo("10");
62 | //TODO --influxdb-database (DEFAULT_DATABASE)
63 | //TODO --dashboard-datasource (DEFAULT_DATASOURCE)
64 |
65 | if (args.length == 0 || args[0].equals("-h") || args[0].equals("--help")) {
66 | parser.printHelpOn(System.err);
67 | System.exit(0);
68 | }
69 |
70 | OptionSet opts = parser.parse(args);
71 |
72 | try {
73 |
74 | DiscoveryTool tool = new DiscoveryTool(opts.valueOf(zookeeper));
75 |
76 | try {
77 | List<String> topics = tool.getKafkaTopics();
78 | List<Broker> brokers = tool.getKafkaBrokers();
79 | int interval_s = Integer.parseInt(opts.valueOf(interval));
80 |
81 | if (opts.has(dashboard) && opts.has(dashboardPath)) {
82 | tool.generateDashboard(opts.valueOf(dashboard), brokers, topics, DEFAULT_DATASOURCE,
83 | opts.valueOf(dashboardPath), interval_s)
84 | .save();
85 | }
86 |
87 | if (opts.has(topic)) {
88 | //producer/reporter settings
89 | System.out.println("kafka.metrics.topic=" + opts.valueOf(topic));
90 | System.out.println("kafka.metrics.polling.interval=" + interval_s + "s");
91 | //TODO --producer-bootstrap for truly non-intrusive agent deployment,
92 | // i.e. when producing to a different cluster from the one being discovered
93 | System.out.println("kafka.metrics.bootstrap.servers=" + brokers.get(0).hostPort());
94 | //consumer settings
95 | System.out.println("consumer.topic=" + opts.valueOf(topic));
96 | System.out.println("consumer.bootstrap.servers=" + brokers.get(0).hostPort());
97 | System.out.println("consumer.group.id=kafka-metrics-"+ opts.valueOf(dashboard));
98 | }
99 |
100 | if (!opts.has(influxdb) || !opts.has(topic)) {
101 | tool.generateScannerConfig(brokers, opts.valueOf(dashboard), interval_s).list(System.out);
102 | }
103 |
104 | if (opts.has(influxdb)) {
105 | URL url = new URL(opts.valueOf(influxdb));
106 | System.out.println("influxdb.database=" + DEFAULT_DATABASE);
107 | System.out.println("influxdb.url=" + url.toString());
108 | if (url.getUserInfo() != null) {
109 | System.out.println("influxdb.username=" + url.getUserInfo().split(":")[0]);
110 | if (url.getUserInfo().contains(":")) {
111 | System.out.println("influxdb.password=" + url.getUserInfo().split(":")[1]);
112 | }
113 | }
114 | }
115 |
116 | System.out.flush();
117 | } catch (IOException e) {
118 | e.printStackTrace();
119 | System.exit(3);
120 | } finally {
121 | tool.close();
122 | }
123 | } catch (Exception e) {
124 | e.printStackTrace();
125 | System.exit(2);
126 | }
127 |
128 | }
129 |
130 | private final String brokersZkPath = "/brokers/ids";
131 |
132 | private final String topicsZkPath = "/brokers/topics";
133 |
134 | public Properties generateScannerConfig(List<Broker> brokers, String name, int interval_s) throws IOException {
135 | Properties scannerProps = new Properties();
136 | for (Broker broker : brokers) {
137 | Integer section = Integer.parseInt(broker.id) + 1;
138 | scannerProps.put(String.format("jmx.%d.address", section), broker.host + ":" + broker.jmxPort);
139 | scannerProps.put(String.format("jmx.%d.query.scope", section), "kafka.*:*");
140 | scannerProps.put(String.format("jmx.%d.query.interval.s", section), String.valueOf(interval_s));
141 | scannerProps.put(String.format("jmx.%d.tag.host", section), broker.host);
142 | scannerProps.put(String.format("jmx.%d.tag.service", section), String.format("broker-%s", broker.id));
143 | scannerProps.put(String.format("jmx.%d.tag.name", section), name);
144 | }
145 | return scannerProps;
146 | }
147 |
148 | public DiscoveryTool(String serverstring) {
149 | super(serverstring, 30000, 30000, new ZkSerializer() {
150 | private final ObjectMapper mapper = new ObjectMapper();
151 |
152 | @Override
153 | public byte[] serialize(Object o) throws ZkMarshallingError {
154 | throw new ZkMarshallingError("This is a read-only zkClient");
155 | }
156 |
157 | @Override
158 | public Object deserialize(byte[] bytes) throws ZkMarshallingError {
159 | try {
160 | return mapper.readTree(bytes);
161 | } catch (IOException e) {
162 | throw new ZkMarshallingError(e);
163 | }
164 | }
165 | });
166 | }
167 |
168 | public List<String> getKafkaTopics() {
169 | List<String> result = new LinkedList<>();
170 | for (String topic : getChildren(topicsZkPath)) {
171 | result.add(topic);
172 | }
173 | return result;
174 | }
175 |
176 | public List<Broker> getKafkaBrokers() throws IOException {
177 | List<Broker> result = new LinkedList<>();
178 | for (String brokerId : getChildren(brokersZkPath)) {
179 | result.add(getBroker(brokerId));
180 | }
181 | return result;
182 | }
183 |
184 | public Broker getBroker(String brokerId) {
185 | JsonNode json = readData(brokersZkPath + "/" + brokerId);
186 | return new Broker(
187 | brokerId,
188 | json.get("host").asText(),
189 | json.get("port").asInt(),
190 | json.get("jmx_port").asInt()
191 | );
192 | }
193 |
194 | public Dashboard generateDashboard(
195 | String name, List<Broker> brokers, List<String> topics, String dataSource, String path, int interval_s) {
196 | Dashboard dash = new Dashboard(name, dataSource, path + "/" + name + ".json");
197 |
198 | ///////////// ROW 1 - TOPIC METRICS
199 | dash.newVariable("topic", true, topics.toArray(new String[topics.size()]));
200 | ArrayNode topicsRow = dash.newRow("TOPIC METRICS FOR `$topic`", 250, true);
201 |
202 | ObjectNode graphT1 = dash.newGraph(topicsRow, "Input / Sec", 3, false).put("fill", 2).put("stack", false);
203 | graphT1.replace("y_formats", dash.newArray("bytes", "short"));
204 | graphT1.set("tooltip", dash.newObject().put("value_type", "individual").put("shared", false));
205 | dash.newTarget(graphT1, "$tag_topic", "SELECT sum(\"OneMinuteRate\") FROM \"BytesInPerSec\" " +
206 | "WHERE \"name\" = '" + name + "' AND \"topic\" =~ /^$topic$/ AND $timeFilter " +
207 | "GROUP BY time(" + interval_s + "s), \"topic\" fill(null)");
208 |
209 | ObjectNode graphT2 = dash.newGraph(topicsRow, "Input / Sec", 2, false).put("fill", 2).put("stack", false);
210 | graphT2.replace("y_formats", dash.newArray("wps", "short"));
211 | graphT2.set("tooltip", dash.newObject().put("value_type", "individual").put("shared", false));
212 | dash.newTarget(graphT2, "$tag_topic", "SELECT sum(\"OneMinuteRate\") FROM \"MessagesInPerSec\" " +
213 | "WHERE \"name\" = '" + name + "' AND \"topic\" =~ /^$topic$/ AND $timeFilter " +
214 | "GROUP BY time(" + interval_s + "s), \"topic\" fill(null)");
215 |
216 | ObjectNode graphT3 = dash.newGraph(topicsRow, "Failed Fetch Requests / Sec", 2, false)
217 | .put("fill", 4).put("stack", false);
218 | graphT3.set("tooltip", dash.newObject().put("value_type", "individual").put("shared", false));
219 | dash.newTarget(graphT3, "$tag_topic", "SELECT sum(\"OneMinuteRate\") FROM \"FailedFetchRequestsPerSec\" " +
220 | "WHERE \"name\" = '" + name + "' AND \"topic\" =~ /^$topic$/ AND $timeFilter " +
221 | "GROUP BY time(" + interval_s + "s), \"topic\" fill(null)");
222 |
223 | ObjectNode graphT4 = dash.newGraph(topicsRow, "Output / Sec", 5, false).put("fill", 2).put("stack", false);
224 | graphT4.replace("y_formats", dash.newArray("bytes", "short"));
225 | graphT4.replace("aliasColors", dash.newObject().put("$topic$", "#447EBC"));
226 | graphT4.set("tooltip", dash.newObject().put("value_type", "individual").put("shared", false));
227 | dash.newTarget(graphT4, "$tag_topic", "SELECT sum(\"OneMinuteRate\") FROM \"BytesOutPerSec\" " +
228 | "WHERE \"name\" = '" + name + "' AND \"topic\" =~ /^$topic$/ AND $timeFilter " +
229 | "GROUP BY time(" + interval_s + "s), \"topic\" fill(null)");
230 |
231 | ///////////// ROW 2 - AGGREGATED CLUSTER METRICS
232 | ArrayNode clusterRow = dash.newRow(String.format("CLUSTER METRICS FOR %d broker(s)", brokers.size()), 172, true);
233 |
234 | dash.newStat(clusterRow, "Controllers", 1,
235 | "SELECT sum(\"Value\") FROM \"ActiveControllerCount\" " +
236 | "WHERE \"group\" = 'kafka.controller' AND \"name\" = '" + name + "' AND $timeFilter " +
237 | "GROUP BY time(" + interval_s + "s)")
238 | .put("valueFontSize", "150%");
239 |
240 | ObjectNode graph1 = dash.newGraph(clusterRow, "Under-Replicated Partitions", 2, false).put("bars", true);
241 | dash.newTarget(graph1, "$tag_service", "SELECT mean(\"Value\") FROM \"UnderReplicatedPartitions\" " +
242 | "WHERE \"group\" = 'kafka.server' AND \"name\" = '" + name + "' AND $timeFilter " +
243 | "GROUP BY time(" + interval_s + "s), \"service\"");
244 |
245 | dash.newTable(clusterRow, "Partition Count", 2, "avg", "$tag_service",
246 | "SELECT last(\"Value\") FROM \"PartitionCount\" " +
247 | "WHERE \"group\" = 'kafka.server' AND \"name\" = '" + name + "' AND $timeFilter " +
248 | "GROUP BY time(" + interval_s + "s), \"service\"")
249 | .put("transform", "timeseries_aggregations")
250 | .put("showHeader", false);
251 |
252 | //Total Maximum Log Flush Time
253 | ObjectNode graph5 = dash.newGraph(clusterRow, "Log Flush Time (98th maximum)", 2, false)
254 | .put("linewidth",1).put("points", false).put("fill",0);
255 | graph5.replace("y_formats", dash.newArray("ms", "short"));
256 | dash.get(graph5, "grid")
257 | .put("threshold1", 6).put("threshold1Color", "rgba(236, 118, 21, 0.21)")
258 | .put("threshold2", 12).put("threshold2Color", "rgba(234, 112, 112, 0.22)");
259 | dash.newTarget(graph5, "$tag_service", "SELECT max(\"98thPercentile\") as \"98thPercentile\" " +
260 | "FROM \"LogFlushRateAndTimeMs\" " +
261 | "WHERE \"group\" = 'kafka.log' AND \"name\" = '" + name + "' AND $timeFilter " +
262 | "GROUP BY time(1m), \"service\"");
263 |
264 | ObjectNode graph2 = dash.newGraph(clusterRow, "Input / Sec", 2, false)
265 | .put("fill", 2).put("stack", true);
266 | graph2.replace("y_formats", dash.newArray("bytes", "short"));
267 | graph2.replace("tooltip", dash.newObject().put("value_type", "individual").put("shared", true));
268 | dash.get(graph2, "grid").put("leftMin", 0);
269 | dash.newTarget(graph2, "$tag_service", "SELECT sum(\"OneMinuteRate\") FROM \"BytesInPerSec\" " +
270 | "WHERE \"group\" = 'kafka.server' AND \"topic\" =~ /^$topic$/ AND \"name\" = '" + name + "' " +
271 | "AND $timeFilter " +
272 | "GROUP BY time(" + interval_s + "s), \"service\"");
273 |
274 | ObjectNode graph3 = dash.newGraph(clusterRow, "Output / Sec", 2, false).put("fill", 2).put("stack", true);
275 | graph3.replace("y_formats", dash.newArray("bytes", "short"));
276 | graph3.replace("tooltip", dash.newObject().put("value_type", "individual").put("shared", true));
277 | dash.get(graph3, "grid").put("leftMin", 0);
278 | dash.newTarget(graph3, "$tag_service", "SELECT sum(\"OneMinuteRate\") FROM \"BytesOutPerSec\" " +
279 | "WHERE \"group\" = 'kafka.server' AND \"topic\" =~ /^$topic$/ AND \"name\" = '" + name + "' " +
280 | "AND $timeFilter " +
281 | "GROUP BY time(" + interval_s + "s), \"service\"");
282 |
283 | dash.newStat(clusterRow, "Requests/Sec", 1,
284 | "SELECT mean(\"OneMinuteRate\") FROM \"RequestsPerSec\" " +
285 | "WHERE \"group\" = 'kafka.network' AND \"name\" = '" + name + "' AND $timeFilter " +
286 | "GROUP BY time(" + interval_s + "s)")
287 | .put("decimals", 1)
288 | .put("valueName", "avg")
289 | .put("valueFontSize", "35%")
290 | .put("format", "short")
291 | .replace("sparkline", dash.newObject().put("show", true).put("full", false));
292 |
293 |
294 | ///////////// ROW (2 + b) - BROKER-LEVEL METRICS
295 | for (Broker broker : brokers) {
296 | //extra row for each broker
297 | ArrayNode brokerRow = dash.newRow(
298 | String.format("Kafka Broker ID %s @ %s", broker.id, broker.hostPort()), 250, false);
299 |
300 | //Purgatory graph
301 | ObjectNode graph6 = dash.newGraph(brokerRow, "Num.delayed Operations", 4, true);
302 | dash.newTarget(graph6, "$col",
303 | "SELECT max(\"Value\"), median(\"Value\"), min(\"Value\") FROM \"NumDelayedOperations\" " +
304 | "WHERE \"name\" = '" + name + "' AND \"service\" = 'broker-1' AND $timeFilter " +
305 | "GROUP BY time($interval) fill(null)");
306 |
307 | //Log Flush Time graph
308 | ObjectNode graph7 = dash.newGraph(brokerRow, "Log Flush Time (mean)", 4, false)
309 | .put("linewidth",1).put("points", true).put("pointradius", 1).put("fill", 0);
310 | graph7.replace("y_formats", dash.newArray("ms", "short"));
311 | dash.get(graph7, "grid")
312 | .put("leftLogBase", 2)
313 | .put("threshold1", 100).put("threshold1Color", "rgba(236, 118, 21, 0.21)")
314 | .put("threshold2", 250).put("threshold2Color", "rgba(234, 112, 112, 0.22)");
315 | dash.newTarget(graph7, "$col", "SELECT sum(\"999thPercentile\") as \"999thPercentile\" " +
316 | "FROM \"LogFlushRateAndTimeMs\" " +
317 | "WHERE \"group\" = 'kafka.log' AND \"service\" = '" +String.format("broker-%s", broker.id)+"'" +
318 | "AND \"name\" = '" + name + "' AND $timeFilter " +
319 | "GROUP BY time(30s)");
320 | dash.newTarget(graph7, "$col", "SELECT sum(\"99thPercentile\") as \"99thPercentile\" " +
321 | "FROM \"LogFlushRateAndTimeMs\" " +
322 | "WHERE \"group\" = 'kafka.log' AND \"service\" = '" +String.format("broker-%s", broker.id)+"'" +
323 | "AND \"name\" = '" + name + "' AND $timeFilter " +
324 | "GROUP BY time(30s)");
325 |
326 | dash.newTarget(graph7, "$col", "SELECT sum(\"95thPercentile\") as \"95thPercentile\" " +
327 | "FROM \"LogFlushRateAndTimeMs\" " +
328 | "WHERE \"group\" = 'kafka.log' AND \"service\" = '" +String.format("broker-%s", broker.id)+"'" +
329 | "AND \"name\" = '" + name + "' AND $timeFilter " +
330 | "GROUP BY time(30s)");
331 |
332 | //Combined Throughput Graph
333 | ObjectNode graph8 = dash.newGraph(brokerRow, "Throughput", 4, true)
334 | .put("linewidth",1).put("fill", 6).put("y-axis", false);
335 | graph8.replace("y_formats", dash.newArray("bytes", "short"));
336 | graph8.set("aliasColors", dash.newObject().put("Input", "#BF1B00").put("Output", "#508642"));
337 | dash.newTarget(graph8, "Output",
338 | "SELECT sum(\"OneMinuteRate\") * -1 FROM \"BytesOutPerSec\" " +
339 | "WHERE \"name\" = '" + name + "' AND \"topic\" =~ /^$topic$/ " +
340 | "AND \"service\" = '" +String.format("broker-%s", broker.id)+"' AND $timeFilter " +
341 | "GROUP BY time($interval) fill(null)");
342 | dash.newTarget(graph8, "Input",
343 | "SELECT sum(\"OneMinuteRate\") FROM \"BytesInPerSec\" " +
344 | "WHERE \"name\" = '" + name + "' AND \"topic\" =~ /^$topic$/ " +
345 | "AND \"service\" = '"+String.format("broker-%s", broker.id)+"' AND $timeFilter " +
346 | "GROUP BY time($interval) fill(null)");
347 | }
348 |
349 | return dash;
350 | }
351 | }
--------------------------------------------------------------------------------
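
The tool is intended to be run from the command line with the options declared above; an equivalent programmatic invocation, shown only as a sketch (all values are placeholders and the IOException declared by main is left to the caller):

    DiscoveryTool.main(new String[]{
            "--zookeeper", "localhost:2181",                   // seed zookeeper of the cluster to discover
            "--dashboard", "my-cluster",                       // name used in generated configs and dashboards
            "--dashboard-path", "./.data/grafana/dashboards",  // where the Grafana JSON is written
            "--topic", "_metrics",                             // print producer/consumer settings for this topic
            "--influxdb", "http://root:root@localhost:8086",   // print influxdb.* loader settings
            "--interval", "10"                                 // JMX scanning interval in seconds
    });
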
/discovery/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=WARN, stdout
2 |
3 | log4j.logger.co.gridport.kafka.Mirror = INFO
4 | log4j.logger.co.gridport.kafka.MirrorExecutor = INFO
5 | log4j.logger.co.gridport.kafka.MirrorPartitioner = WARN
6 |
7 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
8 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
9 | log4j.appender.stdout.layout.ConversionPattern=%p %m (%c:%L) %n
10 |
11 | log4j.logger.io.amient.kafka.metrics=INFO
12 |
13 |
--------------------------------------------------------------------------------
/doc/discovery-example-3-brokers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amient/kafka-metrics/2f6107c062e6b5ec2bc3c12f9e82bcca9304a6fc/doc/discovery-example-3-brokers.png
--------------------------------------------------------------------------------
/doc/kafka-metrics-scenario0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amient/kafka-metrics/2f6107c062e6b5ec2bc3c12f9e82bcca9304a6fc/doc/kafka-metrics-scenario0.png
--------------------------------------------------------------------------------
/doc/kafka-metrics-scenario1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amient/kafka-metrics/2f6107c062e6b5ec2bc3c12f9e82bcca9304a6fc/doc/kafka-metrics-scenario1.png
--------------------------------------------------------------------------------
/doc/kafka-metrics-scenario2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amient/kafka-metrics/2f6107c062e6b5ec2bc3c12f9e82bcca9304a6fc/doc/kafka-metrics-scenario2.png
--------------------------------------------------------------------------------
/doc/kafka-metrics-scenario3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amient/kafka-metrics/2f6107c062e6b5ec2bc3c12f9e82bcca9304a6fc/doc/kafka-metrics-scenario3.png
--------------------------------------------------------------------------------
/doc/metrics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amient/kafka-metrics/2f6107c062e6b5ec2bc3c12f9e82bcca9304a6fc/doc/metrics.png
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 |
5 | influxdb:
6 | image: influxdb:1.5.3
7 | environment:
8 | INFLUXDB_HTTP_ENABLED: "true"
9 | INFLUXDB_HTTP_AUTH_ENABLED: "false"
10 | volumes:
11 | - ./.data/influxdb:/var/lib/influxdb
12 | ports:
13 | - "8086:8086"
14 |
15 | grafana:
16 | image: grafana/grafana:5.1.0
17 | environment:
18 | GF_LOG_MODE: "file"
19 | GF_PATHS_LOGS: "/logs"
20 | GF_SECURITY_ADMIN_PASSWORD: "admin"
21 | GF_DASHBOARDS_JSON_ENABLED: "true"
22 | GF_DASHBOARDS_JSON_PATH: "/dashboards"
23 | GF_PATHS_DATA: "/data"
24 | volumes:
25 | - ./.data/grafana/dashboards:/dashboards
26 | - ./.data/grafana/db:/data
27 | - ./.data/grafana/logs:/logs
28 | ports:
29 | - "3000:3000"
30 |
--------------------------------------------------------------------------------
/docker-instance.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 |
5 | wait_for_endpoint() {
6 | URL=$1
7 | EXPECTED=$2
8 | MAX_WAIT=$3
9 | while [ $MAX_WAIT -gt 0 ]; do
10 | echo -en "\r$URL $MAX_WAIT";
11 | RESPONSE_STATUS=$(curl --stderr /dev/null -X GET -i "$URL" | head -1 | cut -d' ' -f2)
12 | if [ ! -z $RESPONSE_STATUS ] ; then
13 | if [ $RESPONSE_STATUS == $EXPECTED ]; then
14 | return 1
15 | else
16 | echo "UNEXPECTED RESPONSE_STATUS $RESPONSE_STATUS FOR $URL"
17 | return 0
18 | fi
19 | fi
20 | let MAX_WAIT=MAX_WAIT-1
21 | sleep 1
22 | done
23 | return 0
24 | }
25 |
26 | cd $DIR
27 |
28 | docker-compose up &
29 |
30 | terminate() {
31 | cd $DIR
32 | docker-compose down
33 | }
34 |
35 | trap terminate EXIT INT
36 |
37 | GRAFANA_URL="http://admin:admin@localhost:3000"
38 |
39 | INFLUXDB_URL="http://localhost:8086"
40 |
41 | wait_for_endpoint "$INFLUXDB_URL/ping" 204 1800
42 | if [ $? == 1 ]; then
43 | echo "influxdb endpoind check successful"
44 | curl -G "$INFLUXDB_URL/query" --data-urlencode "q=CREATE DATABASE metrics"
45 | else
46 | echo "influxdb endpoint check failed"
47 | exit 2;
48 | fi
49 |
50 | wait_for_endpoint "$GRAFANA_URL/api/login/ping" 401 30
51 | if [ $? == 1 ]; then
52 | echo "grafana endpoind check successful"
53 | echo "configuring 'Kafka Metrics InfluxDB' datasource -> $INFLUXDB_URL in the provided Grafana instance @ $GRAFANA_URL"
54 | curl "$GRAFANA_URL/api/datasources" -s -X POST -H 'Content-Type: application/json;charset=UTF-8' --data-binary '{"name": "Kafka Metrics InfluxDB", "type": "influxdb", "access": "direct", "url": "'$INFLUXDB_URL'", "password": "none", "user": "kafka-metrics", "database": "metrics", "isDefault": true}'
55 | echo ""
56 | else
57 | exit 1;
58 | fi
59 |
60 |
61 | tail -f "$DIR/.data/grafana/logs/grafana.log"
62 |
63 |
64 |
65 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amient/kafka-metrics/2f6107c062e6b5ec2bc3c12f9e82bcca9304a6fc/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Tue Oct 23 18:30:11 BST 2018
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.9-all.zip
7 |
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Attempt to set APP_HOME
10 | # Resolve links: $0 may be a link
11 | PRG="$0"
12 | # Need this for relative symlinks.
13 | while [ -h "$PRG" ] ; do
14 | ls=`ls -ld "$PRG"`
15 | link=`expr "$ls" : '.*-> \(.*\)$'`
16 | if expr "$link" : '/.*' > /dev/null; then
17 | PRG="$link"
18 | else
19 | PRG=`dirname "$PRG"`"/$link"
20 | fi
21 | done
22 | SAVED="`pwd`"
23 | cd "`dirname \"$PRG\"`/" >/dev/null
24 | APP_HOME="`pwd -P`"
25 | cd "$SAVED" >/dev/null
26 |
27 | APP_NAME="Gradle"
28 | APP_BASE_NAME=`basename "$0"`
29 |
30 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
31 | DEFAULT_JVM_OPTS=""
32 |
33 | # Use the maximum available, or set MAX_FD != -1 to use that value.
34 | MAX_FD="maximum"
35 |
36 | warn ( ) {
37 | echo "$*"
38 | }
39 |
40 | die ( ) {
41 | echo
42 | echo "$*"
43 | echo
44 | exit 1
45 | }
46 |
47 | # OS specific support (must be 'true' or 'false').
48 | cygwin=false
49 | msys=false
50 | darwin=false
51 | nonstop=false
52 | case "`uname`" in
53 | CYGWIN* )
54 | cygwin=true
55 | ;;
56 | Darwin* )
57 | darwin=true
58 | ;;
59 | MINGW* )
60 | msys=true
61 | ;;
62 | NONSTOP* )
63 | nonstop=true
64 | ;;
65 | esac
66 |
67 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
68 |
69 | # Determine the Java command to use to start the JVM.
70 | if [ -n "$JAVA_HOME" ] ; then
71 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
72 | # IBM's JDK on AIX uses strange locations for the executables
73 | JAVACMD="$JAVA_HOME/jre/sh/java"
74 | else
75 | JAVACMD="$JAVA_HOME/bin/java"
76 | fi
77 | if [ ! -x "$JAVACMD" ] ; then
78 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
79 |
80 | Please set the JAVA_HOME variable in your environment to match the
81 | location of your Java installation."
82 | fi
83 | else
84 | JAVACMD="java"
85 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
86 |
87 | Please set the JAVA_HOME variable in your environment to match the
88 | location of your Java installation."
89 | fi
90 |
91 | # Increase the maximum file descriptors if we can.
92 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
93 | MAX_FD_LIMIT=`ulimit -H -n`
94 | if [ $? -eq 0 ] ; then
95 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
96 | MAX_FD="$MAX_FD_LIMIT"
97 | fi
98 | ulimit -n $MAX_FD
99 | if [ $? -ne 0 ] ; then
100 | warn "Could not set maximum file descriptor limit: $MAX_FD"
101 | fi
102 | else
103 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
104 | fi
105 | fi
106 |
107 | # For Darwin, add options to specify how the application appears in the dock
108 | if $darwin; then
109 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
110 | fi
111 |
112 | # For Cygwin, switch paths to Windows format before running java
113 | if $cygwin ; then
114 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
115 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
116 | JAVACMD=`cygpath --unix "$JAVACMD"`
117 |
118 | # We build the pattern for arguments to be converted via cygpath
119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120 | SEP=""
121 | for dir in $ROOTDIRSRAW ; do
122 | ROOTDIRS="$ROOTDIRS$SEP$dir"
123 | SEP="|"
124 | done
125 | OURCYGPATTERN="(^($ROOTDIRS))"
126 | # Add a user-defined pattern to the cygpath arguments
127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129 | fi
130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
131 | i=0
132 | for arg in "$@" ; do
133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135 |
136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138 | else
139 | eval `echo args$i`="\"$arg\""
140 | fi
141 | i=$((i+1))
142 | done
143 | case $i in
144 | (0) set -- ;;
145 | (1) set -- "$args0" ;;
146 | (2) set -- "$args0" "$args1" ;;
147 | (3) set -- "$args0" "$args1" "$args2" ;;
148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154 | esac
155 | fi
156 |
157 | # Escape application args
158 | save ( ) {
159 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
160 | echo " "
161 | }
162 | APP_ARGS=$(save "$@")
163 |
164 | # Collect all arguments for the java command, following the shell quoting and substitution rules
165 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
166 |
167 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
168 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
169 | cd "$(dirname "$0")"
170 | fi
171 |
172 | exec "$JAVACMD" "$@"
173 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | set DIRNAME=%~dp0
12 | if "%DIRNAME%" == "" set DIRNAME=.
13 | set APP_BASE_NAME=%~n0
14 | set APP_HOME=%DIRNAME%
15 |
16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
17 | set DEFAULT_JVM_OPTS=
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 |
53 | :win9xME_args
54 | @rem Slurp the command line arguments.
55 | set CMD_LINE_ARGS=
56 | set _SKIP=2
57 |
58 | :win9xME_args_slurp
59 | if "x%~1" == "x" goto execute
60 |
61 | set CMD_LINE_ARGS=%*
62 |
63 | :execute
64 | @rem Setup the command line
65 |
66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
67 |
68 | @rem Execute Gradle
69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
70 |
71 | :end
72 | @rem End local scope for the variables with windows NT shell
73 | if "%ERRORLEVEL%"=="0" goto mainEnd
74 |
75 | :fail
76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
77 | rem the _cmd.exe /c_ return code!
78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
79 | exit /b 1
80 |
81 | :mainEnd
82 | if "%OS%"=="Windows_NT" endlocal
83 |
84 | :omega
85 |
--------------------------------------------------------------------------------
/influxdb-loader/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'java'
2 | apply plugin: 'application'
3 | mainClassName = 'io.amient.kafka.metrics.InfluxDbLoaderMain'
4 |
5 | dependencies {
6 | compile project(':core')
7 | compile group: 'org.apache.kafka', name: 'kafka_' + baseScalaVersion, version: rootProject.kafkaVersion
8 | compile group: 'org.apache.kafka', name: 'kafka-clients', version: rootProject.kafkaVersion
9 | compile group: 'org.influxdb', name: 'influxdb-java', version: '2.14'
10 | }
11 |
12 | jar {
13 | manifest {
14 | attributes 'Main-Class': mainClassName
15 | }
16 |
17 | from (configurations.compile.collect { it.isDirectory() ? it : zipTree(it) } ) {
18 | exclude "META-INF/*.SF"
19 | exclude "META-INF/LICENSE*"
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/influxdb-loader/conf/local-jmx.properties:
--------------------------------------------------------------------------------
1 | influxdb.database=metrics
2 | influxdb.url=http://localhost:8086
3 | influxdb.username=root
4 | influxdb.password=root
5 |
6 | jmx.1.address=localhost:9010
7 | jmx.1.query.scope=Affinity:*
8 | jmx.1.query.interval.s=10
9 | jmx.1.tag.host=localhost
10 | jmx.1.tag.service=broker-0
11 |
--------------------------------------------------------------------------------
/influxdb-loader/conf/local-topic.properties:
--------------------------------------------------------------------------------
1 | influxdb.database=metrics
2 | influxdb.url=http://localhost:8086
3 | influxdb.username=admin
4 | influxdb.password=
5 |
6 | consumer.topic=metrics
7 | consumer.numThreads=1
8 | consumer.bootstrap.servers=localhost:9092
9 | consumer.group.id=kafka-metric-collector
10 | consumer.zookeeper.session.timeout.ms=2000
11 | consumer.zookeeper.sync.time.ms=200
12 | consumer.auto.commit.interval.ms=10000
13 | consumer.auto.offset.reset=earliest
14 |
--------------------------------------------------------------------------------
/influxdb-loader/src/main/java/io/amient/kafka/metrics/ConsumerMetrics.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.apache.kafka.clients.consumer.ConsumerConfig;
23 | import org.apache.kafka.clients.consumer.ConsumerRecord;
24 | import org.apache.kafka.clients.consumer.KafkaConsumer;
25 | import org.apache.kafka.common.serialization.StringDeserializer;
26 | import org.slf4j.Logger;
27 | import org.slf4j.LoggerFactory;
28 |
29 | import java.util.*;
30 |
31 | public class ConsumerMetrics {
32 |
33 | static private final Logger log = LoggerFactory.getLogger(ConsumerMetrics.class);
34 |
35 | static private final String CONFIG_PREFIX = "consumer.";
36 | static final String COFNIG_CONSUMER_TOPIC = CONFIG_PREFIX + "topic";
37 | static final String COFNIG_CONSUMER_THREADS = CONFIG_PREFIX + "numThreads";
38 |
39 | static final String DEFAULT_CLIENT_ID = "kafka-metrics";
40 |
41 |     private KafkaConsumer<String, List<MeasurementV1>> consumer = null;
42 |
43 | volatile private Boolean terminated = false;
44 |
45 | public ConsumerMetrics(Properties props) {
46 | String topic = props.getProperty(COFNIG_CONSUMER_TOPIC, "metrics");
47 | Integer numThreads = Integer.parseInt(props.getProperty(COFNIG_CONSUMER_THREADS, "1"));
48 |
49 | Properties consumerProps = new Properties();
50 | consumerProps.put("client.id", DEFAULT_CLIENT_ID);
51 | for (Enumeration e = props.keys(); e.hasMoreElements(); ) {
52 | String propKey = (String) e.nextElement();
53 | String propVal = props.get(propKey).toString();
54 | if (propKey.startsWith(CONFIG_PREFIX)) {
55 | propKey = propKey.substring(9);
56 | consumerProps.put(propKey, propVal);
57 | log.info(propKey + "=" + propVal);
58 | }
59 | }
60 | consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
61 | consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, MeasurementDeserializer.class.getName());
62 |
63 | if (consumerProps.size() <= 1) {
64 | log.info("ConsumerMetrics disabled");
65 | return;
66 | }
67 |
68 | consumer = new KafkaConsumer<>(consumerProps);
69 |
70 | addShutdownHook();
71 |
72 | try {
73 | consumer.subscribe(Arrays.asList(topic));
74 |
75 | new Task(new InfluxDbPublisher(props), consumer).run();
76 |
77 | } finally {
78 | terminated = true;
79 | }
80 |
81 | shutdown();
82 |
83 | }
84 |
85 | private void shutdown() {
86 | if (consumer != null) {
87 | consumer.close();
88 | }
89 | }
90 |
91 | private void addShutdownHook() {
92 | Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
93 | @Override
94 | public void run() {
95 | shutdown();
96 | }
97 | }));
98 | }
99 |
100 |
101 | public boolean isTerminated() {
102 | return terminated;
103 | }
104 |
105 | public static class Task implements Runnable {
106 |
107 | final private MeasurementFormatter formatter;
108 | final private MeasurementPublisher publisher;
109 |         final private KafkaConsumer<String, List<MeasurementV1>> consumer;
110 |
111 |         public Task(MeasurementPublisher publisher, KafkaConsumer<String, List<MeasurementV1>> consumer) {
112 | this.consumer = consumer;
113 | this.formatter = new MeasurementFormatter();
114 | this.publisher = publisher;
115 | }
116 |
117 | public void run() {
118 |
119 | try {
120 | while (true) {
121 |                 Iterator<ConsumerRecord<String, List<MeasurementV1>>> it = consumer.poll(250).iterator();
122 | while (it.hasNext()) {
123 | try {
124 |                         ConsumerRecord<String, List<MeasurementV1>> m = it.next();
125 | if (m.value() != null) {
126 | for (MeasurementV1 measurement : m.value()) {
127 | try {
128 | publisher.publish(measurement);
129 | } catch (RuntimeException e) {
130 |
131 | log.error("Unable to publish measurement " + formatter.toString(measurement)
132 | + "tag count=" + measurement.getFields().size()
133 | + ", field count=" + measurement.getFields().size()
134 | , e);
135 |
136 | }
137 | }
138 | }
139 | } catch (Throwable e) {
140 | e.printStackTrace();
141 | return;
142 | }
143 | }
144 | }
145 | } finally {
146 | System.out.println("Finished metrics consumer task");
147 | publisher.close();
148 | }
149 | }
150 | }
151 |
152 | }
153 |
--------------------------------------------------------------------------------
/influxdb-loader/src/main/java/io/amient/kafka/metrics/InfluxDbLoaderMain.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.slf4j.Logger;
23 | import org.slf4j.LoggerFactory;
24 |
25 | import java.io.FileInputStream;
26 | import java.io.IOException;
27 |
28 | public class InfluxDbLoaderMain {
29 | private static Logger log = LoggerFactory.getLogger(InfluxDbLoaderMain.class);
30 |
31 | public static void main(String[] args) {
32 | try {
33 | java.util.Properties props = new java.util.Properties();
34 | if (args.length == 0) {
35 | props.load(System.in);
36 | log.info("Configuring InfluxDBLoader from STDIN");
37 | } else {
38 | log.info("Configuring InfluxDBLoader from property file: " + args[0]);
39 | props.load(new FileInputStream(args[0]));
40 | }
41 | props.list(System.out);
42 | try {
43 | MeasurementPublisher publisher = new InfluxDbPublisher(props);
44 | JMXScanner jmxScannerInstance = new JMXScanner(props, publisher);
45 | ConsumerMetrics consumer = props.containsKey(ConsumerMetrics.COFNIG_CONSUMER_TOPIC)
46 | ? new ConsumerMetrics(props) : null;
47 | while (!jmxScannerInstance.isTerminated() || (consumer != null && !consumer.isTerminated())) {
48 | Thread.sleep(5000);
49 | }
50 | } catch (Throwable e) {
51 | log.error("Failed to launch KafkaMetrics JMX Scanner", e);
52 | }
53 |
54 | } catch (IOException e) {
55 | e.printStackTrace();
56 | System.exit(1);
57 | }
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/influxdb-loader/src/main/java/io/amient/kafka/metrics/InfluxDbPublisher.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.influxdb.InfluxDB;
23 | import org.influxdb.InfluxDBFactory;
24 | import org.influxdb.dto.Point;
25 | import org.slf4j.Logger;
26 | import org.slf4j.LoggerFactory;
27 |
28 | import java.util.Properties;
29 | import java.util.concurrent.TimeUnit;
30 |
31 | public class InfluxDbPublisher implements MeasurementPublisher {
32 |
33 | static private final Logger log = LoggerFactory.getLogger(InfluxDbPublisher.class);
34 | static final String COFNIG_INFLUXDB_DATABASE = "influxdb.database";
35 | static final String COFNIG_INFLUXDB_URL = "influxdb.url";
36 | static final String COFNIG_INFLUXDB_RETENTION_POLICY = "influxdb.retention.policy";
37 | static final String COFNIG_INFLUXDB_USERNAME = "influxdb.username";
38 | static final String COFNIG_INFLUXDB_PASSWORD = "influxdb.password";
39 | private static final int DEFAULT_BACK_OFF_MS = 15000;
40 | final private String dbName;
41 | final private String address;
42 | private final String username;
43 | private final String password;
44 | private final String retention;
45 |
46 | private InfluxDB influxDB = null;
47 |
48 | volatile private long failureTimestamp = 0;
49 |
50 | public InfluxDbPublisher(Properties config) {
51 | this.dbName = config.getProperty(COFNIG_INFLUXDB_DATABASE, "metrics");
52 | this.address = config.getProperty(COFNIG_INFLUXDB_URL, "http://localhost:8086");
53 | this.username = config.getProperty(COFNIG_INFLUXDB_USERNAME, "root");
54 | this.password = config.getProperty(COFNIG_INFLUXDB_PASSWORD, "root");
55 | this.retention = config.getProperty(COFNIG_INFLUXDB_RETENTION_POLICY, "default");
56 | }
57 |
58 | public void publish(MeasurementV1 m) {
59 | Long time = m.getTimestamp();
60 | if (failureTimestamp > 0) {
61 | if (failureTimestamp + DEFAULT_BACK_OFF_MS < time) return; else failureTimestamp = 0;
62 | }
63 | try {
64 | tryPublish(m);
65 | } catch (Throwable e) {
66 | log.warn("Failed to publish measurement to InfluxDB, will retry...", e);
67 | influxDB = null;
68 | failureTimestamp = time;
69 | }
70 | }
71 |
72 | public void tryPublish(MeasurementV1 m) {
73 | if (influxDB == null) {
74 | influxDB = InfluxDBFactory.connect(address, username, password);
75 | influxDB.enableBatch(1000, 100, TimeUnit.MILLISECONDS);
76 | }
77 | Point.Builder builder = Point.measurement(m.getName().toString()).time(m.getTimestamp(), TimeUnit.MILLISECONDS);
78 |         for (java.util.Map.Entry<CharSequence, CharSequence> tag : m.getTags().entrySet()) {
79 | builder.tag(tag.getKey().toString(), tag.getValue().toString());
80 | }
81 |         for (java.util.Map.Entry<CharSequence, Double> field : m.getFields().entrySet()) {
82 | builder.field(field.getKey().toString(), field.getValue());
83 | }
84 | influxDB.write(dbName, retention, builder.build());
85 | }
86 |
87 |
88 |
89 | public void close() {
90 |
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/influxdb-loader/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=WARN, stdout
2 |
3 | log4j.logger.co.gridport.kafka.Mirror = INFO
4 | log4j.logger.co.gridport.kafka.MirrorExecutor = INFO
5 | log4j.logger.co.gridport.kafka.MirrorPartitioner = WARN
6 |
7 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
8 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
9 | log4j.appender.stdout.layout.ConversionPattern=%p %m (%c:%L) %n
10 |
11 | log4j.logger.io.amient.kafka.metrics=INFO
12 |
13 |
--------------------------------------------------------------------------------
/metrics-agent/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'java'
2 | apply plugin: 'application'
3 | mainClassName = 'io.amient.kafka.metrics.KafkaMetricsAgent'
4 |
5 | dependencies {
6 | compile project(':core')
7 | compile group: 'org.apache.kafka', name: 'kafka_' + baseScalaVersion, version: rootProject.kafkaVersion
8 | }
9 |
10 |
11 | jar {
12 | from {
13 | (configurations.compile + configurations.provided).collect {
14 | it.isDirectory() ? it : zipTree(it)
15 | }
16 | }
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/metrics-agent/src/main/java/io/amient/kafka/metrics/KafkaMetricsAgent.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 |
23 | import org.slf4j.Logger;
24 | import org.slf4j.LoggerFactory;
25 |
26 | import java.io.FileInputStream;
27 | import java.io.IOException;
28 | import java.util.Properties;
29 |
30 | public class KafkaMetricsAgent {
31 |
32 | private static Logger log = LoggerFactory.getLogger(KafkaMetricsAgent.class);
33 |
34 | public static void main(String[] args) {
35 | try {
36 | java.util.Properties props = new java.util.Properties();
37 | if (args.length == 0) {
38 | props.load(System.in);
39 | log.info("Configuring KafkaMetricsAgent from STDIN");
40 | } else {
41 | log.info("Configuring KafkaMetricsAgent from property file: " + args[0]);
42 | props.load(new FileInputStream(args[0]));
43 | }
44 | props.list(System.out);
45 | try {
46 | ProducerPublisher publisher = props.containsKey(ProducerPublisher.CONFIG_METRICS_TOPIC) ? new ProducerPublisher(props) : null;
47 | if (publisher != null) {
48 | publisher.addProducerShutdownHook();
49 | }
50 | JMXScanner scanner = new JMXScanner(props, publisher);
51 | while (!scanner.isTerminated()) {
52 | Thread.sleep(5000);
53 | }
54 | } catch (Throwable e) {
55 | log.error("Failed to launch KafkaMetrics JMX Scanner", e);
56 | }
57 |
58 | } catch (IOException e) {
59 | e.printStackTrace();
60 | System.exit(1);
61 | }
62 | }
63 |
64 | }
65 |
--------------------------------------------------------------------------------
/metrics-agent/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=WARN, stdout
2 |
3 | log4j.logger.co.gridport.kafka.Mirror = INFO
4 | log4j.logger.co.gridport.kafka.MirrorExecutor = INFO
5 | log4j.logger.co.gridport.kafka.MirrorPartitioner = WARN
6 |
7 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
8 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
9 | log4j.appender.stdout.layout.ConversionPattern=%p %m (%c:%L) %n
10 |
11 | log4j.logger.io.amient.kafka.metrics=INFO
12 |
13 |
--------------------------------------------------------------------------------
/metrics-connect/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'java'
2 |
3 | apply plugin: "maven"
4 | group = "io.amient.kafka.metrics"
5 | version = rootProject.version
6 |
7 | dependencies {
8 | compile project(':core')
9 | compile project(':influxdb-loader')
10 | provided group: 'org.apache.kafka', name: 'kafka_' + baseScalaVersion, version: rootProject.kafkaVersion
11 | provided group: 'org.apache.kafka', name: 'connect-api', version: rootProject.kafkaVersion
12 | }
13 |
14 | jar {
15 | from {
16 | configurations.compile.collect { it.isDirectory() ? it : zipTree(it) }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/metrics-connect/src/main/java/io/amient/kafka/metrics/InfluxDbSinkConnector.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.apache.kafka.common.config.ConfigDef;
23 | import org.apache.kafka.common.utils.AppInfoParser;
24 | import org.apache.kafka.connect.connector.Task;
25 | import org.apache.kafka.connect.sink.SinkConnector;
26 |
27 | import java.util.ArrayList;
28 | import java.util.List;
29 | import java.util.Map;
30 |
31 |
32 | public class InfluxDbSinkConnector extends SinkConnector {
33 |
34 |
35 |     private Map<String, String> config;
36 |
37 | @Override
38 | public String version() {
39 | return AppInfoParser.getVersion();
40 | }
41 |
42 | @Override
43 |     public void start(Map<String, String> props) {
44 | this.config = props;
45 | }
46 |
47 | @Override
48 | public void stop() {
49 |
50 | }
51 |
52 | @Override
53 | public ConfigDef config() {
54 | ConfigDef defs = new ConfigDef();
55 | defs.define("influxdb.url", ConfigDef.Type.STRING, "http://localhost:8086", ConfigDef.Importance.HIGH, "influxdb server http address in the form http://:");
56 | defs.define("influxdb.database", ConfigDef.Type.STRING, "metrics", ConfigDef.Importance.HIGH, "influxdb database name to which to publish");
57 | defs.define("influxdb.username", ConfigDef.Type.STRING, "", ConfigDef.Importance.MEDIUM, "influxdb username to use for http updates");
58 | defs.define("influxdb.password", ConfigDef.Type.STRING, "", ConfigDef.Importance.MEDIUM, "influxdb password to use for http updates");
59 | return defs;
60 | }
61 |
62 | @Override
63 |     public Class<? extends Task> taskClass() {
64 | return InfluxDbSinkTask.class;
65 | }
66 |
67 | @Override
68 |     public List<Map<String, String>> taskConfigs(int maxTasks) {
69 |         ArrayList<Map<String, String>> configs = new ArrayList<>();
70 | for (int i = 0; i < maxTasks; i++) {
71 | configs.add(config);
72 | }
73 | return configs;
74 | }
75 |
76 | }
77 |
--------------------------------------------------------------------------------
/metrics-connect/src/main/java/io/amient/kafka/metrics/InfluxDbSinkTask.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
23 | import org.apache.kafka.common.TopicPartition;
24 | import org.apache.kafka.connect.sink.SinkRecord;
25 | import org.apache.kafka.connect.sink.SinkTask;
26 |
27 | import java.util.Collection;
28 | import java.util.Map;
29 | import java.util.Properties;
30 |
31 | public class InfluxDbSinkTask extends SinkTask {
32 |
33 | private InfluxDbPublisher publisher = null;
34 | private MeasurementConverter converter = null;
35 |
36 | @Override
37 | public String version() {
38 | return new InfluxDbSinkConnector().version();
39 | }
40 |
41 | @Override
42 |     public void start(Map<String, String> props) {
43 | Properties publisherConfig = new Properties();
44 | publisherConfig.putAll(props);
45 | publisher = new InfluxDbPublisher(publisherConfig);
46 | converter = new MeasurementConverter();
47 | }
48 |
49 | @Override
50 |     public void put(Collection<SinkRecord> sinkRecords) {
51 | for (SinkRecord record : sinkRecords) {
52 | MeasurementV1 measurement = converter.fromConnectData(record.valueSchema(), record.value());
53 | publisher.publish(measurement);
54 | }
55 | }
56 |
57 | @Override
58 |     public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
59 | //nothing to flush
60 | }
61 |
62 | @Override
63 | public void stop() {
64 | if (publisher != null) publisher.close();
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/metrics-connect/src/main/java/io/amient/kafka/metrics/MeasurementConverter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import org.apache.kafka.connect.data.Schema;
23 | import org.apache.kafka.connect.data.SchemaAndValue;
24 | import org.apache.kafka.connect.data.SchemaBuilder;
25 | import org.apache.kafka.connect.data.Struct;
26 | import org.apache.kafka.connect.storage.Converter;
27 |
28 | import java.util.Map;
29 |
30 | public class MeasurementConverter implements Converter {
31 |
32 | public Schema schema = null;
33 |
34 | private InternalAvroSerde internalAvro = null;
35 |
36 | @Override
37 |     public void configure(Map<String, ?> configs, boolean isKey) {
38 | internalAvro = new InternalAvroSerde();
39 | this.schema = SchemaBuilder.struct()
40 | .name("Measurement")
41 | .version(1)
42 | .field("timestamp", Schema.INT64_SCHEMA)
43 | .field("name", Schema.STRING_SCHEMA)
44 | .field("tags", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA).schema())
45 | .field("fields", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.FLOAT64_SCHEMA).schema())
46 | .build();
47 | }
48 |
49 | @Override
50 | public SchemaAndValue toConnectData(String topic, byte[] value) {
51 | if (value == null) return null;
52 | MeasurementV1 measurement = internalAvro.fromBytes(value);
53 | Struct result = new Struct(schema);
54 | result.put("timestamp", measurement.getTimestamp());
55 | result.put("name", measurement.getName());
56 | result.put("tags", measurement.getTags());
57 | result.put("fields", measurement.getFields());
58 | return new SchemaAndValue(schema, result);
59 | }
60 |
61 | @Override
62 | public byte[] fromConnectData(String topic, Schema schema, Object logicalValue) {
63 | if (logicalValue == null) return null;
64 | return internalAvro.toBytes(fromConnectData(schema, logicalValue));
65 | }
66 |
67 | public MeasurementV1 fromConnectData(Schema schema, Object logicalValue) {
68 | Struct struct = (Struct) logicalValue;
69 | MeasurementV1.Builder builder = MeasurementV1.newBuilder();
70 | builder.setTimestamp((long) struct.get("timestamp"));
71 | builder.setName((String) struct.get("name"));
72 | builder.setTags((Map) struct.get("tags"));
73 | builder.setFields((Map) struct.get("fields"));
74 | return builder.build();
75 | }
76 |
77 | }
78 |
--------------------------------------------------------------------------------
/metrics-connect/src/test/java/io/amient/kafka/metrics/MeasurementConverterTest.java:
--------------------------------------------------------------------------------
1 | package io.amient.kafka.metrics;
2 |
3 | import org.apache.kafka.connect.data.SchemaAndValue;
4 | import org.junit.Test;
5 |
6 | import java.util.HashMap;
7 |
8 | import static org.junit.Assert.assertEquals;
9 |
10 |
11 | public class MeasurementConverterTest {
12 |
13 | @Test
14 | public void endToEndConversionTest() {
15 | InternalAvroSerde internalAvro = new InternalAvroSerde();
16 | MeasurementFormatter formatter = new MeasurementFormatter();
17 | MeasurementConverter converter = new MeasurementConverter();
18 | converter.configure(new HashMap(), false);
19 |
20 | MeasurementV1.Builder builder = MeasurementV1.newBuilder();
21 | builder.setTimestamp(System.currentTimeMillis());
22 | builder.setName("xyz.abc.123");
23 | builder.setTags(new HashMap(){{
24 | put("dimension1", "tag1");
25 | put("dimension2", "tag2");
26 | }});
27 | builder.setFields(new HashMap(){{
28 | put("Value1", 10.0);
29 | put("Value2", 0.0);
30 | }});
31 | MeasurementV1 m = builder.build();
32 | System.out.println(formatter.toString(m));
33 |
34 | SchemaAndValue schemaAndValue = converter.toConnectData("topic1", internalAvro.toBytes(m));
35 |
36 | MeasurementV1 m2 = internalAvro.fromBytes(converter.fromConnectData("topic1", schemaAndValue.schema(), schemaAndValue.value()));
37 |
38 | System.out.println(formatter.toString(m2));
39 |
40 | assertEquals(m, m2);
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/metrics-reporter/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'java'
2 |
3 | apply plugin: "maven"
4 | group = "io.amient.kafka.metrics"
5 | version = rootProject.version
6 |
7 | dependencies {
8 | compile project(':core')
9 | compile 'com.yammer.metrics:metrics-core:2.2.0'
10 | provided group: 'org.apache.kafka', name: 'kafka_' + baseScalaVersion, version: rootProject.kafkaVersion
11 | }
12 |
13 |
14 | jar {
15 | from (configurations.compile.collect { it.isDirectory() ? it : zipTree(it) } ) {
16 | exclude "META-INF/*.SF"
17 | exclude "META-INF/LICENSE*"
18 | }
19 | }
20 |
21 |
--------------------------------------------------------------------------------
/metrics-reporter/src/main/java/io/amient/kafka/metrics/ConsumerGroupReporter.java:
--------------------------------------------------------------------------------
1 | package io.amient.kafka.metrics;
2 |
3 | import com.yammer.metrics.Metrics;
4 | import com.yammer.metrics.core.*;
5 | import com.yammer.metrics.reporting.AbstractPollingReporter;
6 | import kafka.utils.VerifiableProperties;
7 | import org.apache.kafka.clients.admin.AdminClient;
8 | import org.apache.kafka.clients.admin.ConsumerGroupListing;
9 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
10 | import org.apache.kafka.common.TopicPartition;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | import java.util.*;
15 | import java.util.concurrent.TimeUnit;
16 | import java.util.concurrent.atomic.AtomicLong;
17 |
18 | public class ConsumerGroupReporter implements kafka.metrics.KafkaMetricsReporter,
19 | io.amient.kafka.metrics.ConsumerGroupReporterMBean {
20 |
21 | private static final Logger log = LoggerFactory.getLogger(ConsumerGroupReporter.class);
22 |
23 | private static final String CONFIG_POLLING_INTERVAL = "kafka.metrics.polling.interval";
24 | private boolean initialized;
25 | private Properties props;
26 | private long pollingIntervalSeconds;
27 | private int brokerId;
28 | private boolean running;
29 | private X underlying;
30 |
31 | @Override
32 | public String getMBeanName() {
33 | return "kafka:type=io.amient.kafka.metrics.ConsumerGroupReporter";
34 | }
35 |
36 | @Override
37 | public void init(VerifiableProperties props) {
38 | if (!initialized) {
39 |
40 | this.props = new Properties();
41 | if (props.containsKey(CONFIG_POLLING_INTERVAL)) {
42 | this.pollingIntervalSeconds = props.getInt(CONFIG_POLLING_INTERVAL);
43 | } else {
44 | this.pollingIntervalSeconds = 10;
45 | }
46 |
47 | this.brokerId = Integer.parseInt(props.getProperty("broker.id"));
48 | log.info("Building ConsumerGroupReporter: polling.interval=" + pollingIntervalSeconds);
49 | Enumeration keys = props.props().keys();
50 | while (keys.hasMoreElements()) {
51 | String key = keys.nextElement().toString();
52 | if (key.startsWith("kafka.metrics.")) {
53 | String subKey = key.substring(14);
54 | this.props.put(subKey, props.props().get(key));
55 | log.info("Building ConsumerGroupReporter: " + subKey + "=" + this.props.get(subKey));
56 | }
57 | }
58 | initialized = true;
59 | this.underlying = new X(Metrics.defaultRegistry());
60 | startReporter(pollingIntervalSeconds);
61 |
62 | }
63 | }
64 |
65 |
66 | public void startReporter(long pollingPeriodSecs) {
67 | if (initialized && !running) {
68 | underlying.start(pollingPeriodSecs, TimeUnit.SECONDS);
69 | running = true;
70 | log.info("Started TopicReporter instance with polling period " + pollingPeriodSecs + " seconds");
71 | }
72 | }
73 |
74 | public void stopReporter() {
75 | if (initialized && running) {
76 | running = false;
77 | underlying.shutdown();
78 | log.info("Stopped TopicReporter instance");
79 | underlying = new X(Metrics.defaultRegistry());
80 | }
81 | }
82 |
83 |
84 | private class X extends AbstractPollingReporter {
85 |
86 |         final GroupMetrics<ConsumerGauge> consumerOffsets = new GroupMetrics<>("ConsumerOffset", ConsumerGauge.class, getMetricsRegistry());
87 |         final GroupMetrics<ConsumerGauge> consumerLags = new GroupMetrics<>("ConsumerLag", ConsumerGauge.class, getMetricsRegistry());
88 | private final AdminClient admin;
89 | private Clock clock;
90 |
91 | protected X(MetricsRegistry registry) {
92 | super(registry, "consumer-groups-reporter");
93 | this.clock = Clock.defaultClock();
94 | this.admin = AdminClient.create(props);
95 | }
96 |
97 | @Override
98 | public void shutdown() {
99 | try {
100 | super.shutdown();
101 | } finally {
102 | admin.close();
103 | }
104 | }
105 |
106 | @Override
107 | public void run() {
108 | final Long timestamp = clock.time();
109 | //process extra consumer metrics
110 | try {
111 | int controllerId = admin.describeCluster().controller().get(pollingIntervalSeconds, TimeUnit.SECONDS).id();
112 | if (brokerId == controllerId) {
113 |                     final Map<TopicPartition, Long> logEndOffsets = new HashMap<>();
114 |                     final Set<Map.Entry<MetricName, Metric>> metrics = getMetricsRegistry().allMetrics().entrySet();
115 |                     try {
116 |                         for (Map.Entry<MetricName, Metric> entry : metrics) {
117 | final MetricName name = entry.getKey();
118 | if (name.getGroup().equals("kafka.log") && name.getName().equals("LogEndOffset")) {
119 | /*
120 | * Decompose kafka metrics tags which uses yammer metrics Scope to "squash" all tags together
121 | */
122 | String topic = null;
123 | Integer partition = null;
124 | String[] scope = name.getScope().split("\\.");
125 |
126 | for (int s = 0; s < scope.length; s += 2) {
127 | String field = scope[s];
128 | String value = scope[s + 1];
129 | switch(field) {
130 | case "topic": topic = value; break;
131 | case "partition": partition = Integer.parseInt(value); break;
132 | }
133 | }
134 | if (topic != null && partition != null) {
135 |                                 Gauge<Long> m = (Gauge<Long>) entry.getValue();
136 | logEndOffsets.put(new TopicPartition(topic, partition), m.value());
137 | }
138 | }
139 |
140 | }
141 | } catch (Exception e) {
142 | e.printStackTrace();
143 | }
144 |                 Collection<ConsumerGroupListing> consumerGroups = admin.listConsumerGroups().all().get(pollingIntervalSeconds, TimeUnit.SECONDS);
145 |
146 | consumerGroups.parallelStream().
147 | filter(group -> !group.groupId().startsWith("console-consumer")).
148 | forEach(group -> {
149 | try {
150 |                             Map<TopicPartition, OffsetAndMetadata> offsets = admin.listConsumerGroupOffsets(group.groupId()).partitionsToOffsetAndMetadata().get(pollingIntervalSeconds, TimeUnit.SECONDS);
151 |                             for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
152 | TopicPartition tp = entry.getKey();
153 | if (logEndOffsets.containsKey(tp)) {
154 | long logEndOffset = logEndOffsets.get(tp);
155 |
156 | long consumerOffset = entry.getValue().offset();
157 | ConsumerGauge offsetGauge = consumerOffsets.get(group.groupId(), tp);
158 | offsetGauge.value.set(consumerOffset);
159 |
160 | ConsumerGauge lagGauge = consumerLags.get(group.groupId(), tp);
161 | lagGauge.value.set(Math.max(0, logEndOffset - consumerOffset));
162 | }
163 | }
164 | } catch (Exception e) {
165 | log.error("error while fetching offsets for group " + group, e);
166 | }
167 | });
168 | }
169 | } catch (Exception e) {
170 | log.error("error while processing conusmer offsets", e);
171 | }
172 | }
173 |
174 | }
175 |
176 |
177 |     public static class ConsumerGauge extends Gauge<Long> {
178 | AtomicLong value = new AtomicLong(0);
179 |
180 | @Override
181 | public Long value() {
182 | return value.get();
183 | }
184 | }
185 |
186 |
187 |
188 |
189 | }
190 |
--------------------------------------------------------------------------------
/metrics-reporter/src/main/java/io/amient/kafka/metrics/ConsumerGroupReporterMBean.java:
--------------------------------------------------------------------------------
1 | package io.amient.kafka.metrics;
2 |
3 | public interface ConsumerGroupReporterMBean extends kafka.metrics.KafkaMetricsReporterMBean {}
4 |
5 |
--------------------------------------------------------------------------------
/metrics-reporter/src/main/java/io/amient/kafka/metrics/GroupMetrics.java:
--------------------------------------------------------------------------------
1 | package io.amient.kafka.metrics;
2 |
3 | import com.yammer.metrics.core.Gauge;
4 | import com.yammer.metrics.core.Metric;
5 | import com.yammer.metrics.core.MetricName;
6 | import com.yammer.metrics.core.MetricsRegistry;
7 | import org.apache.kafka.common.TopicPartition;
8 |
9 | import java.util.HashMap;
10 | import java.util.Map;
11 |
12 | public class GroupMetrics<T extends Metric> {
13 |
14 |     private final Class<? extends T> cls;
15 |     private final MetricsRegistry registry;
16 |     private final String name;
17 |     private final Map<String, Map<TopicPartition, T>> data = new HashMap<>();
18 |
19 |     public GroupMetrics(String metricName, Class<? extends T> cls, MetricsRegistry registry) {
20 | this.registry = registry;
21 | this.name = metricName;
22 | this.cls = cls;
23 | }
24 |
25 | public T get(String group, TopicPartition tp) {
26 |         Map<TopicPartition, T> metrics = data.get(group);
27 | if (metrics == null) {
28 | metrics = new HashMap<>();
29 | data.put(group, metrics);
30 | }
31 | T metric = metrics.get(tp);
32 | if (metric == null) {
33 | try {
34 | metric = cls.newInstance();
35 | if (metric instanceof Gauge) {
36 | registry.newGauge(NewName(group, tp), (Gauge)metric);
37 | }
38 | } catch (Exception e) {
39 | throw new RuntimeException(e);
40 | }
41 | metrics.put(tp, metric);
42 | }
43 | return metric;
44 | }
45 |
46 | private MetricName NewName(String group, TopicPartition tp) {
47 | return new MetricName(
48 | "kafka.groups",
49 | "Group",
50 | name,
51 | "",
52 | "kafka.consumer:type=Group,name=" + name
53 | + ",group=" + group
54 | + ",topic=" + tp.topic()
55 | + ",partition=" + tp.partition());
56 | }
57 |
58 | }
59 |
--------------------------------------------------------------------------------
/metrics-reporter/src/main/java/io/amient/kafka/metrics/KafkaMetricsProcessor.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import com.yammer.metrics.core.*;
23 | import com.yammer.metrics.reporting.AbstractPollingReporter;
24 | import com.yammer.metrics.stats.Snapshot;
25 | import org.apache.kafka.clients.admin.AdminClient;
26 | import org.apache.kafka.clients.admin.ConsumerGroupListing;
27 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
28 | import org.apache.kafka.common.TopicPartition;
29 | import org.apache.kafka.common.metrics.KafkaMetric;
30 | import org.slf4j.Logger;
31 | import org.slf4j.LoggerFactory;
32 |
33 | import java.util.Collection;
34 | import java.util.HashMap;
35 | import java.util.Map;
36 | import java.util.Set;
37 | import java.util.concurrent.TimeUnit;
38 | import java.util.concurrent.atomic.AtomicLong;
39 |
40 | public class KafkaMetricsProcessor extends AbstractPollingReporter implements MetricProcessor<Long> {
41 | private static final Logger log = LoggerFactory.getLogger(KafkaMetricsProcessor.class);
42 |
43 | private final MeasurementPublisher publisher;
44 | private final MeasurementFormatter formatter;
45 | private final Clock clock;
46 |
47 |     private final Map<org.apache.kafka.common.MetricName, KafkaMetric> kafkaMetrics;
48 |     private final Map<String, String> fixedTags;
49 | private final Integer pollingIntervalSeconds;
50 | private boolean closed = false;
51 |
52 | public KafkaMetricsProcessor(
53 | MetricsRegistry metricsRegistry,
54 |             Map<org.apache.kafka.common.MetricName, KafkaMetric> kafkaMetrics,
55 | MeasurementPublisher publisher,
56 |             Map<String, String> fixedTags,
57 | Integer pollingIntervalSeconds
58 | ) {
59 | super(metricsRegistry, "streaming-reporter");
60 | this.kafkaMetrics = kafkaMetrics;
61 | this.clock = Clock.defaultClock();
62 | this.fixedTags = fixedTags;
63 | this.publisher = publisher;
64 | this.formatter = new MeasurementFormatter();
65 | this.pollingIntervalSeconds = pollingIntervalSeconds;
66 | }
67 |
68 | public Integer getPollingIntervaSeconds() {
69 | return pollingIntervalSeconds;
70 | }
71 |
72 | private MeasurementV1 createMeasurement(com.yammer.metrics.core.MetricName name,
73 |                                             Long timestamp, Map<String, String> tags, Map<String, Double> fields) {
74 | MeasurementV1 measurement = new MeasurementV1();
75 | measurement.setTimestamp(timestamp);
76 | measurement.setName(name.getName());
77 | measurement.setTags(new HashMap(tags));
78 | if (name.getGroup() != null && !name.getGroup().isEmpty()) measurement.getTags().put("group", name.getGroup());
79 | if (name.getType() != null && !name.getType().isEmpty()) measurement.getTags().put("type", name.getType());
80 | if (name.getScope() != null && !name.getScope().isEmpty()) {
81 | if (name.getGroup() != null && name.getGroup().startsWith("kafka.") && name.getScope().contains(".")) {
82 | /*
83 | * Decompose kafka metrics tags which uses yammer metrics Scope to "squash" all tags together
84 | */
85 | String[] scope = name.getScope().split("\\.");
86 | for (int s = 0; s < scope.length; s += 2) {
87 | measurement.getTags().put(scope[s], scope[s + 1]);
88 | }
89 | } else {
90 | measurement.getTags().put("scope", name.getScope());
91 | }
92 | }
93 | measurement.setFields(new HashMap(fields));
94 | return measurement;
95 | }
96 |
97 | public void publish(MeasurementV1 m) {
98 | if (!closed) {
99 | publisher.publish(m);
100 | }
101 | }
102 |
103 | @Override
104 | public void start(long timeout, TimeUnit unit) {
105 | super.start(timeout, unit);
106 | }
107 |
108 | @Override
109 | public void shutdown() {
110 | try {
111 | super.shutdown();
112 | } finally {
113 | closed = true;
114 | if (publisher != null) publisher.close();
115 | }
116 | }
117 |
118 | @Override
119 | public void run() {
120 | final Long timestamp = clock.time();
121 | //process kafka metrics
122 | if (kafkaMetrics != null)
123 |             for (Map.Entry<org.apache.kafka.common.MetricName, KafkaMetric> m
124 | : kafkaMetrics.entrySet()) {
125 | Double value = m.getValue().value();
126 | if (!value.isNaN() && !value.isInfinite()) {
127 | MeasurementV1 measurement = new MeasurementV1();
128 | measurement.setTimestamp(timestamp);
129 | measurement.setName(m.getKey().name());
130 | Map tags = new HashMap(fixedTags);
131 | tags.put("group", m.getKey().group());
132 | for (Map.Entry tag : m.getValue().metricName().tags().entrySet()) {
133 | tags.put(tag.getKey(), tag.getValue());
134 | }
135 | Map fields = new HashMap();
136 | fields.put("Value", value);
137 | measurement.setTags(tags);
138 | measurement.setFields(fields);
139 | publish(measurement);
140 | }
141 | }
142 | //process yammer metrics
143 |         final Set<Map.Entry<MetricName, Metric>> metrics = getMetricsRegistry().allMetrics().entrySet();
144 | try {
145 |             for (Map.Entry<MetricName, Metric> entry : metrics) {
146 | final MetricName metricName = entry.getKey();
147 | final Metric metric = entry.getValue();
148 | if (MetricPredicate.ALL.matches(metricName, metric)) {
149 | metric.processWith(this, entry.getKey(), timestamp);
150 | }
151 | }
152 | } catch (Exception e) {
153 | e.printStackTrace();
154 | }
155 | }
156 |
157 | @Override
158 | public void processMeter(MetricName name, Metered meter, Long timestamp) {
159 | Map fields = new HashMap();
160 | fields.put("Count", Double.valueOf(meter.count()));
161 | fields.put("MeanRate", meter.meanRate());
162 | fields.put("FifteenMinuteRate", meter.fifteenMinuteRate());
163 | fields.put("FiveMinuteRate", meter.fiveMinuteRate());
164 | fields.put("OneMinuteRate", meter.oneMinuteRate());
165 |
166 | publish(createMeasurement(name, timestamp, fixedTags, fields));
167 | }
168 |
169 | @Override
170 | public void processCounter(MetricName name, Counter counter, Long timestamp) {
171 | Map fields = new HashMap();
172 | fields.put("Count", Double.valueOf(counter.count()));
173 | publish(createMeasurement(name, timestamp, fixedTags, fields));
174 | }
175 |
176 | @Override
177 |     public void processGauge(MetricName name, Gauge<?> gauge, Long timestamp) {
178 | Map fields = new HashMap();
179 | try {
180 | Double value = formatter.anyValueToDouble(gauge.value());
181 | if (value != null) {
182 | fields.put("Value", value);
183 | MeasurementV1 m = createMeasurement(name, timestamp, fixedTags, fields);
184 | publish(m);
185 | }
186 | } catch (Exception e) {
187 | log.warn("Could not process gauge for metric " + name + ": " + e.getMessage());
188 | }
189 | }
190 |
191 | @Override
192 | public void processHistogram(MetricName name, Histogram histogram, Long timestamp) {
193 | Map fields = new HashMap();
194 | fields.put("Count", Double.valueOf(histogram.count()));
195 | fields.put("Max", histogram.max());
196 | fields.put("Mean", histogram.mean());
197 | fields.put("Min", histogram.min());
198 | fields.put("StdDev", histogram.stdDev());
199 | fields.put("Sum", histogram.sum());
200 | Snapshot snapshot = histogram.getSnapshot();
201 | fields.put("95thPercentile", snapshot.get95thPercentile());
202 | fields.put("98thPercentile", snapshot.get98thPercentile());
203 | fields.put("99thPercentile", snapshot.get99thPercentile());
204 | fields.put("999thPercentile", snapshot.get999thPercentile());
205 | publish(createMeasurement(name, timestamp, fixedTags, fields));
206 | }
207 |
208 | @Override
209 | public void processTimer(MetricName name, Timer timer, Long timestamp) {
210 | Map<String, Double> fields = new HashMap<String, Double>();
211 | fields.put("Count", Double.valueOf(timer.count()));
212 | fields.put("MeanRate", timer.meanRate());
213 | fields.put("FifteenMinuteRate", timer.fifteenMinuteRate());
214 | fields.put("FiveMinuteRate", timer.fiveMinuteRate());
215 | fields.put("OneMinuteRate", timer.oneMinuteRate());
216 | fields.put("Max", timer.max());
217 | fields.put("Mean", timer.mean());
218 | fields.put("Min", timer.min());
219 | fields.put("StdDev", timer.stdDev());
220 | fields.put("Sum", timer.sum());
221 | publish(createMeasurement(name, timestamp, fixedTags, fields));
222 | }
223 |
224 |
225 | }
226 |
227 |
228 |
--------------------------------------------------------------------------------
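
The process* callbacks above define how each Yammer metric type is flattened into measurement fields: Count, MeanRate and the per-minute rates for meters and timers, Count for counters, the min/max/percentiles for histograms, and a single Value for gauges. A minimal sketch of feeding the processor, with illustrative group/type/name values; any metric living in the registry handed to the processor is picked up on the next polling cycle:

    import com.yammer.metrics.Metrics;
    import com.yammer.metrics.core.Meter;
    import com.yammer.metrics.core.MetricName;
    import java.util.concurrent.TimeUnit;

    // Illustrative only: register a meter in the default registry, which is the
    // registry TopicReporter attaches the processor to when running inside a broker.
    Meter requests = Metrics.defaultRegistry().newMeter(
            new MetricName("my.app", "http", "requests"), "requests", TimeUnit.SECONDS);
    requests.mark();
    // On the next poll, processMeter(...) above publishes one measurement for this meter
    // with the fields Count, MeanRate, FifteenMinuteRate, FiveMinuteRate and OneMinuteRate.
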
/metrics-reporter/src/main/java/io/amient/kafka/metrics/KafkaMetricsProcessorBuilder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import com.yammer.metrics.core.MetricsRegistry;
23 | import org.apache.kafka.common.MetricName;
24 | import org.apache.kafka.common.metrics.KafkaMetric;
25 | import org.slf4j.Logger;
26 | import org.slf4j.LoggerFactory;
27 |
28 | import java.util.Enumeration;
29 | import java.util.HashMap;
30 | import java.util.Map;
31 | import java.util.Properties;
32 |
33 | public class KafkaMetricsProcessorBuilder {
34 |
35 | private static final Logger log = LoggerFactory.getLogger(KafkaMetricsProcessorBuilder.class);
36 |
37 | private static final String CONFIG_POLLING_INTERVAL = "kafka.metrics.polling.interval";
38 | private static final String CONFIG_REPORTER_TAG_PREFIX = "kafka.metrics.tag.";
39 |
40 | private MetricsRegistry registry;
41 | private Map<MetricName, KafkaMetric> kafkaMetrics = null;
42 | private Map<String, String> tags = new HashMap<String, String>();
43 | private String topic = "metrics";
44 | private String bootstrapServers;
45 | private Integer pollingIntervalSeconds = 10;
46 |
47 | public KafkaMetricsProcessorBuilder(MetricsRegistry registry) {
48 | this.registry = registry;
49 | }
50 |
51 | public KafkaMetricsProcessorBuilder configure(Properties config) {
52 |
53 | if (!config.containsKey(ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS) && config.containsKey("port")) {
54 | //if this reporter is plugged into the kafka broker itself, use the broker's own port for the metrics producer
55 | config.put(ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS, "localhost:" + config.get("port"));
56 | }
57 | if (config.containsKey("bootstrap.servers") && !config.containsKey(ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS)) {
58 | //if plugged into a kafka producer and no metrics bootstrap servers are specified, re-use the wrapping producer's bootstrap servers
59 | config.put(ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS, config.getProperty("bootstrap.servers"));
60 | }
61 |
62 | for (Enumeration<Object> e = config.keys(); e.hasMoreElements(); ) {
63 | Object propKey = e.nextElement();
64 | configure((String) propKey, config.get(propKey).toString());
65 | }
66 | return this;
67 | }
68 |
69 | public KafkaMetricsProcessorBuilder configure(String propName, String propValue) {
70 | if (propName.startsWith(CONFIG_REPORTER_TAG_PREFIX)) {
71 | String tag = propName.substring(CONFIG_REPORTER_TAG_PREFIX.length());
72 | return setTag(tag, propValue);
73 | } else if (propName.equals(ProducerPublisher.CONFIG_METRICS_TOPIC)) {
74 | return setTopic(propValue);
75 | } else if (propName.equals(ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS)) {
76 | setBootstrapServers(propValue);
77 | } else if (propName.equals(CONFIG_POLLING_INTERVAL)) {
78 | this.pollingIntervalSeconds = Integer.parseInt(propValue);
79 | }
80 | return this;
81 | }
82 |
83 | public KafkaMetricsProcessorBuilder setTopic(String topic) {
84 | this.topic = topic;
85 | return this;
86 | }
87 |
88 | public KafkaMetricsProcessorBuilder setBootstrapServers(String bootstrapServers) {
89 | this.bootstrapServers = bootstrapServers;
90 | return this;
91 | }
92 |
93 | public KafkaMetricsProcessorBuilder setTag(String tagName, String tagValue) {
94 | tags.put(tagName, tagValue);
95 | return this;
96 | }
97 |
98 | public KafkaMetricsProcessorBuilder setKafkaMetrics(Map<MetricName, KafkaMetric> kafkaMetrics) {
99 | this.kafkaMetrics = kafkaMetrics;
100 | return this;
101 | }
102 |
103 | public KafkaMetricsProcessorBuilder setTags(HashMap<String, String> tags) {
104 | this.tags = tags;
105 | return this;
106 | }
107 |
108 | public void decorateConfig(Properties config) {
109 | config.put("kafka.metrics.reporters", TopicReporter.class.getName());
110 | config.put(ProducerPublisher.CONFIG_METRICS_TOPIC, topic);
111 | config.put(ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS, bootstrapServers);
112 | config.put(CONFIG_POLLING_INTERVAL, pollingIntervalSeconds.toString());
113 | for (Map.Entry<String, String> tag : tags.entrySet()) {
114 | config.put(CONFIG_REPORTER_TAG_PREFIX + tag.getKey(), tag.getValue());
115 | }
116 | }
117 |
118 | /**
119 | * Generates reporter properties for the new Kafka client Producer (0.8.2+) and Consumer (0.9+).
120 | * @param kafkaClientConfig the client configuration to decorate with reporter settings
121 | */
122 | public void decorateKafkaClientConfig(Properties kafkaClientConfig) {
123 | kafkaClientConfig.put("metric.reporters", TopicReporter.class.getName());
124 | decorateConfig(kafkaClientConfig);
125 | }
126 |
127 | public KafkaMetricsProcessor build() {
128 | log.info("Building TopicReporter: " + ProducerPublisher.CONFIG_METRICS_TOPIC + "=" + topic);
129 | log.info("Building TopicReporter: " + ProducerPublisher.CONFIG_BOOTSTRAP_SERVERS + "=" + bootstrapServers);
130 | log.info("Building TopicReporter: " + CONFIG_POLLING_INTERVAL + "=" + pollingIntervalSeconds);
131 | for (Map.Entry<String, String> tag : tags.entrySet()) {
132 | log.info("Building TopicReporter with tag: " + tag.getKey() + "=" + tag.getValue());
133 | }
134 |
135 |
136 | MeasurementPublisher publisher = new ProducerPublisher(bootstrapServers, topic);
137 | return new KafkaMetricsProcessor(registry, kafkaMetrics, publisher, tags, pollingIntervalSeconds);
138 | }
139 |
140 | }
141 |
--------------------------------------------------------------------------------
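
A minimal sketch of driving the builder programmatically rather than through broker properties; the bootstrap address, topic, tag and polling interval below are placeholders:

    import com.yammer.metrics.Metrics;
    import java.util.concurrent.TimeUnit;

    // Sketch only: attach a processor to the default Yammer registry and start polling.
    KafkaMetricsProcessor processor = new KafkaMetricsProcessorBuilder(Metrics.defaultRegistry())
            .setBootstrapServers("localhost:9092")    // placeholder broker address
            .setTopic("metrics")                      // same as the builder default
            .setTag("service", "example-service")     // placeholder fixed tag
            .build();
    processor.start(10, TimeUnit.SECONDS);            // the same call TopicReporter makes on init
    // ... and on shutdown of the host application:
    processor.shutdown();
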
/metrics-reporter/src/main/java/io/amient/kafka/metrics/TopicReporter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | import com.yammer.metrics.Metrics;
23 | import com.yammer.metrics.core.MetricsRegistry;
24 | import kafka.utils.VerifiableProperties;
25 | import org.apache.kafka.common.MetricName;
26 | import org.apache.kafka.common.metrics.KafkaMetric;
27 | import org.slf4j.Logger;
28 | import org.slf4j.LoggerFactory;
29 |
30 | import java.util.Map;
31 | import java.util.Properties;
32 | import java.util.concurrent.ConcurrentHashMap;
33 | import java.util.concurrent.TimeUnit;
34 |
35 | public class TopicReporter implements
36 | // org.apache.kafka.common.metrics.MetricsReporter, //FIXME this other interface is a bit odd but it seems to have some useful metrics
37 | kafka.metrics.KafkaMetricsReporter,
38 | io.amient.kafka.metrics.TopicReporterMBean {
39 | private static final Logger log = LoggerFactory.getLogger(TopicReporter.class);
40 |
41 | final private Map<MetricName, KafkaMetric> kafkaMetrics = new ConcurrentHashMap<>();
42 | private KafkaMetricsProcessorBuilder builder;
43 | private KafkaMetricsProcessor underlying;
44 | private Properties config;
45 | volatile private boolean running;
46 | volatile private boolean initialized;
47 |
48 | public TopicReporter() {
49 | log.info("INIT TopicReporter");
50 | }
51 |
52 | /**
53 | * Builder for programmatic configuration into an existing Yammer Metrics registry
54 | * @param registry metrics registry to which to attach the reporter
55 | * @return a builder instance for the reporter
56 | */
57 | public static KafkaMetricsProcessorBuilder forRegistry(MetricsRegistry registry) {
58 | return new KafkaMetricsProcessorBuilder(registry);
59 | }
60 |
61 | public String getMBeanName() {
62 | return "kafka:type=io.amient.kafka.metrics.TopicReporter";
63 | }
64 |
65 | public void init(VerifiableProperties kafkaConfig) {
66 |
67 | if (!initialized) {
68 | initialized = true;
69 |
70 | this.config = kafkaConfig.props();
71 | this.builder = forRegistry(Metrics.defaultRegistry());
72 | builder.configure(config);
73 | underlying = builder.build();
74 | startReporter(underlying.getPollingIntervaSeconds());
75 | }
76 | }
77 |
78 | public void startReporter(long pollingPeriodSecs) {
79 | if (initialized && !running) {
80 | underlying.start(pollingPeriodSecs, TimeUnit.SECONDS);
81 | running = true;
82 | log.info("Started TopicReporter instance with polling period " + pollingPeriodSecs + " seconds");
83 | }
84 | }
85 |
86 | public void stopReporter() {
87 | if (initialized && running) {
88 | running = false;
89 | underlying.shutdown();
90 | log.info("Stopped TopicReporter instance");
91 | underlying = builder.build();
92 | }
93 | }
94 |
95 |
96 | // @Override
97 | // public void configure(Map<String, ?> configs) {
98 | // config = new Properties();
99 | // config.putAll(configs);
100 | // }
101 | //
102 | // @Override
103 | // public void init(List<KafkaMetric> metrics) {
104 | // for (org.apache.kafka.common.metrics.KafkaMetric metric : metrics) {
105 | // metricChange(metric);
106 | // }
107 | // if (! initialized) {
108 | // builder = forRegistry(new MetricsRegistry());
109 | // builder.configure(config);
110 | // builder.setKafkaMetrics(kafkaMetrics);
111 | // underlying = builder.build();
112 | // startReporter(underlying.getPollingIntervaSeconds());
113 | // }
114 | // }
115 | //
116 | // @Override
117 | // public void metricChange(org.apache.kafka.common.metrics.KafkaMetric metric) {
118 | // kafkaMetrics.put(metric.metricName(), metric);
119 | // }
120 | //
121 | // @Override
122 | // public void metricRemoval(KafkaMetric metric) {
123 | // kafkaMetrics.remove(metric.metricName());
124 | // }
125 | //
126 | // @Override
127 | // public void close() {
128 | // stopReporter();
129 | // }
130 |
131 | }
132 |
--------------------------------------------------------------------------------
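
For the broker-plugin path above (init via VerifiableProperties), a minimal sketch of the properties involved; the port, interval and tag values are placeholders, and the metrics topic key is referenced through the ProducerPublisher constant because its literal value is defined in the core module:

    import java.util.Properties;
    import kafka.utils.VerifiableProperties;

    // Sketch only: mirrors what a broker does when its server.properties contains
    // kafka.metrics.reporters=io.amient.kafka.metrics.TopicReporter
    Properties props = new Properties();
    props.put("port", "9092");                                    // builder falls back to localhost:<port> for bootstrap servers
    props.put("kafka.metrics.polling.interval", "10");            // seconds
    props.put("kafka.metrics.tag.host", "broker-1");              // becomes a fixed tag on every measurement
    props.put(ProducerPublisher.CONFIG_METRICS_TOPIC, "metrics"); // optional, "metrics" is the default

    TopicReporter reporter = new TopicReporter();
    reporter.init(new VerifiableProperties(props));               // builds and starts the underlying processor
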
/metrics-reporter/src/main/java/io/amient/kafka/metrics/TopicReporterMBean.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2015 Michal Harish, michal.harish@gmail.com
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one or more
5 | * contributor license agreements. See the NOTICE file distributed with
6 | * this work for additional information regarding copyright ownership.
7 | * The ASF licenses this file to You under the Apache License, Version 2.0
8 | * (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | *
11 | * http://www.apache.org/licenses/LICENSE-2.0
12 | *
13 | * Unless required by applicable law or agreed to in writing, software
14 | * distributed under the License is distributed on an "AS IS" BASIS,
15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | * See the License for the specific language governing permissions and
17 | * limitations under the License.
18 | */
19 |
20 | package io.amient.kafka.metrics;
21 |
22 | public interface TopicReporterMBean extends kafka.metrics.KafkaMetricsReporterMBean {}
23 |
--------------------------------------------------------------------------------
/metrics-reporter/src/test/java/io/amient/kafka/metrics/InfluxDbPublisherTest.java:
--------------------------------------------------------------------------------
1 | package io.amient.kafka.metrics;
2 |
3 | import org.junit.Test;
4 |
5 | import java.util.Properties;
6 |
7 | /**
8 | * Created by mharis on 20/11/2015.
9 | */
10 | public class InfluxDbPublisherTest {
11 |
12 | // @Test
13 | // public void t1() {
14 | // InfluxDbPublisher p = new InfluxDbPublisher(new Properties() {{
15 | // put(KafkaMetricsProcessor.CONFIG_REPORTER_HOST, "localhost");
16 | // put(KafkaMetricsProcessor.CONFIG_REPORTER_SERVICE, "test");
17 | // }});
18 | // Measurement m = new Measurement();
19 | // m.setTimestamp(System.currentTimeMillis());
20 | // m.setName("cpu");
21 | // m.setGroup("io.amient");
22 | // m.setHost("localhost");
23 | // m.setFields("utilisation=100.0");
24 | // p.publish(m);
25 | // }
26 |
27 | }
28 |
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | include ':core', ':influxdb-loader', ':discovery', ':metrics-reporter', ':metrics-agent', ':metrics-connect'
--------------------------------------------------------------------------------
/zzz:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | fail() {
4 | MESSAGE=$1
5 | RED='\033[0;31m'
6 | NC='\033[0m'
7 | echo -e "${RED}Failure: $MESSAGE $NC"
8 | echo ""
9 | exit 1;
10 | }
11 |
12 | continue() {
13 | result=$1
14 | MESSAGE=$2
15 | if [ $result -ne 0 ]; then
16 | fail "$MESSAGE"
17 | fi
18 | }
19 |
20 | variant() {
21 | VARIANT="$1"
22 | SETS="$2"
23 | C="${@:3}"
24 |
25 | echo ""
26 | if [ "$VARIANT" == "$ROOT" ]; then
27 | git checkout master
28 | continue $? "master/checkout"
29 | git pull
30 | continue $? "master/pull"
31 | BRANCH="$ROOT"
32 | if [ "$BRANCH" != "master" ]; then
33 | git checkout $BRANCH 2>/dev/null || git checkout -b $BRANCH
34 | continue $? "$BRANCH/checkout"
35 | fi
36 | else
37 | git checkout "master-$VARIANT"
38 | continue $? "master-$VARIANT/checkout"
39 | git pull
40 | continue $? "master-$VARIANT/pull"
41 | BRANCH="$ROOT-$VARIANT"
42 | if [ "$BRANCH" != "master-$VARIANT" ]; then
43 | git checkout $BRANCH 2>/dev/null || git checkout -b $BRANCH
44 | continue $? "$BRANCH/checkout"
45 | fi
46 | git merge $ROOT --no-edit
47 | continue $? "$BRANCH/merge $ROOT"
48 | fi
49 |
50 | if [ "$C" != "-" ]; then
51 | MESSAGE="$BRANCH/$C (all modules)"
52 | echo "$MESSAGE"
53 | ./gradlew $C
54 | continue $? "$MESSAGE"
55 | fi
56 |
57 | }
58 |
59 | publish() {
60 | BRANCH=$1
61 | COMMAND=$2
62 | git checkout $BRANCH
63 | continue $? "$BRANCH/checkout"
64 | git pull
65 | continue $? "$BRANCH/pull"
66 | ./gradlew $COMMAND
67 | continue $? "$BRANCH/publish"
68 | }
69 |
70 | Usage() {
71 | echo ""
72 | echo "This script will run the given gradle commands over the whole cross-compiled space of this project."
73 | echo ""
74 | echo "Usage: ./zzz "
75 | echo ""
76 | echo " ROOT-BRANCH Can be either master or any development branch"
77 | echo " - if it's not master, variant branches will be created automatically"
78 | echo " TASK: "
79 | echo " compile - run compile on all variants"
80 | echo " test - run test on branches only"
81 | echo " test-all - run all tests on root and all variants"
82 | echo " merge - only merge root into all variants"
83 | echo " install - install selected modules sets from all branches into local maven"
84 | echo " publish - publish all master and variant modules to sonatype central repo"
85 | echo ""
86 | echo ""
87 | }
88 |
89 | if [ -z "$1" ]; then
90 | Usage
91 | fail "Missing root branch argument"
92 | fi
93 |
94 | ROOT="$1"
95 |
96 | case "$2" in
97 | compile)
98 | C="compile"
99 | RUN_ON_ROOT="true"
100 | RUN_ON_VARIANTS="true"
101 | ;;
102 | test-all)
103 | C="test --quiet"
104 | RUN_ON_ROOT="true"
105 | RUN_ON_VARIANTS="true"
106 | ;;
107 | test)
108 | C="test --quiet"
109 | RUN_ON_ROOT="false"
110 | RUN_ON_VARIANTS="true"
111 | ;;
112 | merge)
113 | C="-"
114 | RUN_ON_ROOT="false"
115 | RUN_ON_VARIANTS="true"
116 | ;;
117 | build)
118 | C="build"
119 | RUN_ON_ROOT="true"
120 | RUN_ON_VARIANTS="true"
121 | ;;
122 | *)
123 | Usage
124 | fail "Invalid argument"
125 | ;;
126 | esac
127 |
128 | if [ -z "$3" ]; then
129 | SETS="-"
130 | else
131 | SETS="$3"
132 | fi
133 |
134 | #execute on the root branch
135 | if [ "$RUN_ON_ROOT" == "true" ]; then
136 | variant "$ROOT" "$SETS" "$C"
137 | fi
138 |
139 | #execute on the variant branches
140 | if [ "$RUN_ON_VARIANTS" == "true" ]; then
141 | variant "1.1" "$SETS" $C
142 | variant "1.0" "$SETS" $C
143 | fi
144 |
145 | git checkout "$ROOT"
146 | continue $? "Checkout back to root branch"
147 |
--------------------------------------------------------------------------------
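
For example, running "./zzz master test-all" checks out master, runs "./gradlew test --quiet" there, then merges master into the master-1.1 and master-1.0 variant branches (assuming those branches exist) and runs the same tests on each, before checking back out to the root branch.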