├── .travis.yml ├── png └── consumer-offsets-demo1.png ├── src ├── main │ ├── java │ │ ├── dao │ │ │ ├── MBeansDao.java │ │ │ ├── OffsetsDao.java │ │ │ └── impl │ │ │ │ ├── OffsetsInfluxDBDaoImpl.java │ │ │ │ └── MBeansInfluxDBDaoImpl.java │ │ ├── common │ │ │ ├── factory │ │ │ │ ├── KafkaMBeanServiceFactory.java │ │ │ │ ├── KafkaMBeanService.java │ │ │ │ └── KafkaMBeanServiceImpl.java │ │ │ ├── protocol │ │ │ │ ├── BaseProtocol.java │ │ │ │ ├── MessageValueStructAndVersionInfo.java │ │ │ │ ├── KeyAndValueSchemasInfo.java │ │ │ │ ├── BrokersInfo.java │ │ │ │ ├── OffsetInfo.java │ │ │ │ └── MBeanInfo.java │ │ │ └── util │ │ │ │ ├── KafkaUtils.java │ │ │ │ ├── StrUtils.java │ │ │ │ └── ZookeeperUtils.java │ │ ├── app │ │ │ └── KafkaInsight.java │ │ ├── task │ │ │ ├── KafkaOffsetTask.java │ │ │ └── KafkaMBeanTask.java │ │ └── core │ │ │ └── KafkaOffsetGetter.java │ └── resources │ │ ├── application.conf │ │ └── log4j.properties └── test │ └── java │ └── common │ └── util │ └── StrUtilsTest.java ├── README.md ├── pom.xml └── LICENSE /.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | jdk: oraclejdk8 3 | install: true 4 | script: mvn clean verify 5 | -------------------------------------------------------------------------------- /png/consumer-offsets-demo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dubin555/Kafka-Insight/HEAD/png/consumer-offsets-demo1.png -------------------------------------------------------------------------------- /src/main/java/dao/MBeansDao.java: -------------------------------------------------------------------------------- 1 | package dao; 2 | 3 | /** 4 | * Created by dubin on 05/10/2017. 5 | */ 6 | public interface MBeansDao { 7 | void insert(); 8 | } 9 | -------------------------------------------------------------------------------- /src/main/java/dao/OffsetsDao.java: -------------------------------------------------------------------------------- 1 | package dao; 2 | 3 | /** 4 | * Created by dubin on 05/10/2017. 5 | */ 6 | public interface OffsetsDao { 7 | void insert(); 8 | } 9 | -------------------------------------------------------------------------------- /src/main/java/common/factory/KafkaMBeanServiceFactory.java: -------------------------------------------------------------------------------- 1 | package common.factory; 2 | 3 | /** 4 | * Created by dubin on 29/09/2017. 5 | */ 6 | 7 | public class KafkaMBeanServiceFactory { 8 | public KafkaMBeanService create() { 9 | return new KafkaMBeanServiceImpl(); 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/main/java/common/protocol/BaseProtocol.java: -------------------------------------------------------------------------------- 1 | package common.protocol; 2 | 3 | import com.google.gson.Gson; 4 | 5 | /** 6 | * Created by dubin on 29/09/2017. 7 | */ 8 | public class BaseProtocol { 9 | 10 | public String toString() { 11 | return new Gson().toJson(this); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/main/java/app/KafkaInsight.java: -------------------------------------------------------------------------------- 1 | package app; 2 | 3 | import task.KafkaMBeanTask; 4 | import task.KafkaOffsetTask; 5 | 6 | import java.util.concurrent.ExecutorService; 7 | import java.util.concurrent.Executors; 8 | 9 | /** 10 | * Created by dubin on 05/10/2017. 
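 * <p>
 * Entry point: it submits the MBean collector and the offset collector to a
 * shared thread pool, and both tasks then loop forever. A launch sketch (the
 * jar name follows the pom and is illustrative; an external config file can be
 * supplied through the standard Typesafe Config system property):
 * <pre>
 * java -Dconfig.file=/path/to/application.conf \
 *      -cp target/Kafka-Insight-1.0-SNAPSHOT.jar app.KafkaInsight
 * </pre>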
11 | */ 12 | public class KafkaInsight { 13 | public static void main(String[] args) { 14 | ExecutorService es = Executors.newCachedThreadPool(); 15 | es.submit(new KafkaMBeanTask()); 16 | es.submit(new KafkaOffsetTask()); 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /src/main/java/common/protocol/MessageValueStructAndVersionInfo.java: -------------------------------------------------------------------------------- 1 | package common.protocol; 2 | 3 | import org.apache.kafka.common.protocol.types.Struct; 4 | 5 | /** 6 | * Created by dubin on 04/10/2017. 7 | */ 8 | public class MessageValueStructAndVersionInfo extends BaseProtocol { 9 | 10 | private Struct value; 11 | private Short version; 12 | 13 | public Struct getValue() { 14 | return value; 15 | } 16 | 17 | public void setValue(Struct value) { 18 | this.value = value; 19 | } 20 | 21 | public Short getVersion() { 22 | return version; 23 | } 24 | 25 | public void setVersion(Short version) { 26 | this.version = version; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/common/protocol/KeyAndValueSchemasInfo.java: -------------------------------------------------------------------------------- 1 | package common.protocol; 2 | 3 | import common.protocol.BaseProtocol; 4 | 5 | import org.apache.kafka.common.protocol.types.Schema; 6 | 7 | /** 8 | * Created by dubin on 04/10/2017. 9 | */ 10 | public class KeyAndValueSchemasInfo extends BaseProtocol{ 11 | 12 | private Schema keySchema; 13 | private Schema valueSchema; 14 | 15 | public Schema getKeySchema() { 16 | return keySchema; 17 | } 18 | 19 | public void setKeySchema(Schema keySchema) { 20 | this.keySchema = keySchema; 21 | } 22 | 23 | public Schema getValueSchema() { 24 | return valueSchema; 25 | } 26 | 27 | public void setValueSchema(Schema valueSchema) { 28 | this.valueSchema = valueSchema; 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | kafka { 2 | zkAddr = "localhost:2181" 3 | mbean { 4 | jmx { 5 | url.template = "service:jmx:rmi:///jndi/rmi://%s/jmxrm" 6 | mbean { 7 | fifteenMinuteRate = "FifteenMinuteRate" 8 | fiveMinuteRate = "FiveMinuteRate" 9 | oneMinuteRate = "OneMinuteRate" 10 | meanRate = "MeanRate" 11 | } 12 | task { 13 | items = ["bytesInPerSec", "bytesOutPerSec", "messagesInPerSec"] 14 | freq = "120" // Time to sleep when one round of mbean task done 15 | } 16 | } 17 | } 18 | offset { 19 | task { 20 | freq = "60" // Time to sleep when one round of offset task done 21 | } 22 | } 23 | db { 24 | influx { 25 | url = "http://localhost:8086" 26 | tableName { 27 | offset = "offsetsTimeSeries" 28 | mbean = "offsetsTimeSeries" 29 | } 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/test/java/common/util/StrUtilsTest.java: -------------------------------------------------------------------------------- 1 | package common.util; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | /** 6 | * Created by dubin on 30/09/2017. 
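 * <p>
 * Exercises the formatting helpers in {@link StrUtils}. One extra illustrative
 * case, not part of the suite: {@code StrUtils.stringify(1536L)} yields
 * {@code "1.50KB"}, since 1536 / 1024 = 1.5 and the formatter keeps two
 * decimal places.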
7 | */
8 | public class StrUtilsTest {
9 | @org.junit.Test
10 | public void stringify() throws Exception {
11 | assertEquals(StrUtils.stringify(1000), "1000B");
12 | assertEquals(StrUtils.stringify(1024L), "1.00KB");
13 | assertEquals(StrUtils.stringify(1024*1024L), "1.00MB");
14 | assertEquals(StrUtils.stringify(1024*1024*1024L), "1.00GB");
15 | assertEquals(StrUtils.stringify(1024*1024*1024*1024L), "1.00TB");
16 | }
17 |
18 | @org.junit.Test
19 | public void numberic() throws Exception {
20 | assertEquals(StrUtils.numberic("1"), 1.00, 0.000000001);
21 | assertNotEquals(StrUtils.numberic("1"), 0.99, 0.000000001);
22 | assertEquals(StrUtils.numberic("1.099"), 1.10, 0.000000001);
23 | }
24 |
25 | @org.junit.Test
26 | public void integer() throws Exception {
27 | assertEquals(StrUtils.integer(1.01), 1);
28 | }
29 |
30 | } --------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Build Status](https://travis-ci.org/dubin555/Kafka-Insight.svg?branch=master)](https://travis-ci.org/dubin555/Kafka-Insight)
2 | ![](https://img.shields.io/badge/language-java-orange.svg)
3 | [![Hex.pm](https://img.shields.io/hexpm/l/plug.svg)](https://github.com/dubin555/Kafka-Insight/blob/master/LICENSE)
4 | # Kafka-Insight
5 | This project monitors Kafka consumer offsets and broker MBean metrics.
6 | What does it look like? You can restyle it any way you want in Grafana; the screenshot below is just a first demo version.
7 | ![Consumer offset and mBean monitor](https://github.com/dubin555/Kafka-Insight/blob/master/png/consumer-offsets-demo1.png)
8 | ## Install
9 | ### Requirement
10 | * Kafka 0.10.x
11 | * Java 8
12 | * InfluxDB
13 |
14 | ### Compile
15 | ```bash
16 | mvn clean package
17 | ```
18 | ### Change the config file
19 | Modify "application.conf"; at a minimum, the parts below need to be changed.
20 | * kafka.zkAddr, the Zookeeper address
21 | * kafka.db.influx.url, the InfluxDB address
22 |
23 | ### Deploy
24 | Run the main class "app.KafkaInsight" any way you want.
25 | It runs as a single instance for now, so it suffers from a single point of failure; HA is part of the plan.
26 |
27 | ## Wiki
28 | For further documentation of the code, please refer to the [Wiki](https://github.com/dubin555/Kafka-Insight/wiki) --------------------------------------------------------------------------------
/src/main/java/common/protocol/BrokersInfo.java:
--------------------------------------------------------------------------------
1 | package common.protocol;
2 |
3 | /**
4 | * Created by dubin on 30/09/2017.
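 * <p>
 * Bean describing one broker registration. ZookeeperUtils populates it from
 * the JSON each broker writes under /brokers/ids/&lt;id&gt;, which looks
 * roughly like this (abridged; the exact field set varies by Kafka version):
 * <pre>
 * {"jmx_port":9999,"host":"localhost","port":9092,"version":4}
 * </pre>
 * Only host, port and jmx_port are read back; created and modify stay unset.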
5 | */ 6 | public class BrokersInfo extends BaseProtocol { 7 | private int id = 0; 8 | private String host = ""; 9 | private int port = 0; 10 | private String created = ""; 11 | private String modify = ""; 12 | private int jmxPort = 0; 13 | 14 | public int getJmxPort() { 15 | return jmxPort; 16 | } 17 | 18 | public void setJmxPort(int jmxPort) { 19 | this.jmxPort = jmxPort; 20 | } 21 | 22 | public int getId() { 23 | return id; 24 | } 25 | 26 | public void setId(int id) { 27 | this.id = id; 28 | } 29 | 30 | public String getHost() { 31 | return host; 32 | } 33 | 34 | public void setHost(String host) { 35 | this.host = host; 36 | } 37 | 38 | public int getPort() { 39 | return port; 40 | } 41 | 42 | public void setPort(int port) { 43 | this.port = port; 44 | } 45 | 46 | public String getCreated() { 47 | return created; 48 | } 49 | 50 | public void setCreated(String created) { 51 | this.created = created; 52 | } 53 | 54 | public String getModify() { 55 | return modify; 56 | } 57 | 58 | public void setModify(String modify) { 59 | this.modify = modify; 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/main/java/common/protocol/OffsetInfo.java: -------------------------------------------------------------------------------- 1 | package common.protocol; 2 | 3 | /** 4 | * Created by dubin on 03/10/2017. 5 | */ 6 | public class OffsetInfo extends BaseProtocol{ 7 | 8 | private String group; 9 | private String topic; 10 | private Long committedOffset; 11 | private Long logSize; 12 | private Long lag; 13 | private Long timestamp; 14 | 15 | public long getTimestamp() { 16 | return timestamp; 17 | } 18 | 19 | public void setTimestamp(long timestamp) { 20 | this.timestamp = timestamp; 21 | } 22 | 23 | public String getGroup() { 24 | return group; 25 | } 26 | 27 | public void setGroup(String group) { 28 | this.group = group; 29 | } 30 | 31 | public String getTopic() { 32 | return topic; 33 | } 34 | 35 | public void setTopic(String topic) { 36 | this.topic = topic; 37 | } 38 | 39 | public Long getCommittedOffset() { 40 | return committedOffset; 41 | } 42 | 43 | public void setCommittedOffset(Long committedOffset) { 44 | this.committedOffset = committedOffset; 45 | } 46 | 47 | public Long getLogSize() { 48 | return logSize; 49 | } 50 | 51 | public void setLogSize(Long logSize) { 52 | this.logSize = logSize; 53 | } 54 | 55 | public Long getLag() { 56 | return lag; 57 | } 58 | 59 | public void setLag(Long lag) { 60 | this.lag = lag; 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/main/java/common/protocol/MBeanInfo.java: -------------------------------------------------------------------------------- 1 | package common.protocol; 2 | 3 | /** 4 | * Created by dubin on 29/09/2017. 
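 * <p>
 * Carrier for the four meter rates read from a Kafka BrokerTopicMetrics MBean.
 * Through {@link BaseProtocol#toString()} it renders as Gson JSON, e.g. with
 * illustrative values:
 * <pre>
 * {"fifteenMinute":12.3,"fiveMinute":11.8,"meanRate":10.0,"oneMinute":9.7,"label":"bytesInPerSec","topic":"all"}
 * </pre>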
5 | */ 6 | public class MBeanInfo extends BaseProtocol{ 7 | 8 | private double fifteenMinute; 9 | private double fiveMinute; 10 | private double meanRate; 11 | private double oneMinute; 12 | private String label; 13 | private String topic; 14 | 15 | public String getTopic() { 16 | return topic; 17 | } 18 | 19 | public void setTopic(String topic) { 20 | this.topic = topic; 21 | } 22 | 23 | public String getLabel() { 24 | return label; 25 | } 26 | 27 | public void setLabel(String label) { 28 | this.label = label; 29 | } 30 | 31 | public double getFifteenMinute() { 32 | return fifteenMinute; 33 | } 34 | 35 | public void setFifteenMinute(double fifteenMinute) { 36 | this.fifteenMinute = fifteenMinute; 37 | } 38 | 39 | public double getFiveMinute() { 40 | return fiveMinute; 41 | } 42 | 43 | public void setFiveMinute(double fiveMinute) { 44 | this.fiveMinute = fiveMinute; 45 | } 46 | 47 | public double getMeanRate() { 48 | return meanRate; 49 | } 50 | 51 | public void setMeanRate(double meanRate) { 52 | this.meanRate = meanRate; 53 | } 54 | 55 | public double getOneMinute() { 56 | return oneMinute; 57 | } 58 | 59 | public void setOneMinute(double oneMinute) { 60 | this.oneMinute = oneMinute; 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /src/main/java/common/factory/KafkaMBeanService.java: -------------------------------------------------------------------------------- 1 | package common.factory; 2 | 3 | import common.protocol.MBeanInfo; 4 | 5 | import java.util.Map; 6 | 7 | /** 8 | * Created by dubin on 29/09/2017. 9 | */ 10 | public interface KafkaMBeanService { 11 | 12 | /** 13 | * Bytes in per second from Kafka JMX MBean 14 | * @param url 15 | * @return 16 | */ 17 | MBeanInfo bytesInPerSec(String url); 18 | 19 | /** 20 | * Bytes in per second per topic from Kafka JMX MBean 21 | * @param url 22 | * @param topic 23 | * @return 24 | */ 25 | MBeanInfo bytesInPerSec(String url, String topic); 26 | 27 | /** 28 | * Bytes out per second from Kafka JMX MBean 29 | * @param url 30 | * @return 31 | */ 32 | MBeanInfo bytesOutPerSec(String url); 33 | 34 | /** 35 | * Bytes out per second per topic from Kafka JMX MBean 36 | * @param url 37 | * @param topic 38 | * @return 39 | */ 40 | MBeanInfo bytesOutPerSec(String url, String topic); 41 | 42 | /** 43 | * Get brokers topic all partitions log end offset 44 | * @param uri 45 | * @param topic 46 | * @return 47 | */ 48 | Map logEndOffset(String uri, String topic); 49 | 50 | /** 51 | * Bytes in per second from all topics 52 | * @param uri 53 | * @return 54 | */ 55 | MBeanInfo messagesInPerSec(String uri); 56 | 57 | /** 58 | * Bytes out per second from one topic 59 | * @param uri 60 | * @param topic 61 | * @return 62 | */ 63 | MBeanInfo messagesInPerSec(String uri, String topic); 64 | } 65 | -------------------------------------------------------------------------------- /src/main/java/common/util/KafkaUtils.java: -------------------------------------------------------------------------------- 1 | package common.util; 2 | 3 | import common.protocol.BrokersInfo; 4 | import kafka.consumer.Consumer; 5 | import kafka.javaapi.consumer.ConsumerConnector; 6 | import kafka.server.KafkaConfig; 7 | import org.apache.kafka.clients.CommonClientConfigs; 8 | import org.apache.kafka.clients.consumer.ConsumerConfig; 9 | import org.apache.kafka.clients.consumer.KafkaConsumer; 10 | import scala.Array; 11 | 12 | import java.util.Properties; 13 | 14 | /** 15 | * Created by dubin on 02/10/2017. 
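 * <p>
 * Consumer-building helpers. A usage sketch (the group id is arbitrary and the
 * broker info comes from ZooKeeper):
 * <pre>{@code
 * BrokersInfo broker = ZookeeperUtils.getAllBrokersInfo().get(0);
 * KafkaConsumer<Array<Byte>, Array<Byte>> consumer =
 *         KafkaUtils.createNewKafkaConsumer(broker, "kafka-insight-probe");
 * }</pre>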
16 | */
17 | public class KafkaUtils {
18 |
19 | public static KafkaConsumer<Array<Byte>, Array<Byte>> createNewKafkaConsumer(BrokersInfo brokersInfo, String group) {
20 | Properties props = new Properties();
21 | props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokersInfo.getHost() + ":" + brokersInfo.getPort());
22 | props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
23 | props.put(ConsumerConfig.GROUP_ID_CONFIG, group);
24 | props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
25 | props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "300000");
26 | props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
27 | props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
28 | props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
29 |
30 | return new KafkaConsumer<>(props);
31 | }
32 |
33 | public static ConsumerConnector createConsumerConnector(String zkAddr, String group) {
34 | Properties props = new Properties();
35 | props.put(ConsumerConfig.GROUP_ID_CONFIG, group);
36 | props.put(ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG, "false");
37 | props.put(KafkaConfig.ZkConnectProp(), zkAddr);
38 | ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(props));
39 | return consumerConnector;
40 | }
41 | }
42 | --------------------------------------------------------------------------------
/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | #
2 | # Licensed to the Apache Software Foundation (ASF) under one or more
3 | # contributor license agreements.  See the NOTICE file distributed with
4 | # this work for additional information regarding copyright ownership.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License.  You may obtain a copy of the License at
8 | #
9 | #    http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # Set everything to be logged to the console
19 | log4j.rootCategory=INFO,stdout,stderr
20 | log4j.appender.console=org.apache.log4j.ConsoleAppender
21 | log4j.appender.console.target=System.err
22 | log4j.appender.console.layout=org.apache.log4j.EnhancedPatternLayout
23 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
24 |
25 | # configure stdout
26 | log4j.appender.stdout = org.apache.log4j.ConsoleAppender
27 | log4j.appender.stdout.Threshold = TRACE
28 | log4j.appender.stdout.Target = System.out
29 | log4j.appender.stdout.layout = org.apache.log4j.EnhancedPatternLayout
30 | log4j.appender.stdout.layout.ConversionPattern = %d{ISO8601}{GMT} %-5p %d [%t][%F:%L] : %m%n
31 | log4j.appender.stdout.filter.filter1=org.apache.log4j.varia.LevelRangeFilter
32 | log4j.appender.stdout.filter.filter1.levelMin=TRACE
33 | log4j.appender.stdout.filter.filter1.levelMax=INFO
34 |
35 | # configure stderr
36 | log4j.appender.stderr = org.apache.log4j.ConsoleAppender
37 | log4j.appender.stderr.Threshold = WARN
38 | log4j.appender.stderr.Target = System.err
39 | log4j.appender.stderr.layout = org.apache.log4j.EnhancedPatternLayout
40 | log4j.appender.stderr.layout.ConversionPattern = %d{ISO8601}{GMT} %-5p %d [%t][%F:%L] : %m%n --------------------------------------------------------------------------------
/src/main/java/task/KafkaOffsetTask.java:
--------------------------------------------------------------------------------
1 | package task;
2 |
3 | import com.typesafe.config.Config;
4 | import com.typesafe.config.ConfigFactory;
5 | import common.protocol.OffsetInfo;
6 | import core.KafkaOffsetGetter;
7 | import dao.OffsetsDao;
8 | import dao.impl.OffsetsInfluxDBDaoImpl;
9 | import org.apache.log4j.Logger;
10 |
11 | import java.util.List;
12 | import java.util.concurrent.ExecutorService;
13 | import java.util.concurrent.Executors;
14 |
15 | /**
16 | * Created by dubin on 04/10/2017.
17 | */
18 | public class KafkaOffsetTask implements Runnable {
19 |
20 | private static Config conf = ConfigFactory.load();
21 | private static Logger logger = Logger.getLogger(KafkaOffsetTask.class);
22 | private static int secondsToSleep = conf.getInt("kafka.offset.task.freq");
23 |
24 | /**
25 | * When an object implementing interface Runnable is used
26 | * to create a thread, starting the thread causes the object's
27 | * run method to be called in that separately executing
28 | * thread.
29 | * <p>

30 | * The general contract of the method run is that it may
31 | * take any action whatsoever.
32 | *
33 | * @see Thread#run()
34 | */
35 | @Override
36 | public void run() {
37 | while (true) {
38 | try {
39 | List<OffsetInfo> offsetInfoList = KafkaOffsetGetter.getOffsetQuarz();
40 | OffsetsDao offsetsDao = new OffsetsInfluxDBDaoImpl(offsetInfoList);
41 | offsetsDao.insert();
42 | } catch (Exception e) {
43 | logger.error("Offset task error happened " + e.getMessage());
44 | } finally {
45 | try {
46 | Thread.sleep(secondsToSleep * 1000);
47 | } catch (InterruptedException ex) {
48 | logger.error("Offset task thread sleep error " + ex.getMessage());
49 | }
50 | }
51 | }
52 | }
53 |
54 | public static void main(String[] args) {
55 | KafkaOffsetTask task = new KafkaOffsetTask();
56 | ExecutorService es = Executors.newSingleThreadExecutor();
57 | es.submit(task);
58 | }
59 | }
60 | --------------------------------------------------------------------------------
/src/main/java/common/util/StrUtils.java:
--------------------------------------------------------------------------------
1 | package common.util;
2 |
3 | import java.text.DecimalFormat;
4 |
5 | /**
6 | * Created by dubin on 29/09/2017.
7 | */
8 | public class StrUtils {
9 |
10 | private final static long KB_IN_BYTES = 1024;
11 |
12 | private final static long MB_IN_BYTES = 1024 * KB_IN_BYTES;
13 |
14 | private final static long GB_IN_BYTES = 1024 * MB_IN_BYTES;
15 |
16 | private final static long TB_IN_BYTES = 1024 * GB_IN_BYTES;
17 |
18 | private final static DecimalFormat df = new DecimalFormat("0.00");
19 |
20 | private static String SYSTEM_ENCODING = System.getProperty("file.encoding");
21 |
22 | static {
23 | if (SYSTEM_ENCODING == null) {
24 | SYSTEM_ENCODING = "UTF-8";
25 | }
26 | }
27 |
28 | private StrUtils() {
29 | }
30 |
31 | /** Format a byte count as B, KB, MB, GB or TB. */
32 | public static String stringify(long byteNumber) {
33 | if (byteNumber / TB_IN_BYTES > 0) {
34 | return df.format((double) byteNumber / (double) TB_IN_BYTES) + "TB";
35 | } else if (byteNumber / GB_IN_BYTES > 0) {
36 | return df.format((double) byteNumber / (double) GB_IN_BYTES) + "GB";
37 | } else if (byteNumber / MB_IN_BYTES > 0) {
38 | return df.format((double) byteNumber / (double) MB_IN_BYTES) + "MB";
39 | } else if (byteNumber / KB_IN_BYTES > 0) {
40 | return df.format((double) byteNumber / (double) KB_IN_BYTES) + "KB";
41 | } else {
42 | return String.valueOf(byteNumber) + "B";
43 | }
44 | }
45 |
46 | /** Format a number to two decimal places. */
47 | public static double numberic(String number) {
48 | DecimalFormat formatter = new DecimalFormat("###.##");
49 | return Double.valueOf(formatter.format(Double.valueOf(number)));
50 | }
51 |
52 | public static double numberic(Double number) {
53 | DecimalFormat formatter = new DecimalFormat("###.##");
54 | return Double.valueOf(formatter.format(number));
55 |
56 | }
57 |
58 | /** Round a double to the nearest long. */
59 | public static long integer(double number) {
60 | return Math.round(number);
61 | }
62 |
63 | /** Assemble a number into a human-readable byte string.
*/ 64 | public static String assembly(String number) { 65 | return stringify(integer(numberic(number))); 66 | } 67 | 68 | public static String assembly(Double number) { 69 | return stringify(integer(numberic(number))); 70 | } 71 | 72 | } 73 | -------------------------------------------------------------------------------- /src/main/java/dao/impl/OffsetsInfluxDBDaoImpl.java: -------------------------------------------------------------------------------- 1 | package dao.impl; 2 | 3 | import com.typesafe.config.Config; 4 | import com.typesafe.config.ConfigFactory; 5 | import common.protocol.OffsetInfo; 6 | import dao.OffsetsDao; 7 | import org.influxdb.InfluxDB; 8 | import org.influxdb.InfluxDBFactory; 9 | import org.influxdb.dto.BatchPoints; 10 | import org.influxdb.dto.Point; 11 | 12 | import java.util.List; 13 | import java.util.concurrent.TimeUnit; 14 | 15 | /** 16 | * Created by dubin on 05/10/2017. 17 | */ 18 | public class OffsetsInfluxDBDaoImpl implements OffsetsDao { 19 | 20 | private final String influxDBUrl; 21 | private final List offsetInfoList; 22 | private final String dbName; 23 | private static final Config conf = ConfigFactory.load(); 24 | 25 | public OffsetsInfluxDBDaoImpl(List offsetInfoList) { 26 | this.influxDBUrl = conf.getString("kafka.db.influx.url"); 27 | this.dbName = conf.getString("kafka.db.influx.tableName.offset"); 28 | this.offsetInfoList = offsetInfoList; 29 | } 30 | 31 | @Override 32 | public void insert() { 33 | InfluxDB influxDB = null; 34 | try { 35 | influxDB = InfluxDBFactory.connect(influxDBUrl); 36 | if (!influxDB.databaseExists(dbName)) { 37 | influxDB.createDatabase(dbName); 38 | } 39 | for (OffsetInfo offsetInfo : offsetInfoList) { 40 | String group = offsetInfo.getGroup(); 41 | String topic = offsetInfo.getTopic(); 42 | Long logSize = offsetInfo.getLogSize(); 43 | Long offsets = offsetInfo.getCommittedOffset(); 44 | Long lag = offsetInfo.getLag(); 45 | Long timestamp = offsetInfo.getTimestamp(); 46 | 47 | BatchPoints batchPoints = BatchPoints 48 | .database(dbName) 49 | .tag("group", group) 50 | .tag("topic", topic) 51 | .build(); 52 | Point point = Point.measurement("offsetsConsumer") 53 | .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) 54 | // .time(timestamp, TimeUnit.MILLISECONDS) 55 | .addField("logSize", logSize) 56 | .addField("offsets", offsets) 57 | .addField("lag", lag) 58 | .build(); 59 | batchPoints.point(point); 60 | influxDB.write(batchPoints); 61 | } 62 | } catch (Exception e) { 63 | e.printStackTrace(); 64 | } finally { 65 | if (influxDB != null) { 66 | influxDB.close(); 67 | } 68 | } 69 | 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/main/java/dao/impl/MBeansInfluxDBDaoImpl.java: -------------------------------------------------------------------------------- 1 | package dao.impl; 2 | 3 | import com.typesafe.config.Config; 4 | import com.typesafe.config.ConfigFactory; 5 | import common.protocol.MBeanInfo; 6 | import dao.MBeansDao; 7 | import org.influxdb.InfluxDB; 8 | import org.influxdb.InfluxDBFactory; 9 | import org.influxdb.dto.BatchPoints; 10 | import org.influxdb.dto.Point; 11 | 12 | import java.util.List; 13 | import java.util.concurrent.TimeUnit; 14 | 15 | /** 16 | * Created by dubin on 05/10/2017. 
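 * <p>
 * Writes each MBeanInfo as a point in the "mBeanMetric" measurement, tagged by
 * label and topic. Usage sketch (the list would normally come from
 * KafkaMBeanTask.runMBeanTask(); its checked exceptions are elided here):
 * <pre>{@code
 * List<MBeanInfo> metrics = KafkaMBeanTask.runMBeanTask();
 * new MBeansInfluxDBDaoImpl(metrics).insert();
 * }</pre>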
17 | */ 18 | public class MBeansInfluxDBDaoImpl implements MBeansDao { 19 | 20 | private final String influxDBUrl; 21 | private final List mBeanInfoList; 22 | private final String dbName; 23 | private static final Config conf = ConfigFactory.load(); 24 | 25 | public MBeansInfluxDBDaoImpl(List mBeanInfoList) { 26 | this.influxDBUrl = conf.getString("kafka.db.influx.url"); 27 | this.dbName = conf.getString("kafka.db.influx.tableName.mbean"); 28 | this.mBeanInfoList = mBeanInfoList; 29 | } 30 | 31 | @Override 32 | public void insert() { 33 | 34 | InfluxDB influxDB = null; 35 | try { 36 | influxDB = InfluxDBFactory.connect(influxDBUrl); 37 | if (!influxDB.databaseExists(dbName)) { 38 | influxDB.createDatabase(dbName); 39 | } 40 | for (MBeanInfo mBeanInfo : mBeanInfoList) { 41 | String label = mBeanInfo.getLabel(); 42 | String topic = mBeanInfo.getTopic(); 43 | double oneMinute = mBeanInfo.getOneMinute(); 44 | double fiveMinute = mBeanInfo.getFiveMinute(); 45 | double fifteenMinute = mBeanInfo.getFifteenMinute(); 46 | double meanRate = mBeanInfo.getMeanRate(); 47 | 48 | 49 | BatchPoints batchPoints = BatchPoints 50 | .database(dbName) 51 | .tag("label", label) 52 | .tag("topic", topic) 53 | .build(); 54 | Point point = Point.measurement("mBeanMetric") 55 | .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) 56 | // .time(timestamp, TimeUnit.MILLISECONDS) 57 | .addField("oneMinuteRate", oneMinute) 58 | .addField("fiveMinuteRate", fiveMinute) 59 | .addField("fifteenMinuteRate", fifteenMinute) 60 | .addField("meanRate", meanRate) 61 | .build(); 62 | batchPoints.point(point); 63 | influxDB.write(batchPoints); 64 | } 65 | } catch (Exception e) { 66 | e.printStackTrace(); 67 | } finally { 68 | if (influxDB != null) { 69 | influxDB.close(); 70 | } 71 | } 72 | 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | Kafka-Insight 8 | Kafka-Insight 9 | 1.0-SNAPSHOT 10 | 11 | 12 | 13 | 14 | org.mockito 15 | mockito-core 16 | 1.10.19 17 | test 18 | 19 | 20 | 21 | junit 22 | junit 23 | 4.12 24 | test 25 | 26 | 27 | 28 | com.typesafe 29 | config 30 | 1.2.1 31 | 32 | 33 | 34 | 35 | log4j 36 | log4j 37 | 1.2.17 38 | 39 | 40 | 41 | org.apache.kafka 42 | kafka_2.11 43 | 0.10.2.0 44 | 45 | 46 | org.apache.zookeeper 47 | zookeeper 48 | 49 | 50 | log4j 51 | log4j 52 | 53 | 54 | 55 | 56 | 57 | 58 | org.apache.zookeeper 59 | zookeeper 60 | 3.4.8 61 | 62 | 63 | 64 | com.google.guava 65 | guava 66 | 19.0 67 | 68 | 69 | com.google.code.gson 70 | gson 71 | 2.2.4 72 | 73 | 74 | 75 | com.alibaba 76 | fastjson 77 | 1.2.7 78 | 79 | 80 | 81 | org.influxdb 82 | influxdb-java 83 | 2.7 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | org.apache.maven.plugins 92 | maven-shade-plugin 93 | 3.0.0 94 | 95 | 96 | package 97 | 98 | shade 99 | 100 | 101 | 102 | 103 | 104 | org.apache.maven.plugins 105 | maven-compiler-plugin 106 | 3.6.0 107 | 108 | 1.8 109 | 1.8 110 | 111 | 112 | 113 | 114 | 115 | -------------------------------------------------------------------------------- /src/main/java/task/KafkaMBeanTask.java: -------------------------------------------------------------------------------- 1 | package task; 2 | 3 | import com.typesafe.config.Config; 4 | import com.typesafe.config.ConfigFactory; 5 | import common.factory.KafkaMBeanService; 6 | import common.factory.KafkaMBeanServiceImpl; 7 | import common.protocol.BrokersInfo; 8 | import common.protocol.MBeanInfo; 9 | import 
common.util.StrUtils;
10 | import common.util.ZookeeperUtils;
11 | import dao.MBeansDao;
12 | import dao.impl.MBeansInfluxDBDaoImpl;
13 | import org.apache.log4j.Logger;
14 |
15 | import java.lang.reflect.InvocationTargetException;
16 | import java.lang.reflect.Method;
17 | import java.util.*;
18 | import java.util.concurrent.ExecutorService;
19 | import java.util.concurrent.Executors;
20 |
21 | /**
22 | * Created by dubin on 30/09/2017.
23 | */
24 | public class KafkaMBeanTask implements Runnable {
25 |
26 | private static Config conf = ConfigFactory.load();
27 | private static Logger logger = Logger.getLogger(KafkaMBeanTask.class);
28 | private static int secondsToSleep = conf.getInt("kafka.mbean.jmx.task.freq");
29 |
30 | public static List<MBeanInfo> runMBeanTask() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
31 |
32 | List<String> taskItems = conf.getStringList("kafka.mbean.jmx.task.items");
33 |
34 | KafkaMBeanService kafkaMBeanService = new KafkaMBeanServiceImpl();
35 |
36 | List<Method> methodListNoTopic = new ArrayList<>();
37 |
38 | for (String taskItem: taskItems) {
39 | methodListNoTopic.add(kafkaMBeanService.getClass().getDeclaredMethod(taskItem, String.class));
40 | }
41 |
42 | List<BrokersInfo> brokersInfos = ZookeeperUtils.getAllBrokersInfo();
43 |
44 | List<MBeanInfo> res = new ArrayList<>();
45 |
46 | for (Method taskMethod: methodListNoTopic) {
47 | List<MBeanInfo> mBeanInfos = new ArrayList<>();
48 | for (BrokersInfo brokersInfo: brokersInfos) {
49 | String url = brokersInfo.getHost() + ":" + brokersInfo.getJmxPort();
50 | MBeanInfo mBeanInfo = (MBeanInfo) taskMethod.invoke(kafkaMBeanService, url);
51 | mBeanInfo.setTopic("all");
52 | mBeanInfo.setLabel(taskMethod.getName());
53 | mBeanInfos.add(mBeanInfo);
54 | }
55 | res.add(mergeMBeanInfoList(mBeanInfos));
56 | }
57 |
58 | return res;
59 | }
60 |
61 | /**
62 | * Merge per-broker readings of one metric by summing the four rates. I don't know how to do this with functional programming in Java 8; it would be easy in Scala.
63 | * Ugly!
64 | * @param mBeanInfos per-broker readings of a single metric
65 | * @return the merged MBeanInfo, or null if the list is empty
66 | */
67 | private static MBeanInfo mergeMBeanInfoList(List<MBeanInfo> mBeanInfos) {
68 | if (mBeanInfos == null || mBeanInfos.isEmpty()) { return null; }
69 | double fifteenMinute = 0;
70 | double fiveMinute = 0;
71 | double meanRate = 0;
72 | double oneMinute = 0;
73 | for (MBeanInfo mBeanInfo: mBeanInfos) {
74 | fifteenMinute += mBeanInfo.getFifteenMinute();
75 | fiveMinute += mBeanInfo.getFiveMinute();
76 | meanRate += mBeanInfo.getMeanRate();
77 | oneMinute += mBeanInfo.getOneMinute();
78 | }
79 | MBeanInfo res = new MBeanInfo();
80 | res.setLabel(mBeanInfos.get(0).getLabel());
81 | res.setTopic(mBeanInfos.get(0).getTopic());
82 | res.setFifteenMinute(fifteenMinute);
83 | res.setFiveMinute(fiveMinute);
84 | res.setOneMinute(oneMinute);
85 | res.setMeanRate(meanRate);
86 | System.out.println(res);
87 | return res;
88 | }
89 |
90 | /**
91 | * When an object implementing interface Runnable is used
92 | * to create a thread, starting the thread causes the object's
93 | * run method to be called in that separately executing
94 | * thread.
95 | * <p>

96 | * The general contract of the method run is that it may
97 | * take any action whatsoever.
98 | *
99 | * @see Thread#run()
100 | */
101 | @Override
102 | public void run() {
103 | while (true) {
104 | try {
105 | List<MBeanInfo> r = runMBeanTask();
106 | MBeansDao mBeansDao = new MBeansInfluxDBDaoImpl(r);
107 | mBeansDao.insert();
108 | } catch (Exception e) {
109 | logger.error("MBean service error " + e.getMessage());
110 |
111 | } finally {
112 | try {
113 | // sleep for a while anyway
114 | Thread.sleep(secondsToSleep * 1000);
115 | } catch (InterruptedException e) {
116 | logger.error("Thread sleep error " + e.getMessage());
117 | }
118 | }
119 | }
120 | }
121 |
122 | public static void main(String[] args) {
123 | KafkaMBeanTask kafkaMBeanTask = new KafkaMBeanTask();
124 | ExecutorService es = Executors.newSingleThreadExecutor();
125 | es.execute(kafkaMBeanTask);
126 | }
127 | }
128 | --------------------------------------------------------------------------------
/src/main/java/common/util/ZookeeperUtils.java:
--------------------------------------------------------------------------------
1 | package common.util;
2 |
3 | import com.alibaba.fastjson.JSON;
4 | import com.alibaba.fastjson.JSONObject;
5 | import com.typesafe.config.Config;
6 | import com.typesafe.config.ConfigFactory;
7 | import common.protocol.BrokersInfo;
8 | import kafka.consumer.ConsumerThreadId;
9 | import kafka.utils.ZKStringSerializer$;
10 | import kafka.utils.ZkUtils;
11 | import org.I0Itec.zkclient.ZkClient;
12 | import org.apache.log4j.Logger;
13 | import org.apache.zookeeper.data.Stat;
14 | import scala.Option;
15 | import scala.Tuple2;
16 | import scala.collection.JavaConversions;
17 | import scala.collection.Seq;
18 |
19 | import java.util.*;
20 |
21 | /**
22 | * Created by dubin on 30/09/2017.
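 * <p>
 * Reads cluster metadata from the ZooKeeper paths /brokers/ids, /brokers/topics
 * and /consumers. Sketch of the broker lookup (toString() renders Gson JSON via
 * BaseProtocol):
 * <pre>{@code
 * for (BrokersInfo broker : ZookeeperUtils.getAllBrokersInfo()) {
 *     System.out.println(broker);
 * }
 * }</pre>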
23 | */ 24 | public class ZookeeperUtils { 25 | 26 | private final static String BROKER_IDS_PATH = "/brokers/ids"; 27 | private final static String BROKER_TOPICS_PATH = "/brokers/topics"; 28 | private final static String CONSUMERS_PATH = "/consumers"; 29 | 30 | private final static Logger logger = Logger.getLogger(ZookeeperUtils.class); 31 | 32 | private static Config conf = ConfigFactory.load(); 33 | 34 | public static String getZkAddr() { 35 | return conf.getString("kafka.zkAddr"); 36 | } 37 | 38 | public static List getAllBrokersInfo() { 39 | String zkAddr = conf.getString("kafka.zkAddr"); 40 | List res = new ArrayList<>(); 41 | ZkClient zkClient = null; 42 | try { 43 | zkClient = new ZkClient(zkAddr, Integer.MAX_VALUE, 100000, ZKStringSerializer$.MODULE$); 44 | if (ZkUtils.apply(zkClient, false).pathExists(BROKER_IDS_PATH)) { 45 | Seq subBrokerIdsPaths = ZkUtils.apply(zkClient, false).getChildren(BROKER_IDS_PATH); 46 | List brokerIds = JavaConversions.seqAsJavaList(subBrokerIdsPaths); 47 | int id = 0; 48 | for (String ids: brokerIds) { 49 | try { 50 | Tuple2, Stat> tuple = ZkUtils.apply(zkClient, false).readDataMaybeNull(BROKER_IDS_PATH + "/" + ids); 51 | BrokersInfo brokersInfo = new BrokersInfo(); 52 | int port = JSON.parseObject(tuple._1.get()).getInteger("port"); 53 | brokersInfo.setHost(JSON.parseObject(tuple._1.get()).getString("host")); 54 | brokersInfo.setPort(JSON.parseObject(tuple._1.get()).getInteger("port")); 55 | brokersInfo.setJmxPort(JSON.parseObject(tuple._1.get()).getInteger("jmx_port")); 56 | brokersInfo.setId(++id); 57 | res.add(brokersInfo); 58 | } catch (Exception e){ 59 | logger.error("get sub broker info failed" + e.getMessage()); 60 | } 61 | } 62 | } 63 | } catch (Exception e) { 64 | logger.error("get Brokers info failed" + e.getMessage()); 65 | } finally { 66 | if (zkClient != null) { 67 | zkClient.close(); 68 | } 69 | } 70 | return res; 71 | } 72 | 73 | public static List getAllTopics() { 74 | String zkAddr = conf.getString("kafka.zkAddr"); 75 | List res = new ArrayList<>(); 76 | ZkClient zkClient = null; 77 | try { 78 | zkClient = new ZkClient(zkAddr, Integer.MAX_VALUE, 100000, ZKStringSerializer$.MODULE$); 79 | if (ZkUtils.apply(zkClient, false).pathExists(BROKER_TOPICS_PATH)) { 80 | Seq subBrokerTopicsPaths = ZkUtils.apply(zkClient, false).getChildren(BROKER_TOPICS_PATH); 81 | res = JavaConversions.seqAsJavaList(subBrokerTopicsPaths); 82 | } 83 | } catch (Exception e) { 84 | logger.error("get Brokers info failed" + e.getMessage()); 85 | } finally { 86 | if (zkClient != null) { 87 | zkClient.close(); 88 | } 89 | } 90 | return res; 91 | } 92 | 93 | public static Set getActiveTopics() { 94 | String zkAddr = conf.getString("kafka.zkAddr"); 95 | Set res = new HashSet<>(); 96 | ZkClient zkClient = null; 97 | try { 98 | zkClient = new ZkClient(zkAddr, Integer.MAX_VALUE, 100000, ZKStringSerializer$.MODULE$); 99 | Seq subConsumerPaths = ZkUtils.apply(zkClient, false).getChildren(CONSUMERS_PATH); 100 | List groups = JavaConversions.seqAsJavaList(subConsumerPaths); 101 | for (String group : groups) { 102 | scala.collection.mutable.Map> topics = ZkUtils.apply(zkClient, false).getConsumersPerTopic(group, false); 103 | for (Map.Entry entry : JavaConversions.mapAsJavaMap(topics).entrySet()) { 104 | String topic = entry.getKey(); 105 | System.out.println(topic); 106 | res.add(topic); 107 | } 108 | } 109 | } catch (Exception e) { 110 | logger.error("get Brokers info failed" + e.getMessage()); 111 | } finally { 112 | if (zkClient != null) { 113 | zkClient.close(); 114 | } 
115 | }
116 | return res;
117 | }
118 | }
119 | --------------------------------------------------------------------------------
/src/main/java/common/factory/KafkaMBeanServiceImpl.java:
--------------------------------------------------------------------------------
1 | package common.factory;
2 |
3 | import com.typesafe.config.Config;
4 | import com.typesafe.config.ConfigFactory;
5 | import common.protocol.MBeanInfo;
6 | import common.util.StrUtils;
7 | import org.apache.log4j.LogManager;
8 | import org.apache.log4j.Logger;
9 |
10 | import javax.management.MBeanServerConnection;
11 | import javax.management.ObjectName;
12 | import javax.management.remote.JMXConnector;
13 | import javax.management.remote.JMXConnectorFactory;
14 | import javax.management.remote.JMXServiceURL;
15 | import java.util.HashMap;
16 | import java.util.Map;
17 | import java.util.Set;
18 |
19 | /**
20 | * Created by dubin on 29/09/2017.
21 | */
22 | public class KafkaMBeanServiceImpl implements KafkaMBeanService {
23 |
24 | private Config conf = ConfigFactory.load();
25 | private Logger logger = LogManager.getLogger(KafkaMBeanServiceImpl.class);
26 | private String jmxUrl = "service:jmx:rmi:///jndi/rmi://%s/jmxrmi";
27 |
28 | /**
29 | * Bytes in per second from Kafka JMX MBean
30 | *
31 | * @param url
32 | * @return
33 | */
34 | @Override
35 | public MBeanInfo bytesInPerSec(String url) {
36 | String mbean = "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec";
37 | return common(url, mbean);
38 | }
39 |
40 | /**
41 | * Bytes in per second per topic from Kafka JMX MBean
42 | *
43 | * @param url
44 | * @param topic
45 | * @return
46 | */
47 | @Override
48 | public MBeanInfo bytesInPerSec(String url, String topic) {
49 | String mbean = "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic=" + topic;
50 | return common(url, mbean);
51 | }
52 |
53 | /**
54 | * Bytes out per second from Kafka JMX MBean
55 | *
56 | * @param url
57 | * @return
58 | */
59 | @Override
60 | public MBeanInfo bytesOutPerSec(String url) {
61 | String mbean = "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec";
62 | return common(url, mbean);
63 | }
64 |
65 | /**
66 | * Bytes out per second per topic from Kafka JMX MBean
67 | *
68 | * @param url
69 | * @param topic
70 | * @return
71 | */
72 | @Override
73 | public MBeanInfo bytesOutPerSec(String url, String topic) {
74 | String mbean = "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic=" + topic;
75 | return common(url, mbean);
76 | }
77 |
78 | /**
79 | * Get the log end offset of every partition of a topic on one broker
80 | *
81 | * @param url
82 | * @param topic
83 | * @return
84 | */
85 | @Override
86 | public Map<Integer, Long> logEndOffset(String url, String topic) {
87 | String mbean = "kafka.log:type=Log,name=LogEndOffset,topic=" + topic + ",partition=*";
88 | JMXConnector connector = null;
89 | Map<Integer, Long> endOffsets = new HashMap<>();
90 | try {
91 | JMXServiceURL jmxServiceUrl = new JMXServiceURL(String.format(jmxUrl, url));
92 | connector = JMXConnectorFactory.connect(jmxServiceUrl);
93 | MBeanServerConnection mbeanConnection = connector.getMBeanServerConnection();
94 | Set<ObjectName> objectNames = mbeanConnection.queryNames(new ObjectName(mbean), null);
95 | for (ObjectName objectName : objectNames) {
96 | int partition = Integer.valueOf(objectName.getKeyProperty("partition"));
97 | Object value = mbeanConnection.getAttribute(objectName, "Value");
98 | if (value != null) {
99 | endOffsets.put(partition, Long.valueOf(value.toString()));
100 | }
101 | }
102 | } catch (Exception e) {
103 |
logger.error("JMX service url[" + url + "] create has error,msg is " + e.getMessage()); 104 | } finally { 105 | try { 106 | if (connector != null) { 107 | connector.close(); 108 | } 109 | } catch (Exception e) { 110 | logger.error("Close JMXConnector[" + url + "] has error,msg is " + e.getMessage()); 111 | } 112 | } 113 | return endOffsets; 114 | } 115 | 116 | /** 117 | * Bytes in per second from all topics 118 | * 119 | * @param uri 120 | * @return 121 | */ 122 | @Override 123 | public MBeanInfo messagesInPerSec(String uri) { 124 | String mbean = "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec"; 125 | return common(uri, mbean); 126 | } 127 | 128 | /** 129 | * Bytes out per second from one topic 130 | * 131 | * @param uri 132 | * @param topic 133 | * @return 134 | */ 135 | @Override 136 | public MBeanInfo messagesInPerSec(String uri, String topic) { 137 | String mbean = "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=" + topic; 138 | return common(uri, mbean); 139 | } 140 | 141 | 142 | private MBeanInfo common(String uri, String mbean) { 143 | 144 | JMXConnector connector = null; 145 | MBeanInfo MBeanInfo = new MBeanInfo(); 146 | try { 147 | JMXServiceURL jmxSeriverUrl = new JMXServiceURL(String.format(jmxUrl, uri)); 148 | connector = JMXConnectorFactory.connect(jmxSeriverUrl); 149 | MBeanServerConnection mbeanConnection = connector.getMBeanServerConnection(); 150 | Object fifteenMinuteRate = mbeanConnection.getAttribute(new ObjectName(mbean), conf.getString("kafka.mbean.jmx.mbean.fifteenMinuteRate")); 151 | Object fiveMinuteRate = mbeanConnection.getAttribute(new ObjectName(mbean), conf.getString("kafka.mbean.jmx.mbean.fiveMinuteRate")); 152 | Object meanRate = mbeanConnection.getAttribute(new ObjectName(mbean), conf.getString("kafka.mbean.jmx.mbean.meanRate")); 153 | Object oneMinuteRate = mbeanConnection.getAttribute(new ObjectName(mbean), conf.getString("kafka.mbean.jmx.mbean.oneMinuteRate")); 154 | MBeanInfo.setFifteenMinute(StrUtils.numberic(fifteenMinuteRate.toString())); 155 | MBeanInfo.setFiveMinute(StrUtils.numberic(fiveMinuteRate.toString())); 156 | MBeanInfo.setMeanRate(StrUtils.numberic(meanRate.toString())); 157 | MBeanInfo.setOneMinute(StrUtils.numberic(oneMinuteRate.toString())); 158 | } catch (Exception e) { 159 | logger.error("JMX service url[" + uri + "] create has error,msg is " + e.getMessage()); 160 | } finally { 161 | if (connector != null) { 162 | try { 163 | connector.close(); 164 | } catch (Exception e) { 165 | logger.error("Close JMXConnector[" + uri + "] has error,msg is " + e.getMessage()); 166 | } 167 | } 168 | } 169 | return MBeanInfo; 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /src/main/java/core/KafkaOffsetGetter.java: -------------------------------------------------------------------------------- 1 | package core; 2 | 3 | import common.protocol.BrokersInfo; 4 | import common.protocol.KeyAndValueSchemasInfo; 5 | import common.protocol.MessageValueStructAndVersionInfo; 6 | import common.protocol.OffsetInfo; 7 | import common.util.KafkaUtils; 8 | import common.util.ZookeeperUtils; 9 | import kafka.common.OffsetMetadata; 10 | import kafka.consumer.ConsumerIterator; 11 | import kafka.consumer.KafkaStream; 12 | import kafka.coordinator.GroupTopicPartition; 13 | import kafka.javaapi.consumer.ConsumerConnector; 14 | import kafka.message.MessageAndMetadata; 15 | import org.apache.kafka.clients.consumer.KafkaConsumer; 16 | import kafka.common.OffsetAndMetadata; 17 | import org.apache.kafka.common.PartitionInfo; 18 | import org.apache.kafka.common.TopicPartition; 19 | import org.apache.kafka.common.protocol.types.Field; 20 | import org.apache.kafka.common.protocol.types.Schema; 21 | import org.apache.kafka.common.protocol.types.Struct; 22 | import org.apache.kafka.common.protocol.types.Type; 23 | import org.apache.log4j.Logger; 24 | import scala.Array; 25 | 26 | import java.nio.ByteBuffer; 27 | import java.util.*; 28 | import java.util.concurrent.ConcurrentHashMap; 29 | import java.util.concurrent.ExecutorService; 30 | import java.util.concurrent.Executors; 31 | 32 | /** 33 | * Created by dubin on 02/10/2017. 34 | */ 35 | public class KafkaOffsetGetter { 36 | 37 | private final static Logger logger = Logger.getLogger(KafkaOffsetGetter.class); 38 | 39 | public static Map kafkaConsumerOffsets = new ConcurrentHashMap<>(); 40 | 41 | public static Map logEndOffsetMap = new ConcurrentHashMap<>(); 42 | 43 | static { 44 | List brokersInfoList = ZookeeperUtils.getAllBrokersInfo(); 45 | ExecutorService es = Executors.newCachedThreadPool(); 46 | es.submit(new ConsumerOffsetListener(ZookeeperUtils.getZkAddr())); 47 | for (BrokersInfo brokersInfo: brokersInfoList) { 48 | es.submit(new LogOffsetListener(brokersInfo)); 49 | } 50 | } 51 | 52 | public static List getOffsetQuarz() { 53 | 54 | Map>> groupTopicPartitionListMap = new ConcurrentHashMap<>(); 55 | 56 | for (Map.Entry entry: kafkaConsumerOffsets.entrySet()) { 57 | GroupTopicPartition groupTopicPartition = entry.getKey(); 58 | OffsetAndMetadata offsetAndMetadata = entry.getValue(); 59 | String group = groupTopicPartition.group(); 60 | TopicPartition topicPartition = groupTopicPartition.topicPartition(); 61 | String topic = topicPartition.topic(); 62 | int partition = topicPartition.partition(); 63 | Long committedOffset = offsetAndMetadata.offset(); 64 | 65 | if (!logEndOffsetMap.containsKey(topicPartition)) { 66 | logger.error("The logEndOffsetMap not contains " + topicPartition); 67 | return null; 68 | } 69 | long logSize = logEndOffsetMap.get(topicPartition); 70 | 71 | // May the refresh operation thread take some time to update 72 | logSize = logSize >= committedOffset ? 
51 | 52 | public static List<OffsetInfo> getOffsetQuarz() { 53 | 54 | Map<String, Map<String, List<OffsetInfo>>> groupTopicPartitionListMap = new ConcurrentHashMap<>(); 55 | 56 | for (Map.Entry<GroupTopicPartition, OffsetAndMetadata> entry: kafkaConsumerOffsets.entrySet()) { 57 | GroupTopicPartition groupTopicPartition = entry.getKey(); 58 | OffsetAndMetadata offsetAndMetadata = entry.getValue(); 59 | String group = groupTopicPartition.group(); 60 | TopicPartition topicPartition = groupTopicPartition.topicPartition(); 61 | String topic = topicPartition.topic(); 62 | int partition = topicPartition.partition(); 63 | Long committedOffset = offsetAndMetadata.offset(); 64 | 65 | if (!logEndOffsetMap.containsKey(topicPartition)) { 66 | logger.error("The logEndOffsetMap does not contain " + topicPartition); 67 | return null; 68 | } 69 | long logSize = logEndOffsetMap.get(topicPartition); 70 | 71 | // The refresh thread may take some time to update the log-end offset 72 | logSize = logSize >= committedOffset ? logSize : committedOffset; 73 | long lag = committedOffset == -1 ? 0 : (logSize - committedOffset); 74 | 75 | OffsetInfo offsetInfo = new OffsetInfo(); 76 | offsetInfo.setGroup(group); 77 | offsetInfo.setTopic(topic); 78 | offsetInfo.setCommittedOffset(committedOffset); 79 | offsetInfo.setLogSize(logSize); 80 | offsetInfo.setLag(lag); 81 | offsetInfo.setTimestamp(offsetAndMetadata.commitTimestamp()); 82 | 83 | if (!groupTopicPartitionListMap.containsKey(group)) { 84 | Map<String, List<OffsetInfo>> topicPartitionMap = new ConcurrentHashMap<>(); 85 | groupTopicPartitionListMap.put(group, topicPartitionMap); 86 | } 87 | if (!groupTopicPartitionListMap.get(group).containsKey(topic)) { 88 | List<OffsetInfo> offsetInfos = new ArrayList<>(); 89 | groupTopicPartitionListMap.get(group).put(topic, offsetInfos); 90 | } 91 | groupTopicPartitionListMap.get(group).get(topic).add(offsetInfo); 92 | 93 | } 94 | return flattenNestedMap(groupTopicPartitionListMap); 95 | } 96 | 97 | private static List<OffsetInfo> flattenNestedMap(Map<String, Map<String, List<OffsetInfo>>> groupTopicPartitionListMap) { 98 | 99 | List<OffsetInfo> res = new ArrayList<>(); 100 | 101 | for (Map.Entry<String, Map<String, List<OffsetInfo>>> entry: groupTopicPartitionListMap.entrySet()) { 102 | String group = entry.getKey(); 103 | Map<String, List<OffsetInfo>> topicPartitionListMap = entry.getValue(); 104 | for (Map.Entry<String, List<OffsetInfo>> topicPartitionListEntry: topicPartitionListMap.entrySet()) { 105 | String topic = topicPartitionListEntry.getKey(); 106 | List<OffsetInfo> offsetInfos = topicPartitionListEntry.getValue(); 107 | long committedOffset = 0; 108 | long logSize = 0; 109 | long lag = 0; 110 | long timestamp = 0L; 111 | for (OffsetInfo offsetInfo: offsetInfos) { 112 | committedOffset += offsetInfo.getCommittedOffset(); 113 | logSize += offsetInfo.getLogSize(); 114 | lag += offsetInfo.getLag(); 115 | timestamp = offsetInfo.getTimestamp(); 116 | } 117 | OffsetInfo o = new OffsetInfo(); 118 | o.setGroup(group); 119 | o.setTopic(topic); 120 | o.setCommittedOffset(committedOffset); 121 | o.setLogSize(logSize); 122 | o.setLag(lag); 123 | o.setTimestamp(timestamp); 124 | res.add(o); 125 | } 126 | } 127 | return res; 128 | }
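flattenNestedMap collapses the per-partition OffsetInfo entries into one row per (group, topic): committedOffset, logSize, and lag are summed across partitions, while timestamp keeps whichever partition was iterated last. A self-contained arithmetic illustration with made-up numbers:

    // Illustrative arithmetic only (made-up values): three partitions of one
    // topic for a single group, aggregated the way flattenNestedMap does it.
    public class LagAggregationExample {
        public static void main(String[] args) {
            long[] logSizes  = {100, 200, 50};   // per-partition log-end offsets
            long[] committed = { 90, 150, 50};   // per-partition committed offsets
            long totalLogSize = 0, totalCommitted = 0, totalLag = 0;
            for (int p = 0; p < logSizes.length; p++) {
                totalLogSize   += logSizes[p];
                totalCommitted += committed[p];
                totalLag       += logSizes[p] - committed[p];
            }
            // Expected: committed=290 logSize=350 lag=60, i.e. one OffsetInfo row per (group, topic)
            System.out.printf("committed=%d logSize=%d lag=%d%n", totalCommitted, totalLogSize, totalLag);
        }
    }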
129 | 130 | 131 | public static class ConsumerOffsetListener implements Runnable { 132 | 133 | private final static String CONSUMER_OFFSET_TOPIC = "__consumer_offsets"; 134 | 135 | /** ============================ Start Filter ========================= */ 136 | private static Schema OFFSET_COMMIT_KEY_SCHEMA_V0 = new Schema(new Field("group", Type.STRING), new Field("topic", Type.STRING), new Field("partition", Type.INT32)); 137 | private static Field KEY_GROUP_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("group"); 138 | private static Field KEY_TOPIC_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("topic"); 139 | private static Field KEY_PARTITION_FIELD = OFFSET_COMMIT_KEY_SCHEMA_V0.get("partition"); 140 | 141 | private static Schema OFFSET_COMMIT_VALUE_SCHEMA_V0 = new Schema(new Field("offset", Type.INT64), new Field("metadata", Type.STRING, "Associated metadata.", ""), 142 | new Field("timestamp", Type.INT64)); 143 | 144 | private static Schema OFFSET_COMMIT_VALUE_SCHEMA_V1 = new Schema(new Field("offset", Type.INT64), new Field("metadata", Type.STRING, "Associated metadata.", ""), 145 | new Field("commit_timestamp", Type.INT64), new Field("expire_timestamp", Type.INT64)); 146 | 147 | private static Field VALUE_OFFSET_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("offset"); 148 | private static Field VALUE_METADATA_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("metadata"); 149 | private static Field VALUE_TIMESTAMP_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("timestamp"); 150 | 151 | private static Field VALUE_OFFSET_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("offset"); 152 | private static Field VALUE_METADATA_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("metadata"); 153 | private static Field VALUE_COMMIT_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("commit_timestamp"); 154 | /** ============================ End Filter ========================= */ 155 | 156 | private String zkAddr; 157 | private String group = "kafka-offset-insight-group"; 158 | 159 | public ConsumerOffsetListener(String zkAddr) { 160 | this.zkAddr = zkAddr; 161 | } 162 | 163 | /** 164 | * When an object implementing interface Runnable is used 165 | * to create a thread, starting the thread causes the object's 166 | * run method to be called in that separately executing 167 | * thread. 168 | * <p>
169 | * The general contract of the method run is that it may 170 | * take any action whatsoever. 171 | * 172 | * @see Thread#run() 173 | */ 174 | @Override 175 | public void run() { 176 | ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group); 177 | Map<String, Integer> topicCountMap = new HashMap<String, Integer>(); 178 | topicCountMap.put(CONSUMER_OFFSET_TOPIC, new Integer(1)); 179 | KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0); 180 | 181 | ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator(); 182 | while (true) { 183 | 184 | MessageAndMetadata<byte[], byte[]> offsetMsg = it.next(); 185 | if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) { 186 | try { 187 | GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key())); 188 | if (offsetMsg.message() == null) { 189 | continue; 190 | } 191 | kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message())); 192 | kafkaConsumerOffsets.put(commitKey, commitValue); 193 | } catch (Exception e) { 194 | e.printStackTrace(); 195 | } 196 | } 197 | } 198 | }
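The run() method above reads the internal __consumer_offsets topic through the old high-level consumer, which skips internal topics by default. KafkaUtils.createConsumerConnector (not shown in this dump) must therefore disable that filter. The sketch below is an assumption about what such a factory plausibly contains, using standard old-consumer property keys; it is not the repo's actual implementation:

    import java.util.Properties;
    import kafka.consumer.Consumer;
    import kafka.consumer.ConsumerConfig;
    import kafka.javaapi.consumer.ConsumerConnector;

    public class ConnectorFactorySketch {
        public static ConsumerConnector create(String zkAddr, String group) {
            Properties props = new Properties();
            props.put("zookeeper.connect", zkAddr);
            props.put("group.id", group);
            // The old high-level consumer hides internal topics by default;
            // without this setting no __consumer_offsets messages would arrive.
            props.put("exclude.internal.topics", "false");
            return Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        }
    }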
199 | 200 | /** Offset commit message schemas, keyed by schema version. */ 201 | @SuppressWarnings("serial") 202 | private static Map<Integer, KeyAndValueSchemasInfo> OFFSET_SCHEMAS = new HashMap<Integer, KeyAndValueSchemasInfo>() { 203 | { 204 | KeyAndValueSchemasInfo ks0 = new KeyAndValueSchemasInfo(); 205 | ks0.setKeySchema(OFFSET_COMMIT_KEY_SCHEMA_V0); 206 | ks0.setValueSchema(OFFSET_COMMIT_VALUE_SCHEMA_V0); 207 | put(0, ks0); 208 | 209 | KeyAndValueSchemasInfo ks1 = new KeyAndValueSchemasInfo(); 210 | ks1.setKeySchema(OFFSET_COMMIT_KEY_SCHEMA_V0); 211 | ks1.setValueSchema(OFFSET_COMMIT_VALUE_SCHEMA_V1); 212 | put(1, ks1); 213 | } 214 | }; 215 | 216 | private static KeyAndValueSchemasInfo schemaFor(int version) { 217 | return OFFSET_SCHEMAS.get(version); 218 | } 219 | 220 | /** Parses the key of a __consumer_offsets message from its buffer. */ 221 | private static GroupTopicPartition readMessageKey(ByteBuffer buffer) { 222 | short version = buffer.getShort(); 223 | Schema keySchema = schemaFor(version).getKeySchema(); 224 | Struct key = (Struct) keySchema.read(buffer); 225 | String group = key.getString(KEY_GROUP_FIELD); 226 | String topic = key.getString(KEY_TOPIC_FIELD); 227 | int partition = key.getInt(KEY_PARTITION_FIELD); 228 | return new GroupTopicPartition(group, new TopicPartition(topic, partition)); 229 | } 230 | 231 | /** Parses the value of a __consumer_offsets message into an OffsetAndMetadata. */ 232 | private static OffsetAndMetadata readMessageValue(ByteBuffer buffer) { 233 | MessageValueStructAndVersionInfo structAndVersion = readMessageValueStruct(buffer); 234 | if (structAndVersion.getValue() == null) { 235 | return null; 236 | } else { 237 | if (structAndVersion.getVersion() == 0) { 238 | long offset = structAndVersion.getValue().getLong(VALUE_OFFSET_FIELD_V0); 239 | String metadata = structAndVersion.getValue().getString(VALUE_METADATA_FIELD_V0); 240 | long timestamp = structAndVersion.getValue().getLong(VALUE_TIMESTAMP_FIELD_V0); 241 | return new OffsetAndMetadata(new OffsetMetadata(offset, metadata), timestamp, timestamp); 242 | } else if (structAndVersion.getVersion() == 1) { 243 | long offset = structAndVersion.getValue().getLong(VALUE_OFFSET_FIELD_V1); 244 | String metadata = structAndVersion.getValue().getString(VALUE_METADATA_FIELD_V1); 245 | long commitTimestamp = structAndVersion.getValue().getLong(VALUE_COMMIT_TIMESTAMP_FIELD_V1); 246 | return new OffsetAndMetadata(new OffsetMetadata(offset, metadata), commitTimestamp, commitTimestamp); 247 | } else { 248 | throw new IllegalStateException("Unknown offset message version: " + structAndVersion.getVersion()); 249 | } 250 | } 251 | } 252 | 253 | /** Reads the versioned value Struct out of its buffer. */ 254 | private static MessageValueStructAndVersionInfo readMessageValueStruct(ByteBuffer buffer) { 255 | MessageValueStructAndVersionInfo mvs = new MessageValueStructAndVersionInfo(); 256 | if (buffer == null) { 257 | mvs.setValue(null); 258 | mvs.setVersion(Short.valueOf("-1")); 259 | } else { 260 | short version = buffer.getShort(); 261 | Schema valueSchema = schemaFor(version).getValueSchema(); 262 | Struct value = (Struct) valueSchema.read(buffer); 263 | mvs.setValue(value); 264 | mvs.setVersion(version); 265 | } 266 | return mvs; 267 | } 268 | }
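The two-byte version prefix on each key is what the "< 2" check in run() inspects: versions 0 and 1 are offset commits (they share the key schema but differ in the value schema, where V0 carries a single timestamp and V1 splits it into commit_timestamp and expire_timestamp), while version 2 and above carry group metadata and are ignored. A minimal illustrative helper:

    import java.nio.ByteBuffer;

    public class OffsetKeyVersionSketch {
        // __consumer_offsets keys start with a two-byte schema version.
        // Versions 0/1 are offset commits (parsed by readMessageKey and
        // readMessageValue above); version 2+ is group metadata, which the
        // listener's "< 2" check skips.
        public static boolean isOffsetCommitKey(byte[] rawKey) {
            // Wrapping a fresh ByteBuffer leaves the caller's array untouched.
            return ByteBuffer.wrap(rawKey).getShort() < 2;
        }
    }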
269 | 270 | 271 | public static class LogOffsetListener implements Runnable { 272 | 273 | private BrokersInfo brokersInfo; 274 | 275 | public LogOffsetListener(BrokersInfo brokersInfo) { 276 | this.brokersInfo = brokersInfo; 277 | } 278 | 279 | /** 280 | * When an object implementing interface Runnable is used 281 | * to create a thread, starting the thread causes the object's 282 | * run method to be called in that separately executing 283 | * thread. 284 | * <p>
285 | * The general contract of the method run is that it may 286 | * take any action whatsoever. 287 | * 288 | * @see Thread#run() 289 | */ 290 | @Override 291 | public void run() { 292 | String group = "kafka-insight-logOffsetListener"; 293 | int sleepTime = 60000; 294 | KafkaConsumer<Array<Byte>, Array<Byte>> kafkaConsumer = null; 295 | 296 | while (true) { 297 | 298 | try { 299 | if (null == kafkaConsumer) { 300 | kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group); 301 | } 302 | 303 | Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics(); 304 | for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) { 305 | for (PartitionInfo partitionInfo : partitionInfoList) { 306 | TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition()); 307 | Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition); 308 | kafkaConsumer.assign(topicPartitions); 309 | kafkaConsumer.seekToEnd(topicPartitions); 310 | Long logEndOffset = kafkaConsumer.position(topicPartition); 311 | logEndOffsetMap.put(topicPartition, logEndOffset); 312 | } 313 | } 314 | 315 | Thread.sleep(sleepTime); 316 | 317 | } catch (Exception e) { 318 | e.printStackTrace(); 319 | if (null != kafkaConsumer) { 320 | kafkaConsumer.close(); 321 | kafkaConsumer = null; 322 | } 323 | } 324 | } 325 | 326 | } 327 | } 328 | } 329 | --------------------------------------------------------------------------------
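LogOffsetListener derives each log-end offset with a per-partition assign/seekToEnd/position round trip. On Kafka clients 0.10.1 and later, KafkaConsumer.endOffsets() returns the same information for many partitions in one batched call. A hedged alternative sketch, not part of the original source:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.PartitionInfo;
    import org.apache.kafka.common.TopicPartition;

    public class LogEndOffsetSketch {
        // Batched alternative to the assign/seekToEnd/position loop above;
        // endOffsets() is available on Kafka clients 0.10.1+.
        public static Map<TopicPartition, Long> fetchLogEndOffsets(KafkaConsumer<byte[], byte[]> consumer) {
            List<TopicPartition> partitions = new ArrayList<>();
            for (List<PartitionInfo> infos : consumer.listTopics().values()) {
                for (PartitionInfo info : infos) {
                    partitions.add(new TopicPartition(info.topic(), info.partition()));
                }
            }
            return consumer.endOffsets(partitions);
        }
    }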